Browse Source

coordinator receives unassignment ack

pull/5890/head
chrislu 7 months ago
parent
commit
d40b350f54
  1. 7
      weed/mq/broker/broker_grpc_sub_coordinator.go
  2. 9
      weed/mq/client/sub_client/connect_to_sub_coordinator.go
  3. 7
      weed/mq/client/sub_client/subscribe.go
  4. 32
      weed/mq/client/sub_client/subscriber.go
  5. 6
      weed/mq/sub_coordinator/consumer_group.go
  6. 4
      weed/pb/mq.proto
  7. 1041
      weed/pb/mq_pb/mq.pb.go

7
weed/mq/broker/broker_grpc_sub_coordinator.go

@@ -38,11 +38,16 @@ func (b *MessageQueueBroker) SubscriberToSubCoordinator(stream mq_pb.SeaweedMess
go func() {
// process ack messages
for {
_, err := stream.Recv()
req, err := stream.Recv()
if err != nil {
glog.V(0).Infof("subscriber %s/%s/%s receive: %v", initMessage.ConsumerGroup, initMessage.ConsumerGroupInstanceId, initMessage.Topic, err)
}
if ackUnAssignment := req.GetAckUnAssignment(); ackUnAssignment != nil {
glog.V(0).Infof("subscriber %s/%s/%s ack close of %v", initMessage.ConsumerGroup, initMessage.ConsumerGroupInstanceId, initMessage.Topic, ackUnAssignment)
cgi.AckUnAssignment(ackUnAssignment)
}
select {
case <-ctx.Done():
err := ctx.Err()

9
weed/mq/client/sub_client/connect_to_sub_coordinator.go

@@ -56,6 +56,15 @@ func (sub *TopicSubscriber) doKeepConnectedToSubCoordinator() {
return err
}
go func() {
for reply := range sub.brokerPartitionAssignmentAckChan {
if err := stream.Send(reply); err != nil {
glog.V(0).Infof("subscriber %s reply: %v", sub.ContentConfig.Topic, err)
return
}
}
}()
// keep receiving messages from the sub coordinator
for {
resp, err := stream.Recv()

7
weed/mq/client/sub_client/subscribe.go

@@ -65,6 +65,13 @@ func (sub *TopicSubscriber) startProcessors() {
} else {
glog.V(0).Infof("subscriber %s/%s partition %+v at %v completed", sub.ContentConfig.Topic, sub.SubscriberConfig.ConsumerGroup, assigned.Partition, assigned.LeaderBroker)
}
sub.brokerPartitionAssignmentAckChan <- &mq_pb.SubscriberToSubCoordinatorRequest{
Message: &mq_pb.SubscriberToSubCoordinatorRequest_AckUnAssignment{
AckUnAssignment: &mq_pb.SubscriberToSubCoordinatorRequest_AckUnAssignmentMessage{
Partition: assigned.Partition,
},
},
}
}(assigned.PartitionAssignment, topicPartition)
}
if unAssignment := message.GetUnAssignment(); unAssignment != nil {

32
weed/mq/client/sub_client/subscriber.go

@@ -27,25 +27,27 @@ type OnEachMessageFunc func(key, value []byte) (err error)
type OnCompletionFunc func()
type TopicSubscriber struct {
SubscriberConfig *SubscriberConfiguration
ContentConfig *ContentConfiguration
brokerPartitionAssignmentChan chan *mq_pb.SubscriberToSubCoordinatorResponse
OnEachMessageFunc OnEachMessageFunc
OnCompletionFunc OnCompletionFunc
bootstrapBrokers []string
waitForMoreMessage bool
activeProcessors map[topic.Partition]*ProcessorState
activeProcessorsLock sync.Mutex
SubscriberConfig *SubscriberConfiguration
ContentConfig *ContentConfiguration
brokerPartitionAssignmentChan chan *mq_pb.SubscriberToSubCoordinatorResponse
brokerPartitionAssignmentAckChan chan *mq_pb.SubscriberToSubCoordinatorRequest
OnEachMessageFunc OnEachMessageFunc
OnCompletionFunc OnCompletionFunc
bootstrapBrokers []string
waitForMoreMessage bool
activeProcessors map[topic.Partition]*ProcessorState
activeProcessorsLock sync.Mutex
}
func NewTopicSubscriber(bootstrapBrokers []string, subscriber *SubscriberConfiguration, content *ContentConfiguration) *TopicSubscriber {
return &TopicSubscriber{
SubscriberConfig: subscriber,
ContentConfig: content,
brokerPartitionAssignmentChan: make(chan *mq_pb.SubscriberToSubCoordinatorResponse, 1024),
bootstrapBrokers: bootstrapBrokers,
waitForMoreMessage: true,
activeProcessors: make(map[topic.Partition]*ProcessorState),
SubscriberConfig: subscriber,
ContentConfig: content,
brokerPartitionAssignmentChan: make(chan *mq_pb.SubscriberToSubCoordinatorResponse, 1024),
brokerPartitionAssignmentAckChan: make(chan *mq_pb.SubscriberToSubCoordinatorRequest, 1024),
bootstrapBrokers: bootstrapBrokers,
waitForMoreMessage: true,
activeProcessors: make(map[topic.Partition]*ProcessorState),
}
}

6
weed/mq/sub_coordinator/consumer_group.go

@@ -1,6 +1,7 @@
package sub_coordinator
import (
"fmt"
cmap "github.com/orcaman/concurrent-map/v2"
"github.com/seaweedfs/seaweedfs/weed/glog"
"github.com/seaweedfs/seaweedfs/weed/mq/pub_balancer"
@@ -16,6 +17,11 @@ type ConsumerGroupInstance struct {
ResponseChan chan *mq_pb.SubscriberToSubCoordinatorResponse
MaxPartitionCount int32
}
// AckUnAssignment handles the subscriber's acknowledgement that it has
// finished with a partition the coordinator un-assigned from it.
// NOTE(review): this is currently a stub — it only prints the ack message to
// stdout via fmt.Printf and mutates no state. Presumably the real handling
// (releasing the partition and using glog like the rest of this package) is
// still TODO — confirm before relying on this ack having any effect.
func (i ConsumerGroupInstance) AckUnAssignment(assignment *mq_pb.SubscriberToSubCoordinatorRequest_AckUnAssignmentMessage) {
fmt.Printf("ack unassignment %v\n", assignment)
}
type ConsumerGroup struct {
topic topic.Topic
// map a consumer group instance id to a consumer group instance

4
weed/pb/mq.proto

@@ -177,8 +177,12 @@ message SubscriberToSubCoordinatorRequest {
// Default is 10 seconds.
int32 rebalance_seconds = 5;
}
message AckUnAssignmentMessage {
Partition partition = 1;
}
oneof message {
InitMessage init = 1;
AckUnAssignmentMessage ack_un_assignment = 2;
}
}
message SubscriberToSubCoordinatorResponse {

1041
weed/pb/mq_pb/mq.pb.go
File diff suppressed because it is too large
View File

Loading…
Cancel
Save