Browse Source

coordinator receives unassignment ack

pull/5890/head
chrislu 8 months ago
parent
commit
d40b350f54
  1. 7
      weed/mq/broker/broker_grpc_sub_coordinator.go
  2. 9
      weed/mq/client/sub_client/connect_to_sub_coordinator.go
  3. 7
      weed/mq/client/sub_client/subscribe.go
  4. 2
      weed/mq/client/sub_client/subscriber.go
  5. 6
      weed/mq/sub_coordinator/consumer_group.go
  6. 4
      weed/pb/mq.proto
  7. 959
      weed/pb/mq_pb/mq.pb.go

7
weed/mq/broker/broker_grpc_sub_coordinator.go

@@ -38,11 +38,16 @@ func (b *MessageQueueBroker) SubscriberToSubCoordinator(stream mq_pb.SeaweedMess
go func() { go func() {
// process ack messages // process ack messages
for { for {
_, err := stream.Recv()
req, err := stream.Recv()
if err != nil { if err != nil {
glog.V(0).Infof("subscriber %s/%s/%s receive: %v", initMessage.ConsumerGroup, initMessage.ConsumerGroupInstanceId, initMessage.Topic, err) glog.V(0).Infof("subscriber %s/%s/%s receive: %v", initMessage.ConsumerGroup, initMessage.ConsumerGroupInstanceId, initMessage.Topic, err)
} }
if ackUnAssignment := req.GetAckUnAssignment(); ackUnAssignment != nil {
glog.V(0).Infof("subscriber %s/%s/%s ack close of %v", initMessage.ConsumerGroup, initMessage.ConsumerGroupInstanceId, initMessage.Topic, ackUnAssignment)
cgi.AckUnAssignment(ackUnAssignment)
}
select { select {
case <-ctx.Done(): case <-ctx.Done():
err := ctx.Err() err := ctx.Err()

9
weed/mq/client/sub_client/connect_to_sub_coordinator.go

@@ -56,6 +56,15 @@ func (sub *TopicSubscriber) doKeepConnectedToSubCoordinator() {
return err return err
} }
go func() {
for reply := range sub.brokerPartitionAssignmentAckChan {
if err := stream.Send(reply); err != nil {
glog.V(0).Infof("subscriber %s reply: %v", sub.ContentConfig.Topic, err)
return
}
}
}()
// keep receiving messages from the sub coordinator // keep receiving messages from the sub coordinator
for { for {
resp, err := stream.Recv() resp, err := stream.Recv()

7
weed/mq/client/sub_client/subscribe.go

@@ -65,6 +65,13 @@ func (sub *TopicSubscriber) startProcessors() {
} else { } else {
glog.V(0).Infof("subscriber %s/%s partition %+v at %v completed", sub.ContentConfig.Topic, sub.SubscriberConfig.ConsumerGroup, assigned.Partition, assigned.LeaderBroker) glog.V(0).Infof("subscriber %s/%s partition %+v at %v completed", sub.ContentConfig.Topic, sub.SubscriberConfig.ConsumerGroup, assigned.Partition, assigned.LeaderBroker)
} }
sub.brokerPartitionAssignmentAckChan <- &mq_pb.SubscriberToSubCoordinatorRequest{
Message: &mq_pb.SubscriberToSubCoordinatorRequest_AckUnAssignment{
AckUnAssignment: &mq_pb.SubscriberToSubCoordinatorRequest_AckUnAssignmentMessage{
Partition: assigned.Partition,
},
},
}
}(assigned.PartitionAssignment, topicPartition) }(assigned.PartitionAssignment, topicPartition)
} }
if unAssignment := message.GetUnAssignment(); unAssignment != nil { if unAssignment := message.GetUnAssignment(); unAssignment != nil {

2
weed/mq/client/sub_client/subscriber.go

@@ -30,6 +30,7 @@ type TopicSubscriber struct {
SubscriberConfig *SubscriberConfiguration SubscriberConfig *SubscriberConfiguration
ContentConfig *ContentConfiguration ContentConfig *ContentConfiguration
brokerPartitionAssignmentChan chan *mq_pb.SubscriberToSubCoordinatorResponse brokerPartitionAssignmentChan chan *mq_pb.SubscriberToSubCoordinatorResponse
brokerPartitionAssignmentAckChan chan *mq_pb.SubscriberToSubCoordinatorRequest
OnEachMessageFunc OnEachMessageFunc OnEachMessageFunc OnEachMessageFunc
OnCompletionFunc OnCompletionFunc OnCompletionFunc OnCompletionFunc
bootstrapBrokers []string bootstrapBrokers []string
@@ -43,6 +44,7 @@ func NewTopicSubscriber(bootstrapBrokers []string, subscriber *SubscriberConfigu
SubscriberConfig: subscriber, SubscriberConfig: subscriber,
ContentConfig: content, ContentConfig: content,
brokerPartitionAssignmentChan: make(chan *mq_pb.SubscriberToSubCoordinatorResponse, 1024), brokerPartitionAssignmentChan: make(chan *mq_pb.SubscriberToSubCoordinatorResponse, 1024),
brokerPartitionAssignmentAckChan: make(chan *mq_pb.SubscriberToSubCoordinatorRequest, 1024),
bootstrapBrokers: bootstrapBrokers, bootstrapBrokers: bootstrapBrokers,
waitForMoreMessage: true, waitForMoreMessage: true,
activeProcessors: make(map[topic.Partition]*ProcessorState), activeProcessors: make(map[topic.Partition]*ProcessorState),

6
weed/mq/sub_coordinator/consumer_group.go

@@ -1,6 +1,7 @@
package sub_coordinator package sub_coordinator
import ( import (
"fmt"
cmap "github.com/orcaman/concurrent-map/v2" cmap "github.com/orcaman/concurrent-map/v2"
"github.com/seaweedfs/seaweedfs/weed/glog" "github.com/seaweedfs/seaweedfs/weed/glog"
"github.com/seaweedfs/seaweedfs/weed/mq/pub_balancer" "github.com/seaweedfs/seaweedfs/weed/mq/pub_balancer"
@@ -16,6 +17,11 @@ type ConsumerGroupInstance struct {
ResponseChan chan *mq_pb.SubscriberToSubCoordinatorResponse ResponseChan chan *mq_pb.SubscriberToSubCoordinatorResponse
MaxPartitionCount int32 MaxPartitionCount int32
} }
// AckUnAssignment is called by the broker-side coordinator stream handler
// (see broker_grpc_sub_coordinator.go in this commit) when a subscriber
// acknowledges an unassignment of a partition.
// NOTE(review): currently a stub — it only prints the ack message and
// updates no coordinator state. Presumably a follow-up commit will record
// the ack so rebalancing can proceed; confirm before relying on it.
func (i ConsumerGroupInstance) AckUnAssignment(assignment *mq_pb.SubscriberToSubCoordinatorRequest_AckUnAssignmentMessage) {
fmt.Printf("ack unassignment %v\n", assignment)
}
type ConsumerGroup struct { type ConsumerGroup struct {
topic topic.Topic topic topic.Topic
// map a consumer group instance id to a consumer group instance // map a consumer group instance id to a consumer group instance

4
weed/pb/mq.proto

@@ -177,8 +177,12 @@ message SubscriberToSubCoordinatorRequest {
// Default is 10 seconds. // Default is 10 seconds.
int32 rebalance_seconds = 5; int32 rebalance_seconds = 5;
} }
message AckUnAssignmentMessage {
Partition partition = 1;
}
oneof message { oneof message {
InitMessage init = 1; InitMessage init = 1;
AckUnAssignmentMessage ack_un_assignment = 2;
} }
} }
message SubscriberToSubCoordinatorResponse { message SubscriberToSubCoordinatorResponse {

959
weed/pb/mq_pb/mq.pb.go
File diff suppressed because it is too large
View File

Loading…
Cancel
Save