package sub_client

import (
	"sync"
	"time"

	"github.com/seaweedfs/seaweedfs/weed/glog"
	"github.com/seaweedfs/seaweedfs/weed/mq/topic"
	"github.com/seaweedfs/seaweedfs/weed/pb/mq_pb"
	"github.com/seaweedfs/seaweedfs/weed/util"
)
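
// ProcessorState tracks one running per-partition processor.
// Closing stopCh tells onEachPartition to stop consuming that partition.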
type ProcessorState struct {
	stopCh chan struct{}
}

// Subscribe subscribes to a topic's specified partitions.
// If a partition is moved to another broker, the subscriber will automatically reconnect to the new broker.
func (sub *TopicSubscriber) Subscribe() error {
	go sub.startProcessors()

	// loop forever
	// TODO shutdown the subscriber when not needed anymore
	sub.doKeepConnectedToSubCoordinator()

	return nil
}
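
// Usage sketch: Subscribe blocks inside doKeepConnectedToSubCoordinator, so a
// caller typically installs a message handler first and then calls Subscribe.
// How the TopicSubscriber is constructed is outside this file; the constructor
// below is a hypothetical placeholder, and this assumes OnEachMessageFunc is
// an exported, assignable field:
//
//	sub := NewTopicSubscriber(...) // hypothetical constructor, defined elsewhere
//	sub.OnEachMessageFunc = func(key, value []byte) error {
//		// handle one message; returning nil reports its offset for acking
//		return nil
//	}
//	if err := sub.Subscribe(); err != nil {
//		glog.Fatalf("subscribe: %v", err)
//	}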

func (sub *TopicSubscriber) startProcessors() {
	// listen to the messages from the sub coordinator
	// start one processor per partition, bounded by MaxPartitionCount
	var wg sync.WaitGroup
	semaphore := make(chan struct{}, sub.SubscriberConfig.MaxPartitionCount)

	for message := range sub.brokerPartitionAssignmentChan {
		if assigned := message.GetAssignment(); assigned != nil {
			wg.Add(1)
			semaphore <- struct{}{}

			topicPartition := topic.FromPbPartition(assigned.PartitionAssignment.Partition)

			// wait until no overlapping partition is still in progress
			sub.waitUntilNoOverlappingPartitionInFlight(topicPartition)

			// start a processor for this partition
			stopChan := make(chan struct{})
			sub.activeProcessorsLock.Lock()
			sub.activeProcessors[topicPartition] = &ProcessorState{
				stopCh: stopChan,
			}
			sub.activeProcessorsLock.Unlock()

			go func(assigned *mq_pb.BrokerPartitionAssignment, topicPartition topic.Partition) {
				defer func() {
					sub.activeProcessorsLock.Lock()
					delete(sub.activeProcessors, topicPartition)
					sub.activeProcessorsLock.Unlock()

					<-semaphore
					wg.Done()
				}()

				glog.V(0).Infof("subscriber %s/%s assigned partition %+v at %v", sub.ContentConfig.Topic, sub.SubscriberConfig.ConsumerGroup, assigned.Partition, assigned.LeaderBroker)

				// acknowledge the assignment to the sub coordinator
				sub.brokerPartitionAssignmentAckChan <- &mq_pb.SubscriberToSubCoordinatorRequest{
					Message: &mq_pb.SubscriberToSubCoordinatorRequest_AckAssignment{
						AckAssignment: &mq_pb.SubscriberToSubCoordinatorRequest_AckAssignmentMessage{
							Partition: assigned.Partition,
						},
					},
				}

				// process messages concurrently, up to SlidingWindowSize at a time;
				// only successfully processed messages report their offsets
				executors := util.NewLimitedConcurrentExecutor(int(sub.SubscriberConfig.SlidingWindowSize))
				onDataMessageFn := func(m *mq_pb.SubscribeMessageResponse_Data) {
					executors.Execute(func() {
						processErr := sub.OnEachMessageFunc(m.Data.Key, m.Data.Value)
						if processErr == nil {
							sub.PartitionOffsetChan <- KeyedOffset{
								Key:    m.Data.Key,
								Offset: m.Data.TsNs,
							}
						}
					})
				}

				err := sub.onEachPartition(assigned, stopChan, onDataMessageFn)
				if err != nil {
					glog.V(0).Infof("subscriber %s/%s partition %+v at %v: %v", sub.ContentConfig.Topic, sub.SubscriberConfig.ConsumerGroup, assigned.Partition, assigned.LeaderBroker, err)
				} else {
					glog.V(0).Infof("subscriber %s/%s partition %+v at %v completed", sub.ContentConfig.Topic, sub.SubscriberConfig.ConsumerGroup, assigned.Partition, assigned.LeaderBroker)
				}

				// acknowledge the un-assignment so the coordinator can rebalance
				sub.brokerPartitionAssignmentAckChan <- &mq_pb.SubscriberToSubCoordinatorRequest{
					Message: &mq_pb.SubscriberToSubCoordinatorRequest_AckUnAssignment{
						AckUnAssignment: &mq_pb.SubscriberToSubCoordinatorRequest_AckUnAssignmentMessage{
							Partition: assigned.Partition,
						},
					},
				}
			}(assigned.PartitionAssignment, topicPartition)
		}

		if unAssignment := message.GetUnAssignment(); unAssignment != nil {
			// stop the processor for the un-assigned partition
			topicPartition := topic.FromPbPartition(unAssignment.Partition)
			sub.activeProcessorsLock.Lock()
			if processor, found := sub.activeProcessors[topicPartition]; found {
				close(processor.stopCh)
				delete(sub.activeProcessors, topicPartition)
			}
			sub.activeProcessorsLock.Unlock()
		}
	}

	wg.Wait()
}
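
// waitUntilNoOverlappingPartitionInFlight blocks until no other active
// processor covers a partition range that overlaps topicPartition (the
// partition itself is skipped). This keeps an old processor from racing a
// new one over the same key range, e.g. while partition boundaries change
// during a reassignment.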
func (sub *TopicSubscriber) waitUntilNoOverlappingPartitionInFlight(topicPartition topic.Partition) {
	foundOverlapping := true
	for foundOverlapping {
		sub.activeProcessorsLock.Lock()
		foundOverlapping = false
		var overlappedPartition topic.Partition
		for partition := range sub.activeProcessors {
			if partition.Overlaps(topicPartition) {
				if partition.Equals(topicPartition) {
					continue
				}
				foundOverlapping = true
				overlappedPartition = partition
				break
			}
		}
		sub.activeProcessorsLock.Unlock()
		if foundOverlapping {
			glog.V(0).Infof("subscriber %s new partition %v waiting for partition %+v to complete", sub.ContentConfig.Topic, topicPartition, overlappedPartition)
			time.Sleep(1 * time.Second)
		}
	}
}
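
// Shutdown sketch, relating to the TODO in Subscribe: this file exposes no
// stop path yet. One possible approach, using only what is declared above,
// is to close every active processor's stopCh; the helper name below is
// hypothetical, not part of the current API:
//
//	func (sub *TopicSubscriber) stopAllProcessors() { // hypothetical helper
//		sub.activeProcessorsLock.Lock()
//		defer sub.activeProcessorsLock.Unlock()
//		for partition, processor := range sub.activeProcessors {
//			close(processor.stopCh)
//			delete(sub.activeProcessors, partition)
//		}
//	}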