
package sub_client

import (
	"sync"
	"time"

	"github.com/seaweedfs/seaweedfs/weed/glog"
	"github.com/seaweedfs/seaweedfs/weed/mq/topic"
	"github.com/seaweedfs/seaweedfs/weed/pb/mq_pb"
)

type ProcessorState struct {
}

// Subscribe subscribes to a topic's specified partitions.
// If a partition is moved to another broker, the subscriber will automatically reconnect to the new broker.
func (sub *TopicSubscriber) Subscribe() error {
	go sub.startProcessors()

	// loop forever
	sub.doKeepConnectedToSubCoordinator()

	return nil
}

func (sub *TopicSubscriber) startProcessors() {
	// listen to the messages from the sub coordinator
	// start one processor per partition
	var wg sync.WaitGroup
	semaphore := make(chan struct{}, sub.SubscriberConfig.MaxPartitionCount)

	for message := range sub.brokerPartitionAssignmentChan {
		if assigned := message.GetAssignment(); assigned != nil {
			wg.Add(1)
			semaphore <- struct{}{}

			topicPartition := topic.FromPbPartition(assigned.PartitionAssignment.Partition)

			// wait until no overlapping partition is still in progress
			sub.waitUntilNoOverlappingPartitionInFlight(topicPartition)

			// start a processor
			sub.activeProcessorsLock.Lock()
			sub.activeProcessors[topicPartition] = &ProcessorState{}
			sub.activeProcessorsLock.Unlock()

			go func(assigned *mq_pb.BrokerPartitionAssignment, topicPartition topic.Partition) {
				defer func() {
					sub.activeProcessorsLock.Lock()
					delete(sub.activeProcessors, topicPartition)
					sub.activeProcessorsLock.Unlock()

					<-semaphore
					wg.Done()
				}()
				glog.V(0).Infof("subscriber %s/%s assigned partition %+v at %v", sub.ContentConfig.Topic, sub.SubscriberConfig.ConsumerGroup, assigned.Partition, assigned.LeaderBroker)
				err := sub.onEachPartition(assigned)
				if err != nil {
					glog.V(0).Infof("subscriber %s/%s partition %+v at %v: %v", sub.ContentConfig.Topic, sub.SubscriberConfig.ConsumerGroup, assigned.Partition, assigned.LeaderBroker, err)
				} else {
					glog.V(0).Infof("subscriber %s/%s partition %+v at %v completed", sub.ContentConfig.Topic, sub.SubscriberConfig.ConsumerGroup, assigned.Partition, assigned.LeaderBroker)
				}
			}(assigned.PartitionAssignment, topicPartition)
		}
		if unAssignment := message.GetUnAssignment(); unAssignment != nil {
			// un-assignment is currently not handled here
		}
	}

	wg.Wait()
}

func (sub *TopicSubscriber) waitUntilNoOverlappingPartitionInFlight(topicPartition topic.Partition) {
	foundOverlapping := true
	for foundOverlapping {
		sub.activeProcessorsLock.Lock()
		foundOverlapping = false
		var overlappedPartition topic.Partition
		for partition := range sub.activeProcessors {
			if partition.Overlaps(topicPartition) {
				if partition.Equals(topicPartition) {
					continue
				}
				foundOverlapping = true
				overlappedPartition = partition
				break
			}
		}
		sub.activeProcessorsLock.Unlock()
		if foundOverlapping {
			glog.V(0).Infof("subscriber %s new partition %v waiting for partition %+v to complete", sub.ContentConfig.Topic, topicPartition, overlappedPartition)
			time.Sleep(1 * time.Second)
		}
	}
}
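
The goroutine fan-out in startProcessors follows a common Go pattern: a buffered channel used as a counting semaphore caps the number of concurrently processed partitions at MaxPartitionCount, while a sync.WaitGroup lets the function block until every in-flight processor has finished. The standalone sketch below reproduces just that pattern with simplified stand-ins (an int partition id and a plain channel instead of the broker assignment stream); it is not the real SeaweedFS API.

package main

import (
	"fmt"
	"sync"
	"time"
)

func main() {
	const maxPartitionCount = 2 // stand-in for SubscriberConfig.MaxPartitionCount

	// stand-in for brokerPartitionAssignmentChan: a stream of partition ids
	assignments := make(chan int)
	go func() {
		for p := 0; p < 5; p++ {
			assignments <- p
		}
		close(assignments)
	}()

	var wg sync.WaitGroup
	semaphore := make(chan struct{}, maxPartitionCount)

	for partition := range assignments {
		wg.Add(1)
		semaphore <- struct{}{} // blocks while maxPartitionCount processors are running
		go func(p int) {
			defer func() {
				<-semaphore // release the concurrency slot
				wg.Done()
			}()
			fmt.Printf("processing partition %d\n", p)
			time.Sleep(100 * time.Millisecond) // stand-in for onEachPartition
		}(partition)
	}

	// wait for all in-flight processors, mirroring wg.Wait() in startProcessors
	wg.Wait()
}

Releasing the semaphore slot inside the deferred function, as subscribe.go does, keeps the slot accounting correct even when the per-partition work returns early with an error.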