Merge branch 'refs/heads/master' into fix_rclone_tests
# Conflicts:
#	.github/workflows/s3tests.yml
#	Makefile

pull/5443/head
259 changed files with 11334 additions and 5773 deletions
8  .github/workflows/binaries_dev.yml
4  .github/workflows/binaries_release0.yml
4  .github/workflows/binaries_release1.yml
4  .github/workflows/binaries_release2.yml
4  .github/workflows/binaries_release3.yml
4  .github/workflows/binaries_release4.yml
10  .github/workflows/container_dev.yml
10  .github/workflows/container_latest.yml
8  .github/workflows/container_release1.yml
8  .github/workflows/container_release2.yml
8  .github/workflows/container_release3.yml
10  .github/workflows/container_release4.yml
10  .github/workflows/container_release5.yml
2  .github/workflows/depsreview.yml
2  .github/workflows/helm_ci.yml
127  .github/workflows/s3tests.yml
7  .gitignore
2  Makefile
1  README.md
4  docker/Dockerfile.local
2  docker/Dockerfile.s3tests
6  docker/Makefile
7  docker/compose/local-brokers-compose.yml
184  go.mod
1354  go.sum
5  k8s/charts/seaweedfs/Chart.yaml
4  k8s/charts/seaweedfs/README.md
3214  k8s/charts/seaweedfs/dashboards/seaweedfs-grafana-dashboard.json
45  k8s/charts/seaweedfs/templates/_helpers.tpl
35  k8s/charts/seaweedfs/templates/cluster-role.yaml
4  k8s/charts/seaweedfs/templates/filer-ingress.yaml
3  k8s/charts/seaweedfs/templates/filer-servicemonitor.yaml
34  k8s/charts/seaweedfs/templates/filer-statefulset.yaml
4  k8s/charts/seaweedfs/templates/master-ingress.yaml
3  k8s/charts/seaweedfs/templates/master-servicemonitor.yaml
28  k8s/charts/seaweedfs/templates/master-statefulset.yaml
6  k8s/charts/seaweedfs/templates/post-install-bucket-hook.yaml
24  k8s/charts/seaweedfs/templates/s3-deployment.yaml
3  k8s/charts/seaweedfs/templates/s3-servicemonitor.yaml
37  k8s/charts/seaweedfs/templates/service-account.yaml
3  k8s/charts/seaweedfs/templates/volume-servicemonitor.yaml
28  k8s/charts/seaweedfs/templates/volume-statefulset.yaml
162  k8s/charts/seaweedfs/values.yaml
BIN  note/keepsec.png
BIN  note/piknik.png
13  other/java/client/src/main/proto/filer.proto
3  unmaintained/repeated_vacuum/repeated_vacuum.go
3  unmaintained/stress_filer_upload/bench_filer_upload/bench_filer_upload.go
3  unmaintained/stress_filer_upload/stress_filer_upload_actual/stress_filer_upload.go
3  unmaintained/volume_tailer/volume_tailer.go
3  weed/cluster/lock_manager/distributed_lock_manager.go
11  weed/cluster/lock_manager/lock_ring.go
3  weed/command/backup.go
8  weed/command/benchmark.go
3  weed/command/download.go
2  weed/command/filer_backup.go
2  weed/command/filer_copy.go
6  weed/command/filer_meta_backup.go
2  weed/command/filer_remote_gateway_buckets.go
2  weed/command/filer_remote_sync_dir.go
48  weed/command/fix.go
9  weed/command/master.go
2  weed/command/master_follower.go
7  weed/command/scaffold/filer.toml
2  weed/command/server.go
4  weed/command/upload.go
6  weed/filer/abstract_sql/abstract_sql_store.go
6  weed/filer/filechunk_manifest.go
38  weed/filer/filer.go
25  weed/filer/filer_conf.go
6  weed/filer/filer_delete_entry.go
59  weed/filer/filer_deletion.go
2  weed/filer/filer_notify.go
12  weed/filer/filerstore_wrapper.go
2  weed/filer/leveldb2/leveldb2_store.go
3  weed/filer/leveldb3/leveldb3_store.go
2  weed/filer/meta_aggregator.go
38  weed/filer/meta_replay.go
72  weed/filer/mongodb/mongodb_store.go
12  weed/filer/remote_storage.go
17  weed/filer/stream.go
2  weed/filer/ydb/ydb_store.go
2  weed/ftpd/ftp_server.go
5  weed/iamapi/iamapi_management_handlers.go
2  weed/iamapi/iamapi_server.go
2  weed/iamapi/iamapi_test.go
2  weed/images/cropping.go
2  weed/images/resizing.go
6  weed/mount/inode_to_path.go
6  weed/mount/page_writer/page_chunk_mem.go
6  weed/mount/page_writer/page_chunk_swapfile.go
23  weed/mount/weedfs.go
6  weed/mount/weedfs_file_sync.go
9  weed/mq/broker/broker_grpc_assign.go
9  weed/mq/broker/broker_grpc_configure.go
128  weed/mq/broker/broker_grpc_pub.go
188  weed/mq/broker/broker_grpc_pub_follow.go
212  weed/mq/broker/broker_grpc_sub.go
9  weed/mq/broker/broker_grpc_sub_coordinator.go
5  weed/mq/broker/broker_server.go
1354  go.sum
File diff suppressed because it is too large
5  k8s/charts/seaweedfs/Chart.yaml
@@ -1,5 +1,6 @@
 apiVersion: v1
 description: SeaweedFS
 name: seaweedfs
-appVersion: "3.64"
-version: 3.64.0
+appVersion: "3.69"
+# Dev note: Trigger a helm chart release by `git tag -a helm-<version>`
+version: 4.0.0
3214  k8s/charts/seaweedfs/dashboards/seaweedfs-grafana-dashboard.json
File diff suppressed because it is too large
35  k8s/charts/seaweedfs/templates/cluster-role.yaml (new file)
@@ -0,0 +1,35 @@
{{- if .Values.global.createClusterRole }}
#hack for delete pod master after migration
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: {{ .Values.global.serviceAccountName }}-rw-cr
  labels:
    app.kubernetes.io/name: {{ template "seaweedfs.name" . }}
    helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
    app.kubernetes.io/managed-by: {{ .Release.Service }}
    app.kubernetes.io/instance: {{ .Release.Name }}
rules:
- apiGroups: [""]
  resources: ["pods"]
  verbs: ["get", "list", "watch", "create", "update", "patch", "delete"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: system:serviceaccount:{{ .Values.global.serviceAccountName }}:default
  labels:
    app.kubernetes.io/name: {{ template "seaweedfs.name" . }}
    helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
    app.kubernetes.io/managed-by: {{ .Release.Service }}
    app.kubernetes.io/instance: {{ .Release.Name }}
subjects:
- kind: ServiceAccount
  name: {{ .Values.global.serviceAccountName }}
  namespace: {{ .Release.Namespace }}
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: {{ .Values.global.serviceAccountName }}-rw-cr
{{- end }}
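Note: the new ClusterRole is what allows the chart's service account to manage pods (see the "#hack for delete pod master after migration" comment above). As a quick way to confirm the grant from inside a pod running under that service account, here is a minimal client-go sketch; the program, its in-cluster setup, and the choice of the delete verb are illustrative assumptions, not part of this PR:

package main

import (
    "context"
    "fmt"

    authv1 "k8s.io/api/authorization/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/client-go/kubernetes"
    "k8s.io/client-go/rest"
)

func main() {
    // Assumes this runs in a pod that uses the chart's service account.
    cfg, err := rest.InClusterConfig()
    if err != nil {
        panic(err)
    }
    client := kubernetes.NewForConfigOrDie(cfg)

    // Ask the API server whether the current identity may delete pods,
    // one of the verbs the ClusterRole above grants.
    review := &authv1.SelfSubjectAccessReview{
        Spec: authv1.SelfSubjectAccessReviewSpec{
            ResourceAttributes: &authv1.ResourceAttributes{
                Verb:     "delete",
                Resource: "pods",
            },
        },
    }
    resp, err := client.AuthorizationV1().SelfSubjectAccessReviews().Create(context.Background(), review, metav1.CreateOptions{})
    if err != nil {
        panic(err)
    }
    fmt.Println("can delete pods:", resp.Status.Allowed)
}

The same check can be done without code via kubectl auth can-i delete pods --as=system:serviceaccount:<namespace>:<serviceAccountName>.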
note/keepsec.png: After | Width: 100 | Height: 36 | Size: 7.8 KiB
note/piknik.png: Before | Width: 165 | Height: 50 | Size: 7.2 KiB | After | Width: 156 | Height: 35 | Size: 7.4 KiB
188  weed/mq/broker/broker_grpc_pub_follow.go
@@ -1,96 +1,150 @@
The previous unary handler, PublishFollowMe(ctx, *mq_pb.PublishFollowMeRequest), which dialed FollowInMemoryMessages on request.BrokerSelf in a goroutine and consumed the responses in doFollowInMemoryMessage, is removed. The file now implements PublishFollowMe as a streaming handler on the follower side:

package broker

import (
	"fmt"
	"github.com/seaweedfs/seaweedfs/weed/filer"
	"github.com/seaweedfs/seaweedfs/weed/glog"
	"github.com/seaweedfs/seaweedfs/weed/mq/topic"
	"github.com/seaweedfs/seaweedfs/weed/pb/mq_pb"
	"github.com/seaweedfs/seaweedfs/weed/util/buffered_queue"
	"github.com/seaweedfs/seaweedfs/weed/util/log_buffer"
	"io"
	"time"
)

type memBuffer struct {
	buf       []byte
	startTime time.Time
	stopTime  time.Time
}

func (b *MessageQueueBroker) PublishFollowMe(stream mq_pb.SeaweedMessaging_PublishFollowMeServer) (err error) {
	var req *mq_pb.PublishFollowMeRequest
	req, err = stream.Recv()
	if err != nil {
		return err
	}
	initMessage := req.GetInit()
	if initMessage == nil {
		return fmt.Errorf("missing init message")
	}

	// create an in-memory queue of buffered messages
	inMemoryBuffers := buffered_queue.NewBufferedQueue[memBuffer](4)
	logBuffer := b.buildFollowerLogBuffer(inMemoryBuffers)

	lastFlushTsNs := time.Now().UnixNano()

	// follow each published messages
	for {
		// receive a message
		req, err = stream.Recv()
		if err != nil {
			if err == io.EOF {
				err = nil
				break
			}
			glog.V(0).Infof("topic %v partition %v publish stream error: %v", initMessage.Topic, initMessage.Partition, err)
			break
		}

		// Process the received message
		if dataMessage := req.GetData(); dataMessage != nil {

			// TODO: change this to DataMessage
			// log the message
			logBuffer.AddToBuffer(dataMessage)

			// send back the ack
			if err := stream.Send(&mq_pb.PublishFollowMeResponse{
				AckTsNs: dataMessage.TsNs,
			}); err != nil {
				glog.Errorf("Error sending response %v: %v", dataMessage, err)
			}
			// println("ack", string(dataMessage.Key), dataMessage.TsNs)
		} else if closeMessage := req.GetClose(); closeMessage != nil {
			glog.V(0).Infof("topic %v partition %v publish stream closed: %v", initMessage.Topic, initMessage.Partition, closeMessage)
			break
		} else if flushMessage := req.GetFlush(); flushMessage != nil {
			glog.V(0).Infof("topic %v partition %v publish stream flushed: %v", initMessage.Topic, initMessage.Partition, flushMessage)

			lastFlushTsNs = flushMessage.TsNs

			// drop already flushed messages
			for mem, found := inMemoryBuffers.PeekHead(); found; mem, found = inMemoryBuffers.PeekHead() {
				if mem.stopTime.UnixNano() <= flushMessage.TsNs {
					inMemoryBuffers.Dequeue()
					// println("dropping flushed messages: ", mem.startTime.UnixNano(), mem.stopTime.UnixNano(), len(mem.buf))
				} else {
					break
				}
			}
		} else {
			glog.Errorf("unknown message: %v", req)
		}
	}

	t, p := topic.FromPbTopic(initMessage.Topic), topic.FromPbPartition(initMessage.Partition)

	logBuffer.ShutdownLogBuffer()
	// wait until all messages are sent to inMemoryBuffers
	for !logBuffer.IsAllFlushed() {
		time.Sleep(113 * time.Millisecond)
	}

	topicDir := fmt.Sprintf("%s/%s/%s", filer.TopicsDir, t.Namespace, t.Name)
	partitionGeneration := time.Unix(0, p.UnixTimeNs).UTC().Format(topic.TIME_FORMAT)
	partitionDir := fmt.Sprintf("%s/%s/%04d-%04d", topicDir, partitionGeneration, p.RangeStart, p.RangeStop)

	// flush the remaining messages
	inMemoryBuffers.CloseInput()
	for mem, found := inMemoryBuffers.Dequeue(); found; mem, found = inMemoryBuffers.Dequeue() {
		if len(mem.buf) == 0 {
			continue
		}

		startTime, stopTime := mem.startTime.UTC(), mem.stopTime.UTC()

		if stopTime.UnixNano() <= lastFlushTsNs {
			glog.V(0).Infof("dropping remaining data at %v %v", t, p)
			continue
		}

		// TODO trim data earlier than lastFlushTsNs

		targetFile := fmt.Sprintf("%s/%s", partitionDir, startTime.Format(topic.TIME_FORMAT))

		for {
			if err := b.appendToFile(targetFile, mem.buf); err != nil {
				glog.V(0).Infof("metadata log write failed %s: %v", targetFile, err)
				time.Sleep(737 * time.Millisecond)
			} else {
				break
			}
		}

		glog.V(0).Infof("flushed remaining data at %v to %s size %d", mem.stopTime.UnixNano(), targetFile, len(mem.buf))
	}

	glog.V(0).Infof("shut down follower for %v %v", t, p)

	return err
}

func (b *MessageQueueBroker) buildFollowerLogBuffer(inMemoryBuffers *buffered_queue.BufferedQueue[memBuffer]) *log_buffer.LogBuffer {
	lb := log_buffer.NewLogBuffer("follower",
		2*time.Minute, func(logBuffer *log_buffer.LogBuffer, startTime, stopTime time.Time, buf []byte) {
			if len(buf) == 0 {
				return
			}
			inMemoryBuffers.Enqueue(memBuffer{
				buf:       buf,
				startTime: startTime,
				stopTime:  stopTime,
			})
			glog.V(0).Infof("queue up %d~%d size %d", startTime.UnixNano(), stopTime.UnixNano(), len(buf))
		}, nil, func() {
		})
	return lb
}
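Note: the follower's bookkeeping comes down to two loops over the in-memory queue: drop buffers whose stopTime is at or before the leader's last reported flush, and persist whatever remains once the stream closes. The sketch below isolates that logic with a toy FIFO standing in for weed/util/buffered_queue (whose implementation is not shown in this diff); the simplified memBuffer with raw nanosecond timestamps and the fifo type are hypothetical, for illustration only:

package main

import "fmt"

// Illustrative stand-in for the buffered queue: a plain FIFO with the
// Enqueue/PeekHead/Dequeue operations the follower relies on.
type memBuffer struct {
    buf                 []byte
    startTsNs, stopTsNs int64
}

type fifo struct{ items []memBuffer }

func (q *fifo) Enqueue(m memBuffer) { q.items = append(q.items, m) }

func (q *fifo) PeekHead() (memBuffer, bool) {
    if len(q.items) == 0 {
        return memBuffer{}, false
    }
    return q.items[0], true
}

func (q *fifo) Dequeue() (memBuffer, bool) {
    m, ok := q.PeekHead()
    if ok {
        q.items = q.items[1:]
    }
    return m, ok
}

func main() {
    q := &fifo{}
    q.Enqueue(memBuffer{buf: []byte("a"), startTsNs: 100, stopTsNs: 200})
    q.Enqueue(memBuffer{buf: []byte("b"), startTsNs: 200, stopTsNs: 300})
    q.Enqueue(memBuffer{buf: []byte("c"), startTsNs: 300, stopTsNs: 400})

    // On a flush notification from the leader, drop every buffer whose
    // stopTsNs is at or before the flushed timestamp, mirroring the
    // "drop already flushed messages" loop in PublishFollowMe.
    flushTsNs := int64(300)
    for mem, found := q.PeekHead(); found; mem, found = q.PeekHead() {
        if mem.stopTsNs <= flushTsNs {
            q.Dequeue()
            fmt.Printf("dropped %s (%d~%d): already flushed by leader\n", mem.buf, mem.startTsNs, mem.stopTsNs)
        } else {
            break
        }
    }

    // Anything still queued when the stream closes must be written out,
    // as the handler does under the partition directory.
    for mem, found := q.Dequeue(); found; mem, found = q.Dequeue() {
        fmt.Printf("would persist %s (%d~%d)\n", mem.buf, mem.startTsNs, mem.stopTsNs)
    }
}

The real handler makes the same comparison with mem.stopTime.UnixNano() against flushMessage.TsNs and lastFlushTsNs, and retries b.appendToFile until each remaining buffer is persisted.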
Some files were not shown because too many files changed in this diff