Merge remote-tracking branch 'origin/master' into filer_mongodb

# Conflicts:
#	go.mod
#	go.sum
#	weed/server/filer_server.go

pull/1280/head
bukton
5 years ago
101 changed files with 3018 additions and 1122 deletions
6  README.md
12  go.mod
17  go.sum
2  k8s/seaweedfs/Chart.yaml
2  k8s/seaweedfs/values.yaml
19  other/java/client/src/main/proto/filer.proto
75  unmaintained/see_log_entry/see_log_entry.go
1  weed/command/filer_replication.go
3  weed/command/master.go
12  weed/command/mount.go
6  weed/command/mount_std.go
28  weed/command/msg_broker.go
17  weed/command/scaffold.go
15  weed/command/shell.go
3  weed/command/volume.go
2  weed/command/watch.go
7  weed/command/webdav.go
1  weed/filer2/entry.go
6  weed/filer2/entry_codec.go
37  weed/filer2/filechunks.go
14  weed/filer2/filer.go
19  weed/filer2/filer_buckets.go
24  weed/filer2/filer_notify.go
47  weed/filer2/filer_notify_append.go
4  weed/filer2/leveldb/leveldb_store_test.go
4  weed/filer2/leveldb2/leveldb2_store_test.go
14  weed/filer2/reader_at.go
42  weed/filer2/redis2/redis_cluster_store.go
36  weed/filer2/redis2/redis_store.go
162  weed/filer2/redis2/universal_redis_store.go
4  weed/filer2/stream.go
6  weed/filer2/topics.go
4  weed/filesys/dir.go
18  weed/filesys/dirty_page.go
30  weed/filesys/filehandle.go
32  weed/filesys/fscache.go
72  weed/filesys/wfs.go
2  weed/images/orientation.go
113  weed/messaging/broker/broker_append.go
15  weed/messaging/broker/broker_grpc_server.go
99  weed/messaging/broker/broker_grpc_server_publish.go
88  weed/messaging/broker/broker_grpc_server_subscribe.go
11  weed/messaging/broker/broker_server.go
80  weed/messaging/broker/topic_lock.go
12  weed/operation/upload_content.go
2  weed/pb/Makefile
19  weed/pb/filer.proto
448  weed/pb/filer_pb/filer.pb.go
110  weed/pb/messaging.proto
824  weed/pb/messaging_pb/messaging.pb.go
66  weed/pb/queue.proto
516  weed/pb/queue_pb/queue.pb.go
5  weed/pb/shared_values.go
2  weed/replication/sink/azuresink/azure_sink.go
2  weed/replication/sink/b2sink/b2_sink.go
4  weed/replication/sink/filersink/filer_sink.go
2  weed/replication/sink/gcssink/gcs_sink.go
9  weed/replication/sink/s3sink/s3_sink.go
4  weed/replication/sub/notification_aws_sqs.go
4  weed/s3api/filer_multipart.go
2  weed/s3api/s3api_objects_list_handlers.go
35  weed/server/filer_grpc_server.go
8  weed/server/filer_grpc_server_listen.go
28  weed/server/filer_grpc_server_rename.go
12  weed/server/filer_server.go
21  weed/server/filer_server_handlers_read.go
42  weed/server/filer_server_handlers_write.go
16  weed/server/filer_server_handlers_write_autochunk.go
4  weed/server/filer_server_handlers_write_cipher.go
14  weed/server/master_grpc_server.go
11  weed/server/master_server.go
23  weed/server/msg_broker_grpc_server.go
2  weed/server/volume_grpc_tail.go
8  weed/server/volume_server_handlers_write.go
10  weed/server/volume_server_ui/templates.go
13  weed/server/webdav_server.go
2  weed/shell/commands.go
4  weed/storage/backend/memory_map/memory_map_backend.go
7  weed/storage/backend/volume_create.go
7  weed/storage/backend/volume_create_linux.go
7  weed/storage/backend/volume_create_windows.go
6  weed/storage/store.go
2  weed/storage/volume_loading.go
7  weed/storage/volume_read_write.go
4  weed/storage/volume_vacuum.go
2  weed/storage/volume_vacuum_test.go
10  weed/topology/store_replicate.go
7  weed/util/bytes.go
113  weed/util/chunk_cache/chunk_cache.go
12  weed/util/chunk_cache/chunk_cache_in_memory.go
145  weed/util/chunk_cache/chunk_cache_on_disk.go
59  weed/util/chunk_cache/chunk_cache_on_disk_test.go
89  weed/util/chunk_cache/on_disk_cache_layer.go
3  weed/util/config.go
2  weed/util/constants.go
2  weed/util/http_util.go
54  weed/util/log_buffer/log_buffer.go
40  weed/util/log_buffer/sealed_buffer.go
25  weed/util/network.go
16  weed/util/parse.go
k8s/seaweedfs/Chart.yaml
@@ -1,4 +1,4 @@
 apiVersion: v1
 description: SeaweedFS
 name: seaweedfs
-version: 1.71
+version: 1.74
unmaintained/see_log_entry/see_log_entry.go
@@ -0,0 +1,75 @@
package main

import (
	"flag"
	"fmt"
	"io"
	"log"
	"os"

	"github.com/golang/protobuf/proto"

	"github.com/chrislusf/seaweedfs/weed/filer2"
	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
	"github.com/chrislusf/seaweedfs/weed/util"
)

var (
	logdataFile = flag.String("logdata", "", "log data file saved under "+filer2.SystemLogDir)
)

func main() {
	flag.Parse()

	dst, err := os.OpenFile(*logdataFile, os.O_RDONLY, 0644)
	if err != nil {
		log.Fatalf("failed to open %s: %v", *logdataFile, err)
	}
	defer dst.Close()

	err = walkLogEntryFile(dst)
	if err != nil {
		log.Fatalf("failed to visit %s: %v", *logdataFile, err)
	}

}

func walkLogEntryFile(dst *os.File) error {

	sizeBuf := make([]byte, 4)

	for {
		if n, err := dst.Read(sizeBuf); n != 4 {
			if err == io.EOF {
				return nil
			}
			return err
		}

		size := util.BytesToUint32(sizeBuf)

		data := make([]byte, int(size))

		if n, err := dst.Read(data); n != len(data) {
			return err
		}

		logEntry := &filer_pb.LogEntry{}
		err := proto.Unmarshal(data, logEntry)
		if err != nil {
			log.Printf("unexpected unmarshal filer_pb.LogEntry: %v", err)
			return nil
		}

		event := &filer_pb.SubscribeMetadataResponse{}
		err = proto.Unmarshal(logEntry.Data, event)
		if err != nil {
			log.Printf("unexpected unmarshal filer_pb.SubscribeMetadataResponse: %v", err)
			return nil
		}

		fmt.Printf("event: %+v\n", event)

	}

}
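The tool above expects each record to be a 4-byte size prefix followed by a marshaled filer_pb.LogEntry whose Data field wraps a filer_pb.SubscribeMetadataResponse. A rough, hypothetical sketch of producing a compatible file (the output path is a made-up example, the event is left empty for brevity, and the prefix is written big-endian to match util.BytesToUint32):

package main

import (
	"encoding/binary"
	"log"
	"os"

	"github.com/golang/protobuf/proto"

	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
)

func main() {
	// wrap an (empty, illustrative) metadata event in a LogEntry
	event := &filer_pb.SubscribeMetadataResponse{}
	eventData, _ := proto.Marshal(event)
	entry := &filer_pb.LogEntry{Data: eventData}

	data, err := proto.Marshal(entry)
	if err != nil {
		log.Fatal(err)
	}

	// 4-byte big-endian length prefix, then the record bytes
	sizeBuf := make([]byte, 4)
	binary.BigEndian.PutUint32(sizeBuf, uint32(len(data)))

	f, err := os.Create("/tmp/example-logdata") // made-up path
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	f.Write(sizeBuf)
	f.Write(data)
}

Pointing see_log_entry at that file with -logdata=/tmp/example-logdata should then print one decoded event per record.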
weed/filer2/redis2/redis_cluster_store.go
@@ -0,0 +1,42 @@
package redis2

import (
	"github.com/chrislusf/seaweedfs/weed/filer2"
	"github.com/chrislusf/seaweedfs/weed/util"
	"github.com/go-redis/redis"
)

func init() {
	filer2.Stores = append(filer2.Stores, &RedisCluster2Store{})
}

type RedisCluster2Store struct {
	UniversalRedis2Store
}

func (store *RedisCluster2Store) GetName() string {
	return "redis_cluster2"
}

func (store *RedisCluster2Store) Initialize(configuration util.Configuration, prefix string) (err error) {

	configuration.SetDefault(prefix+"useReadOnly", true)
	configuration.SetDefault(prefix+"routeByLatency", true)

	return store.initialize(
		configuration.GetStringSlice(prefix+"addresses"),
		configuration.GetString(prefix+"password"),
		configuration.GetBool(prefix+"useReadOnly"),
		configuration.GetBool(prefix+"routeByLatency"),
	)
}

func (store *RedisCluster2Store) initialize(addresses []string, password string, readOnly, routeByLatency bool) (err error) {
	store.Client = redis.NewClusterClient(&redis.ClusterOptions{
		Addrs:          addresses,
		Password:       password,
		ReadOnly:       readOnly,
		RouteByLatency: routeByLatency,
	})
	return
}
weed/filer2/redis2/redis_store.go
@@ -0,0 +1,36 @@
package redis2

import (
	"github.com/chrislusf/seaweedfs/weed/filer2"
	"github.com/chrislusf/seaweedfs/weed/util"
	"github.com/go-redis/redis"
)

func init() {
	filer2.Stores = append(filer2.Stores, &Redis2Store{})
}

type Redis2Store struct {
	UniversalRedis2Store
}

func (store *Redis2Store) GetName() string {
	return "redis2"
}

func (store *Redis2Store) Initialize(configuration util.Configuration, prefix string) (err error) {
	return store.initialize(
		configuration.GetString(prefix+"address"),
		configuration.GetString(prefix+"password"),
		configuration.GetInt(prefix+"database"),
	)
}

func (store *Redis2Store) initialize(hostPort string, password string, database int) (err error) {
	store.Client = redis.NewClient(&redis.Options{
		Addr:     hostPort,
		Password: password,
		DB:       database,
	})
	return
}
weed/filer2/redis2/universal_redis_store.go
@@ -0,0 +1,162 @@
package redis2

import (
	"context"
	"fmt"
	"time"

	"github.com/go-redis/redis"

	"github.com/chrislusf/seaweedfs/weed/filer2"
	"github.com/chrislusf/seaweedfs/weed/glog"
	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
	"github.com/chrislusf/seaweedfs/weed/util"
)

const (
	DIR_LIST_MARKER = "\x00"
)

type UniversalRedis2Store struct {
	Client redis.UniversalClient
}

func (store *UniversalRedis2Store) BeginTransaction(ctx context.Context) (context.Context, error) {
	return ctx, nil
}
func (store *UniversalRedis2Store) CommitTransaction(ctx context.Context) error {
	return nil
}
func (store *UniversalRedis2Store) RollbackTransaction(ctx context.Context) error {
	return nil
}

func (store *UniversalRedis2Store) InsertEntry(ctx context.Context, entry *filer2.Entry) (err error) {

	value, err := entry.EncodeAttributesAndChunks()
	if err != nil {
		return fmt.Errorf("encoding %s %+v: %v", entry.FullPath, entry.Attr, err)
	}

	if err = store.Client.Set(string(entry.FullPath), value, time.Duration(entry.TtlSec)*time.Second).Err(); err != nil {
		return fmt.Errorf("persisting %s : %v", entry.FullPath, err)
	}

	dir, name := entry.FullPath.DirAndName()
	if name != "" {
		if err = store.Client.ZAddNX(genDirectoryListKey(dir), redis.Z{Score: 0, Member: name}).Err(); err != nil {
			return fmt.Errorf("persisting %s in parent dir: %v", entry.FullPath, err)
		}
	}

	return nil
}

func (store *UniversalRedis2Store) UpdateEntry(ctx context.Context, entry *filer2.Entry) (err error) {

	return store.InsertEntry(ctx, entry)
}

func (store *UniversalRedis2Store) FindEntry(ctx context.Context, fullpath util.FullPath) (entry *filer2.Entry, err error) {

	data, err := store.Client.Get(string(fullpath)).Result()
	if err == redis.Nil {
		return nil, filer_pb.ErrNotFound
	}

	if err != nil {
		return nil, fmt.Errorf("get %s : %v", fullpath, err)
	}

	entry = &filer2.Entry{
		FullPath: fullpath,
	}
	err = entry.DecodeAttributesAndChunks([]byte(data))
	if err != nil {
		return entry, fmt.Errorf("decode %s : %v", entry.FullPath, err)
	}

	return entry, nil
}

func (store *UniversalRedis2Store) DeleteEntry(ctx context.Context, fullpath util.FullPath) (err error) {

	_, err = store.Client.Del(string(fullpath)).Result()

	if err != nil {
		return fmt.Errorf("delete %s : %v", fullpath, err)
	}

	dir, name := fullpath.DirAndName()
	if name != "" {
		_, err = store.Client.ZRem(genDirectoryListKey(dir), name).Result()
		if err != nil {
			return fmt.Errorf("delete %s in parent dir: %v", fullpath, err)
		}
	}

	return nil
}

func (store *UniversalRedis2Store) DeleteFolderChildren(ctx context.Context, fullpath util.FullPath) (err error) {

	members, err := store.Client.ZRange(genDirectoryListKey(string(fullpath)), 0, -1).Result()
	if err != nil {
		return fmt.Errorf("delete folder %s : %v", fullpath, err)
	}

	for _, fileName := range members {
		path := util.NewFullPath(string(fullpath), fileName)
		_, err = store.Client.Del(string(path)).Result()
		if err != nil {
			return fmt.Errorf("delete %s in parent dir: %v", fullpath, err)
		}
	}

	return nil
}

func (store *UniversalRedis2Store) ListDirectoryEntries(ctx context.Context, fullpath util.FullPath, startFileName string, inclusive bool,
	limit int) (entries []*filer2.Entry, err error) {

	dirListKey := genDirectoryListKey(string(fullpath))
	start := int64(0)
	if startFileName != "" {
		start, _ = store.Client.ZRank(dirListKey, startFileName).Result()
		if !inclusive {
			start++
		}
	}
	members, err := store.Client.ZRange(dirListKey, start, start+int64(limit)-1).Result()
	if err != nil {
		return nil, fmt.Errorf("list %s : %v", fullpath, err)
	}

	// fetch entry meta
	for _, fileName := range members {
		path := util.NewFullPath(string(fullpath), fileName)
		entry, err := store.FindEntry(ctx, path)
		if err != nil {
			glog.V(0).Infof("list %s : %v", path, err)
		} else {
			if entry.TtlSec > 0 {
				if entry.Attr.Crtime.Add(time.Duration(entry.TtlSec) * time.Second).Before(time.Now()) {
					store.Client.Del(string(path)).Result()
					store.Client.ZRem(dirListKey, fileName).Result()
					continue
				}
			}
			entries = append(entries, entry)
		}
	}

	return entries, err
}

func genDirectoryListKey(dir string) (dirList string) {
	return dir + DIR_LIST_MARKER
}

func (store *UniversalRedis2Store) Shutdown() {
	store.Client.Close()
}
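For orientation, the layout this store leaves in Redis is one string key per entry (keyed by the full path, holding the encoded attributes and chunks) plus one sorted set per directory, whose key is the directory path with the trailing DIR_LIST_MARKER. A minimal sketch of the same writes, with made-up paths and a local Redis address:

package main

import "github.com/go-redis/redis"

func main() {
	// Example client; the real store builds this in Initialize.
	client := redis.NewClient(&redis.Options{Addr: "localhost:6379"})

	// In the store this value comes from entry.EncodeAttributesAndChunks().
	encodedEntry := []byte("...")

	// One string key per entry, keyed by the full path (InsertEntry's Set call).
	client.Set("/buckets/b1/notes.txt", encodedEntry, 0)

	// One sorted set per directory: key is the parent path plus "\x00",
	// member is the bare file name at score 0 (InsertEntry's ZAddNX call).
	client.ZAddNX("/buckets/b1\x00", redis.Z{Score: 0, Member: "notes.txt"})
}

ListDirectoryEntries then pages through the same sorted set with ZRank and ZRange before fetching each entry's string key.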
weed/filer2/topics.go
@@ -0,0 +1,6 @@
package filer2

const (
	TopicsDir    = "/topics"
	SystemLogDir = TopicsDir + "/.system/log"
)
weed/messaging/broker/broker_append.go
@@ -0,0 +1,113 @@
package broker

import (
	"context"
	"fmt"
	"time"

	"github.com/chrislusf/seaweedfs/weed/glog"
	"github.com/chrislusf/seaweedfs/weed/operation"
	"github.com/chrislusf/seaweedfs/weed/pb"
	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
	"github.com/chrislusf/seaweedfs/weed/pb/messaging_pb"
	"github.com/chrislusf/seaweedfs/weed/security"
	"github.com/chrislusf/seaweedfs/weed/util"
)

func (broker *MessageBroker) appendToFile(targetFile string, topicConfig *messaging_pb.TopicConfiguration, data []byte) error {

	assignResult, uploadResult, err2 := broker.assignAndUpload(topicConfig, data)
	if err2 != nil {
		return err2
	}

	dir, name := util.FullPath(targetFile).DirAndName()

	chunk := &filer_pb.FileChunk{
		FileId:    assignResult.Fid,
		Offset:    0, // needs to be fixed during appending
		Size:      uint64(uploadResult.Size),
		Mtime:     time.Now().UnixNano(),
		ETag:      uploadResult.ETag,
		IsGzipped: uploadResult.Gzip > 0,
	}

	// append the chunk
	if err := broker.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {

		request := &filer_pb.AppendToEntryRequest{
			Directory: dir,
			EntryName: name,
			Chunks:    []*filer_pb.FileChunk{chunk},
		}

		_, err := client.AppendToEntry(context.Background(), request)
		if err != nil {
			glog.V(0).Infof("append to file %v: %v", request, err)
			return err
		}

		return nil
	}); err != nil {
		return fmt.Errorf("append to file %v: %v", targetFile, err)
	}

	return nil
}

func (broker *MessageBroker) assignAndUpload(topicConfig *messaging_pb.TopicConfiguration, data []byte) (*operation.AssignResult, *operation.UploadResult, error) {

	var assignResult = &operation.AssignResult{}

	// assign a volume location
	if err := broker.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {

		request := &filer_pb.AssignVolumeRequest{
			Count:       1,
			Replication: topicConfig.Replication,
			Collection:  topicConfig.Collection,
		}

		resp, err := client.AssignVolume(context.Background(), request)
		if err != nil {
			glog.V(0).Infof("assign volume failure %v: %v", request, err)
			return err
		}
		if resp.Error != "" {
			return fmt.Errorf("assign volume failure %v: %v", request, resp.Error)
		}

		assignResult.Auth = security.EncodedJwt(resp.Auth)
		assignResult.Fid = resp.FileId
		assignResult.Url = resp.Url
		assignResult.PublicUrl = resp.PublicUrl
		assignResult.Count = uint64(resp.Count)

		return nil
	}); err != nil {
		return nil, nil, err
	}

	// upload data
	targetUrl := fmt.Sprintf("http://%s/%s", assignResult.Url, assignResult.Fid)
	uploadResult, err := operation.UploadData(targetUrl, "", broker.option.Cipher, data, false, "", nil, assignResult.Auth)
	if err != nil {
		return nil, nil, fmt.Errorf("upload data %s: %v", targetUrl, err)
	}
	// println("uploaded to", targetUrl)
	return assignResult, uploadResult, nil
}

func (broker *MessageBroker) WithFilerClient(fn func(filer_pb.SeaweedFilerClient) error) (err error) {

	for _, filer := range broker.option.Filers {
		if err = pb.WithFilerClient(filer, broker.grpcDialOption, fn); err != nil {
			glog.V(0).Infof("fail to connect to %s: %v", filer, err)
		} else {
			break
		}
	}

	return

}
weed/messaging/broker/broker_grpc_server.go
@@ -0,0 +1,15 @@
package broker

import (
	"context"

	"github.com/chrislusf/seaweedfs/weed/pb/messaging_pb"
)

func (broker *MessageBroker) ConfigureTopic(c context.Context, request *messaging_pb.ConfigureTopicRequest) (*messaging_pb.ConfigureTopicResponse, error) {
	panic("implement me")
}

func (broker *MessageBroker) GetTopicConfiguration(c context.Context, request *messaging_pb.GetTopicConfigurationRequest) (*messaging_pb.GetTopicConfigurationResponse, error) {
	panic("implement me")
}
weed/messaging/broker/broker_grpc_server_publish.go
@@ -0,0 +1,99 @@
package broker

import (
	"fmt"
	"io"
	"time"

	"github.com/golang/protobuf/proto"

	"github.com/chrislusf/seaweedfs/weed/filer2"
	"github.com/chrislusf/seaweedfs/weed/glog"
	"github.com/chrislusf/seaweedfs/weed/pb/messaging_pb"
)

func (broker *MessageBroker) Publish(stream messaging_pb.SeaweedMessaging_PublishServer) error {

	// process initial request
	in, err := stream.Recv()
	if err == io.EOF {
		return nil
	}
	if err != nil {
		return err
	}

	// TODO look it up
	topicConfig := &messaging_pb.TopicConfiguration{

	}

	// get lock
	tp := TopicPartition{
		Namespace: in.Init.Namespace,
		Topic:     in.Init.Topic,
		Partition: in.Init.Partition,
	}
	logBuffer := broker.topicLocks.RequestPublisherLock(tp, func(startTime, stopTime time.Time, buf []byte) {

		targetFile := fmt.Sprintf(
			"%s/%s/%s/%04d-%02d-%02d/%02d-%02d.part%02d",
			filer2.TopicsDir, tp.Namespace, tp.Topic,
			startTime.Year(), startTime.Month(), startTime.Day(), startTime.Hour(), startTime.Minute(),
			tp.Partition,
		)

		if err := broker.appendToFile(targetFile, topicConfig, buf); err != nil {
			glog.V(0).Infof("log write failed %s: %v", targetFile, err)
		}

	})
	defer broker.topicLocks.ReleaseLock(tp, true)

	updatesChan := make(chan int32)

	go func() {
		for update := range updatesChan {
			if err := stream.Send(&messaging_pb.PublishResponse{
				Config: &messaging_pb.PublishResponse_ConfigMessage{
					PartitionCount: update,
				},
			}); err != nil {
				glog.V(0).Infof("err sending publish response: %v", err)
				return
			}
		}
	}()

	// process each message
	for {
		in, err := stream.Recv()
		if err == io.EOF {
			return nil
		}
		if err != nil {
			return err
		}

		if in.Data == nil {
			continue
		}

		m := &messaging_pb.Message{
			Timestamp: time.Now().UnixNano(),
			Key:       in.Data.Key,
			Value:     in.Data.Value,
			Headers:   in.Data.Headers,
		}

		data, err := proto.Marshal(m)
		if err != nil {
			glog.Errorf("marshall error: %v\n", err)
			continue
		}

		logBuffer.AddToBuffer(in.Data.Key, data)

	}
}
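With the format string above, a buffer flushed at 10:30 on 2020-05-01 for namespace chat, topic room1, partition 0 (example values) would be appended to /topics/chat/room1/2020-05-01/10-30.part00, so each topic partition accumulates roughly one filer file per minute, matching the one-minute flush interval the log buffer is given in topic_lock.go later in this diff.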
weed/messaging/broker/broker_grpc_server_subscribe.go
@@ -0,0 +1,88 @@
package broker

import (
	"io"
	"sync"
	"time"

	"github.com/golang/protobuf/proto"

	"github.com/chrislusf/seaweedfs/weed/glog"
	"github.com/chrislusf/seaweedfs/weed/pb/messaging_pb"
	"github.com/chrislusf/seaweedfs/weed/util"
)

func (broker *MessageBroker) Subscribe(stream messaging_pb.SeaweedMessaging_SubscribeServer) error {

	// process initial request
	in, err := stream.Recv()
	if err == io.EOF {
		return nil
	}
	if err != nil {
		return err
	}

	subscriberId := in.Init.SubscriberId

	// get lock
	tp := TopicPartition{
		Namespace: in.Init.Namespace,
		Topic:     in.Init.Topic,
		Partition: in.Init.Partition,
	}
	lock := broker.topicLocks.RequestSubscriberLock(tp)
	defer broker.topicLocks.ReleaseLock(tp, false)
	cond := sync.NewCond(&lock.Mutex)

	lastReadTime := time.Now()
	switch in.Init.StartPosition {
	case messaging_pb.SubscriberMessage_InitMessage_TIMESTAMP:
		lastReadTime = time.Unix(0, in.Init.TimestampNs)
	case messaging_pb.SubscriberMessage_InitMessage_LATEST:
	case messaging_pb.SubscriberMessage_InitMessage_EARLIEST:
	}

	// how to process each message
	// an error returned will end the subscription
	eachMessageFn := func(m *messaging_pb.Message) error {
		err := stream.Send(&messaging_pb.BrokerMessage{
			Data: m,
		})
		if err != nil {
			glog.V(0).Infof("=> subscriber %v: %+v", subscriberId, err)
		}
		return err
	}

	// loop through all messages
	for {

		_, buf := lock.logBuffer.ReadFromBuffer(lastReadTime)

		for pos := 0; pos+4 < len(buf); {

			size := util.BytesToUint32(buf[pos : pos+4])
			entryData := buf[pos+4 : pos+4+int(size)]

			m := &messaging_pb.Message{}
			if err = proto.Unmarshal(entryData, m); err != nil {
				glog.Errorf("unexpected unmarshal messaging_pb.Message: %v", err)
				pos += 4 + int(size)
				continue
			}

			if err = eachMessageFn(m); err != nil {
				return err
			}

			lastReadTime = time.Unix(0, m.Timestamp)
			pos += 4 + int(size)
		}

		lock.Mutex.Lock()
		cond.Wait()
		lock.Mutex.Unlock()
	}

}
weed/messaging/broker/topic_lock.go
@@ -0,0 +1,80 @@
package broker

import (
	"sync"
	"time"

	"github.com/chrislusf/seaweedfs/weed/util/log_buffer"
)

type TopicPartition struct {
	Namespace string
	Topic     string
	Partition int32
}
type TopicLock struct {
	sync.Mutex
	subscriberCount int
	publisherCount  int
	logBuffer       *log_buffer.LogBuffer
}

type TopicLocks struct {
	sync.Mutex
	locks map[TopicPartition]*TopicLock
}

func NewTopicLocks() *TopicLocks {
	return &TopicLocks{
		locks: make(map[TopicPartition]*TopicLock),
	}
}

func (tl *TopicLocks) RequestSubscriberLock(partition TopicPartition) *TopicLock {
	tl.Lock()
	defer tl.Unlock()

	lock, found := tl.locks[partition]
	if !found {
		lock = &TopicLock{}
		tl.locks[partition] = lock
	}
	lock.subscriberCount++

	return lock
}

func (tl *TopicLocks) RequestPublisherLock(partition TopicPartition, flushFn func(startTime, stopTime time.Time, buf []byte)) *log_buffer.LogBuffer {
	tl.Lock()
	defer tl.Unlock()

	lock, found := tl.locks[partition]
	if !found {
		lock = &TopicLock{}
		tl.locks[partition] = lock
	}
	lock.publisherCount++
	cond := sync.NewCond(&lock.Mutex)
	lock.logBuffer = log_buffer.NewLogBuffer(time.Minute, flushFn, func() {
		cond.Broadcast()
	})
	return lock.logBuffer
}

func (tl *TopicLocks) ReleaseLock(partition TopicPartition, isPublisher bool) {
	tl.Lock()
	defer tl.Unlock()

	lock, found := tl.locks[partition]
	if !found {
		return
	}
	if isPublisher {
		lock.publisherCount--
	} else {
		lock.subscriberCount--
	}
	if lock.subscriberCount <= 0 && lock.publisherCount <= 0 {
		delete(tl.locks, partition)
	}
}
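Note the reference counting here: Publish above pairs RequestPublisherLock with ReleaseLock(tp, true) and Subscribe pairs RequestSubscriberLock with ReleaseLock(tp, false), so the per-partition TopicLock (and its LogBuffer) is only dropped from the map once both the publisher and subscriber counts reach zero.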
weed/pb/messaging.proto
@@ -0,0 +1,110 @@
syntax = "proto3";

package messaging_pb;

option java_package = "seaweedfs.client";
option java_outer_classname = "MessagingProto";

//////////////////////////////////////////////////

service SeaweedMessaging {

    rpc Subscribe (stream SubscriberMessage) returns (stream BrokerMessage) {
    }

    rpc Publish (stream PublishRequest) returns (stream PublishResponse) {
    }

    rpc ConfigureTopic (ConfigureTopicRequest) returns (ConfigureTopicResponse) {
    }

    rpc GetTopicConfiguration (GetTopicConfigurationRequest) returns (GetTopicConfigurationResponse) {
    }

}

//////////////////////////////////////////////////

message SubscriberMessage {
    message InitMessage {
        string namespace = 1;
        string topic = 2;
        int32 partition = 3;
        enum StartPosition {
            LATEST = 0;    // Start at the newest message
            EARLIEST = 1;  // Start at the oldest message
            TIMESTAMP = 2; // Start after a specified timestamp, exclusive
        }
        StartPosition startPosition = 4; // Where to begin consuming from
        int64 timestampNs = 5;           // timestamp in nano seconds
        string subscriber_id = 6;        // uniquely identify a subscriber to track consumption
    }
    InitMessage init = 1;
    message AckMessage {
        int64 message_id = 1;
    }
    AckMessage ack = 2;
}

message Message {
    int64 timestamp = 1 [jstype = JS_STRING]; // When the message was received by the broker
    bytes key = 2;                            // Message key
    bytes value = 3;                          // Message payload
    map<string, bytes> headers = 4;           // Message headers
}

message BrokerMessage {
    Message data = 1;
    message RedirectMessage {
        string new_broker = 1;
    }
    RedirectMessage redirect = 2;
}

message PublishRequest {
    message InitMessage {
        string namespace = 1; // only needed on the initial request
        string topic = 2;     // only needed on the initial request
        int32 partition = 3;
    }
    InitMessage init = 1;
    message DataMessage {
        bytes key = 1;                  // Message key
        bytes value = 2;                // Message payload
        map<string, bytes> headers = 3; // Message headers
    }
    DataMessage data = 2;
}

message PublishResponse {
    message ConfigMessage {
        int32 partition_count = 1;
    }
    ConfigMessage config = 1;
    message RedirectMessage {
        string new_broker = 1;
    }
    RedirectMessage redirect = 2;
}

message ConfigureTopicRequest {
    string namespace = 1;
    string topic = 2;
    TopicConfiguration configuration = 3;
}
message ConfigureTopicResponse {
}

message GetTopicConfigurationRequest {
    string namespace = 1;
    string topic = 2;
}
message GetTopicConfigurationResponse {
    TopicConfiguration configuration = 1;
}

message TopicConfiguration {
    int32 partition_count = 1;
    string collection = 2;
    string replication = 3;
}
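Based on this service definition and the client generated in weed/pb/messaging_pb/messaging.pb.go below, a minimal publisher might look like the following sketch (the broker address, namespace, topic, and payload are made-up example values; error handling is trimmed):

package main

import (
	"context"
	"log"

	"google.golang.org/grpc"

	"github.com/chrislusf/seaweedfs/weed/pb/messaging_pb"
)

func main() {
	conn, err := grpc.Dial("localhost:17777", grpc.WithInsecure()) // example broker address
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	client := messaging_pb.NewSeaweedMessagingClient(conn)

	stream, err := client.Publish(context.Background())
	if err != nil {
		log.Fatal(err)
	}

	// The first message on the stream carries the InitMessage naming the topic partition.
	stream.Send(&messaging_pb.PublishRequest{
		Init: &messaging_pb.PublishRequest_InitMessage{
			Namespace: "chat",
			Topic:     "room1",
			Partition: 0,
		},
	})

	// Subsequent messages carry DataMessage payloads, which the broker timestamps and buffers.
	stream.Send(&messaging_pb.PublishRequest{
		Data: &messaging_pb.PublishRequest_DataMessage{
			Key:   []byte("k1"),
			Value: []byte("hello"),
		},
	})

	stream.CloseSend()
}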
weed/pb/messaging_pb/messaging.pb.go
@@ -0,0 +1,824 @@
// Code generated by protoc-gen-go.
// source: messaging.proto
// DO NOT EDIT!

/*
Package messaging_pb is a generated protocol buffer package.

It is generated from these files:
	messaging.proto

It has these top-level messages:
	SubscriberMessage
	Message
	BrokerMessage
	PublishRequest
	PublishResponse
	ConfigureTopicRequest
	ConfigureTopicResponse
	GetTopicConfigurationRequest
	GetTopicConfigurationResponse
	TopicConfiguration
*/
package messaging_pb

import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"

import (
	context "golang.org/x/net/context"
	grpc "google.golang.org/grpc"
)

// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf

// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package

type SubscriberMessage_InitMessage_StartPosition int32

const (
	SubscriberMessage_InitMessage_LATEST    SubscriberMessage_InitMessage_StartPosition = 0
	SubscriberMessage_InitMessage_EARLIEST  SubscriberMessage_InitMessage_StartPosition = 1
	SubscriberMessage_InitMessage_TIMESTAMP SubscriberMessage_InitMessage_StartPosition = 2
)

var SubscriberMessage_InitMessage_StartPosition_name = map[int32]string{
	0: "LATEST",
	1: "EARLIEST",
	2: "TIMESTAMP",
}
var SubscriberMessage_InitMessage_StartPosition_value = map[string]int32{
	"LATEST":    0,
	"EARLIEST":  1,
	"TIMESTAMP": 2,
}

func (x SubscriberMessage_InitMessage_StartPosition) String() string {
	return proto.EnumName(SubscriberMessage_InitMessage_StartPosition_name, int32(x))
}
func (SubscriberMessage_InitMessage_StartPosition) EnumDescriptor() ([]byte, []int) {
	return fileDescriptor0, []int{0, 0, 0}
}

type SubscriberMessage struct {
	Init *SubscriberMessage_InitMessage `protobuf:"bytes,1,opt,name=init" json:"init,omitempty"`
	Ack  *SubscriberMessage_AckMessage  `protobuf:"bytes,2,opt,name=ack" json:"ack,omitempty"`
}

func (m *SubscriberMessage) Reset()                    { *m = SubscriberMessage{} }
func (m *SubscriberMessage) String() string            { return proto.CompactTextString(m) }
func (*SubscriberMessage) ProtoMessage()               {}
func (*SubscriberMessage) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }

func (m *SubscriberMessage) GetInit() *SubscriberMessage_InitMessage {
	if m != nil {
		return m.Init
	}
	return nil
}

func (m *SubscriberMessage) GetAck() *SubscriberMessage_AckMessage {
	if m != nil {
		return m.Ack
	}
	return nil
}

type SubscriberMessage_InitMessage struct {
	Namespace     string                                       `protobuf:"bytes,1,opt,name=namespace" json:"namespace,omitempty"`
	Topic         string                                       `protobuf:"bytes,2,opt,name=topic" json:"topic,omitempty"`
	Partition     int32                                        `protobuf:"varint,3,opt,name=partition" json:"partition,omitempty"`
	StartPosition SubscriberMessage_InitMessage_StartPosition  `protobuf:"varint,4,opt,name=startPosition,enum=messaging_pb.SubscriberMessage_InitMessage_StartPosition" json:"startPosition,omitempty"`
	TimestampNs   int64                                        `protobuf:"varint,5,opt,name=timestampNs" json:"timestampNs,omitempty"`
	SubscriberId  string                                       `protobuf:"bytes,6,opt,name=subscriber_id,json=subscriberId" json:"subscriber_id,omitempty"`
}

func (m *SubscriberMessage_InitMessage) Reset()         { *m = SubscriberMessage_InitMessage{} }
func (m *SubscriberMessage_InitMessage) String() string { return proto.CompactTextString(m) }
func (*SubscriberMessage_InitMessage) ProtoMessage()    {}
func (*SubscriberMessage_InitMessage) Descriptor() ([]byte, []int) {
	return fileDescriptor0, []int{0, 0}
}

func (m *SubscriberMessage_InitMessage) GetNamespace() string {
	if m != nil {
		return m.Namespace
	}
	return ""
}

func (m *SubscriberMessage_InitMessage) GetTopic() string {
	if m != nil {
		return m.Topic
	}
	return ""
}

func (m *SubscriberMessage_InitMessage) GetPartition() int32 {
	if m != nil {
		return m.Partition
	}
	return 0
}

func (m *SubscriberMessage_InitMessage) GetStartPosition() SubscriberMessage_InitMessage_StartPosition {
	if m != nil {
		return m.StartPosition
	}
	return SubscriberMessage_InitMessage_LATEST
}

func (m *SubscriberMessage_InitMessage) GetTimestampNs() int64 {
	if m != nil {
		return m.TimestampNs
	}
	return 0
}

func (m *SubscriberMessage_InitMessage) GetSubscriberId() string {
	if m != nil {
		return m.SubscriberId
	}
	return ""
}

type SubscriberMessage_AckMessage struct {
	MessageId int64 `protobuf:"varint,1,opt,name=message_id,json=messageId" json:"message_id,omitempty"`
}

func (m *SubscriberMessage_AckMessage) Reset()                    { *m = SubscriberMessage_AckMessage{} }
func (m *SubscriberMessage_AckMessage) String() string            { return proto.CompactTextString(m) }
func (*SubscriberMessage_AckMessage) ProtoMessage()               {}
func (*SubscriberMessage_AckMessage) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0, 1} }

func (m *SubscriberMessage_AckMessage) GetMessageId() int64 {
	if m != nil {
		return m.MessageId
	}
	return 0
}

type Message struct {
	Timestamp int64             `protobuf:"varint,1,opt,name=timestamp" json:"timestamp,omitempty"`
	Key       []byte            `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"`
	Value     []byte            `protobuf:"bytes,3,opt,name=value,proto3" json:"value,omitempty"`
	Headers   map[string][]byte `protobuf:"bytes,4,rep,name=headers" json:"headers,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value,proto3"`
}

func (m *Message) Reset()                    { *m = Message{} }
func (m *Message) String() string            { return proto.CompactTextString(m) }
func (*Message) ProtoMessage()               {}
func (*Message) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }

func (m *Message) GetTimestamp() int64 {
	if m != nil {
		return m.Timestamp
	}
	return 0
}

func (m *Message) GetKey() []byte {
	if m != nil {
		return m.Key
	}
	return nil
}

func (m *Message) GetValue() []byte {
	if m != nil {
		return m.Value
	}
	return nil
}

func (m *Message) GetHeaders() map[string][]byte {
	if m != nil {
		return m.Headers
	}
	return nil
}

type BrokerMessage struct {
	Data     *Message                       `protobuf:"bytes,1,opt,name=data" json:"data,omitempty"`
	Redirect *BrokerMessage_RedirectMessage `protobuf:"bytes,2,opt,name=redirect" json:"redirect,omitempty"`
}

func (m *BrokerMessage) Reset()                    { *m = BrokerMessage{} }
func (m *BrokerMessage) String() string            { return proto.CompactTextString(m) }
func (*BrokerMessage) ProtoMessage()               {}
func (*BrokerMessage) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} }

func (m *BrokerMessage) GetData() *Message {
	if m != nil {
		return m.Data
	}
	return nil
}

func (m *BrokerMessage) GetRedirect() *BrokerMessage_RedirectMessage {
	if m != nil {
		return m.Redirect
	}
	return nil
}

type BrokerMessage_RedirectMessage struct {
	NewBroker string `protobuf:"bytes,1,opt,name=new_broker,json=newBroker" json:"new_broker,omitempty"`
}

func (m *BrokerMessage_RedirectMessage) Reset()         { *m = BrokerMessage_RedirectMessage{} }
func (m *BrokerMessage_RedirectMessage) String() string { return proto.CompactTextString(m) }
func (*BrokerMessage_RedirectMessage) ProtoMessage()    {}
func (*BrokerMessage_RedirectMessage) Descriptor() ([]byte, []int) {
	return fileDescriptor0, []int{2, 0}
}

func (m *BrokerMessage_RedirectMessage) GetNewBroker() string {
	if m != nil {
		return m.NewBroker
	}
	return ""
}

type PublishRequest struct {
	Init *PublishRequest_InitMessage `protobuf:"bytes,1,opt,name=init" json:"init,omitempty"`
	Data *PublishRequest_DataMessage `protobuf:"bytes,2,opt,name=data" json:"data,omitempty"`
}

func (m *PublishRequest) Reset()                    { *m = PublishRequest{} }
func (m *PublishRequest) String() string            { return proto.CompactTextString(m) }
func (*PublishRequest) ProtoMessage()               {}
func (*PublishRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} }

func (m *PublishRequest) GetInit() *PublishRequest_InitMessage {
	if m != nil {
		return m.Init
	}
	return nil
}

func (m *PublishRequest) GetData() *PublishRequest_DataMessage {
	if m != nil {
		return m.Data
	}
	return nil
}

type PublishRequest_InitMessage struct {
	Namespace string `protobuf:"bytes,1,opt,name=namespace" json:"namespace,omitempty"`
	Topic     string `protobuf:"bytes,2,opt,name=topic" json:"topic,omitempty"`
	Partition int32  `protobuf:"varint,3,opt,name=partition" json:"partition,omitempty"`
}

func (m *PublishRequest_InitMessage) Reset()                    { *m = PublishRequest_InitMessage{} }
func (m *PublishRequest_InitMessage) String() string            { return proto.CompactTextString(m) }
func (*PublishRequest_InitMessage) ProtoMessage()               {}
func (*PublishRequest_InitMessage) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3, 0} }

func (m *PublishRequest_InitMessage) GetNamespace() string {
	if m != nil {
		return m.Namespace
	}
	return ""
}

func (m *PublishRequest_InitMessage) GetTopic() string {
	if m != nil {
		return m.Topic
	}
	return ""
}

func (m *PublishRequest_InitMessage) GetPartition() int32 {
	if m != nil {
		return m.Partition
	}
	return 0
}

type PublishRequest_DataMessage struct {
	Key     []byte            `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
	Value   []byte            `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
	Headers map[string][]byte `protobuf:"bytes,3,rep,name=headers" json:"headers,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value,proto3"`
}

func (m *PublishRequest_DataMessage) Reset()                    { *m = PublishRequest_DataMessage{} }
func (m *PublishRequest_DataMessage) String() string            { return proto.CompactTextString(m) }
func (*PublishRequest_DataMessage) ProtoMessage()               {}
func (*PublishRequest_DataMessage) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3, 1} }

func (m *PublishRequest_DataMessage) GetKey() []byte {
	if m != nil {
		return m.Key
	}
	return nil
}

func (m *PublishRequest_DataMessage) GetValue() []byte {
	if m != nil {
		return m.Value
	}
	return nil
}

func (m *PublishRequest_DataMessage) GetHeaders() map[string][]byte {
	if m != nil {
		return m.Headers
	}
	return nil
}

type PublishResponse struct {
	Config   *PublishResponse_ConfigMessage   `protobuf:"bytes,1,opt,name=config" json:"config,omitempty"`
	Redirect *PublishResponse_RedirectMessage `protobuf:"bytes,2,opt,name=redirect" json:"redirect,omitempty"`
}

func (m *PublishResponse) Reset()                    { *m = PublishResponse{} }
func (m *PublishResponse) String() string            { return proto.CompactTextString(m) }
func (*PublishResponse) ProtoMessage()               {}
func (*PublishResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} }

func (m *PublishResponse) GetConfig() *PublishResponse_ConfigMessage {
	if m != nil {
		return m.Config
	}
	return nil
}

func (m *PublishResponse) GetRedirect() *PublishResponse_RedirectMessage {
	if m != nil {
		return m.Redirect
	}
	return nil
}

type PublishResponse_ConfigMessage struct {
	PartitionCount int32 `protobuf:"varint,1,opt,name=partition_count,json=partitionCount" json:"partition_count,omitempty"`
}

func (m *PublishResponse_ConfigMessage) Reset()         { *m = PublishResponse_ConfigMessage{} }
func (m *PublishResponse_ConfigMessage) String() string { return proto.CompactTextString(m) }
func (*PublishResponse_ConfigMessage) ProtoMessage()    {}
func (*PublishResponse_ConfigMessage) Descriptor() ([]byte, []int) {
	return fileDescriptor0, []int{4, 0}
}

func (m *PublishResponse_ConfigMessage) GetPartitionCount() int32 {
	if m != nil {
		return m.PartitionCount
	}
	return 0
}

type PublishResponse_RedirectMessage struct {
	NewBroker string `protobuf:"bytes,1,opt,name=new_broker,json=newBroker" json:"new_broker,omitempty"`
}

func (m *PublishResponse_RedirectMessage) Reset()         { *m = PublishResponse_RedirectMessage{} }
func (m *PublishResponse_RedirectMessage) String() string { return proto.CompactTextString(m) }
func (*PublishResponse_RedirectMessage) ProtoMessage()    {}
func (*PublishResponse_RedirectMessage) Descriptor() ([]byte, []int) {
	return fileDescriptor0, []int{4, 1}
}

func (m *PublishResponse_RedirectMessage) GetNewBroker() string {
	if m != nil {
		return m.NewBroker
	}
	return ""
}

type ConfigureTopicRequest struct {
	Namespace     string              `protobuf:"bytes,1,opt,name=namespace" json:"namespace,omitempty"`
	Topic         string              `protobuf:"bytes,2,opt,name=topic" json:"topic,omitempty"`
	Configuration *TopicConfiguration `protobuf:"bytes,3,opt,name=configuration" json:"configuration,omitempty"`
}

func (m *ConfigureTopicRequest) Reset()                    { *m = ConfigureTopicRequest{} }
func (m *ConfigureTopicRequest) String() string            { return proto.CompactTextString(m) }
func (*ConfigureTopicRequest) ProtoMessage()               {}
func (*ConfigureTopicRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} }

func (m *ConfigureTopicRequest) GetNamespace() string {
	if m != nil {
		return m.Namespace
	}
	return ""
}

func (m *ConfigureTopicRequest) GetTopic() string {
	if m != nil {
		return m.Topic
	}
	return ""
}

func (m *ConfigureTopicRequest) GetConfiguration() *TopicConfiguration {
	if m != nil {
		return m.Configuration
	}
	return nil
}

type ConfigureTopicResponse struct {
}

func (m *ConfigureTopicResponse) Reset()                    { *m = ConfigureTopicResponse{} }
func (m *ConfigureTopicResponse) String() string            { return proto.CompactTextString(m) }
func (*ConfigureTopicResponse) ProtoMessage()               {}
func (*ConfigureTopicResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} }

type GetTopicConfigurationRequest struct {
	Namespace string `protobuf:"bytes,1,opt,name=namespace" json:"namespace,omitempty"`
	Topic     string `protobuf:"bytes,2,opt,name=topic" json:"topic,omitempty"`
}

func (m *GetTopicConfigurationRequest) Reset()                    { *m = GetTopicConfigurationRequest{} }
func (m *GetTopicConfigurationRequest) String() string            { return proto.CompactTextString(m) }
func (*GetTopicConfigurationRequest) ProtoMessage()               {}
func (*GetTopicConfigurationRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} }

func (m *GetTopicConfigurationRequest) GetNamespace() string {
	if m != nil {
		return m.Namespace
	}
	return ""
}

func (m *GetTopicConfigurationRequest) GetTopic() string {
	if m != nil {
		return m.Topic
	}
	return ""
}

type GetTopicConfigurationResponse struct {
	Configuration *TopicConfiguration `protobuf:"bytes,1,opt,name=configuration" json:"configuration,omitempty"`
}

func (m *GetTopicConfigurationResponse) Reset()                    { *m = GetTopicConfigurationResponse{} }
func (m *GetTopicConfigurationResponse) String() string            { return proto.CompactTextString(m) }
func (*GetTopicConfigurationResponse) ProtoMessage()               {}
func (*GetTopicConfigurationResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} }

func (m *GetTopicConfigurationResponse) GetConfiguration() *TopicConfiguration {
	if m != nil {
		return m.Configuration
	}
	return nil
}

type TopicConfiguration struct {
	PartitionCount int32  `protobuf:"varint,1,opt,name=partition_count,json=partitionCount" json:"partition_count,omitempty"`
	Collection     string `protobuf:"bytes,2,opt,name=collection" json:"collection,omitempty"`
	Replication    string `protobuf:"bytes,3,opt,name=replication" json:"replication,omitempty"`
}

func (m *TopicConfiguration) Reset()                    { *m = TopicConfiguration{} }
func (m *TopicConfiguration) String() string            { return proto.CompactTextString(m) }
func (*TopicConfiguration) ProtoMessage()               {}
func (*TopicConfiguration) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} }

func (m *TopicConfiguration) GetPartitionCount() int32 {
	if m != nil {
		return m.PartitionCount
	}
	return 0
}

func (m *TopicConfiguration) GetCollection() string {
	if m != nil {
		return m.Collection
	}
	return ""
}

func (m *TopicConfiguration) GetReplication() string {
	if m != nil {
		return m.Replication
	}
	return ""
}

func init() {
	proto.RegisterType((*SubscriberMessage)(nil), "messaging_pb.SubscriberMessage")
	proto.RegisterType((*SubscriberMessage_InitMessage)(nil), "messaging_pb.SubscriberMessage.InitMessage")
	proto.RegisterType((*SubscriberMessage_AckMessage)(nil), "messaging_pb.SubscriberMessage.AckMessage")
	proto.RegisterType((*Message)(nil), "messaging_pb.Message")
	proto.RegisterType((*BrokerMessage)(nil), "messaging_pb.BrokerMessage")
	proto.RegisterType((*BrokerMessage_RedirectMessage)(nil), "messaging_pb.BrokerMessage.RedirectMessage")
	proto.RegisterType((*PublishRequest)(nil), "messaging_pb.PublishRequest")
	proto.RegisterType((*PublishRequest_InitMessage)(nil), "messaging_pb.PublishRequest.InitMessage")
	proto.RegisterType((*PublishRequest_DataMessage)(nil), "messaging_pb.PublishRequest.DataMessage")
	proto.RegisterType((*PublishResponse)(nil), "messaging_pb.PublishResponse")
	proto.RegisterType((*PublishResponse_ConfigMessage)(nil), "messaging_pb.PublishResponse.ConfigMessage")
	proto.RegisterType((*PublishResponse_RedirectMessage)(nil), "messaging_pb.PublishResponse.RedirectMessage")
	proto.RegisterType((*ConfigureTopicRequest)(nil), "messaging_pb.ConfigureTopicRequest")
	proto.RegisterType((*ConfigureTopicResponse)(nil), "messaging_pb.ConfigureTopicResponse")
	proto.RegisterType((*GetTopicConfigurationRequest)(nil), "messaging_pb.GetTopicConfigurationRequest")
	proto.RegisterType((*GetTopicConfigurationResponse)(nil), "messaging_pb.GetTopicConfigurationResponse")
	proto.RegisterType((*TopicConfiguration)(nil), "messaging_pb.TopicConfiguration")
	proto.RegisterEnum("messaging_pb.SubscriberMessage_InitMessage_StartPosition", SubscriberMessage_InitMessage_StartPosition_name, SubscriberMessage_InitMessage_StartPosition_value)
}

// Reference imports to suppress errors if they are not otherwise used.
var _ context.Context
var _ grpc.ClientConn

// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
const _ = grpc.SupportPackageIsVersion4

// Client API for SeaweedMessaging service

type SeaweedMessagingClient interface {
	Subscribe(ctx context.Context, opts ...grpc.CallOption) (SeaweedMessaging_SubscribeClient, error)
	Publish(ctx context.Context, opts ...grpc.CallOption) (SeaweedMessaging_PublishClient, error)
	ConfigureTopic(ctx context.Context, in *ConfigureTopicRequest, opts ...grpc.CallOption) (*ConfigureTopicResponse, error)
	GetTopicConfiguration(ctx context.Context, in *GetTopicConfigurationRequest, opts ...grpc.CallOption) (*GetTopicConfigurationResponse, error)
}

type seaweedMessagingClient struct {
	cc *grpc.ClientConn
}

func NewSeaweedMessagingClient(cc *grpc.ClientConn) SeaweedMessagingClient {
	return &seaweedMessagingClient{cc}
}

func (c *seaweedMessagingClient) Subscribe(ctx context.Context, opts ...grpc.CallOption) (SeaweedMessaging_SubscribeClient, error) {
	stream, err := grpc.NewClientStream(ctx, &_SeaweedMessaging_serviceDesc.Streams[0], c.cc, "/messaging_pb.SeaweedMessaging/Subscribe", opts...)
	if err != nil {
		return nil, err
	}
	x := &seaweedMessagingSubscribeClient{stream}
	return x, nil
}

type SeaweedMessaging_SubscribeClient interface {
	Send(*SubscriberMessage) error
	Recv() (*BrokerMessage, error)
	grpc.ClientStream
}

type seaweedMessagingSubscribeClient struct {
	grpc.ClientStream
}

func (x *seaweedMessagingSubscribeClient) Send(m *SubscriberMessage) error {
	return x.ClientStream.SendMsg(m)
}

func (x *seaweedMessagingSubscribeClient) Recv() (*BrokerMessage, error) {
	m := new(BrokerMessage)
	if err := x.ClientStream.RecvMsg(m); err != nil {
		return nil, err
	}
	return m, nil
}

func (c *seaweedMessagingClient) Publish(ctx context.Context, opts ...grpc.CallOption) (SeaweedMessaging_PublishClient, error) {
	stream, err := grpc.NewClientStream(ctx, &_SeaweedMessaging_serviceDesc.Streams[1], c.cc, "/messaging_pb.SeaweedMessaging/Publish", opts...)
	if err != nil {
		return nil, err
	}
	x := &seaweedMessagingPublishClient{stream}
	return x, nil
}

type SeaweedMessaging_PublishClient interface {
	Send(*PublishRequest) error
	Recv() (*PublishResponse, error)
	grpc.ClientStream
}

type seaweedMessagingPublishClient struct {
	grpc.ClientStream
}

func (x *seaweedMessagingPublishClient) Send(m *PublishRequest) error {
	return x.ClientStream.SendMsg(m)
}

func (x *seaweedMessagingPublishClient) Recv() (*PublishResponse, error) {
	m := new(PublishResponse)
	if err := x.ClientStream.RecvMsg(m); err != nil {
		return nil, err
	}
	return m, nil
}

func (c *seaweedMessagingClient) ConfigureTopic(ctx context.Context, in *ConfigureTopicRequest, opts ...grpc.CallOption) (*ConfigureTopicResponse, error) {
	out := new(ConfigureTopicResponse)
	err := grpc.Invoke(ctx, "/messaging_pb.SeaweedMessaging/ConfigureTopic", in, out, c.cc, opts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}

func (c *seaweedMessagingClient) GetTopicConfiguration(ctx context.Context, in *GetTopicConfigurationRequest, opts ...grpc.CallOption) (*GetTopicConfigurationResponse, error) {
	out := new(GetTopicConfigurationResponse)
	err := grpc.Invoke(ctx, "/messaging_pb.SeaweedMessaging/GetTopicConfiguration", in, out, c.cc, opts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}

// Server API for SeaweedMessaging service

type SeaweedMessagingServer interface {
	Subscribe(SeaweedMessaging_SubscribeServer) error
	Publish(SeaweedMessaging_PublishServer) error
	ConfigureTopic(context.Context, *ConfigureTopicRequest) (*ConfigureTopicResponse, error)
	GetTopicConfiguration(context.Context, *GetTopicConfigurationRequest) (*GetTopicConfigurationResponse, error)
}

func RegisterSeaweedMessagingServer(s *grpc.Server, srv SeaweedMessagingServer) {
	s.RegisterService(&_SeaweedMessaging_serviceDesc, srv)
}

func _SeaweedMessaging_Subscribe_Handler(srv interface{}, stream grpc.ServerStream) error {
	return srv.(SeaweedMessagingServer).Subscribe(&seaweedMessagingSubscribeServer{stream})
}

type SeaweedMessaging_SubscribeServer interface {
	Send(*BrokerMessage) error
	Recv() (*SubscriberMessage, error)
	grpc.ServerStream
}

type seaweedMessagingSubscribeServer struct {
	grpc.ServerStream
}

func (x *seaweedMessagingSubscribeServer) Send(m *BrokerMessage) error {
	return x.ServerStream.SendMsg(m)
}

func (x *seaweedMessagingSubscribeServer) Recv() (*SubscriberMessage, error) {
	m := new(SubscriberMessage)
	if err := x.ServerStream.RecvMsg(m); err != nil {
		return nil, err
	}
	return m, nil
}

func _SeaweedMessaging_Publish_Handler(srv interface{}, stream grpc.ServerStream) error {
	return srv.(SeaweedMessagingServer).Publish(&seaweedMessagingPublishServer{stream})
}

type SeaweedMessaging_PublishServer interface {
	Send(*PublishResponse) error
	Recv() (*PublishRequest, error)
	grpc.ServerStream
}

type seaweedMessagingPublishServer struct {
	grpc.ServerStream
}

func (x *seaweedMessagingPublishServer) Send(m *PublishResponse) error {
	return x.ServerStream.SendMsg(m)
}

func (x *seaweedMessagingPublishServer) Recv() (*PublishRequest, error) {
	m := new(PublishRequest)
	if err := x.ServerStream.RecvMsg(m); err != nil {
		return nil, err
	}
	return m, nil
}

func _SeaweedMessaging_ConfigureTopic_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(ConfigureTopicRequest)
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(SeaweedMessagingServer).ConfigureTopic(ctx, in)
	}
	info := &grpc.UnaryServerInfo{
		Server:     srv,
		FullMethod: "/messaging_pb.SeaweedMessaging/ConfigureTopic",
	}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(SeaweedMessagingServer).ConfigureTopic(ctx, req.(*ConfigureTopicRequest))
	}
	return interceptor(ctx, in, info, handler)
}

func _SeaweedMessaging_GetTopicConfiguration_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(GetTopicConfigurationRequest)
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(SeaweedMessagingServer).GetTopicConfiguration(ctx, in)
	}
	info := &grpc.UnaryServerInfo{
		Server:     srv,
		FullMethod: "/messaging_pb.SeaweedMessaging/GetTopicConfiguration",
	}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(SeaweedMessagingServer).GetTopicConfiguration(ctx, req.(*GetTopicConfigurationRequest))
	}
	return interceptor(ctx, in, info, handler)
}

var _SeaweedMessaging_serviceDesc = grpc.ServiceDesc{
	ServiceName: "messaging_pb.SeaweedMessaging",
	HandlerType: (*SeaweedMessagingServer)(nil),
	Methods: []grpc.MethodDesc{
		{
			MethodName: "ConfigureTopic",
			Handler:    _SeaweedMessaging_ConfigureTopic_Handler,
		},
		{
			MethodName: "GetTopicConfiguration",
			Handler:    _SeaweedMessaging_GetTopicConfiguration_Handler,
		},
	},
	Streams: []grpc.StreamDesc{
		{
			StreamName:    "Subscribe",
			Handler:       _SeaweedMessaging_Subscribe_Handler,
			ServerStreams: true,
			ClientStreams: true,
		},
		{
			StreamName:    "Publish",
			Handler:       _SeaweedMessaging_Publish_Handler,
			ServerStreams: true,
			ClientStreams: true,
		},
	},
	Metadata: "messaging.proto",
}

func init() { proto.RegisterFile("messaging.proto", fileDescriptor0) }

var fileDescriptor0 = []byte{
	// 829 bytes of a gzipped FileDescriptorProto
	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xb4, 0x56, 0x4d, 0x8f, 0xe3, 0x44,
	0x10, 0xdd, 0xb6, 0x93, 0xcc, 0xba, 0xf2, 0x49, 0x8b, 0x41, 0x91, 0x99, 0x01, 0xcb, 0x8b, 0x44,
	0x60, 0x84, 0x35, 0x0a, 0x42, 0x1a, 0x56, 0x2b, 0xa1, 0x24, 0x84, 0x25, 0xd2, 0x84, 0x8d, 0x3a,
	0xb9, 0xa2, 0xc8, 0x71, 0x7a, 0xb3, 0x56, 0x12, 0xdb, 0xb8, 0x3b, 0x8c, 0xe6, 0xc4, 0x01, 0xae,
	0x9c, 0xf8, 0x27, 0x48, 0xfc, 0x00, 0xb8, 0xf3, 0x9f, 0x90, 0xdb, 0xdf, 0x49, 0x26, 0x13, 0x22,
	0xcd, 0xcd, 0x2e, 0xbf, 0x7a, 0xaf, 0xba, 0xea, 0x95, 0x6d, 0xa8, 0xaf, 0x29, 0x63, 0xe6, 0xc2,
	0x76, 0x16, 0x86, 0xe7, 0xbb, 0xdc, 0xc5, 0x95, 0x24, 0x30, 0xf5, 0x66, 0xfa, 0xaf, 0x05, 0x78,
	0x6f, 0xbc, 0x99, 0x31, 0xcb, 0xb7, 0x67, 0xd4, 0x1f, 0x8a, 0x47, 0x14, 0x7f, 0x03, 0x05, 0xdb,
	0xb1, 0x79, 0x13, 0x69, 0xa8, 0x55, 0x6e, 0x5f, 0x19, 0xd9, 0x14, 0x63, 0x07, 0x6e, 0x0c, 0x1c,
	0x9b, 0x47, 0xd7, 0x44, 0x24, 0xe2, 0x57, 0x20, 0x9b, 0xd6, 0xb2, 0x29, 0x89, 0xfc, 0xcf, 0x1f,
	0xcb, 0xef, 0x58, 0xcb, 0x38, 0x3d, 0x48, 0x53, 0xff, 0x96, 0xa0, 0x9c, 0xe1, 0xc4, 0x17, 0xa0,
	0x38, 0xe6, 0x9a, 0x32, 0xcf, 0xb4, 0xa8, 0xa8, 0x49, 0x21, 0x69, 0x00, 0xbf, 0x0f, 0x45, 0xee,
	0x7a, 0xb6, 0x25, 0xd4, 0x14, 0x12, 0xde, 0x04, 0x39, 0x9e, 0xe9, 0x73, 0x9b, 0xdb, 0xae, 0xd3,
	0x94, 0x35, 0xd4, 0x2a, 0x92, 0x34, 0x80, 0xa7, 0x50, 0x65, 0xdc, 0xf4, 0xf9, 0xc8, 0x65, 0x21,
	0xa2, 0xa0, 0xa1, 0x56, 0xad, 0xfd, 0xf5, 0xff, 0x38, 0xa9, 0x31, 0xce, 0x12, 0x90, 0x3c, 0x1f,
	0xd6, 0xa0, 0xcc, 0xed, 0x35, 0x65, 0xdc, 0x5c, 0x7b, 0x3f, 0xb0, 0x66, 0x51, 0x43, 0x2d, 0x99,
	0x64, 0x43, 0xf8, 0x05, 0x54, 0x59, 0xc2, 0x3f, 0xb5, 0xe7, 0xcd, 0x92, 0x28, 0xbf, 0x92, 0x06,
	0x07, 0x73, 0xfd, 0x06, 0xaa, 0x39, 0x19, 0x0c, 0x50, 0xba, 0xed, 0x4c, 0xfa, 0xe3, 0x49, 0xe3,
	0x19, 0xae, 0xc0, 0xf3, 0x7e, 0x87, 0xdc, 0x0e, 0x82, 0x3b, 0x84, 0xab, 0xa0, 0x4c, 0x06, 0xc3,
	0xfe, 0x78, 0xd2, 0x19, 0x8e, 0x1a, 0x92, 0x7a, 0x05, 0x90, 0xb6, 0x15, 0x5f, 0x02, 0x84, 0x27,
	0xa3, 0x81, 0x12, 0x12, 0xd5, 0x28, 0x51, 0x64, 0x30, 0xd7, 0xff, 0x45, 0x70, 0x16, 0x43, 0x35,
	0x50, 0x92, 0x32, 0x43, 0x64, 0x57, 0xba, 0x46, 0x24, 0x0d, 0xe2, 0x06, 0xc8, 0x4b, 0x7a, 0x2f,
	0xda, 0x5d, 0x21, 0xc1, 0x65, 0x30, 0x82, 0x9f, 0xcd, 0xd5, 0x86, 0x8a, 0x46, 0x57, 0x48, 0x78,
	0x83, 0x5f, 0xc1, 0xd9, 0x3b, 0x6a, 0xce, 0xa9, 0xcf, 0x9a, 0x05, 0x4d, 0x6e, 0x95, 0xdb, 0x7a,
	0xbe, 0xbd, 0x71, 0x23, 0xbf, 0x0f, 0x41, 0x7d, 0x87, 0xfb, 0xf7, 0x24, 0x4e, 0x51, 0x5f, 0x42,
	0x25, 0xfb, 0x20, 0x56, 0x0d, 0xc7, 0x9f, 0x57, 0x95, 0x32, 0xaa, 0x2f, 0xa5, 0x1b, 0xa4, 0xff,
	0x85, 0xa0, 0xda, 0xf5, 0xdd, 0x65, 0xea, 0xe8, 0xcf, 0xa0, 0x30, 0x37, 0xb9, 0x19, 0x39, 0xfa,
	0x7c, 0x6f, 0x21, 0x44, 0x40, 0xf0, 0x6b, 0x78, 0xee, 0xd3, 0xb9, 0xed, 0x53, 0x8b, 0x47, 0x06,
	0xde, 0x5a, 0x80, 0x1c, 0xb3, 0x41, 0x22, 0x6c, 0x4c, 0x92, 0x24, 0xab, 0xd7, 0x50, 0xdf, 0x7a,
	0x18, 0xcc, 0xc1, 0xa1, 0x77, 0xd3, 0x99, 0x60, 0x48, 0xac, 0x4c, 0xef, 0x42, 0x4a, 0xfd, 0x4f,
	0x19, 0x6a, 0xa3, 0xcd, 0x6c, 0x65, 0xb3, 0x77, 0x84, 0xfe, 0xb4, 0xa1, 0x2c, 0xd8, 0xa4, 0xec,
	0x2a, 0xb6, 0xf2, 0x95, 0xe4, 0xb1, 0x7b, 0xf7, 0x30, 0x3c, 0xb6, 0x74, 0x44, 0xf6, 0xb7, 0x26,
	0x37, 0x73, 0x9d, 0x50, 0xa7, 0x4f, 0xbc, 0x86, 0xea, 0x3f, 0x08, 0xca, 0x19, 0xd9, 0xec, 0x8c,
	0x2b, 0x07, 0x66, 0x8c, 0xdf, 0xa4, 0xce, 0x92, 0x85, 0xb3, 0xbe, 0x3a, 0xf6, 0x64, 0x4f, 0x60,
|||
0xb6, 0xdf, 0x25, 0xa8, 0x27, 0x82, 0xcc, 0x73, 0x1d, 0x46, 0x71, 0x0f, 0x4a, 0x96, 0xeb, 0xbc, |
|||
0xb5, 0x17, 0xfb, 0x5f, 0xa1, 0x5b, 0x70, 0xa3, 0x27, 0xb0, 0x71, 0xf3, 0xa3, 0x54, 0x3c, 0xd8, |
|||
0x31, 0xe2, 0x17, 0x87, 0x69, 0x1e, 0xb6, 0xe2, 0x0d, 0x54, 0x73, 0x1a, 0xf8, 0x53, 0xa8, 0x27, |
|||
0x63, 0x98, 0x5a, 0xee, 0xc6, 0x09, 0x1d, 0x56, 0x24, 0xb5, 0x24, 0xdc, 0x0b, 0xa2, 0x27, 0x98, |
|||
0xf8, 0x0f, 0x04, 0xe7, 0xa1, 0xd8, 0xc6, 0xa7, 0x93, 0xc0, 0x05, 0xb1, 0x97, 0x4f, 0x31, 0xd0, |
|||
0x77, 0x50, 0xb5, 0x22, 0x32, 0x33, 0x31, 0x51, 0xb9, 0xad, 0xe5, 0x3b, 0x21, 0x64, 0x7a, 0x59, |
|||
0x1c, 0xc9, 0xa7, 0xe9, 0x4d, 0xf8, 0x60, 0xbb, 0xa8, 0xb0, 0x6b, 0x3a, 0x81, 0x8b, 0xd7, 0x94, |
|||
0xef, 0x61, 0x38, 0xbd, 0x6a, 0x7d, 0x01, 0x97, 0x0f, 0x70, 0x46, 0x06, 0xd9, 0x39, 0x16, 0x3a, |
|||
0xed, 0x58, 0xbf, 0x00, 0xde, 0x05, 0x1d, 0x3d, 0x5d, 0xfc, 0x11, 0x80, 0xe5, 0xae, 0x56, 0xd4, |
|||
0x12, 0x35, 0x84, 0x47, 0xc8, 0x44, 0x82, 0xcf, 0x98, 0x4f, 0xbd, 0x95, 0x6d, 0xa5, 0xbd, 0x57, |
|||
0x48, 0x36, 0xd4, 0xfe, 0x4d, 0x86, 0xc6, 0x98, 0x9a, 0x77, 0x94, 0xce, 0x87, 0x71, 0xe9, 0xf8, |
|||
0x0d, 0x28, 0xc9, 0xb7, 0x13, 0x7f, 0xfc, 0xc8, 0x47, 0x55, 0xfd, 0xf0, 0xc0, 0xeb, 0x55, 0x7f, |
|||
0xd6, 0x42, 0xd7, 0x08, 0xdf, 0xc2, 0x59, 0x64, 0x76, 0x7c, 0x71, 0x68, 0xd5, 0xd5, 0xcb, 0x83, |
|||
0x1b, 0x12, 0xb1, 0xfd, 0x08, 0xb5, 0xbc, 0x17, 0xf0, 0x8b, 0x7c, 0xda, 0x5e, 0xfb, 0xaa, 0x9f, |
|||
0x1c, 0x06, 0xc5, 0x12, 0xd8, 0x87, 0xf3, 0xbd, 0xc3, 0xc7, 0x5b, 0x3f, 0x42, 0x87, 0x5c, 0xa7, |
|||
0x5e, 0x1d, 0x85, 0x8d, 0x35, 0xbb, 0x3a, 0x34, 0x58, 0x38, 0x85, 0xb7, 0xcc, 0xb0, 0x56, 0x36, |
|||
0x75, 0x78, 0xb7, 0x96, 0x0c, 0x64, 0x14, 0xfc, 0xf9, 0xcd, 0x4a, 0xe2, 0x07, 0xf0, 0xcb, 0xff, |
|||
0x02, 0x00, 0x00, 0xff, 0xff, 0xed, 0x8d, 0x77, 0xac, 0x13, 0x0a, 0x00, 0x00, |
}
@@ -1,66 +0,0 @@
syntax = "proto3";

package queue_pb;

option java_package = "seaweedfs.client";
option java_outer_classname = "QueueProto";

//////////////////////////////////////////////////

service SeaweedQueue {

    rpc StreamWrite (stream WriteMessageRequest) returns (stream WriteMessageResponse) {
    }

    rpc StreamRead (ReadMessageRequest) returns (stream ReadMessageResponse) {
    }

    rpc ConfigureTopic (ConfigureTopicRequest) returns (ConfigureTopicResponse) {
    }

    rpc DeleteTopic (DeleteTopicRequest) returns (DeleteTopicResponse) {
    }

}

//////////////////////////////////////////////////


message WriteMessageRequest {
    string topic = 1;
    int64 event_ns = 2;
    bytes partition_key = 3;
    bytes data = 4;
}

message WriteMessageResponse {
    string error = 1;
    int64 ack_ns = 2;
}

message ReadMessageRequest {
    string topic = 1;
    int64 start_ns = 2;
}

message ReadMessageResponse {
    string error = 1;
    int64 event_ns = 2;
    bytes data = 3;
}

message ConfigureTopicRequest {
    string topic = 1;
    int64 ttl_seconds = 2;
    int32 partition_count = 3;
}
message ConfigureTopicResponse {
    string error = 1;
}

message DeleteTopicRequest {
    string topic = 1;
}
message DeleteTopicResponse {
    string error = 1;
}
@@ -1,516 +0,0 @@
|||
// Code generated by protoc-gen-go.
|
|||
// source: queue.proto
|
|||
// DO NOT EDIT!
|
|||
|
|||
/* |
|||
Package queue_pb is a generated protocol buffer package. |
|||
|
|||
It is generated from these files: |
|||
queue.proto |
|||
|
|||
It has these top-level messages: |
|||
WriteMessageRequest |
|||
WriteMessageResponse |
|||
ReadMessageRequest |
|||
ReadMessageResponse |
|||
ConfigureTopicRequest |
|||
ConfigureTopicResponse |
|||
DeleteTopicRequest |
|||
DeleteTopicResponse |
|||
*/ |
|||
package queue_pb |
|||
|
|||
import proto "github.com/golang/protobuf/proto" |
|||
import fmt "fmt" |
|||
import math "math" |
|||
|
|||
import ( |
|||
context "golang.org/x/net/context" |
|||
grpc "google.golang.org/grpc" |
|||
) |
|||
|
|||
// Reference imports to suppress errors if they are not otherwise used.
|
|||
var _ = proto.Marshal |
|||
var _ = fmt.Errorf |
|||
var _ = math.Inf |
|||
|
|||
// This is a compile-time assertion to ensure that this generated file
|
|||
// is compatible with the proto package it is being compiled against.
|
|||
// A compilation error at this line likely means your copy of the
|
|||
// proto package needs to be updated.
|
|||
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
|
|||
|
|||
type WriteMessageRequest struct { |
|||
Topic string `protobuf:"bytes,1,opt,name=topic" json:"topic,omitempty"` |
|||
EventNs int64 `protobuf:"varint,2,opt,name=event_ns,json=eventNs" json:"event_ns,omitempty"` |
|||
PartitionKey []byte `protobuf:"bytes,3,opt,name=partition_key,json=partitionKey,proto3" json:"partition_key,omitempty"` |
|||
Data []byte `protobuf:"bytes,4,opt,name=data,proto3" json:"data,omitempty"` |
|||
} |
|||
|
|||
func (m *WriteMessageRequest) Reset() { *m = WriteMessageRequest{} } |
|||
func (m *WriteMessageRequest) String() string { return proto.CompactTextString(m) } |
|||
func (*WriteMessageRequest) ProtoMessage() {} |
|||
func (*WriteMessageRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } |
|||
|
|||
func (m *WriteMessageRequest) GetTopic() string { |
|||
if m != nil { |
|||
return m.Topic |
|||
} |
|||
return "" |
|||
} |
|||
|
|||
func (m *WriteMessageRequest) GetEventNs() int64 { |
|||
if m != nil { |
|||
return m.EventNs |
|||
} |
|||
return 0 |
|||
} |
|||
|
|||
func (m *WriteMessageRequest) GetPartitionKey() []byte { |
|||
if m != nil { |
|||
return m.PartitionKey |
|||
} |
|||
return nil |
|||
} |
|||
|
|||
func (m *WriteMessageRequest) GetData() []byte { |
|||
if m != nil { |
|||
return m.Data |
|||
} |
|||
return nil |
|||
} |
|||
|
|||
type WriteMessageResponse struct { |
|||
Error string `protobuf:"bytes,1,opt,name=error" json:"error,omitempty"` |
|||
AckNs int64 `protobuf:"varint,2,opt,name=ack_ns,json=ackNs" json:"ack_ns,omitempty"` |
|||
} |
|||
|
|||
func (m *WriteMessageResponse) Reset() { *m = WriteMessageResponse{} } |
|||
func (m *WriteMessageResponse) String() string { return proto.CompactTextString(m) } |
|||
func (*WriteMessageResponse) ProtoMessage() {} |
|||
func (*WriteMessageResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } |
|||
|
|||
func (m *WriteMessageResponse) GetError() string { |
|||
if m != nil { |
|||
return m.Error |
|||
} |
|||
return "" |
|||
} |
|||
|
|||
func (m *WriteMessageResponse) GetAckNs() int64 { |
|||
if m != nil { |
|||
return m.AckNs |
|||
} |
|||
return 0 |
|||
} |
|||
|
|||
type ReadMessageRequest struct { |
|||
Topic string `protobuf:"bytes,1,opt,name=topic" json:"topic,omitempty"` |
|||
StartNs int64 `protobuf:"varint,2,opt,name=start_ns,json=startNs" json:"start_ns,omitempty"` |
|||
} |
|||
|
|||
func (m *ReadMessageRequest) Reset() { *m = ReadMessageRequest{} } |
|||
func (m *ReadMessageRequest) String() string { return proto.CompactTextString(m) } |
|||
func (*ReadMessageRequest) ProtoMessage() {} |
|||
func (*ReadMessageRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } |
|||
|
|||
func (m *ReadMessageRequest) GetTopic() string { |
|||
if m != nil { |
|||
return m.Topic |
|||
} |
|||
return "" |
|||
} |
|||
|
|||
func (m *ReadMessageRequest) GetStartNs() int64 { |
|||
if m != nil { |
|||
return m.StartNs |
|||
} |
|||
return 0 |
|||
} |
|||
|
|||
type ReadMessageResponse struct { |
|||
Error string `protobuf:"bytes,1,opt,name=error" json:"error,omitempty"` |
|||
EventNs int64 `protobuf:"varint,2,opt,name=event_ns,json=eventNs" json:"event_ns,omitempty"` |
|||
Data []byte `protobuf:"bytes,3,opt,name=data,proto3" json:"data,omitempty"` |
|||
} |
|||
|
|||
func (m *ReadMessageResponse) Reset() { *m = ReadMessageResponse{} } |
|||
func (m *ReadMessageResponse) String() string { return proto.CompactTextString(m) } |
|||
func (*ReadMessageResponse) ProtoMessage() {} |
|||
func (*ReadMessageResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } |
|||
|
|||
func (m *ReadMessageResponse) GetError() string { |
|||
if m != nil { |
|||
return m.Error |
|||
} |
|||
return "" |
|||
} |
|||
|
|||
func (m *ReadMessageResponse) GetEventNs() int64 { |
|||
if m != nil { |
|||
return m.EventNs |
|||
} |
|||
return 0 |
|||
} |
|||
|
|||
func (m *ReadMessageResponse) GetData() []byte { |
|||
if m != nil { |
|||
return m.Data |
|||
} |
|||
return nil |
|||
} |
|||
|
|||
type ConfigureTopicRequest struct { |
|||
Topic string `protobuf:"bytes,1,opt,name=topic" json:"topic,omitempty"` |
|||
TtlSeconds int64 `protobuf:"varint,2,opt,name=ttl_seconds,json=ttlSeconds" json:"ttl_seconds,omitempty"` |
|||
PartitionCount int32 `protobuf:"varint,3,opt,name=partition_count,json=partitionCount" json:"partition_count,omitempty"` |
|||
} |
|||
|
|||
func (m *ConfigureTopicRequest) Reset() { *m = ConfigureTopicRequest{} } |
|||
func (m *ConfigureTopicRequest) String() string { return proto.CompactTextString(m) } |
|||
func (*ConfigureTopicRequest) ProtoMessage() {} |
|||
func (*ConfigureTopicRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } |
|||
|
|||
func (m *ConfigureTopicRequest) GetTopic() string { |
|||
if m != nil { |
|||
return m.Topic |
|||
} |
|||
return "" |
|||
} |
|||
|
|||
func (m *ConfigureTopicRequest) GetTtlSeconds() int64 { |
|||
if m != nil { |
|||
return m.TtlSeconds |
|||
} |
|||
return 0 |
|||
} |
|||
|
|||
func (m *ConfigureTopicRequest) GetPartitionCount() int32 { |
|||
if m != nil { |
|||
return m.PartitionCount |
|||
} |
|||
return 0 |
|||
} |
|||
|
|||
type ConfigureTopicResponse struct { |
|||
Error string `protobuf:"bytes,1,opt,name=error" json:"error,omitempty"` |
|||
} |
|||
|
|||
func (m *ConfigureTopicResponse) Reset() { *m = ConfigureTopicResponse{} } |
|||
func (m *ConfigureTopicResponse) String() string { return proto.CompactTextString(m) } |
|||
func (*ConfigureTopicResponse) ProtoMessage() {} |
|||
func (*ConfigureTopicResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} } |
|||
|
|||
func (m *ConfigureTopicResponse) GetError() string { |
|||
if m != nil { |
|||
return m.Error |
|||
} |
|||
return "" |
|||
} |
|||
|
|||
type DeleteTopicRequest struct { |
|||
Topic string `protobuf:"bytes,1,opt,name=topic" json:"topic,omitempty"` |
|||
} |
|||
|
|||
func (m *DeleteTopicRequest) Reset() { *m = DeleteTopicRequest{} } |
|||
func (m *DeleteTopicRequest) String() string { return proto.CompactTextString(m) } |
|||
func (*DeleteTopicRequest) ProtoMessage() {} |
|||
func (*DeleteTopicRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} } |
|||
|
|||
func (m *DeleteTopicRequest) GetTopic() string { |
|||
if m != nil { |
|||
return m.Topic |
|||
} |
|||
return "" |
|||
} |
|||
|
|||
type DeleteTopicResponse struct { |
|||
Error string `protobuf:"bytes,1,opt,name=error" json:"error,omitempty"` |
|||
} |
|||
|
|||
func (m *DeleteTopicResponse) Reset() { *m = DeleteTopicResponse{} } |
|||
func (m *DeleteTopicResponse) String() string { return proto.CompactTextString(m) } |
|||
func (*DeleteTopicResponse) ProtoMessage() {} |
|||
func (*DeleteTopicResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} } |
|||
|
|||
func (m *DeleteTopicResponse) GetError() string { |
|||
if m != nil { |
|||
return m.Error |
|||
} |
|||
return "" |
|||
} |
|||
|
|||
func init() { |
|||
proto.RegisterType((*WriteMessageRequest)(nil), "queue_pb.WriteMessageRequest") |
|||
proto.RegisterType((*WriteMessageResponse)(nil), "queue_pb.WriteMessageResponse") |
|||
proto.RegisterType((*ReadMessageRequest)(nil), "queue_pb.ReadMessageRequest") |
|||
proto.RegisterType((*ReadMessageResponse)(nil), "queue_pb.ReadMessageResponse") |
|||
proto.RegisterType((*ConfigureTopicRequest)(nil), "queue_pb.ConfigureTopicRequest") |
|||
proto.RegisterType((*ConfigureTopicResponse)(nil), "queue_pb.ConfigureTopicResponse") |
|||
proto.RegisterType((*DeleteTopicRequest)(nil), "queue_pb.DeleteTopicRequest") |
|||
proto.RegisterType((*DeleteTopicResponse)(nil), "queue_pb.DeleteTopicResponse") |
|||
} |
|||
|
|||
// Reference imports to suppress errors if they are not otherwise used.
|
|||
var _ context.Context |
|||
var _ grpc.ClientConn |
|||
|
|||
// This is a compile-time assertion to ensure that this generated file
|
|||
// is compatible with the grpc package it is being compiled against.
|
|||
const _ = grpc.SupportPackageIsVersion4 |
|||
|
|||
// Client API for SeaweedQueue service
|
|||
|
|||
type SeaweedQueueClient interface { |
|||
StreamWrite(ctx context.Context, opts ...grpc.CallOption) (SeaweedQueue_StreamWriteClient, error) |
|||
StreamRead(ctx context.Context, in *ReadMessageRequest, opts ...grpc.CallOption) (SeaweedQueue_StreamReadClient, error) |
|||
ConfigureTopic(ctx context.Context, in *ConfigureTopicRequest, opts ...grpc.CallOption) (*ConfigureTopicResponse, error) |
|||
DeleteTopic(ctx context.Context, in *DeleteTopicRequest, opts ...grpc.CallOption) (*DeleteTopicResponse, error) |
|||
} |
|||
|
|||
type seaweedQueueClient struct { |
|||
cc *grpc.ClientConn |
|||
} |
|||
|
|||
func NewSeaweedQueueClient(cc *grpc.ClientConn) SeaweedQueueClient { |
|||
return &seaweedQueueClient{cc} |
|||
} |
|||
|
|||
func (c *seaweedQueueClient) StreamWrite(ctx context.Context, opts ...grpc.CallOption) (SeaweedQueue_StreamWriteClient, error) { |
|||
stream, err := grpc.NewClientStream(ctx, &_SeaweedQueue_serviceDesc.Streams[0], c.cc, "/queue_pb.SeaweedQueue/StreamWrite", opts...) |
|||
if err != nil { |
|||
return nil, err |
|||
} |
|||
x := &seaweedQueueStreamWriteClient{stream} |
|||
return x, nil |
|||
} |
|||
|
|||
type SeaweedQueue_StreamWriteClient interface { |
|||
Send(*WriteMessageRequest) error |
|||
Recv() (*WriteMessageResponse, error) |
|||
grpc.ClientStream |
|||
} |
|||
|
|||
type seaweedQueueStreamWriteClient struct { |
|||
grpc.ClientStream |
|||
} |
|||
|
|||
func (x *seaweedQueueStreamWriteClient) Send(m *WriteMessageRequest) error { |
|||
return x.ClientStream.SendMsg(m) |
|||
} |
|||
|
|||
func (x *seaweedQueueStreamWriteClient) Recv() (*WriteMessageResponse, error) { |
|||
m := new(WriteMessageResponse) |
|||
if err := x.ClientStream.RecvMsg(m); err != nil { |
|||
return nil, err |
|||
} |
|||
return m, nil |
|||
} |
|||
|
|||
func (c *seaweedQueueClient) StreamRead(ctx context.Context, in *ReadMessageRequest, opts ...grpc.CallOption) (SeaweedQueue_StreamReadClient, error) { |
|||
stream, err := grpc.NewClientStream(ctx, &_SeaweedQueue_serviceDesc.Streams[1], c.cc, "/queue_pb.SeaweedQueue/StreamRead", opts...) |
|||
if err != nil { |
|||
return nil, err |
|||
} |
|||
x := &seaweedQueueStreamReadClient{stream} |
|||
if err := x.ClientStream.SendMsg(in); err != nil { |
|||
return nil, err |
|||
} |
|||
if err := x.ClientStream.CloseSend(); err != nil { |
|||
return nil, err |
|||
} |
|||
return x, nil |
|||
} |
|||
|
|||
type SeaweedQueue_StreamReadClient interface { |
|||
Recv() (*ReadMessageResponse, error) |
|||
grpc.ClientStream |
|||
} |
|||
|
|||
type seaweedQueueStreamReadClient struct { |
|||
grpc.ClientStream |
|||
} |
|||
|
|||
func (x *seaweedQueueStreamReadClient) Recv() (*ReadMessageResponse, error) { |
|||
m := new(ReadMessageResponse) |
|||
if err := x.ClientStream.RecvMsg(m); err != nil { |
|||
return nil, err |
|||
} |
|||
return m, nil |
|||
} |
|||
|
|||
func (c *seaweedQueueClient) ConfigureTopic(ctx context.Context, in *ConfigureTopicRequest, opts ...grpc.CallOption) (*ConfigureTopicResponse, error) { |
|||
out := new(ConfigureTopicResponse) |
|||
err := grpc.Invoke(ctx, "/queue_pb.SeaweedQueue/ConfigureTopic", in, out, c.cc, opts...) |
|||
if err != nil { |
|||
return nil, err |
|||
} |
|||
return out, nil |
|||
} |
|||
|
|||
func (c *seaweedQueueClient) DeleteTopic(ctx context.Context, in *DeleteTopicRequest, opts ...grpc.CallOption) (*DeleteTopicResponse, error) { |
|||
out := new(DeleteTopicResponse) |
|||
err := grpc.Invoke(ctx, "/queue_pb.SeaweedQueue/DeleteTopic", in, out, c.cc, opts...) |
|||
if err != nil { |
|||
return nil, err |
|||
} |
|||
return out, nil |
|||
} |
|||
|
|||
// Server API for SeaweedQueue service
|
|||
|
|||
type SeaweedQueueServer interface { |
|||
StreamWrite(SeaweedQueue_StreamWriteServer) error |
|||
StreamRead(*ReadMessageRequest, SeaweedQueue_StreamReadServer) error |
|||
ConfigureTopic(context.Context, *ConfigureTopicRequest) (*ConfigureTopicResponse, error) |
|||
DeleteTopic(context.Context, *DeleteTopicRequest) (*DeleteTopicResponse, error) |
|||
} |
|||
|
|||
func RegisterSeaweedQueueServer(s *grpc.Server, srv SeaweedQueueServer) { |
|||
s.RegisterService(&_SeaweedQueue_serviceDesc, srv) |
|||
} |
|||
|
|||
func _SeaweedQueue_StreamWrite_Handler(srv interface{}, stream grpc.ServerStream) error { |
|||
return srv.(SeaweedQueueServer).StreamWrite(&seaweedQueueStreamWriteServer{stream}) |
|||
} |
|||
|
|||
type SeaweedQueue_StreamWriteServer interface { |
|||
Send(*WriteMessageResponse) error |
|||
Recv() (*WriteMessageRequest, error) |
|||
grpc.ServerStream |
|||
} |
|||
|
|||
type seaweedQueueStreamWriteServer struct { |
|||
grpc.ServerStream |
|||
} |
|||
|
|||
func (x *seaweedQueueStreamWriteServer) Send(m *WriteMessageResponse) error { |
|||
return x.ServerStream.SendMsg(m) |
|||
} |
|||
|
|||
func (x *seaweedQueueStreamWriteServer) Recv() (*WriteMessageRequest, error) { |
|||
m := new(WriteMessageRequest) |
|||
if err := x.ServerStream.RecvMsg(m); err != nil { |
|||
return nil, err |
|||
} |
|||
return m, nil |
|||
} |
|||
|
|||
func _SeaweedQueue_StreamRead_Handler(srv interface{}, stream grpc.ServerStream) error { |
|||
m := new(ReadMessageRequest) |
|||
if err := stream.RecvMsg(m); err != nil { |
|||
return err |
|||
} |
|||
return srv.(SeaweedQueueServer).StreamRead(m, &seaweedQueueStreamReadServer{stream}) |
|||
} |
|||
|
|||
type SeaweedQueue_StreamReadServer interface { |
|||
Send(*ReadMessageResponse) error |
|||
grpc.ServerStream |
|||
} |
|||
|
|||
type seaweedQueueStreamReadServer struct { |
|||
grpc.ServerStream |
|||
} |
|||
|
|||
func (x *seaweedQueueStreamReadServer) Send(m *ReadMessageResponse) error { |
|||
return x.ServerStream.SendMsg(m) |
|||
} |
|||
|
|||
func _SeaweedQueue_ConfigureTopic_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { |
|||
in := new(ConfigureTopicRequest) |
|||
if err := dec(in); err != nil { |
|||
return nil, err |
|||
} |
|||
if interceptor == nil { |
|||
return srv.(SeaweedQueueServer).ConfigureTopic(ctx, in) |
|||
} |
|||
info := &grpc.UnaryServerInfo{ |
|||
Server: srv, |
|||
FullMethod: "/queue_pb.SeaweedQueue/ConfigureTopic", |
|||
} |
|||
handler := func(ctx context.Context, req interface{}) (interface{}, error) { |
|||
return srv.(SeaweedQueueServer).ConfigureTopic(ctx, req.(*ConfigureTopicRequest)) |
|||
} |
|||
return interceptor(ctx, in, info, handler) |
|||
} |
|||
|
|||
func _SeaweedQueue_DeleteTopic_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { |
|||
in := new(DeleteTopicRequest) |
|||
if err := dec(in); err != nil { |
|||
return nil, err |
|||
} |
|||
if interceptor == nil { |
|||
return srv.(SeaweedQueueServer).DeleteTopic(ctx, in) |
|||
} |
|||
info := &grpc.UnaryServerInfo{ |
|||
Server: srv, |
|||
FullMethod: "/queue_pb.SeaweedQueue/DeleteTopic", |
|||
} |
|||
handler := func(ctx context.Context, req interface{}) (interface{}, error) { |
|||
return srv.(SeaweedQueueServer).DeleteTopic(ctx, req.(*DeleteTopicRequest)) |
|||
} |
|||
return interceptor(ctx, in, info, handler) |
|||
} |
|||
|
|||
var _SeaweedQueue_serviceDesc = grpc.ServiceDesc{ |
|||
ServiceName: "queue_pb.SeaweedQueue", |
|||
HandlerType: (*SeaweedQueueServer)(nil), |
|||
Methods: []grpc.MethodDesc{ |
|||
{ |
|||
MethodName: "ConfigureTopic", |
|||
Handler: _SeaweedQueue_ConfigureTopic_Handler, |
|||
}, |
|||
{ |
|||
MethodName: "DeleteTopic", |
|||
Handler: _SeaweedQueue_DeleteTopic_Handler, |
|||
}, |
|||
}, |
|||
Streams: []grpc.StreamDesc{ |
|||
{ |
|||
StreamName: "StreamWrite", |
|||
Handler: _SeaweedQueue_StreamWrite_Handler, |
|||
ServerStreams: true, |
|||
ClientStreams: true, |
|||
}, |
|||
{ |
|||
StreamName: "StreamRead", |
|||
Handler: _SeaweedQueue_StreamRead_Handler, |
|||
ServerStreams: true, |
|||
}, |
|||
}, |
|||
Metadata: "queue.proto", |
|||
} |
|||
|
|||
func init() { proto.RegisterFile("queue.proto", fileDescriptor0) } |
|||
|
|||
var fileDescriptor0 = []byte{ |
|||
// 429 bytes of a gzipped FileDescriptorProto
|
|||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x8c, 0x53, 0xcd, 0x6e, 0xd3, 0x40, |
|||
0x10, 0xae, 0x9b, 0xa6, 0x94, 0x49, 0x28, 0x68, 0xd2, 0xa2, 0x10, 0xd1, 0x36, 0x5a, 0x0e, 0x44, |
|||
0x20, 0x59, 0x15, 0xbc, 0x41, 0x03, 0x27, 0x68, 0x04, 0x0e, 0x08, 0x89, 0x8b, 0xb5, 0xb5, 0xa7, |
|||
0x95, 0x15, 0xb3, 0xeb, 0xee, 0x8e, 0xa9, 0x7a, 0xe2, 0x2d, 0x79, 0x1e, 0xe4, 0xb5, 0x5c, 0xdb, |
|||
0x34, 0xb1, 0x7a, 0xf3, 0xcc, 0x78, 0xe7, 0xfb, 0xd9, 0x6f, 0x61, 0x70, 0x9d, 0x53, 0x4e, 0x7e, |
|||
0x66, 0x34, 0x6b, 0xdc, 0x73, 0x45, 0x98, 0x5d, 0x88, 0x3f, 0x30, 0xfa, 0x61, 0x12, 0xa6, 0x73, |
|||
0xb2, 0x56, 0x5e, 0x51, 0x40, 0xd7, 0x39, 0x59, 0xc6, 0x03, 0xe8, 0xb3, 0xce, 0x92, 0x68, 0xec, |
|||
0x4d, 0xbd, 0xd9, 0xe3, 0xa0, 0x2c, 0xf0, 0x05, 0xec, 0xd1, 0x6f, 0x52, 0x1c, 0x2a, 0x3b, 0xde, |
|||
0x9e, 0x7a, 0xb3, 0x5e, 0xf0, 0xc8, 0xd5, 0x0b, 0x8b, 0xaf, 0xe0, 0x49, 0x26, 0x0d, 0x27, 0x9c, |
|||
0x68, 0x15, 0xae, 0xe8, 0x76, 0xdc, 0x9b, 0x7a, 0xb3, 0x61, 0x30, 0xbc, 0x6b, 0x7e, 0xa2, 0x5b, |
|||
0x44, 0xd8, 0x89, 0x25, 0xcb, 0xf1, 0x8e, 0x9b, 0xb9, 0x6f, 0x31, 0x87, 0x83, 0x36, 0x01, 0x9b, |
|||
0x69, 0x65, 0xa9, 0x60, 0x40, 0xc6, 0x68, 0x53, 0x31, 0x70, 0x05, 0x1e, 0xc2, 0xae, 0x8c, 0x56, |
|||
0x35, 0x7e, 0x5f, 0x46, 0xab, 0x85, 0x15, 0x1f, 0x01, 0x03, 0x92, 0xf1, 0x43, 0x45, 0x58, 0x96, |
|||
0xa6, 0x29, 0xc2, 0xd5, 0x0b, 0x2b, 0x7e, 0xc2, 0xa8, 0xb5, 0xa6, 0x93, 0x4a, 0x87, 0x19, 0x95, |
|||
0xce, 0x5e, 0x43, 0xe7, 0x0d, 0x1c, 0xce, 0xb5, 0xba, 0x4c, 0xae, 0x72, 0x43, 0xdf, 0x0a, 0x22, |
|||
0xdd, 0x2c, 0x4f, 0x60, 0xc0, 0x9c, 0x86, 0x96, 0x22, 0xad, 0xe2, 0x0a, 0x00, 0x98, 0xd3, 0x65, |
|||
0xd9, 0xc1, 0xd7, 0xf0, 0xb4, 0x36, 0x3c, 0xd2, 0xb9, 0x62, 0x07, 0xd7, 0x0f, 0xf6, 0xef, 0xda, |
|||
0xf3, 0xa2, 0x2b, 0x7c, 0x78, 0xfe, 0x3f, 0x70, 0x97, 0x2e, 0xf1, 0x06, 0xf0, 0x03, 0xa5, 0xc4, |
|||
0x0f, 0x60, 0x29, 0xde, 0xc2, 0xa8, 0xf5, 0x6f, 0xd7, 0xe2, 0x77, 0x7f, 0xb7, 0x61, 0xb8, 0x24, |
|||
0x79, 0x43, 0x14, 0x7f, 0x2d, 0xe2, 0x87, 0x01, 0x0c, 0x96, 0x6c, 0x48, 0xfe, 0x72, 0x01, 0xc0, |
|||
0x23, 0xbf, 0x4a, 0xa5, 0xbf, 0x26, 0x92, 0x93, 0xe3, 0x4d, 0xe3, 0x12, 0x54, 0x6c, 0xcd, 0xbc, |
|||
0x53, 0x0f, 0xcf, 0x01, 0xca, 0x9d, 0xc5, 0x45, 0xe2, 0xcb, 0xfa, 0xcc, 0xfd, 0x7c, 0x4c, 0x8e, |
|||
0x36, 0x4c, 0xab, 0x85, 0xa7, 0x1e, 0x7e, 0x87, 0xfd, 0xb6, 0x79, 0x78, 0x52, 0x1f, 0x5a, 0x7b, |
|||
0x9f, 0x93, 0xe9, 0xe6, 0x1f, 0xaa, 0xc5, 0xf8, 0x19, 0x06, 0x0d, 0xdf, 0x9a, 0x34, 0xef, 0x5b, |
|||
0xdf, 0xa4, 0xb9, 0xc6, 0x6c, 0xb1, 0x75, 0x76, 0x0c, 0xcf, 0x6c, 0xe9, 0xeb, 0xa5, 0xf5, 0xa3, |
|||
0x34, 0x21, 0xc5, 0x67, 0xe0, 0x2c, 0xfe, 0x52, 0xbc, 0xf6, 0x8b, 0x5d, 0xf7, 0xe8, 0xdf, 0xff, |
|||
0x0b, 0x00, 0x00, 0xff, 0xff, 0x7d, 0x3e, 0x14, 0xd8, 0x03, 0x04, 0x00, 0x00, |
|||
} |
@@ -0,0 +1,5 @@
package pb

const (
    AdminShellClient = "shell"
)
@@ -1,23 +0,0 @@
package weed_server

import (
    "context"

    "github.com/chrislusf/seaweedfs/weed/pb/queue_pb"
)

func (broker *MessageBroker) ConfigureTopic(context.Context, *queue_pb.ConfigureTopicRequest) (*queue_pb.ConfigureTopicResponse, error) {
    panic("implement me")
}

func (broker *MessageBroker) DeleteTopic(context.Context, *queue_pb.DeleteTopicRequest) (*queue_pb.DeleteTopicResponse, error) {
    panic("implement me")
}

func (broker *MessageBroker) StreamWrite(queue_pb.SeaweedQueue_StreamWriteServer) error {
    panic("implement me")
}

func (broker *MessageBroker) StreamRead(*queue_pb.ReadMessageRequest, queue_pb.SeaweedQueue_StreamReadServer) error {
    panic("implement me")
}
@@ -0,0 +1,113 @@
package chunk_cache

import (
    "sync"

    "github.com/chrislusf/seaweedfs/weed/glog"
    "github.com/chrislusf/seaweedfs/weed/storage/needle"
)

const (
    memCacheSizeLimit     = 1024 * 1024
    onDiskCacheSizeLimit0 = memCacheSizeLimit
    onDiskCacheSizeLimit1 = 4 * memCacheSizeLimit
)

// a global cache for recently accessed file chunks
type ChunkCache struct {
    memCache   *ChunkCacheInMemory
    diskCaches []*OnDiskCacheLayer
    sync.RWMutex
}

func NewChunkCache(maxEntries int64, dir string, diskSizeMB int64) *ChunkCache {

    c := &ChunkCache{
        memCache: NewChunkCacheInMemory(maxEntries),
    }
    c.diskCaches = make([]*OnDiskCacheLayer, 3)
    c.diskCaches[0] = NewOnDiskCacheLayer(dir, "c0_1", diskSizeMB/4, 4)
    c.diskCaches[1] = NewOnDiskCacheLayer(dir, "c1_4", diskSizeMB/4, 4)
    c.diskCaches[2] = NewOnDiskCacheLayer(dir, "cache", diskSizeMB/2, 4)

    return c
}

func (c *ChunkCache) GetChunk(fileId string, chunkSize uint64) (data []byte) {
    if c == nil {
        return
    }

    c.RLock()
    defer c.RUnlock()

    return c.doGetChunk(fileId, chunkSize)
}

func (c *ChunkCache) doGetChunk(fileId string, chunkSize uint64) (data []byte) {

    if chunkSize < memCacheSizeLimit {
        if data = c.memCache.GetChunk(fileId); data != nil {
            return data
        }
    }

    fid, err := needle.ParseFileIdFromString(fileId)
    if err != nil {
        glog.Errorf("failed to parse file id %s", fileId)
        return nil
    }

    for _, diskCache := range c.diskCaches {
        data := diskCache.getChunk(fid.Key)
        if len(data) != 0 {
            return data
        }
    }

    return nil

}

func (c *ChunkCache) SetChunk(fileId string, data []byte) {
    if c == nil {
        return
    }
    c.Lock()
    defer c.Unlock()

    c.doSetChunk(fileId, data)
}

func (c *ChunkCache) doSetChunk(fileId string, data []byte) {

    if len(data) < memCacheSizeLimit {
        c.memCache.SetChunk(fileId, data)
    }

    fid, err := needle.ParseFileIdFromString(fileId)
    if err != nil {
        glog.Errorf("failed to parse file id %s", fileId)
        return
    }

    if len(data) < onDiskCacheSizeLimit0 {
        c.diskCaches[0].setChunk(fid.Key, data)
    } else if len(data) < onDiskCacheSizeLimit1 {
        c.diskCaches[1].setChunk(fid.Key, data)
    } else {
        c.diskCaches[2].setChunk(fid.Key, data)
    }

}

func (c *ChunkCache) Shutdown() {
    if c == nil {
        return
    }
    c.Lock()
    defer c.Unlock()
    for _, diskCache := range c.diskCaches {
        diskCache.shutdown()
    }
}
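The new ChunkCache keeps chunks under 1 MiB in an in-memory tier and routes everything onto size-tiered on-disk layers. A minimal usage sketch built only from the calls shown above, assuming the cache directory already exists and using the same file id format as the test below; a sketch, not part of the commit:

package main

import (
    "fmt"

    "github.com/chrislusf/seaweedfs/weed/util/chunk_cache"
)

func main() {
    // 1000 in-memory entries; on-disk layers sized from a 1024 MB budget under /tmp/chunk_cache.
    cache := chunk_cache.NewChunkCache(1000, "/tmp/chunk_cache", 1024)
    defer cache.Shutdown()

    fileId := "1,1aabbccdd" // "<volumeId>,<needleId+cookie>" as used in the test file
    data := []byte("hello chunk")

    cache.SetChunk(fileId, data)

    // GetChunk takes the expected chunk size so it can skip the memory tier for large chunks.
    if cached := cache.GetChunk(fileId, uint64(len(data))); cached != nil {
        fmt.Println(string(cached))
    }
}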
@@ -0,0 +1,145 @@
package chunk_cache

import (
    "fmt"
    "os"
    "time"

    "github.com/syndtr/goleveldb/leveldb/opt"

    "github.com/chrislusf/seaweedfs/weed/glog"
    "github.com/chrislusf/seaweedfs/weed/storage"
    "github.com/chrislusf/seaweedfs/weed/storage/backend"
    "github.com/chrislusf/seaweedfs/weed/storage/types"
    "github.com/chrislusf/seaweedfs/weed/util"
)

// This implements an on disk cache
// The entries are an FIFO with a size limit

type ChunkCacheVolume struct {
    DataBackend backend.BackendStorageFile
    nm          storage.NeedleMapper
    fileName    string
    smallBuffer []byte
    sizeLimit   int64
    lastModTime time.Time
    fileSize    int64
}

func LoadOrCreateChunkCacheVolume(fileName string, preallocate int64) (*ChunkCacheVolume, error) {

    v := &ChunkCacheVolume{
        smallBuffer: make([]byte, types.NeedlePaddingSize),
        fileName:    fileName,
        sizeLimit:   preallocate,
    }

    var err error

    if exists, canRead, canWrite, modTime, fileSize := util.CheckFile(v.fileName + ".dat"); exists {
        if !canRead {
            return nil, fmt.Errorf("cannot read cache file %s.dat", v.fileName)
        }
        if !canWrite {
            return nil, fmt.Errorf("cannot write cache file %s.dat", v.fileName)
        }
        if dataFile, err := os.OpenFile(v.fileName+".dat", os.O_RDWR|os.O_CREATE, 0644); err != nil {
            return nil, fmt.Errorf("cannot create cache file %s.dat: %v", v.fileName, err)
        } else {
            v.DataBackend = backend.NewDiskFile(dataFile)
            v.lastModTime = modTime
            v.fileSize = fileSize
        }
    } else {
        if v.DataBackend, err = backend.CreateVolumeFile(v.fileName+".dat", preallocate, 0); err != nil {
            return nil, fmt.Errorf("cannot create cache file %s.dat: %v", v.fileName, err)
        }
        v.lastModTime = time.Now()
    }

    var indexFile *os.File
    if indexFile, err = os.OpenFile(v.fileName+".idx", os.O_RDWR|os.O_CREATE, 0644); err != nil {
        return nil, fmt.Errorf("cannot write cache index %s.idx: %v", v.fileName, err)
    }

    glog.V(0).Infoln("loading leveldb", v.fileName+".ldb")
    opts := &opt.Options{
        BlockCacheCapacity:            2 * 1024 * 1024, // default value is 8MiB
        WriteBuffer:                   1 * 1024 * 1024, // default value is 4MiB
        CompactionTableSizeMultiplier: 10,              // default value is 1
    }
    if v.nm, err = storage.NewLevelDbNeedleMap(v.fileName+".ldb", indexFile, opts); err != nil {
        return nil, fmt.Errorf("loading leveldb %s error: %v", v.fileName+".ldb", err)
    }

    return v, nil

}

func (v *ChunkCacheVolume) Shutdown() {
    if v.DataBackend != nil {
        v.DataBackend.Close()
        v.DataBackend = nil
    }
    if v.nm != nil {
        v.nm.Close()
        v.nm = nil
    }
}

func (v *ChunkCacheVolume) destroy() {
    v.Shutdown()
    os.Remove(v.fileName + ".dat")
    os.Remove(v.fileName + ".idx")
    os.RemoveAll(v.fileName + ".ldb")
}

func (v *ChunkCacheVolume) Reset() (*ChunkCacheVolume, error) {
    v.destroy()
    return LoadOrCreateChunkCacheVolume(v.fileName, v.sizeLimit)
}

func (v *ChunkCacheVolume) GetNeedle(key types.NeedleId) ([]byte, error) {

    nv, ok := v.nm.Get(key)
    if !ok {
        return nil, storage.ErrorNotFound
    }
    data := make([]byte, nv.Size)
    if readSize, readErr := v.DataBackend.ReadAt(data, nv.Offset.ToAcutalOffset()); readErr != nil {
        return nil, fmt.Errorf("read %s.dat [%d,%d): %v",
            v.fileName, nv.Offset.ToAcutalOffset(), nv.Offset.ToAcutalOffset()+int64(nv.Size), readErr)
    } else {
        if readSize != int(nv.Size) {
            return nil, fmt.Errorf("read %d, expected %d", readSize, nv.Size)
        }
    }

    return data, nil
}

func (v *ChunkCacheVolume) WriteNeedle(key types.NeedleId, data []byte) error {

    offset := v.fileSize

    written, err := v.DataBackend.WriteAt(data, offset)
    if err != nil {
        return err
    } else if written != len(data) {
        return fmt.Errorf("partial written %d, expected %d", written, len(data))
    }

    v.fileSize += int64(written)
    extraSize := written % types.NeedlePaddingSize
    if extraSize != 0 {
        v.DataBackend.WriteAt(v.smallBuffer[:types.NeedlePaddingSize-extraSize], offset+int64(written))
        v.fileSize += int64(types.NeedlePaddingSize - extraSize)
    }

    if err := v.nm.Put(key, types.ToOffset(offset), uint32(len(data))); err != nil {
        glog.V(4).Infof("failed to save in needle map %d: %v", key, err)
    }

    return nil
}
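WriteNeedle appends the chunk at the current end of the .dat file and then pads the write up to the next types.NeedlePaddingSize boundary with smallBuffer, so offsets stored in the needle map stay aligned. A small standalone sketch of that rounding, assuming an 8-byte padding value (the constant itself lives in the storage/types package):

package main

import "fmt"

// needlePaddingSize stands in for types.NeedlePaddingSize; 8 is assumed here.
const needlePaddingSize = 8

// paddedSize rounds a write up to the next padding boundary, mirroring the
// extraSize arithmetic in WriteNeedle.
func paddedSize(written int) int {
    extra := written % needlePaddingSize
    if extra == 0 {
        return written
    }
    return written + (needlePaddingSize - extra)
}

func main() {
    for _, n := range []int{8, 13, 4096, 4100} {
        fmt.Printf("%d bytes -> %d bytes on disk\n", n, paddedSize(n))
    }
}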
@@ -0,0 +1,59 @@
package chunk_cache

import (
    "bytes"
    "fmt"
    "io/ioutil"
    "math/rand"
    "os"
    "testing"
)

func TestOnDisk(t *testing.T) {

    tmpDir, _ := ioutil.TempDir("", "c")
    defer os.RemoveAll(tmpDir)

    totalDiskSizeMb := int64(32)

    cache := NewChunkCache(0, tmpDir, totalDiskSizeMb)

    writeCount := 5
    type test_data struct {
        data   []byte
        fileId string
        size   uint64
    }
    testData := make([]*test_data, writeCount)
    for i := 0; i < writeCount; i++ {
        buff := make([]byte, 1024*1024)
        rand.Read(buff)
        testData[i] = &test_data{
            data:   buff,
            fileId: fmt.Sprintf("1,%daabbccdd", i+1),
            size:   uint64(len(buff)),
        }
        cache.SetChunk(testData[i].fileId, testData[i].data)
    }

    for i := 0; i < writeCount; i++ {
        data := cache.GetChunk(testData[i].fileId, testData[i].size)
        if bytes.Compare(data, testData[i].data) != 0 {
            t.Errorf("failed to write to and read from cache: %d", i)
        }
    }

    cache.Shutdown()

    cache = NewChunkCache(0, tmpDir, totalDiskSizeMb)

    for i := 0; i < writeCount; i++ {
        data := cache.GetChunk(testData[i].fileId, testData[i].size)
        if bytes.Compare(data, testData[i].data) != 0 {
            t.Errorf("failed to write to and read from cache: %d", i)
        }
    }

    cache.Shutdown()

}
@@ -0,0 +1,89 @@
package chunk_cache

import (
    "fmt"
    "path"
    "sort"

    "github.com/chrislusf/seaweedfs/weed/glog"
    "github.com/chrislusf/seaweedfs/weed/storage"
    "github.com/chrislusf/seaweedfs/weed/storage/types"
)

type OnDiskCacheLayer struct {
    diskCaches []*ChunkCacheVolume
}

func NewOnDiskCacheLayer(dir, namePrefix string, diskSizeMB int64, segmentCount int) *OnDiskCacheLayer {

    volumeCount, volumeSize := int(diskSizeMB/30000), int64(30000)
    if volumeCount < segmentCount {
        volumeCount, volumeSize = segmentCount, diskSizeMB/int64(segmentCount)
    }

    c := &OnDiskCacheLayer{}
    for i := 0; i < volumeCount; i++ {
        fileName := path.Join(dir, fmt.Sprintf("%s_%d", namePrefix, i))
        diskCache, err := LoadOrCreateChunkCacheVolume(fileName, volumeSize*1024*1024)
        if err != nil {
            glog.Errorf("failed to add cache %s : %v", fileName, err)
        } else {
            c.diskCaches = append(c.diskCaches, diskCache)
        }
    }

    // keep newest cache to the front
    sort.Slice(c.diskCaches, func(i, j int) bool {
        return c.diskCaches[i].lastModTime.After(c.diskCaches[j].lastModTime)
    })

    return c
}

func (c *OnDiskCacheLayer) setChunk(needleId types.NeedleId, data []byte) {

    if c.diskCaches[0].fileSize+int64(len(data)) > c.diskCaches[0].sizeLimit {
        t, resetErr := c.diskCaches[len(c.diskCaches)-1].Reset()
        if resetErr != nil {
            glog.Errorf("failed to reset cache file %s", c.diskCaches[len(c.diskCaches)-1].fileName)
            return
        }
        for i := len(c.diskCaches) - 1; i > 0; i-- {
            c.diskCaches[i] = c.diskCaches[i-1]
        }
        c.diskCaches[0] = t
    }

    c.diskCaches[0].WriteNeedle(needleId, data)

}

func (c *OnDiskCacheLayer) getChunk(needleId types.NeedleId) (data []byte) {

    var err error

    for _, diskCache := range c.diskCaches {
        data, err = diskCache.GetNeedle(needleId)
        if err == storage.ErrorNotFound {
            continue
        }
        if err != nil {
            glog.Errorf("failed to read cache file %s id %d", diskCache.fileName, needleId)
            continue
        }
        if len(data) != 0 {
            return
        }
    }

    return nil

}

func (c *OnDiskCacheLayer) shutdown() {

    for _, diskCache := range c.diskCaches {
        diskCache.Shutdown()
    }

}
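setChunk evicts FIFO-style: when the newest volume would overflow, the oldest volume is Reset (wiped and recreated) and promoted to the front, and the remaining volumes shift back one slot. A tiny standalone sketch of just that rotation, with strings standing in for *ChunkCacheVolume; illustrative only, not part of the commit:

package main

import "fmt"

// rotate mimics the slice shuffle in OnDiskCacheLayer.setChunk: the last (oldest)
// element is cleared and becomes the new front, the rest shift back by one.
func rotate(vols []string) []string {
    reset := "empty(" + vols[len(vols)-1] + ")" // stand-in for Reset() on the oldest volume
    for i := len(vols) - 1; i > 0; i-- {
        vols[i] = vols[i-1]
    }
    vols[0] = reset
    return vols
}

func main() {
    vols := []string{"c0_1_0", "c0_1_1", "c0_1_2", "c0_1_3"}
    fmt.Println(rotate(vols)) // [empty(c0_1_3) c0_1_0 c0_1_1 c0_1_2]
}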
@@ -0,0 +1,40 @@
package log_buffer

import "time"

type MemBuffer struct {
    buf       []byte
    startTime time.Time
    stopTime  time.Time
}

type SealedBuffers struct {
    buffers []*MemBuffer
}

func newSealedBuffers(size int) *SealedBuffers {
    sbs := &SealedBuffers{}

    sbs.buffers = make([]*MemBuffer, size)
    for i := 0; i < size; i++ {
        sbs.buffers[i] = &MemBuffer{
            buf: make([]byte, BufferSize),
        }
    }

    return sbs
}

func (sbs *SealedBuffers) SealBuffer(startTime, stopTime time.Time, buf []byte) (newBuf []byte) {
    oldMemBuffer := sbs.buffers[0]
    size := len(sbs.buffers)
    for i := 0; i < size-1; i++ {
        sbs.buffers[i].buf = sbs.buffers[i+1].buf
        sbs.buffers[i].startTime = sbs.buffers[i+1].startTime
        sbs.buffers[i].stopTime = sbs.buffers[i+1].stopTime
    }
    sbs.buffers[size-1].buf = buf
    sbs.buffers[size-1].startTime = startTime
    sbs.buffers[size-1].stopTime = stopTime
    return oldMemBuffer.buf
}
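SealBuffer appends the just-filled buffer to the tail of the ring, shifts the older sealed buffers toward the head, and returns the evicted head's byte slice so the caller can reuse it as the next active write buffer, keeping allocations fixed. A sketch of that reuse loop as it would look inside the log_buffer package (exampleSealLoop is a hypothetical helper; BufferSize is the package constant already referenced above):

package log_buffer

import "time"

// exampleSealLoop shows how the slice returned by SealBuffer becomes the next
// active buffer, so the total number of buffers stays at size+1.
func exampleSealLoop() {
    sbs := newSealedBuffers(2)         // two sealed slots
    active := make([]byte, BufferSize) // the buffer currently being written

    for i := 0; i < 5; i++ {
        start := time.Now()
        // ... append log entries into active ...
        stop := time.Now()

        // Seal the active buffer and recycle whatever falls off the other end.
        active = sbs.SealBuffer(start, stop, active)
    }
}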
@@ -0,0 +1,25 @@
package util

import (
    "net"

    "github.com/chrislusf/seaweedfs/weed/glog"
)

func DetectedHostAddress() string {
    addrs, err := net.InterfaceAddrs()
    if err != nil {
        glog.V(0).Infof("failed to detect ip address: %v", err)
        return ""
    }

    for _, a := range addrs {
        if ipnet, ok := a.(*net.IPNet); ok && !ipnet.IP.IsLoopback() {
            if ipnet.IP.To4() != nil {
                return ipnet.IP.String()
            }
        }
    }

    return "localhost"
}
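DetectedHostAddress returns the first non-loopback IPv4 address of the host and falls back to "localhost" when none is found. A one-liner usage sketch, assuming the package is imported under its path in this repository:

package main

import (
    "fmt"

    "github.com/chrislusf/seaweedfs/weed/util"
)

func main() {
    // Prints e.g. "192.168.1.23", or "localhost" on a machine with only loopback interfaces.
    fmt.Println(util.DetectedHostAddress())
}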