
Merge pull request #16 from chrislusf/master

sync
pull/1480/head
hilimd · 4 years ago · committed by GitHub
commit 082500151a
52 changed files (changed-line counts in parentheses):

  1. .travis.yml (5)
  2. k8s/seaweedfs/Chart.yaml (2)
  3. k8s/seaweedfs/values.yaml (2)
  4. other/java/client/src/main/proto/filer.proto (2)
  5. weed/command/master.go (2)
  6. weed/command/s3.go (8)
  7. weed/filer/mongodb/mongodb_store.go (22)
  8. weed/pb/filer.proto (2)
  9. weed/pb/filer_pb/filer.pb.go (366)
  10. weed/s3api/auth_credentials.go (27)
  11. weed/s3api/auth_signature_v2.go (65)
  12. weed/s3api/auth_signature_v4.go (166)
  13. weed/s3api/auto_signature_v4_test.go (15)
  14. weed/s3api/chunked_reader_v4.go (25)
  15. weed/s3api/filer_multipart.go (48)
  16. weed/s3api/policy/post-policy.go (321)
  17. weed/s3api/policy/post-policy_test.go (378)
  18. weed/s3api/policy/postpolicyform.go (276)
  19. weed/s3api/policy/postpolicyform_test.go (106)
  20. weed/s3api/s3api_bucket_handlers.go (9)
  21. weed/s3api/s3api_handlers.go (11)
  22. weed/s3api/s3api_object_copy_handlers.go (21)
  23. weed/s3api/s3api_object_handlers.go (37)
  24. weed/s3api/s3api_object_handlers_postpolicy.go (241)
  25. weed/s3api/s3api_object_multipart_handlers.go (33)
  26. weed/s3api/s3api_objects_list_handlers.go (13)
  27. weed/s3api/s3api_server.go (43)
  28. weed/s3api/s3err/s3-error.go (61)
  29. weed/s3api/s3err/s3api_errors.go (54)
  30. weed/s3api/stats.go (21)
  31. weed/security/tls.go (9)
  32. weed/server/filer_grpc_server.go (17)
  33. weed/server/filer_grpc_server_rename.go (1)
  34. weed/server/filer_server.go (22)
  35. weed/server/filer_server_handlers.go (3)
  36. weed/server/master_grpc_server.go (2)
  37. weed/server/volume_grpc_client_to_master.go (30)
  38. weed/server/volume_server.go (7)
  39. weed/server/volume_server_handlers.go (3)
  40. weed/server/volume_server_handlers_admin.go (2)
  41. weed/server/volume_server_handlers_ui.go (1)
  42. weed/shell/command_collection_delete.go (21)
  43. weed/shell/command_volume_configure_replication.go (2)
  44. weed/shell/command_volume_copy.go (20)
  45. weed/shell/command_volume_delete.go (20)
  46. weed/shell/command_volume_fix_replication.go (10)
  47. weed/shell/command_volume_mount.go (20)
  48. weed/shell/command_volume_move.go (20)
  49. weed/shell/command_volume_unmount.go (20)
  50. weed/stats/metrics.go (21)
  51. weed/topology/data_node.go (20)
  52. weed/util/constants.go (2)

.travis.yml (5)

@@ -1,9 +1,8 @@
sudo: false
language: go
go:
- 1.12.x
- 1.13.x
- 1.14.x
- 1.15.x
before_install:
- export PATH=/home/travis/gopath/bin:$PATH
@@ -45,4 +44,4 @@ deploy:
on:
tags: true
repo: chrislusf/seaweedfs
go: 1.14.x
go: 1.15.x

k8s/seaweedfs/Chart.yaml (2)

@@ -1,4 +1,4 @@
apiVersion: v1
description: SeaweedFS
name: seaweedfs
version: 1.99
version: 2.00

k8s/seaweedfs/values.yaml (2)

@@ -4,7 +4,7 @@ global:
registry: ""
repository: ""
imageName: chrislusf/seaweedfs
imageTag: "1.99"
imageTag: "2.00"
imagePullPolicy: IfNotPresent
imagePullSecrets: imagepullsecret
restartPolicy: Always

other/java/client/src/main/proto/filer.proto (2)

@@ -273,6 +273,8 @@ message GetFilerConfigurationResponse {
string dir_buckets = 5;
bool cipher = 7;
int32 signature = 8;
string metrics_address = 9;
int32 metrics_interval_sec = 10;
}
message SubscribeMetadataRequest {

weed/command/master.go (2)

@@ -57,7 +57,7 @@ func init() {
m.garbageThreshold = cmdMaster.Flag.Float64("garbageThreshold", 0.3, "threshold to vacuum and reclaim spaces")
m.whiteList = cmdMaster.Flag.String("whiteList", "", "comma separated Ip addresses having write permission. No limit if empty.")
m.disableHttp = cmdMaster.Flag.Bool("disableHttp", false, "disable http requests, only gRPC operations are allowed.")
m.metricsAddress = cmdMaster.Flag.String("metrics.address", "", "Prometheus gateway address")
m.metricsAddress = cmdMaster.Flag.String("metrics.address", "", "Prometheus gateway address <host>:<port>")
m.metricsIntervalSec = cmdMaster.Flag.Int("metrics.intervalSeconds", 15, "Prometheus push interval in seconds")
}

weed/command/s3.go (8)

@@ -14,6 +14,7 @@ import (
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/s3api"
stats_collect "github.com/chrislusf/seaweedfs/weed/stats"
"github.com/chrislusf/seaweedfs/weed/util"
)
@@ -128,6 +129,10 @@ func (s3opt *S3Options) startS3Server() bool {
grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.client")
// metrics read from the filer
var metricsAddress string
var metricsIntervalSec int
for {
err = pb.WithGrpcFilerClient(filerGrpcAddress, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
resp, err := client.GetFilerConfiguration(context.Background(), &filer_pb.GetFilerConfigurationRequest{})
@@ -135,6 +140,7 @@ func (s3opt *S3Options) startS3Server() bool {
return fmt.Errorf("get filer %s configuration: %v", filerGrpcAddress, err)
}
filerBucketsPath = resp.DirBuckets
metricsAddress, metricsIntervalSec = resp.MetricsAddress, int(resp.MetricsIntervalSec)
glog.V(0).Infof("S3 read filer buckets dir: %s", filerBucketsPath)
return nil
})
@@ -147,6 +153,8 @@ func (s3opt *S3Options) startS3Server() bool {
}
}
go stats_collect.LoopPushingMetric("s3", stats_collect.SourceName(uint32(*s3opt.port)), stats_collect.S3Gather, metricsAddress, metricsIntervalSec)
router := mux.NewRouter().SkipClean(true)
_, s3ApiServer_err := s3api.NewS3ApiServer(router, &s3api.S3ApiServerOption{
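The loop started here pushes the S3 gateway's metrics to the Prometheus push gateway whose address and interval were just read from the filer configuration. As a rough illustration of the pattern that stats_collect.LoopPushingMetric presumably follows (a minimal sketch using the standard client_golang push package; the function, job and instance names below are illustrative, not taken from this change):

import (
	"log"
	"time"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/push"
)

// loopPushingMetric periodically pushes all registered metrics to a Prometheus
// push gateway. An empty address or non-positive interval disables pushing,
// mirroring the behaviour expected when the filer returns no metrics_address.
func loopPushingMetric(job, instance, addr string, intervalSeconds int) {
	if addr == "" || intervalSeconds <= 0 {
		return
	}
	pusher := push.New(addr, job).
		Gatherer(prometheus.DefaultGatherer).
		Grouping("instance", instance)
	for range time.Tick(time.Duration(intervalSeconds) * time.Second) {
		if err := pusher.Push(); err != nil {
			log.Printf("push metrics to %s: %v", addr, err)
		}
	}
}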

weed/filer/mongodb/mongodb_store.go (22)

@@ -95,6 +95,12 @@ func (store *MongodbStore) RollbackTransaction(ctx context.Context) error {
func (store *MongodbStore) InsertEntry(ctx context.Context, entry *filer.Entry) (err error) {
return store.UpdateEntry(ctx, entry)
}
func (store *MongodbStore) UpdateEntry(ctx context.Context, entry *filer.Entry) (err error) {
dir, name := entry.FullPath.DirAndName()
meta, err := entry.EncodeAttributesAndChunks()
if err != nil {
@@ -107,23 +113,19 @@ func (store *MongodbStore) InsertEntry(ctx context.Context, entry *filer.Entry)
c := store.connect.Database(store.database).Collection(store.collectionName)
_, err = c.InsertOne(ctx, Model{
Directory: dir,
Name: name,
Meta: meta,
})
opts := options.Update().SetUpsert(true)
filter := bson.D{{"directory", dir}, {"name", name}}
update := bson.D{{"$set", bson.D{{"meta", meta}}}}
_, err = c.UpdateOne(ctx, filter, update, opts)
if err != nil {
return fmt.Errorf("InsertEntry %st: %v", entry.FullPath, err)
return fmt.Errorf("UpdateEntry %s: %v", entry.FullPath, err)
}
return nil
}
func (store *MongodbStore) UpdateEntry(ctx context.Context, entry *filer.Entry) (err error) {
return store.InsertEntry(ctx, entry)
}
func (store *MongodbStore) FindEntry(ctx context.Context, fullpath util.FullPath) (entry *filer.Entry, err error) {
dir, name := fullpath.DirAndName()
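Pieced together, the refactor inverts the old delegation: UpdateEntry now performs an upsert, and InsertEntry simply calls it. Roughly, the resulting method should read as the following sketch (assembled from the hunks above; the error wrapping after EncodeAttributesAndChunks is not shown in the diff and is assumed here):

func (store *MongodbStore) UpdateEntry(ctx context.Context, entry *filer.Entry) (err error) {
	dir, name := entry.FullPath.DirAndName()
	meta, err := entry.EncodeAttributesAndChunks()
	if err != nil {
		return fmt.Errorf("encode %s: %v", entry.FullPath, err) // assumed wrapping
	}

	c := store.connect.Database(store.database).Collection(store.collectionName)

	// Upsert: update the document matching (directory, name), inserting it if
	// it does not exist yet, which is what lets InsertEntry delegate here.
	opts := options.Update().SetUpsert(true)
	filter := bson.D{{"directory", dir}, {"name", name}}
	update := bson.D{{"$set", bson.D{{"meta", meta}}}}

	if _, err = c.UpdateOne(ctx, filter, update, opts); err != nil {
		return fmt.Errorf("UpdateEntry %s: %v", entry.FullPath, err)
	}
	return nil
}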

weed/pb/filer.proto (2)

@@ -273,6 +273,8 @@ message GetFilerConfigurationResponse {
string dir_buckets = 5;
bool cipher = 7;
int32 signature = 8;
string metrics_address = 9;
int32 metrics_interval_sec = 10;
}
message SubscribeMetadataRequest {

weed/pb/filer_pb/filer.pb.go (366)

@@ -2118,13 +2118,15 @@ type GetFilerConfigurationResponse struct {
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
Masters []string `protobuf:"bytes,1,rep,name=masters,proto3" json:"masters,omitempty"`
Replication string `protobuf:"bytes,2,opt,name=replication,proto3" json:"replication,omitempty"`
Collection string `protobuf:"bytes,3,opt,name=collection,proto3" json:"collection,omitempty"`
MaxMb uint32 `protobuf:"varint,4,opt,name=max_mb,json=maxMb,proto3" json:"max_mb,omitempty"`
DirBuckets string `protobuf:"bytes,5,opt,name=dir_buckets,json=dirBuckets,proto3" json:"dir_buckets,omitempty"`
Cipher bool `protobuf:"varint,7,opt,name=cipher,proto3" json:"cipher,omitempty"`
Signature int32 `protobuf:"varint,8,opt,name=signature,proto3" json:"signature,omitempty"`
Masters []string `protobuf:"bytes,1,rep,name=masters,proto3" json:"masters,omitempty"`
Replication string `protobuf:"bytes,2,opt,name=replication,proto3" json:"replication,omitempty"`
Collection string `protobuf:"bytes,3,opt,name=collection,proto3" json:"collection,omitempty"`
MaxMb uint32 `protobuf:"varint,4,opt,name=max_mb,json=maxMb,proto3" json:"max_mb,omitempty"`
DirBuckets string `protobuf:"bytes,5,opt,name=dir_buckets,json=dirBuckets,proto3" json:"dir_buckets,omitempty"`
Cipher bool `protobuf:"varint,7,opt,name=cipher,proto3" json:"cipher,omitempty"`
Signature int32 `protobuf:"varint,8,opt,name=signature,proto3" json:"signature,omitempty"`
MetricsAddress string `protobuf:"bytes,9,opt,name=metrics_address,json=metricsAddress,proto3" json:"metrics_address,omitempty"`
MetricsIntervalSec int32 `protobuf:"varint,10,opt,name=metrics_interval_sec,json=metricsIntervalSec,proto3" json:"metrics_interval_sec,omitempty"`
}
func (x *GetFilerConfigurationResponse) Reset() {
@@ -2208,6 +2210,20 @@ func (x *GetFilerConfigurationResponse) GetSignature() int32 {
return 0
}
func (x *GetFilerConfigurationResponse) GetMetricsAddress() string {
if x != nil {
return x.MetricsAddress
}
return ""
}
func (x *GetFilerConfigurationResponse) GetMetricsIntervalSec() int32 {
if x != nil {
return x.MetricsIntervalSec
}
return 0
}
type SubscribeMetadataRequest struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
@@ -3142,7 +3158,7 @@ var file_filer_proto_rawDesc = []byte{
0x6e, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x66, 0x69, 0x6c, 0x65, 0x43, 0x6f,
0x75, 0x6e, 0x74, 0x22, 0x1e, 0x0a, 0x1c, 0x47, 0x65, 0x74, 0x46, 0x69, 0x6c, 0x65, 0x72, 0x43,
0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75,
0x65, 0x73, 0x74, 0x22, 0xe9, 0x01, 0x0a, 0x1d, 0x47, 0x65, 0x74, 0x46, 0x69, 0x6c, 0x65, 0x72,
0x65, 0x73, 0x74, 0x22, 0xc4, 0x02, 0x0a, 0x1d, 0x47, 0x65, 0x74, 0x46, 0x69, 0x6c, 0x65, 0x72,
0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73,
0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x73,
0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x73, 0x12,
@@ -3156,171 +3172,177 @@ var file_filer_proto_rawDesc = []byte{
0x69, 0x72, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x63, 0x69, 0x70,
0x68, 0x65, 0x72, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x63, 0x69, 0x70, 0x68, 0x65,
0x72, 0x12, 0x1c, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x08,
0x20, 0x01, 0x28, 0x05, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x22,
0x95, 0x01, 0x0a, 0x18, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x4d, 0x65, 0x74,
0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1f, 0x0a, 0x0b,
0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28,
0x09, 0x52, 0x0a, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1f, 0x0a,
0x0b, 0x70, 0x61, 0x74, 0x68, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x02, 0x20, 0x01,
0x28, 0x09, 0x52, 0x0a, 0x70, 0x61, 0x74, 0x68, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x19,
0x0a, 0x08, 0x73, 0x69, 0x6e, 0x63, 0x65, 0x5f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03,
0x52, 0x07, 0x73, 0x69, 0x6e, 0x63, 0x65, 0x4e, 0x73, 0x12, 0x1c, 0x0a, 0x09, 0x73, 0x69, 0x67,
0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x09, 0x73, 0x69,
0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x22, 0x9a, 0x01, 0x0a, 0x19, 0x53, 0x75, 0x62, 0x73,
0x63, 0x72, 0x69, 0x62, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x65, 0x73,
0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f,
0x72, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74,
0x6f, 0x72, 0x79, 0x12, 0x4a, 0x0a, 0x12, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x6e, 0x6f, 0x74,
0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32,
0x1b, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74,
0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x11, 0x65, 0x76,
0x65, 0x6e, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12,
0x13, 0x0a, 0x05, 0x74, 0x73, 0x5f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x04,
0x74, 0x73, 0x4e, 0x73, 0x22, 0x61, 0x0a, 0x08, 0x4c, 0x6f, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79,
0x12, 0x13, 0x0a, 0x05, 0x74, 0x73, 0x5f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52,
0x04, 0x74, 0x73, 0x4e, 0x73, 0x12, 0x2c, 0x0a, 0x12, 0x70, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69,
0x6f, 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28,
0x05, 0x52, 0x10, 0x70, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x48,
0x61, 0x73, 0x68, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x03, 0x20, 0x01, 0x28,
0x0c, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x22, 0x65, 0x0a, 0x14, 0x4b, 0x65, 0x65, 0x70, 0x43,
0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12,
0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e,
0x61, 0x6d, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x70, 0x6f, 0x72, 0x74,
0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x67, 0x72, 0x70, 0x63, 0x50, 0x6f, 0x72, 0x74,
0x12, 0x1c, 0x0a, 0x09, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x03, 0x20,
0x03, 0x28, 0x09, 0x52, 0x09, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x22, 0x17,
0x0a, 0x15, 0x4b, 0x65, 0x65, 0x70, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x65, 0x64, 0x52,
0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x31, 0x0a, 0x13, 0x4c, 0x6f, 0x63, 0x61, 0x74,
0x65, 0x42, 0x72, 0x6f, 0x6b, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a,
0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x22, 0xcd, 0x01, 0x0a, 0x14, 0x4c,
0x6f, 0x63, 0x61, 0x74, 0x65, 0x42, 0x72, 0x6f, 0x6b, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f,
0x6e, 0x73, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x66, 0x6f, 0x75, 0x6e, 0x64, 0x18, 0x01, 0x20, 0x01,
0x28, 0x08, 0x52, 0x05, 0x66, 0x6f, 0x75, 0x6e, 0x64, 0x12, 0x45, 0x0a, 0x09, 0x72, 0x65, 0x73,
0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x66,
0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x65, 0x42, 0x72,
0x6f, 0x6b, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x52, 0x65, 0x73,
0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x09, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73,
0x1a, 0x58, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x25, 0x0a, 0x0e,
0x67, 0x72, 0x70, 0x63, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x18, 0x01,
0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x67, 0x72, 0x70, 0x63, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73,
0x73, 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f,
0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0d, 0x72, 0x65, 0x73,
0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0x20, 0x0a, 0x0c, 0x4b, 0x76,
0x47, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65,
0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x22, 0x3b, 0x0a, 0x0d,
0x4b, 0x76, 0x47, 0x65, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x14, 0x0a,
0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76, 0x61,
0x6c, 0x75, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01,
0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x22, 0x36, 0x0a, 0x0c, 0x4b, 0x76, 0x50,
0x75, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79,
0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76,
0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75,
0x65, 0x22, 0x25, 0x0a, 0x0d, 0x4b, 0x76, 0x50, 0x75, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
0x73, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28,
0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x32, 0x85, 0x0c, 0x0a, 0x0c, 0x53, 0x65, 0x61,
0x77, 0x65, 0x65, 0x64, 0x46, 0x69, 0x6c, 0x65, 0x72, 0x12, 0x67, 0x0a, 0x14, 0x4c, 0x6f, 0x6f,
0x6b, 0x75, 0x70, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x45, 0x6e, 0x74, 0x72,
0x79, 0x12, 0x25, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4c, 0x6f, 0x6f,
0x6b, 0x75, 0x70, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x45, 0x6e, 0x74, 0x72,
0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72,
0x5f, 0x70, 0x62, 0x2e, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74,
0x6f, 0x72, 0x79, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
0x22, 0x00, 0x12, 0x4e, 0x0a, 0x0b, 0x4c, 0x69, 0x73, 0x74, 0x45, 0x6e, 0x74, 0x72, 0x69, 0x65,
0x73, 0x12, 0x1c, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4c, 0x69, 0x73,
0x74, 0x45, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a,
0x1d, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x45,
0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00,
0x30, 0x01, 0x12, 0x4c, 0x0a, 0x0b, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x45, 0x6e, 0x74, 0x72,
0x79, 0x12, 0x1c, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x43, 0x72, 0x65,
0x61, 0x74, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a,
0x1d, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74,
0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00,
0x12, 0x4c, 0x0a, 0x0b, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12,
0x1c, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74,
0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e,
0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x45,
0x6e, 0x74, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x52,
0x0a, 0x0d, 0x41, 0x70, 0x70, 0x65, 0x6e, 0x64, 0x54, 0x6f, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12,
0x1e, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x41, 0x70, 0x70, 0x65, 0x6e,
0x64, 0x54, 0x6f, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a,
0x1f, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x41, 0x70, 0x70, 0x65, 0x6e,
0x64, 0x54, 0x6f, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
0x22, 0x00, 0x12, 0x4c, 0x0a, 0x0b, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x45, 0x6e, 0x74, 0x72,
0x79, 0x12, 0x1c, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x44, 0x65, 0x6c,
0x65, 0x74, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a,
0x1d, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74,
0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00,
0x12, 0x5e, 0x0a, 0x11, 0x41, 0x74, 0x6f, 0x6d, 0x69, 0x63, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65,
0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x22, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62,
0x2e, 0x41, 0x74, 0x6f, 0x6d, 0x69, 0x63, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x45, 0x6e, 0x74,
0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x66, 0x69, 0x6c, 0x65,
0x72, 0x5f, 0x70, 0x62, 0x2e, 0x41, 0x74, 0x6f, 0x6d, 0x69, 0x63, 0x52, 0x65, 0x6e, 0x61, 0x6d,
0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00,
0x12, 0x4f, 0x0a, 0x0c, 0x41, 0x73, 0x73, 0x69, 0x67, 0x6e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65,
0x12, 0x1d, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x41, 0x73, 0x73, 0x69,
0x67, 0x6e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a,
0x1e, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x41, 0x73, 0x73, 0x69, 0x67,
0x6e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22,
0x00, 0x12, 0x4f, 0x0a, 0x0c, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x56, 0x6f, 0x6c, 0x75, 0x6d,
0x65, 0x12, 0x1d, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4c, 0x6f, 0x6f,
0x6b, 0x75, 0x70, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
0x1a, 0x1e, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4c, 0x6f, 0x6f, 0x6b,
0x75, 0x70, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
0x22, 0x00, 0x12, 0x5b, 0x0a, 0x10, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, 0x6f, 0x6c, 0x6c,
0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x21, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70,
0x62, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69,
0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x66, 0x69, 0x6c, 0x65,
0x72, 0x5f, 0x70, 0x62, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, 0x6f, 0x6c, 0x6c, 0x65,
0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12,
0x49, 0x0a, 0x0a, 0x53, 0x74, 0x61, 0x74, 0x69, 0x73, 0x74, 0x69, 0x63, 0x73, 0x12, 0x1b, 0x2e,
0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x69, 0x73, 0x74,
0x69, 0x63, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1c, 0x2e, 0x66, 0x69, 0x6c,
0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x69, 0x73, 0x74, 0x69, 0x63, 0x73,
0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x6a, 0x0a, 0x15, 0x47, 0x65,
0x74, 0x46, 0x69, 0x6c, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74,
0x69, 0x6f, 0x6e, 0x12, 0x26, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x47,
0x65, 0x74, 0x46, 0x69, 0x6c, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61,
0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x66, 0x69,
0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x47, 0x65, 0x74, 0x46, 0x69, 0x6c, 0x65, 0x72, 0x43,
0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70,
0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x60, 0x0a, 0x11, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72,
0x69, 0x62, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x22, 0x2e, 0x66, 0x69,
0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65,
0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a,
0x23, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x53, 0x75, 0x62, 0x73, 0x63,
0x72, 0x69, 0x62, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x65, 0x73, 0x70,
0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x30, 0x01, 0x12, 0x65, 0x0a, 0x16, 0x53, 0x75, 0x62, 0x73,
0x63, 0x72, 0x69, 0x62, 0x65, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61,
0x74, 0x61, 0x12, 0x22, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x53, 0x75,
0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52,
0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70,
0x62, 0x2e, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64,
0x61, 0x74, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x30, 0x01, 0x12,
0x56, 0x0a, 0x0d, 0x4b, 0x65, 0x65, 0x70, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x65, 0x64,
0x12, 0x1e, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4b, 0x65, 0x65, 0x70,
0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
0x1a, 0x1f, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4b, 0x65, 0x65, 0x70,
0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x65, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
0x65, 0x22, 0x00, 0x28, 0x01, 0x30, 0x01, 0x12, 0x4f, 0x0a, 0x0c, 0x4c, 0x6f, 0x63, 0x61, 0x74,
0x65, 0x42, 0x72, 0x6f, 0x6b, 0x65, 0x72, 0x12, 0x1d, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f,
0x20, 0x01, 0x28, 0x05, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x12,
0x27, 0x0a, 0x0f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65,
0x73, 0x73, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63,
0x73, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x30, 0x0a, 0x14, 0x6d, 0x65, 0x74, 0x72,
0x69, 0x63, 0x73, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x5f, 0x73, 0x65, 0x63,
0x18, 0x0a, 0x20, 0x01, 0x28, 0x05, 0x52, 0x12, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x49,
0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x53, 0x65, 0x63, 0x22, 0x95, 0x01, 0x0a, 0x18, 0x53,
0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61,
0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x63, 0x6c, 0x69, 0x65, 0x6e,
0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6c,
0x69, 0x65, 0x6e, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x70, 0x61, 0x74, 0x68,
0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x70,
0x61, 0x74, 0x68, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x19, 0x0a, 0x08, 0x73, 0x69, 0x6e,
0x63, 0x65, 0x5f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x73, 0x69, 0x6e,
0x63, 0x65, 0x4e, 0x73, 0x12, 0x1c, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72,
0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75,
0x72, 0x65, 0x22, 0x9a, 0x01, 0x0a, 0x19, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65,
0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
0x12, 0x1c, 0x0a, 0x09, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x18, 0x01, 0x20,
0x01, 0x28, 0x09, 0x52, 0x09, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x12, 0x4a,
0x0a, 0x12, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61,
0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x66, 0x69, 0x6c,
0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66,
0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x11, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x4e, 0x6f,
0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x13, 0x0a, 0x05, 0x74, 0x73,
0x5f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x04, 0x74, 0x73, 0x4e, 0x73, 0x22,
0x61, 0x0a, 0x08, 0x4c, 0x6f, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x13, 0x0a, 0x05, 0x74,
0x73, 0x5f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x04, 0x74, 0x73, 0x4e, 0x73,
0x12, 0x2c, 0x0a, 0x12, 0x70, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x65,
0x79, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x10, 0x70, 0x61,
0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x48, 0x61, 0x73, 0x68, 0x12, 0x12,
0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x64, 0x61,
0x74, 0x61, 0x22, 0x65, 0x0a, 0x14, 0x4b, 0x65, 0x65, 0x70, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63,
0x74, 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61,
0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1b,
0x0a, 0x09, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28,
0x0d, 0x52, 0x08, 0x67, 0x72, 0x70, 0x63, 0x50, 0x6f, 0x72, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x72,
0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x09,
0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x22, 0x17, 0x0a, 0x15, 0x4b, 0x65, 0x65,
0x70, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x65, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
0x73, 0x65, 0x22, 0x31, 0x0a, 0x13, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x65, 0x42, 0x72, 0x6f, 0x6b,
0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x73,
0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x72, 0x65, 0x73,
0x6f, 0x75, 0x72, 0x63, 0x65, 0x22, 0xcd, 0x01, 0x0a, 0x14, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x65,
0x42, 0x72, 0x6f, 0x6b, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x14,
0x0a, 0x05, 0x66, 0x6f, 0x75, 0x6e, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x66,
0x6f, 0x75, 0x6e, 0x64, 0x12, 0x45, 0x0a, 0x09, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65,
0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f,
0x70, 0x62, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x65, 0x42, 0x72, 0x6f, 0x6b, 0x65, 0x72, 0x52,
0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70,
0x62, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x65, 0x42, 0x72, 0x6f, 0x6b, 0x65, 0x72, 0x52, 0x65,
0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x3a, 0x0a, 0x05, 0x4b, 0x76, 0x47, 0x65,
0x74, 0x12, 0x16, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4b, 0x76, 0x47,
0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x17, 0x2e, 0x66, 0x69, 0x6c, 0x65,
0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4b, 0x76, 0x47, 0x65, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
0x73, 0x65, 0x22, 0x00, 0x12, 0x3a, 0x0a, 0x05, 0x4b, 0x76, 0x50, 0x75, 0x74, 0x12, 0x16, 0x2e,
0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4b, 0x76, 0x50, 0x75, 0x74, 0x52, 0x65,
0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x17, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62,
0x2e, 0x4b, 0x76, 0x50, 0x75, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00,
0x42, 0x4f, 0x0a, 0x10, 0x73, 0x65, 0x61, 0x77, 0x65, 0x65, 0x64, 0x66, 0x73, 0x2e, 0x63, 0x6c,
0x69, 0x65, 0x6e, 0x74, 0x42, 0x0a, 0x46, 0x69, 0x6c, 0x65, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f,
0x5a, 0x2f, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x68, 0x72,
0x69, 0x73, 0x6c, 0x75, 0x73, 0x66, 0x2f, 0x73, 0x65, 0x61, 0x77, 0x65, 0x65, 0x64, 0x66, 0x73,
0x2f, 0x77, 0x65, 0x65, 0x64, 0x2f, 0x70, 0x62, 0x2f, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70,
0x62, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65,
0x52, 0x09, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x1a, 0x58, 0x0a, 0x08, 0x52,
0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x67, 0x72, 0x70, 0x63, 0x5f,
0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52,
0x0d, 0x67, 0x72, 0x70, 0x63, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x12, 0x25,
0x0a, 0x0e, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74,
0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0d, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65,
0x43, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0x20, 0x0a, 0x0c, 0x4b, 0x76, 0x47, 0x65, 0x74, 0x52, 0x65,
0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01,
0x28, 0x0c, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x22, 0x3b, 0x0a, 0x0d, 0x4b, 0x76, 0x47, 0x65, 0x74,
0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75,
0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x14,
0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65,
0x72, 0x72, 0x6f, 0x72, 0x22, 0x36, 0x0a, 0x0c, 0x4b, 0x76, 0x50, 0x75, 0x74, 0x52, 0x65, 0x71,
0x75, 0x65, 0x73, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28,
0x0c, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18,
0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x25, 0x0a, 0x0d,
0x4b, 0x76, 0x50, 0x75, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x14, 0x0a,
0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72,
0x72, 0x6f, 0x72, 0x32, 0x85, 0x0c, 0x0a, 0x0c, 0x53, 0x65, 0x61, 0x77, 0x65, 0x65, 0x64, 0x46,
0x69, 0x6c, 0x65, 0x72, 0x12, 0x67, 0x0a, 0x14, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x44, 0x69,
0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x25, 0x2e, 0x66,
0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x44, 0x69,
0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75,
0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4c,
0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x45, 0x6e,
0x74, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x4e, 0x0a,
0x0b, 0x4c, 0x69, 0x73, 0x74, 0x45, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x12, 0x1c, 0x2e, 0x66,
0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x45, 0x6e, 0x74, 0x72,
0x69, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x66, 0x69, 0x6c,
0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x45, 0x6e, 0x74, 0x72, 0x69, 0x65,
0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x30, 0x01, 0x12, 0x4c, 0x0a,
0x0b, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x1c, 0x2e, 0x66,
0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x45, 0x6e,
0x74, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x66, 0x69, 0x6c,
0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x45, 0x6e, 0x74, 0x72,
0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x4c, 0x0a, 0x0b, 0x55,
0x70, 0x64, 0x61, 0x74, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x1c, 0x2e, 0x66, 0x69, 0x6c,
0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x45, 0x6e, 0x74, 0x72,
0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72,
0x5f, 0x70, 0x62, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52,
0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x52, 0x0a, 0x0d, 0x41, 0x70, 0x70,
0x65, 0x6e, 0x64, 0x54, 0x6f, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x1e, 0x2e, 0x66, 0x69, 0x6c,
0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x41, 0x70, 0x70, 0x65, 0x6e, 0x64, 0x54, 0x6f, 0x45, 0x6e,
0x74, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x66, 0x69, 0x6c,
0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x41, 0x70, 0x70, 0x65, 0x6e, 0x64, 0x54, 0x6f, 0x45, 0x6e,
0x74, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x4c, 0x0a,
0x0b, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x1c, 0x2e, 0x66,
0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x45, 0x6e,
0x74, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x66, 0x69, 0x6c,
0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x45, 0x6e, 0x74, 0x72,
0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x5e, 0x0a, 0x11, 0x41,
0x74, 0x6f, 0x6d, 0x69, 0x63, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79,
0x12, 0x22, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x41, 0x74, 0x6f, 0x6d,
0x69, 0x63, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x65, 0x71,
0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e,
0x41, 0x74, 0x6f, 0x6d, 0x69, 0x63, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x45, 0x6e, 0x74, 0x72,
0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x4f, 0x0a, 0x0c, 0x41,
0x73, 0x73, 0x69, 0x67, 0x6e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x12, 0x1d, 0x2e, 0x66, 0x69,
0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x41, 0x73, 0x73, 0x69, 0x67, 0x6e, 0x56, 0x6f, 0x6c,
0x75, 0x6d, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x66, 0x69, 0x6c,
0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x41, 0x73, 0x73, 0x69, 0x67, 0x6e, 0x56, 0x6f, 0x6c, 0x75,
0x6d, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x4f, 0x0a, 0x0c,
0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x12, 0x1d, 0x2e, 0x66,
0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x56, 0x6f,
0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x66, 0x69,
0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x56, 0x6f, 0x6c,
0x75, 0x6d, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x5b, 0x0a,
0x10, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f,
0x6e, 0x12, 0x21, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x44, 0x65, 0x6c,
0x65, 0x74, 0x65, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71,
0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e,
0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e,
0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x49, 0x0a, 0x0a, 0x53, 0x74,
0x61, 0x74, 0x69, 0x73, 0x74, 0x69, 0x63, 0x73, 0x12, 0x1b, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72,
0x5f, 0x70, 0x62, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x69, 0x73, 0x74, 0x69, 0x63, 0x73, 0x52, 0x65,
0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1c, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62,
0x2e, 0x53, 0x74, 0x61, 0x74, 0x69, 0x73, 0x74, 0x69, 0x63, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f,
0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x6a, 0x0a, 0x15, 0x47, 0x65, 0x74, 0x46, 0x69, 0x6c, 0x65,
0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x26,
0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x47, 0x65, 0x74, 0x46, 0x69, 0x6c,
0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52,
0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70,
0x62, 0x2e, 0x47, 0x65, 0x74, 0x46, 0x69, 0x6c, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67,
0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22,
0x00, 0x12, 0x60, 0x0a, 0x11, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x4d, 0x65,
0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x22, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70,
0x62, 0x2e, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64,
0x61, 0x74, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x66, 0x69, 0x6c,
0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x4d,
0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22,
0x00, 0x30, 0x01, 0x12, 0x65, 0x0a, 0x16, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65,
0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x22, 0x2e,
0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69,
0x62, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
0x74, 0x1a, 0x23, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x53, 0x75, 0x62,
0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x65,
0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x30, 0x01, 0x12, 0x56, 0x0a, 0x0d, 0x4b, 0x65,
0x65, 0x70, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x65, 0x64, 0x12, 0x1e, 0x2e, 0x66, 0x69,
0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4b, 0x65, 0x65, 0x70, 0x43, 0x6f, 0x6e, 0x6e, 0x65,
0x63, 0x74, 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x66, 0x69,
0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4b, 0x65, 0x65, 0x70, 0x43, 0x6f, 0x6e, 0x6e, 0x65,
0x63, 0x74, 0x65, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x28, 0x01,
0x30, 0x01, 0x12, 0x4f, 0x0a, 0x0c, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x65, 0x42, 0x72, 0x6f, 0x6b,
0x65, 0x72, 0x12, 0x1d, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4c, 0x6f,
0x63, 0x61, 0x74, 0x65, 0x42, 0x72, 0x6f, 0x6b, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
0x74, 0x1a, 0x1e, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4c, 0x6f, 0x63,
0x61, 0x74, 0x65, 0x42, 0x72, 0x6f, 0x6b, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
0x65, 0x22, 0x00, 0x12, 0x3a, 0x0a, 0x05, 0x4b, 0x76, 0x47, 0x65, 0x74, 0x12, 0x16, 0x2e, 0x66,
0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4b, 0x76, 0x47, 0x65, 0x74, 0x52, 0x65, 0x71,
0x75, 0x65, 0x73, 0x74, 0x1a, 0x17, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e,
0x4b, 0x76, 0x47, 0x65, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12,
0x3a, 0x0a, 0x05, 0x4b, 0x76, 0x50, 0x75, 0x74, 0x12, 0x16, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72,
0x5f, 0x70, 0x62, 0x2e, 0x4b, 0x76, 0x50, 0x75, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
0x1a, 0x17, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4b, 0x76, 0x50, 0x75,
0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x42, 0x4f, 0x0a, 0x10, 0x73,
0x65, 0x61, 0x77, 0x65, 0x65, 0x64, 0x66, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x42,
0x0a, 0x46, 0x69, 0x6c, 0x65, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x5a, 0x2f, 0x67, 0x69, 0x74,
0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x68, 0x72, 0x69, 0x73, 0x6c, 0x75, 0x73,
0x66, 0x2f, 0x73, 0x65, 0x61, 0x77, 0x65, 0x65, 0x64, 0x66, 0x73, 0x2f, 0x77, 0x65, 0x65, 0x64,
0x2f, 0x70, 0x62, 0x2f, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x62, 0x06, 0x70, 0x72,
0x6f, 0x74, 0x6f, 0x33,
}
var (

weed/s3api/auth_credentials.go (27)

@@ -3,6 +3,7 @@ package s3api
import (
"bytes"
"fmt"
"github.com/chrislusf/seaweedfs/weed/s3api/s3err"
"io/ioutil"
"net/http"
@@ -63,7 +64,7 @@ func (iam *IdentityAccessManagement) loadS3ApiConfiguration(fileName string) err
return fmt.Errorf("fail to read %s : %v", fileName, readErr)
}
glog.V(1).Infof("maybeLoadVolumeInfo Unmarshal volume info %v", fileName)
glog.V(1).Infof("load s3 config: %v", fileName)
if err := jsonpb.Unmarshal(bytes.NewReader(rawData), s3ApiConfiguration); err != nil {
glog.Warningf("unmarshal error: %v", err)
return fmt.Errorf("unmarshal %s error: %v", fileName, err)
@@ -125,7 +126,7 @@ func (iam *IdentityAccessManagement) Auth(f http.HandlerFunc, action Action) htt
return func(w http.ResponseWriter, r *http.Request) {
errCode := iam.authRequest(r, action)
if errCode == ErrNone {
if errCode == s3err.ErrNone {
f(w, r)
return
}
@@ -134,16 +135,16 @@ func (iam *IdentityAccessManagement) Auth(f http.HandlerFunc, action Action) htt
}
// check whether the request has valid access keys
func (iam *IdentityAccessManagement) authRequest(r *http.Request, action Action) ErrorCode {
func (iam *IdentityAccessManagement) authRequest(r *http.Request, action Action) s3err.ErrorCode {
var identity *Identity
var s3Err ErrorCode
var s3Err s3err.ErrorCode
var found bool
switch getRequestAuthType(r) {
case authTypeStreamingSigned:
return ErrNone
return s3err.ErrNone
case authTypeUnknown:
glog.V(3).Infof("unknown auth type")
return ErrAccessDenied
return s3err.ErrAccessDenied
case authTypePresignedV2, authTypeSignedV2:
glog.V(3).Infof("v2 auth type")
identity, s3Err = iam.isReqAuthenticatedV2(r)
@@ -152,21 +153,21 @@ func (iam *IdentityAccessManagement) authRequest(r *http.Request, action Action)
identity, s3Err = iam.reqSignatureV4Verify(r)
case authTypePostPolicy:
glog.V(3).Infof("post policy auth type")
return ErrNotImplemented
return s3err.ErrNone
case authTypeJWT:
glog.V(3).Infof("jwt auth type")
return ErrNotImplemented
return s3err.ErrNotImplemented
case authTypeAnonymous:
identity, found = iam.lookupAnonymous()
if !found {
return ErrAccessDenied
return s3err.ErrAccessDenied
}
default:
return ErrNotImplemented
return s3err.ErrNotImplemented
}
glog.V(3).Infof("auth error: %v", s3Err)
if s3Err != ErrNone {
if s3Err != s3err.ErrNone {
return s3Err
}
@@ -175,10 +176,10 @@ func (iam *IdentityAccessManagement) authRequest(r *http.Request, action Action)
bucket, _ := getBucketAndObject(r)
if !identity.canDo(action, bucket) {
return ErrAccessDenied
return s3err.ErrAccessDenied
}
return ErrNone
return s3err.ErrNone
}

weed/s3api/auth_signature_v2.go (65)

@@ -23,6 +23,7 @@ import (
"crypto/subtle"
"encoding/base64"
"fmt"
"github.com/chrislusf/seaweedfs/weed/s3api/s3err"
"net"
"net/http"
"net/url"
@@ -61,13 +62,27 @@ var resourceList = []string{
}
// Verify if request has valid AWS Signature Version '2'.
func (iam *IdentityAccessManagement) isReqAuthenticatedV2(r *http.Request) (*Identity, ErrorCode) {
func (iam *IdentityAccessManagement) isReqAuthenticatedV2(r *http.Request) (*Identity, s3err.ErrorCode) {
if isRequestSignatureV2(r) {
return iam.doesSignV2Match(r)
}
return iam.doesPresignV2SignatureMatch(r)
}
func (iam *IdentityAccessManagement) doesPolicySignatureV2Match(formValues http.Header) s3err.ErrorCode {
accessKey := formValues.Get("AWSAccessKeyId")
_, cred, found := iam.lookupByAccessKey(accessKey)
if !found {
return s3err.ErrInvalidAccessKeyID
}
policy := formValues.Get("Policy")
signature := formValues.Get("Signature")
if !compareSignatureV2(signature, calculateSignatureV2(policy, cred.SecretKey)) {
return s3err.ErrSignatureDoesNotMatch
}
return s3err.ErrNone
}
// Authorization = "AWS" + " " + AWSAccessKeyId + ":" + Signature;
// Signature = Base64( HMAC-SHA1( YourSecretKey, UTF-8-Encoding-Of( StringToSign ) ) );
//
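As a concrete illustration of that formula, the calculateSignatureV2 and compareSignatureV2 helpers used by doesPolicySignatureV2Match above presumably reduce to an HMAC-SHA1 over the string to sign (for POST policies, the base64-encoded policy document), base64-encoded and compared in constant time. A minimal sketch with hypothetical names:

import (
	"crypto/hmac"
	"crypto/sha1"
	"crypto/subtle"
	"encoding/base64"
)

// signatureV2Of computes Base64(HMAC-SHA1(secretKey, stringToSign)), the
// Signature Version 2 form quoted in the comment above.
func signatureV2Of(stringToSign, secretKey string) string {
	mac := hmac.New(sha1.New, []byte(secretKey))
	mac.Write([]byte(stringToSign))
	return base64.StdEncoding.EncodeToString(mac.Sum(nil))
}

// equalSignaturesV2 compares two signatures without leaking timing information.
func equalSignaturesV2(got, want string) bool {
	return subtle.ConstantTimeCompare([]byte(got), []byte(want)) == 1
}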
@@ -88,36 +103,36 @@ func (iam *IdentityAccessManagement) isReqAuthenticatedV2(r *http.Request) (*Ide
// - http://docs.aws.amazon.com/AmazonS3/latest/dev/auth-request-sig-v2.html
// returns true if matches, false otherwise. if error is not nil then it is always false
func validateV2AuthHeader(v2Auth string) (accessKey string, errCode ErrorCode) {
func validateV2AuthHeader(v2Auth string) (accessKey string, errCode s3err.ErrorCode) {
if v2Auth == "" {
return "", ErrAuthHeaderEmpty
return "", s3err.ErrAuthHeaderEmpty
}
// Verify if the header algorithm is supported or not.
if !strings.HasPrefix(v2Auth, signV2Algorithm) {
return "", ErrSignatureVersionNotSupported
return "", s3err.ErrSignatureVersionNotSupported
}
// below is V2 Signed Auth header format, splitting on `space` (after the `AWS` string).
// Authorization = "AWS" + " " + AWSAccessKeyId + ":" + Signature
authFields := strings.Split(v2Auth, " ")
if len(authFields) != 2 {
return "", ErrMissingFields
return "", s3err.ErrMissingFields
}
// Then will be splitting on ":", this will seprate `AWSAccessKeyId` and `Signature` string.
keySignFields := strings.Split(strings.TrimSpace(authFields[1]), ":")
if len(keySignFields) != 2 {
return "", ErrMissingFields
return "", s3err.ErrMissingFields
}
return keySignFields[0], ErrNone
return keySignFields[0], s3err.ErrNone
}
func (iam *IdentityAccessManagement) doesSignV2Match(r *http.Request) (*Identity, ErrorCode) {
func (iam *IdentityAccessManagement) doesSignV2Match(r *http.Request) (*Identity, s3err.ErrorCode) {
v2Auth := r.Header.Get("Authorization")
accessKey, apiError := validateV2AuthHeader(v2Auth)
if apiError != ErrNone {
if apiError != s3err.ErrNone {
return nil, apiError
}
@@ -125,7 +140,7 @@ func (iam *IdentityAccessManagement) doesSignV2Match(r *http.Request) (*Identity
// Validate if access key id same.
ident, cred, found := iam.lookupByAccessKey(accessKey)
if !found {
return nil, ErrInvalidAccessKeyID
return nil, s3err.ErrInvalidAccessKeyID
}
// r.RequestURI will have raw encoded URI as sent by the client.
@@ -138,30 +153,30 @@ func (iam *IdentityAccessManagement) doesSignV2Match(r *http.Request) (*Identity
unescapedQueries, err := unescapeQueries(encodedQuery)
if err != nil {
return nil, ErrInvalidQueryParams
return nil, s3err.ErrInvalidQueryParams
}
encodedResource, err = getResource(encodedResource, r.Host, iam.domain)
if err != nil {
return nil, ErrInvalidRequest
return nil, s3err.ErrInvalidRequest
}
prefix := fmt.Sprintf("%s %s:", signV2Algorithm, cred.AccessKey)
if !strings.HasPrefix(v2Auth, prefix) {
return nil, ErrSignatureDoesNotMatch
return nil, s3err.ErrSignatureDoesNotMatch
}
v2Auth = v2Auth[len(prefix):]
expectedAuth := signatureV2(cred, r.Method, encodedResource, strings.Join(unescapedQueries, "&"), r.Header)
if !compareSignatureV2(v2Auth, expectedAuth) {
return nil, ErrSignatureDoesNotMatch
return nil, s3err.ErrSignatureDoesNotMatch
}
return ident, ErrNone
return ident, s3err.ErrNone
}
// doesPresignV2SignatureMatch - Verify query headers with presigned signature
// - http://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html#RESTAuthenticationQueryStringAuth
// returns ErrNone if matches. S3 errors otherwise.
func (iam *IdentityAccessManagement) doesPresignV2SignatureMatch(r *http.Request) (*Identity, ErrorCode) {
func (iam *IdentityAccessManagement) doesPresignV2SignatureMatch(r *http.Request) (*Identity, s3err.ErrorCode) {
// r.RequestURI will have raw encoded URI as sent by the client.
tokens := strings.SplitN(r.RequestURI, "?", 2)
@@ -182,14 +197,14 @@ func (iam *IdentityAccessManagement) doesPresignV2SignatureMatch(r *http.Request
var unescapedQueries []string
unescapedQueries, err = unescapeQueries(encodedQuery)
if err != nil {
return nil, ErrInvalidQueryParams
return nil, s3err.ErrInvalidQueryParams
}
// Extract the necessary values from presigned query, construct a list of new filtered queries.
for _, query := range unescapedQueries {
keyval := strings.SplitN(query, "=", 2)
if len(keyval) != 2 {
return nil, ErrInvalidQueryParams
return nil, s3err.ErrInvalidQueryParams
}
switch keyval[0] {
case "AWSAccessKeyId":
@@ -205,37 +220,37 @@ func (iam *IdentityAccessManagement) doesPresignV2SignatureMatch(r *http.Request
// Invalid values returns error.
if accessKey == "" || gotSignature == "" || expires == "" {
return nil, ErrInvalidQueryParams
return nil, s3err.ErrInvalidQueryParams
}
// Validate if access key id same.
ident, cred, found := iam.lookupByAccessKey(accessKey)
if !found {
return nil, ErrInvalidAccessKeyID
return nil, s3err.ErrInvalidAccessKeyID
}
// Make sure the request has not expired.
expiresInt, err := strconv.ParseInt(expires, 10, 64)
if err != nil {
return nil, ErrMalformedExpires
return nil, s3err.ErrMalformedExpires
}
// Check if the presigned URL has expired.
if expiresInt < time.Now().UTC().Unix() {
return nil, ErrExpiredPresignRequest
return nil, s3err.ErrExpiredPresignRequest
}
encodedResource, err = getResource(encodedResource, r.Host, iam.domain)
if err != nil {
return nil, ErrInvalidRequest
return nil, s3err.ErrInvalidRequest
}
expectedSignature := preSignatureV2(cred, r.Method, encodedResource, strings.Join(filteredQueries, "&"), r.Header, expires)
if !compareSignatureV2(gotSignature, expectedSignature) {
return nil, ErrSignatureDoesNotMatch
return nil, s3err.ErrSignatureDoesNotMatch
}
return ident, ErrNone
return ident, s3err.ErrNone
}
// Escape encodedQuery string into unescaped list of query params, returns error

weed/s3api/auth_signature_v4.go (166)

@@ -23,6 +23,7 @@ import (
"crypto/sha256"
"crypto/subtle"
"encoding/hex"
"github.com/chrislusf/seaweedfs/weed/s3api/s3err"
"net/http"
"net/url"
"regexp"
@@ -33,7 +34,7 @@ import (
"unicode/utf8"
)
func (iam *IdentityAccessManagement) reqSignatureV4Verify(r *http.Request) (*Identity, ErrorCode) {
func (iam *IdentityAccessManagement) reqSignatureV4Verify(r *http.Request) (*Identity, s3err.ErrorCode) {
sha256sum := getContentSha256Cksum(r)
switch {
case isRequestSignatureV4(r):
@@ -41,7 +42,7 @@ func (iam *IdentityAccessManagement) reqSignatureV4Verify(r *http.Request) (*Ide
case isRequestPresignedSignatureV4(r):
return iam.doesPresignedSignatureMatch(sha256sum, r)
}
return nil, ErrAccessDenied
return nil, s3err.ErrAccessDenied
}
// Streaming AWS Signature Version '4' constants.
@@ -89,7 +90,7 @@ func getContentSha256Cksum(r *http.Request) string {
}
// Verify authorization header - http://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html
func (iam *IdentityAccessManagement) doesSignatureMatch(hashedPayload string, r *http.Request) (*Identity, ErrorCode) {
func (iam *IdentityAccessManagement) doesSignatureMatch(hashedPayload string, r *http.Request) (*Identity, s3err.ErrorCode) {
// Copy request.
req := *r
@@ -99,33 +100,33 @@ func (iam *IdentityAccessManagement) doesSignatureMatch(hashedPayload string, r
// Parse signature version '4' header.
signV4Values, err := parseSignV4(v4Auth)
if err != ErrNone {
if err != s3err.ErrNone {
return nil, err
}
// Extract all the signed headers along with its values.
extractedSignedHeaders, errCode := extractSignedHeaders(signV4Values.SignedHeaders, r)
if errCode != ErrNone {
if errCode != s3err.ErrNone {
return nil, errCode
}
// Verify if the access key id matches.
identity, cred, found := iam.lookupByAccessKey(signV4Values.Credential.accessKey)
if !found {
return nil, ErrInvalidAccessKeyID
return nil, s3err.ErrInvalidAccessKeyID
}
// Extract date, if not present throw error.
var date string
if date = req.Header.Get(http.CanonicalHeaderKey("X-Amz-Date")); date == "" {
if date = r.Header.Get("Date"); date == "" {
return nil, ErrMissingDateHeader
return nil, s3err.ErrMissingDateHeader
}
}
// Parse date header.
t, e := time.Parse(iso8601Format, date)
if e != nil {
return nil, ErrMalformedDate
return nil, s3err.ErrMalformedDate
}
// Query string.
@@ -145,11 +146,11 @@ func (iam *IdentityAccessManagement) doesSignatureMatch(hashedPayload string, r
// Verify if signature match.
if !compareSignatureV4(newSignature, signV4Values.Signature) {
return nil, ErrSignatureDoesNotMatch
return nil, s3err.ErrSignatureDoesNotMatch
}
// Return error none.
return identity, ErrNone
return identity, s3err.ErrNone
}
// credentialHeader data type represents structured form of Credential
@@ -184,65 +185,65 @@ func (c credentialHeader) getScope() string {
// Authorization: algorithm Credential=accessKeyID/credScope, \
// SignedHeaders=signedHeaders, Signature=signature
//
func parseSignV4(v4Auth string) (sv signValues, aec ErrorCode) {
func parseSignV4(v4Auth string) (sv signValues, aec s3err.ErrorCode) {
// Replace all spaced strings, some clients can send spaced
// parameters and some won't. So we pro-actively remove any spaces
// to make parsing easier.
v4Auth = strings.Replace(v4Auth, " ", "", -1)
if v4Auth == "" {
return sv, ErrAuthHeaderEmpty
return sv, s3err.ErrAuthHeaderEmpty
}
// Verify if the header algorithm is supported or not.
if !strings.HasPrefix(v4Auth, signV4Algorithm) {
return sv, ErrSignatureVersionNotSupported
return sv, s3err.ErrSignatureVersionNotSupported
}
// Strip off the Algorithm prefix.
v4Auth = strings.TrimPrefix(v4Auth, signV4Algorithm)
authFields := strings.Split(strings.TrimSpace(v4Auth), ",")
if len(authFields) != 3 {
return sv, ErrMissingFields
return sv, s3err.ErrMissingFields
}
// Initialize signature version '4' structured header.
signV4Values := signValues{}
var err ErrorCode
var err s3err.ErrorCode
// Save credentail values.
signV4Values.Credential, err = parseCredentialHeader(authFields[0])
if err != ErrNone {
if err != s3err.ErrNone {
return sv, err
}
// Save signed headers.
signV4Values.SignedHeaders, err = parseSignedHeader(authFields[1])
if err != ErrNone {
if err != s3err.ErrNone {
return sv, err
}
// Save signature.
signV4Values.Signature, err = parseSignature(authFields[2])
if err != ErrNone {
if err != s3err.ErrNone {
return sv, err
}
// Return the structure here.
return signV4Values, ErrNone
return signV4Values, s3err.ErrNone
}
// parse credentialHeader string into its structured form.
func parseCredentialHeader(credElement string) (ch credentialHeader, aec ErrorCode) {
func parseCredentialHeader(credElement string) (ch credentialHeader, aec s3err.ErrorCode) {
creds := strings.Split(strings.TrimSpace(credElement), "=")
if len(creds) != 2 {
return ch, ErrMissingFields
return ch, s3err.ErrMissingFields
}
if creds[0] != "Credential" {
return ch, ErrMissingCredTag
return ch, s3err.ErrMissingCredTag
}
credElements := strings.Split(strings.TrimSpace(creds[1]), "/")
if len(credElements) != 5 {
return ch, ErrCredMalformed
return ch, s3err.ErrCredMalformed
}
// Save access key id.
cred := credentialHeader{
@ -251,69 +252,100 @@ func parseCredentialHeader(credElement string) (ch credentialHeader, aec ErrorCo
var e error
cred.scope.date, e = time.Parse(yyyymmdd, credElements[1])
if e != nil {
return ch, ErrMalformedCredentialDate
return ch, s3err.ErrMalformedCredentialDate
}
cred.scope.region = credElements[2]
cred.scope.service = credElements[3] // "s3"
cred.scope.request = credElements[4] // "aws4_request"
return cred, ErrNone
return cred, s3err.ErrNone
}
// Parse slice of signed headers from signed headers tag.
func parseSignedHeader(signedHdrElement string) ([]string, ErrorCode) {
func parseSignedHeader(signedHdrElement string) ([]string, s3err.ErrorCode) {
signedHdrFields := strings.Split(strings.TrimSpace(signedHdrElement), "=")
if len(signedHdrFields) != 2 {
return nil, ErrMissingFields
return nil, s3err.ErrMissingFields
}
if signedHdrFields[0] != "SignedHeaders" {
return nil, ErrMissingSignHeadersTag
return nil, s3err.ErrMissingSignHeadersTag
}
if signedHdrFields[1] == "" {
return nil, ErrMissingFields
return nil, s3err.ErrMissingFields
}
signedHeaders := strings.Split(signedHdrFields[1], ";")
return signedHeaders, ErrNone
return signedHeaders, s3err.ErrNone
}
// Parse signature from signature tag.
func parseSignature(signElement string) (string, ErrorCode) {
func parseSignature(signElement string) (string, s3err.ErrorCode) {
signFields := strings.Split(strings.TrimSpace(signElement), "=")
if len(signFields) != 2 {
return "", ErrMissingFields
return "", s3err.ErrMissingFields
}
if signFields[0] != "Signature" {
return "", ErrMissingSignTag
return "", s3err.ErrMissingSignTag
}
if signFields[1] == "" {
return "", ErrMissingFields
return "", s3err.ErrMissingFields
}
signature := signFields[1]
return signature, ErrNone
return signature, s3err.ErrNone
}
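
For orientation, the three parsers above consume the comma-separated fields of a SigV4 Authorization header. A minimal standalone sketch (with a made-up access key and signature) that mirrors the splitting parseSignV4 performs:

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Hypothetical Signature Version 4 Authorization header (key and signature are made up).
	v4Auth := "AWS4-HMAC-SHA256 " +
		"Credential=AKIAIOSFODNN7EXAMPLE/20200915/us-east-1/s3/aws4_request, " +
		"SignedHeaders=host;x-amz-content-sha256;x-amz-date, " +
		"Signature=fe5f80f77d5fa3beca038a248ff027d0445342fe2855ddc963176630326f1024"

	// Same splitting parseSignV4 performs: drop spaces, strip the algorithm prefix,
	// then split into the Credential, SignedHeaders and Signature fields.
	v4Auth = strings.Replace(v4Auth, " ", "", -1)
	v4Auth = strings.TrimPrefix(v4Auth, "AWS4-HMAC-SHA256")
	for _, field := range strings.Split(strings.TrimSpace(v4Auth), ",") {
		fmt.Println(field)
	}
}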
// doesPolicySignatureV4Match - Verify query headers with post policy
// - http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-HTTPPOSTConstructPolicy.html
// returns ErrNone if the signature matches.
func (iam *IdentityAccessManagement) doesPolicySignatureV4Match(formValues http.Header) s3err.ErrorCode {
// Parse credential tag.
credHeader, err := parseCredentialHeader("Credential=" + formValues.Get("X-Amz-Credential"))
if err != s3err.ErrNone {
return s3err.ErrMissingFields
}
_, cred, found := iam.lookupByAccessKey(credHeader.accessKey)
if !found {
return s3err.ErrInvalidAccessKeyID
}
// Get signing key.
signingKey := getSigningKey(cred.SecretKey, credHeader.scope.date, credHeader.scope.region)
// Get signature.
newSignature := getSignature(signingKey, formValues.Get("Policy"))
// Verify signature.
if !compareSignatureV4(newSignature, formValues.Get("X-Amz-Signature")) {
return s3err.ErrSignatureDoesNotMatch
}
// Success.
return s3err.ErrNone
}
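
The signature compared here is the standard SigV4 derivation over the base64-encoded policy document; the server recomputes it via getSigningKey and getSignature. A minimal standalone sketch of the same computation on the client side, with hypothetical credentials and policy:

package main

import (
	"crypto/hmac"
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"time"
)

func hmacSHA256(key, data []byte) []byte {
	h := hmac.New(sha256.New, key)
	h.Write(data)
	return h.Sum(nil)
}

func main() {
	// Hypothetical secret key, region, date and base64-encoded POST policy document.
	secretKey := "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY"
	region := "us-east-1"
	date := time.Date(2020, 9, 15, 0, 0, 0, 0, time.UTC)
	base64Policy := "eyAiZXhwaXJhdGlvbiI6ICIyMDIwLTA5LTE2VDAwOjAwOjAwWiIgfQ=="

	// SigV4 signing-key derivation chain: secret -> date -> region -> service -> request.
	kDate := hmacSHA256([]byte("AWS4"+secretKey), []byte(date.Format("20060102")))
	kRegion := hmacSHA256(kDate, []byte(region))
	kService := hmacSHA256(kRegion, []byte("s3"))
	kSigning := hmacSHA256(kService, []byte("aws4_request"))

	// This is the value the browser form sends as X-Amz-Signature; the server
	// recomputes the same thing in doesPolicySignatureV4Match and compares.
	fmt.Println(hex.EncodeToString(hmacSHA256(kSigning, []byte(base64Policy))))
}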
// check query headers with presigned signature
// - http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-query-string-auth.html
func (iam *IdentityAccessManagement) doesPresignedSignatureMatch(hashedPayload string, r *http.Request) (*Identity, ErrorCode) {
func (iam *IdentityAccessManagement) doesPresignedSignatureMatch(hashedPayload string, r *http.Request) (*Identity, s3err.ErrorCode) {
// Copy request
req := *r
// Parse request query string.
pSignValues, err := parsePreSignV4(req.URL.Query())
if err != ErrNone {
if err != s3err.ErrNone {
return nil, err
}
// Verify if the access key id matches.
identity, cred, found := iam.lookupByAccessKey(pSignValues.Credential.accessKey)
if !found {
return nil, ErrInvalidAccessKeyID
return nil, s3err.ErrInvalidAccessKeyID
}
// Extract all the signed headers along with their values.
extractedSignedHeaders, errCode := extractSignedHeaders(pSignValues.SignedHeaders, r)
if errCode != ErrNone {
if errCode != s3err.ErrNone {
return nil, errCode
}
// Construct new query.
@ -329,11 +361,11 @@ func (iam *IdentityAccessManagement) doesPresignedSignatureMatch(hashedPayload s
// If the host which signed the request is slightly ahead in time (by less than globalMaxSkewTime) the
// request should still be allowed.
if pSignValues.Date.After(now.Add(15 * time.Minute)) {
return nil, ErrRequestNotReadyYet
return nil, s3err.ErrRequestNotReadyYet
}
if now.Sub(pSignValues.Date) > pSignValues.Expires {
return nil, ErrExpiredPresignRequest
return nil, s3err.ErrExpiredPresignRequest
}
// Save the date and expires.
@ -365,24 +397,24 @@ func (iam *IdentityAccessManagement) doesPresignedSignatureMatch(hashedPayload s
// Verify if date query is same.
if req.URL.Query().Get("X-Amz-Date") != query.Get("X-Amz-Date") {
return nil, ErrSignatureDoesNotMatch
return nil, s3err.ErrSignatureDoesNotMatch
}
// Verify if expires query is same.
if req.URL.Query().Get("X-Amz-Expires") != query.Get("X-Amz-Expires") {
return nil, ErrSignatureDoesNotMatch
return nil, s3err.ErrSignatureDoesNotMatch
}
// Verify if signed headers query is same.
if req.URL.Query().Get("X-Amz-SignedHeaders") != query.Get("X-Amz-SignedHeaders") {
return nil, ErrSignatureDoesNotMatch
return nil, s3err.ErrSignatureDoesNotMatch
}
// Verify if credential query is same.
if req.URL.Query().Get("X-Amz-Credential") != query.Get("X-Amz-Credential") {
return nil, ErrSignatureDoesNotMatch
return nil, s3err.ErrSignatureDoesNotMatch
}
// Verify if sha256 payload query is same.
if req.URL.Query().Get("X-Amz-Content-Sha256") != "" {
if req.URL.Query().Get("X-Amz-Content-Sha256") != query.Get("X-Amz-Content-Sha256") {
return nil, ErrContentSHA256Mismatch
return nil, s3err.ErrContentSHA256Mismatch
}
}
@ -402,9 +434,9 @@ func (iam *IdentityAccessManagement) doesPresignedSignatureMatch(hashedPayload s
// Verify signature.
if !compareSignatureV4(req.URL.Query().Get("X-Amz-Signature"), newSignature) {
return nil, ErrSignatureDoesNotMatch
return nil, s3err.ErrSignatureDoesNotMatch
}
return identity, ErrNone
return identity, s3err.ErrNone
}
func contains(list []string, elem string) bool {
@ -433,28 +465,28 @@ type preSignValues struct {
// querystring += &X-Amz-Signature=signature
//
// verifies if any of the necessary query params are missing in the presigned request.
func doesV4PresignParamsExist(query url.Values) ErrorCode {
func doesV4PresignParamsExist(query url.Values) s3err.ErrorCode {
v4PresignQueryParams := []string{"X-Amz-Algorithm", "X-Amz-Credential", "X-Amz-Signature", "X-Amz-Date", "X-Amz-SignedHeaders", "X-Amz-Expires"}
for _, v4PresignQueryParam := range v4PresignQueryParams {
if _, ok := query[v4PresignQueryParam]; !ok {
return ErrInvalidQueryParams
return s3err.ErrInvalidQueryParams
}
}
return ErrNone
return s3err.ErrNone
}
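
For illustration, a presigned query that passes this check carries all six parameters; the values below are hypothetical:

package main

import (
	"fmt"
	"net/url"
)

func main() {
	// Hypothetical presigned-URL query carrying the six parameters
	// that doesV4PresignParamsExist requires.
	q := url.Values{}
	q.Set("X-Amz-Algorithm", "AWS4-HMAC-SHA256")
	q.Set("X-Amz-Credential", "AKIAIOSFODNN7EXAMPLE/20200915/us-east-1/s3/aws4_request")
	q.Set("X-Amz-Date", "20200915T000000Z")
	q.Set("X-Amz-Expires", "3600")
	q.Set("X-Amz-SignedHeaders", "host")
	q.Set("X-Amz-Signature", "fe5f80f77d5fa3beca038a248ff027d0445342fe2855ddc963176630326f1024")
	fmt.Println(q.Encode())
}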
// Parses all the presigned signature values into separate elements.
func parsePreSignV4(query url.Values) (psv preSignValues, aec ErrorCode) {
var err ErrorCode
func parsePreSignV4(query url.Values) (psv preSignValues, aec s3err.ErrorCode) {
var err s3err.ErrorCode
// verify whether the required query params exist.
err = doesV4PresignParamsExist(query)
if err != ErrNone {
if err != s3err.ErrNone {
return psv, err
}
// Verify if the query algorithm is supported or not.
if query.Get("X-Amz-Algorithm") != signV4Algorithm {
return psv, ErrInvalidQuerySignatureAlgo
return psv, s3err.ErrInvalidQuerySignatureAlgo
}
// Initialize signature version '4' structured header.
@ -462,7 +494,7 @@ func parsePreSignV4(query url.Values) (psv preSignValues, aec ErrorCode) {
// Save credential.
preSignV4Values.Credential, err = parseCredentialHeader("Credential=" + query.Get("X-Amz-Credential"))
if err != ErrNone {
if err != s3err.ErrNone {
return psv, err
}
@ -470,47 +502,47 @@ func parsePreSignV4(query url.Values) (psv preSignValues, aec ErrorCode) {
// Save date in native time.Time.
preSignV4Values.Date, e = time.Parse(iso8601Format, query.Get("X-Amz-Date"))
if e != nil {
return psv, ErrMalformedPresignedDate
return psv, s3err.ErrMalformedPresignedDate
}
// Save expires in native time.Duration.
preSignV4Values.Expires, e = time.ParseDuration(query.Get("X-Amz-Expires") + "s")
if e != nil {
return psv, ErrMalformedExpires
return psv, s3err.ErrMalformedExpires
}
if preSignV4Values.Expires < 0 {
return psv, ErrNegativeExpires
return psv, s3err.ErrNegativeExpires
}
// Check if Expiry time is less than 7 days (value in seconds).
if preSignV4Values.Expires.Seconds() > 604800 {
return psv, ErrMaximumExpires
return psv, s3err.ErrMaximumExpires
}
// Save signed headers.
preSignV4Values.SignedHeaders, err = parseSignedHeader("SignedHeaders=" + query.Get("X-Amz-SignedHeaders"))
if err != ErrNone {
if err != s3err.ErrNone {
return psv, err
}
// Save signature.
preSignV4Values.Signature, err = parseSignature("Signature=" + query.Get("X-Amz-Signature"))
if err != ErrNone {
if err != s3err.ErrNone {
return psv, err
}
// Return structured form of signature query string.
return preSignV4Values, ErrNone
return preSignV4Values, s3err.ErrNone
}
// extractSignedHeaders extract signed headers from Authorization header
func extractSignedHeaders(signedHeaders []string, r *http.Request) (http.Header, ErrorCode) {
func extractSignedHeaders(signedHeaders []string, r *http.Request) (http.Header, s3err.ErrorCode) {
reqHeaders := r.Header
// find whether "host" is part of list of signed headers.
// if not return ErrUnsignedHeaders. "host" is mandatory.
if !contains(signedHeaders, "host") {
return nil, ErrUnsignedHeaders
return nil, s3err.ErrUnsignedHeaders
}
extractedSignedHeaders := make(http.Header)
for _, header := range signedHeaders {
@ -555,10 +587,10 @@ func extractSignedHeaders(signedHeaders []string, r *http.Request) (http.Header,
// calculation to be compatible with such clients.
extractedSignedHeaders.Set(header, strconv.FormatInt(r.ContentLength, 10))
default:
return nil, ErrUnsignedHeaders
return nil, s3err.ErrUnsignedHeaders
}
}
return extractedSignedHeaders, ErrNone
return extractedSignedHeaders, s3err.ErrNone
}
// getSignedHeaders generates a string, i.e. an alphabetically sorted, semicolon-separated list of lowercase request header names

15
weed/s3api/auto_signature_v4_test.go

@ -8,6 +8,7 @@ import (
"encoding/hex"
"errors"
"fmt"
"github.com/chrislusf/seaweedfs/weed/s3api/s3err"
"io"
"io/ioutil"
"net/http"
@ -73,12 +74,12 @@ func TestIsReqAuthenticated(t *testing.T) {
// List of test cases for validating http request authentication.
testCases := []struct {
req *http.Request
s3Error ErrorCode
s3Error s3err.ErrorCode
}{
// When request is unsigned, access denied is returned.
{mustNewRequest("GET", "http://127.0.0.1:9000", 0, nil, t), ErrAccessDenied},
{mustNewRequest("GET", "http://127.0.0.1:9000", 0, nil, t), s3err.ErrAccessDenied},
// When request is properly signed, error is none.
{mustNewSignedRequest("GET", "http://127.0.0.1:9000", 0, nil, t), ErrNone},
{mustNewSignedRequest("GET", "http://127.0.0.1:9000", 0, nil, t), s3err.ErrNone},
}
// Validates all testcases.
@ -107,11 +108,11 @@ func TestCheckAdminRequestAuthType(t *testing.T) {
testCases := []struct {
Request *http.Request
ErrCode ErrorCode
ErrCode s3err.ErrorCode
}{
{Request: mustNewRequest("GET", "http://127.0.0.1:9000", 0, nil, t), ErrCode: ErrAccessDenied},
{Request: mustNewSignedRequest("GET", "http://127.0.0.1:9000", 0, nil, t), ErrCode: ErrNone},
{Request: mustNewPresignedRequest("GET", "http://127.0.0.1:9000", 0, nil, t), ErrCode: ErrNone},
{Request: mustNewRequest("GET", "http://127.0.0.1:9000", 0, nil, t), ErrCode: s3err.ErrAccessDenied},
{Request: mustNewSignedRequest("GET", "http://127.0.0.1:9000", 0, nil, t), ErrCode: s3err.ErrNone},
{Request: mustNewPresignedRequest("GET", "http://127.0.0.1:9000", 0, nil, t), ErrCode: s3err.ErrNone},
}
for i, testCase := range testCases {
if _, s3Error := iam.reqSignatureV4Verify(testCase.Request); s3Error != testCase.ErrCode {

25
weed/s3api/chunked_reader_v4.go

@ -24,6 +24,7 @@ import (
"crypto/sha256"
"encoding/hex"
"errors"
"github.com/chrislusf/seaweedfs/weed/s3api/s3err"
"hash"
"io"
"net/http"
@ -56,7 +57,7 @@ func getChunkSignature(secretKey string, seedSignature string, region string, da
// - http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-streaming.html
// returns signature, error otherwise if the signature mismatches or any other
// error while parsing and validating.
func (iam *IdentityAccessManagement) calculateSeedSignature(r *http.Request) (cred *Credential, signature string, region string, date time.Time, errCode ErrorCode) {
func (iam *IdentityAccessManagement) calculateSeedSignature(r *http.Request) (cred *Credential, signature string, region string, date time.Time, errCode s3err.ErrorCode) {
// Copy request.
req := *r
@ -66,7 +67,7 @@ func (iam *IdentityAccessManagement) calculateSeedSignature(r *http.Request) (cr
// Parse signature version '4' header.
signV4Values, errCode := parseSignV4(v4Auth)
if errCode != ErrNone {
if errCode != s3err.ErrNone {
return nil, "", "", time.Time{}, errCode
}
@ -75,18 +76,18 @@ func (iam *IdentityAccessManagement) calculateSeedSignature(r *http.Request) (cr
// Payload for STREAMING signature should be 'STREAMING-AWS4-HMAC-SHA256-PAYLOAD'
if payload != req.Header.Get("X-Amz-Content-Sha256") {
return nil, "", "", time.Time{}, ErrContentSHA256Mismatch
return nil, "", "", time.Time{}, s3err.ErrContentSHA256Mismatch
}
// Extract all the signed headers along with their values.
extractedSignedHeaders, errCode := extractSignedHeaders(signV4Values.SignedHeaders, r)
if errCode != ErrNone {
if errCode != s3err.ErrNone {
return nil, "", "", time.Time{}, errCode
}
// Verify if the access key id matches.
_, cred, found := iam.lookupByAccessKey(signV4Values.Credential.accessKey)
if !found {
return nil, "", "", time.Time{}, ErrInvalidAccessKeyID
return nil, "", "", time.Time{}, s3err.ErrInvalidAccessKeyID
}
// Verify if region is valid.
@ -96,14 +97,14 @@ func (iam *IdentityAccessManagement) calculateSeedSignature(r *http.Request) (cr
var dateStr string
if dateStr = req.Header.Get(http.CanonicalHeaderKey("x-amz-date")); dateStr == "" {
if dateStr = r.Header.Get("Date"); dateStr == "" {
return nil, "", "", time.Time{}, ErrMissingDateHeader
return nil, "", "", time.Time{}, s3err.ErrMissingDateHeader
}
}
// Parse date header.
var err error
date, err = time.Parse(iso8601Format, dateStr)
if err != nil {
return nil, "", "", time.Time{}, ErrMalformedDate
return nil, "", "", time.Time{}, s3err.ErrMalformedDate
}
// Query string.
@ -123,11 +124,11 @@ func (iam *IdentityAccessManagement) calculateSeedSignature(r *http.Request) (cr
// Verify if the signatures match.
if !compareSignatureV4(newSignature, signV4Values.Signature) {
return nil, "", "", time.Time{}, ErrSignatureDoesNotMatch
return nil, "", "", time.Time{}, s3err.ErrSignatureDoesNotMatch
}
// Return calculated signature.
return cred, newSignature, region, date, ErrNone
return cred, newSignature, region, date, s3err.ErrNone
}
const maxLineLength = 4 * humanize.KiByte // assumed <= bufio.defaultBufSize 4KiB
@ -141,9 +142,9 @@ var errMalformedEncoding = errors.New("malformed chunked encoding")
// newSignV4ChunkedReader returns a new s3ChunkedReader that translates the data read from r
// out of HTTP "chunked" format before returning it.
// The s3ChunkedReader returns io.EOF when the final 0-length chunk is read.
func (iam *IdentityAccessManagement) newSignV4ChunkedReader(req *http.Request) (io.ReadCloser, ErrorCode) {
func (iam *IdentityAccessManagement) newSignV4ChunkedReader(req *http.Request) (io.ReadCloser, s3err.ErrorCode) {
ident, seedSignature, region, seedDate, errCode := iam.calculateSeedSignature(req)
if errCode != ErrNone {
if errCode != s3err.ErrNone {
return nil, errCode
}
return &s3ChunkedReader{
@ -154,7 +155,7 @@ func (iam *IdentityAccessManagement) newSignV4ChunkedReader(req *http.Request) (
region: region,
chunkSHA256Writer: sha256.New(),
state: readChunkHeader,
}, ErrNone
}, s3err.ErrNone
}
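
The reader decodes the aws-chunked streaming format described in the sigv4-streaming document referenced above: each chunk is "<hex-size>;chunk-signature=<sig>\r\n<data>\r\n", terminated by a zero-length chunk. A sketch of one such body, with a made-up chunk signature:

package main

import (
	"bytes"
	"fmt"
)

func main() {
	// Hypothetical single-chunk body in the aws-chunked format that s3ChunkedReader decodes.
	data := []byte("hello world")
	sig := "ad80c730a21e5b8d04586a2213dd63b9a0e99e0e2307b0ade35a65485a288648" // made up

	var body bytes.Buffer
	fmt.Fprintf(&body, "%x;chunk-signature=%s\r\n", len(data), sig) // chunk header: hex size + signature
	body.Write(data)
	body.WriteString("\r\n")
	// Final zero-length chunk signals the end of the stream.
	fmt.Fprintf(&body, "0;chunk-signature=%s\r\n\r\n", sig)

	fmt.Printf("%q\n", body.String())
}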
// Represents the overall state that is required for decoding a

48
weed/s3api/filer_multipart.go

@ -3,6 +3,7 @@ package s3api
import (
"encoding/xml"
"fmt"
"github.com/chrislusf/seaweedfs/weed/s3api/s3err"
"path/filepath"
"strconv"
"strings"
@ -22,7 +23,10 @@ type InitiateMultipartUploadResult struct {
s3.CreateMultipartUploadOutput
}
func (s3a *S3ApiServer) createMultipartUpload(input *s3.CreateMultipartUploadInput) (output *InitiateMultipartUploadResult, code ErrorCode) {
func (s3a *S3ApiServer) createMultipartUpload(input *s3.CreateMultipartUploadInput) (output *InitiateMultipartUploadResult, code s3err.ErrorCode) {
glog.V(2).Infof("createMultipartUpload input %v", input)
uploadId, _ := uuid.NewRandom()
uploadIdString := uploadId.String()
@ -33,7 +37,7 @@ func (s3a *S3ApiServer) createMultipartUpload(input *s3.CreateMultipartUploadInp
entry.Extended["key"] = []byte(*input.Key)
}); err != nil {
glog.Errorf("NewMultipartUpload error: %v", err)
return nil, ErrInternalError
return nil, s3err.ErrInternalError
}
output = &InitiateMultipartUploadResult{
@ -52,14 +56,16 @@ type CompleteMultipartUploadResult struct {
s3.CompleteMultipartUploadOutput
}
func (s3a *S3ApiServer) completeMultipartUpload(input *s3.CompleteMultipartUploadInput) (output *CompleteMultipartUploadResult, code ErrorCode) {
func (s3a *S3ApiServer) completeMultipartUpload(input *s3.CompleteMultipartUploadInput) (output *CompleteMultipartUploadResult, code s3err.ErrorCode) {
glog.V(2).Infof("completeMultipartUpload input %v", input)
uploadDirectory := s3a.genUploadsFolder(*input.Bucket) + "/" + *input.UploadId
entries, _, err := s3a.list(uploadDirectory, "", "", false, 0)
if err != nil || len(entries) == 0 {
glog.Errorf("completeMultipartUpload %s %s error: %v, entries:%d", *input.Bucket, *input.UploadId, err, len(entries))
return nil, ErrNoSuchUpload
return nil, s3err.ErrNoSuchUpload
}
var finalParts []*filer_pb.FileChunk
@ -101,7 +107,7 @@ func (s3a *S3ApiServer) completeMultipartUpload(input *s3.CompleteMultipartUploa
if err != nil {
glog.Errorf("completeMultipartUpload %s/%s error: %v", dirName, entryName, err)
return nil, ErrInternalError
return nil, s3err.ErrInternalError
}
output = &CompleteMultipartUploadResult{
@ -120,22 +126,24 @@ func (s3a *S3ApiServer) completeMultipartUpload(input *s3.CompleteMultipartUploa
return
}
func (s3a *S3ApiServer) abortMultipartUpload(input *s3.AbortMultipartUploadInput) (output *s3.AbortMultipartUploadOutput, code ErrorCode) {
func (s3a *S3ApiServer) abortMultipartUpload(input *s3.AbortMultipartUploadInput) (output *s3.AbortMultipartUploadOutput, code s3err.ErrorCode) {
glog.V(2).Infof("abortMultipartUpload input %v", input)
exists, err := s3a.exists(s3a.genUploadsFolder(*input.Bucket), *input.UploadId, true)
if err != nil {
glog.V(1).Infof("bucket %s abort upload %s: %v", *input.Bucket, *input.UploadId, err)
return nil, ErrNoSuchUpload
return nil, s3err.ErrNoSuchUpload
}
if exists {
err = s3a.rm(s3a.genUploadsFolder(*input.Bucket), *input.UploadId, true, true)
}
if err != nil {
glog.V(1).Infof("bucket %s remove upload %s: %v", *input.Bucket, *input.UploadId, err)
return nil, ErrInternalError
return nil, s3err.ErrInternalError
}
return &s3.AbortMultipartUploadOutput{}, ErrNone
return &s3.AbortMultipartUploadOutput{}, s3err.ErrNone
}
type ListMultipartUploadsResult struct {
@ -155,9 +163,11 @@ type ListMultipartUploadsResult struct {
Upload []*s3.MultipartUpload `locationName:"Upload" type:"list" flattened:"true"`
}
func (s3a *S3ApiServer) listMultipartUploads(input *s3.ListMultipartUploadsInput) (output *ListMultipartUploadsResult, code ErrorCode) {
func (s3a *S3ApiServer) listMultipartUploads(input *s3.ListMultipartUploadsInput) (output *ListMultipartUploadsResult, code s3err.ErrorCode) {
// https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html
glog.V(2).Infof("listMultipartUploads input %v", input)
output = &ListMultipartUploadsResult{
Bucket: input.Bucket,
Delimiter: input.Delimiter,
@ -167,7 +177,7 @@ func (s3a *S3ApiServer) listMultipartUploads(input *s3.ListMultipartUploadsInput
Prefix: input.Prefix,
}
entries, isLast, err := s3a.list(s3a.genUploadsFolder(*input.Bucket), *input.Prefix, *input.KeyMarker, true, uint32(*input.MaxUploads))
entries, isLast, err := s3a.list(s3a.genUploadsFolder(*input.Bucket), "", *input.UploadIdMarker, false, uint32(*input.MaxUploads))
if err != nil {
glog.Errorf("listMultipartUploads %s error: %v", *input.Bucket, err)
return
@ -176,9 +186,15 @@ func (s3a *S3ApiServer) listMultipartUploads(input *s3.ListMultipartUploadsInput
for _, entry := range entries {
if entry.Extended != nil {
key := entry.Extended["key"]
key := string(entry.Extended["key"])
if *input.KeyMarker != "" && *input.KeyMarker != key {
continue
}
if *input.Prefix != "" && !strings.HasPrefix(key, *input.Prefix) {
continue
}
output.Upload = append(output.Upload, &s3.MultipartUpload{
Key: objectKey(aws.String(string(key))),
Key: objectKey(aws.String(key)),
UploadId: aws.String(entry.Name),
})
if !isLast {
@ -205,9 +221,11 @@ type ListPartsResult struct {
UploadId *string `type:"string"`
}
func (s3a *S3ApiServer) listObjectParts(input *s3.ListPartsInput) (output *ListPartsResult, code ErrorCode) {
func (s3a *S3ApiServer) listObjectParts(input *s3.ListPartsInput) (output *ListPartsResult, code s3err.ErrorCode) {
// https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html
glog.V(2).Infof("listObjectParts input %v", input)
output = &ListPartsResult{
Bucket: input.Bucket,
Key: objectKey(input.Key),
@ -220,7 +238,7 @@ func (s3a *S3ApiServer) listObjectParts(input *s3.ListPartsInput) (output *ListP
entries, isLast, err := s3a.list(s3a.genUploadsFolder(*input.Bucket)+"/"+*input.UploadId, "", fmt.Sprintf("%04d.part", *input.PartNumberMarker), false, uint32(*input.MaxParts))
if err != nil {
glog.Errorf("listObjectParts %s %s error: %v", *input.Bucket, *input.UploadId, err)
return nil, ErrNoSuchUpload
return nil, s3err.ErrNoSuchUpload
}
output.IsTruncated = aws.Bool(!isLast)

321
weed/s3api/policy/post-policy.go

@ -0,0 +1,321 @@
package policy
/*
* MinIO Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2015-2017 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import (
"encoding/base64"
"fmt"
"github.com/chrislusf/seaweedfs/weed/s3api/s3err"
"net/http"
"strings"
"time"
)
// expirationDateFormat date format for expiration key in json policy.
const expirationDateFormat = "2006-01-02T15:04:05.999Z"
// policyCondition explanation:
// http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-HTTPPOSTConstructPolicy.html
//
// Example:
//
// policyCondition {
// matchType: "$eq",
// key: "$Content-Type",
// value: "image/png",
// }
//
type policyCondition struct {
matchType string
condition string
value string
}
// PostPolicy - Provides strict static type conversion and validation
// for Amazon S3's POST policy JSON string.
type PostPolicy struct {
// Expiration date and time of the POST policy.
expiration time.Time
// Collection of different policy conditions.
conditions []policyCondition
// ContentLengthRange minimum and maximum allowable size for the
// uploaded content.
contentLengthRange struct {
min int64
max int64
}
// Post form data.
formData map[string]string
}
// NewPostPolicy - Instantiate new post policy.
func NewPostPolicy() *PostPolicy {
p := &PostPolicy{}
p.conditions = make([]policyCondition, 0)
p.formData = make(map[string]string)
return p
}
// SetExpires - Sets expiration time for the new policy.
func (p *PostPolicy) SetExpires(t time.Time) error {
if t.IsZero() {
return errInvalidArgument("No expiry time set.")
}
p.expiration = t
return nil
}
// SetKey - Sets an object name for the policy based upload.
func (p *PostPolicy) SetKey(key string) error {
if strings.TrimSpace(key) == "" || key == "" {
return errInvalidArgument("Object name is empty.")
}
policyCond := policyCondition{
matchType: "eq",
condition: "$key",
value: key,
}
if err := p.addNewPolicy(policyCond); err != nil {
return err
}
p.formData["key"] = key
return nil
}
// SetKeyStartsWith - Sets an object name that a policy-based upload
// can start with.
func (p *PostPolicy) SetKeyStartsWith(keyStartsWith string) error {
if strings.TrimSpace(keyStartsWith) == "" || keyStartsWith == "" {
return errInvalidArgument("Object prefix is empty.")
}
policyCond := policyCondition{
matchType: "starts-with",
condition: "$key",
value: keyStartsWith,
}
if err := p.addNewPolicy(policyCond); err != nil {
return err
}
p.formData["key"] = keyStartsWith
return nil
}
// SetBucket - Sets the bucket to which objects will be uploaded.
func (p *PostPolicy) SetBucket(bucketName string) error {
if strings.TrimSpace(bucketName) == "" || bucketName == "" {
return errInvalidArgument("Bucket name is empty.")
}
policyCond := policyCondition{
matchType: "eq",
condition: "$bucket",
value: bucketName,
}
if err := p.addNewPolicy(policyCond); err != nil {
return err
}
p.formData["bucket"] = bucketName
return nil
}
// SetCondition - Sets condition for credentials, date and algorithm
func (p *PostPolicy) SetCondition(matchType, condition, value string) error {
if strings.TrimSpace(value) == "" || value == "" {
return errInvalidArgument("No value specified for condition")
}
policyCond := policyCondition{
matchType: matchType,
condition: "$" + condition,
value: value,
}
if condition == "X-Amz-Credential" || condition == "X-Amz-Date" || condition == "X-Amz-Algorithm" {
if err := p.addNewPolicy(policyCond); err != nil {
return err
}
p.formData[condition] = value
return nil
}
return errInvalidArgument("Invalid condition in policy")
}
// SetContentType - Sets content-type of the object for this policy
// based upload.
func (p *PostPolicy) SetContentType(contentType string) error {
if strings.TrimSpace(contentType) == "" || contentType == "" {
return errInvalidArgument("No content type specified.")
}
policyCond := policyCondition{
matchType: "eq",
condition: "$Content-Type",
value: contentType,
}
if err := p.addNewPolicy(policyCond); err != nil {
return err
}
p.formData["Content-Type"] = contentType
return nil
}
// SetContentLengthRange - Set new min and max content length
// condition for all incoming uploads.
func (p *PostPolicy) SetContentLengthRange(min, max int64) error {
if min > max {
return errInvalidArgument("Minimum limit is larger than maximum limit.")
}
if min < 0 {
return errInvalidArgument("Minimum limit cannot be negative.")
}
if max < 0 {
return errInvalidArgument("Maximum limit cannot be negative.")
}
p.contentLengthRange.min = min
p.contentLengthRange.max = max
return nil
}
// SetSuccessActionRedirect - Sets the redirect success url of the object for this policy
// based upload.
func (p *PostPolicy) SetSuccessActionRedirect(redirect string) error {
if strings.TrimSpace(redirect) == "" || redirect == "" {
return errInvalidArgument("Redirect is empty")
}
policyCond := policyCondition{
matchType: "eq",
condition: "$success_action_redirect",
value: redirect,
}
if err := p.addNewPolicy(policyCond); err != nil {
return err
}
p.formData["success_action_redirect"] = redirect
return nil
}
// SetSuccessStatusAction - Sets the status success code of the object for this policy
// based upload.
func (p *PostPolicy) SetSuccessStatusAction(status string) error {
if strings.TrimSpace(status) == "" || status == "" {
return errInvalidArgument("Status is empty")
}
policyCond := policyCondition{
matchType: "eq",
condition: "$success_action_status",
value: status,
}
if err := p.addNewPolicy(policyCond); err != nil {
return err
}
p.formData["success_action_status"] = status
return nil
}
// SetUserMetadata - Set user metadata as a key/value couple.
// Can be retrieved through a HEAD request or an event.
func (p *PostPolicy) SetUserMetadata(key string, value string) error {
if strings.TrimSpace(key) == "" || key == "" {
return errInvalidArgument("Key is empty")
}
if strings.TrimSpace(value) == "" || value == "" {
return errInvalidArgument("Value is empty")
}
headerName := fmt.Sprintf("x-amz-meta-%s", key)
policyCond := policyCondition{
matchType: "eq",
condition: fmt.Sprintf("$%s", headerName),
value: value,
}
if err := p.addNewPolicy(policyCond); err != nil {
return err
}
p.formData[headerName] = value
return nil
}
// SetUserData - Set user data as a key/value couple.
// Can be retrieved through a HEAD request or an event.
func (p *PostPolicy) SetUserData(key string, value string) error {
if key == "" {
return errInvalidArgument("Key is empty")
}
if value == "" {
return errInvalidArgument("Value is empty")
}
headerName := fmt.Sprintf("x-amz-%s", key)
policyCond := policyCondition{
matchType: "eq",
condition: fmt.Sprintf("$%s", headerName),
value: value,
}
if err := p.addNewPolicy(policyCond); err != nil {
return err
}
p.formData[headerName] = value
return nil
}
// addNewPolicy - internal helper to validate adding new policies.
func (p *PostPolicy) addNewPolicy(policyCond policyCondition) error {
if policyCond.matchType == "" || policyCond.condition == "" || policyCond.value == "" {
return errInvalidArgument("Policy fields are empty.")
}
p.conditions = append(p.conditions, policyCond)
return nil
}
// String function for printing policy in json formatted string.
func (p PostPolicy) String() string {
return string(p.marshalJSON())
}
// marshalJSON - Provides Marshaled JSON in bytes.
func (p PostPolicy) marshalJSON() []byte {
expirationStr := `"expiration":"` + p.expiration.Format(expirationDateFormat) + `"`
var conditionsStr string
conditions := []string{}
for _, po := range p.conditions {
conditions = append(conditions, fmt.Sprintf("[\"%s\",\"%s\",\"%s\"]", po.matchType, po.condition, po.value))
}
if p.contentLengthRange.min != 0 || p.contentLengthRange.max != 0 {
conditions = append(conditions, fmt.Sprintf("[\"content-length-range\", %d, %d]",
p.contentLengthRange.min, p.contentLengthRange.max))
}
if len(conditions) > 0 {
conditionsStr = `"conditions":[` + strings.Join(conditions, ",") + "]"
}
retStr := "{"
retStr = retStr + expirationStr + ","
retStr = retStr + conditionsStr
retStr = retStr + "}"
return []byte(retStr)
}
// base64 - Produces base64 of PostPolicy's Marshaled json.
func (p PostPolicy) base64() string {
return base64.StdEncoding.EncodeToString(p.marshalJSON())
}
// errInvalidArgument - Invalid argument response.
func errInvalidArgument(message string) error {
return s3err.RESTErrorResponse{
StatusCode: http.StatusBadRequest,
Code: "InvalidArgument",
Message: message,
RequestID: "minio",
}
}
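
For context, a client preparing a browser-based upload would use this builder roughly as follows. A sketch assuming the package import path github.com/chrislusf/seaweedfs/weed/s3api/policy, with a hypothetical bucket and key:

package main

import (
	"encoding/base64"
	"fmt"
	"time"

	"github.com/chrislusf/seaweedfs/weed/s3api/policy"
)

func main() {
	p := policy.NewPostPolicy()
	// Hypothetical bucket/key; error returns are ignored for brevity.
	p.SetBucket("testbucket")
	p.SetKey("uploads/hello.txt")
	p.SetContentType("text/plain")
	p.SetContentLengthRange(1, 1024*1024)
	p.SetExpires(time.Now().UTC().Add(15 * time.Minute))

	// The JSON policy document, and the base64 form that goes into the
	// multipart form field "policy" before being signed.
	fmt.Println(p.String())
	fmt.Println(base64.StdEncoding.EncodeToString([]byte(p.String())))
}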

378
weed/s3api/policy/post-policy_test.go

@ -0,0 +1,378 @@
package policy
/*
* MinIO Cloud Storage, (C) 2016, 2017, 2018 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import (
"bytes"
"crypto/hmac"
"crypto/sha1"
"crypto/sha256"
"encoding/base64"
"encoding/hex"
"fmt"
"mime/multipart"
"net/http"
"net/url"
"regexp"
"strings"
"time"
"unicode/utf8"
)
const (
iso8601DateFormat = "20060102T150405Z"
iso8601TimeFormat = "2006-01-02T15:04:05.000Z" // Reply date format with nanosecond precision.
)
func newPostPolicyBytesV4WithContentRange(credential, bucketName, objectKey string, expiration time.Time) []byte {
t := time.Now().UTC()
// Add the expiration date.
expirationStr := fmt.Sprintf(`"expiration": "%s"`, expiration.Format(iso8601TimeFormat))
// Add the bucket condition, only accept buckets equal to the one passed.
bucketConditionStr := fmt.Sprintf(`["eq", "$bucket", "%s"]`, bucketName)
// Add the key condition, only accept keys equal to the one passed.
keyConditionStr := fmt.Sprintf(`["eq", "$key", "%s/upload.txt"]`, objectKey)
// Add content length condition, only accept content sizes of a given length.
contentLengthCondStr := `["content-length-range", 1024, 1048576]`
// Add the algorithm condition, only accept AWS SignV4 Sha256.
algorithmConditionStr := `["eq", "$x-amz-algorithm", "AWS4-HMAC-SHA256"]`
// Add the date condition, only accept the current date.
dateConditionStr := fmt.Sprintf(`["eq", "$x-amz-date", "%s"]`, t.Format(iso8601DateFormat))
// Add the credential string, only accept the credential passed.
credentialConditionStr := fmt.Sprintf(`["eq", "$x-amz-credential", "%s"]`, credential)
// Add the meta-uuid string, set to 1234
uuidConditionStr := fmt.Sprintf(`["eq", "$x-amz-meta-uuid", "%s"]`, "1234")
// Combine all conditions into one string.
conditionStr := fmt.Sprintf(`"conditions":[%s, %s, %s, %s, %s, %s, %s]`, bucketConditionStr,
keyConditionStr, contentLengthCondStr, algorithmConditionStr, dateConditionStr, credentialConditionStr, uuidConditionStr)
retStr := "{"
retStr = retStr + expirationStr + ","
retStr = retStr + conditionStr
retStr = retStr + "}"
return []byte(retStr)
}
// newPostPolicyBytesV4 - creates a bare bones postpolicy string with key and bucket matches.
func newPostPolicyBytesV4(credential, bucketName, objectKey string, expiration time.Time) []byte {
t := time.Now().UTC()
// Add the expiration date.
expirationStr := fmt.Sprintf(`"expiration": "%s"`, expiration.Format(iso8601TimeFormat))
// Add the bucket condition, only accept buckets equal to the one passed.
bucketConditionStr := fmt.Sprintf(`["eq", "$bucket", "%s"]`, bucketName)
// Add the key condition, only accept keys equal to the one passed.
keyConditionStr := fmt.Sprintf(`["eq", "$key", "%s/upload.txt"]`, objectKey)
// Add the algorithm condition, only accept AWS SignV4 Sha256.
algorithmConditionStr := `["eq", "$x-amz-algorithm", "AWS4-HMAC-SHA256"]`
// Add the date condition, only accept the current date.
dateConditionStr := fmt.Sprintf(`["eq", "$x-amz-date", "%s"]`, t.Format(iso8601DateFormat))
// Add the credential string, only accept the credential passed.
credentialConditionStr := fmt.Sprintf(`["eq", "$x-amz-credential", "%s"]`, credential)
// Add the meta-uuid string, set to 1234
uuidConditionStr := fmt.Sprintf(`["eq", "$x-amz-meta-uuid", "%s"]`, "1234")
// Combine all conditions into one string.
conditionStr := fmt.Sprintf(`"conditions":[%s, %s, %s, %s, %s, %s]`, bucketConditionStr, keyConditionStr, algorithmConditionStr, dateConditionStr, credentialConditionStr, uuidConditionStr)
retStr := "{"
retStr = retStr + expirationStr + ","
retStr = retStr + conditionStr
retStr = retStr + "}"
return []byte(retStr)
}
// newPostPolicyBytesV2 - creates a bare bones postpolicy string with key and bucket matches.
func newPostPolicyBytesV2(bucketName, objectKey string, expiration time.Time) []byte {
// Add the expiration date.
expirationStr := fmt.Sprintf(`"expiration": "%s"`, expiration.Format(iso8601TimeFormat))
// Add the bucket condition, only accept buckets equal to the one passed.
bucketConditionStr := fmt.Sprintf(`["eq", "$bucket", "%s"]`, bucketName)
// Add the key condition, only accept keys equal to the one passed.
keyConditionStr := fmt.Sprintf(`["starts-with", "$key", "%s/upload.txt"]`, objectKey)
// Combine all conditions into one string.
conditionStr := fmt.Sprintf(`"conditions":[%s, %s]`, bucketConditionStr, keyConditionStr)
retStr := "{"
retStr = retStr + expirationStr + ","
retStr = retStr + conditionStr
retStr = retStr + "}"
return []byte(retStr)
}
// Wrapper for calling TestPostPolicyBucketHandler tests for both Erasure multiple disks and single node setup.
// testPostPolicyBucketHandler - Tests validate post policy handler uploading objects.
// Wrapper for calling TestPostPolicyBucketHandlerRedirect tests for both Erasure multiple disks and single node setup.
// testPostPolicyBucketHandlerRedirect tests POST Object when success_action_redirect is specified
// postPresignSignatureV4 - presigned signature for PostPolicy requests.
func postPresignSignatureV4(policyBase64 string, t time.Time, secretAccessKey, location string) string {
// Get signing key.
signingkey := getSigningKey(secretAccessKey, t, location)
// Calculate signature.
signature := getSignature(signingkey, policyBase64)
return signature
}
// copied from auth_signature_v4.go to break import loop
// sumHMAC calculate hmac between two input byte array.
func sumHMAC(key []byte, data []byte) []byte {
hash := hmac.New(sha256.New, key)
hash.Write(data)
return hash.Sum(nil)
}
// copied from auth_signature_v4.go to break import loop
// getSigningKey hmac seed to calculate final signature.
func getSigningKey(secretKey string, t time.Time, region string) []byte {
date := sumHMAC([]byte("AWS4"+secretKey), []byte(t.Format("20060102")))
regionBytes := sumHMAC(date, []byte(region))
service := sumHMAC(regionBytes, []byte("s3"))
signingKey := sumHMAC(service, []byte("aws4_request"))
return signingKey
}
// copied from auth_signature_v4.go to break import loop
// getSignature final signature in hexadecimal form.
func getSignature(signingKey []byte, stringToSign string) string {
return hex.EncodeToString(sumHMAC(signingKey, []byte(stringToSign)))
}
// copied from auth_signature_v4.go to break import loop
func calculateSignatureV2(stringToSign string, secret string) string {
hm := hmac.New(sha1.New, []byte(secret))
hm.Write([]byte(stringToSign))
return base64.StdEncoding.EncodeToString(hm.Sum(nil))
}
func newPostRequestV2(endPoint, bucketName, objectName string, accessKey, secretKey string) (*http.Request, error) {
// Expire the request five minutes from now.
expirationTime := time.Now().UTC().Add(time.Minute * 5)
// Create a new post policy.
policy := newPostPolicyBytesV2(bucketName, objectName, expirationTime)
// Only need the encoding.
encodedPolicy := base64.StdEncoding.EncodeToString(policy)
// Presign with V4 signature based on the policy.
signature := calculateSignatureV2(encodedPolicy, secretKey)
formData := map[string]string{
"AWSAccessKeyId": accessKey,
"bucket": bucketName,
"key": objectName + "/${filename}",
"policy": encodedPolicy,
"signature": signature,
}
// Create the multipart form.
var buf bytes.Buffer
w := multipart.NewWriter(&buf)
// Set the normal formData
for k, v := range formData {
w.WriteField(k, v)
}
// Set the File formData
writer, err := w.CreateFormFile("file", "upload.txt")
if err != nil {
// return nil, err
return nil, err
}
writer.Write([]byte("hello world"))
// Close before creating the new request.
w.Close()
// Set the body equal to the created policy.
reader := bytes.NewReader(buf.Bytes())
req, err := http.NewRequest(http.MethodPost, makeTestTargetURL(endPoint, bucketName, "", nil), reader)
if err != nil {
return nil, err
}
// Set form content-type.
req.Header.Set("Content-Type", w.FormDataContentType())
return req, nil
}
func buildGenericPolicy(t time.Time, accessKey, region, bucketName, objectName string, contentLengthRange bool) []byte {
// Expire the request five minutes from now.
expirationTime := t.Add(time.Minute * 5)
credStr := getCredentialString(accessKey, region, t)
// Create a new post policy.
policy := newPostPolicyBytesV4(credStr, bucketName, objectName, expirationTime)
if contentLengthRange {
policy = newPostPolicyBytesV4WithContentRange(credStr, bucketName, objectName, expirationTime)
}
return policy
}
func newPostRequestV4Generic(endPoint, bucketName, objectName string, objData []byte, accessKey, secretKey string, region string,
t time.Time, policy []byte, addFormData map[string]string, corruptedB64 bool, corruptedMultipart bool) (*http.Request, error) {
// Get the user credential.
credStr := getCredentialString(accessKey, region, t)
// Only need the encoding.
encodedPolicy := base64.StdEncoding.EncodeToString(policy)
if corruptedB64 {
encodedPolicy = "%!~&" + encodedPolicy
}
// Presign with V4 signature based on the policy.
signature := postPresignSignatureV4(encodedPolicy, t, secretKey, region)
formData := map[string]string{
"bucket": bucketName,
"key": objectName + "/${filename}",
"x-amz-credential": credStr,
"policy": encodedPolicy,
"x-amz-signature": signature,
"x-amz-date": t.Format(iso8601DateFormat),
"x-amz-algorithm": "AWS4-HMAC-SHA256",
"x-amz-meta-uuid": "1234",
"Content-Encoding": "gzip",
}
// Add form data
for k, v := range addFormData {
formData[k] = v
}
// Create the multipart form.
var buf bytes.Buffer
w := multipart.NewWriter(&buf)
// Set the normal formData
for k, v := range formData {
w.WriteField(k, v)
}
// Set the File formData, but skip it if we want to send an incomplete multipart request
if !corruptedMultipart {
writer, err := w.CreateFormFile("file", "upload.txt")
if err != nil {
// return nil, err
return nil, err
}
writer.Write(objData)
// Close before creating the new request.
w.Close()
}
// Set the body equal to the created policy.
reader := bytes.NewReader(buf.Bytes())
req, err := http.NewRequest(http.MethodPost, makeTestTargetURL(endPoint, bucketName, "", nil), reader)
if err != nil {
return nil, err
}
// Set form content-type.
req.Header.Set("Content-Type", w.FormDataContentType())
return req, nil
}
func newPostRequestV4WithContentLength(endPoint, bucketName, objectName string, objData []byte, accessKey, secretKey string) (*http.Request, error) {
t := time.Now().UTC()
region := "us-east-1"
policy := buildGenericPolicy(t, accessKey, region, bucketName, objectName, true)
return newPostRequestV4Generic(endPoint, bucketName, objectName, objData, accessKey, secretKey, region, t, policy, nil, false, false)
}
func newPostRequestV4(endPoint, bucketName, objectName string, objData []byte, accessKey, secretKey string) (*http.Request, error) {
t := time.Now().UTC()
region := "us-east-1"
policy := buildGenericPolicy(t, accessKey, region, bucketName, objectName, false)
return newPostRequestV4Generic(endPoint, bucketName, objectName, objData, accessKey, secretKey, region, t, policy, nil, false, false)
}
// construct URL for http requests for bucket operations.
func makeTestTargetURL(endPoint, bucketName, objectName string, queryValues url.Values) string {
urlStr := endPoint + "/"
if bucketName != "" {
urlStr = urlStr + bucketName + "/"
}
if objectName != "" {
urlStr = urlStr + EncodePath(objectName)
}
if len(queryValues) > 0 {
urlStr = urlStr + "?" + queryValues.Encode()
}
return urlStr
}
// if object matches reserved string, no need to encode them
var reservedObjectNames = regexp.MustCompile("^[a-zA-Z0-9-_.~/]+$")
// EncodePath encodes a string from its UTF-8 byte representation into percent-encoded hex escape sequences
//
// This is necessary since the regular url.Parse() and url.Encode() functions do not support every UTF-8
// character; non-English characters cannot be parsed due to the way url.Encode() is written
//
// This function, on the other hand, is a direct replacement for the url.Encode() technique and supports
// pretty much every UTF-8 character.
func EncodePath(pathName string) string {
if reservedObjectNames.MatchString(pathName) {
return pathName
}
var encodedPathname string
for _, s := range pathName {
if 'A' <= s && s <= 'Z' || 'a' <= s && s <= 'z' || '0' <= s && s <= '9' { // §2.3 Unreserved characters (mark)
encodedPathname = encodedPathname + string(s)
continue
}
switch s {
case '-', '_', '.', '~', '/': // §2.3 Unreserved characters (mark)
encodedPathname = encodedPathname + string(s)
continue
default:
len := utf8.RuneLen(s)
if len < 0 {
// if utf8 cannot convert return the same string as is
return pathName
}
u := make([]byte, len)
utf8.EncodeRune(u, s)
for _, r := range u {
hex := hex.EncodeToString([]byte{r})
encodedPathname = encodedPathname + "%" + strings.ToUpper(hex)
}
}
}
return encodedPathname
}
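
Since EncodePath lives in a _test.go file, it is only compiled with the package's tests; a quick way to exercise it is another example test in the same package. A sketch (standard library only):

package policy

// post-policy_example_test.go (sketch): exercises EncodePath from the same package.

import "fmt"

func ExampleEncodePath() {
	fmt.Println(EncodePath("photos/2020/report.txt")) // all unreserved characters: returned unchanged
	fmt.Println(EncodePath("my file.txt"))            // the space is percent-encoded
	// Output:
	// photos/2020/report.txt
	// my%20file.txt
}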
// getCredentialString generates a credential string.
func getCredentialString(accessKeyID, location string, t time.Time) string {
return accessKeyID + "/" + getScope(t, location)
}
// getScope generates a string of a specific date, an AWS region, and a service.
func getScope(t time.Time, region string) string {
scope := strings.Join([]string{
t.Format("20060102"),
region,
string("s3"),
"aws4_request",
}, "/")
return scope
}

276
weed/s3api/policy/postpolicyform.go

@ -0,0 +1,276 @@
package policy
/*
* MinIO Cloud Storage, (C) 2015, 2016, 2017 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import (
"encoding/json"
"errors"
"fmt"
"net/http"
"reflect"
"strconv"
"strings"
"time"
)
// startsWithConds - map which indicates whether a given condition supports the starts-with policy operator
var startsWithConds = map[string]bool{
"$acl": true,
"$bucket": false,
"$cache-control": true,
"$content-type": true,
"$content-disposition": true,
"$content-encoding": true,
"$expires": true,
"$key": true,
"$success_action_redirect": true,
"$redirect": true,
"$success_action_status": false,
"$x-amz-algorithm": false,
"$x-amz-credential": false,
"$x-amz-date": false,
}
// Add policy conditionals.
const (
policyCondEqual = "eq"
policyCondStartsWith = "starts-with"
policyCondContentLength = "content-length-range"
)
// toString - Safely convert interface to string without causing panic.
func toString(val interface{}) string {
switch v := val.(type) {
case string:
return v
default:
return ""
}
}
// toLowerString - safely convert interface to lower string
func toLowerString(val interface{}) string {
return strings.ToLower(toString(val))
}
// toInteger - Safely convert interface to integer without causing panic.
func toInteger(val interface{}) (int64, error) {
switch v := val.(type) {
case float64:
return int64(v), nil
case int64:
return v, nil
case int:
return int64(v), nil
case string:
i, err := strconv.Atoi(v)
return int64(i), err
default:
return 0, errors.New("Invalid number format")
}
}
// isString - Safely check if val is of type string without causing panic.
func isString(val interface{}) bool {
_, ok := val.(string)
return ok
}
// ContentLengthRange - policy content-length-range field.
type contentLengthRange struct {
Min int64
Max int64
Valid bool // If content-length-range was part of policy
}
// PostPolicyForm provides strict static type conversion and validation for Amazon S3's POST policy JSON string.
type PostPolicyForm struct {
Expiration time.Time // Expiration date and time of the POST policy.
Conditions struct { // Conditional policy structure.
Policies []struct {
Operator string
Key string
Value string
}
ContentLengthRange contentLengthRange
}
}
// ParsePostPolicyForm - Parse JSON policy string into typed PostPolicyForm structure.
func ParsePostPolicyForm(policy string) (ppf PostPolicyForm, e error) {
// Convert policy into interfaces and
// perform strict type conversion using reflection.
var rawPolicy struct {
Expiration string `json:"expiration"`
Conditions []interface{} `json:"conditions"`
}
err := json.Unmarshal([]byte(policy), &rawPolicy)
if err != nil {
return ppf, err
}
parsedPolicy := PostPolicyForm{}
// Parse expiry time.
parsedPolicy.Expiration, err = time.Parse(time.RFC3339Nano, rawPolicy.Expiration)
if err != nil {
return ppf, err
}
// Parse conditions.
for _, val := range rawPolicy.Conditions {
switch condt := val.(type) {
case map[string]interface{}: // Handle key:value map types.
for k, v := range condt {
if !isString(v) { // Pre-check value type.
// All values must be of type string.
return parsedPolicy, fmt.Errorf("Unknown type %s of conditional field value %s found in POST policy form", reflect.TypeOf(condt).String(), condt)
}
// {"acl": "public-read" } is an alternate way to indicate - [ "eq", "$acl", "public-read" ]
// In this case we will just collapse this into "eq" for all use cases.
parsedPolicy.Conditions.Policies = append(parsedPolicy.Conditions.Policies, struct {
Operator string
Key string
Value string
}{
policyCondEqual, "$" + strings.ToLower(k), toString(v),
})
}
case []interface{}: // Handle array types.
if len(condt) != 3 { // Return error if we have insufficient elements.
return parsedPolicy, fmt.Errorf("Malformed conditional fields %s of type %s found in POST policy form", condt, reflect.TypeOf(condt).String())
}
switch toLowerString(condt[0]) {
case policyCondEqual, policyCondStartsWith:
for _, v := range condt { // Pre-check all values for type.
if !isString(v) {
// All values must be of type string.
return parsedPolicy, fmt.Errorf("Unknown type %s of conditional field value %s found in POST policy form", reflect.TypeOf(condt).String(), condt)
}
}
operator, matchType, value := toLowerString(condt[0]), toLowerString(condt[1]), toString(condt[2])
if !strings.HasPrefix(matchType, "$") {
return parsedPolicy, fmt.Errorf("Invalid according to Policy: Policy Condition failed: [%s, %s, %s]", operator, matchType, value)
}
parsedPolicy.Conditions.Policies = append(parsedPolicy.Conditions.Policies, struct {
Operator string
Key string
Value string
}{
operator, matchType, value,
})
case policyCondContentLength:
min, err := toInteger(condt[1])
if err != nil {
return parsedPolicy, err
}
max, err := toInteger(condt[2])
if err != nil {
return parsedPolicy, err
}
parsedPolicy.Conditions.ContentLengthRange = contentLengthRange{
Min: min,
Max: max,
Valid: true,
}
default:
// Condition should be valid.
return parsedPolicy, fmt.Errorf("Unknown type %s of conditional field value %s found in POST policy form",
reflect.TypeOf(condt).String(), condt)
}
default:
return parsedPolicy, fmt.Errorf("Unknown field %s of type %s found in POST policy form",
condt, reflect.TypeOf(condt).String())
}
}
return parsedPolicy, nil
}
// checkPolicyCond returns a boolean to indicate if a condition is satisfied according
// to the passed operator
func checkPolicyCond(op string, input1, input2 string) bool {
switch op {
case policyCondEqual:
return input1 == input2
case policyCondStartsWith:
return strings.HasPrefix(input1, input2)
}
return false
}
// CheckPostPolicy - apply policy conditions and validate input values.
// (http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-HTTPPOSTConstructPolicy.html)
func CheckPostPolicy(formValues http.Header, postPolicyForm PostPolicyForm) error {
// Check if policy document expiry date is still not reached
if !postPolicyForm.Expiration.After(time.Now().UTC()) {
return fmt.Errorf("Invalid according to Policy: Policy expired")
}
// map to store the metadata
metaMap := make(map[string]string)
for _, policy := range postPolicyForm.Conditions.Policies {
if strings.HasPrefix(policy.Key, "$x-amz-meta-") {
formCanonicalName := http.CanonicalHeaderKey(strings.TrimPrefix(policy.Key, "$"))
metaMap[formCanonicalName] = policy.Value
}
}
// Check if any extra metadata field is passed as input
for key := range formValues {
if strings.HasPrefix(key, "X-Amz-Meta-") {
if _, ok := metaMap[key]; !ok {
return fmt.Errorf("Invalid according to Policy: Extra input fields: %s", key)
}
}
}
// Flag to indicate if all policies conditions are satisfied
var condPassed bool
// Iterate over policy conditions and check them against received form fields
for _, policy := range postPolicyForm.Conditions.Policies {
// Form field names are in canonical format; convert condition names
// to canonical form for simplicity, so `$key` becomes `Key`
formCanonicalName := http.CanonicalHeaderKey(strings.TrimPrefix(policy.Key, "$"))
// Operator for the current policy condition
op := policy.Operator
// If the current policy condition is known
if startsWithSupported, condFound := startsWithConds[policy.Key]; condFound {
// Check if the current condition supports starts-with operator
if op == policyCondStartsWith && !startsWithSupported {
return fmt.Errorf("Invalid according to Policy: Policy Condition failed")
}
// Check if current policy condition is satisfied
condPassed = checkPolicyCond(op, formValues.Get(formCanonicalName), policy.Value)
if !condPassed {
return fmt.Errorf("Invalid according to Policy: Policy Condition failed")
}
} else {
// This covers all conditions X-Amz-Meta-* and X-Amz-*
if strings.HasPrefix(policy.Key, "$x-amz-meta-") || strings.HasPrefix(policy.Key, "$x-amz-") {
// Check if policy condition is satisfied
condPassed = checkPolicyCond(op, formValues.Get(formCanonicalName), policy.Value)
if !condPassed {
return fmt.Errorf("Invalid according to Policy: Policy Condition failed: [%s, %s, %s]", op, policy.Key, policy.Value)
}
}
}
}
return nil
}
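
Taken together, ParsePostPolicyForm and CheckPostPolicy validate a browser POST upload against its policy document. A standalone sketch with a hypothetical policy and matching form fields (assuming the package import path github.com/chrislusf/seaweedfs/weed/s3api/policy):

package main

import (
	"fmt"
	"net/http"

	"github.com/chrislusf/seaweedfs/weed/s3api/policy"
)

func main() {
	// Hypothetical POST policy document of the shape ParsePostPolicyForm expects.
	doc := `{
	  "expiration": "2030-01-01T00:00:00.000Z",
	  "conditions": [
	    ["eq", "$bucket", "testbucket"],
	    ["starts-with", "$key", "user/user1/"],
	    ["content-length-range", 1, 1048576]
	  ]
	}`

	form, err := policy.ParsePostPolicyForm(doc)
	if err != nil {
		fmt.Println("parse:", err)
		return
	}

	// Form fields as they would arrive in the multipart POST body.
	values := make(http.Header)
	values.Set("Bucket", "testbucket")
	values.Set("Key", "user/user1/hello.txt")

	fmt.Println(policy.CheckPostPolicy(values, form)) // <nil> when every condition is satisfied
}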

106
weed/s3api/policy/postpolicyform_test.go

@ -0,0 +1,106 @@
package policy
/*
* MinIO Cloud Storage, (C) 2016 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import (
"encoding/base64"
"fmt"
"net/http"
"testing"
"time"
)
// Test Post Policy parsing and checking conditions
func TestPostPolicyForm(t *testing.T) {
pp := NewPostPolicy()
pp.SetBucket("testbucket")
pp.SetContentType("image/jpeg")
pp.SetUserMetadata("uuid", "14365123651274")
pp.SetKeyStartsWith("user/user1/filename")
pp.SetContentLengthRange(1048579, 10485760)
pp.SetSuccessStatusAction("201")
type testCase struct {
Bucket string
Key string
XAmzDate string
XAmzAlgorithm string
XAmzCredential string
XAmzMetaUUID string
ContentType string
SuccessActionStatus string
Policy string
Expired bool
expectedErr error
}
testCases := []testCase{
// Everything is fine with this test
{Bucket: "testbucket", Key: "user/user1/filename/${filename}/myfile.txt", XAmzMetaUUID: "14365123651274", SuccessActionStatus: "201", XAmzCredential: "KVGKMDUQ23TCZXTLTHLP/20160727/us-east-1/s3/aws4_request", XAmzDate: "20160727T000000Z", XAmzAlgorithm: "AWS4-HMAC-SHA256", ContentType: "image/jpeg", expectedErr: nil},
// Expired policy document
{Bucket: "testbucket", Key: "user/user1/filename/${filename}/myfile.txt", XAmzMetaUUID: "14365123651274", SuccessActionStatus: "201", XAmzCredential: "KVGKMDUQ23TCZXTLTHLP/20160727/us-east-1/s3/aws4_request", XAmzDate: "20160727T000000Z", XAmzAlgorithm: "AWS4-HMAC-SHA256", ContentType: "image/jpeg", Expired: true, expectedErr: fmt.Errorf("Invalid according to Policy: Policy expired")},
// Different AMZ date
{Bucket: "testbucket", Key: "user/user1/filename/${filename}/myfile.txt", XAmzMetaUUID: "14365123651274", XAmzDate: "2017T000000Z", XAmzAlgorithm: "AWS4-HMAC-SHA256", ContentType: "image/jpeg", expectedErr: fmt.Errorf("Invalid according to Policy: Policy Condition failed")},
// Key which doesn't start with user/user1/filename
{Bucket: "testbucket", Key: "myfile.txt", XAmzDate: "20160727T000000Z", XAmzMetaUUID: "14365123651274", XAmzAlgorithm: "AWS4-HMAC-SHA256", ContentType: "image/jpeg", expectedErr: fmt.Errorf("Invalid according to Policy: Policy Condition failed")},
// Incorrect bucket name.
{Bucket: "incorrect", Key: "user/user1/filename/myfile.txt", XAmzMetaUUID: "14365123651274", XAmzDate: "20160727T000000Z", XAmzAlgorithm: "AWS4-HMAC-SHA256", ContentType: "image/jpeg", expectedErr: fmt.Errorf("Invalid according to Policy: Policy Condition failed")},
// Incorrect key name
{Bucket: "testbucket", Key: "incorrect", XAmzDate: "20160727T000000Z", XAmzMetaUUID: "14365123651274", XAmzAlgorithm: "AWS4-HMAC-SHA256", ContentType: "image/jpeg", expectedErr: fmt.Errorf("Invalid according to Policy: Policy Condition failed")},
// Incorrect date
{Bucket: "testbucket", Key: "user/user1/filename/${filename}/myfile.txt", XAmzMetaUUID: "14365123651274", XAmzDate: "incorrect", XAmzAlgorithm: "AWS4-HMAC-SHA256", ContentType: "image/jpeg", expectedErr: fmt.Errorf("Invalid according to Policy: Policy Condition failed")},
// Incorrect ContentType
{Bucket: "testbucket", Key: "user/user1/filename/${filename}/myfile.txt", XAmzMetaUUID: "14365123651274", XAmzDate: "20160727T000000Z", XAmzAlgorithm: "AWS4-HMAC-SHA256", ContentType: "incorrect", expectedErr: fmt.Errorf("Invalid according to Policy: Policy Condition failed")},
// Incorrect Metadata
{Bucket: "testbucket", Key: "user/user1/filename/${filename}/myfile.txt", XAmzMetaUUID: "151274", SuccessActionStatus: "201", XAmzCredential: "KVGKMDUQ23TCZXTLTHLP/20160727/us-east-1/s3/aws4_request", XAmzDate: "20160727T000000Z", XAmzAlgorithm: "AWS4-HMAC-SHA256", ContentType: "image/jpeg", expectedErr: fmt.Errorf("Invalid according to Policy: Policy Condition failed: [eq, $x-amz-meta-uuid, 14365123651274]")},
}
// Validate all the test cases.
for i, tt := range testCases {
formValues := make(http.Header)
formValues.Set("Bucket", tt.Bucket)
formValues.Set("Key", tt.Key)
formValues.Set("Content-Type", tt.ContentType)
formValues.Set("X-Amz-Date", tt.XAmzDate)
formValues.Set("X-Amz-Meta-Uuid", tt.XAmzMetaUUID)
formValues.Set("X-Amz-Algorithm", tt.XAmzAlgorithm)
formValues.Set("X-Amz-Credential", tt.XAmzCredential)
if tt.Expired {
// Expired already.
pp.SetExpires(time.Now().UTC().AddDate(0, 0, -10))
} else {
// Expires in 10 days.
pp.SetExpires(time.Now().UTC().AddDate(0, 0, 10))
}
formValues.Set("Policy", base64.StdEncoding.EncodeToString([]byte(pp.String())))
formValues.Set("Success_action_status", tt.SuccessActionStatus)
policyBytes, err := base64.StdEncoding.DecodeString(base64.StdEncoding.EncodeToString([]byte(pp.String())))
if err != nil {
t.Fatal(err)
}
postPolicyForm, err := ParsePostPolicyForm(string(policyBytes))
if err != nil {
t.Fatal(err)
}
err = CheckPostPolicy(formValues, postPolicyForm)
if err != nil && tt.expectedErr != nil && err.Error() != tt.expectedErr.Error() {
t.Fatalf("Test %d:, Expected %s, got %s", i+1, tt.expectedErr.Error(), err.Error())
}
}
}
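For orientation, a minimal sketch of the same policy round trip outside the test harness, assuming the exported policy package API exercised above (NewPostPolicy, ParsePostPolicyForm, CheckPostPolicy); the bucket, key, and expiry values are illustrative:

package main

import (
	"encoding/base64"
	"fmt"
	"net/http"
	"time"

	"github.com/chrislusf/seaweedfs/weed/s3api/policy"
)

func main() {
	// Build a browser-upload policy: bucket, key prefix, and expiry.
	pp := policy.NewPostPolicy()
	pp.SetBucket("testbucket")
	pp.SetKeyStartsWith("user/user1/")
	pp.SetExpires(time.Now().UTC().Add(15 * time.Minute))

	// The policy document travels base64-encoded in the "Policy" form field.
	encoded := base64.StdEncoding.EncodeToString([]byte(pp.String()))

	// Server side: decode, parse, then check the submitted form values.
	decoded, _ := base64.StdEncoding.DecodeString(encoded)
	form, err := policy.ParsePostPolicyForm(string(decoded))
	if err != nil {
		fmt.Println("parse:", err)
		return
	}
	values := make(http.Header)
	values.Set("Bucket", "testbucket")
	values.Set("Key", "user/user1/photo.jpg")
	values.Set("Policy", encoded)
	fmt.Println("policy check:", policy.CheckPostPolicy(values, form))
}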

9
weed/s3api/s3api_bucket_handlers.go

@ -4,6 +4,7 @@ import (
"context"
"encoding/xml"
"fmt"
"github.com/chrislusf/seaweedfs/weed/s3api/s3err"
"math"
"net/http"
"time"
@ -28,7 +29,7 @@ func (s3a *S3ApiServer) ListBucketsHandler(w http.ResponseWriter, r *http.Reques
entries, _, err := s3a.list(s3a.option.BucketsPath, "", "", false, math.MaxInt32)
if err != nil {
writeErrorResponse(w, ErrInternalError, r.URL)
writeErrorResponse(w, s3err.ErrInternalError, r.URL)
return
}
@ -59,7 +60,7 @@ func (s3a *S3ApiServer) PutBucketHandler(w http.ResponseWriter, r *http.Request)
// create the folder for bucket, but lazily create actual collection
if err := s3a.mkdir(s3a.option.BucketsPath, bucket, nil); err != nil {
writeErrorResponse(w, ErrInternalError, r.URL)
writeErrorResponse(w, s3err.ErrInternalError, r.URL)
return
}
@ -88,7 +89,7 @@ func (s3a *S3ApiServer) DeleteBucketHandler(w http.ResponseWriter, r *http.Reque
err = s3a.rm(s3a.option.BucketsPath, bucket, false, true)
if err != nil {
writeErrorResponse(w, ErrInternalError, r.URL)
writeErrorResponse(w, s3err.ErrInternalError, r.URL)
return
}
@ -118,7 +119,7 @@ func (s3a *S3ApiServer) HeadBucketHandler(w http.ResponseWriter, r *http.Request
})
if err != nil {
writeErrorResponse(w, ErrNoSuchBucket, r.URL)
writeErrorResponse(w, s3err.ErrNoSuchBucket, r.URL)
return
}

11
weed/s3api/s3api_handlers.go

@ -5,6 +5,7 @@ import (
"encoding/base64"
"encoding/xml"
"fmt"
"github.com/chrislusf/seaweedfs/weed/s3api/s3err"
"net/http"
"net/url"
"strconv"
@ -56,18 +57,18 @@ func (s3a *S3ApiServer) AdjustedUrl(hostAndPort string) string {
// If none of the http routes match respond with MethodNotAllowed
func notFoundHandler(w http.ResponseWriter, r *http.Request) {
glog.V(0).Infof("unsupported %s %s", r.Method, r.RequestURI)
writeErrorResponse(w, ErrMethodNotAllowed, r.URL)
writeErrorResponse(w, s3err.ErrMethodNotAllowed, r.URL)
}
func writeErrorResponse(w http.ResponseWriter, errorCode ErrorCode, reqURL *url.URL) {
apiError := getAPIError(errorCode)
func writeErrorResponse(w http.ResponseWriter, errorCode s3err.ErrorCode, reqURL *url.URL) {
apiError := s3err.GetAPIError(errorCode)
errorResponse := getRESTErrorResponse(apiError, reqURL.Path)
encodedErrorResponse := encodeResponse(errorResponse)
writeResponse(w, apiError.HTTPStatusCode, encodedErrorResponse, mimeXML)
}
func getRESTErrorResponse(err APIError, resource string) RESTErrorResponse {
return RESTErrorResponse{
func getRESTErrorResponse(err s3err.APIError, resource string) s3err.RESTErrorResponse {
return s3err.RESTErrorResponse{
Code: err.Code,
Message: err.Description,
Resource: resource,

21
weed/s3api/s3api_object_copy_handlers.go

@ -2,6 +2,7 @@ package s3api
import (
"fmt"
"github.com/chrislusf/seaweedfs/weed/s3api/s3err"
"net/http"
"net/url"
"strconv"
@ -25,12 +26,12 @@ func (s3a *S3ApiServer) CopyObjectHandler(w http.ResponseWriter, r *http.Request
srcBucket, srcObject := pathToBucketAndObject(cpSrcPath)
// If source object is empty or bucket is empty, reply back invalid copy source.
if srcObject == "" || srcBucket == "" {
writeErrorResponse(w, ErrInvalidCopySource, r.URL)
writeErrorResponse(w, s3err.ErrInvalidCopySource, r.URL)
return
}
if srcBucket == dstBucket && srcObject == dstObject {
writeErrorResponse(w, ErrInvalidCopySource, r.URL)
writeErrorResponse(w, s3err.ErrInvalidCopySource, r.URL)
return
}
@ -41,14 +42,14 @@ func (s3a *S3ApiServer) CopyObjectHandler(w http.ResponseWriter, r *http.Request
_, _, resp, err := util.DownloadFile(srcUrl)
if err != nil {
writeErrorResponse(w, ErrInvalidCopySource, r.URL)
writeErrorResponse(w, s3err.ErrInvalidCopySource, r.URL)
return
}
defer util.CloseResponse(resp)
etag, errCode := s3a.putToFiler(r, dstUrl, resp.Body)
if errCode != ErrNone {
if errCode != s3err.ErrNone {
writeErrorResponse(w, errCode, r.URL)
return
}
@ -93,7 +94,7 @@ func (s3a *S3ApiServer) CopyObjectPartHandler(w http.ResponseWriter, r *http.Req
srcBucket, srcObject := pathToBucketAndObject(cpSrcPath)
// If source object is empty or bucket is empty, reply back invalid copy source.
if srcObject == "" || srcBucket == "" {
writeErrorResponse(w, ErrInvalidCopySource, r.URL)
writeErrorResponse(w, s3err.ErrInvalidCopySource, r.URL)
return
}
@ -102,33 +103,33 @@ func (s3a *S3ApiServer) CopyObjectPartHandler(w http.ResponseWriter, r *http.Req
partID, err := strconv.Atoi(partIDString)
if err != nil {
writeErrorResponse(w, ErrInvalidPart, r.URL)
writeErrorResponse(w, s3err.ErrInvalidPart, r.URL)
return
}
// check partID with maximum part ID for multipart objects
if partID > globalMaxPartID {
writeErrorResponse(w, ErrInvalidMaxParts, r.URL)
writeErrorResponse(w, s3err.ErrInvalidMaxParts, r.URL)
return
}
rangeHeader := r.Header.Get("x-amz-copy-source-range")
dstUrl := fmt.Sprintf("http://%s%s/%s/%04d.part?collection=%s",
s3a.option.Filer, s3a.genUploadsFolder(dstBucket), uploadID, partID-1, dstBucket)
s3a.option.Filer, s3a.genUploadsFolder(dstBucket), uploadID, partID, dstBucket)
srcUrl := fmt.Sprintf("http://%s%s/%s%s",
s3a.option.Filer, s3a.option.BucketsPath, srcBucket, srcObject)
dataReader, err := util.ReadUrlAsReaderCloser(srcUrl, rangeHeader)
if err != nil {
writeErrorResponse(w, ErrInvalidCopySource, r.URL)
writeErrorResponse(w, s3err.ErrInvalidCopySource, r.URL)
return
}
defer dataReader.Close()
etag, errCode := s3a.putToFiler(r, dstUrl, dataReader)
if errCode != ErrNone {
if errCode != s3err.ErrNone {
writeErrorResponse(w, errCode, r.URL)
return
}

37
weed/s3api/s3api_object_handlers.go

@ -5,6 +5,7 @@ import (
"encoding/json"
"encoding/xml"
"fmt"
"github.com/chrislusf/seaweedfs/weed/s3api/s3err"
"io"
"io/ioutil"
"net/http"
@ -36,14 +37,14 @@ func (s3a *S3ApiServer) PutObjectHandler(w http.ResponseWriter, r *http.Request)
_, err := validateContentMd5(r.Header)
if err != nil {
writeErrorResponse(w, ErrInvalidDigest, r.URL)
writeErrorResponse(w, s3err.ErrInvalidDigest, r.URL)
return
}
dataReader := r.Body
if s3a.iam.isEnabled() {
rAuthType := getRequestAuthType(r)
var s3ErrCode ErrorCode
var s3ErrCode s3err.ErrorCode
switch rAuthType {
case authTypeStreamingSigned:
dataReader, s3ErrCode = s3a.iam.newSignV4ChunkedReader(r)
@ -52,7 +53,7 @@ func (s3a *S3ApiServer) PutObjectHandler(w http.ResponseWriter, r *http.Request)
case authTypePresigned, authTypeSigned:
_, s3ErrCode = s3a.iam.reqSignatureV4Verify(r)
}
if s3ErrCode != ErrNone {
if s3ErrCode != s3err.ErrNone {
writeErrorResponse(w, s3ErrCode, r.URL)
return
}
@ -61,7 +62,7 @@ func (s3a *S3ApiServer) PutObjectHandler(w http.ResponseWriter, r *http.Request)
if strings.HasSuffix(object, "/") {
if err := s3a.mkdir(s3a.option.BucketsPath, bucket+object, nil); err != nil {
writeErrorResponse(w, ErrInternalError, r.URL)
writeErrorResponse(w, s3err.ErrInternalError, r.URL)
return
}
} else {
@ -69,7 +70,7 @@ func (s3a *S3ApiServer) PutObjectHandler(w http.ResponseWriter, r *http.Request)
etag, errCode := s3a.putToFiler(r, uploadUrl, dataReader)
if errCode != ErrNone {
if errCode != s3err.ErrNone {
writeErrorResponse(w, errCode, r.URL)
return
}
@ -85,7 +86,7 @@ func (s3a *S3ApiServer) GetObjectHandler(w http.ResponseWriter, r *http.Request)
bucket, object := getBucketAndObject(r)
if strings.HasSuffix(r.URL.Path, "/") {
writeErrorResponse(w, ErrNotImplemented, r.URL)
writeErrorResponse(w, s3err.ErrNotImplemented, r.URL)
return
}
@ -161,13 +162,13 @@ func (s3a *S3ApiServer) DeleteMultipleObjectsHandler(w http.ResponseWriter, r *h
deleteXMLBytes, err := ioutil.ReadAll(r.Body)
if err != nil {
writeErrorResponse(w, ErrInternalError, r.URL)
writeErrorResponse(w, s3err.ErrInternalError, r.URL)
return
}
deleteObjects := &DeleteObjectsRequest{}
if err := xml.Unmarshal(deleteXMLBytes, deleteObjects); err != nil {
writeErrorResponse(w, ErrMalformedXML, r.URL)
writeErrorResponse(w, s3err.ErrMalformedXML, r.URL)
return
}
@ -217,7 +218,7 @@ func (s3a *S3ApiServer) proxyToFiler(w http.ResponseWriter, r *http.Request, des
if err != nil {
glog.Errorf("NewRequest %s: %v", destUrl, err)
writeErrorResponse(w, ErrInternalError, r.URL)
writeErrorResponse(w, s3err.ErrInternalError, r.URL)
return
}
@ -233,13 +234,13 @@ func (s3a *S3ApiServer) proxyToFiler(w http.ResponseWriter, r *http.Request, des
resp, postErr := client.Do(proxyReq)
if resp.ContentLength == -1 {
writeErrorResponse(w, ErrNoSuchKey, r.URL)
writeErrorResponse(w, s3err.ErrNoSuchKey, r.URL)
return
}
if postErr != nil {
glog.Errorf("post to filer: %v", postErr)
writeErrorResponse(w, ErrInternalError, r.URL)
writeErrorResponse(w, s3err.ErrInternalError, r.URL)
return
}
defer util.CloseResponse(resp)
@ -255,7 +256,7 @@ func passThroughResponse(proxyResponse *http.Response, w http.ResponseWriter) {
io.Copy(w, proxyResponse.Body)
}
func (s3a *S3ApiServer) putToFiler(r *http.Request, uploadUrl string, dataReader io.Reader) (etag string, code ErrorCode) {
func (s3a *S3ApiServer) putToFiler(r *http.Request, uploadUrl string, dataReader io.Reader) (etag string, code s3err.ErrorCode) {
hash := md5.New()
var body = io.TeeReader(dataReader, hash)
@ -264,7 +265,7 @@ func (s3a *S3ApiServer) putToFiler(r *http.Request, uploadUrl string, dataReader
if err != nil {
glog.Errorf("NewRequest %s: %v", uploadUrl, err)
return "", ErrInternalError
return "", s3err.ErrInternalError
}
proxyReq.Header.Set("Host", s3a.option.Filer)
@ -280,7 +281,7 @@ func (s3a *S3ApiServer) putToFiler(r *http.Request, uploadUrl string, dataReader
if postErr != nil {
glog.Errorf("post to filer: %v", postErr)
return "", ErrInternalError
return "", s3err.ErrInternalError
}
defer resp.Body.Close()
@ -289,20 +290,20 @@ func (s3a *S3ApiServer) putToFiler(r *http.Request, uploadUrl string, dataReader
resp_body, ra_err := ioutil.ReadAll(resp.Body)
if ra_err != nil {
glog.Errorf("upload to filer response read: %v", ra_err)
return etag, ErrInternalError
return etag, s3err.ErrInternalError
}
var ret weed_server.FilerPostResult
unmarshal_err := json.Unmarshal(resp_body, &ret)
if unmarshal_err != nil {
glog.Errorf("failing to read upload to %s : %v", uploadUrl, string(resp_body))
return "", ErrInternalError
return "", s3err.ErrInternalError
}
if ret.Error != "" {
glog.Errorf("upload to filer error: %v", ret.Error)
return "", ErrInternalError
return "", s3err.ErrInternalError
}
return etag, ErrNone
return etag, s3err.ErrNone
}
func setEtag(w http.ResponseWriter, etag string) {

241
weed/s3api/s3api_object_handlers_postpolicy.go

@ -0,0 +1,241 @@
package s3api
import (
"bytes"
"encoding/base64"
"errors"
"fmt"
"github.com/chrislusf/seaweedfs/weed/s3api/policy"
"github.com/chrislusf/seaweedfs/weed/s3api/s3err"
"github.com/dustin/go-humanize"
"github.com/gorilla/mux"
"io"
"io/ioutil"
"mime/multipart"
"net/http"
"net/url"
"strings"
)
func (s3a *S3ApiServer) PostPolicyBucketHandler(w http.ResponseWriter, r *http.Request) {
// https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-HTTPPOSTConstructPolicy.html
// https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-post-example.html
bucket := mux.Vars(r)["bucket"]
reader, err := r.MultipartReader()
if err != nil {
writeErrorResponse(w, s3err.ErrMalformedPOSTRequest, r.URL)
return
}
form, err := reader.ReadForm(int64(5 * humanize.MiByte))
if err != nil {
writeErrorResponse(w, s3err.ErrMalformedPOSTRequest, r.URL)
return
}
defer form.RemoveAll()
fileBody, fileName, fileSize, formValues, err := extractPostPolicyFormValues(form)
if err != nil {
writeErrorResponse(w, s3err.ErrMalformedPOSTRequest, r.URL)
return
}
if fileBody == nil {
writeErrorResponse(w, s3err.ErrPOSTFileRequired, r.URL)
return
}
defer fileBody.Close()
formValues.Set("Bucket", bucket)
if fileName != "" && strings.Contains(formValues.Get("Key"), "${filename}") {
formValues.Set("Key", strings.Replace(formValues.Get("Key"), "${filename}", fileName, -1))
}
object := formValues.Get("Key")
successRedirect := formValues.Get("success_action_redirect")
successStatus := formValues.Get("success_action_status")
var redirectURL *url.URL
if successRedirect != "" {
redirectURL, err = url.Parse(successRedirect)
if err != nil {
writeErrorResponse(w, s3err.ErrMalformedPOSTRequest, r.URL)
return
}
}
// Verify policy signature.
errCode := s3a.iam.doesPolicySignatureMatch(formValues)
if errCode != s3err.ErrNone {
writeErrorResponse(w, errCode, r.URL)
return
}
policyBytes, err := base64.StdEncoding.DecodeString(formValues.Get("Policy"))
if err != nil {
writeErrorResponse(w, s3err.ErrMalformedPOSTRequest, r.URL)
return
}
// Handle policy if it is set.
if len(policyBytes) > 0 {
postPolicyForm, err := policy.ParsePostPolicyForm(string(policyBytes))
if err != nil {
writeErrorResponse(w, s3err.ErrPostPolicyConditionInvalidFormat, r.URL)
return
}
// Make sure formValues adhere to policy restrictions.
if err = policy.CheckPostPolicy(formValues, postPolicyForm); err != nil {
w.Header().Set("Location", r.URL.Path)
w.WriteHeader(http.StatusTemporaryRedirect)
return
}
// Ensure that the object size is within the expected range, and that the file size
// does not exceed the maximum single Put size (5 GiB)
lengthRange := postPolicyForm.Conditions.ContentLengthRange
if lengthRange.Valid {
if fileSize < lengthRange.Min {
writeErrorResponse(w, s3err.ErrEntityTooSmall, r.URL)
return
}
if fileSize > lengthRange.Max {
writeErrorResponse(w, s3err.ErrEntityTooLarge, r.URL)
return
}
}
}
uploadUrl := fmt.Sprintf("http://%s%s/%s/%s", s3a.option.Filer, s3a.option.BucketsPath, bucket, object)
etag, errCode := s3a.putToFiler(r, uploadUrl, fileBody)
if errCode != s3err.ErrNone {
writeErrorResponse(w, errCode, r.URL)
return
}
if successRedirect != "" {
// Replace raw query params..
redirectURL.RawQuery = getRedirectPostRawQuery(bucket, object, etag)
w.Header().Set("Location", redirectURL.String())
writeResponse(w, http.StatusSeeOther, nil, mimeNone)
return
}
setEtag(w, etag)
// Decide what http response to send depending on success_action_status parameter
switch successStatus {
case "201":
resp := encodeResponse(PostResponse{
Bucket: bucket,
Key: object,
ETag: `"` + etag + `"`,
Location: w.Header().Get("Location"),
})
writeResponse(w, http.StatusCreated, resp, mimeXML)
case "200":
writeResponse(w, http.StatusOK, nil, mimeNone)
default:
writeSuccessResponseEmpty(w)
}
}
// Extract form fields and file data from an HTTP POST Policy request
func extractPostPolicyFormValues(form *multipart.Form) (filePart io.ReadCloser, fileName string, fileSize int64, formValues http.Header, err error) {
/// HTML Form values
fileName = ""
// Canonicalize the form values into http.Header.
formValues = make(http.Header)
for k, v := range form.Value {
formValues[http.CanonicalHeaderKey(k)] = v
}
// Validate form values.
if err = validateFormFieldSize(formValues); err != nil {
return nil, "", 0, nil, err
}
// this means that filename="" was not specified for file key and Go has
// an ugly way of handling this situation. Refer here
// https://golang.org/src/mime/multipart/formdata.go#L61
if len(form.File) == 0 {
var b = &bytes.Buffer{}
for _, v := range formValues["File"] {
b.WriteString(v)
}
fileSize = int64(b.Len())
filePart = ioutil.NopCloser(b)
return filePart, fileName, fileSize, formValues, nil
}
// Iterate until we find a valid File field and break
for k, v := range form.File {
canonicalFormName := http.CanonicalHeaderKey(k)
if canonicalFormName == "File" {
if len(v) == 0 {
return nil, "", 0, nil, errors.New("Invalid arguments specified")
}
// Fetch fileHeader which has the uploaded file information
fileHeader := v[0]
// Set filename
fileName = fileHeader.Filename
// Open the uploaded part
filePart, err = fileHeader.Open()
if err != nil {
return nil, "", 0, nil, err
}
// Compute file size
fileSize, err = filePart.(io.Seeker).Seek(0, 2)
if err != nil {
return nil, "", 0, nil, err
}
// Reset Seek to the beginning
_, err = filePart.(io.Seeker).Seek(0, 0)
if err != nil {
return nil, "", 0, nil, err
}
// File found and ready for reading
break
}
}
return filePart, fileName, fileSize, formValues, nil
}
// Validate form field size for s3 specification requirement.
func validateFormFieldSize(formValues http.Header) error {
// Iterate over form values
for k := range formValues {
// Check if value's field exceeds S3 limit
if int64(len(formValues.Get(k))) > int64(1*humanize.MiByte) {
return errors.New("Data size larger than expected")
}
}
// Success.
return nil
}
func getRedirectPostRawQuery(bucket, key, etag string) string {
redirectValues := make(url.Values)
redirectValues.Set("bucket", bucket)
redirectValues.Set("key", key)
redirectValues.Set("etag", "\""+etag+"\"")
return redirectValues.Encode()
}
// Check to see if Policy is signed correctly.
func (iam *IdentityAccessManagement) doesPolicySignatureMatch(formValues http.Header) s3err.ErrorCode {
// For SignV2 - Signature field will be valid
if _, ok := formValues["Signature"]; ok {
return iam.doesPolicySignatureV2Match(formValues)
}
return iam.doesPolicySignatureV4Match(formValues)
}
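To show what this handler expects on the wire, here is a hedged client-side sketch of a browser-style POST upload; the host, credentials, and signature values are placeholders, and a real client must sign the base64 policy with SigV4 (or SigV2) before sending:

package main

import (
	"bytes"
	"fmt"
	"mime/multipart"
	"net/http"
)

func main() {
	var buf bytes.Buffer
	w := multipart.NewWriter(&buf)

	// Form fields read by PostPolicyBucketHandler and the policy checker.
	w.WriteField("key", "user/user1/${filename}")
	w.WriteField("policy", "<base64-encoded policy document>")
	w.WriteField("x-amz-algorithm", "AWS4-HMAC-SHA256")
	w.WriteField("x-amz-credential", "<access-key>/<date>/us-east-1/s3/aws4_request")
	w.WriteField("x-amz-date", "20160727T000000Z")
	w.WriteField("x-amz-signature", "<hex signature over the policy>")
	w.WriteField("success_action_status", "201")

	// The file part must be named "file"; its filename replaces ${filename}.
	fw, _ := w.CreateFormFile("file", "photo.jpg")
	fw.Write([]byte("hello world"))
	w.Close()

	// Placeholder endpoint: POST to the bucket URL on the S3 gateway.
	resp, err := http.Post("http://localhost:8333/testbucket", w.FormDataContentType(), &buf)
	if err != nil {
		fmt.Println(err)
		return
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status)
}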

33
weed/s3api/s3api_object_multipart_handlers.go

@ -2,6 +2,7 @@ package s3api
import (
"fmt"
"github.com/chrislusf/seaweedfs/weed/s3api/s3err"
"net/http"
"net/url"
"strconv"
@ -27,7 +28,7 @@ func (s3a *S3ApiServer) NewMultipartUploadHandler(w http.ResponseWriter, r *http
Key: objectKey(aws.String(object)),
})
if errCode != ErrNone {
if errCode != s3err.ErrNone {
writeErrorResponse(w, errCode, r.URL)
return
}
@ -53,7 +54,7 @@ func (s3a *S3ApiServer) CompleteMultipartUploadHandler(w http.ResponseWriter, r
// println("CompleteMultipartUploadHandler", string(encodeResponse(response)), errCode)
if errCode != ErrNone {
if errCode != s3err.ErrNone {
writeErrorResponse(w, errCode, r.URL)
return
}
@ -75,7 +76,7 @@ func (s3a *S3ApiServer) AbortMultipartUploadHandler(w http.ResponseWriter, r *ht
UploadId: aws.String(uploadID),
})
if errCode != ErrNone {
if errCode != s3err.ErrNone {
writeErrorResponse(w, errCode, r.URL)
return
}
@ -92,13 +93,13 @@ func (s3a *S3ApiServer) ListMultipartUploadsHandler(w http.ResponseWriter, r *ht
prefix, keyMarker, uploadIDMarker, delimiter, maxUploads, encodingType := getBucketMultipartResources(r.URL.Query())
if maxUploads < 0 {
writeErrorResponse(w, ErrInvalidMaxUploads, r.URL)
writeErrorResponse(w, s3err.ErrInvalidMaxUploads, r.URL)
return
}
if keyMarker != "" {
// Marker not common with prefix is not implemented.
if !strings.HasPrefix(keyMarker, prefix) {
writeErrorResponse(w, ErrNotImplemented, r.URL)
writeErrorResponse(w, s3err.ErrNotImplemented, r.URL)
return
}
}
@ -113,7 +114,7 @@ func (s3a *S3ApiServer) ListMultipartUploadsHandler(w http.ResponseWriter, r *ht
UploadIdMarker: aws.String(uploadIDMarker),
})
if errCode != ErrNone {
if errCode != s3err.ErrNone {
writeErrorResponse(w, errCode, r.URL)
return
}
@ -130,11 +131,11 @@ func (s3a *S3ApiServer) ListObjectPartsHandler(w http.ResponseWriter, r *http.Re
uploadID, partNumberMarker, maxParts, _ := getObjectResources(r.URL.Query())
if partNumberMarker < 0 {
writeErrorResponse(w, ErrInvalidPartNumberMarker, r.URL)
writeErrorResponse(w, s3err.ErrInvalidPartNumberMarker, r.URL)
return
}
if maxParts < 0 {
writeErrorResponse(w, ErrInvalidMaxParts, r.URL)
writeErrorResponse(w, s3err.ErrInvalidMaxParts, r.URL)
return
}
@ -146,7 +147,7 @@ func (s3a *S3ApiServer) ListObjectPartsHandler(w http.ResponseWriter, r *http.Re
UploadId: aws.String(uploadID),
})
if errCode != ErrNone {
if errCode != s3err.ErrNone {
writeErrorResponse(w, errCode, r.URL)
return
}
@ -164,25 +165,25 @@ func (s3a *S3ApiServer) PutObjectPartHandler(w http.ResponseWriter, r *http.Requ
uploadID := r.URL.Query().Get("uploadId")
exists, err := s3a.exists(s3a.genUploadsFolder(bucket), uploadID, true)
if !exists {
writeErrorResponse(w, ErrNoSuchUpload, r.URL)
writeErrorResponse(w, s3err.ErrNoSuchUpload, r.URL)
return
}
partIDString := r.URL.Query().Get("partNumber")
partID, err := strconv.Atoi(partIDString)
if err != nil {
writeErrorResponse(w, ErrInvalidPart, r.URL)
writeErrorResponse(w, s3err.ErrInvalidPart, r.URL)
return
}
if partID > globalMaxPartID {
writeErrorResponse(w, ErrInvalidMaxParts, r.URL)
writeErrorResponse(w, s3err.ErrInvalidMaxParts, r.URL)
return
}
dataReader := r.Body
if s3a.iam.isEnabled() {
rAuthType := getRequestAuthType(r)
var s3ErrCode ErrorCode
var s3ErrCode s3err.ErrorCode
switch rAuthType {
case authTypeStreamingSigned:
dataReader, s3ErrCode = s3a.iam.newSignV4ChunkedReader(r)
@ -191,7 +192,7 @@ func (s3a *S3ApiServer) PutObjectPartHandler(w http.ResponseWriter, r *http.Requ
case authTypePresigned, authTypeSigned:
_, s3ErrCode = s3a.iam.reqSignatureV4Verify(r)
}
if s3ErrCode != ErrNone {
if s3ErrCode != s3err.ErrNone {
writeErrorResponse(w, s3ErrCode, r.URL)
return
}
@ -199,11 +200,11 @@ func (s3a *S3ApiServer) PutObjectPartHandler(w http.ResponseWriter, r *http.Requ
defer dataReader.Close()
uploadUrl := fmt.Sprintf("http://%s%s/%s/%04d.part?collection=%s",
s3a.option.Filer, s3a.genUploadsFolder(bucket), uploadID, partID-1, bucket)
s3a.option.Filer, s3a.genUploadsFolder(bucket), uploadID, partID, bucket)
etag, errCode := s3a.putToFiler(r, uploadUrl, dataReader)
if errCode != ErrNone {
if errCode != s3err.ErrNone {
writeErrorResponse(w, errCode, r.URL)
return
}

13
weed/s3api/s3api_objects_list_handlers.go

@ -4,6 +4,7 @@ import (
"context"
"encoding/xml"
"fmt"
"github.com/chrislusf/seaweedfs/weed/s3api/s3err"
"io"
"net/http"
"net/url"
@ -41,11 +42,11 @@ func (s3a *S3ApiServer) ListObjectsV2Handler(w http.ResponseWriter, r *http.Requ
originalPrefix, continuationToken, startAfter, delimiter, _, maxKeys := getListObjectsV2Args(r.URL.Query())
if maxKeys < 0 {
writeErrorResponse(w, ErrInvalidMaxKeys, r.URL)
writeErrorResponse(w, s3err.ErrInvalidMaxKeys, r.URL)
return
}
if delimiter != "" && delimiter != "/" {
writeErrorResponse(w, ErrNotImplemented, r.URL)
writeErrorResponse(w, s3err.ErrNotImplemented, r.URL)
return
}
@ -57,7 +58,7 @@ func (s3a *S3ApiServer) ListObjectsV2Handler(w http.ResponseWriter, r *http.Requ
response, err := s3a.listFilerEntries(bucket, originalPrefix, maxKeys, marker, delimiter)
if err != nil {
writeErrorResponse(w, ErrInternalError, r.URL)
writeErrorResponse(w, s3err.ErrInternalError, r.URL)
return
}
responseV2 := &ListBucketResultV2{
@ -88,18 +89,18 @@ func (s3a *S3ApiServer) ListObjectsV1Handler(w http.ResponseWriter, r *http.Requ
originalPrefix, marker, delimiter, maxKeys := getListObjectsV1Args(r.URL.Query())
if maxKeys < 0 {
writeErrorResponse(w, ErrInvalidMaxKeys, r.URL)
writeErrorResponse(w, s3err.ErrInvalidMaxKeys, r.URL)
return
}
if delimiter != "" && delimiter != "/" {
writeErrorResponse(w, ErrNotImplemented, r.URL)
writeErrorResponse(w, s3err.ErrNotImplemented, r.URL)
return
}
response, err := s3a.listFilerEntries(bucket, originalPrefix, maxKeys, marker, delimiter)
if err != nil {
writeErrorResponse(w, ErrInternalError, r.URL)
writeErrorResponse(w, s3err.ErrInternalError, r.URL)
return
}

43
weed/s3api/s3api_server.go

@ -49,46 +49,49 @@ func (s3a *S3ApiServer) registerRouter(router *mux.Router) {
for _, bucket := range routers {
// HeadObject
bucket.Methods("HEAD").Path("/{object:.+}").HandlerFunc(s3a.iam.Auth(s3a.HeadObjectHandler, ACTION_READ))
bucket.Methods("HEAD").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.HeadObjectHandler, ACTION_READ), "GET"))
// HeadBucket
bucket.Methods("HEAD").HandlerFunc(s3a.iam.Auth(s3a.HeadBucketHandler, ACTION_ADMIN))
bucket.Methods("HEAD").HandlerFunc(track(s3a.iam.Auth(s3a.HeadBucketHandler, ACTION_ADMIN), "GET"))
// CopyObjectPart
bucket.Methods("PUT").Path("/{object:.+}").HeadersRegexp("X-Amz-Copy-Source", ".*?(\\/|%2F).*?").HandlerFunc(s3a.iam.Auth(s3a.CopyObjectPartHandler, ACTION_WRITE)).Queries("partNumber", "{partNumber:[0-9]+}", "uploadId", "{uploadId:.*}")
bucket.Methods("PUT").Path("/{object:.+}").HeadersRegexp("X-Amz-Copy-Source", ".*?(\\/|%2F).*?").HandlerFunc(track(s3a.iam.Auth(s3a.CopyObjectPartHandler, ACTION_WRITE), "PUT")).Queries("partNumber", "{partNumber:[0-9]+}", "uploadId", "{uploadId:.*}")
// PutObjectPart
bucket.Methods("PUT").Path("/{object:.+}").HandlerFunc(s3a.iam.Auth(s3a.PutObjectPartHandler, ACTION_WRITE)).Queries("partNumber", "{partNumber:[0-9]+}", "uploadId", "{uploadId:.*}")
bucket.Methods("PUT").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.PutObjectPartHandler, ACTION_WRITE), "PUT")).Queries("partNumber", "{partNumber:[0-9]+}", "uploadId", "{uploadId:.*}")
// CompleteMultipartUpload
bucket.Methods("POST").Path("/{object:.+}").HandlerFunc(s3a.iam.Auth(s3a.CompleteMultipartUploadHandler, ACTION_WRITE)).Queries("uploadId", "{uploadId:.*}")
bucket.Methods("POST").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.CompleteMultipartUploadHandler, ACTION_WRITE), "POST")).Queries("uploadId", "{uploadId:.*}")
// NewMultipartUpload
bucket.Methods("POST").Path("/{object:.+}").HandlerFunc(s3a.iam.Auth(s3a.NewMultipartUploadHandler, ACTION_WRITE)).Queries("uploads", "")
bucket.Methods("POST").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.NewMultipartUploadHandler, ACTION_WRITE), "POST")).Queries("uploads", "")
// AbortMultipartUpload
bucket.Methods("DELETE").Path("/{object:.+}").HandlerFunc(s3a.iam.Auth(s3a.AbortMultipartUploadHandler, ACTION_WRITE)).Queries("uploadId", "{uploadId:.*}")
bucket.Methods("DELETE").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.AbortMultipartUploadHandler, ACTION_WRITE), "DELETE")).Queries("uploadId", "{uploadId:.*}")
// ListObjectParts
bucket.Methods("GET").Path("/{object:.+}").HandlerFunc(s3a.iam.Auth(s3a.ListObjectPartsHandler, ACTION_WRITE)).Queries("uploadId", "{uploadId:.*}")
bucket.Methods("GET").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.ListObjectPartsHandler, ACTION_WRITE), "GET")).Queries("uploadId", "{uploadId:.*}")
// ListMultipartUploads
bucket.Methods("GET").HandlerFunc(s3a.iam.Auth(s3a.ListMultipartUploadsHandler, ACTION_WRITE)).Queries("uploads", "")
bucket.Methods("GET").HandlerFunc(track(s3a.iam.Auth(s3a.ListMultipartUploadsHandler, ACTION_WRITE), "GET")).Queries("uploads", "")
// CopyObject
bucket.Methods("PUT").Path("/{object:.+}").HeadersRegexp("X-Amz-Copy-Source", ".*?(\\/|%2F).*?").HandlerFunc(s3a.iam.Auth(s3a.CopyObjectHandler, ACTION_WRITE))
bucket.Methods("PUT").Path("/{object:.+}").HeadersRegexp("X-Amz-Copy-Source", ".*?(\\/|%2F).*?").HandlerFunc(track(s3a.iam.Auth(s3a.CopyObjectHandler, ACTION_WRITE), "COPY"))
// PutObject
bucket.Methods("PUT").Path("/{object:.+}").HandlerFunc(s3a.iam.Auth(s3a.PutObjectHandler, ACTION_WRITE))
bucket.Methods("PUT").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.PutObjectHandler, ACTION_WRITE), "PUT"))
// PutBucket
bucket.Methods("PUT").HandlerFunc(s3a.iam.Auth(s3a.PutBucketHandler, ACTION_ADMIN))
bucket.Methods("PUT").HandlerFunc(track(s3a.iam.Auth(s3a.PutBucketHandler, ACTION_ADMIN), "PUT"))
// DeleteObject
bucket.Methods("DELETE").Path("/{object:.+}").HandlerFunc(s3a.iam.Auth(s3a.DeleteObjectHandler, ACTION_WRITE))
bucket.Methods("DELETE").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.DeleteObjectHandler, ACTION_WRITE), "DELETE"))
// DeleteBucket
bucket.Methods("DELETE").HandlerFunc(s3a.iam.Auth(s3a.DeleteBucketHandler, ACTION_WRITE))
bucket.Methods("DELETE").HandlerFunc(track(s3a.iam.Auth(s3a.DeleteBucketHandler, ACTION_WRITE), "DELETE"))
// ListObjectsV2
bucket.Methods("GET").HandlerFunc(s3a.iam.Auth(s3a.ListObjectsV2Handler, ACTION_READ)).Queries("list-type", "2")
bucket.Methods("GET").HandlerFunc(track(s3a.iam.Auth(s3a.ListObjectsV2Handler, ACTION_READ), "LIST")).Queries("list-type", "2")
// GetObject, but directory listing is not supported
bucket.Methods("GET").Path("/{object:.+}").HandlerFunc(s3a.iam.Auth(s3a.GetObjectHandler, ACTION_READ))
bucket.Methods("GET").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.GetObjectHandler, ACTION_READ), "GET"))
// ListObjectsV1 (Legacy)
bucket.Methods("GET").HandlerFunc(s3a.iam.Auth(s3a.ListObjectsV1Handler, ACTION_READ))
bucket.Methods("GET").HandlerFunc(track(s3a.iam.Auth(s3a.ListObjectsV1Handler, ACTION_READ), "LIST"))
// PostPolicy
bucket.Methods("POST").HeadersRegexp("Content-Type", "multipart/form-data*").HandlerFunc(track(s3a.iam.Auth(s3a.PostPolicyBucketHandler, ACTION_WRITE), "POST"))
// DeleteMultipleObjects
bucket.Methods("POST").HandlerFunc(s3a.iam.Auth(s3a.DeleteMultipleObjectsHandler, ACTION_WRITE)).Queries("delete", "")
bucket.Methods("POST").HandlerFunc(track(s3a.iam.Auth(s3a.DeleteMultipleObjectsHandler, ACTION_WRITE), "DELETE")).Queries("delete", "")
/*
// not implemented
@ -104,14 +107,12 @@ func (s3a *S3ApiServer) registerRouter(router *mux.Router) {
bucket.Methods("PUT").HandlerFunc(s3a.PutBucketPolicyHandler).Queries("policy", "")
// DeleteBucketPolicy
bucket.Methods("DELETE").HandlerFunc(s3a.DeleteBucketPolicyHandler).Queries("policy", "")
// PostPolicy
bucket.Methods("POST").HeadersRegexp("Content-Type", "multipart/form-data*").HandlerFunc(s3a.PostPolicyBucketHandler)
*/
}
// ListBuckets
apiRouter.Methods("GET").Path("/").HandlerFunc(s3a.iam.Auth(s3a.ListBucketsHandler, ACTION_READ))
apiRouter.Methods("GET").Path("/").HandlerFunc(track(s3a.iam.Auth(s3a.ListBucketsHandler, ACTION_READ), "LIST"))
// NotFound
apiRouter.NotFoundHandler = http.HandlerFunc(notFoundHandler)

61
weed/s3api/s3err/s3-error.go

@ -0,0 +1,61 @@
package s3err
/*
* MinIO Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2015-2017 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// Non exhaustive list of AWS S3 standard error responses -
// http://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html
var s3ErrorResponseMap = map[string]string{
"AccessDenied": "Access Denied.",
"BadDigest": "The Content-Md5 you specified did not match what we received.",
"EntityTooSmall": "Your proposed upload is smaller than the minimum allowed object size.",
"EntityTooLarge": "Your proposed upload exceeds the maximum allowed object size.",
"IncompleteBody": "You did not provide the number of bytes specified by the Content-Length HTTP header.",
"InternalError": "We encountered an internal error, please try again.",
"InvalidAccessKeyId": "The access key ID you provided does not exist in our records.",
"InvalidBucketName": "The specified bucket is not valid.",
"InvalidDigest": "The Content-Md5 you specified is not valid.",
"InvalidRange": "The requested range is not satisfiable",
"MalformedXML": "The XML you provided was not well-formed or did not validate against our published schema.",
"MissingContentLength": "You must provide the Content-Length HTTP header.",
"MissingContentMD5": "Missing required header for this request: Content-Md5.",
"MissingRequestBodyError": "Request body is empty.",
"NoSuchBucket": "The specified bucket does not exist.",
"NoSuchBucketPolicy": "The bucket policy does not exist",
"NoSuchKey": "The specified key does not exist.",
"NoSuchUpload": "The specified multipart upload does not exist. The upload ID may be invalid, or the upload may have been aborted or completed.",
"NotImplemented": "A header you provided implies functionality that is not implemented",
"PreconditionFailed": "At least one of the pre-conditions you specified did not hold",
"RequestTimeTooSkewed": "The difference between the request time and the server's time is too large.",
"SignatureDoesNotMatch": "The request signature we calculated does not match the signature you provided. Check your key and signing method.",
"MethodNotAllowed": "The specified method is not allowed against this resource.",
"InvalidPart": "One or more of the specified parts could not be found.",
"InvalidPartOrder": "The list of parts was not in ascending order. The parts list must be specified in order by part number.",
"InvalidObjectState": "The operation is not valid for the current state of the object.",
"AuthorizationHeaderMalformed": "The authorization header is malformed; the region is wrong.",
"MalformedPOSTRequest": "The body of your POST request is not well-formed multipart/form-data.",
"BucketNotEmpty": "The bucket you tried to delete is not empty",
"AllAccessDisabled": "All access to this bucket has been disabled.",
"MalformedPolicy": "Policy has invalid resource.",
"MissingFields": "Missing fields in request.",
"AuthorizationQueryParametersError": "Error parsing the X-Amz-Credential parameter; the Credential is mal-formed; expecting \"<YOUR-AKID>/YYYYMMDD/REGION/SERVICE/aws4_request\".",
"MalformedDate": "Invalid date format header, expected to be in ISO8601, RFC1123 or RFC1123Z time format.",
"BucketAlreadyOwnedByYou": "Your previous request to create the named bucket succeeded and you already own it.",
"InvalidDuration": "Duration provided in the request is invalid.",
"XAmzContentSHA256Mismatch": "The provided 'x-amz-content-sha256' header does not match what was computed.",
// Add new API errors here.
}

54
weed/s3api/s3api_errors.go → weed/s3api/s3err/s3api_errors.go

@ -1,7 +1,8 @@
package s3api
package s3err
import (
"encoding/xml"
"fmt"
"net/http"
)
@ -19,6 +20,21 @@ type RESTErrorResponse struct {
Message string `xml:"Message" json:"Message"`
Resource string `xml:"Resource" json:"Resource"`
RequestID string `xml:"RequestId" json:"RequestId"`
// Underlying HTTP status code for the returned error
StatusCode int `xml:"-" json:"-"`
}
// Error - Returns S3 error string.
func (e RESTErrorResponse) Error() string {
if e.Message == "" {
msg, ok := s3ErrorResponseMap[e.Code]
if !ok {
msg = fmt.Sprintf("Error response code %s.", e.Code)
}
return msg
}
return e.Message
}
// ErrorCode type of error status.
@ -47,6 +63,11 @@ const (
ErrInvalidCopySource
ErrAuthHeaderEmpty
ErrSignatureVersionNotSupported
ErrMalformedPOSTRequest
ErrPOSTFileRequired
ErrPostPolicyConditionInvalidFormat
ErrEntityTooSmall
ErrEntityTooLarge
ErrMissingFields
ErrMissingCredTag
ErrCredMalformed
@ -167,13 +188,11 @@ var errorCodeResponse = map[ErrorCode]APIError{
Description: "Copy Source must mention the source bucket and key: sourcebucket/sourcekey.",
HTTPStatusCode: http.StatusBadRequest,
},
ErrMalformedXML: {
Code: "MalformedXML",
Description: "The XML you provided was not well-formed or did not validate against our published schema.",
HTTPStatusCode: http.StatusBadRequest,
},
ErrAuthHeaderEmpty: {
Code: "InvalidArgument",
Description: "Authorization header is invalid -- one and only one ' ' (space) required.",
@ -184,6 +203,31 @@ var errorCodeResponse = map[ErrorCode]APIError{
Description: "The authorization mechanism you have provided is not supported. Please use AWS4-HMAC-SHA256.",
HTTPStatusCode: http.StatusBadRequest,
},
ErrMalformedPOSTRequest: {
Code: "MalformedPOSTRequest",
Description: "The body of your POST request is not well-formed multipart/form-data.",
HTTPStatusCode: http.StatusBadRequest,
},
ErrPOSTFileRequired: {
Code: "InvalidArgument",
Description: "POST requires exactly one file upload per request.",
HTTPStatusCode: http.StatusBadRequest,
},
ErrPostPolicyConditionInvalidFormat: {
Code: "PostPolicyInvalidKeyName",
Description: "Invalid according to Policy: Policy Condition failed",
HTTPStatusCode: http.StatusForbidden,
},
ErrEntityTooSmall: {
Code: "EntityTooSmall",
Description: "Your proposed upload is smaller than the minimum allowed object size.",
HTTPStatusCode: http.StatusBadRequest,
},
ErrEntityTooLarge: {
Code: "EntityTooLarge",
Description: "Your proposed upload exceeds the maximum allowed object size.",
HTTPStatusCode: http.StatusBadRequest,
},
ErrMissingFields: {
Code: "MissingFields",
Description: "Missing fields in request.",
@ -296,7 +340,7 @@ var errorCodeResponse = map[ErrorCode]APIError{
},
}
// getAPIError provides API Error for input API error code.
func getAPIError(code ErrorCode) APIError {
// GetAPIError provides API Error for input API error code.
func GetAPIError(code ErrorCode) APIError {
return errorCodeResponse[code]
}
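A small sketch of turning one of these codes into a response body outside the handlers, using the exported GetAPIError and RESTErrorResponse shown above; the resource path is illustrative:

package main

import (
	"encoding/xml"
	"fmt"

	"github.com/chrislusf/seaweedfs/weed/s3api/s3err"
)

func main() {
	apiErr := s3err.GetAPIError(s3err.ErrNoSuchBucket)
	body, _ := xml.Marshal(s3err.RESTErrorResponse{
		Code:     apiErr.Code,
		Message:  apiErr.Description,
		Resource: "/testbucket",
	})
	fmt.Println(apiErr.HTTPStatusCode, string(body))
}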

21
weed/s3api/stats.go

@ -0,0 +1,21 @@
package s3api
import (
stats_collect "github.com/chrislusf/seaweedfs/weed/stats"
"github.com/chrislusf/seaweedfs/weed/util"
"net/http"
"time"
)
func track(f http.HandlerFunc, action string) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Server", "SeaweedFS S3 "+util.VERSION)
start := time.Now()
stats_collect.S3RequestCounter.WithLabelValues(action).Inc()
f(w, r)
stats_collect.S3RequestHistogram.WithLabelValues(action).Observe(time.Since(start).Seconds())
}
}
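Illustrative only: the wrapper composes with any http.HandlerFunc the same way registerRouter wires it above, so each call should surface as SeaweedFS_s3_request_total{type="GET"} plus a latency observation (metric names per the registrations in weed/stats/metrics.go below); the handler and route here are made up:

package s3api

import "net/http"

// exampleMux wraps a plain handler the same way registerRouter does,
// so every call is counted and timed under the "GET" action label.
func exampleMux() *http.ServeMux {
	hello := func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("hello from the s3 gateway"))
	}
	mux := http.NewServeMux()
	mux.HandleFunc("/hello", track(hello, "GET"))
	return mux
}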

9
weed/security/tls.go

@ -45,13 +45,18 @@ func LoadClientTLS(config *viper.Viper, component string) grpc.DialOption {
return grpc.WithInsecure()
}
certFileName, keyFileName, caFileName := config.GetString(component+".cert"), config.GetString(component+".key"), config.GetString(component+".ca")
if certFileName == "" || keyFileName == "" || caFileName == "" {
return grpc.WithInsecure()
}
// load cert/key, cacert
cert, err := tls.LoadX509KeyPair(config.GetString(component+".cert"), config.GetString(component+".key"))
cert, err := tls.LoadX509KeyPair(certFileName, keyFileName)
if err != nil {
glog.V(1).Infof("load cert/key error: %v", err)
return grpc.WithInsecure()
}
caCert, err := ioutil.ReadFile(config.GetString(component + ".ca"))
caCert, err := ioutil.ReadFile(caFileName)
if err != nil {
glog.V(1).Infof("read ca cert file error: %v", err)
return grpc.WithInsecure()

17
weed/server/filer_grpc_server.go

@ -164,6 +164,7 @@ func (fs *FilerServer) CreateEntry(ctx context.Context, req *filer_pb.CreateEntr
FullPath: util.JoinPath(req.Directory, req.Entry.Name),
Attr: filer.PbToEntryAttribute(req.Entry.Attributes),
Chunks: chunks,
Extended: req.Entry.Extended,
}, req.OExcl, req.IsFromOtherCluster, req.Signatures)
if createErr == nil {
@ -420,13 +421,15 @@ func (fs *FilerServer) Statistics(ctx context.Context, req *filer_pb.StatisticsR
func (fs *FilerServer) GetFilerConfiguration(ctx context.Context, req *filer_pb.GetFilerConfigurationRequest) (resp *filer_pb.GetFilerConfigurationResponse, err error) {
t := &filer_pb.GetFilerConfigurationResponse{
Masters: fs.option.Masters,
Collection: fs.option.Collection,
Replication: fs.option.DefaultReplication,
MaxMb: uint32(fs.option.MaxMB),
DirBuckets: fs.filer.DirBucketsPath,
Cipher: fs.filer.Cipher,
Signature: fs.filer.Signature,
Masters: fs.option.Masters,
Collection: fs.option.Collection,
Replication: fs.option.DefaultReplication,
MaxMb: uint32(fs.option.MaxMB),
DirBuckets: fs.filer.DirBucketsPath,
Cipher: fs.filer.Cipher,
Signature: fs.filer.Signature,
MetricsAddress: fs.metricsAddress,
MetricsIntervalSec: int32(fs.metricsIntervalSec),
}
glog.V(4).Infof("GetFilerConfiguration: %v", t)

1
weed/server/filer_grpc_server_rename.go

@ -109,6 +109,7 @@ func (fs *FilerServer) moveSelfEntry(ctx context.Context, oldParent util.FullPat
FullPath: newPath,
Attr: entry.Attr,
Chunks: entry.Chunks,
Extended: entry.Extended,
}
createErr := fs.filer.CreateEntry(ctx, newEntry, false, false, nil)
if createErr != nil {

22
weed/server/filer_server.go

@ -62,6 +62,10 @@ type FilerServer struct {
filer *filer.Filer
grpcDialOption grpc.DialOption
// metrics read from the master
metricsAddress string
metricsIntervalSec int
// notifying clients
listenersLock sync.Mutex
listenersCond *sync.Cond
@ -88,7 +92,7 @@ func NewFilerServer(defaultMux, readonlyMux *http.ServeMux, option *FilerOption)
})
fs.filer.Cipher = option.Cipher
maybeStartMetrics(fs, option)
fs.maybeStartMetrics()
go fs.filer.KeepConnectedToMaster()
@ -131,9 +135,9 @@ func NewFilerServer(defaultMux, readonlyMux *http.ServeMux, option *FilerOption)
return fs, nil
}
func maybeStartMetrics(fs *FilerServer, option *FilerOption) {
func (fs *FilerServer) maybeStartMetrics() {
for _, master := range option.Masters {
for _, master := range fs.option.Masters {
_, err := pb.ParseFilerGrpcAddress(master)
if err != nil {
glog.Fatalf("invalid master address %s: %v", master, err)
@ -141,12 +145,10 @@ func maybeStartMetrics(fs *FilerServer, option *FilerOption) {
}
isConnected := false
var metricsAddress string
var metricsIntervalSec int
var readErr error
for !isConnected {
for _, master := range option.Masters {
metricsAddress, metricsIntervalSec, readErr = readFilerConfiguration(fs.grpcDialOption, master)
for _, master := range fs.option.Masters {
fs.metricsAddress, fs.metricsIntervalSec, readErr = readFilerConfiguration(fs.grpcDialOption, master)
if readErr == nil {
isConnected = true
} else {
@ -154,10 +156,8 @@ func maybeStartMetrics(fs *FilerServer, option *FilerOption) {
}
}
}
if metricsAddress == "" && metricsIntervalSec <= 0 {
return
}
go stats.LoopPushingMetric("filer", stats.SourceName(option.Port), stats.FilerGather, metricsAddress, metricsIntervalSec)
go stats.LoopPushingMetric("filer", stats.SourceName(fs.option.Port), stats.FilerGather, fs.metricsAddress, fs.metricsIntervalSec)
}
func readFilerConfiguration(grpcDialOption grpc.DialOption, masterAddress string) (metricsAddress string, metricsIntervalSec int, err error) {

3
weed/server/filer_server_handlers.go

@ -1,6 +1,7 @@
package weed_server
import (
"github.com/chrislusf/seaweedfs/weed/util"
"net/http"
"time"
@ -8,6 +9,7 @@ import (
)
func (fs *FilerServer) filerHandler(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Server", "SeaweedFS Filer "+util.VERSION)
start := time.Now()
switch r.Method {
case "GET":
@ -34,6 +36,7 @@ func (fs *FilerServer) filerHandler(w http.ResponseWriter, r *http.Request) {
}
func (fs *FilerServer) readonlyFilerHandler(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Server", "SeaweedFS Filer "+util.VERSION)
start := time.Now()
switch r.Method {
case "GET":

2
weed/server/master_grpc_server.go

@ -71,7 +71,7 @@ func (ms *MasterServer) SendHeartbeat(stream master_pb.Seaweed_SendHeartbeatServ
int64(heartbeat.MaxVolumeCount))
glog.V(0).Infof("added volume server %v:%d", heartbeat.GetIp(), heartbeat.GetPort())
if err := stream.Send(&master_pb.HeartbeatResponse{
VolumeSizeLimit: uint64(ms.option.VolumeSizeLimitMB) * 1024 * 1024,
VolumeSizeLimit: uint64(ms.option.VolumeSizeLimitMB) * 1024 * 1024,
}); err != nil {
glog.Warningf("SendHeartbeat.Send volume size to %s:%d %v", dn.Ip, dn.Port, err)
return err

30
weed/server/volume_grpc_client_to_master.go

@ -24,21 +24,25 @@ func (vs *VolumeServer) GetMaster() string {
}
func (vs *VolumeServer) checkWithMaster() (err error) {
for _, master := range vs.SeedMasterNodes {
err = operation.WithMasterServerClient(master, vs.grpcDialOption, func(masterClient master_pb.SeaweedClient) error {
resp, err := masterClient.GetMasterConfiguration(context.Background(), &master_pb.GetMasterConfigurationRequest{})
if err != nil {
return fmt.Errorf("get master %s configuration: %v", master, err)
isConnected := false
for !isConnected {
for _, master := range vs.SeedMasterNodes {
err = operation.WithMasterServerClient(master, vs.grpcDialOption, func(masterClient master_pb.SeaweedClient) error {
resp, err := masterClient.GetMasterConfiguration(context.Background(), &master_pb.GetMasterConfigurationRequest{})
if err != nil {
return fmt.Errorf("get master %s configuration: %v", master, err)
}
vs.metricsAddress, vs.metricsIntervalSec = resp.MetricsAddress, int(resp.MetricsIntervalSeconds)
backend.LoadFromPbStorageBackends(resp.StorageBackends)
return nil
})
if err == nil {
return
} else {
glog.V(0).Infof("checkWithMaster %s: %v", master, err)
}
vs.MetricsAddress, vs.MetricsIntervalSec = resp.MetricsAddress, int(resp.MetricsIntervalSeconds)
backend.LoadFromPbStorageBackends(resp.StorageBackends)
return nil
})
if err == nil {
return
} else {
glog.V(0).Infof("checkWithMaster %s: %v", master, err)
}
time.Sleep(1790 * time.Millisecond)
}
return
}

7
weed/server/volume_server.go

@ -28,8 +28,8 @@ type VolumeServer struct {
FixJpgOrientation bool
ReadRedirect bool
compactionBytePerSecond int64
MetricsAddress string
MetricsIntervalSec int
metricsAddress string
metricsIntervalSec int
fileSizeLimitBytes int64
isHeartbeating bool
stopChan chan bool
@ -97,8 +97,7 @@ func NewVolumeServer(adminMux, publicMux *http.ServeMux, ip string,
}
go vs.heartbeat()
hostAddress := fmt.Sprintf("%s:%d", ip, port)
go stats.LoopPushingMetric("volumeServer", hostAddress, stats.VolumeServerGather, vs.MetricsAddress, vs.MetricsIntervalSec)
go stats.LoopPushingMetric("volumeServer", fmt.Sprintf("%s:%d", ip, port), stats.VolumeServerGather, vs.metricsAddress, vs.metricsIntervalSec)
return vs
}

3
weed/server/volume_server_handlers.go

@ -1,6 +1,7 @@
package weed_server
import (
"github.com/chrislusf/seaweedfs/weed/util"
"net/http"
"strings"
@ -25,6 +26,7 @@ security settings:
*/
func (vs *VolumeServer) privateStoreHandler(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Server", "SeaweedFS Volume "+util.VERSION)
switch r.Method {
case "GET", "HEAD":
stats.ReadRequest()
@ -39,6 +41,7 @@ func (vs *VolumeServer) privateStoreHandler(w http.ResponseWriter, r *http.Reque
}
func (vs *VolumeServer) publicReadOnlyHandler(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Server", "SeaweedFS Volume "+util.VERSION)
switch r.Method {
case "GET":
stats.ReadRequest()

2
weed/server/volume_server_handlers_admin.go

@ -10,6 +10,7 @@ import (
)
func (vs *VolumeServer) statusHandler(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Server", "SeaweedFS Volume "+util.VERSION)
m := make(map[string]interface{})
m["Version"] = util.Version()
var ds []*volume_server_pb.DiskStatus
@ -24,6 +25,7 @@ func (vs *VolumeServer) statusHandler(w http.ResponseWriter, r *http.Request) {
}
func (vs *VolumeServer) statsDiskHandler(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Server", "SeaweedFS Volume "+util.VERSION)
m := make(map[string]interface{})
m["Version"] = util.Version()
var ds []*volume_server_pb.DiskStatus

1
weed/server/volume_server_handlers_ui.go

@ -13,6 +13,7 @@ import (
)
func (vs *VolumeServer) uiStatusHandler(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Server", "SeaweedFS Volume "+util.VERSION)
infos := make(map[string]interface{})
infos["Up Time"] = time.Now().Sub(startTime).String()
var ds []*volume_server_pb.DiskStatus

21
weed/shell/command_collection_delete.go

@ -2,6 +2,7 @@ package shell
import (
"context"
"flag"
"fmt"
"github.com/chrislusf/seaweedfs/weed/pb/master_pb"
"io"
@ -21,22 +22,32 @@ func (c *commandCollectionDelete) Name() string {
func (c *commandCollectionDelete) Help() string {
return `delete specified collection
collection.delete <collection_name>
collection.delete -collection <collection_name> -force
`
}
func (c *commandCollectionDelete) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) {
if len(args) == 0 {
if err = commandEnv.confirmIsLocked(); err != nil {
return
}
colDeleteCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError)
collectionName := colDeleteCommand.String("collection", "", "collection to delete")
applyBalancing := colDeleteCommand.Bool("force", false, "apply the collection")
if err = colDeleteCommand.Parse(args); err != nil {
return nil
}
collectionName := args[0]
if !*applyBalancing {
fmt.Fprintf(writer, "collection %s will be deleted. Use -force to apply the change.\n", *collectionName)
return nil
}
err = commandEnv.MasterClient.WithClient(func(client master_pb.SeaweedClient) error {
_, err = client.CollectionDelete(context.Background(), &master_pb.CollectionDeleteRequest{
Name: collectionName,
Name: *collectionName,
})
return err
})
@ -44,7 +55,7 @@ func (c *commandCollectionDelete) Do(args []string, commandEnv *CommandEnv, writ
return
}
fmt.Fprintf(writer, "collection %s is deleted.\n", collectionName)
fmt.Fprintf(writer, "collection %s is deleted.\n", *collectionName)
return nil
}

2
weed/shell/command_volume_configure_replication.go

@ -28,7 +28,7 @@ func (c *commandVolumeConfigureReplication) Name() string {
func (c *commandVolumeConfigureReplication) Help() string {
return `change volume replication value
This command changes a volume replication value. It should be followed by volume.fix.replication.
This command changes a volume replication value. It should be followed by "volume.fix.replication".
`
}

20
weed/shell/command_volume_copy.go

@ -1,6 +1,7 @@
package shell
import (
"flag"
"fmt"
"io"
@ -21,7 +22,7 @@ func (c *commandVolumeCopy) Name() string {
func (c *commandVolumeCopy) Help() string {
return `copy a volume from one volume server to another volume server
volume.copy <source volume server host:port> <target volume server host:port> <volume id>
volume.copy -source <source volume server host:port> -target <target volume server host:port> -volumeId <volume id>
This command copies a volume from one volume server to another volume server.
Usually you will want to unmount the volume first before copying.
@ -35,16 +36,17 @@ func (c *commandVolumeCopy) Do(args []string, commandEnv *CommandEnv, writer io.
return
}
if len(args) != 3 {
fmt.Fprintf(writer, "received args: %+v\n", args)
return fmt.Errorf("need 3 args of <source volume server host:port> <target volume server host:port> <volume id>")
volCopyCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError)
volumeIdInt := volCopyCommand.Int("volumeId", 0, "the volume id")
sourceNodeStr := volCopyCommand.String("source", "", "the source volume server <host>:<port>")
targetNodeStr := volCopyCommand.String("target", "", "the target volume server <host>:<port>")
if err = volCopyCommand.Parse(args); err != nil {
return nil
}
sourceVolumeServer, targetVolumeServer, volumeIdString := args[0], args[1], args[2]
volumeId, err := needle.NewVolumeId(volumeIdString)
if err != nil {
return fmt.Errorf("wrong volume id format %s: %v", volumeId, err)
}
sourceVolumeServer, targetVolumeServer := *sourceNodeStr, *targetNodeStr
volumeId := needle.VolumeId(*volumeIdInt)
if sourceVolumeServer == targetVolumeServer {
return fmt.Errorf("source and target volume servers are the same!")

20
weed/shell/command_volume_delete.go

@ -1,7 +1,7 @@
package shell
import (
"fmt"
"flag"
"io"
"github.com/chrislusf/seaweedfs/weed/storage/needle"
@ -21,7 +21,7 @@ func (c *commandVolumeDelete) Name() string {
func (c *commandVolumeDelete) Help() string {
return `delete a live volume from one volume server
volume.delete <volume server host:port> <volume id>
volume.delete -node <volume server host:port> -volumeId <volume id>
This command deletes a volume from one volume server.
@ -34,16 +34,16 @@ func (c *commandVolumeDelete) Do(args []string, commandEnv *CommandEnv, writer i
return
}
if len(args) != 2 {
fmt.Fprintf(writer, "received args: %+v\n", args)
return fmt.Errorf("need 2 args of <volume server host:port> <volume id>")
volDeleteCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError)
volumeIdInt := volDeleteCommand.Int("volumeId", 0, "the volume id")
nodeStr := volDeleteCommand.String("node", "", "the volume server <host>:<port>")
if err = volDeleteCommand.Parse(args); err != nil {
return nil
}
sourceVolumeServer, volumeIdString := args[0], args[1]
volumeId, err := needle.NewVolumeId(volumeIdString)
if err != nil {
return fmt.Errorf("wrong volume id format %s: %v", volumeId, err)
}
sourceVolumeServer := *nodeStr
volumeId := needle.VolumeId(*volumeIdInt)
return deleteVolume(commandEnv.option.GrpcDialOption, volumeId, sourceVolumeServer)

10
weed/shell/command_volume_fix_replication.go

@ -2,6 +2,7 @@ package shell
import (
"context"
"flag"
"fmt"
"github.com/chrislusf/seaweedfs/weed/storage/needle"
"io"
@ -50,11 +51,14 @@ func (c *commandVolumeFixReplication) Do(args []string, commandEnv *CommandEnv,
return
}
takeAction := true
if len(args) > 0 && args[0] == "-n" {
takeAction = false
volFixReplicationCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError)
skipChange := volFixReplicationCommand.Bool("n", false, "skip the changes")
if err = volFixReplicationCommand.Parse(args); err != nil {
return nil
}
takeAction := !*skipChange
var resp *master_pb.VolumeListResponse
err = commandEnv.MasterClient.WithClient(func(client master_pb.SeaweedClient) error {
resp, err = client.VolumeList(context.Background(), &master_pb.VolumeListRequest{})

20
weed/shell/command_volume_mount.go

@ -2,7 +2,7 @@ package shell
import (
"context"
"fmt"
"flag"
"io"
"github.com/chrislusf/seaweedfs/weed/operation"
@ -25,7 +25,7 @@ func (c *commandVolumeMount) Name() string {
func (c *commandVolumeMount) Help() string {
return `mount a volume from one volume server
volume.mount <volume server host:port> <volume id>
volume.mount -node <volume server host:port> -volumeId <volume id>
This command mounts a volume from one volume server.
@ -38,16 +38,16 @@ func (c *commandVolumeMount) Do(args []string, commandEnv *CommandEnv, writer io
return
}
if len(args) != 2 {
fmt.Fprintf(writer, "received args: %+v\n", args)
return fmt.Errorf("need 2 args of <volume server host:port> <volume id>")
volMountCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError)
volumeIdInt := volMountCommand.Int("volumeId", 0, "the volume id")
nodeStr := volMountCommand.String("node", "", "the volume server <host>:<port>")
if err = volMountCommand.Parse(args); err != nil {
return nil
}
sourceVolumeServer, volumeIdString := args[0], args[1]
volumeId, err := needle.NewVolumeId(volumeIdString)
if err != nil {
return fmt.Errorf("wrong volume id format %s: %v", volumeId, err)
}
sourceVolumeServer := *nodeStr
volumeId := needle.VolumeId(*volumeIdInt)
return mountVolume(commandEnv.option.GrpcDialOption, volumeId, sourceVolumeServer)

20
weed/shell/command_volume_move.go

@ -2,6 +2,7 @@ package shell
import (
"context"
"flag"
"fmt"
"io"
"log"
@ -27,7 +28,7 @@ func (c *commandVolumeMove) Name() string {
func (c *commandVolumeMove) Help() string {
return `move a live volume from one volume server to another volume server
volume.move <source volume server host:port> <target volume server host:port> <volume id>
volume.move -source <source volume server host:port> -target <target volume server host:port> -volumeId <volume id>
This command moves a live volume from one volume server to another volume server. Here are the steps:
@ -48,16 +49,17 @@ func (c *commandVolumeMove) Do(args []string, commandEnv *CommandEnv, writer io.
return
}
if len(args) != 3 {
fmt.Fprintf(writer, "received args: %+v\n", args)
return fmt.Errorf("need 3 args of <source volume server host:port> <target volume server host:port> <volume id>")
volMoveCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError)
volumeIdInt := volMoveCommand.Int("volumeId", 0, "the volume id")
sourceNodeStr := volMoveCommand.String("source", "", "the source volume server <host>:<port>")
targetNodeStr := volMoveCommand.String("target", "", "the target volume server <host>:<port>")
if err = volMoveCommand.Parse(args); err != nil {
return nil
}
sourceVolumeServer, targetVolumeServer, volumeIdString := args[0], args[1], args[2]
volumeId, err := needle.NewVolumeId(volumeIdString)
if err != nil {
return fmt.Errorf("wrong volume id format %s: %v", volumeId, err)
}
sourceVolumeServer, targetVolumeServer := *sourceNodeStr, *targetNodeStr
volumeId := needle.VolumeId(*volumeIdInt)
if sourceVolumeServer == targetVolumeServer {
return fmt.Errorf("source and target volume servers are the same!")

20
weed/shell/command_volume_unmount.go

@ -2,7 +2,7 @@ package shell
import (
"context"
"fmt"
"flag"
"io"
"github.com/chrislusf/seaweedfs/weed/operation"
@@ -25,7 +25,7 @@ func (c *commandVolumeUnmount) Name() string {
func (c *commandVolumeUnmount) Help() string {
return `unmount a volume from one volume server
volume.unmount <volume server host:port> <volume id>
volume.unmount -node <volume server host:port> -volumeId <volume id>
This command unmounts a volume from one volume server.
@@ -38,16 +38,16 @@ func (c *commandVolumeUnmount) Do(args []string, commandEnv *CommandEnv, writer
return
}
if len(args) != 2 {
fmt.Fprintf(writer, "received args: %+v\n", args)
return fmt.Errorf("need 2 args of <volume server host:port> <volume id>")
volUnmountCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError)
volumeIdInt := volUnmountCommand.Int("volumeId", 0, "the volume id")
nodeStr := volUnmountCommand.String("node", "", "the volume server <host>:<port>")
if err = volUnmountCommand.Parse(args); err != nil {
return nil
}
sourceVolumeServer, volumeIdString := args[0], args[1]
volumeId, err := needle.NewVolumeId(volumeIdString)
if err != nil {
return fmt.Errorf("wrong volume id format %s: %v", volumeId, err)
}
sourceVolumeServer := *nodeStr
volumeId := needle.VolumeId(*volumeIdInt)
return unmountVolume(commandEnv.option.GrpcDialOption, volumeId, sourceVolumeServer)

21
weed/stats/metrics.go

@@ -15,6 +15,7 @@ import (
var (
FilerGather = prometheus.NewRegistry()
VolumeServerGather = prometheus.NewRegistry()
S3Gather = prometheus.NewRegistry()
FilerRequestCounter = prometheus.NewCounterVec(
prometheus.CounterOpts{
@@ -90,6 +91,22 @@ var (
Name: "total_disk_size",
Help: "Actual disk size used by volumes.",
}, []string{"collection", "type"})
S3RequestCounter = prometheus.NewCounterVec(
prometheus.CounterOpts{
Namespace: "SeaweedFS",
Subsystem: "s3",
Name: "request_total",
Help: "Counter of s3 requests.",
}, []string{"type"})
S3RequestHistogram = prometheus.NewHistogramVec(
prometheus.HistogramOpts{
Namespace: "SeaweedFS",
Subsystem: "s3",
Name: "request_seconds",
Help: "Bucketed histogram of s3 request processing time.",
Buckets: prometheus.ExponentialBuckets(0.0001, 2, 24),
}, []string{"type"})
)
func init() {
@@ -106,6 +123,8 @@ func init() {
VolumeServerGather.MustRegister(VolumeServerMaxVolumeCounter)
VolumeServerGather.MustRegister(VolumeServerDiskSizeGauge)
S3Gather.MustRegister(S3RequestCounter)
S3Gather.MustRegister(S3RequestHistogram)
}
func LoopPushingMetric(name, instance string, gatherer *prometheus.Registry, addr string, intervalSeconds int) {
@@ -114,6 +133,8 @@ func LoopPushingMetric(name, instance string, gatherer *prometheus.Registry, add
return
}
glog.V(0).Infof("%s server sends metrics to %s every %d seconds", name, addr, intervalSeconds)
pusher := push.New(addr, name).Gatherer(gatherer).Grouping("instance", instance)
for {
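For context, a compact, self-contained sketch of the S3 metrics wiring added above: a dedicated registry, a counter plus an exponential-bucket histogram, and one push to a gateway. The metric names and bucket parameters follow the diff; the gateway address, instance label, and single-shot push are hypothetical (the real code pushes on a configured interval):
package main

import (
	"log"
	"time"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/push"
)

var (
	// A registry dedicated to S3 metrics, mirroring S3Gather above.
	s3Gather = prometheus.NewRegistry()

	s3RequestCounter = prometheus.NewCounterVec(prometheus.CounterOpts{
		Namespace: "SeaweedFS",
		Subsystem: "s3",
		Name:      "request_total",
		Help:      "Counter of s3 requests.",
	}, []string{"type"})

	s3RequestHistogram = prometheus.NewHistogramVec(prometheus.HistogramOpts{
		Namespace: "SeaweedFS",
		Subsystem: "s3",
		Name:      "request_seconds",
		Help:      "Bucketed histogram of s3 request processing time.",
		Buckets:   prometheus.ExponentialBuckets(0.0001, 2, 24),
	}, []string{"type"})
)

func main() {
	s3Gather.MustRegister(s3RequestCounter, s3RequestHistogram)

	// Record one hypothetical GET request.
	start := time.Now()
	s3RequestCounter.WithLabelValues("GET").Inc()
	s3RequestHistogram.WithLabelValues("GET").Observe(time.Since(start).Seconds())

	// One push to a (hypothetical) push gateway address.
	pusher := push.New("http://localhost:9091", "s3").
		Gatherer(s3Gather).
		Grouping("instance", "example-host")
	if err := pusher.Push(); err != nil {
		log.Printf("push metrics: %v", err)
	}
}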

20
weed/topology/data_node.go

@@ -44,6 +44,10 @@ func (dn *DataNode) String() string {
func (dn *DataNode) AddOrUpdateVolume(v storage.VolumeInfo) (isNew, isChangedRO bool) {
dn.Lock()
defer dn.Unlock()
return dn.doAddOrUpdateVolume(v)
}
func (dn *DataNode) doAddOrUpdateVolume(v storage.VolumeInfo) (isNew, isChangedRO bool) {
if oldV, ok := dn.volumes[v.Id]; !ok {
dn.volumes[v.Id] = v
dn.UpAdjustVolumeCountDelta(1)
@@ -71,11 +75,15 @@ func (dn *DataNode) AddOrUpdateVolume(v storage.VolumeInfo) (isNew, isChangedRO
}
func (dn *DataNode) UpdateVolumes(actualVolumes []storage.VolumeInfo) (newVolumes, deletedVolumes, changeRO []storage.VolumeInfo) {
actualVolumeMap := make(map[needle.VolumeId]storage.VolumeInfo)
for _, v := range actualVolumes {
actualVolumeMap[v.Id] = v
}
dn.Lock()
defer dn.Unlock()
for vid, v := range dn.volumes {
if _, ok := actualVolumeMap[vid]; !ok {
glog.V(0).Infoln("Deleting volume id:", vid)
@@ -90,9 +98,8 @@ func (dn *DataNode) UpdateVolumes(actualVolumes []storage.VolumeInfo) (newVolume
}
}
}
dn.Unlock()
for _, v := range actualVolumes {
isNew, isChangedRO := dn.AddOrUpdateVolume(v)
isNew, isChangedRO := dn.doAddOrUpdateVolume(v)
if isNew {
newVolumes = append(newVolumes, v)
}
@@ -103,8 +110,10 @@ func (dn *DataNode) UpdateVolumes(actualVolumes []storage.VolumeInfo) (newVolume
return
}
func (dn *DataNode) DeltaUpdateVolumes(newlVolumes, deletedVolumes []storage.VolumeInfo) {
func (dn *DataNode) DeltaUpdateVolumes(newVolumes, deletedVolumes []storage.VolumeInfo) {
dn.Lock()
defer dn.Unlock()
for _, v := range deletedVolumes {
delete(dn.volumes, v.Id)
dn.UpAdjustVolumeCountDelta(-1)
@@ -115,9 +124,8 @@ func (dn *DataNode) DeltaUpdateVolumes(newlVolumes, deletedVolumes []storage.Vol
dn.UpAdjustActiveVolumeCountDelta(-1)
}
}
dn.Unlock()
for _, v := range newlVolumes {
dn.AddOrUpdateVolume(v)
for _, v := range newVolumes {
dn.doAddOrUpdateVolume(v)
}
return
}
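For context, the data_node change splits the exported method from an unexported helper that assumes the caller already holds the lock, so batch updates can lock once for the whole pass instead of unlocking midway and re-locking per volume (Go mutexes are not reentrant, so calling the exported method while holding the lock would deadlock). A generic, runnable sketch of that pattern with made-up types:
package main

import (
	"fmt"
	"sync"
)

// node is an illustrative stand-in for topology.DataNode.
type node struct {
	sync.Mutex
	volumes map[int]string
}

// AddOrUpdateVolume is the exported entry point: it takes the lock itself.
func (n *node) AddOrUpdateVolume(id int, info string) {
	n.Lock()
	defer n.Unlock()
	n.doAddOrUpdateVolume(id, info)
}

// doAddOrUpdateVolume assumes the caller already holds n's lock.
func (n *node) doAddOrUpdateVolume(id int, info string) {
	n.volumes[id] = info
}

// UpdateVolumes holds the lock once for the whole batch and calls the
// lock-assuming helper, instead of re-locking per volume.
func (n *node) UpdateVolumes(actual map[int]string) {
	n.Lock()
	defer n.Unlock()
	for id := range n.volumes {
		if _, ok := actual[id]; !ok {
			delete(n.volumes, id)
		}
	}
	for id, info := range actual {
		n.doAddOrUpdateVolume(id, info)
	}
}

func main() {
	n := &node{volumes: map[int]string{}}
	n.AddOrUpdateVolume(1, "collection-a")
	n.UpdateVolumes(map[int]string{2: "collection-b"})
	fmt.Println(n.volumes)
}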

2
weed/util/constants.go

@@ -5,7 +5,7 @@ import (
)
var (
VERSION = fmt.Sprintf("%s %d.%d", sizeLimit, 1, 99)
VERSION = fmt.Sprintf("%s %d.%02d", sizeLimit, 2, 00)
COMMIT = ""
)
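The only change here is the format verb: %02d zero-pads the minor version, so the string reads "2.00" instead of "2.0". A tiny illustration (the sizeLimit value is a placeholder, not taken from this repo):
package main

import "fmt"

func main() {
	sizeLimit := "30GB" // placeholder; the real prefix depends on the build
	fmt.Printf("%s %d.%d\n", sizeLimit, 1, 99)   // 30GB 1.99
	fmt.Printf("%s %d.%02d\n", sizeLimit, 2, 00) // 30GB 2.00
}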
