Browse Source

Merge pull request #1 from chrislusf/master

volume 完成之后合并
pull/1287/head
11062055 6 years ago
committed by GitHub
parent
commit
7bd8f621ea
No known key found for this signature in database GPG Key ID: 4AEE18F83AFDEB23
  1. 6
      README.md
  2. 12
      go.mod
  3. 18
      go.sum
  4. 2
      k8s/seaweedfs/Chart.yaml
  5. 2
      k8s/seaweedfs/values.yaml
  6. 8
      other/java/client/src/main/proto/filer.proto
  7. 75
      unmaintained/see_log_entry/see_log_entry.go
  8. 12
      weed/command/mount.go
  9. 6
      weed/command/mount_std.go
  10. 6
      weed/command/msg_broker.go
  11. 17
      weed/command/scaffold.go
  12. 15
      weed/command/shell.go
  13. 3
      weed/command/volume.go
  14. 2
      weed/command/watch.go
  15. 7
      weed/command/webdav.go
  16. 1
      weed/filer2/entry.go
  17. 6
      weed/filer2/entry_codec.go
  18. 16
      weed/filer2/filechunks.go
  19. 14
      weed/filer2/filer.go
  20. 19
      weed/filer2/filer_buckets.go
  21. 24
      weed/filer2/filer_notify.go
  22. 47
      weed/filer2/filer_notify_append.go
  23. 4
      weed/filer2/leveldb/leveldb_store_test.go
  24. 4
      weed/filer2/leveldb2/leveldb2_store_test.go
  25. 12
      weed/filer2/reader_at.go
  26. 42
      weed/filer2/redis2/redis_cluster_store.go
  27. 36
      weed/filer2/redis2/redis_store.go
  28. 162
      weed/filer2/redis2/universal_redis_store.go
  29. 6
      weed/filer2/topics.go
  30. 4
      weed/filesys/dir.go
  31. 18
      weed/filesys/dirty_page.go
  32. 20
      weed/filesys/filehandle.go
  33. 32
      weed/filesys/fscache.go
  34. 72
      weed/filesys/wfs.go
  35. 2
      weed/images/orientation.go
  36. 4
      weed/operation/upload_content.go
  37. 8
      weed/pb/filer.proto
  38. 331
      weed/pb/filer_pb/filer.pb.go
  39. 5
      weed/pb/shared_values.go
  40. 4
      weed/replication/sink/filersink/filer_sink.go
  41. 9
      weed/replication/sink/s3sink/s3_sink.go
  42. 4
      weed/replication/sub/notification_aws_sqs.go
  43. 4
      weed/s3api/filer_multipart.go
  44. 2
      weed/s3api/s3api_objects_list_handlers.go
  45. 3
      weed/server/filer_grpc_server.go
  46. 7
      weed/server/filer_grpc_server_listen.go
  47. 28
      weed/server/filer_grpc_server_rename.go
  48. 12
      weed/server/filer_server.go
  49. 21
      weed/server/filer_server_handlers_read.go
  50. 42
      weed/server/filer_server_handlers_write.go
  51. 16
      weed/server/filer_server_handlers_write_autochunk.go
  52. 4
      weed/server/filer_server_handlers_write_cipher.go
  53. 13
      weed/server/master_grpc_server.go
  54. 11
      weed/server/master_server.go
  55. 2
      weed/server/volume_grpc_tail.go
  56. 8
      weed/server/volume_server_handlers_write.go
  57. 13
      weed/server/webdav_server.go
  58. 2
      weed/shell/commands.go
  59. 4
      weed/storage/backend/memory_map/memory_map_backend.go
  60. 7
      weed/storage/backend/volume_create.go
  61. 7
      weed/storage/backend/volume_create_linux.go
  62. 7
      weed/storage/backend/volume_create_windows.go
  63. 6
      weed/storage/store.go
  64. 2
      weed/storage/volume_loading.go
  65. 7
      weed/storage/volume_read_write.go
  66. 4
      weed/storage/volume_vacuum.go
  67. 2
      weed/storage/volume_vacuum_test.go
  68. 10
      weed/topology/store_replicate.go
  69. 7
      weed/util/bytes.go
  70. 134
      weed/util/chunk_cache/chunk_cache.go
  71. 12
      weed/util/chunk_cache/chunk_cache_in_memory.go
  72. 145
      weed/util/chunk_cache/chunk_cache_on_disk.go
  73. 58
      weed/util/chunk_cache/chunk_cache_on_disk_test.go
  74. 2
      weed/util/constants.go
  75. 52
      weed/util/log_buffer/log_buffer.go
  76. 40
      weed/util/log_buffer/sealed_buffer.go
  77. 25
      weed/util/network.go
  78. 16
      weed/util/parse.go
  79. 26
      weed/wdclient/masterclient.go

6
README.md

@ -450,6 +450,12 @@ go get github.com/chrislusf/seaweedfs/weed
Once this is done, you will find the executable "weed" in your `$GOPATH/bin` directory Once this is done, you will find the executable "weed" in your `$GOPATH/bin` directory
Note:
* If you got into this problem, try to `rm -Rf $GOPATH/src/go.etcd.io/etcd/vendor/golang.org/x/net/trace` and build again.
```
panic: /debug/requests is already registered. You may have two independent copies of golang.org/x/net/trace in your binary, trying to maintain separate state. This may involve a vendored copy of golang.org/x/net/trace.
```
Step 4: after you modify your code locally, you could start a local build by calling `go install` under Step 4: after you modify your code locally, you could start a local build by calling `go install` under
``` ```

12
go.mod

@ -15,7 +15,7 @@ require (
github.com/coreos/go-semver v0.3.0 // indirect github.com/coreos/go-semver v0.3.0 // indirect
github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f // indirect github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f // indirect
github.com/dgrijalva/jwt-go v3.2.0+incompatible github.com/dgrijalva/jwt-go v3.2.0+incompatible
github.com/disintegration/imaging v1.6.1
github.com/disintegration/imaging v1.6.2
github.com/dustin/go-humanize v1.0.0 github.com/dustin/go-humanize v1.0.0
github.com/eapache/go-resiliency v1.2.0 // indirect github.com/eapache/go-resiliency v1.2.0 // indirect
github.com/facebookgo/clock v0.0.0-20150410010913-600d898af40a github.com/facebookgo/clock v0.0.0-20150410010913-600d898af40a
@ -25,7 +25,7 @@ require (
github.com/facebookgo/subset v0.0.0-20200203212716-c811ad88dec4 // indirect github.com/facebookgo/subset v0.0.0-20200203212716-c811ad88dec4 // indirect
github.com/frankban/quicktest v1.7.2 // indirect github.com/frankban/quicktest v1.7.2 // indirect
github.com/gabriel-vasile/mimetype v1.0.0 github.com/gabriel-vasile/mimetype v1.0.0
github.com/go-redis/redis v6.15.2+incompatible
github.com/go-redis/redis v6.15.7+incompatible
github.com/go-sql-driver/mysql v1.4.1 github.com/go-sql-driver/mysql v1.4.1
github.com/gocql/gocql v0.0.0-20190829130954-e163eff7a8c6 github.com/gocql/gocql v0.0.0-20190829130954-e163eff7a8c6
github.com/gogo/protobuf v1.2.2-0.20190730201129-28a6bbf47e48 // indirect github.com/gogo/protobuf v1.2.2-0.20190730201129-28a6bbf47e48 // indirect
@ -38,7 +38,6 @@ require (
github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4 // indirect github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4 // indirect
github.com/grpc-ecosystem/grpc-gateway v1.11.0 // indirect github.com/grpc-ecosystem/grpc-gateway v1.11.0 // indirect
github.com/hashicorp/golang-lru v0.5.3 // indirect github.com/hashicorp/golang-lru v0.5.3 // indirect
github.com/jacobsa/daemonize v0.0.0-20160101105449-e460293e890f
github.com/jcmturner/gofork v1.0.0 // indirect github.com/jcmturner/gofork v1.0.0 // indirect
github.com/karlseguin/ccache v2.0.3+incompatible github.com/karlseguin/ccache v2.0.3+incompatible
github.com/karlseguin/expect v1.0.1 // indirect github.com/karlseguin/expect v1.0.1 // indirect
@ -58,12 +57,11 @@ require (
github.com/peterh/liner v1.1.0 github.com/peterh/liner v1.1.0
github.com/pierrec/lz4 v2.2.7+incompatible // indirect github.com/pierrec/lz4 v2.2.7+incompatible // indirect
github.com/prometheus/client_golang v1.1.0 github.com/prometheus/client_golang v1.1.0
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4 // indirect
github.com/prometheus/procfs v0.0.4 // indirect github.com/prometheus/procfs v0.0.4 // indirect
github.com/rakyll/statik v0.1.6
github.com/rakyll/statik v0.1.7
github.com/rcrowley/go-metrics v0.0.0-20190826022208-cac0b30c2563 // indirect github.com/rcrowley/go-metrics v0.0.0-20190826022208-cac0b30c2563 // indirect
github.com/rwcarlsen/goexif v0.0.0-20190401172101-9e8deecbddbd
github.com/seaweedfs/fuse v0.0.0-20190510212405-310228904eff github.com/seaweedfs/fuse v0.0.0-20190510212405-310228904eff
github.com/seaweedfs/goexif v1.0.2
github.com/sirupsen/logrus v1.4.2 // indirect github.com/sirupsen/logrus v1.4.2 // indirect
github.com/spaolacci/murmur3 v1.1.0 // indirect github.com/spaolacci/murmur3 v1.1.0 // indirect
github.com/spf13/afero v1.2.2 // indirect github.com/spf13/afero v1.2.2 // indirect
@ -84,7 +82,7 @@ require (
gocloud.dev/pubsub/natspubsub v0.16.0 gocloud.dev/pubsub/natspubsub v0.16.0
gocloud.dev/pubsub/rabbitpubsub v0.16.0 gocloud.dev/pubsub/rabbitpubsub v0.16.0
golang.org/x/crypto v0.0.0-20190911031432-227b76d455e7 // indirect golang.org/x/crypto v0.0.0-20190911031432-227b76d455e7 // indirect
golang.org/x/image v0.0.0-20190829233526-b3c06291d021 // indirect
golang.org/x/image v0.0.0-20200119044424-58c23975cae1 // indirect
golang.org/x/net v0.0.0-20190909003024-a7b16738d86b golang.org/x/net v0.0.0-20190909003024-a7b16738d86b
golang.org/x/sys v0.0.0-20190910064555-bbd175535a8b golang.org/x/sys v0.0.0-20190910064555-bbd175535a8b
golang.org/x/tools v0.0.0-20190911022129-16c5e0f7d110 golang.org/x/tools v0.0.0-20190911022129-16c5e0f7d110

18
go.sum

@ -59,6 +59,7 @@ github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869 h1:DDGfHa7BWjL4Yn
github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4= github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4=
github.com/census-instrumentation/opencensus-proto v0.2.0 h1:LzQXZOgg4CQfE6bFvXGM30YZL1WW/M337pXml+GrcZ4= github.com/census-instrumentation/opencensus-proto v0.2.0 h1:LzQXZOgg4CQfE6bFvXGM30YZL1WW/M337pXml+GrcZ4=
github.com/census-instrumentation/opencensus-proto v0.2.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/census-instrumentation/opencensus-proto v0.2.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/census-instrumentation/opencensus-proto v0.2.1 h1:glEXhBS5PSLLv4IXzLA5yPRVX4bilULVyxxbrfOtDAk=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
github.com/chrislusf/raft v0.0.0-20190225081310-10d6e2182d92 h1:lM9SFsh0EPXkyJyrTJqLZPAIJBtNFP6LNkYXu2MnSZI= github.com/chrislusf/raft v0.0.0-20190225081310-10d6e2182d92 h1:lM9SFsh0EPXkyJyrTJqLZPAIJBtNFP6LNkYXu2MnSZI=
@ -92,6 +93,8 @@ github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8
github.com/dimchansky/utfbom v1.1.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQvIirEdv+8= github.com/dimchansky/utfbom v1.1.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQvIirEdv+8=
github.com/disintegration/imaging v1.6.1 h1:JnBbK6ECIZb1NsWIikP9pd8gIlTIRx7fuDNpU9fsxOE= github.com/disintegration/imaging v1.6.1 h1:JnBbK6ECIZb1NsWIikP9pd8gIlTIRx7fuDNpU9fsxOE=
github.com/disintegration/imaging v1.6.1/go.mod h1:xuIt+sRxDFrHS0drzXUlCJthkJ8k7lkkUojDSR247MQ= github.com/disintegration/imaging v1.6.1/go.mod h1:xuIt+sRxDFrHS0drzXUlCJthkJ8k7lkkUojDSR247MQ=
github.com/disintegration/imaging v1.6.2 h1:w1LecBlG2Lnp8B3jk5zSuNqd7b4DXhcjwek1ei82L+c=
github.com/disintegration/imaging v1.6.2/go.mod h1:44/5580QXChDfwIclfc/PCwrr44amcmDAg8hxG0Ewe4=
github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo= github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo=
github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
github.com/eapache/go-resiliency v1.1.0 h1:1NtRmCAqadE2FN4ZcN6g90TP3uk8cg9rn9eNK2197aU= github.com/eapache/go-resiliency v1.1.0 h1:1NtRmCAqadE2FN4ZcN6g90TP3uk8cg9rn9eNK2197aU=
@ -129,8 +132,8 @@ github.com/go-ini/ini v1.25.4/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3I
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
github.com/go-redis/redis v6.15.2+incompatible h1:9SpNVG76gr6InJGxoZ6IuuxaCOQwDAhzyXg+Bs+0Sb4=
github.com/go-redis/redis v6.15.2+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8wamY7mA7PouImQ2Jvg6kA=
github.com/go-redis/redis v6.15.7+incompatible h1:3skhDh95XQMpnqeqNftPkQD9jL9e5e36z/1SUm6dy1U=
github.com/go-redis/redis v6.15.7+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8wamY7mA7PouImQ2Jvg6kA=
github.com/go-sql-driver/mysql v1.4.1 h1:g24URVg0OFbNUTx9qqY1IRZ9D9z3iPyi5zKhQZpNwpA= github.com/go-sql-driver/mysql v1.4.1 h1:g24URVg0OFbNUTx9qqY1IRZ9D9z3iPyi5zKhQZpNwpA=
github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
@ -217,8 +220,6 @@ github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/jacobsa/daemonize v0.0.0-20160101105449-e460293e890f h1:X+tnaqoCcBgAwSTJtoYW6p0qKiuPyMfofEHEFUf2kdU=
github.com/jacobsa/daemonize v0.0.0-20160101105449-e460293e890f/go.mod h1:Ip4fOwzCrnDVuluHBd7FXIMb7SHOKfkt9/UDrYSZvqI=
github.com/jcmturner/gofork v0.0.0-20190328161633-dc7c13fece03 h1:FUwcHNlEqkqLjLBdCp5PRlCFijNjvcYANOZXzCfXwCM= github.com/jcmturner/gofork v0.0.0-20190328161633-dc7c13fece03 h1:FUwcHNlEqkqLjLBdCp5PRlCFijNjvcYANOZXzCfXwCM=
github.com/jcmturner/gofork v0.0.0-20190328161633-dc7c13fece03/go.mod h1:MK8+TM0La+2rjBD4jE12Kj1pCCxK7d2LK/UM3ncEo0o= github.com/jcmturner/gofork v0.0.0-20190328161633-dc7c13fece03/go.mod h1:MK8+TM0La+2rjBD4jE12Kj1pCCxK7d2LK/UM3ncEo0o=
github.com/jcmturner/gofork v1.0.0 h1:J7uCkflzTEhUZ64xqKnkDxq3kzc96ajM1Gli5ktUem8= github.com/jcmturner/gofork v1.0.0 h1:J7uCkflzTEhUZ64xqKnkDxq3kzc96ajM1Gli5ktUem8=
@ -355,15 +356,17 @@ github.com/prometheus/procfs v0.0.4/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDa
github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
github.com/rakyll/statik v0.1.6 h1:uICcfUXpgqtw2VopbIncslhAmE5hwc4g20TEyEENBNs= github.com/rakyll/statik v0.1.6 h1:uICcfUXpgqtw2VopbIncslhAmE5hwc4g20TEyEENBNs=
github.com/rakyll/statik v0.1.6/go.mod h1:OEi9wJV/fMUAGx1eNjq75DKDsJVuEv1U0oYdX6GX8Zs= github.com/rakyll/statik v0.1.6/go.mod h1:OEi9wJV/fMUAGx1eNjq75DKDsJVuEv1U0oYdX6GX8Zs=
github.com/rakyll/statik v0.1.7 h1:OF3QCZUuyPxuGEP7B4ypUa7sB/iHtqOTDYZXGM8KOdQ=
github.com/rakyll/statik v0.1.7/go.mod h1:AlZONWzMtEnMs7W4e/1LURLiI49pIMmp6V9Unghqrcc=
github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a h1:9ZKAASQSHhDYGoxY8uLVpewe1GDZ2vu2Tr/vTdVAkFQ= github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a h1:9ZKAASQSHhDYGoxY8uLVpewe1GDZ2vu2Tr/vTdVAkFQ=
github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
github.com/rcrowley/go-metrics v0.0.0-20190826022208-cac0b30c2563 h1:dY6ETXrvDG7Sa4vE8ZQG4yqWg6UnOcbqTAahkV813vQ= github.com/rcrowley/go-metrics v0.0.0-20190826022208-cac0b30c2563 h1:dY6ETXrvDG7Sa4vE8ZQG4yqWg6UnOcbqTAahkV813vQ=
github.com/rcrowley/go-metrics v0.0.0-20190826022208-cac0b30c2563/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/rcrowley/go-metrics v0.0.0-20190826022208-cac0b30c2563/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
github.com/rwcarlsen/goexif v0.0.0-20190401172101-9e8deecbddbd h1:CmH9+J6ZSsIjUK3dcGsnCnO41eRBOnY12zwkn5qVwgc=
github.com/rwcarlsen/goexif v0.0.0-20190401172101-9e8deecbddbd/go.mod h1:hPqNNc0+uJM6H+SuU8sEs5K5IQeKccPqeSjfgcKGgPk=
github.com/seaweedfs/fuse v0.0.0-20190510212405-310228904eff h1:uLd5zBvf5OA67wcVRePHrFt60bR4LSskaVhgVwyk0Jg= github.com/seaweedfs/fuse v0.0.0-20190510212405-310228904eff h1:uLd5zBvf5OA67wcVRePHrFt60bR4LSskaVhgVwyk0Jg=
github.com/seaweedfs/fuse v0.0.0-20190510212405-310228904eff/go.mod h1:cubdLmQFqEUZ9vNJrznhgc3m3VMAJi/nY2Ix2axXkG0= github.com/seaweedfs/fuse v0.0.0-20190510212405-310228904eff/go.mod h1:cubdLmQFqEUZ9vNJrznhgc3m3VMAJi/nY2Ix2axXkG0=
github.com/seaweedfs/goexif v1.0.2 h1:p+rTXYdQ2mgxd+1JaTrQ9N8DvYuw9UH9xgYmJ+Bb29E=
github.com/seaweedfs/goexif v1.0.2/go.mod h1:MrKs5LK0HXdffrdCZrW3OIMegL2xXpC6ThLyXMyjdrk=
github.com/sirupsen/logrus v1.2.0 h1:juTguoYk5qI21pwyTXY3B3Y5cOTH3ZUyZCg1v/mihuo= github.com/sirupsen/logrus v1.2.0 h1:juTguoYk5qI21pwyTXY3B3Y5cOTH3ZUyZCg1v/mihuo=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4= github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4=
@ -463,6 +466,9 @@ golang.org/x/image v0.0.0-20190227222117-0694c2d4d067 h1:KYGJGHOQy8oSi1fDlSpcZF0
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
golang.org/x/image v0.0.0-20190829233526-b3c06291d021 h1:j6QOxNFMpEL1wIQX6TUdBPNfGZKmBOJS/vfSm8a7tdM= golang.org/x/image v0.0.0-20190829233526-b3c06291d021 h1:j6QOxNFMpEL1wIQX6TUdBPNfGZKmBOJS/vfSm8a7tdM=
golang.org/x/image v0.0.0-20190829233526-b3c06291d021/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/image v0.0.0-20190829233526-b3c06291d021/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
golang.org/x/image v0.0.0-20191009234506-e7c1f5e7dbb8/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
golang.org/x/image v0.0.0-20200119044424-58c23975cae1 h1:5h3ngYt7+vXCDZCup/HkCQgW5XwmSvR/nA2JmJ0RErg=
golang.org/x/image v0.0.0-20200119044424-58c23975cae1/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=

2
k8s/seaweedfs/Chart.yaml

@ -1,4 +1,4 @@
apiVersion: v1 apiVersion: v1
description: SeaweedFS description: SeaweedFS
name: seaweedfs name: seaweedfs
version: 1.71
version: 1.73

2
k8s/seaweedfs/values.yaml

@ -4,7 +4,7 @@ global:
registry: "" registry: ""
repository: "" repository: ""
imageName: chrislusf/seaweedfs imageName: chrislusf/seaweedfs
imageTag: "1.71"
imageTag: "1.73"
imagePullPolicy: IfNotPresent imagePullPolicy: IfNotPresent
imagePullSecrets: imagepullsecret imagePullSecrets: imagepullsecret
restartPolicy: Always restartPolicy: Always

8
other/java/client/src/main/proto/filer.proto

@ -42,7 +42,7 @@ service SeaweedFiler {
rpc GetFilerConfiguration (GetFilerConfigurationRequest) returns (GetFilerConfigurationResponse) { rpc GetFilerConfiguration (GetFilerConfigurationRequest) returns (GetFilerConfigurationResponse) {
} }
rpc ListenForEvents (ListenForEventsRequest) returns (stream FullEventNotification) {
rpc SubscribeMetadata (SubscribeMetadataRequest) returns (stream SubscribeMetadataResponse) {
} }
} }
@ -123,6 +123,7 @@ message FuseAttributes {
string user_name = 11; // for hdfs string user_name = 11; // for hdfs
repeated string group_name = 12; // for hdfs repeated string group_name = 12; // for hdfs
string symlink_target = 13; string symlink_target = 13;
bytes md5 = 14;
} }
message CreateEntryRequest { message CreateEntryRequest {
@ -230,16 +231,15 @@ message GetFilerConfigurationResponse {
string collection = 3; string collection = 3;
uint32 max_mb = 4; uint32 max_mb = 4;
string dir_buckets = 5; string dir_buckets = 5;
string dir_queues = 6;
bool cipher = 7; bool cipher = 7;
} }
message ListenForEventsRequest {
message SubscribeMetadataRequest {
string client_name = 1; string client_name = 1;
string path_prefix = 2; string path_prefix = 2;
int64 since_ns = 3; int64 since_ns = 3;
} }
message FullEventNotification {
message SubscribeMetadataResponse {
string directory = 1; string directory = 1;
EventNotification event_notification = 2; EventNotification event_notification = 2;
} }

75
unmaintained/see_log_entry/see_log_entry.go

@ -0,0 +1,75 @@
package main
import (
"flag"
"fmt"
"io"
"log"
"os"
"github.com/golang/protobuf/proto"
"github.com/chrislusf/seaweedfs/weed/filer2"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/util"
)
var (
logdataFile = flag.String("logdata", "", "log data file saved under "+ filer2.SystemLogDir)
)
func main() {
flag.Parse()
dst, err := os.OpenFile(*logdataFile, os.O_RDONLY, 0644)
if err != nil {
log.Fatalf("failed to open %s: %v", *logdataFile, err)
}
defer dst.Close()
err = walkLogEntryFile(dst)
if err != nil {
log.Fatalf("failed to visit %s: %v", *logdataFile, err)
}
}
func walkLogEntryFile(dst *os.File) error {
sizeBuf := make([]byte, 4)
for {
if n, err := dst.Read(sizeBuf); n != 4 {
if err == io.EOF {
return nil
}
return err
}
size := util.BytesToUint32(sizeBuf)
data := make([]byte, int(size))
if n, err := dst.Read(data); n != len(data) {
return err
}
logEntry := &filer_pb.LogEntry{}
err := proto.Unmarshal(data, logEntry)
if err != nil {
log.Printf("unexpected unmarshal filer_pb.LogEntry: %v", err)
return nil
}
event := &filer_pb.SubscribeMetadataResponse{}
err = proto.Unmarshal(logEntry.Data, event)
if err != nil {
log.Printf("unexpected unmarshal filer_pb.SubscribeMetadataResponse: %v", err)
return nil
}
fmt.Printf("event: %+v\n", event)
}
}

12
weed/command/mount.go

@ -1,5 +1,9 @@
package command package command
import (
"os"
)
type MountOptions struct { type MountOptions struct {
filer *string filer *string
filerMountRootPath *string filerMountRootPath *string
@ -9,7 +13,8 @@ type MountOptions struct {
replication *string replication *string
ttlSec *int ttlSec *int
chunkSizeLimitMB *int chunkSizeLimitMB *int
chunkCacheCountLimit *int64
cacheDir *string
cacheSizeMB *int64
dataCenter *string dataCenter *string
allowOthers *bool allowOthers *bool
umaskString *string umaskString *string
@ -32,8 +37,9 @@ func init() {
mountOptions.collection = cmdMount.Flag.String("collection", "", "collection to create the files") mountOptions.collection = cmdMount.Flag.String("collection", "", "collection to create the files")
mountOptions.replication = cmdMount.Flag.String("replication", "", "replication(e.g. 000, 001) to create to files. If empty, let filer decide.") mountOptions.replication = cmdMount.Flag.String("replication", "", "replication(e.g. 000, 001) to create to files. If empty, let filer decide.")
mountOptions.ttlSec = cmdMount.Flag.Int("ttl", 0, "file ttl in seconds") mountOptions.ttlSec = cmdMount.Flag.Int("ttl", 0, "file ttl in seconds")
mountOptions.chunkSizeLimitMB = cmdMount.Flag.Int("chunkSizeLimitMB", 4, "local write buffer size, also chunk large files")
mountOptions.chunkCacheCountLimit = cmdMount.Flag.Int64("chunkCacheCountLimit", 1000, "number of file chunks to cache in memory")
mountOptions.chunkSizeLimitMB = cmdMount.Flag.Int("chunkSizeLimitMB", 16, "local write buffer size, also chunk large files")
mountOptions.cacheDir = cmdMount.Flag.String("cacheDir", os.TempDir(), "local cache directory for file chunks")
mountOptions.cacheSizeMB = cmdMount.Flag.Int64("cacheCapacityMB", 1000, "local cache capacity in MB (0 will disable cache)")
mountOptions.dataCenter = cmdMount.Flag.String("dataCenter", "", "prefer to write to the data center") mountOptions.dataCenter = cmdMount.Flag.String("dataCenter", "", "prefer to write to the data center")
mountOptions.allowOthers = cmdMount.Flag.Bool("allowOthers", true, "allows other users to access the file system") mountOptions.allowOthers = cmdMount.Flag.Bool("allowOthers", true, "allows other users to access the file system")
mountOptions.umaskString = cmdMount.Flag.String("umask", "022", "octal umask, e.g., 022, 0111") mountOptions.umaskString = cmdMount.Flag.String("umask", "022", "octal umask, e.g., 022, 0111")

6
weed/command/mount_std.go

@ -129,7 +129,6 @@ func RunMount(option *MountOptions, umask os.FileMode) bool {
} }
options = append(options, osSpecificMountOptions()...) options = append(options, osSpecificMountOptions()...)
if *option.allowOthers { if *option.allowOthers {
options = append(options, fuse.AllowOther()) options = append(options, fuse.AllowOther())
} }
@ -137,12 +136,12 @@ func RunMount(option *MountOptions, umask os.FileMode) bool {
options = append(options, fuse.AllowNonEmptyMount()) options = append(options, fuse.AllowNonEmptyMount())
} }
// mount
c, err := fuse.Mount(dir, options...) c, err := fuse.Mount(dir, options...)
if err != nil { if err != nil {
glog.V(0).Infof("mount: %v", err) glog.V(0).Infof("mount: %v", err)
return true return true
} }
defer fuse.Unmount(dir) defer fuse.Unmount(dir)
util.OnInterrupt(func() { util.OnInterrupt(func() {
@ -164,7 +163,8 @@ func RunMount(option *MountOptions, umask os.FileMode) bool {
Replication: *option.replication, Replication: *option.replication,
TtlSec: int32(*option.ttlSec), TtlSec: int32(*option.ttlSec),
ChunkSizeLimit: int64(chunkSizeLimitMB) * 1024 * 1024, ChunkSizeLimit: int64(chunkSizeLimitMB) * 1024 * 1024,
ChunkCacheCountLimit: *option.chunkCacheCountLimit,
CacheDir: *option.cacheDir,
CacheSizeMB: *option.cacheSizeMB,
DataCenter: *option.dataCenter, DataCenter: *option.dataCenter,
DirListCacheLimit: *option.dirListCacheLimit, DirListCacheLimit: *option.dirListCacheLimit,
EntryCacheTtl: 3 * time.Second, EntryCacheTtl: 3 * time.Second,

6
weed/command/msg_broker.go

@ -62,18 +62,14 @@ func (msgBrokerOpt *QueueOptions) startQueueServer() bool {
return false return false
} }
filerQueuesPath := "/queues"
grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.client") grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.client")
for { for {
err = pb.WithGrpcFilerClient(filerGrpcAddress, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error { err = pb.WithGrpcFilerClient(filerGrpcAddress, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
resp, err := client.GetFilerConfiguration(context.Background(), &filer_pb.GetFilerConfigurationRequest{})
_, err := client.GetFilerConfiguration(context.Background(), &filer_pb.GetFilerConfigurationRequest{})
if err != nil { if err != nil {
return fmt.Errorf("get filer %s configuration: %v", filerGrpcAddress, err) return fmt.Errorf("get filer %s configuration: %v", filerGrpcAddress, err)
} }
filerQueuesPath = resp.DirQueues
glog.V(0).Infof("Queue read filer queues dir: %s", filerQueuesPath)
return nil return nil
}) })
if err != nil { if err != nil {

17
weed/command/scaffold.go

@ -18,7 +18,7 @@ var cmdScaffold = &Command{
For example, the filer.toml mysql password can be overwritten by environment variable For example, the filer.toml mysql password can be overwritten by environment variable
export WEED_MYSQL_PASSWORD=some_password export WEED_MYSQL_PASSWORD=some_password
Environment variable rules: Environment variable rules:
* Prefix fix with "WEED_"
* Prefix the variable name with "WEED_"
* Upppercase the reset of variable name. * Upppercase the reset of variable name.
* Replace '.' with '_' * Replace '.' with '_'
@ -76,8 +76,10 @@ const (
recursive_delete = false recursive_delete = false
# directories under this folder will be automatically creating a separate bucket # directories under this folder will be automatically creating a separate bucket
buckets_folder = "/buckets" buckets_folder = "/buckets"
# directories under this folder will be store message queue data
queues_folder = "/queues"
buckets_fsync = [ # a list of buckets with all write requests fsync=true
"important_bucket",
"should_always_fsync",
]
#################################################### ####################################################
# The following are filer store options # The following are filer store options
@ -139,13 +141,13 @@ hosts=[
"localhost:9042", "localhost:9042",
] ]
[redis]
[redis2]
enabled = false enabled = false
address = "localhost:6379" address = "localhost:6379"
password = "" password = ""
database = 0 database = 0
[redis_cluster]
[redis_cluster2]
enabled = false enabled = false
addresses = [ addresses = [
"localhost:30001", "localhost:30001",
@ -260,6 +262,7 @@ aws_secret_access_key = "" # if empty, loads from the shared credentials fil
region = "us-east-2" region = "us-east-2"
bucket = "your_bucket_name" # an existing bucket bucket = "your_bucket_name" # an existing bucket
directory = "/" # destination directory directory = "/" # destination directory
endpoint = ""
[sink.google_cloud_storage] [sink.google_cloud_storage]
# read credentials doc at https://cloud.google.com/docs/authentication/getting-started # read credentials doc at https://cloud.google.com/docs/authentication/getting-started
@ -358,11 +361,13 @@ scripts = """
ec.rebuild -force ec.rebuild -force
ec.balance -force ec.balance -force
volume.balance -force volume.balance -force
volume.fix.replication
""" """
sleep_minutes = 17 # sleep minutes between each script execution sleep_minutes = 17 # sleep minutes between each script execution
[master.filer] [master.filer]
default_filer_url = "http://localhost:8888/"
default = "localhost:8888" # used by maintenance scripts if the scripts needs to use fs related commands
[master.sequencer] [master.sequencer]
type = "memory" # Choose [memory|etcd] type for storing the file id sequence type = "memory" # Choose [memory|etcd] type for storing the file id sequence

15
weed/command/shell.go

@ -9,14 +9,14 @@ import (
) )
var ( var (
shellOptions shell.ShellOptions
shellInitialFilerUrl *string
shellOptions shell.ShellOptions
shellInitialFiler *string
) )
func init() { func init() {
cmdShell.Run = runShell // break init cycle cmdShell.Run = runShell // break init cycle
shellOptions.Masters = cmdShell.Flag.String("master", "localhost:9333", "comma-separated master servers") shellOptions.Masters = cmdShell.Flag.String("master", "localhost:9333", "comma-separated master servers")
shellInitialFilerUrl = cmdShell.Flag.String("filer.url", "http://localhost:8888/", "initial filer url")
shellInitialFiler = cmdShell.Flag.String("filer", "localhost:8888", "filer host and port")
} }
var cmdShell = &Command{ var cmdShell = &Command{
@ -32,12 +32,13 @@ func runShell(command *Command, args []string) bool {
util.LoadConfiguration("security", false) util.LoadConfiguration("security", false)
shellOptions.GrpcDialOption = security.LoadClientTLS(util.GetViper(), "grpc.client") shellOptions.GrpcDialOption = security.LoadClientTLS(util.GetViper(), "grpc.client")
var filerPwdErr error
shellOptions.FilerHost, shellOptions.FilerPort, shellOptions.Directory, filerPwdErr = util.ParseFilerUrl(*shellInitialFilerUrl)
if filerPwdErr != nil {
fmt.Printf("failed to parse url filer.url=%s : %v\n", *shellInitialFilerUrl, filerPwdErr)
var err error
shellOptions.FilerHost, shellOptions.FilerPort, err = util.ParseHostPort(*shellInitialFiler)
if err != nil {
fmt.Printf("failed to parse filer %s: %v\n", *shellInitialFiler, err)
return false return false
} }
shellOptions.Directory = "/"
shell.RunShell(shellOptions) shell.RunShell(shellOptions)

3
weed/command/volume.go

@ -127,7 +127,8 @@ func (v VolumeServerOptions) startVolumeServer(volumeFolders, maxVolumeCounts, v
} }
if *v.ip == "" { if *v.ip == "" {
*v.ip = "127.0.0.1"
*v.ip = util.DetectedHostAddress()
glog.V(0).Infof("detected volume server ip address: %v", *v.ip)
} }
if *v.publicPort == 0 { if *v.publicPort == 0 {

2
weed/command/watch.go

@ -34,7 +34,7 @@ func runWatch(cmd *Command, args []string) bool {
watchErr := pb.WithFilerClient(*watchFiler, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error { watchErr := pb.WithFilerClient(*watchFiler, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
stream, err := client.ListenForEvents(context.Background(), &filer_pb.ListenForEventsRequest{
stream, err := client.SubscribeMetadata(context.Background(), &filer_pb.SubscribeMetadataRequest{
ClientName: "watch", ClientName: "watch",
PathPrefix: *watchTarget, PathPrefix: *watchTarget,
SinceNs: 0, SinceNs: 0,

7
weed/command/webdav.go

@ -4,6 +4,7 @@ import (
"context" "context"
"fmt" "fmt"
"net/http" "net/http"
"os"
"os/user" "os/user"
"strconv" "strconv"
"time" "time"
@ -26,6 +27,8 @@ type WebDavOption struct {
collection *string collection *string
tlsPrivateKey *string tlsPrivateKey *string
tlsCertificate *string tlsCertificate *string
cacheDir *string
cacheSizeMB *int64
} }
func init() { func init() {
@ -35,6 +38,8 @@ func init() {
webDavStandaloneOptions.collection = cmdWebDav.Flag.String("collection", "", "collection to create the files") webDavStandaloneOptions.collection = cmdWebDav.Flag.String("collection", "", "collection to create the files")
webDavStandaloneOptions.tlsPrivateKey = cmdWebDav.Flag.String("key.file", "", "path to the TLS private key file") webDavStandaloneOptions.tlsPrivateKey = cmdWebDav.Flag.String("key.file", "", "path to the TLS private key file")
webDavStandaloneOptions.tlsCertificate = cmdWebDav.Flag.String("cert.file", "", "path to the TLS certificate file") webDavStandaloneOptions.tlsCertificate = cmdWebDav.Flag.String("cert.file", "", "path to the TLS certificate file")
webDavStandaloneOptions.cacheDir = cmdWebDav.Flag.String("cacheDir", os.TempDir(), "local cache directory for file chunks")
webDavStandaloneOptions.cacheSizeMB = cmdWebDav.Flag.Int64("cacheCapacityMB", 1000, "local cache capacity in MB")
} }
var cmdWebDav = &Command{ var cmdWebDav = &Command{
@ -105,6 +110,8 @@ func (wo *WebDavOption) startWebDav() bool {
Uid: uid, Uid: uid,
Gid: gid, Gid: gid,
Cipher: cipher, Cipher: cipher,
CacheDir: *wo.cacheDir,
CacheSizeMB: *wo.cacheSizeMB,
}) })
if webdavServer_err != nil { if webdavServer_err != nil {
glog.Fatalf("WebDav Server startup error: %v", webdavServer_err) glog.Fatalf("WebDav Server startup error: %v", webdavServer_err)

1
weed/filer2/entry.go

@ -21,6 +21,7 @@ type Attr struct {
UserName string UserName string
GroupNames []string GroupNames []string
SymlinkTarget string SymlinkTarget string
Md5 []byte
} }
func (attr Attr) IsDirectory() bool { func (attr Attr) IsDirectory() bool {

6
weed/filer2/entry_codec.go

@ -52,6 +52,7 @@ func EntryAttributeToPb(entry *Entry) *filer_pb.FuseAttributes {
UserName: entry.Attr.UserName, UserName: entry.Attr.UserName,
GroupName: entry.Attr.GroupNames, GroupName: entry.Attr.GroupNames,
SymlinkTarget: entry.Attr.SymlinkTarget, SymlinkTarget: entry.Attr.SymlinkTarget,
Md5: entry.Attr.Md5,
} }
} }
@ -71,6 +72,7 @@ func PbToEntryAttribute(attr *filer_pb.FuseAttributes) Attr {
t.UserName = attr.UserName t.UserName = attr.UserName
t.GroupNames = attr.GroupName t.GroupNames = attr.GroupName
t.SymlinkTarget = attr.SymlinkTarget t.SymlinkTarget = attr.SymlinkTarget
t.Md5 = attr.Md5
return t return t
} }
@ -93,6 +95,10 @@ func EqualEntry(a, b *Entry) bool {
return false return false
} }
if !bytes.Equal(a.Md5, b.Md5) {
return false
}
for i := 0; i < len(a.Chunks); i++ { for i := 0; i < len(a.Chunks); i++ {
if !proto.Equal(a.Chunks[i], b.Chunks[i]) { if !proto.Equal(a.Chunks[i], b.Chunks[i]) {
return false return false

16
weed/filer2/filechunks.go

@ -20,7 +20,21 @@ func TotalSize(chunks []*filer_pb.FileChunk) (size uint64) {
return return
} }
func ETag(chunks []*filer_pb.FileChunk) (etag string) {
func ETag(entry *filer_pb.Entry) (etag string) {
if entry.Attributes == nil || entry.Attributes.Md5 == nil {
return ETagChunks(entry.Chunks)
}
return fmt.Sprintf("%x", entry.Attributes.Md5)
}
func ETagEntry(entry *Entry) (etag string) {
if entry.Attr.Md5 == nil {
return ETagChunks(entry.Chunks)
}
return fmt.Sprintf("%x", entry.Attr.Md5)
}
func ETagChunks(chunks []*filer_pb.FileChunk) (etag string) {
if len(chunks) == 1 { if len(chunks) == 1 {
return chunks[0].ETag return chunks[0].ETag
} }

14
weed/filer2/filer.go

@ -13,8 +13,8 @@ import (
"github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/queue"
"github.com/chrislusf/seaweedfs/weed/util" "github.com/chrislusf/seaweedfs/weed/util"
"github.com/chrislusf/seaweedfs/weed/util/log_buffer"
"github.com/chrislusf/seaweedfs/weed/wdclient" "github.com/chrislusf/seaweedfs/weed/wdclient"
) )
@ -32,20 +32,24 @@ type Filer struct {
fileIdDeletionQueue *util.UnboundedQueue fileIdDeletionQueue *util.UnboundedQueue
GrpcDialOption grpc.DialOption GrpcDialOption grpc.DialOption
DirBucketsPath string DirBucketsPath string
DirQueuesPath string
FsyncBuckets []string
buckets *FilerBuckets buckets *FilerBuckets
Cipher bool Cipher bool
metaLogBuffer *queue.LogBuffer
metaLogBuffer *log_buffer.LogBuffer
metaLogCollection string
metaLogReplication string
} }
func NewFiler(masters []string, grpcDialOption grpc.DialOption, filerGrpcPort uint32, notifyFn func()) *Filer {
func NewFiler(masters []string, grpcDialOption grpc.DialOption, filerGrpcPort uint32, collection string, replication string, notifyFn func()) *Filer {
f := &Filer{ f := &Filer{
directoryCache: ccache.New(ccache.Configure().MaxSize(1000).ItemsToPrune(100)), directoryCache: ccache.New(ccache.Configure().MaxSize(1000).ItemsToPrune(100)),
MasterClient: wdclient.NewMasterClient(grpcDialOption, "filer", filerGrpcPort, masters), MasterClient: wdclient.NewMasterClient(grpcDialOption, "filer", filerGrpcPort, masters),
fileIdDeletionQueue: util.NewUnboundedQueue(), fileIdDeletionQueue: util.NewUnboundedQueue(),
GrpcDialOption: grpcDialOption, GrpcDialOption: grpcDialOption,
} }
f.metaLogBuffer = queue.NewLogBuffer(time.Minute, f.logFlushFunc, notifyFn)
f.metaLogBuffer = log_buffer.NewLogBuffer(time.Minute, f.logFlushFunc, notifyFn)
f.metaLogCollection = collection
f.metaLogReplication = replication
go f.loopProcessingDeletion() go f.loopProcessingDeletion()

19
weed/filer2/filer_buckets.go

@ -13,6 +13,7 @@ type BucketName string
type BucketOption struct { type BucketOption struct {
Name BucketName Name BucketName
Replication string Replication string
fsync bool
} }
type FilerBuckets struct { type FilerBuckets struct {
dirBucketsPath string dirBucketsPath string
@ -20,36 +21,42 @@ type FilerBuckets struct {
sync.RWMutex sync.RWMutex
} }
func (f *Filer) LoadBuckets(dirBucketsPath string) {
func (f *Filer) LoadBuckets() {
f.buckets = &FilerBuckets{ f.buckets = &FilerBuckets{
buckets: make(map[BucketName]*BucketOption), buckets: make(map[BucketName]*BucketOption),
} }
f.DirBucketsPath = dirBucketsPath
limit := math.MaxInt32 limit := math.MaxInt32
entries, err := f.ListDirectoryEntries(context.Background(), util.FullPath(dirBucketsPath), "", false, limit)
entries, err := f.ListDirectoryEntries(context.Background(), util.FullPath(f.DirBucketsPath), "", false, limit)
if err != nil { if err != nil {
glog.V(1).Infof("no buckets found: %v", err) glog.V(1).Infof("no buckets found: %v", err)
return return
} }
shouldFsyncMap := make(map[string]bool)
for _, bucket := range f.FsyncBuckets {
shouldFsyncMap[bucket] = true
}
glog.V(1).Infof("buckets found: %d", len(entries)) glog.V(1).Infof("buckets found: %d", len(entries))
f.buckets.Lock() f.buckets.Lock()
for _, entry := range entries { for _, entry := range entries {
_, shouldFsnyc := shouldFsyncMap[entry.Name()]
f.buckets.buckets[BucketName(entry.Name())] = &BucketOption{ f.buckets.buckets[BucketName(entry.Name())] = &BucketOption{
Name: BucketName(entry.Name()), Name: BucketName(entry.Name()),
Replication: entry.Replication, Replication: entry.Replication,
fsync: shouldFsnyc,
} }
} }
f.buckets.Unlock() f.buckets.Unlock()
} }
func (f *Filer) ReadBucketOption(buketName string) (replication string) {
func (f *Filer) ReadBucketOption(buketName string) (replication string, fsync bool) {
f.buckets.RLock() f.buckets.RLock()
defer f.buckets.RUnlock() defer f.buckets.RUnlock()
@ -57,9 +64,9 @@ func (f *Filer) ReadBucketOption(buketName string) (replication string) {
option, found := f.buckets.buckets[BucketName(buketName)] option, found := f.buckets.buckets[BucketName(buketName)]
if !found { if !found {
return ""
return "", false
} }
return option.Replication
return option.Replication, option.fsync
} }

24
weed/filer2/filer_notify.go

@ -25,7 +25,7 @@ func (f *Filer) NotifyUpdateEvent(oldEntry, newEntry *Entry, deleteChunks bool)
// println("fullpath:", fullpath) // println("fullpath:", fullpath)
if strings.HasPrefix(fullpath, "/.meta") {
if strings.HasPrefix(fullpath, SystemLogDir) {
return return
} }
@ -45,32 +45,34 @@ func (f *Filer) NotifyUpdateEvent(oldEntry, newEntry *Entry, deleteChunks bool)
notification.Queue.SendMessage(fullpath, eventNotification) notification.Queue.SendMessage(fullpath, eventNotification)
} }
f.logMetaEvent(time.Now(), fullpath, eventNotification)
f.logMetaEvent(fullpath, eventNotification)
} }
func (f *Filer) logMetaEvent(ts time.Time, fullpath string, eventNotification *filer_pb.EventNotification) {
func (f *Filer) logMetaEvent(fullpath string, eventNotification *filer_pb.EventNotification) {
dir, _ := util.FullPath(fullpath).DirAndName() dir, _ := util.FullPath(fullpath).DirAndName()
event := &filer_pb.FullEventNotification{
event := &filer_pb.SubscribeMetadataResponse{
Directory: dir, Directory: dir,
EventNotification: eventNotification, EventNotification: eventNotification,
} }
data, err := proto.Marshal(event) data, err := proto.Marshal(event)
if err != nil { if err != nil {
glog.Errorf("failed to marshal filer_pb.FullEventNotification %+v: %v", event, err)
glog.Errorf("failed to marshal filer_pb.SubscribeMetadataResponse %+v: %v", event, err)
return return
} }
f.metaLogBuffer.AddToBuffer(ts, []byte(dir), data)
f.metaLogBuffer.AddToBuffer([]byte(dir), data)
} }
func (f *Filer) logFlushFunc(startTime, stopTime time.Time, buf []byte) { func (f *Filer) logFlushFunc(startTime, stopTime time.Time, buf []byte) {
targetFile := fmt.Sprintf("/.meta/log/%04d/%02d/%02d/%02d/%02d/%02d.%09d.log",
targetFile := fmt.Sprintf("%s/%04d-%02d-%02d/%02d-%02d.segment", SystemLogDir,
startTime.Year(), startTime.Month(), startTime.Day(), startTime.Hour(), startTime.Minute(), startTime.Year(), startTime.Month(), startTime.Day(), startTime.Hour(), startTime.Minute(),
startTime.Second(), startTime.Nanosecond())
// startTime.Second(), startTime.Nanosecond(),
)
if err := f.appendToFile(targetFile, buf); err != nil { if err := f.appendToFile(targetFile, buf); err != nil {
glog.V(0).Infof("log write failed %s: %v", targetFile, err) glog.V(0).Infof("log write failed %s: %v", targetFile, err)
@ -95,11 +97,11 @@ func (f *Filer) ReadLogBuffer(lastReadTime time.Time, eachEventFn func(fullpath
return lastReadTime, fmt.Errorf("unexpected unmarshal filer_pb.LogEntry: %v", err) return lastReadTime, fmt.Errorf("unexpected unmarshal filer_pb.LogEntry: %v", err)
} }
event := &filer_pb.FullEventNotification{}
event := &filer_pb.SubscribeMetadataResponse{}
err = proto.Unmarshal(logEntry.Data, event) err = proto.Unmarshal(logEntry.Data, event)
if err != nil { if err != nil {
glog.Errorf("unexpected unmarshal filer_pb.FullEventNotification: %v", err)
return lastReadTime, fmt.Errorf("unexpected unmarshal filer_pb.FullEventNotification: %v", err)
glog.Errorf("unexpected unmarshal filer_pb.SubscribeMetadataResponse: %v", err)
return lastReadTime, fmt.Errorf("unexpected unmarshal filer_pb.SubscribeMetadataResponse: %v", err)
} }
err = eachEventFn(event.Directory, event.EventNotification) err = eachEventFn(event.Directory, event.EventNotification)

47
weed/filer2/filer_notify_append.go

@ -13,25 +13,10 @@ import (
func (f *Filer) appendToFile(targetFile string, data []byte) error { func (f *Filer) appendToFile(targetFile string, data []byte) error {
// assign a volume location
assignRequest := &operation.VolumeAssignRequest{
Count: 1,
}
assignResult, err := operation.Assign(f.GetMaster(), f.GrpcDialOption, assignRequest)
if err != nil {
return fmt.Errorf("AssignVolume: %v", err)
}
if assignResult.Error != "" {
return fmt.Errorf("AssignVolume error: %v", assignResult.Error)
}
// upload data
targetUrl := "http://" + assignResult.Url + "/" + assignResult.Fid
uploadResult, err := operation.UploadData(targetUrl, "", false, data, false, "", nil, assignResult.Auth)
if err != nil {
return fmt.Errorf("upload data %s: %v", targetUrl, err)
assignResult, err, uploadResult, err2 := f.assignAndUpload(data)
if err2 != nil {
return err2
} }
// println("uploaded to", targetUrl)
// find out existing entry // find out existing entry
fullpath := util.FullPath(targetFile) fullpath := util.FullPath(targetFile)
@ -68,3 +53,29 @@ func (f *Filer) appendToFile(targetFile string, data []byte) error {
return err return err
} }
func (f *Filer) assignAndUpload(data []byte) (*operation.AssignResult, error, *operation.UploadResult, error) {
// assign a volume location
assignRequest := &operation.VolumeAssignRequest{
Count: 1,
Collection: f.metaLogCollection,
Replication: f.metaLogReplication,
WritableVolumeCount: 1,
}
assignResult, err := operation.Assign(f.GetMaster(), f.GrpcDialOption, assignRequest)
if err != nil {
return nil, nil, nil, fmt.Errorf("AssignVolume: %v", err)
}
if assignResult.Error != "" {
return nil, nil, nil, fmt.Errorf("AssignVolume error: %v", assignResult.Error)
}
// upload data
targetUrl := "http://" + assignResult.Url + "/" + assignResult.Fid
uploadResult, err := operation.UploadData(targetUrl, "", false, data, false, "", nil, assignResult.Auth)
if err != nil {
return nil, nil, nil, fmt.Errorf("upload data %s: %v", targetUrl, err)
}
// println("uploaded to", targetUrl)
return assignResult, err, uploadResult, nil
}

4
weed/filer2/leveldb/leveldb_store_test.go

@ -11,7 +11,7 @@ import (
) )
func TestCreateAndFind(t *testing.T) { func TestCreateAndFind(t *testing.T) {
filer := filer2.NewFiler(nil, nil, 0, nil)
filer := filer2.NewFiler(nil, nil, 0, "", "", nil)
dir, _ := ioutil.TempDir("", "seaweedfs_filer_test") dir, _ := ioutil.TempDir("", "seaweedfs_filer_test")
defer os.RemoveAll(dir) defer os.RemoveAll(dir)
store := &LevelDBStore{} store := &LevelDBStore{}
@ -66,7 +66,7 @@ func TestCreateAndFind(t *testing.T) {
} }
func TestEmptyRoot(t *testing.T) { func TestEmptyRoot(t *testing.T) {
filer := filer2.NewFiler(nil, nil, 0, nil)
filer := filer2.NewFiler(nil, nil, 0, "", "", nil)
dir, _ := ioutil.TempDir("", "seaweedfs_filer_test2") dir, _ := ioutil.TempDir("", "seaweedfs_filer_test2")
defer os.RemoveAll(dir) defer os.RemoveAll(dir)
store := &LevelDBStore{} store := &LevelDBStore{}

4
weed/filer2/leveldb2/leveldb2_store_test.go

@ -11,7 +11,7 @@ import (
) )
func TestCreateAndFind(t *testing.T) { func TestCreateAndFind(t *testing.T) {
filer := filer2.NewFiler(nil, nil, 0, nil)
filer := filer2.NewFiler(nil, nil, 0, "", "", nil)
dir, _ := ioutil.TempDir("", "seaweedfs_filer_test") dir, _ := ioutil.TempDir("", "seaweedfs_filer_test")
defer os.RemoveAll(dir) defer os.RemoveAll(dir)
store := &LevelDB2Store{} store := &LevelDB2Store{}
@ -66,7 +66,7 @@ func TestCreateAndFind(t *testing.T) {
} }
func TestEmptyRoot(t *testing.T) { func TestEmptyRoot(t *testing.T) {
filer := filer2.NewFiler(nil, nil, 0, nil)
filer := filer2.NewFiler(nil, nil, 0, "", "", nil)
dir, _ := ioutil.TempDir("", "seaweedfs_filer_test2") dir, _ := ioutil.TempDir("", "seaweedfs_filer_test2")
defer os.RemoveAll(dir) defer os.RemoveAll(dir)
store := &LevelDB2Store{} store := &LevelDB2Store{}

12
weed/filer2/reader_at.go

@ -9,8 +9,8 @@ import (
"github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/pb/pb_cache"
"github.com/chrislusf/seaweedfs/weed/util" "github.com/chrislusf/seaweedfs/weed/util"
"github.com/chrislusf/seaweedfs/weed/util/chunk_cache"
"github.com/chrislusf/seaweedfs/weed/wdclient" "github.com/chrislusf/seaweedfs/weed/wdclient"
) )
@ -22,12 +22,12 @@ type ChunkReadAt struct {
lookupFileId func(fileId string) (targetUrl string, err error) lookupFileId func(fileId string) (targetUrl string, err error)
readerLock sync.Mutex readerLock sync.Mutex
chunkCache *pb_cache.ChunkCache
chunkCache *chunk_cache.ChunkCache
} }
// var _ = io.ReaderAt(&ChunkReadAt{}) // var _ = io.ReaderAt(&ChunkReadAt{})
func NewChunkReaderAtFromClient(filerClient filer_pb.FilerClient, chunkViews []*ChunkView, chunkCache *pb_cache.ChunkCache) *ChunkReadAt {
func NewChunkReaderAtFromClient(filerClient filer_pb.FilerClient, chunkViews []*ChunkView, chunkCache *chunk_cache.ChunkCache) *ChunkReadAt {
return &ChunkReadAt{ return &ChunkReadAt{
chunkViews: chunkViews, chunkViews: chunkViews,
@ -105,9 +105,11 @@ func (c *ChunkReadAt) fetchChunkData(chunkView *ChunkView) (data []byte, err err
// fmt.Printf("fetching %s [%d,%d)\n", chunkView.FileId, chunkView.LogicOffset, chunkView.LogicOffset+int64(chunkView.Size)) // fmt.Printf("fetching %s [%d,%d)\n", chunkView.FileId, chunkView.LogicOffset, chunkView.LogicOffset+int64(chunkView.Size))
hasDataInCache := false
chunkData := c.chunkCache.GetChunk(chunkView.FileId) chunkData := c.chunkCache.GetChunk(chunkView.FileId)
if chunkData != nil { if chunkData != nil {
glog.V(3).Infof("cache hit %s [%d,%d)", chunkView.FileId, chunkView.LogicOffset, chunkView.LogicOffset+int64(chunkView.Size)) glog.V(3).Infof("cache hit %s [%d,%d)", chunkView.FileId, chunkView.LogicOffset, chunkView.LogicOffset+int64(chunkView.Size))
hasDataInCache = true
} else { } else {
chunkData, err = c.doFetchFullChunkData(chunkView.FileId, chunkView.CipherKey, chunkView.IsGzipped) chunkData, err = c.doFetchFullChunkData(chunkView.FileId, chunkView.CipherKey, chunkView.IsGzipped)
if err != nil { if err != nil {
@ -121,7 +123,9 @@ func (c *ChunkReadAt) fetchChunkData(chunkView *ChunkView) (data []byte, err err
data = chunkData[chunkView.Offset : chunkView.Offset+int64(chunkView.Size)] data = chunkData[chunkView.Offset : chunkView.Offset+int64(chunkView.Size)]
c.chunkCache.SetChunk(chunkView.FileId, chunkData)
if !hasDataInCache {
c.chunkCache.SetChunk(chunkView.FileId, chunkData)
}
return data, nil return data, nil
} }

42
weed/filer2/redis2/redis_cluster_store.go

@ -0,0 +1,42 @@
package redis2
import (
"github.com/chrislusf/seaweedfs/weed/filer2"
"github.com/chrislusf/seaweedfs/weed/util"
"github.com/go-redis/redis"
)
// init registers the redis_cluster2 store in the global filer store list,
// so it can be selected by name from filer.toml.
func init() {
	filer2.Stores = append(filer2.Stores, &RedisCluster2Store{})
}
// RedisCluster2Store is the redis-cluster flavor of the redis2 filer store.
// All store operations are inherited from the embedded UniversalRedis2Store;
// this type only supplies the cluster-specific client construction.
type RedisCluster2Store struct {
	UniversalRedis2Store
}
// GetName returns the store identifier used in filer.toml section names.
func (store *RedisCluster2Store) GetName() string {
	const storeName = "redis_cluster2"
	return storeName
}
// Initialize configures the cluster store from the filer configuration.
// Recognized keys (relative to prefix): addresses, password, useReadOnly,
// routeByLatency. The last two default to true when unset.
func (store *RedisCluster2Store) Initialize(configuration util.Configuration, prefix string) (err error) {
	// Register defaults first so the subsequent reads resolve unset keys to true.
	configuration.SetDefault(prefix+"useReadOnly", true)
	configuration.SetDefault(prefix+"routeByLatency", true)

	addresses := configuration.GetStringSlice(prefix + "addresses")
	password := configuration.GetString(prefix + "password")
	readOnly := configuration.GetBool(prefix + "useReadOnly")
	routeByLatency := configuration.GetBool(prefix + "routeByLatency")

	return store.initialize(addresses, password, readOnly, routeByLatency)
}
// initialize creates the underlying go-redis cluster client. It never fails;
// connectivity problems surface later, on the first command.
func (store *RedisCluster2Store) initialize(addresses []string, password string, readOnly, routeByLatency bool) (err error) {
	options := &redis.ClusterOptions{
		Addrs:          addresses,
		Password:       password,
		ReadOnly:       readOnly,
		RouteByLatency: routeByLatency,
	}
	store.Client = redis.NewClusterClient(options)
	return nil
}

36
weed/filer2/redis2/redis_store.go

@ -0,0 +1,36 @@
package redis2
import (
"github.com/chrislusf/seaweedfs/weed/filer2"
"github.com/chrislusf/seaweedfs/weed/util"
"github.com/go-redis/redis"
)
// init registers the redis2 store in the global filer store list,
// so it can be selected by name from filer.toml.
func init() {
	filer2.Stores = append(filer2.Stores, &Redis2Store{})
}
// Redis2Store is the single-node flavor of the redis2 filer store.
// All store operations are inherited from the embedded UniversalRedis2Store;
// this type only supplies the single-node client construction.
type Redis2Store struct {
	UniversalRedis2Store
}
// GetName returns the store identifier used in filer.toml section names.
func (store *Redis2Store) GetName() string {
	const storeName = "redis2"
	return storeName
}
// Initialize configures the store from the filer configuration.
// Recognized keys (relative to prefix): address, password, database.
func (store *Redis2Store) Initialize(configuration util.Configuration, prefix string) (err error) {
	address := configuration.GetString(prefix + "address")
	password := configuration.GetString(prefix + "password")
	database := configuration.GetInt(prefix + "database")

	return store.initialize(address, password, database)
}
// initialize creates the underlying single-node go-redis client. It never
// fails; connectivity problems surface later, on the first command.
func (store *Redis2Store) initialize(hostPort string, password string, database int) (err error) {
	options := &redis.Options{
		Addr:     hostPort,
		Password: password,
		DB:       database,
	}
	store.Client = redis.NewClient(options)
	return nil
}

162
weed/filer2/redis2/universal_redis_store.go

@ -0,0 +1,162 @@
package redis2
import (
"context"
"fmt"
"time"
"github.com/go-redis/redis"
"github.com/chrislusf/seaweedfs/weed/filer2"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/util"
)
const (
	// DIR_LIST_MARKER is appended to a directory path to form the key of the
	// sorted set that holds that directory's child names. NUL cannot occur in
	// a valid path, so a listing key can never collide with an entry key.
	DIR_LIST_MARKER = "\x00"
)
// UniversalRedis2Store implements the filer store operations on top of any
// go-redis client (single node or cluster). Each entry is stored as a
// serialized value keyed by its full path; each directory additionally keeps
// a sorted set of its child names (see genDirectoryListKey) for ordered
// listing.
type UniversalRedis2Store struct {
	Client redis.UniversalClient
}
// BeginTransaction is a no-op: this store does not batch operations into a
// redis transaction; the context is returned unchanged.
func (store *UniversalRedis2Store) BeginTransaction(ctx context.Context) (context.Context, error) {
	return ctx, nil
}
// CommitTransaction is a no-op; see BeginTransaction.
func (store *UniversalRedis2Store) CommitTransaction(ctx context.Context) error {
	return nil
}
// RollbackTransaction is a no-op; see BeginTransaction. Partially applied
// operations are not undone.
func (store *UniversalRedis2Store) RollbackTransaction(ctx context.Context) error {
	return nil
}
// InsertEntry stores the serialized entry under its full-path key and
// registers the entry's name in the parent directory's sorted-set listing.
// A TtlSec of 0 yields a zero expiration, i.e. the key never expires.
func (store *UniversalRedis2Store) InsertEntry(ctx context.Context, entry *filer2.Entry) (err error) {
	value, encErr := entry.EncodeAttributesAndChunks()
	if encErr != nil {
		return fmt.Errorf("encoding %s %+v: %v", entry.FullPath, entry.Attr, encErr)
	}

	ttl := time.Duration(entry.TtlSec) * time.Second
	if setErr := store.Client.Set(string(entry.FullPath), value, ttl).Err(); setErr != nil {
		return fmt.Errorf("persisting %s : %v", entry.FullPath, setErr)
	}

	dir, name := entry.FullPath.DirAndName()
	if name == "" {
		// Root-like path without a name component: no parent listing to update.
		return nil
	}
	// ZAddNX leaves an existing member untouched; score 0 keeps the set
	// ordered purely lexicographically by member name.
	member := redis.Z{Score: 0, Member: name}
	if addErr := store.Client.ZAddNX(genDirectoryListKey(dir), member).Err(); addErr != nil {
		return fmt.Errorf("persisting %s in parent dir: %v", entry.FullPath, addErr)
	}
	return nil
}
// UpdateEntry overwrites an existing entry. With redis SET semantics an
// update is identical to an insert, so it simply delegates to InsertEntry.
func (store *UniversalRedis2Store) UpdateEntry(ctx context.Context, entry *filer2.Entry) (err error) {
	return store.InsertEntry(ctx, entry)
}
// FindEntry loads and decodes the entry stored under fullpath.
// A missing key is reported as filer_pb.ErrNotFound.
func (store *UniversalRedis2Store) FindEntry(ctx context.Context, fullpath util.FullPath) (entry *filer2.Entry, err error) {
	data, getErr := store.Client.Get(string(fullpath)).Result()
	switch {
	case getErr == redis.Nil:
		// Translate the redis "no such key" sentinel into the filer's one.
		return nil, filer_pb.ErrNotFound
	case getErr != nil:
		return nil, fmt.Errorf("get %s : %v", fullpath, getErr)
	}

	entry = &filer2.Entry{FullPath: fullpath}
	if decodeErr := entry.DecodeAttributesAndChunks([]byte(data)); decodeErr != nil {
		// The partially built entry is still returned, matching the original contract.
		return entry, fmt.Errorf("decode %s : %v", entry.FullPath, decodeErr)
	}
	return entry, nil
}
// DeleteEntry removes the entry's value key and de-registers its name from
// the parent directory's sorted-set listing. Deleting a nonexistent key is
// not an error (DEL simply returns 0).
func (store *UniversalRedis2Store) DeleteEntry(ctx context.Context, fullpath util.FullPath) (err error) {
	if _, delErr := store.Client.Del(string(fullpath)).Result(); delErr != nil {
		return fmt.Errorf("delete %s : %v", fullpath, delErr)
	}

	dir, name := fullpath.DirAndName()
	if name == "" {
		// Root-like path without a name component: no parent listing to update.
		return nil
	}
	if _, remErr := store.Client.ZRem(genDirectoryListKey(dir), name).Result(); remErr != nil {
		return fmt.Errorf("delete %s in parent dir: %v", fullpath, remErr)
	}
	return nil
}
// DeleteFolderChildren deletes the value key of every child entry listed in
// fullpath's directory sorted set. The listing key itself is left in place,
// and sub-directories' own listings are not recursed into here —
// NOTE(review): presumably the caller walks the tree; confirm.
func (store *UniversalRedis2Store) DeleteFolderChildren(ctx context.Context, fullpath util.FullPath) (err error) {
	members, err := store.Client.ZRange(genDirectoryListKey(string(fullpath)), 0, -1).Result()
	if err != nil {
		return fmt.Errorf("delete folder %s : %v", fullpath, err)
	}

	for _, fileName := range members {
		path := util.NewFullPath(string(fullpath), fileName)
		_, err = store.Client.Del(string(path)).Result()
		if err != nil {
			// Report the child key that actually failed, not the parent
			// directory (the original message printed fullpath here, which
			// made failures impossible to pinpoint).
			return fmt.Errorf("delete %s in parent dir: %v", path, err)
		}
	}

	return nil
}
// ListDirectoryEntries returns up to limit entries under fullpath, ordered by
// name via the directory's sorted set. startFileName positions the scan; when
// inclusive is false the scan begins just after that name. Expired entries
// (TtlSec elapsed since Crtime) are lazily purged during the scan and omitted
// from the result.
func (store *UniversalRedis2Store) ListDirectoryEntries(ctx context.Context, fullpath util.FullPath, startFileName string, inclusive bool,
	limit int) (entries []*filer2.Entry, err error) {

	dirListKey := genDirectoryListKey(string(fullpath))
	start := int64(0)
	if startFileName != "" {
		// NOTE(review): the ZRank error is discarded — if startFileName is not
		// a member, start stays 0 (and becomes 1 when !inclusive), silently
		// skipping the first entry; confirm callers always pass an existing name.
		start, _ = store.Client.ZRank(dirListKey, startFileName).Result()
		if !inclusive {
			start++
		}
	}
	members, err := store.Client.ZRange(dirListKey, start, start+int64(limit)-1).Result()
	if err != nil {
		return nil, fmt.Errorf("list %s : %v", fullpath, err)
	}

	// fetch entry meta
	for _, fileName := range members {
		path := util.NewFullPath(string(fullpath), fileName)
		entry, err := store.FindEntry(ctx, path)
		if err != nil {
			// Best effort: log and drop an unreadable entry rather than
			// failing the whole listing.
			glog.V(0).Infof("list %s : %v", path, err)
		} else {
			if entry.TtlSec > 0 {
				if entry.Attr.Crtime.Add(time.Duration(entry.TtlSec) * time.Second).Before(time.Now()) {
					// Expired: purge both the value key and the listing member
					// (errors deliberately ignored — this is opportunistic cleanup).
					store.Client.Del(string(path)).Result()
					store.Client.ZRem(dirListKey, fileName).Result()
					continue
				}
			}
			entries = append(entries, entry)
		}
	}

	return entries, err
}
// genDirectoryListKey derives the redis key holding a directory's child-name
// sorted set, by appending the NUL marker so it cannot collide with any
// entry-path key.
func genDirectoryListKey(dir string) (dirList string) {
	dirList = dir + DIR_LIST_MARKER
	return
}
// Shutdown closes the underlying redis client and its connection pool.
// The Close error is intentionally discarded; this runs at process shutdown.
func (store *UniversalRedis2Store) Shutdown() {
	store.Client.Close()
}

6
weed/filer2/topics.go

@ -0,0 +1,6 @@
package filer2
const (
	// TopicsDir is the filer directory under which message-queue topics live.
	TopicsDir = "/topics"
	// SystemLogDir holds the filer's own metadata-change log segments
	// (written by logFlushFunc, excluded from change notifications).
	SystemLogDir = TopicsDir + "/.system/log"
)

4
weed/filesys/dir.go

@ -58,7 +58,7 @@ func (dir *Dir) Attr(ctx context.Context, attr *fuse.Attr) error {
attr.Gid = dir.entry.Attributes.Gid attr.Gid = dir.entry.Attributes.Gid
attr.Uid = dir.entry.Attributes.Uid attr.Uid = dir.entry.Attributes.Uid
glog.V(3).Infof("dir Attr %s, attr: %+v", dir.FullPath(), attr)
glog.V(4).Infof("dir Attr %s, attr: %+v", dir.FullPath(), attr)
return nil return nil
} }
@ -200,7 +200,7 @@ func (dir *Dir) Mkdir(ctx context.Context, req *fuse.MkdirRequest) (fs.Node, err
func (dir *Dir) Lookup(ctx context.Context, req *fuse.LookupRequest, resp *fuse.LookupResponse) (node fs.Node, err error) { func (dir *Dir) Lookup(ctx context.Context, req *fuse.LookupRequest, resp *fuse.LookupResponse) (node fs.Node, err error) {
glog.V(4).Infof("dir Lookup %s: %s", dir.FullPath(), req.Name)
glog.V(4).Infof("dir Lookup %s: %s by %s", dir.FullPath(), req.Name, req.Header.String())
fullFilePath := util.NewFullPath(dir.FullPath(), req.Name) fullFilePath := util.NewFullPath(dir.FullPath(), req.Name)
entry := dir.wfs.cacheGet(fullFilePath) entry := dir.wfs.cacheGet(fullFilePath)

18
weed/filesys/dirty_page.go

@ -125,16 +125,18 @@ func (pages *ContinuousDirtyPages) saveExistingLargestPageToStorage() (chunk *fi
return nil, false, nil return nil, false, nil
} }
chunk, err = pages.saveToStorage(maxList.ToReader(), maxList.Offset(), maxList.Size())
if err == nil {
hasSavedData = true
glog.V(3).Infof("%s saveToStorage [%d,%d) %s", pages.f.fullpath(), maxList.Offset(), maxList.Offset()+maxList.Size(), chunk.FileId)
} else {
glog.V(0).Infof("%s saveToStorage [%d,%d): %v", pages.f.fullpath(), maxList.Offset(), maxList.Offset()+maxList.Size(), err)
return
for {
chunk, err = pages.saveToStorage(maxList.ToReader(), maxList.Offset(), maxList.Size())
if err == nil {
hasSavedData = true
glog.V(3).Infof("%s saveToStorage [%d,%d) %s", pages.f.fullpath(), maxList.Offset(), maxList.Offset()+maxList.Size(), chunk.FileId)
return
} else {
glog.V(0).Infof("%s saveToStorage [%d,%d): %v", pages.f.fullpath(), maxList.Offset(), maxList.Offset()+maxList.Size(), err)
time.Sleep(5 * time.Second)
}
} }
return
} }
func (pages *ContinuousDirtyPages) saveToStorage(reader io.Reader, offset int64, size int64) (*filer_pb.FileChunk, error) { func (pages *ContinuousDirtyPages) saveToStorage(reader io.Reader, offset int64, size int64) (*filer_pb.FileChunk, error) {

20
weed/filesys/filehandle.go

@ -33,12 +33,16 @@ type FileHandle struct {
} }
func newFileHandle(file *File, uid, gid uint32) *FileHandle { func newFileHandle(file *File, uid, gid uint32) *FileHandle {
return &FileHandle{
fh := &FileHandle{
f: file, f: file,
dirtyPages: newDirtyPages(file), dirtyPages: newDirtyPages(file),
Uid: uid, Uid: uid,
Gid: gid, Gid: gid,
} }
if fh.f.entry != nil {
fh.f.entry.Attributes.FileSize = filer2.TotalSize(fh.f.entry.Chunks)
}
return fh
} }
var _ = fs.Handle(&FileHandle{}) var _ = fs.Handle(&FileHandle{})
@ -110,21 +114,23 @@ func (fh *FileHandle) readFromChunks(buff []byte, offset int64) (int64, error) {
func (fh *FileHandle) Write(ctx context.Context, req *fuse.WriteRequest, resp *fuse.WriteResponse) error { func (fh *FileHandle) Write(ctx context.Context, req *fuse.WriteRequest, resp *fuse.WriteResponse) error {
// write the request to volume servers // write the request to volume servers
data := make([]byte, len(req.Data))
copy(data, req.Data)
fh.f.entry.Attributes.FileSize = uint64(max(req.Offset+int64(len(req.Data)), int64(fh.f.entry.Attributes.FileSize)))
fh.f.entry.Attributes.FileSize = uint64(max(req.Offset+int64(len(data)), int64(fh.f.entry.Attributes.FileSize)))
// glog.V(0).Infof("%v write [%d,%d)", fh.f.fullpath(), req.Offset, req.Offset+int64(len(req.Data))) // glog.V(0).Infof("%v write [%d,%d)", fh.f.fullpath(), req.Offset, req.Offset+int64(len(req.Data)))
chunks, err := fh.dirtyPages.AddPage(req.Offset, req.Data)
chunks, err := fh.dirtyPages.AddPage(req.Offset, data)
if err != nil { if err != nil {
glog.Errorf("%v write fh %d: [%d,%d): %v", fh.f.fullpath(), fh.handle, req.Offset, req.Offset+int64(len(req.Data)), err)
glog.Errorf("%v write fh %d: [%d,%d): %v", fh.f.fullpath(), fh.handle, req.Offset, req.Offset+int64(len(data)), err)
return fuse.EIO return fuse.EIO
} }
resp.Size = len(req.Data)
resp.Size = len(data)
if req.Offset == 0 { if req.Offset == 0 {
// detect mime type // detect mime type
detectedMIME := mimetype.Detect(req.Data)
detectedMIME := mimetype.Detect(data)
fh.contentType = detectedMIME.String() fh.contentType = detectedMIME.String()
if ext := path.Ext(fh.f.Name); ext != detectedMIME.Extension() { if ext := path.Ext(fh.f.Name); ext != detectedMIME.Extension() {
fh.contentType = mime.TypeByExtension(ext) fh.contentType = mime.TypeByExtension(ext)
@ -187,7 +193,7 @@ func (fh *FileHandle) Flush(ctx context.Context, req *fuse.FlushRequest) error {
fh.f.entry.Attributes.Gid = req.Gid fh.f.entry.Attributes.Gid = req.Gid
fh.f.entry.Attributes.Mtime = time.Now().Unix() fh.f.entry.Attributes.Mtime = time.Now().Unix()
fh.f.entry.Attributes.Crtime = time.Now().Unix() fh.f.entry.Attributes.Crtime = time.Now().Unix()
fh.f.entry.Attributes.FileMode = uint32(0777 &^ fh.f.wfs.option.Umask)
fh.f.entry.Attributes.FileMode = uint32(0666 &^ fh.f.wfs.option.Umask)
fh.f.entry.Attributes.Collection = fh.dirtyPages.collection fh.f.entry.Attributes.Collection = fh.dirtyPages.collection
fh.f.entry.Attributes.Replication = fh.dirtyPages.replication fh.f.entry.Attributes.Replication = fh.dirtyPages.replication
} }

32
weed/filesys/fscache.go

@ -9,6 +9,7 @@ import (
type FsCache struct { type FsCache struct {
root *FsNode root *FsNode
sync.RWMutex
} }
type FsNode struct { type FsNode struct {
parent *FsNode parent *FsNode
@ -27,6 +28,14 @@ func newFsCache(root fs.Node) *FsCache {
} }
func (c *FsCache) GetFsNode(path util.FullPath) fs.Node { func (c *FsCache) GetFsNode(path util.FullPath) fs.Node {
c.RLock()
defer c.RUnlock()
return c.doGetFsNode(path)
}
func (c *FsCache) doGetFsNode(path util.FullPath) fs.Node {
t := c.root t := c.root
for _, p := range path.Split() { for _, p := range path.Split() {
t = t.findChild(p) t = t.findChild(p)
@ -38,6 +47,14 @@ func (c *FsCache) GetFsNode(path util.FullPath) fs.Node {
} }
func (c *FsCache) SetFsNode(path util.FullPath, node fs.Node) { func (c *FsCache) SetFsNode(path util.FullPath, node fs.Node) {
c.Lock()
defer c.Unlock()
c.doSetFsNode(path, node)
}
func (c *FsCache) doSetFsNode(path util.FullPath, node fs.Node) {
t := c.root t := c.root
for _, p := range path.Split() { for _, p := range path.Split() {
t = t.ensureChild(p) t = t.ensureChild(p)
@ -46,16 +63,24 @@ func (c *FsCache) SetFsNode(path util.FullPath, node fs.Node) {
} }
func (c *FsCache) EnsureFsNode(path util.FullPath, genNodeFn func() fs.Node) fs.Node { func (c *FsCache) EnsureFsNode(path util.FullPath, genNodeFn func() fs.Node) fs.Node {
t := c.GetFsNode(path)
c.Lock()
defer c.Unlock()
t := c.doGetFsNode(path)
if t != nil { if t != nil {
return t return t
} }
t = genNodeFn() t = genNodeFn()
c.SetFsNode(path, t)
c.doSetFsNode(path, t)
return t return t
} }
func (c *FsCache) DeleteFsNode(path util.FullPath) { func (c *FsCache) DeleteFsNode(path util.FullPath) {
c.Lock()
defer c.Unlock()
t := c.root t := c.root
for _, p := range path.Split() { for _, p := range path.Split() {
t = t.findChild(p) t = t.findChild(p)
@ -72,6 +97,9 @@ func (c *FsCache) DeleteFsNode(path util.FullPath) {
// oldPath and newPath are full path including the new name // oldPath and newPath are full path including the new name
func (c *FsCache) Move(oldPath util.FullPath, newPath util.FullPath) *FsNode { func (c *FsCache) Move(oldPath util.FullPath, newPath util.FullPath) *FsNode {
c.Lock()
defer c.Unlock()
// find old node // find old node
src := c.root src := c.root
for _, p := range oldPath.Split() { for _, p := range oldPath.Split() {

72
weed/filesys/wfs.go

@ -15,25 +15,26 @@ import (
"github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/pb" "github.com/chrislusf/seaweedfs/weed/pb"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/pb/pb_cache"
"github.com/chrislusf/seaweedfs/weed/util" "github.com/chrislusf/seaweedfs/weed/util"
"github.com/chrislusf/seaweedfs/weed/util/chunk_cache"
"github.com/seaweedfs/fuse" "github.com/seaweedfs/fuse"
"github.com/seaweedfs/fuse/fs" "github.com/seaweedfs/fuse/fs"
) )
type Option struct { type Option struct {
FilerGrpcAddress string
GrpcDialOption grpc.DialOption
FilerMountRootPath string
Collection string
Replication string
TtlSec int32
ChunkSizeLimit int64
ChunkCacheCountLimit int64
DataCenter string
DirListCacheLimit int64
EntryCacheTtl time.Duration
Umask os.FileMode
FilerGrpcAddress string
GrpcDialOption grpc.DialOption
FilerMountRootPath string
Collection string
Replication string
TtlSec int32
ChunkSizeLimit int64
CacheDir string
CacheSizeMB int64
DataCenter string
DirListCacheLimit int64
EntryCacheTtl time.Duration
Umask os.FileMode
MountUid uint32 MountUid uint32
MountGid uint32 MountGid uint32
@ -54,9 +55,8 @@ type WFS struct {
listDirectoryEntriesCache *ccache.Cache listDirectoryEntriesCache *ccache.Cache
// contains all open handles, protected by handlesLock // contains all open handles, protected by handlesLock
handlesLock sync.Mutex
handles []*FileHandle
pathToHandleIndex map[util.FullPath]int
handlesLock sync.Mutex
handles map[uint64]*FileHandle
bufPool sync.Pool bufPool sync.Pool
@ -65,7 +65,7 @@ type WFS struct {
root fs.Node root fs.Node
fsNodeCache *FsCache fsNodeCache *FsCache
chunkCache *pb_cache.ChunkCache
chunkCache *chunk_cache.ChunkCache
} }
type statsCache struct { type statsCache struct {
filer_pb.StatisticsResponse filer_pb.StatisticsResponse
@ -76,13 +76,18 @@ func NewSeaweedFileSystem(option *Option) *WFS {
wfs := &WFS{ wfs := &WFS{
option: option, option: option,
listDirectoryEntriesCache: ccache.New(ccache.Configure().MaxSize(option.DirListCacheLimit * 3).ItemsToPrune(100)), listDirectoryEntriesCache: ccache.New(ccache.Configure().MaxSize(option.DirListCacheLimit * 3).ItemsToPrune(100)),
pathToHandleIndex: make(map[util.FullPath]int),
handles: make(map[uint64]*FileHandle),
bufPool: sync.Pool{ bufPool: sync.Pool{
New: func() interface{} { New: func() interface{} {
return make([]byte, option.ChunkSizeLimit) return make([]byte, option.ChunkSizeLimit)
}, },
}, },
chunkCache: pb_cache.NewChunkCache(option.ChunkCacheCountLimit),
}
if option.CacheSizeMB > 0 {
wfs.chunkCache = chunk_cache.NewChunkCache(256, option.CacheDir, option.CacheSizeMB, 4)
util.OnInterrupt(func() {
wfs.chunkCache.Shutdown()
})
} }
wfs.root = &Dir{name: wfs.option.FilerMountRootPath, wfs: wfs} wfs.root = &Dir{name: wfs.option.FilerMountRootPath, wfs: wfs}
@ -117,26 +122,15 @@ func (wfs *WFS) AcquireHandle(file *File, uid, gid uint32) (fileHandle *FileHand
wfs.handlesLock.Lock() wfs.handlesLock.Lock()
defer wfs.handlesLock.Unlock() defer wfs.handlesLock.Unlock()
index, found := wfs.pathToHandleIndex[fullpath]
if found && wfs.handles[index] != nil {
glog.V(2).Infoln(fullpath, "found fileHandle id", index)
return wfs.handles[index]
inodeId := file.fullpath().AsInode()
existingHandle, found := wfs.handles[inodeId]
if found && existingHandle != nil {
return existingHandle
} }
fileHandle = newFileHandle(file, uid, gid) fileHandle = newFileHandle(file, uid, gid)
for i, h := range wfs.handles {
if h == nil {
wfs.handles[i] = fileHandle
fileHandle.handle = uint64(i)
wfs.pathToHandleIndex[fullpath] = i
glog.V(4).Infof("%s reuse fh %d", fullpath, fileHandle.handle)
return
}
}
wfs.handles = append(wfs.handles, fileHandle)
fileHandle.handle = uint64(len(wfs.handles) - 1)
wfs.pathToHandleIndex[fullpath] = int(fileHandle.handle)
wfs.handles[inodeId] = fileHandle
fileHandle.handle = inodeId
glog.V(4).Infof("%s new fh %d", fullpath, fileHandle.handle) glog.V(4).Infof("%s new fh %d", fullpath, fileHandle.handle)
return return
@ -147,10 +141,8 @@ func (wfs *WFS) ReleaseHandle(fullpath util.FullPath, handleId fuse.HandleID) {
defer wfs.handlesLock.Unlock() defer wfs.handlesLock.Unlock()
glog.V(4).Infof("%s ReleaseHandle id %d current handles length %d", fullpath, handleId, len(wfs.handles)) glog.V(4).Infof("%s ReleaseHandle id %d current handles length %d", fullpath, handleId, len(wfs.handles))
delete(wfs.pathToHandleIndex, fullpath)
if int(handleId) < len(wfs.handles) {
wfs.handles[int(handleId)] = nil
}
delete(wfs.handles, fullpath.AsInode())
return return
} }

2
weed/images/orientation.go

@ -7,7 +7,7 @@ import (
"image/jpeg" "image/jpeg"
"log" "log"
"github.com/rwcarlsen/goexif/exif"
"github.com/seaweedfs/goexif/exif"
) )
//many code is copied from http://camlistore.org/pkg/images/images.go //many code is copied from http://camlistore.org/pkg/images/images.go

4
weed/operation/upload_content.go

@ -45,11 +45,9 @@ var fileNameEscaper = strings.NewReplacer("\\", "\\\\", "\"", "\\\"")
// Upload sends a POST request to a volume server to upload the content with adjustable compression level // Upload sends a POST request to a volume server to upload the content with adjustable compression level
func UploadData(uploadUrl string, filename string, cipher bool, data []byte, isInputGzipped bool, mtype string, pairMap map[string]string, jwt security.EncodedJwt) (uploadResult *UploadResult, err error) { func UploadData(uploadUrl string, filename string, cipher bool, data []byte, isInputGzipped bool, mtype string, pairMap map[string]string, jwt security.EncodedJwt) (uploadResult *UploadResult, err error) {
hash := md5.New()
hash.Write(data)
uploadResult, err = doUploadData(uploadUrl, filename, cipher, data, isInputGzipped, mtype, pairMap, jwt) uploadResult, err = doUploadData(uploadUrl, filename, cipher, data, isInputGzipped, mtype, pairMap, jwt)
if uploadResult != nil { if uploadResult != nil {
uploadResult.Md5 = fmt.Sprintf("%x", hash.Sum(nil))
uploadResult.Md5 = util.Md5(data)
} }
return return
} }

8
weed/pb/filer.proto

@ -42,7 +42,7 @@ service SeaweedFiler {
rpc GetFilerConfiguration (GetFilerConfigurationRequest) returns (GetFilerConfigurationResponse) { rpc GetFilerConfiguration (GetFilerConfigurationRequest) returns (GetFilerConfigurationResponse) {
} }
rpc ListenForEvents (ListenForEventsRequest) returns (stream FullEventNotification) {
rpc SubscribeMetadata (SubscribeMetadataRequest) returns (stream SubscribeMetadataResponse) {
} }
} }
@ -123,6 +123,7 @@ message FuseAttributes {
string user_name = 11; // for hdfs string user_name = 11; // for hdfs
repeated string group_name = 12; // for hdfs repeated string group_name = 12; // for hdfs
string symlink_target = 13; string symlink_target = 13;
bytes md5 = 14;
} }
message CreateEntryRequest { message CreateEntryRequest {
@ -230,16 +231,15 @@ message GetFilerConfigurationResponse {
string collection = 3; string collection = 3;
uint32 max_mb = 4; uint32 max_mb = 4;
string dir_buckets = 5; string dir_buckets = 5;
string dir_queues = 6;
bool cipher = 7; bool cipher = 7;
} }
message ListenForEventsRequest {
message SubscribeMetadataRequest {
string client_name = 1; string client_name = 1;
string path_prefix = 2; string path_prefix = 2;
int64 since_ns = 3; int64 since_ns = 3;
} }
message FullEventNotification {
message SubscribeMetadataResponse {
string directory = 1; string directory = 1;
EventNotification event_notification = 2; EventNotification event_notification = 2;
} }

331
weed/pb/filer_pb/filer.pb.go

@ -39,8 +39,8 @@ It has these top-level messages:
StatisticsResponse StatisticsResponse
GetFilerConfigurationRequest GetFilerConfigurationRequest
GetFilerConfigurationResponse GetFilerConfigurationResponse
ListenForEventsRequest
FullEventNotification
SubscribeMetadataRequest
SubscribeMetadataResponse
LogEntry LogEntry
*/ */
package filer_pb package filer_pb
@ -415,6 +415,7 @@ type FuseAttributes struct {
UserName string `protobuf:"bytes,11,opt,name=user_name,json=userName" json:"user_name,omitempty"` UserName string `protobuf:"bytes,11,opt,name=user_name,json=userName" json:"user_name,omitempty"`
GroupName []string `protobuf:"bytes,12,rep,name=group_name,json=groupName" json:"group_name,omitempty"` GroupName []string `protobuf:"bytes,12,rep,name=group_name,json=groupName" json:"group_name,omitempty"`
SymlinkTarget string `protobuf:"bytes,13,opt,name=symlink_target,json=symlinkTarget" json:"symlink_target,omitempty"` SymlinkTarget string `protobuf:"bytes,13,opt,name=symlink_target,json=symlinkTarget" json:"symlink_target,omitempty"`
Md5 []byte `protobuf:"bytes,14,opt,name=md5,proto3" json:"md5,omitempty"`
} }
func (m *FuseAttributes) Reset() { *m = FuseAttributes{} } func (m *FuseAttributes) Reset() { *m = FuseAttributes{} }
@ -513,6 +514,13 @@ func (m *FuseAttributes) GetSymlinkTarget() string {
return "" return ""
} }
func (m *FuseAttributes) GetMd5() []byte {
if m != nil {
return m.Md5
}
return nil
}
type CreateEntryRequest struct { type CreateEntryRequest struct {
Directory string `protobuf:"bytes,1,opt,name=directory" json:"directory,omitempty"` Directory string `protobuf:"bytes,1,opt,name=directory" json:"directory,omitempty"`
Entry *Entry `protobuf:"bytes,2,opt,name=entry" json:"entry,omitempty"` Entry *Entry `protobuf:"bytes,2,opt,name=entry" json:"entry,omitempty"`
@ -1032,7 +1040,6 @@ type GetFilerConfigurationResponse struct {
Collection string `protobuf:"bytes,3,opt,name=collection" json:"collection,omitempty"` Collection string `protobuf:"bytes,3,opt,name=collection" json:"collection,omitempty"`
MaxMb uint32 `protobuf:"varint,4,opt,name=max_mb,json=maxMb" json:"max_mb,omitempty"` MaxMb uint32 `protobuf:"varint,4,opt,name=max_mb,json=maxMb" json:"max_mb,omitempty"`
DirBuckets string `protobuf:"bytes,5,opt,name=dir_buckets,json=dirBuckets" json:"dir_buckets,omitempty"` DirBuckets string `protobuf:"bytes,5,opt,name=dir_buckets,json=dirBuckets" json:"dir_buckets,omitempty"`
DirQueues string `protobuf:"bytes,6,opt,name=dir_queues,json=dirQueues" json:"dir_queues,omitempty"`
Cipher bool `protobuf:"varint,7,opt,name=cipher" json:"cipher,omitempty"` Cipher bool `protobuf:"varint,7,opt,name=cipher" json:"cipher,omitempty"`
} }
@ -1076,13 +1083,6 @@ func (m *GetFilerConfigurationResponse) GetDirBuckets() string {
return "" return ""
} }
func (m *GetFilerConfigurationResponse) GetDirQueues() string {
if m != nil {
return m.DirQueues
}
return ""
}
func (m *GetFilerConfigurationResponse) GetCipher() bool { func (m *GetFilerConfigurationResponse) GetCipher() bool {
if m != nil { if m != nil {
return m.Cipher return m.Cipher
@ -1090,56 +1090,56 @@ func (m *GetFilerConfigurationResponse) GetCipher() bool {
return false return false
} }
type ListenForEventsRequest struct {
type SubscribeMetadataRequest struct {
ClientName string `protobuf:"bytes,1,opt,name=client_name,json=clientName" json:"client_name,omitempty"` ClientName string `protobuf:"bytes,1,opt,name=client_name,json=clientName" json:"client_name,omitempty"`
PathPrefix string `protobuf:"bytes,2,opt,name=path_prefix,json=pathPrefix" json:"path_prefix,omitempty"` PathPrefix string `protobuf:"bytes,2,opt,name=path_prefix,json=pathPrefix" json:"path_prefix,omitempty"`
SinceNs int64 `protobuf:"varint,3,opt,name=since_ns,json=sinceNs" json:"since_ns,omitempty"` SinceNs int64 `protobuf:"varint,3,opt,name=since_ns,json=sinceNs" json:"since_ns,omitempty"`
} }
func (m *ListenForEventsRequest) Reset() { *m = ListenForEventsRequest{} }
func (m *ListenForEventsRequest) String() string { return proto.CompactTextString(m) }
func (*ListenForEventsRequest) ProtoMessage() {}
func (*ListenForEventsRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{30} }
func (m *SubscribeMetadataRequest) Reset() { *m = SubscribeMetadataRequest{} }
func (m *SubscribeMetadataRequest) String() string { return proto.CompactTextString(m) }
func (*SubscribeMetadataRequest) ProtoMessage() {}
func (*SubscribeMetadataRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{30} }
func (m *ListenForEventsRequest) GetClientName() string {
func (m *SubscribeMetadataRequest) GetClientName() string {
if m != nil { if m != nil {
return m.ClientName return m.ClientName
} }
return "" return ""
} }
func (m *ListenForEventsRequest) GetPathPrefix() string {
func (m *SubscribeMetadataRequest) GetPathPrefix() string {
if m != nil { if m != nil {
return m.PathPrefix return m.PathPrefix
} }
return "" return ""
} }
func (m *ListenForEventsRequest) GetSinceNs() int64 {
func (m *SubscribeMetadataRequest) GetSinceNs() int64 {
if m != nil { if m != nil {
return m.SinceNs return m.SinceNs
} }
return 0 return 0
} }
type FullEventNotification struct {
type SubscribeMetadataResponse struct {
Directory string `protobuf:"bytes,1,opt,name=directory" json:"directory,omitempty"` Directory string `protobuf:"bytes,1,opt,name=directory" json:"directory,omitempty"`
EventNotification *EventNotification `protobuf:"bytes,2,opt,name=event_notification,json=eventNotification" json:"event_notification,omitempty"` EventNotification *EventNotification `protobuf:"bytes,2,opt,name=event_notification,json=eventNotification" json:"event_notification,omitempty"`
} }
func (m *FullEventNotification) Reset() { *m = FullEventNotification{} }
func (m *FullEventNotification) String() string { return proto.CompactTextString(m) }
func (*FullEventNotification) ProtoMessage() {}
func (*FullEventNotification) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{31} }
func (m *SubscribeMetadataResponse) Reset() { *m = SubscribeMetadataResponse{} }
func (m *SubscribeMetadataResponse) String() string { return proto.CompactTextString(m) }
func (*SubscribeMetadataResponse) ProtoMessage() {}
func (*SubscribeMetadataResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{31} }
func (m *FullEventNotification) GetDirectory() string {
func (m *SubscribeMetadataResponse) GetDirectory() string {
if m != nil { if m != nil {
return m.Directory return m.Directory
} }
return "" return ""
} }
func (m *FullEventNotification) GetEventNotification() *EventNotification {
func (m *SubscribeMetadataResponse) GetEventNotification() *EventNotification {
if m != nil { if m != nil {
return m.EventNotification return m.EventNotification
} }
@ -1209,8 +1209,8 @@ func init() {
proto.RegisterType((*StatisticsResponse)(nil), "filer_pb.StatisticsResponse") proto.RegisterType((*StatisticsResponse)(nil), "filer_pb.StatisticsResponse")
proto.RegisterType((*GetFilerConfigurationRequest)(nil), "filer_pb.GetFilerConfigurationRequest") proto.RegisterType((*GetFilerConfigurationRequest)(nil), "filer_pb.GetFilerConfigurationRequest")
proto.RegisterType((*GetFilerConfigurationResponse)(nil), "filer_pb.GetFilerConfigurationResponse") proto.RegisterType((*GetFilerConfigurationResponse)(nil), "filer_pb.GetFilerConfigurationResponse")
proto.RegisterType((*ListenForEventsRequest)(nil), "filer_pb.ListenForEventsRequest")
proto.RegisterType((*FullEventNotification)(nil), "filer_pb.FullEventNotification")
proto.RegisterType((*SubscribeMetadataRequest)(nil), "filer_pb.SubscribeMetadataRequest")
proto.RegisterType((*SubscribeMetadataResponse)(nil), "filer_pb.SubscribeMetadataResponse")
proto.RegisterType((*LogEntry)(nil), "filer_pb.LogEntry") proto.RegisterType((*LogEntry)(nil), "filer_pb.LogEntry")
} }
@ -1236,7 +1236,7 @@ type SeaweedFilerClient interface {
DeleteCollection(ctx context.Context, in *DeleteCollectionRequest, opts ...grpc.CallOption) (*DeleteCollectionResponse, error) DeleteCollection(ctx context.Context, in *DeleteCollectionRequest, opts ...grpc.CallOption) (*DeleteCollectionResponse, error)
Statistics(ctx context.Context, in *StatisticsRequest, opts ...grpc.CallOption) (*StatisticsResponse, error) Statistics(ctx context.Context, in *StatisticsRequest, opts ...grpc.CallOption) (*StatisticsResponse, error)
GetFilerConfiguration(ctx context.Context, in *GetFilerConfigurationRequest, opts ...grpc.CallOption) (*GetFilerConfigurationResponse, error) GetFilerConfiguration(ctx context.Context, in *GetFilerConfigurationRequest, opts ...grpc.CallOption) (*GetFilerConfigurationResponse, error)
ListenForEvents(ctx context.Context, in *ListenForEventsRequest, opts ...grpc.CallOption) (SeaweedFiler_ListenForEventsClient, error)
SubscribeMetadata(ctx context.Context, in *SubscribeMetadataRequest, opts ...grpc.CallOption) (SeaweedFiler_SubscribeMetadataClient, error)
} }
type seaweedFilerClient struct { type seaweedFilerClient struct {
@ -1369,12 +1369,12 @@ func (c *seaweedFilerClient) GetFilerConfiguration(ctx context.Context, in *GetF
return out, nil return out, nil
} }
func (c *seaweedFilerClient) ListenForEvents(ctx context.Context, in *ListenForEventsRequest, opts ...grpc.CallOption) (SeaweedFiler_ListenForEventsClient, error) {
stream, err := grpc.NewClientStream(ctx, &_SeaweedFiler_serviceDesc.Streams[1], c.cc, "/filer_pb.SeaweedFiler/ListenForEvents", opts...)
func (c *seaweedFilerClient) SubscribeMetadata(ctx context.Context, in *SubscribeMetadataRequest, opts ...grpc.CallOption) (SeaweedFiler_SubscribeMetadataClient, error) {
stream, err := grpc.NewClientStream(ctx, &_SeaweedFiler_serviceDesc.Streams[1], c.cc, "/filer_pb.SeaweedFiler/SubscribeMetadata", opts...)
if err != nil { if err != nil {
return nil, err return nil, err
} }
x := &seaweedFilerListenForEventsClient{stream}
x := &seaweedFilerSubscribeMetadataClient{stream}
if err := x.ClientStream.SendMsg(in); err != nil { if err := x.ClientStream.SendMsg(in); err != nil {
return nil, err return nil, err
} }
@ -1384,17 +1384,17 @@ func (c *seaweedFilerClient) ListenForEvents(ctx context.Context, in *ListenForE
return x, nil return x, nil
} }
type SeaweedFiler_ListenForEventsClient interface {
Recv() (*FullEventNotification, error)
type SeaweedFiler_SubscribeMetadataClient interface {
Recv() (*SubscribeMetadataResponse, error)
grpc.ClientStream grpc.ClientStream
} }
type seaweedFilerListenForEventsClient struct {
type seaweedFilerSubscribeMetadataClient struct {
grpc.ClientStream grpc.ClientStream
} }
func (x *seaweedFilerListenForEventsClient) Recv() (*FullEventNotification, error) {
m := new(FullEventNotification)
func (x *seaweedFilerSubscribeMetadataClient) Recv() (*SubscribeMetadataResponse, error) {
m := new(SubscribeMetadataResponse)
if err := x.ClientStream.RecvMsg(m); err != nil { if err := x.ClientStream.RecvMsg(m); err != nil {
return nil, err return nil, err
} }
@ -1415,7 +1415,7 @@ type SeaweedFilerServer interface {
DeleteCollection(context.Context, *DeleteCollectionRequest) (*DeleteCollectionResponse, error) DeleteCollection(context.Context, *DeleteCollectionRequest) (*DeleteCollectionResponse, error)
Statistics(context.Context, *StatisticsRequest) (*StatisticsResponse, error) Statistics(context.Context, *StatisticsRequest) (*StatisticsResponse, error)
GetFilerConfiguration(context.Context, *GetFilerConfigurationRequest) (*GetFilerConfigurationResponse, error) GetFilerConfiguration(context.Context, *GetFilerConfigurationRequest) (*GetFilerConfigurationResponse, error)
ListenForEvents(*ListenForEventsRequest, SeaweedFiler_ListenForEventsServer) error
SubscribeMetadata(*SubscribeMetadataRequest, SeaweedFiler_SubscribeMetadataServer) error
} }
func RegisterSeaweedFilerServer(s *grpc.Server, srv SeaweedFilerServer) { func RegisterSeaweedFilerServer(s *grpc.Server, srv SeaweedFilerServer) {
@ -1623,24 +1623,24 @@ func _SeaweedFiler_GetFilerConfiguration_Handler(srv interface{}, ctx context.Co
return interceptor(ctx, in, info, handler) return interceptor(ctx, in, info, handler)
} }
func _SeaweedFiler_ListenForEvents_Handler(srv interface{}, stream grpc.ServerStream) error {
m := new(ListenForEventsRequest)
func _SeaweedFiler_SubscribeMetadata_Handler(srv interface{}, stream grpc.ServerStream) error {
m := new(SubscribeMetadataRequest)
if err := stream.RecvMsg(m); err != nil { if err := stream.RecvMsg(m); err != nil {
return err return err
} }
return srv.(SeaweedFilerServer).ListenForEvents(m, &seaweedFilerListenForEventsServer{stream})
return srv.(SeaweedFilerServer).SubscribeMetadata(m, &seaweedFilerSubscribeMetadataServer{stream})
} }
type SeaweedFiler_ListenForEventsServer interface {
Send(*FullEventNotification) error
type SeaweedFiler_SubscribeMetadataServer interface {
Send(*SubscribeMetadataResponse) error
grpc.ServerStream grpc.ServerStream
} }
type seaweedFilerListenForEventsServer struct {
type seaweedFilerSubscribeMetadataServer struct {
grpc.ServerStream grpc.ServerStream
} }
func (x *seaweedFilerListenForEventsServer) Send(m *FullEventNotification) error {
func (x *seaweedFilerSubscribeMetadataServer) Send(m *SubscribeMetadataResponse) error {
return x.ServerStream.SendMsg(m) return x.ServerStream.SendMsg(m)
} }
@ -1696,8 +1696,8 @@ var _SeaweedFiler_serviceDesc = grpc.ServiceDesc{
ServerStreams: true, ServerStreams: true,
}, },
{ {
StreamName: "ListenForEvents",
Handler: _SeaweedFiler_ListenForEvents_Handler,
StreamName: "SubscribeMetadata",
Handler: _SeaweedFiler_SubscribeMetadata_Handler,
ServerStreams: true, ServerStreams: true,
}, },
}, },
@ -1707,125 +1707,124 @@ var _SeaweedFiler_serviceDesc = grpc.ServiceDesc{
func init() { proto.RegisterFile("filer.proto", fileDescriptor0) } func init() { proto.RegisterFile("filer.proto", fileDescriptor0) }
var fileDescriptor0 = []byte{ var fileDescriptor0 = []byte{
// 1909 bytes of a gzipped FileDescriptorProto
// 1903 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xb4, 0x58, 0x5f, 0x6f, 0xdc, 0xc6, 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xb4, 0x58, 0x5f, 0x6f, 0xdc, 0xc6,
0x11, 0x37, 0xef, 0x74, 0x7f, 0x38, 0x77, 0x67, 0x4b, 0x7b, 0xb2, 0x73, 0x3e, 0x4b, 0xb6, 0x42,
0xd7, 0xa9, 0x0b, 0x1b, 0xaa, 0xa1, 0xe6, 0x21, 0x69, 0xda, 0x07, 0x5b, 0x96, 0x52, 0x37, 0xb6,
0xe2, 0x52, 0x76, 0x91, 0xa2, 0x40, 0x09, 0x8a, 0x5c, 0xdd, 0x6d, 0xc5, 0x23, 0x99, 0xdd, 0xa5,
0xfe, 0xe4, 0xad, 0x5f, 0xa3, 0x40, 0x1f, 0xfa, 0x1d, 0xfa, 0x58, 0xf4, 0xa5, 0x28, 0xd0, 0xcf,
0xd1, 0xc7, 0x3e, 0xf4, 0x33, 0x14, 0x3b, 0x4b, 0xf2, 0x96, 0xc7, 0x93, 0x94, 0x20, 0xc8, 0x1b,
0x77, 0x66, 0x76, 0x76, 0x76, 0xfe, 0xfc, 0x66, 0x96, 0xd0, 0x3b, 0x66, 0x11, 0xe5, 0xdb, 0x29,
0x4f, 0x64, 0x42, 0xba, 0xb8, 0xf0, 0xd2, 0x23, 0xe7, 0x4b, 0xb8, 0xf7, 0x3a, 0x49, 0x4e, 0xb2,
0xf4, 0x25, 0xe3, 0x34, 0x90, 0x09, 0xbf, 0xd8, 0x8b, 0x25, 0xbf, 0x70, 0xe9, 0xd7, 0x19, 0x15,
0x92, 0x6c, 0x80, 0x1d, 0x16, 0x8c, 0x91, 0xb5, 0x65, 0x3d, 0xb6, 0xdd, 0x39, 0x81, 0x10, 0x58,
0x89, 0xfd, 0x19, 0x1d, 0x35, 0x90, 0x81, 0xdf, 0xce, 0x1e, 0x6c, 0x2c, 0x57, 0x28, 0xd2, 0x24,
0x16, 0x94, 0x3c, 0x82, 0x16, 0x55, 0x04, 0xd4, 0xd6, 0xdb, 0xb9, 0xb5, 0x5d, 0x98, 0xb2, 0xad,
0xe5, 0x34, 0xd7, 0xf9, 0x87, 0x05, 0xe4, 0x35, 0x13, 0x52, 0x11, 0x19, 0x15, 0xdf, 0xce, 0x9e,
0x3b, 0xd0, 0x4e, 0x39, 0x3d, 0x66, 0xe7, 0xb9, 0x45, 0xf9, 0x8a, 0x3c, 0x85, 0x35, 0x21, 0x7d,
0x2e, 0xf7, 0x79, 0x32, 0xdb, 0x67, 0x11, 0x3d, 0x50, 0x46, 0x37, 0x51, 0xa4, 0xce, 0x20, 0xdb,
0x40, 0x58, 0x1c, 0x44, 0x99, 0x60, 0xa7, 0xf4, 0xb0, 0xe0, 0x8e, 0x56, 0xb6, 0xac, 0xc7, 0x5d,
0x77, 0x09, 0x87, 0xac, 0x43, 0x2b, 0x62, 0x33, 0x26, 0x47, 0xad, 0x2d, 0xeb, 0xf1, 0xc0, 0xd5,
0x0b, 0xe7, 0x17, 0x30, 0xac, 0xd8, 0xff, 0xdd, 0xae, 0xff, 0x97, 0x06, 0xb4, 0x90, 0x50, 0xfa,
0xd8, 0x9a, 0xfb, 0x98, 0x7c, 0x08, 0x7d, 0x26, 0xbc, 0xb9, 0x23, 0x1a, 0x68, 0x5b, 0x8f, 0x89,
0xd2, 0xe7, 0xe4, 0x09, 0xb4, 0x83, 0x69, 0x16, 0x9f, 0x88, 0x51, 0x73, 0xab, 0xf9, 0xb8, 0xb7,
0x33, 0x9c, 0x1f, 0xa4, 0x2e, 0xba, 0xab, 0x78, 0x6e, 0x2e, 0x42, 0x3e, 0x01, 0xf0, 0xa5, 0xe4,
0xec, 0x28, 0x93, 0x54, 0xe0, 0x4d, 0x7b, 0x3b, 0x23, 0x63, 0x43, 0x26, 0xe8, 0xf3, 0x92, 0xef,
0x1a, 0xb2, 0xe4, 0x53, 0xe8, 0xd2, 0x73, 0x49, 0xe3, 0x90, 0x86, 0xa3, 0x16, 0x1e, 0xb4, 0xb9,
0x70, 0xa3, 0xed, 0xbd, 0x9c, 0xaf, 0xef, 0x57, 0x8a, 0x8f, 0x3f, 0x83, 0x41, 0x85, 0x45, 0x56,
0xa1, 0x79, 0x42, 0x8b, 0xa8, 0xaa, 0x4f, 0xe5, 0xd9, 0x53, 0x3f, 0xca, 0x74, 0x82, 0xf5, 0x5d,
0xbd, 0xf8, 0x79, 0xe3, 0x13, 0xcb, 0x79, 0x09, 0xf6, 0x7e, 0x16, 0x45, 0xe5, 0xc6, 0x90, 0xf1,
0x62, 0x63, 0xc8, 0xf8, 0xdc, 0xcb, 0x8d, 0x2b, 0xbd, 0xfc, 0x77, 0x0b, 0xd6, 0xf6, 0x4e, 0x69,
0x2c, 0x0f, 0x12, 0xc9, 0x8e, 0x59, 0xe0, 0x4b, 0x96, 0xc4, 0xe4, 0x29, 0xd8, 0x49, 0x14, 0x7a,
0x57, 0x86, 0xa9, 0x9b, 0x44, 0xb9, 0xd5, 0x4f, 0xc1, 0x8e, 0xe9, 0x99, 0x77, 0xe5, 0x71, 0xdd,
0x98, 0x9e, 0x69, 0xe9, 0x87, 0x30, 0x08, 0x69, 0x44, 0x25, 0xf5, 0xca, 0xe8, 0xa8, 0xd0, 0xf5,
0x35, 0x71, 0x57, 0x87, 0xe3, 0x23, 0xb8, 0xa5, 0x54, 0xa6, 0x3e, 0xa7, 0xb1, 0xf4, 0x52, 0x5f,
0x4e, 0x31, 0x26, 0xb6, 0x3b, 0x88, 0xe9, 0xd9, 0x5b, 0xa4, 0xbe, 0xf5, 0xe5, 0xd4, 0xf9, 0x5b,
0x03, 0xec, 0x32, 0x98, 0xe4, 0x03, 0xe8, 0xa8, 0x63, 0x3d, 0x16, 0xe6, 0x9e, 0x68, 0xab, 0xe5,
0xab, 0x50, 0x55, 0x45, 0x72, 0x7c, 0x2c, 0xa8, 0x44, 0xf3, 0x9a, 0x6e, 0xbe, 0x52, 0x99, 0x25,
0xd8, 0x37, 0xba, 0x10, 0x56, 0x5c, 0xfc, 0x56, 0x1e, 0x9f, 0x49, 0x36, 0xa3, 0x78, 0x60, 0xd3,
0xd5, 0x0b, 0x32, 0x84, 0x16, 0xf5, 0xa4, 0x3f, 0xc1, 0x0c, 0xb7, 0xdd, 0x15, 0xfa, 0xce, 0x9f,
0x90, 0x1f, 0xc1, 0x4d, 0x91, 0x64, 0x3c, 0xa0, 0x5e, 0x71, 0x6c, 0x1b, 0xb9, 0x7d, 0x4d, 0xdd,
0xd7, 0x87, 0x3b, 0xd0, 0x3c, 0x66, 0xe1, 0xa8, 0x83, 0x8e, 0x59, 0xad, 0x26, 0xe1, 0xab, 0xd0,
0x55, 0x4c, 0xf2, 0x53, 0x80, 0x52, 0x53, 0x38, 0xea, 0x5e, 0x22, 0x6a, 0x17, 0x7a, 0x43, 0xb2,
0x09, 0x10, 0xb0, 0x74, 0x4a, 0xb9, 0xa7, 0x12, 0xc6, 0xc6, 0xe4, 0xb0, 0x35, 0xe5, 0x0b, 0x7a,
0xa1, 0xd8, 0x4c, 0x78, 0x93, 0x6f, 0x58, 0x9a, 0xd2, 0x70, 0x04, 0xe8, 0x61, 0x9b, 0x89, 0xcf,
0x35, 0xc1, 0xf9, 0x0a, 0xda, 0xb9, 0x71, 0xf7, 0xc0, 0x3e, 0x4d, 0xa2, 0x6c, 0x56, 0x3a, 0x6d,
0xe0, 0x76, 0x35, 0xe1, 0x55, 0x48, 0xee, 0x02, 0xa2, 0x24, 0x1e, 0xd1, 0x40, 0x17, 0xa1, 0x7f,
0xd5, 0x01, 0x77, 0xa0, 0x1d, 0x24, 0xc9, 0x09, 0xd3, 0xbe, 0xeb, 0xb8, 0xf9, 0xca, 0xf9, 0x5f,
0x03, 0x6e, 0x56, 0x8b, 0x45, 0x1d, 0x81, 0x5a, 0xd0, 0xd3, 0x16, 0xaa, 0x41, 0xb5, 0x87, 0x15,
0x6f, 0x37, 0x4c, 0x6f, 0x17, 0x5b, 0x66, 0x49, 0xa8, 0x0f, 0x18, 0xe8, 0x2d, 0x6f, 0x92, 0x90,
0xaa, 0x5c, 0xcf, 0x58, 0x88, 0xe1, 0x19, 0xb8, 0xea, 0x53, 0x51, 0x26, 0x2c, 0xcc, 0xc1, 0x47,
0x7d, 0xa2, 0x79, 0x1c, 0xf5, 0xb6, 0x75, 0xc0, 0xf5, 0x4a, 0x05, 0x7c, 0xa6, 0xa8, 0x1d, 0x1d,
0x45, 0xf5, 0x4d, 0xb6, 0xa0, 0xc7, 0x69, 0x1a, 0xe5, 0xb9, 0x8f, 0xce, 0xb7, 0x5d, 0x93, 0x44,
0xee, 0x03, 0x04, 0x49, 0x14, 0xd1, 0x00, 0x05, 0x6c, 0x14, 0x30, 0x28, 0x2a, 0xef, 0xa4, 0x8c,
0x3c, 0x41, 0x03, 0x74, 0x75, 0xcb, 0x6d, 0x4b, 0x19, 0x1d, 0xd2, 0x40, 0xdd, 0x23, 0x13, 0x94,
0x7b, 0x08, 0x5f, 0x3d, 0xdc, 0xd7, 0x55, 0x04, 0x04, 0xd9, 0x4d, 0x80, 0x09, 0x4f, 0xb2, 0x54,
0x73, 0xfb, 0x5b, 0x4d, 0x85, 0xe4, 0x48, 0x41, 0xf6, 0x23, 0xb8, 0x29, 0x2e, 0x66, 0x11, 0x8b,
0x4f, 0x3c, 0xe9, 0xf3, 0x09, 0x95, 0xa3, 0x81, 0xae, 0x80, 0x9c, 0xfa, 0x0e, 0x89, 0x4e, 0x0a,
0x64, 0x97, 0x53, 0x5f, 0xd2, 0xef, 0xd0, 0xb4, 0xbe, 0x1d, 0x36, 0x90, 0xdb, 0xd0, 0x4e, 0x3c,
0x7a, 0x1e, 0x44, 0x79, 0x89, 0xb6, 0x92, 0xbd, 0xf3, 0x20, 0x72, 0x9e, 0xc0, 0xb0, 0x72, 0x62,
0x0e, 0xeb, 0xeb, 0xd0, 0xa2, 0x9c, 0x27, 0x05, 0x08, 0xe9, 0x85, 0xf3, 0x3b, 0x20, 0xef, 0xd3,
0xf0, 0x87, 0x30, 0xcf, 0xb9, 0x0d, 0xc3, 0x8a, 0x6a, 0x6d, 0x87, 0xf3, 0x2f, 0x0b, 0xc8, 0x4b,
0xc4, 0x92, 0xef, 0xd7, 0xc6, 0x55, 0x75, 0xab, 0x16, 0xa3, 0xb1, 0x2a, 0xf4, 0xa5, 0x9f, 0x37,
0xc0, 0x3e, 0x13, 0x5a, 0xff, 0x4b, 0x5f, 0xfa, 0x79, 0x23, 0xe2, 0x34, 0xc8, 0xb8, 0xea, 0x89,
0x98, 0x84, 0xd8, 0x88, 0xdc, 0x82, 0x44, 0x3e, 0x86, 0x3b, 0x6c, 0x12, 0x27, 0x9c, 0xce, 0xc5,
0x3c, 0xed, 0xaa, 0x36, 0x0a, 0xaf, 0x6b, 0x6e, 0xb9, 0x61, 0x0f, 0x3d, 0xf7, 0x04, 0x86, 0x95,
0x6b, 0x5c, 0xe9, 0xe6, 0x3f, 0x5b, 0x30, 0x7a, 0x2e, 0x93, 0x19, 0x0b, 0x5c, 0xaa, 0x8c, 0xaf,
0x5c, 0xfd, 0x21, 0x0c, 0x14, 0x9a, 0x2f, 0x5e, 0xbf, 0x9f, 0x44, 0xe1, 0xbc, 0x5b, 0xde, 0x05,
0x05, 0xe8, 0x9e, 0xe1, 0x85, 0x4e, 0x12, 0x85, 0x98, 0x89, 0x0f, 0x41, 0xa1, 0xae, 0xb1, 0x5f,
0xcf, 0x0d, 0xfd, 0x98, 0x9e, 0x55, 0xf6, 0x2b, 0x21, 0xdc, 0xaf, 0xa1, 0xba, 0x13, 0xd3, 0x33,
0xb5, 0xdf, 0xb9, 0x07, 0x77, 0x97, 0xd8, 0x96, 0x87, 0xeb, 0xdf, 0x16, 0x0c, 0x9f, 0x0b, 0xc1,
0x26, 0xf1, 0x6f, 0x11, 0x76, 0x0a, 0xa3, 0xd7, 0xa1, 0x15, 0x24, 0x59, 0x2c, 0xd1, 0xd8, 0x96,
0xab, 0x17, 0x0b, 0x95, 0xd8, 0xa8, 0x55, 0xe2, 0x42, 0x2d, 0x37, 0xeb, 0xb5, 0x6c, 0xd4, 0xea,
0x4a, 0xa5, 0x56, 0x1f, 0x40, 0x4f, 0x05, 0xd9, 0x0b, 0x68, 0x2c, 0x29, 0xcf, 0x71, 0x1e, 0x14,
0x69, 0x17, 0x29, 0x4a, 0xc0, 0xec, 0x47, 0x1a, 0xea, 0x21, 0x9d, 0x37, 0xa3, 0xff, 0x58, 0xb0,
0x5e, 0xbd, 0x4a, 0x1e, 0xb3, 0x4b, 0xfb, 0x92, 0x82, 0x32, 0x1e, 0xe5, 0xf7, 0x50, 0x9f, 0x0a,
0x14, 0xd2, 0xec, 0x28, 0x62, 0x81, 0xa7, 0x18, 0xda, 0x7e, 0x5b, 0x53, 0xde, 0xf3, 0x68, 0xee,
0x95, 0x15, 0xd3, 0x2b, 0x04, 0x56, 0xfc, 0x4c, 0x4e, 0x8b, 0xde, 0xa4, 0xbe, 0x17, 0x3c, 0xd5,
0xbe, 0xce, 0x53, 0x9d, 0xba, 0xa7, 0xca, 0x4c, 0xeb, 0x9a, 0x99, 0xf6, 0x31, 0x0c, 0xf5, 0x70,
0x5b, 0x0d, 0xd7, 0x26, 0x40, 0xd9, 0x47, 0xc4, 0xc8, 0xd2, 0x60, 0x56, 0x34, 0x12, 0xe1, 0xfc,
0x12, 0xec, 0xd7, 0x89, 0xd6, 0x2b, 0xc8, 0x33, 0xb0, 0xa3, 0x62, 0x81, 0xa2, 0xbd, 0x1d, 0x32,
0xaf, 0xf1, 0x42, 0xce, 0x9d, 0x0b, 0x39, 0x9f, 0x41, 0xb7, 0x20, 0x17, 0x3e, 0xb3, 0x2e, 0xf3,
0x59, 0x63, 0xc1, 0x67, 0xce, 0x3f, 0x2d, 0x58, 0xaf, 0x9a, 0x9c, 0x87, 0xe5, 0x3d, 0x0c, 0xca,
0x23, 0xbc, 0x99, 0x9f, 0xe6, 0xb6, 0x3c, 0x33, 0x6d, 0xa9, 0x6f, 0x2b, 0x0d, 0x14, 0x6f, 0xfc,
0x54, 0xe7, 0x72, 0x3f, 0x32, 0x48, 0xe3, 0x77, 0xb0, 0x56, 0x13, 0x59, 0x32, 0xd9, 0xfd, 0xc4,
0x9c, 0xec, 0x2a, 0xd3, 0x69, 0xb9, 0xdb, 0x1c, 0xf7, 0x3e, 0x85, 0x0f, 0x34, 0x1c, 0xec, 0x96,
0x31, 0x2c, 0x7c, 0x5f, 0x0d, 0xb5, 0xb5, 0x18, 0x6a, 0x67, 0x0c, 0xa3, 0xfa, 0xd6, 0xbc, 0xfc,
0x26, 0xb0, 0x76, 0x28, 0x7d, 0xc9, 0x84, 0x64, 0x41, 0xf9, 0xc4, 0x58, 0xc8, 0x0d, 0xeb, 0xba,
0x8e, 0x58, 0xaf, 0xc3, 0x55, 0x68, 0x4a, 0x59, 0xe4, 0xaf, 0xfa, 0x54, 0x51, 0x20, 0xe6, 0x49,
0x79, 0x0c, 0x7e, 0x80, 0xa3, 0x54, 0x3e, 0xc8, 0x44, 0xfa, 0x91, 0x9e, 0x38, 0x56, 0x70, 0xe2,
0xb0, 0x91, 0x82, 0x23, 0x87, 0x6e, 0xca, 0xa1, 0xe6, 0xb6, 0xf4, 0x3c, 0xa2, 0x08, 0xc8, 0xdc,
0x04, 0xc0, 0x52, 0xd5, 0x55, 0xd6, 0xd6, 0x7b, 0x15, 0x65, 0x57, 0x11, 0x9c, 0xfb, 0xb0, 0xf1,
0x39, 0x95, 0x6a, 0x76, 0xe2, 0xbb, 0x49, 0x7c, 0xcc, 0x26, 0x19, 0xf7, 0x8d, 0x50, 0x38, 0xff,
0xb5, 0x60, 0xf3, 0x12, 0x81, 0xfc, 0xc2, 0x23, 0xe8, 0xcc, 0x7c, 0x21, 0x29, 0x2f, 0xaa, 0xa4,
0x58, 0x2e, 0xba, 0xa2, 0x71, 0x9d, 0x2b, 0x9a, 0x35, 0x57, 0xdc, 0x86, 0xf6, 0xcc, 0x3f, 0xf7,
0x66, 0x47, 0xf9, 0x70, 0xd4, 0x9a, 0xf9, 0xe7, 0x6f, 0x8e, 0x10, 0xd9, 0x18, 0xf7, 0x8e, 0xb2,
0xe0, 0x84, 0x4a, 0x51, 0x22, 0x1b, 0xe3, 0x2f, 0x34, 0x45, 0x5d, 0x5a, 0x09, 0x7c, 0x9d, 0xd1,
0x8c, 0x8a, 0x1c, 0x2b, 0x54, 0x73, 0xfc, 0x0d, 0x12, 0x70, 0x98, 0xc2, 0xc9, 0x12, 0x51, 0xa2,
0xeb, 0xe6, 0x2b, 0x27, 0x83, 0x3b, 0xea, 0x7d, 0x47, 0xe3, 0xfd, 0x84, 0xe3, 0x1b, 0xa2, 0x4c,
0xa0, 0x07, 0xd0, 0x0b, 0x22, 0xa6, 0xa0, 0xd2, 0x78, 0xb8, 0x81, 0x26, 0x61, 0x4b, 0x41, 0x2c,
0x95, 0x53, 0xaf, 0xf2, 0x56, 0x05, 0x45, 0x7a, 0xab, 0xdf, 0xab, 0x77, 0xa1, 0x2b, 0x58, 0x1c,
0x50, 0x2f, 0xd6, 0x0f, 0x84, 0xa6, 0xdb, 0xc1, 0xf5, 0x81, 0x70, 0xfe, 0x64, 0xc1, 0x6d, 0x7c,
0xf9, 0xd4, 0x9e, 0x2d, 0x57, 0xf7, 0xf8, 0x5f, 0x03, 0xa1, 0xa7, 0x68, 0x93, 0xb1, 0x27, 0xaf,
0xbe, 0x7b, 0xc6, 0x8c, 0xb1, 0xa8, 0xd6, 0x5d, 0xa3, 0x8b, 0x24, 0xc7, 0x57, 0x80, 0x34, 0xd1,
0xa5, 0x3d, 0x84, 0x96, 0x14, 0x1e, 0x42, 0x99, 0xb2, 0x73, 0x45, 0x8a, 0x03, 0x41, 0x9e, 0x02,
0x49, 0x7d, 0x2e, 0x99, 0x92, 0x56, 0xf3, 0xb3, 0x37, 0xf5, 0xc5, 0x14, 0x0f, 0x6b, 0xb9, 0xab,
0x25, 0xe7, 0x0b, 0x7a, 0xf1, 0x2b, 0x5f, 0x4c, 0x15, 0x80, 0xe3, 0x80, 0xd1, 0xc4, 0x39, 0x1e,
0xbf, 0x77, 0xfe, 0xda, 0x85, 0xfe, 0x21, 0xf5, 0xcf, 0x28, 0x0d, 0x31, 0x9d, 0xc8, 0xa4, 0x80,
0xb1, 0xea, 0x6f, 0x05, 0xf2, 0x68, 0x11, 0xaf, 0x96, 0xfe, 0xc7, 0x18, 0x7f, 0x74, 0x9d, 0x58,
0x8e, 0x08, 0x37, 0xc8, 0x01, 0xf4, 0x8c, 0x77, 0x3b, 0xd9, 0x30, 0x36, 0xd6, 0x7e, 0x47, 0x8c,
0x37, 0x2f, 0xe1, 0x16, 0xda, 0x9e, 0x59, 0xe4, 0x35, 0xf4, 0x8c, 0x81, 0xd1, 0xd4, 0x57, 0x9f,
0x5c, 0x4d, 0x7d, 0x4b, 0xa6, 0x4c, 0xe7, 0x86, 0xd2, 0x66, 0x8c, 0x7d, 0xa6, 0xb6, 0xfa, 0xa0,
0x69, 0x6a, 0x5b, 0x36, 0x2b, 0xa2, 0x36, 0x63, 0xca, 0x32, 0xb5, 0xd5, 0x67, 0x48, 0x53, 0xdb,
0x92, 0xd1, 0xcc, 0xb9, 0x41, 0xfe, 0x00, 0x6b, 0xb5, 0x49, 0x87, 0x38, 0xf3, 0x5d, 0x97, 0x8d,
0x68, 0xe3, 0x87, 0x57, 0xca, 0x94, 0xfa, 0xbf, 0x84, 0xbe, 0x39, 0x60, 0x10, 0xc3, 0xa0, 0x25,
0x33, 0xd4, 0xf8, 0xfe, 0x65, 0x6c, 0x53, 0xa1, 0xd9, 0xe3, 0x4c, 0x85, 0x4b, 0xba, 0xbc, 0xa9,
0x70, 0x59, 0x6b, 0x74, 0x6e, 0x90, 0xdf, 0xc3, 0xea, 0x62, 0xaf, 0x21, 0x1f, 0x2e, 0xba, 0xad,
0xd6, 0xc2, 0xc6, 0xce, 0x55, 0x22, 0xa5, 0xf2, 0x57, 0x00, 0xf3, 0x16, 0x42, 0x8c, 0x9a, 0xad,
0xb5, 0xb0, 0xf1, 0xc6, 0x72, 0x66, 0xa9, 0xea, 0x8f, 0x70, 0x7b, 0x29, 0x4e, 0x13, 0xa3, 0x4c,
0xae, 0x42, 0xfa, 0xf1, 0x8f, 0xaf, 0x95, 0x2b, 0xcf, 0xfa, 0x0a, 0x6e, 0x2d, 0xe0, 0x24, 0xd9,
0xaa, 0x56, 0x4d, 0x1d, 0x42, 0xc7, 0x0f, 0xcc, 0x9f, 0x4f, 0x4b, 0xc0, 0x4e, 0x55, 0xd6, 0x8b,
0xfb, 0xb0, 0x2a, 0x34, 0x44, 0x1c, 0x8b, 0x6d, 0x0d, 0xaf, 0x2f, 0x00, 0x6d, 0x79, 0xcb, 0x13,
0x99, 0x1c, 0xb5, 0xf1, 0x5f, 0xe7, 0xcf, 0xfe, 0x1f, 0x00, 0x00, 0xff, 0xff, 0x4d, 0x32, 0x75,
0x14, 0xfa, 0x14, 0x00, 0x00,
0x11, 0x37, 0xef, 0x3f, 0xe7, 0xee, 0x1c, 0x69, 0x4f, 0x4e, 0x4e, 0x67, 0xc9, 0x51, 0xe8, 0x3a,
0x75, 0x61, 0x43, 0x35, 0xd4, 0x14, 0x48, 0x9a, 0xf6, 0xc1, 0x96, 0xe5, 0xd4, 0x8d, 0xad, 0x18,
0x94, 0x5d, 0xb4, 0x28, 0x50, 0x96, 0x47, 0xae, 0xee, 0xb6, 0xe2, 0x91, 0xec, 0xee, 0x52, 0x7f,
0xf2, 0xd4, 0x97, 0x7e, 0x89, 0x02, 0xfd, 0x00, 0x7d, 0xef, 0x63, 0xd1, 0x97, 0xa2, 0x40, 0x81,
0x7e, 0x8b, 0x7e, 0x92, 0x62, 0x67, 0x49, 0xde, 0xf2, 0xfe, 0x48, 0x09, 0x8a, 0xbc, 0xed, 0xce,
0xcc, 0xce, 0xce, 0xce, 0x9f, 0xdf, 0x0c, 0x09, 0xdd, 0x53, 0x16, 0x51, 0xbe, 0x9f, 0xf2, 0x44,
0x26, 0xa4, 0x83, 0x1b, 0x2f, 0x1d, 0x3b, 0x5f, 0xc1, 0xdd, 0x57, 0x49, 0x72, 0x96, 0xa5, 0xcf,
0x19, 0xa7, 0x81, 0x4c, 0xf8, 0xd5, 0x51, 0x2c, 0xf9, 0x95, 0x4b, 0xff, 0x90, 0x51, 0x21, 0xc9,
0x0e, 0xd8, 0x61, 0xc1, 0x18, 0x5a, 0x7b, 0xd6, 0x43, 0xdb, 0x9d, 0x13, 0x08, 0x81, 0x46, 0xec,
0xcf, 0xe8, 0xb0, 0x86, 0x0c, 0x5c, 0x3b, 0x47, 0xb0, 0xb3, 0x5a, 0xa1, 0x48, 0x93, 0x58, 0x50,
0xf2, 0x00, 0x9a, 0x54, 0x11, 0x50, 0x5b, 0xf7, 0xe0, 0xbd, 0xfd, 0xc2, 0x94, 0x7d, 0x2d, 0xa7,
0xb9, 0xce, 0x3f, 0x2c, 0x20, 0xaf, 0x98, 0x90, 0x8a, 0xc8, 0xa8, 0xf8, 0x66, 0xf6, 0xbc, 0x0f,
0xad, 0x94, 0xd3, 0x53, 0x76, 0x99, 0x5b, 0x94, 0xef, 0xc8, 0x63, 0xd8, 0x14, 0xd2, 0xe7, 0xf2,
0x05, 0x4f, 0x66, 0x2f, 0x58, 0x44, 0x8f, 0x95, 0xd1, 0x75, 0x14, 0x59, 0x66, 0x90, 0x7d, 0x20,
0x2c, 0x0e, 0xa2, 0x4c, 0xb0, 0x73, 0x7a, 0x52, 0x70, 0x87, 0x8d, 0x3d, 0xeb, 0x61, 0xc7, 0x5d,
0xc1, 0x21, 0x5b, 0xd0, 0x8c, 0xd8, 0x8c, 0xc9, 0x61, 0x73, 0xcf, 0x7a, 0xd8, 0x77, 0xf5, 0xc6,
0xf9, 0x29, 0x0c, 0x2a, 0xf6, 0x7f, 0xbb, 0xe7, 0xff, 0xa5, 0x06, 0x4d, 0x24, 0x94, 0x3e, 0xb6,
0xe6, 0x3e, 0x26, 0x1f, 0x41, 0x8f, 0x09, 0x6f, 0xee, 0x88, 0x1a, 0xda, 0xd6, 0x65, 0xa2, 0xf4,
0x39, 0x79, 0x04, 0xad, 0x60, 0x9a, 0xc5, 0x67, 0x62, 0x58, 0xdf, 0xab, 0x3f, 0xec, 0x1e, 0x0c,
0xe6, 0x17, 0xa9, 0x87, 0x1e, 0x2a, 0x9e, 0x9b, 0x8b, 0x90, 0x4f, 0x01, 0x7c, 0x29, 0x39, 0x1b,
0x67, 0x92, 0x0a, 0x7c, 0x69, 0xf7, 0x60, 0x68, 0x1c, 0xc8, 0x04, 0x7d, 0x5a, 0xf2, 0x5d, 0x43,
0x96, 0x7c, 0x06, 0x1d, 0x7a, 0x29, 0x69, 0x1c, 0xd2, 0x70, 0xd8, 0xc4, 0x8b, 0x76, 0x17, 0x5e,
0xb4, 0x7f, 0x94, 0xf3, 0xf5, 0xfb, 0x4a, 0xf1, 0xd1, 0xe7, 0xd0, 0xaf, 0xb0, 0xc8, 0x06, 0xd4,
0xcf, 0x68, 0x11, 0x55, 0xb5, 0x54, 0x9e, 0x3d, 0xf7, 0xa3, 0x4c, 0x27, 0x58, 0xcf, 0xd5, 0x9b,
0x9f, 0xd4, 0x3e, 0xb5, 0x9c, 0xe7, 0x60, 0xbf, 0xc8, 0xa2, 0xa8, 0x3c, 0x18, 0x32, 0x5e, 0x1c,
0x0c, 0x19, 0x9f, 0x7b, 0xb9, 0x76, 0xad, 0x97, 0xff, 0x6e, 0xc1, 0xe6, 0xd1, 0x39, 0x8d, 0xe5,
0x71, 0x22, 0xd9, 0x29, 0x0b, 0x7c, 0xc9, 0x92, 0x98, 0x3c, 0x06, 0x3b, 0x89, 0x42, 0xef, 0xda,
0x30, 0x75, 0x92, 0x28, 0xb7, 0xfa, 0x31, 0xd8, 0x31, 0xbd, 0xf0, 0xae, 0xbd, 0xae, 0x13, 0xd3,
0x0b, 0x2d, 0x7d, 0x1f, 0xfa, 0x21, 0x8d, 0xa8, 0xa4, 0x5e, 0x19, 0x1d, 0x15, 0xba, 0x9e, 0x26,
0x1e, 0xea, 0x70, 0x7c, 0x0c, 0xef, 0x29, 0x95, 0xa9, 0xcf, 0x69, 0x2c, 0xbd, 0xd4, 0x97, 0x53,
0x8c, 0x89, 0xed, 0xf6, 0x63, 0x7a, 0xf1, 0x06, 0xa9, 0x6f, 0x7c, 0x39, 0x75, 0xfe, 0x56, 0x03,
0xbb, 0x0c, 0x26, 0xf9, 0x00, 0xda, 0xea, 0x5a, 0x8f, 0x85, 0xb9, 0x27, 0x5a, 0x6a, 0xfb, 0x32,
0x54, 0x55, 0x91, 0x9c, 0x9e, 0x0a, 0x2a, 0xd1, 0xbc, 0xba, 0x9b, 0xef, 0x54, 0x66, 0x09, 0xf6,
0xb5, 0x2e, 0x84, 0x86, 0x8b, 0x6b, 0xe5, 0xf1, 0x99, 0x64, 0x33, 0x8a, 0x17, 0xd6, 0x5d, 0xbd,
0x21, 0x03, 0x68, 0x52, 0x4f, 0xfa, 0x13, 0xcc, 0x70, 0xdb, 0x6d, 0xd0, 0xb7, 0xfe, 0x84, 0x7c,
0x0f, 0x6e, 0x8b, 0x24, 0xe3, 0x01, 0xf5, 0x8a, 0x6b, 0x5b, 0xc8, 0xed, 0x69, 0xea, 0x0b, 0x7d,
0xb9, 0x03, 0xf5, 0x53, 0x16, 0x0e, 0xdb, 0xe8, 0x98, 0x8d, 0x6a, 0x12, 0xbe, 0x0c, 0x5d, 0xc5,
0x24, 0x3f, 0x04, 0x28, 0x35, 0x85, 0xc3, 0xce, 0x1a, 0x51, 0xbb, 0xd0, 0x1b, 0x92, 0x5d, 0x80,
0x80, 0xa5, 0x53, 0xca, 0x3d, 0x95, 0x30, 0x36, 0x26, 0x87, 0xad, 0x29, 0x5f, 0xd2, 0x2b, 0xc5,
0x66, 0xc2, 0x9b, 0x7c, 0xcd, 0xd2, 0x94, 0x86, 0x43, 0x40, 0x0f, 0xdb, 0x4c, 0x7c, 0xa1, 0x09,
0xce, 0xaf, 0xa0, 0x95, 0x1b, 0x77, 0x17, 0xec, 0xf3, 0x24, 0xca, 0x66, 0xa5, 0xd3, 0xfa, 0x6e,
0x47, 0x13, 0x5e, 0x86, 0x64, 0x1b, 0x10, 0x25, 0xf1, 0x8a, 0x1a, 0xba, 0x08, 0xfd, 0xab, 0x2e,
0x78, 0x1f, 0x5a, 0x41, 0x92, 0x9c, 0x31, 0xed, 0xbb, 0xb6, 0x9b, 0xef, 0x9c, 0x3f, 0xd6, 0xe1,
0x76, 0xb5, 0x58, 0xd4, 0x15, 0xa8, 0x05, 0x3d, 0x6d, 0xa1, 0x1a, 0x54, 0x7b, 0x52, 0xf1, 0x76,
0xcd, 0xf4, 0x76, 0x71, 0x64, 0x96, 0x84, 0xfa, 0x82, 0xbe, 0x3e, 0xf2, 0x3a, 0x09, 0xa9, 0xca,
0xf5, 0x8c, 0x85, 0x18, 0x9e, 0xbe, 0xab, 0x96, 0x8a, 0x32, 0x61, 0x61, 0x0e, 0x3e, 0x6a, 0x89,
0xe6, 0x71, 0xd4, 0xdb, 0xd2, 0x01, 0xd7, 0x3b, 0x15, 0xf0, 0x99, 0xa2, 0xb6, 0x75, 0x14, 0xd5,
0x9a, 0xec, 0x41, 0x97, 0xd3, 0x34, 0xca, 0x73, 0x1f, 0x9d, 0x6f, 0xbb, 0x26, 0x89, 0xdc, 0x03,
0x08, 0x92, 0x28, 0xa2, 0x01, 0x0a, 0xd8, 0x28, 0x60, 0x50, 0x54, 0xde, 0x49, 0x19, 0x79, 0x82,
0x06, 0xe8, 0xea, 0xa6, 0xdb, 0x92, 0x32, 0x3a, 0xa1, 0x81, 0x7a, 0x47, 0x26, 0x28, 0xf7, 0x10,
0xbe, 0xba, 0x78, 0xae, 0xa3, 0x08, 0x08, 0xb2, 0xbb, 0x00, 0x13, 0x9e, 0x64, 0xa9, 0xe6, 0xf6,
0xf6, 0xea, 0x0a, 0xc9, 0x91, 0x82, 0xec, 0x07, 0x70, 0x5b, 0x5c, 0xcd, 0x22, 0x16, 0x9f, 0x79,
0xd2, 0xe7, 0x13, 0x2a, 0x87, 0x7d, 0x5d, 0x01, 0x39, 0xf5, 0x2d, 0x12, 0xd5, 0xdb, 0x67, 0xe1,
0x8f, 0x87, 0xb7, 0x31, 0x03, 0xd4, 0xd2, 0x49, 0x81, 0x1c, 0x72, 0xea, 0x4b, 0xfa, 0x2d, 0xda,
0xd8, 0x37, 0x43, 0x0b, 0x72, 0x07, 0x5a, 0x89, 0x47, 0x2f, 0x83, 0x28, 0x2f, 0xda, 0x66, 0x72,
0x74, 0x19, 0x44, 0xce, 0x23, 0x18, 0x54, 0x6e, 0xcc, 0x81, 0x7e, 0x0b, 0x9a, 0x94, 0xf3, 0xa4,
0x80, 0x25, 0xbd, 0x71, 0x7e, 0x0d, 0xe4, 0x5d, 0x1a, 0x7e, 0x17, 0xe6, 0x39, 0x77, 0x60, 0x50,
0x51, 0xad, 0xed, 0x70, 0xfe, 0x65, 0x01, 0x79, 0x8e, 0xe8, 0xf2, 0xff, 0x35, 0x76, 0x55, 0xef,
0xaa, 0xe9, 0x68, 0xf4, 0x0a, 0x7d, 0xe9, 0xe7, 0x2d, 0xb1, 0xc7, 0x84, 0xd6, 0xff, 0xdc, 0x97,
0x7e, 0xde, 0x9a, 0x38, 0x0d, 0x32, 0xae, 0xba, 0x24, 0xa6, 0x25, 0xb6, 0x26, 0xb7, 0x20, 0x91,
0x4f, 0xe0, 0x7d, 0x36, 0x89, 0x13, 0x4e, 0xe7, 0x62, 0x9e, 0x76, 0x55, 0x0b, 0x85, 0xb7, 0x34,
0xb7, 0x3c, 0x70, 0x84, 0x9e, 0x7b, 0x04, 0x83, 0xca, 0x33, 0xae, 0x75, 0xf3, 0x9f, 0x2d, 0x18,
0x3e, 0x95, 0xc9, 0x8c, 0x05, 0x2e, 0x55, 0xc6, 0x57, 0x9e, 0x7e, 0x1f, 0xfa, 0x0a, 0xdf, 0x17,
0x9f, 0xdf, 0x4b, 0xa2, 0x70, 0xde, 0x3f, 0xb7, 0x41, 0x41, 0xbc, 0x67, 0x78, 0xa1, 0x9d, 0x44,
0x21, 0xe6, 0xe6, 0x7d, 0x50, 0x38, 0x6c, 0x9c, 0xd7, 0x93, 0x44, 0x2f, 0xa6, 0x17, 0x95, 0xf3,
0x4a, 0x08, 0xcf, 0x6b, 0xf0, 0x6e, 0xc7, 0xf4, 0x42, 0x9d, 0x77, 0xee, 0xc2, 0xf6, 0x0a, 0xdb,
0xf2, 0x70, 0xfd, 0xdb, 0x82, 0xc1, 0x53, 0x21, 0xd8, 0x24, 0xfe, 0x25, 0x02, 0x51, 0x61, 0xf4,
0x16, 0x34, 0x83, 0x24, 0x8b, 0x25, 0x1a, 0xdb, 0x74, 0xf5, 0x66, 0xa1, 0x36, 0x6b, 0x4b, 0xb5,
0xb9, 0x50, 0xdd, 0xf5, 0xe5, 0xea, 0x36, 0xaa, 0xb7, 0x51, 0xa9, 0xde, 0x0f, 0xa1, 0xab, 0x82,
0xec, 0x05, 0x34, 0x96, 0x94, 0xe7, 0xc8, 0x0f, 0x8a, 0x74, 0x88, 0x14, 0x25, 0x60, 0x76, 0x28,
0x0d, 0xfe, 0x90, 0xce, 0xdb, 0xd3, 0x7f, 0x2d, 0xd8, 0xaa, 0x3e, 0x25, 0x8f, 0xd9, 0xda, 0x4e,
0xa5, 0xc0, 0x8d, 0x47, 0xf9, 0x3b, 0xd4, 0x52, 0xc1, 0x44, 0x9a, 0x8d, 0x23, 0x16, 0x78, 0x8a,
0xa1, 0xed, 0xb7, 0x35, 0xe5, 0x1d, 0x8f, 0xe6, 0x5e, 0x69, 0x98, 0x5e, 0x21, 0xd0, 0xf0, 0x33,
0x39, 0x2d, 0xba, 0x95, 0x5a, 0x2f, 0x78, 0xaa, 0x75, 0x93, 0xa7, 0xda, 0xcb, 0x9e, 0x2a, 0x33,
0xad, 0x63, 0x66, 0xda, 0x27, 0x30, 0xd0, 0xe3, 0x6e, 0x35, 0x5c, 0xbb, 0x00, 0x65, 0x67, 0x11,
0x43, 0x4b, 0xc3, 0x5b, 0xd1, 0x5a, 0x84, 0xf3, 0x33, 0xb0, 0x5f, 0x25, 0x5a, 0xaf, 0x20, 0x4f,
0xc0, 0x8e, 0x8a, 0x0d, 0x8a, 0x76, 0x0f, 0xc8, 0xbc, 0xc6, 0x0b, 0x39, 0x77, 0x2e, 0xe4, 0x7c,
0x0e, 0x9d, 0x82, 0x5c, 0xf8, 0xcc, 0x5a, 0xe7, 0xb3, 0xda, 0x82, 0xcf, 0x9c, 0x7f, 0x5a, 0xb0,
0x55, 0x35, 0x39, 0x0f, 0xcb, 0x3b, 0xe8, 0x97, 0x57, 0x78, 0x33, 0x3f, 0xcd, 0x6d, 0x79, 0x62,
0xda, 0xb2, 0x7c, 0xac, 0x34, 0x50, 0xbc, 0xf6, 0x53, 0x9d, 0xcb, 0xbd, 0xc8, 0x20, 0x8d, 0xde,
0xc2, 0xe6, 0x92, 0xc8, 0x8a, 0x59, 0xef, 0x07, 0xe6, 0xac, 0x57, 0x99, 0x57, 0xcb, 0xd3, 0xe6,
0x00, 0xf8, 0x19, 0x7c, 0xa0, 0xe1, 0xe0, 0xb0, 0x8c, 0x61, 0xe1, 0xfb, 0x6a, 0xa8, 0xad, 0xc5,
0x50, 0x3b, 0x23, 0x18, 0x2e, 0x1f, 0xcd, 0xcb, 0x6f, 0x02, 0x9b, 0x27, 0xd2, 0x97, 0x4c, 0x48,
0x16, 0x94, 0x1f, 0x1d, 0x0b, 0xb9, 0x61, 0xdd, 0xd4, 0x23, 0x97, 0xeb, 0x70, 0x03, 0xea, 0x52,
0x16, 0xf9, 0xab, 0x96, 0x2a, 0x0a, 0xc4, 0xbc, 0x29, 0x8f, 0xc1, 0x77, 0x70, 0x95, 0xca, 0x07,
0x99, 0x48, 0x3f, 0xd2, 0x33, 0x48, 0x03, 0x67, 0x10, 0x1b, 0x29, 0x38, 0x84, 0xe8, 0x36, 0x1d,
0x6a, 0x6e, 0x53, 0x4f, 0x28, 0x8a, 0x80, 0xcc, 0x5d, 0x00, 0x2c, 0x55, 0x5d, 0x65, 0x2d, 0x7d,
0x56, 0x51, 0x0e, 0x15, 0xc1, 0xb9, 0x07, 0x3b, 0x5f, 0x50, 0xa9, 0xa6, 0x29, 0x7e, 0x98, 0xc4,
0xa7, 0x6c, 0x92, 0x71, 0xdf, 0x08, 0x85, 0xf3, 0x1f, 0x0b, 0x76, 0xd7, 0x08, 0xe4, 0x0f, 0x1e,
0x42, 0x7b, 0xe6, 0x0b, 0x49, 0x79, 0x51, 0x25, 0xc5, 0x76, 0xd1, 0x15, 0xb5, 0x9b, 0x5c, 0x51,
0x5f, 0x72, 0xc5, 0x1d, 0x68, 0xcd, 0xfc, 0x4b, 0x6f, 0x36, 0xce, 0xc7, 0xa5, 0xe6, 0xcc, 0xbf,
0x7c, 0x3d, 0x46, 0x64, 0x63, 0xdc, 0x1b, 0x67, 0xc1, 0x19, 0x95, 0xa2, 0x44, 0x36, 0xc6, 0x9f,
0x69, 0x0a, 0xce, 0x4f, 0x38, 0x4c, 0x22, 0x0c, 0x74, 0xdc, 0x7c, 0xe7, 0x5c, 0xc0, 0xf0, 0x24,
0x1b, 0x8b, 0x80, 0xb3, 0x31, 0x7d, 0x4d, 0xa5, 0xaf, 0xc0, 0xb0, 0xc8, 0x91, 0x0f, 0xa1, 0x1b,
0x44, 0x4c, 0xa1, 0xa1, 0xf1, 0xb5, 0x06, 0x9a, 0x84, 0x5d, 0x03, 0xe1, 0x52, 0x4e, 0xbd, 0xca,
0x07, 0x2a, 0x28, 0xd2, 0x1b, 0xfd, 0x91, 0xba, 0x0d, 0x1d, 0xc1, 0xe2, 0x80, 0x7a, 0xb1, 0xfe,
0x2a, 0xa8, 0xbb, 0x6d, 0xdc, 0x1f, 0x0b, 0xe7, 0x4f, 0x16, 0x6c, 0xaf, 0xb8, 0x39, 0x77, 0xe1,
0xf5, 0xad, 0xfc, 0x17, 0x40, 0xe8, 0x39, 0xda, 0x65, 0x7c, 0xe3, 0xe4, 0x45, 0x76, 0xd7, 0x18,
0x25, 0x16, 0x3f, 0x83, 0xdc, 0x4d, 0xba, 0x48, 0x72, 0x7c, 0x85, 0x3b, 0x13, 0x5d, 0xc1, 0x03,
0x68, 0x4a, 0xe1, 0x21, 0x62, 0x29, 0x5b, 0x1b, 0x52, 0x1c, 0x0b, 0xf2, 0x18, 0x48, 0xea, 0x73,
0xc9, 0x94, 0xb4, 0x1a, 0x9c, 0xbd, 0xa9, 0x2f, 0xa6, 0x78, 0x59, 0xd3, 0xdd, 0x28, 0x39, 0x5f,
0xd2, 0xab, 0x9f, 0xfb, 0x62, 0xaa, 0x70, 0x1a, 0xe7, 0x88, 0x3a, 0x8e, 0x6f, 0xb8, 0x3e, 0xf8,
0x6b, 0x07, 0x7a, 0x27, 0xd4, 0xbf, 0xa0, 0x34, 0xc4, 0xac, 0x21, 0x93, 0x02, 0xad, 0xaa, 0xff,
0x13, 0xc8, 0x83, 0x45, 0x58, 0x5a, 0xf9, 0x03, 0x63, 0xf4, 0xf1, 0x4d, 0x62, 0x79, 0xe1, 0xdf,
0x22, 0xc7, 0xd0, 0x35, 0x3e, 0xd8, 0xc9, 0x8e, 0x71, 0x70, 0xe9, 0x3f, 0xc4, 0x68, 0x77, 0x0d,
0xb7, 0xd0, 0xf6, 0xc4, 0x22, 0xaf, 0xa0, 0x6b, 0xcc, 0x85, 0xa6, 0xbe, 0xe5, 0x01, 0xd5, 0xd4,
0xb7, 0x62, 0x98, 0x74, 0x6e, 0x29, 0x6d, 0xc6, 0x74, 0x67, 0x6a, 0x5b, 0x9e, 0x27, 0x4d, 0x6d,
0xab, 0x46, 0x42, 0xd4, 0x66, 0x0c, 0x53, 0xa6, 0xb6, 0xe5, 0x51, 0xd1, 0xd4, 0xb6, 0x62, 0x02,
0x73, 0x6e, 0x91, 0xdf, 0xc2, 0xe6, 0xd2, 0x40, 0x43, 0x9c, 0xf9, 0xa9, 0x75, 0x93, 0xd8, 0xe8,
0xfe, 0xb5, 0x32, 0xa5, 0xfe, 0xaf, 0xa0, 0x67, 0xce, 0x11, 0xc4, 0x30, 0x68, 0xc5, 0xa8, 0x34,
0xba, 0xb7, 0x8e, 0x6d, 0x2a, 0x34, 0x5b, 0x99, 0xa9, 0x70, 0x45, 0x33, 0x37, 0x15, 0xae, 0xea,
0x80, 0xce, 0x2d, 0xf2, 0x1b, 0xd8, 0x58, 0x6c, 0x29, 0xe4, 0xa3, 0x45, 0xb7, 0x2d, 0x75, 0xaa,
0x91, 0x73, 0x9d, 0x48, 0xa9, 0xfc, 0x25, 0xc0, 0xbc, 0x53, 0x10, 0xa3, 0x66, 0x97, 0x3a, 0xd5,
0x68, 0x67, 0x35, 0xb3, 0x54, 0xf5, 0x7b, 0xb8, 0xb3, 0x12, 0x8e, 0x89, 0x51, 0x26, 0xd7, 0x01,
0xfa, 0xe8, 0xfb, 0x37, 0xca, 0x95, 0x77, 0xfd, 0x0e, 0x36, 0x97, 0x30, 0xcb, 0xcc, 0x8a, 0x75,
0x50, 0x6a, 0x66, 0xc5, 0x5a, 0xd0, 0x53, 0x15, 0xf6, 0xec, 0x1e, 0x6c, 0x08, 0x0d, 0x15, 0xa7,
0x62, 0x5f, 0x43, 0xed, 0x33, 0x40, 0x9b, 0xde, 0xf0, 0x44, 0x26, 0xe3, 0x16, 0xfe, 0xec, 0xfc,
0xd1, 0xff, 0x02, 0x00, 0x00, 0xff, 0xff, 0x37, 0x58, 0xa5, 0x1f, 0xfb, 0x14, 0x00, 0x00,
} }

5
weed/pb/shared_values.go

@ -0,0 +1,5 @@
package pb
const (
AdminShellClient = "shell"
)

4
weed/replication/sink/filersink/filer_sink.go

@ -90,7 +90,7 @@ func (fs *FilerSink) CreateEntry(key string, entry *filer_pb.Entry) error {
} }
glog.V(1).Infof("lookup: %v", lookupRequest) glog.V(1).Infof("lookup: %v", lookupRequest)
if resp, err := filer_pb.LookupEntry(client, lookupRequest); err == nil { if resp, err := filer_pb.LookupEntry(client, lookupRequest); err == nil {
if filer2.ETag(resp.Entry.Chunks) == filer2.ETag(entry.Chunks) {
if filer2.ETag(resp.Entry) == filer2.ETag(entry) {
glog.V(0).Infof("already replicated %s", key) glog.V(0).Infof("already replicated %s", key)
return nil return nil
} }
@ -160,7 +160,7 @@ func (fs *FilerSink) UpdateEntry(key string, oldEntry *filer_pb.Entry, newParent
// skip if already changed // skip if already changed
// this usually happens when the messages are not ordered // this usually happens when the messages are not ordered
glog.V(0).Infof("late updates %s", key) glog.V(0).Infof("late updates %s", key)
} else if filer2.ETag(newEntry.Chunks) == filer2.ETag(existingEntry.Chunks) {
} else if filer2.ETag(newEntry) == filer2.ETag(existingEntry) {
// skip if no change // skip if no change
// this usually happens when retrying the replication // this usually happens when retrying the replication
glog.V(0).Infof("already replicated %s", key) glog.V(0).Infof("already replicated %s", key)

9
weed/replication/sink/s3sink/s3_sink.go

@ -25,6 +25,7 @@ type S3Sink struct {
region string region string
bucket string bucket string
dir string dir string
endpoint string
filerSource *source.FilerSource filerSource *source.FilerSource
} }
@ -44,12 +45,14 @@ func (s3sink *S3Sink) Initialize(configuration util.Configuration, prefix string
glog.V(0).Infof("sink.s3.region: %v", configuration.GetString(prefix+"region")) glog.V(0).Infof("sink.s3.region: %v", configuration.GetString(prefix+"region"))
glog.V(0).Infof("sink.s3.bucket: %v", configuration.GetString(prefix+"bucket")) glog.V(0).Infof("sink.s3.bucket: %v", configuration.GetString(prefix+"bucket"))
glog.V(0).Infof("sink.s3.directory: %v", configuration.GetString(prefix+"directory")) glog.V(0).Infof("sink.s3.directory: %v", configuration.GetString(prefix+"directory"))
glog.V(0).Infof("sink.s3.endpoint: %v", configuration.GetString(prefix+"endpoint"))
return s3sink.initialize( return s3sink.initialize(
configuration.GetString(prefix+"aws_access_key_id"), configuration.GetString(prefix+"aws_access_key_id"),
configuration.GetString(prefix+"aws_secret_access_key"), configuration.GetString(prefix+"aws_secret_access_key"),
configuration.GetString(prefix+"region"), configuration.GetString(prefix+"region"),
configuration.GetString(prefix+"bucket"), configuration.GetString(prefix+"bucket"),
configuration.GetString(prefix+"directory"), configuration.GetString(prefix+"directory"),
configuration.GetString(prefix+"endpoint"),
) )
} }
@ -57,13 +60,15 @@ func (s3sink *S3Sink) SetSourceFiler(s *source.FilerSource) {
s3sink.filerSource = s s3sink.filerSource = s
} }
func (s3sink *S3Sink) initialize(awsAccessKeyId, awsSecretAccessKey, region, bucket, dir string) error {
func (s3sink *S3Sink) initialize(awsAccessKeyId, awsSecretAccessKey, region, bucket, dir, endpoint string) error {
s3sink.region = region s3sink.region = region
s3sink.bucket = bucket s3sink.bucket = bucket
s3sink.dir = dir s3sink.dir = dir
s3sink.endpoint = endpoint
config := &aws.Config{ config := &aws.Config{
Region: aws.String(s3sink.region),
Region: aws.String(s3sink.region),
Endpoint: aws.String(s3sink.endpoint),
} }
if awsAccessKeyId != "" && awsSecretAccessKey != "" { if awsAccessKeyId != "" && awsSecretAccessKey != "" {
config.Credentials = credentials.NewStaticCredentials(awsAccessKeyId, awsSecretAccessKey, "") config.Credentials = credentials.NewStaticCredentials(awsAccessKeyId, awsSecretAccessKey, "")

4
weed/replication/sub/notification_aws_sqs.go

@ -92,7 +92,9 @@ func (k *AwsSqsInput) ReceiveMessage() (key string, message *filer_pb.EventNotif
} }
// process the message // process the message
key = *result.Messages[0].Attributes["key"]
// fmt.Printf("messages: %+v\n", result.Messages[0])
keyValue := result.Messages[0].MessageAttributes["key"]
key = *keyValue.StringValue
text := *result.Messages[0].Body text := *result.Messages[0].Body
message = &filer_pb.EventNotification{} message = &filer_pb.EventNotification{}
err = proto.UnmarshalText(text, message) err = proto.UnmarshalText(text, message)

4
weed/s3api/filer_multipart.go

@ -107,7 +107,7 @@ func (s3a *S3ApiServer) completeMultipartUpload(input *s3.CompleteMultipartUploa
CompleteMultipartUploadOutput: s3.CompleteMultipartUploadOutput{ CompleteMultipartUploadOutput: s3.CompleteMultipartUploadOutput{
Location: aws.String(fmt.Sprintf("http://%s%s/%s", s3a.option.Filer, dirName, entryName)), Location: aws.String(fmt.Sprintf("http://%s%s/%s", s3a.option.Filer, dirName, entryName)),
Bucket: input.Bucket, Bucket: input.Bucket,
ETag: aws.String("\"" + filer2.ETag(finalParts) + "\""),
ETag: aws.String("\"" + filer2.ETagChunks(finalParts) + "\""),
Key: objectKey(input.Key), Key: objectKey(input.Key),
}, },
} }
@ -208,7 +208,7 @@ func (s3a *S3ApiServer) listObjectParts(input *s3.ListPartsInput) (output *ListP
PartNumber: aws.Int64(int64(partNumber)), PartNumber: aws.Int64(int64(partNumber)),
LastModified: aws.Time(time.Unix(entry.Attributes.Mtime, 0)), LastModified: aws.Time(time.Unix(entry.Attributes.Mtime, 0)),
Size: aws.Int64(int64(filer2.TotalSize(entry.Chunks))), Size: aws.Int64(int64(filer2.TotalSize(entry.Chunks))),
ETag: aws.String("\"" + filer2.ETag(entry.Chunks) + "\""),
ETag: aws.String("\"" + filer2.ETag(entry) + "\""),
}) })
} }
} }

2
weed/s3api/s3api_objects_list_handlers.go

@ -139,7 +139,7 @@ func (s3a *S3ApiServer) listFilerEntries(bucket, originalPrefix string, maxKeys
contents = append(contents, ListEntry{ contents = append(contents, ListEntry{
Key: fmt.Sprintf("%s%s", dir, entry.Name), Key: fmt.Sprintf("%s%s", dir, entry.Name),
LastModified: time.Unix(entry.Attributes.Mtime, 0), LastModified: time.Unix(entry.Attributes.Mtime, 0),
ETag: "\"" + filer2.ETag(entry.Chunks) + "\"",
ETag: "\"" + filer2.ETag(entry) + "\"",
Size: int64(filer2.TotalSize(entry.Chunks)), Size: int64(filer2.TotalSize(entry.Chunks)),
Owner: CanonicalUser{ Owner: CanonicalUser{
ID: fmt.Sprintf("%x", entry.Attributes.Uid), ID: fmt.Sprintf("%x", entry.Attributes.Uid),

3
weed/server/filer_grpc_server.go

@ -232,7 +232,7 @@ func (fs *FilerServer) AssignVolume(ctx context.Context, req *filer_pb.AssignVol
if req.TtlSec > 0 { if req.TtlSec > 0 {
ttlStr = strconv.Itoa(int(req.TtlSec)) ttlStr = strconv.Itoa(int(req.TtlSec))
} }
collection, replication := fs.detectCollection(req.ParentPath, req.Collection, req.Replication)
collection, replication, _ := fs.detectCollection(req.ParentPath, req.Collection, req.Replication)
var altRequest *operation.VolumeAssignRequest var altRequest *operation.VolumeAssignRequest
@ -327,7 +327,6 @@ func (fs *FilerServer) GetFilerConfiguration(ctx context.Context, req *filer_pb.
Replication: fs.option.DefaultReplication, Replication: fs.option.DefaultReplication,
MaxMb: uint32(fs.option.MaxMB), MaxMb: uint32(fs.option.MaxMB),
DirBuckets: fs.filer.DirBucketsPath, DirBuckets: fs.filer.DirBucketsPath,
DirQueues: fs.filer.DirQueuesPath,
Cipher: fs.filer.Cipher, Cipher: fs.filer.Cipher,
}, nil }, nil
} }

7
weed/server/filer_grpc_server_listen.go

@ -4,12 +4,13 @@ import (
"strings" "strings"
"time" "time"
"github.com/chrislusf/seaweedfs/weed/filer2"
"github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/util" "github.com/chrislusf/seaweedfs/weed/util"
) )
func (fs *FilerServer) ListenForEvents(req *filer_pb.ListenForEventsRequest, stream filer_pb.SeaweedFiler_ListenForEventsServer) error {
func (fs *FilerServer) SubscribeMetadata(req *filer_pb.SubscribeMetadataRequest, stream filer_pb.SeaweedFiler_SubscribeMetadataServer) error {
peerAddress := findClientAddress(stream.Context(), 0) peerAddress := findClientAddress(stream.Context(), 0)
@ -37,7 +38,7 @@ func (fs *FilerServer) ListenForEvents(req *filer_pb.ListenForEventsRequest, str
fullpath := util.Join(dirPath, entryName) fullpath := util.Join(dirPath, entryName)
// skip on filer internal meta logs // skip on filer internal meta logs
if strings.HasPrefix(fullpath, "/.meta") {
if strings.HasPrefix(fullpath, filer2.SystemLogDir) {
return nil return nil
} }
@ -45,7 +46,7 @@ func (fs *FilerServer) ListenForEvents(req *filer_pb.ListenForEventsRequest, str
return nil return nil
} }
message := &filer_pb.FullEventNotification{
message := &filer_pb.SubscribeMetadataResponse{
Directory: dirPath, Directory: dirPath,
EventNotification: eventNotification, EventNotification: eventNotification,
} }

28
weed/server/filer_grpc_server_rename.go

@ -44,12 +44,19 @@ func (fs *FilerServer) AtomicRenameEntry(ctx context.Context, req *filer_pb.Atom
} }
func (fs *FilerServer) moveEntry(ctx context.Context, oldParent util.FullPath, entry *filer2.Entry, newParent util.FullPath, newName string, events *MoveEvents) error { func (fs *FilerServer) moveEntry(ctx context.Context, oldParent util.FullPath, entry *filer2.Entry, newParent util.FullPath, newName string, events *MoveEvents) error {
if entry.IsDirectory() {
if err := fs.moveFolderSubEntries(ctx, oldParent, entry, newParent, newName, events); err != nil {
return err
if err := fs.moveSelfEntry(ctx, oldParent, entry, newParent, newName, events, func() error {
if entry.IsDirectory() {
if err := fs.moveFolderSubEntries(ctx, oldParent, entry, newParent, newName, events); err != nil {
return err
}
} }
return nil
}); err != nil {
return fmt.Errorf("fail to move %s => %s: %v", oldParent.Child(entry.Name()), newParent.Child(newName), err)
} }
return fs.moveSelfEntry(ctx, oldParent, entry, newParent, newName, events)
return nil
} }
func (fs *FilerServer) moveFolderSubEntries(ctx context.Context, oldParent util.FullPath, entry *filer2.Entry, newParent util.FullPath, newName string, events *MoveEvents) error { func (fs *FilerServer) moveFolderSubEntries(ctx context.Context, oldParent util.FullPath, entry *filer2.Entry, newParent util.FullPath, newName string, events *MoveEvents) error {
@ -85,7 +92,8 @@ func (fs *FilerServer) moveFolderSubEntries(ctx context.Context, oldParent util.
return nil return nil
} }
func (fs *FilerServer) moveSelfEntry(ctx context.Context, oldParent util.FullPath, entry *filer2.Entry, newParent util.FullPath, newName string, events *MoveEvents) error {
func (fs *FilerServer) moveSelfEntry(ctx context.Context, oldParent util.FullPath, entry *filer2.Entry, newParent util.FullPath, newName string, events *MoveEvents,
moveFolderSubEntries func() error) error {
oldPath, newPath := oldParent.Child(entry.Name()), newParent.Child(newName) oldPath, newPath := oldParent.Child(entry.Name()), newParent.Child(newName)
@ -107,6 +115,14 @@ func (fs *FilerServer) moveSelfEntry(ctx context.Context, oldParent util.FullPat
return createErr return createErr
} }
events.newEntries = append(events.newEntries, newEntry)
if moveFolderSubEntries != nil {
if moveChildrenErr := moveFolderSubEntries(); moveChildrenErr != nil {
return moveChildrenErr
}
}
// delete old entry // delete old entry
deleteErr := fs.filer.DeleteEntryMetaAndData(ctx, oldPath, false, false, false) deleteErr := fs.filer.DeleteEntryMetaAndData(ctx, oldPath, false, false, false)
if deleteErr != nil { if deleteErr != nil {
@ -114,7 +130,7 @@ func (fs *FilerServer) moveSelfEntry(ctx context.Context, oldParent util.FullPat
} }
events.oldEntries = append(events.oldEntries, entry) events.oldEntries = append(events.oldEntries, entry)
events.newEntries = append(events.newEntries, newEntry)
return nil return nil
} }

12
weed/server/filer_server.go

@ -24,6 +24,7 @@ import (
_ "github.com/chrislusf/seaweedfs/weed/filer2/mysql" _ "github.com/chrislusf/seaweedfs/weed/filer2/mysql"
_ "github.com/chrislusf/seaweedfs/weed/filer2/postgres" _ "github.com/chrislusf/seaweedfs/weed/filer2/postgres"
_ "github.com/chrislusf/seaweedfs/weed/filer2/redis" _ "github.com/chrislusf/seaweedfs/weed/filer2/redis"
_ "github.com/chrislusf/seaweedfs/weed/filer2/redis2"
"github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/notification" "github.com/chrislusf/seaweedfs/weed/notification"
_ "github.com/chrislusf/seaweedfs/weed/notification/aws_sqs" _ "github.com/chrislusf/seaweedfs/weed/notification/aws_sqs"
@ -72,7 +73,7 @@ func NewFilerServer(defaultMux, readonlyMux *http.ServeMux, option *FilerOption)
glog.Fatal("master list is required!") glog.Fatal("master list is required!")
} }
fs.filer = filer2.NewFiler(option.Masters, fs.grpcDialOption, option.Port+10000, fs.notifyMetaListeners)
fs.filer = filer2.NewFiler(option.Masters, fs.grpcDialOption, option.Port+10000, option.Collection, option.DefaultReplication, fs.notifyMetaListeners)
fs.filer.Cipher = option.Cipher fs.filer.Cipher = option.Cipher
maybeStartMetrics(fs, option) maybeStartMetrics(fs, option)
@ -91,10 +92,9 @@ func NewFilerServer(defaultMux, readonlyMux *http.ServeMux, option *FilerOption)
util.LoadConfiguration("notification", false) util.LoadConfiguration("notification", false)
fs.option.recursiveDelete = v.GetBool("filer.options.recursive_delete") fs.option.recursiveDelete = v.GetBool("filer.options.recursive_delete")
v.Set("filer.option.buckets_folder", "/buckets")
v.Set("filer.option.queues_folder", "/queues")
fs.filer.DirBucketsPath = v.GetString("filer.option.buckets_folder")
fs.filer.DirQueuesPath = v.GetString("filer.option.queues_folder")
v.SetDefault("filer.options.buckets_folder", "/buckets")
fs.filer.DirBucketsPath = v.GetString("filer.options.buckets_folder")
fs.filer.FsyncBuckets = v.GetStringSlice("filer.options.buckets_fsync")
fs.filer.LoadConfiguration(v) fs.filer.LoadConfiguration(v)
notification.LoadConfiguration(v, "notification.") notification.LoadConfiguration(v, "notification.")
@ -107,7 +107,7 @@ func NewFilerServer(defaultMux, readonlyMux *http.ServeMux, option *FilerOption)
readonlyMux.HandleFunc("/", fs.readonlyFilerHandler) readonlyMux.HandleFunc("/", fs.readonlyFilerHandler)
} }
fs.filer.LoadBuckets(fs.filer.DirBucketsPath)
fs.filer.LoadBuckets()
util.OnInterrupt(func() { util.OnInterrupt(func() {
fs.filer.Shutdown() fs.filer.Shutdown()

21
weed/server/filer_server_handlers_read.go

@ -8,6 +8,7 @@ import (
"path/filepath" "path/filepath"
"strconv" "strconv"
"strings" "strings"
"time"
"github.com/chrislusf/seaweedfs/weed/filer2" "github.com/chrislusf/seaweedfs/weed/filer2"
"github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/glog"
@ -78,8 +79,26 @@ func (fs *FilerServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request,
w.Header().Set("Content-Type", mimeType) w.Header().Set("Content-Type", mimeType)
} }
// if modified since
if !entry.Attr.Mtime.IsZero() {
w.Header().Set("Last-Modified", entry.Attr.Mtime.UTC().Format(http.TimeFormat))
if r.Header.Get("If-Modified-Since") != "" {
if t, parseError := time.Parse(http.TimeFormat, r.Header.Get("If-Modified-Since")); parseError == nil {
if t.After(entry.Attr.Mtime) {
w.WriteHeader(http.StatusNotModified)
return
}
}
}
}
// set etag // set etag
setEtag(w, filer2.ETag(entry.Chunks))
etag := filer2.ETagEntry(entry)
if inm := r.Header.Get("If-None-Match"); inm == "\""+etag+"\"" {
w.WriteHeader(http.StatusNotModified)
return
}
setEtag(w, etag)
if r.Method == "HEAD" { if r.Method == "HEAD" {
w.Header().Set("Content-Length", strconv.FormatInt(int64(filer2.TotalSize(entry.Chunks)), 10)) w.Header().Set("Content-Length", strconv.FormatInt(int64(filer2.TotalSize(entry.Chunks)), 10))

42
weed/server/filer_server_handlers_write.go

@ -40,7 +40,7 @@ type FilerPostResult struct {
Url string `json:"url,omitempty"` Url string `json:"url,omitempty"`
} }
func (fs *FilerServer) assignNewFileInfo(w http.ResponseWriter, r *http.Request, replication, collection, dataCenter, ttlString string) (fileId, urlLocation string, auth security.EncodedJwt, err error) {
func (fs *FilerServer) assignNewFileInfo(w http.ResponseWriter, r *http.Request, replication, collection, dataCenter, ttlString string, fsync bool) (fileId, urlLocation string, auth security.EncodedJwt, err error) {
stats.FilerRequestCounter.WithLabelValues("assign").Inc() stats.FilerRequestCounter.WithLabelValues("assign").Inc()
start := time.Now() start := time.Now()
@ -73,6 +73,9 @@ func (fs *FilerServer) assignNewFileInfo(w http.ResponseWriter, r *http.Request,
} }
fileId = assignResult.Fid fileId = assignResult.Fid
urlLocation = "http://" + assignResult.Url + "/" + assignResult.Fid urlLocation = "http://" + assignResult.Url + "/" + assignResult.Fid
if fsync {
urlLocation += "?fsync=true"
}
auth = assignResult.Auth auth = assignResult.Auth
return return
} }
@ -82,7 +85,7 @@ func (fs *FilerServer) PostHandler(w http.ResponseWriter, r *http.Request) {
ctx := context.Background() ctx := context.Background()
query := r.URL.Query() query := r.URL.Query()
collection, replication := fs.detectCollection(r.RequestURI, query.Get("collection"), query.Get("replication"))
collection, replication, fsync := fs.detectCollection(r.RequestURI, query.Get("collection"), query.Get("replication"))
dataCenter := query.Get("dataCenter") dataCenter := query.Get("dataCenter")
if dataCenter == "" { if dataCenter == "" {
dataCenter = fs.option.DataCenter dataCenter = fs.option.DataCenter
@ -96,12 +99,12 @@ func (fs *FilerServer) PostHandler(w http.ResponseWriter, r *http.Request) {
ttlSeconds = int32(ttl.Minutes()) * 60 ttlSeconds = int32(ttl.Minutes()) * 60
} }
if autoChunked := fs.autoChunk(ctx, w, r, replication, collection, dataCenter, ttlSeconds, ttlString); autoChunked {
if autoChunked := fs.autoChunk(ctx, w, r, replication, collection, dataCenter, ttlSeconds, ttlString, fsync); autoChunked {
return return
} }
if fs.option.Cipher { if fs.option.Cipher {
reply, err := fs.encrypt(ctx, w, r, replication, collection, dataCenter, ttlSeconds, ttlString)
reply, err := fs.encrypt(ctx, w, r, replication, collection, dataCenter, ttlSeconds, ttlString, fsync)
if err != nil { if err != nil {
writeJsonError(w, r, http.StatusInternalServerError, err) writeJsonError(w, r, http.StatusInternalServerError, err)
} else if reply != nil { } else if reply != nil {
@ -111,7 +114,7 @@ func (fs *FilerServer) PostHandler(w http.ResponseWriter, r *http.Request) {
return return
} }
fileId, urlLocation, auth, err := fs.assignNewFileInfo(w, r, replication, collection, dataCenter, ttlString)
fileId, urlLocation, auth, err := fs.assignNewFileInfo(w, r, replication, collection, dataCenter, ttlString, fsync)
if err != nil || fileId == "" || urlLocation == "" { if err != nil || fileId == "" || urlLocation == "" {
glog.V(0).Infof("fail to allocate volume for %s, collection:%s, datacenter:%s", r.URL.Path, collection, dataCenter) glog.V(0).Infof("fail to allocate volume for %s, collection:%s, datacenter:%s", r.URL.Path, collection, dataCenter)
@ -122,12 +125,12 @@ func (fs *FilerServer) PostHandler(w http.ResponseWriter, r *http.Request) {
glog.V(4).Infof("write %s to %v", r.URL.Path, urlLocation) glog.V(4).Infof("write %s to %v", r.URL.Path, urlLocation)
u, _ := url.Parse(urlLocation) u, _ := url.Parse(urlLocation)
ret, err := fs.uploadToVolumeServer(r, u, auth, w, fileId)
ret, md5value, err := fs.uploadToVolumeServer(r, u, auth, w, fileId)
if err != nil { if err != nil {
return return
} }
if err = fs.updateFilerStore(ctx, r, w, replication, collection, ret, fileId, ttlSeconds); err != nil {
if err = fs.updateFilerStore(ctx, r, w, replication, collection, ret, md5value, fileId, ttlSeconds); err != nil {
return return
} }
@ -144,8 +147,8 @@ func (fs *FilerServer) PostHandler(w http.ResponseWriter, r *http.Request) {
} }
// update metadata in filer store // update metadata in filer store
func (fs *FilerServer) updateFilerStore(ctx context.Context, r *http.Request, w http.ResponseWriter,
replication string, collection string, ret *operation.UploadResult, fileId string, ttlSeconds int32) (err error) {
func (fs *FilerServer) updateFilerStore(ctx context.Context, r *http.Request, w http.ResponseWriter, replication string,
collection string, ret *operation.UploadResult, md5value []byte, fileId string, ttlSeconds int32) (err error) {
stats.FilerRequestCounter.WithLabelValues("postStoreWrite").Inc() stats.FilerRequestCounter.WithLabelValues("postStoreWrite").Inc()
start := time.Now() start := time.Now()
@ -186,6 +189,7 @@ func (fs *FilerServer) updateFilerStore(ctx context.Context, r *http.Request, w
Collection: collection, Collection: collection,
TtlSec: ttlSeconds, TtlSec: ttlSeconds,
Mime: ret.Mime, Mime: ret.Mime,
Md5: md5value,
}, },
Chunks: []*filer_pb.FileChunk{{ Chunks: []*filer_pb.FileChunk{{
FileId: fileId, FileId: fileId,
@ -212,15 +216,20 @@ func (fs *FilerServer) updateFilerStore(ctx context.Context, r *http.Request, w
} }
// send request to volume server // send request to volume server
func (fs *FilerServer) uploadToVolumeServer(r *http.Request, u *url.URL, auth security.EncodedJwt, w http.ResponseWriter, fileId string) (ret *operation.UploadResult, err error) {
func (fs *FilerServer) uploadToVolumeServer(r *http.Request, u *url.URL, auth security.EncodedJwt, w http.ResponseWriter, fileId string) (ret *operation.UploadResult, md5value []byte, err error) {
stats.FilerRequestCounter.WithLabelValues("postUpload").Inc() stats.FilerRequestCounter.WithLabelValues("postUpload").Inc()
start := time.Now() start := time.Now()
defer func() { stats.FilerRequestHistogram.WithLabelValues("postUpload").Observe(time.Since(start).Seconds()) }() defer func() { stats.FilerRequestHistogram.WithLabelValues("postUpload").Observe(time.Since(start).Seconds()) }()
ret = &operation.UploadResult{} ret = &operation.UploadResult{}
hash := md5.New()
var body = ioutil.NopCloser(io.TeeReader(r.Body, hash))
md5Hash := md5.New()
body := r.Body
if r.Method == "PUT" {
// only PUT or large chunked files has Md5 in attributes
body = ioutil.NopCloser(io.TeeReader(r.Body, md5Hash))
}
request := &http.Request{ request := &http.Request{
Method: r.Method, Method: r.Method,
@ -285,7 +294,10 @@ func (fs *FilerServer) uploadToVolumeServer(r *http.Request, u *url.URL, auth se
} }
} }
// use filer calculated md5 ETag, instead of the volume server crc ETag // use filer calculated md5 ETag, instead of the volume server crc ETag
ret.ETag = fmt.Sprintf("%x", hash.Sum(nil))
if r.Method == "PUT" {
md5value = md5Hash.Sum(nil)
}
ret.ETag = getEtag(resp)
return return
} }
@ -318,7 +330,7 @@ func (fs *FilerServer) DeleteHandler(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusNoContent) w.WriteHeader(http.StatusNoContent)
} }
func (fs *FilerServer) detectCollection(requestURI, qCollection, qReplication string) (collection, replication string) {
func (fs *FilerServer) detectCollection(requestURI, qCollection, qReplication string) (collection, replication string, fsync bool) {
// default // default
collection = fs.option.Collection collection = fs.option.Collection
replication = fs.option.DefaultReplication replication = fs.option.DefaultReplication
@ -341,7 +353,7 @@ func (fs *FilerServer) detectCollection(requestURI, qCollection, qReplication st
if t > 0 { if t > 0 {
collection = bucketAndObjectKey[:t] collection = bucketAndObjectKey[:t]
} }
replication = fs.filer.ReadBucketOption(collection)
replication, fsync = fs.filer.ReadBucketOption(collection)
} }
return return

16
weed/server/filer_server_handlers_write_autochunk.go

@ -2,7 +2,9 @@ package weed_server
import ( import (
"context" "context"
"crypto/md5"
"io" "io"
"io/ioutil"
"net/http" "net/http"
"path" "path"
"strconv" "strconv"
@ -19,7 +21,7 @@ import (
) )
func (fs *FilerServer) autoChunk(ctx context.Context, w http.ResponseWriter, r *http.Request, func (fs *FilerServer) autoChunk(ctx context.Context, w http.ResponseWriter, r *http.Request,
replication string, collection string, dataCenter string, ttlSec int32, ttlString string) bool {
replication string, collection string, dataCenter string, ttlSec int32, ttlString string, fsync bool) bool {
if r.Method != "POST" { if r.Method != "POST" {
glog.V(4).Infoln("AutoChunking not supported for method", r.Method) glog.V(4).Infoln("AutoChunking not supported for method", r.Method)
return false return false
@ -55,7 +57,7 @@ func (fs *FilerServer) autoChunk(ctx context.Context, w http.ResponseWriter, r *
return false return false
} }
reply, err := fs.doAutoChunk(ctx, w, r, contentLength, chunkSize, replication, collection, dataCenter, ttlSec, ttlString)
reply, err := fs.doAutoChunk(ctx, w, r, contentLength, chunkSize, replication, collection, dataCenter, ttlSec, ttlString, fsync)
if err != nil { if err != nil {
writeJsonError(w, r, http.StatusInternalServerError, err) writeJsonError(w, r, http.StatusInternalServerError, err)
} else if reply != nil { } else if reply != nil {
@ -65,7 +67,7 @@ func (fs *FilerServer) autoChunk(ctx context.Context, w http.ResponseWriter, r *
} }
func (fs *FilerServer) doAutoChunk(ctx context.Context, w http.ResponseWriter, r *http.Request, func (fs *FilerServer) doAutoChunk(ctx context.Context, w http.ResponseWriter, r *http.Request,
contentLength int64, chunkSize int32, replication string, collection string, dataCenter string, ttlSec int32, ttlString string) (filerResult *FilerPostResult, replyerr error) {
contentLength int64, chunkSize int32, replication string, collection string, dataCenter string, ttlSec int32, ttlString string, fsync bool) (filerResult *FilerPostResult, replyerr error) {
stats.FilerRequestCounter.WithLabelValues("postAutoChunk").Inc() stats.FilerRequestCounter.WithLabelValues("postAutoChunk").Inc()
start := time.Now() start := time.Now()
@ -91,13 +93,16 @@ func (fs *FilerServer) doAutoChunk(ctx context.Context, w http.ResponseWriter, r
var fileChunks []*filer_pb.FileChunk var fileChunks []*filer_pb.FileChunk
md5Hash := md5.New()
var partReader = ioutil.NopCloser(io.TeeReader(part1, md5Hash))
chunkOffset := int64(0) chunkOffset := int64(0)
for chunkOffset < contentLength { for chunkOffset < contentLength {
limitedReader := io.LimitReader(part1, int64(chunkSize))
limitedReader := io.LimitReader(partReader, int64(chunkSize))
// assign one file id for one chunk // assign one file id for one chunk
fileId, urlLocation, auth, assignErr := fs.assignNewFileInfo(w, r, replication, collection, dataCenter, ttlString)
fileId, urlLocation, auth, assignErr := fs.assignNewFileInfo(w, r, replication, collection, dataCenter, ttlString, fsync)
if assignErr != nil { if assignErr != nil {
return nil, assignErr return nil, assignErr
} }
@ -157,6 +162,7 @@ func (fs *FilerServer) doAutoChunk(ctx context.Context, w http.ResponseWriter, r
Collection: collection, Collection: collection,
TtlSec: ttlSec, TtlSec: ttlSec,
Mime: contentType, Mime: contentType,
Md5: md5Hash.Sum(nil),
}, },
Chunks: fileChunks, Chunks: fileChunks,
} }

4
weed/server/filer_server_handlers_write_cipher.go

@ -17,9 +17,9 @@ import (
// handling single chunk POST or PUT upload // handling single chunk POST or PUT upload
func (fs *FilerServer) encrypt(ctx context.Context, w http.ResponseWriter, r *http.Request, func (fs *FilerServer) encrypt(ctx context.Context, w http.ResponseWriter, r *http.Request,
replication string, collection string, dataCenter string, ttlSeconds int32, ttlString string) (filerResult *FilerPostResult, err error) {
replication string, collection string, dataCenter string, ttlSeconds int32, ttlString string, fsync bool) (filerResult *FilerPostResult, err error) {
fileId, urlLocation, auth, err := fs.assignNewFileInfo(w, r, replication, collection, dataCenter, ttlString)
fileId, urlLocation, auth, err := fs.assignNewFileInfo(w, r, replication, collection, dataCenter, ttlString, fsync)
if err != nil || fileId == "" || urlLocation == "" { if err != nil || fileId == "" || urlLocation == "" {
return nil, fmt.Errorf("fail to allocate volume for %s, collection:%s, datacenter:%s", r.URL.Path, collection, dataCenter) return nil, fmt.Errorf("fail to allocate volume for %s, collection:%s, datacenter:%s", r.URL.Path, collection, dataCenter)

13
weed/server/master_grpc_server.go

@ -11,6 +11,7 @@ import (
"google.golang.org/grpc/peer" "google.golang.org/grpc/peer"
"github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/pb"
"github.com/chrislusf/seaweedfs/weed/pb/master_pb" "github.com/chrislusf/seaweedfs/weed/pb/master_pb"
"github.com/chrislusf/seaweedfs/weed/storage/backend" "github.com/chrislusf/seaweedfs/weed/storage/backend"
"github.com/chrislusf/seaweedfs/weed/storage/needle" "github.com/chrislusf/seaweedfs/weed/storage/needle"
@ -190,6 +191,18 @@ func (ms *MasterServer) KeepConnected(stream master_pb.Seaweed_KeepConnectedServ
peerAddress := findClientAddress(stream.Context(), req.GrpcPort) peerAddress := findClientAddress(stream.Context(), req.GrpcPort)
// only one shell can be connected at any time
if req.Name == pb.AdminShellClient {
if ms.currentAdminShellClient == "" {
ms.currentAdminShellClient = peerAddress
defer func() {
ms.currentAdminShellClient = ""
}()
} else {
return fmt.Errorf("only one concurrent shell allowed, but another shell is already connected from %s", peerAddress)
}
}
stopChan := make(chan bool) stopChan := make(chan bool)
clientName, messageChan := ms.addClient(req.Name, peerAddress) clientName, messageChan := ms.addClient(req.Name, peerAddress)

11
weed/server/master_server.go

@ -64,6 +64,8 @@ type MasterServer struct {
grpcDialOption grpc.DialOption grpcDialOption grpc.DialOption
MasterClient *wdclient.MasterClient MasterClient *wdclient.MasterClient
currentAdminShellClient string
} }
func NewMasterServer(r *mux.Router, option *MasterOption, peers []string) *MasterServer { func NewMasterServer(r *mux.Router, option *MasterOption, peers []string) *MasterServer {
@ -197,8 +199,8 @@ func (ms *MasterServer) startAdminScripts() {
v.SetDefault("master.maintenance.sleep_minutes", 17) v.SetDefault("master.maintenance.sleep_minutes", 17)
sleepMinutes := v.GetInt("master.maintenance.sleep_minutes") sleepMinutes := v.GetInt("master.maintenance.sleep_minutes")
v.SetDefault("master.filer.default_filer_url", "http://localhost:8888/")
filerURL := v.GetString("master.filer.default_filer_url")
v.SetDefault("master.filer.default", "localhost:8888")
filerHostPort := v.GetString("master.filer.default")
scriptLines := strings.Split(adminScripts, "\n") scriptLines := strings.Split(adminScripts, "\n")
@ -208,9 +210,10 @@ func (ms *MasterServer) startAdminScripts() {
shellOptions.GrpcDialOption = security.LoadClientTLS(v, "grpc.master") shellOptions.GrpcDialOption = security.LoadClientTLS(v, "grpc.master")
shellOptions.Masters = &masterAddress shellOptions.Masters = &masterAddress
shellOptions.FilerHost, shellOptions.FilerPort, shellOptions.Directory, err = util.ParseFilerUrl(filerURL)
shellOptions.FilerHost, shellOptions.FilerPort, err = util.ParseHostPort(filerHostPort)
shellOptions.Directory = "/"
if err != nil { if err != nil {
glog.V(0).Infof("failed to parse master.filer.default_filer_urll=%s : %v\n", filerURL, err)
glog.V(0).Infof("failed to parse master.filer.default = %s : %v\n", filerHostPort, err)
return return
} }

2
weed/server/volume_grpc_tail.go

@ -90,7 +90,7 @@ func (vs *VolumeServer) VolumeTailReceiver(ctx context.Context, req *volume_serv
defer glog.V(1).Infof("receive tailing volume %d finished", v.Id) defer glog.V(1).Infof("receive tailing volume %d finished", v.Id)
return resp, operation.TailVolumeFromSource(req.SourceVolumeServer, vs.grpcDialOption, v.Id, req.SinceNs, int(req.IdleTimeoutSeconds), func(n *needle.Needle) error { return resp, operation.TailVolumeFromSource(req.SourceVolumeServer, vs.grpcDialOption, v.Id, req.SinceNs, int(req.IdleTimeoutSeconds), func(n *needle.Needle) error {
_, err := vs.store.WriteVolumeNeedle(v.Id, n)
_, err := vs.store.WriteVolumeNeedle(v.Id, n, false)
return err return err
}) })

8
weed/server/volume_server_handlers_write.go

@ -166,3 +166,11 @@ func setEtag(w http.ResponseWriter, etag string) {
} }
} }
} }
func getEtag(resp *http.Response) (etag string) {
etag = resp.Header.Get("ETag")
if strings.HasPrefix(etag, "\"") && strings.HasSuffix(etag, "\"") {
return etag[1 : len(etag)-1]
}
return
}

13
weed/server/webdav_server.go

@ -16,8 +16,8 @@ import (
"github.com/chrislusf/seaweedfs/weed/operation" "github.com/chrislusf/seaweedfs/weed/operation"
"github.com/chrislusf/seaweedfs/weed/pb" "github.com/chrislusf/seaweedfs/weed/pb"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/pb/pb_cache"
"github.com/chrislusf/seaweedfs/weed/util" "github.com/chrislusf/seaweedfs/weed/util"
"github.com/chrislusf/seaweedfs/weed/util/chunk_cache"
"github.com/chrislusf/seaweedfs/weed/filer2" "github.com/chrislusf/seaweedfs/weed/filer2"
"github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/glog"
@ -34,6 +34,8 @@ type WebDavOption struct {
Uid uint32 Uid uint32
Gid uint32 Gid uint32
Cipher bool Cipher bool
CacheDir string
CacheSizeMB int64
} }
type WebDavServer struct { type WebDavServer struct {
@ -67,7 +69,7 @@ type WebDavFileSystem struct {
secret security.SigningKey secret security.SigningKey
filer *filer2.Filer filer *filer2.Filer
grpcDialOption grpc.DialOption grpcDialOption grpc.DialOption
chunkCache *pb_cache.ChunkCache
chunkCache *chunk_cache.ChunkCache
} }
type FileInfo struct { type FileInfo struct {
@ -96,9 +98,14 @@ type WebDavFile struct {
} }
func NewWebDavFileSystem(option *WebDavOption) (webdav.FileSystem, error) { func NewWebDavFileSystem(option *WebDavOption) (webdav.FileSystem, error) {
chunkCache := chunk_cache.NewChunkCache(256, option.CacheDir, option.CacheSizeMB, 4)
util.OnInterrupt(func() {
chunkCache.Shutdown()
})
return &WebDavFileSystem{ return &WebDavFileSystem{
option: option, option: option,
chunkCache: pb_cache.NewChunkCache(1000),
chunkCache: chunkCache,
}, nil }, nil
} }

2
weed/shell/commands.go

@ -43,7 +43,7 @@ var (
func NewCommandEnv(options ShellOptions) *CommandEnv { func NewCommandEnv(options ShellOptions) *CommandEnv {
return &CommandEnv{ return &CommandEnv{
env: make(map[string]string), env: make(map[string]string),
MasterClient: wdclient.NewMasterClient(options.GrpcDialOption, "shell", 0, strings.Split(*options.Masters, ",")),
MasterClient: wdclient.NewMasterClient(options.GrpcDialOption, pb.AdminShellClient, 0, strings.Split(*options.Masters, ",")),
option: options, option: options,
} }
} }

4
weed/storage/backend/memory_map/memory_map_backend.go

@ -3,12 +3,10 @@ package memory_map
import ( import (
"os" "os"
"time" "time"
"github.com/chrislusf/seaweedfs/weed/storage/backend"
) )
var ( var (
_ backend.BackendStorageFile = &MemoryMappedFile{}
// _ backend.BackendStorageFile = &MemoryMappedFile{} // remove this to break import cycle
) )
type MemoryMappedFile struct { type MemoryMappedFile struct {

7
weed/storage/volume_create.go → weed/storage/backend/volume_create.go

@ -1,15 +1,14 @@
// +build !linux,!windows // +build !linux,!windows
package storage
package backend
import ( import (
"os" "os"
"github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/storage/backend"
) )
func createVolumeFile(fileName string, preallocate int64, memoryMapSizeMB uint32) (backend.BackendStorageFile, error) {
func CreateVolumeFile(fileName string, preallocate int64, memoryMapSizeMB uint32) (BackendStorageFile, error) {
file, e := os.OpenFile(fileName, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0644) file, e := os.OpenFile(fileName, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0644)
if e != nil { if e != nil {
return nil, e return nil, e
@ -17,5 +16,5 @@ func createVolumeFile(fileName string, preallocate int64, memoryMapSizeMB uint32
if preallocate > 0 { if preallocate > 0 {
glog.V(0).Infof("Preallocated disk space for %s is not supported", fileName) glog.V(0).Infof("Preallocated disk space for %s is not supported", fileName)
} }
return backend.NewDiskFile(file), nil
return NewDiskFile(file), nil
} }

7
weed/storage/volume_create_linux.go → weed/storage/backend/volume_create_linux.go

@ -1,16 +1,15 @@
// +build linux // +build linux
package storage
package backend
import ( import (
"os" "os"
"syscall" "syscall"
"github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/storage/backend"
) )
func createVolumeFile(fileName string, preallocate int64, memoryMapSizeMB uint32) (backend.BackendStorageFile, error) {
func CreateVolumeFile(fileName string, preallocate int64, memoryMapSizeMB uint32) (BackendStorageFile, error) {
file, e := os.OpenFile(fileName, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0644) file, e := os.OpenFile(fileName, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0644)
if e != nil { if e != nil {
return nil, e return nil, e
@ -19,5 +18,5 @@ func createVolumeFile(fileName string, preallocate int64, memoryMapSizeMB uint32
syscall.Fallocate(int(file.Fd()), 1, 0, preallocate) syscall.Fallocate(int(file.Fd()), 1, 0, preallocate)
glog.V(0).Infof("Preallocated %d bytes disk space for %s", preallocate, fileName) glog.V(0).Infof("Preallocated %d bytes disk space for %s", preallocate, fileName)
} }
return backend.NewDiskFile(file), nil
return NewDiskFile(file), nil
} }

7
weed/storage/volume_create_windows.go → weed/storage/backend/volume_create_windows.go

@ -1,17 +1,16 @@
// +build windows // +build windows
package storage
package backend
import ( import (
"github.com/chrislusf/seaweedfs/weed/storage/backend/memory_map" "github.com/chrislusf/seaweedfs/weed/storage/backend/memory_map"
"golang.org/x/sys/windows" "golang.org/x/sys/windows"
"github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/storage/backend"
"github.com/chrislusf/seaweedfs/weed/storage/backend/memory_map/os_overloads" "github.com/chrislusf/seaweedfs/weed/storage/backend/memory_map/os_overloads"
) )
func createVolumeFile(fileName string, preallocate int64, memoryMapSizeMB uint32) (backend.BackendStorageFile, error) {
func CreateVolumeFile(fileName string, preallocate int64, memoryMapSizeMB uint32) (BackendStorageFile, error) {
if preallocate > 0 { if preallocate > 0 {
glog.V(0).Infof("Preallocated disk space for %s is not supported", fileName) glog.V(0).Infof("Preallocated disk space for %s is not supported", fileName)
} }
@ -27,7 +26,7 @@ func createVolumeFile(fileName string, preallocate int64, memoryMapSizeMB uint32
if e != nil { if e != nil {
return nil, e return nil, e
} }
return backend.NewDiskFile(file), nil
return NewDiskFile(file), nil
} }
} }

6
weed/storage/store.go

@ -252,7 +252,7 @@ func (s *Store) Close() {
} }
} }
func (s *Store) WriteVolumeNeedle(i needle.VolumeId, n *needle.Needle) (isUnchanged bool, err error) {
func (s *Store) WriteVolumeNeedle(i needle.VolumeId, n *needle.Needle, fsync bool) (isUnchanged bool, err error) {
if v := s.findVolume(i); v != nil { if v := s.findVolume(i); v != nil {
if v.IsReadOnly() { if v.IsReadOnly() {
err = fmt.Errorf("volume %d is read only", i) err = fmt.Errorf("volume %d is read only", i)
@ -260,7 +260,7 @@ func (s *Store) WriteVolumeNeedle(i needle.VolumeId, n *needle.Needle) (isUnchan
} }
// using len(n.Data) here instead of n.Size before n.Size is populated in v.writeNeedle(n) // using len(n.Data) here instead of n.Size before n.Size is populated in v.writeNeedle(n)
if MaxPossibleVolumeSize >= v.ContentSize()+uint64(needle.GetActualSize(uint32(len(n.Data)), v.Version())) { if MaxPossibleVolumeSize >= v.ContentSize()+uint64(needle.GetActualSize(uint32(len(n.Data)), v.Version())) {
_, _, isUnchanged, err = v.writeNeedle(n)
_, _, isUnchanged, err = v.writeNeedle(n, fsync)
} else { } else {
err = fmt.Errorf("volume size limit %d exceeded! current size is %d", s.GetVolumeSizeLimit(), v.ContentSize()) err = fmt.Errorf("volume size limit %d exceeded! current size is %d", s.GetVolumeSizeLimit(), v.ContentSize())
} }
@ -421,7 +421,7 @@ func (s *Store) MaybeAdjustVolumeMax() (hasChanges bool) {
maxVolumeCount += int(uint64(unclaimedSpaces)/volumeSizeLimit) - 1 maxVolumeCount += int(uint64(unclaimedSpaces)/volumeSizeLimit) - 1
} }
diskLocation.MaxVolumeCount = maxVolumeCount diskLocation.MaxVolumeCount = maxVolumeCount
glog.V(0).Infof("disk %s max %d unclaimedSpace:%dMB, unused:%dMB volumeSizeLimit:%d/MB",
glog.V(0).Infof("disk %s max %d unclaimedSpace:%dMB, unused:%dMB volumeSizeLimit:%dMB",
diskLocation.Directory, maxVolumeCount, unclaimedSpaces/1024/1024, unusedSpace/1024/1024, volumeSizeLimit/1024/1024) diskLocation.Directory, maxVolumeCount, unclaimedSpaces/1024/1024, unusedSpace/1024/1024, volumeSizeLimit/1024/1024)
hasChanges = true hasChanges = true
} }

2
weed/storage/volume_loading.go

@ -54,7 +54,7 @@ func (v *Volume) load(alsoLoadIndex bool, createDatIfMissing bool, needleMapKind
v.DataBackend = backend.NewDiskFile(dataFile) v.DataBackend = backend.NewDiskFile(dataFile)
} else { } else {
if createDatIfMissing { if createDatIfMissing {
v.DataBackend, err = createVolumeFile(fileName+".dat", preallocate, v.MemoryMapMaxSizeMb)
v.DataBackend, err = backend.CreateVolumeFile(fileName+".dat", preallocate, v.MemoryMapMaxSizeMb)
} else { } else {
return fmt.Errorf("Volume Data file %s.dat does not exist.", fileName) return fmt.Errorf("Volume Data file %s.dat does not exist.", fileName)
} }

7
weed/storage/volume_read_write.go

@ -63,7 +63,7 @@ func (v *Volume) Destroy() (err error) {
return return
} }
func (v *Volume) writeNeedle(n *needle.Needle) (offset uint64, size uint32, isUnchanged bool, err error) {
func (v *Volume) writeNeedle(n *needle.Needle, fsync bool) (offset uint64, size uint32, isUnchanged bool, err error) {
// glog.V(4).Infof("writing needle %s", needle.NewFileIdFromNeedle(v.Id, n).String()) // glog.V(4).Infof("writing needle %s", needle.NewFileIdFromNeedle(v.Id, n).String())
v.dataFileAccessLock.Lock() v.dataFileAccessLock.Lock()
defer v.dataFileAccessLock.Unlock() defer v.dataFileAccessLock.Unlock()
@ -98,6 +98,11 @@ func (v *Volume) writeNeedle(n *needle.Needle) (offset uint64, size uint32, isUn
if offset, size, _, err = n.Append(v.DataBackend, v.Version()); err != nil { if offset, size, _, err = n.Append(v.DataBackend, v.Version()); err != nil {
return return
} }
if fsync {
if err = v.DataBackend.Sync(); err != nil {
return
}
}
v.lastAppendAtNs = n.AppendAtNs v.lastAppendAtNs = n.AppendAtNs
// add to needle map // add to needle map

4
weed/storage/volume_vacuum.go

@ -354,7 +354,7 @@ func (v *Volume) copyDataAndGenerateIndexFile(dstName, idxName string, prealloca
var ( var (
dst backend.BackendStorageFile dst backend.BackendStorageFile
) )
if dst, err = createVolumeFile(dstName, preallocate, 0); err != nil {
if dst, err = backend.CreateVolumeFile(dstName, preallocate, 0); err != nil {
return return
} }
defer dst.Close() defer dst.Close()
@ -383,7 +383,7 @@ func copyDataBasedOnIndexFile(srcDatName, srcIdxName, dstDatName, datIdxName str
srcDatBackend, dstDatBackend backend.BackendStorageFile srcDatBackend, dstDatBackend backend.BackendStorageFile
dataFile *os.File dataFile *os.File
) )
if dstDatBackend, err = createVolumeFile(dstDatName, preallocate, 0); err != nil {
if dstDatBackend, err = backend.CreateVolumeFile(dstDatName, preallocate, 0); err != nil {
return return
} }
defer dstDatBackend.Close() defer dstDatBackend.Close()

2
weed/storage/volume_vacuum_test.go

@ -129,7 +129,7 @@ func TestCompaction(t *testing.T) {
} }
func doSomeWritesDeletes(i int, v *Volume, t *testing.T, infos []*needleInfo) { func doSomeWritesDeletes(i int, v *Volume, t *testing.T, infos []*needleInfo) {
n := newRandomNeedle(uint64(i)) n := newRandomNeedle(uint64(i))
_, size, _, err := v.writeNeedle(n)
_, size, _, err := v.writeNeedle(n, false)
if err != nil { if err != nil {
t.Fatalf("write file %d: %v", i, err) t.Fatalf("write file %d: %v", i, err)
} }

10
weed/topology/store_replicate.go

@ -22,8 +22,10 @@ func ReplicatedWrite(masterNode string, s *storage.Store, volumeId needle.Volume
//check JWT //check JWT
jwt := security.GetJwt(r) jwt := security.GetJwt(r)
// check whether this is a replicated write request
var remoteLocations []operation.Location var remoteLocations []operation.Location
if r.FormValue("type") != "replicate" { if r.FormValue("type") != "replicate" {
// this is the initial request
remoteLocations, err = getWritableRemoteReplications(s, volumeId, masterNode) remoteLocations, err = getWritableRemoteReplications(s, volumeId, masterNode)
if err != nil { if err != nil {
glog.V(0).Infoln(err) glog.V(0).Infoln(err)
@ -31,8 +33,14 @@ func ReplicatedWrite(masterNode string, s *storage.Store, volumeId needle.Volume
} }
} }
// read fsync value
fsync := false
if r.FormValue("fsync") == "true" {
fsync = true
}
if s.GetVolume(volumeId) != nil { if s.GetVolume(volumeId) != nil {
isUnchanged, err = s.WriteVolumeNeedle(volumeId, n)
isUnchanged, err = s.WriteVolumeNeedle(volumeId, n, fsync)
if err != nil { if err != nil {
err = fmt.Errorf("failed to write to local disk: %v", err) err = fmt.Errorf("failed to write to local disk: %v", err)
glog.V(0).Infoln(err) glog.V(0).Infoln(err)

7
weed/util/bytes.go

@ -2,6 +2,7 @@ package util
import ( import (
"crypto/md5" "crypto/md5"
"fmt"
"io" "io"
) )
@ -91,3 +92,9 @@ func HashToInt32(data []byte) (v int32) {
return return
} }
// Md5 returns the lowercase hex-encoded MD5 digest of data.
func Md5(data []byte) string {
	digest := md5.Sum(data)
	return fmt.Sprintf("%x", digest)
}

134
weed/util/chunk_cache/chunk_cache.go

@ -0,0 +1,134 @@
package chunk_cache
import (
"fmt"
"path"
"sort"
"sync"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/storage"
"github.com/chrislusf/seaweedfs/weed/storage/needle"
)
// a global cache for recently accessed file chunks
type ChunkCache struct {
	memCache   *ChunkCacheInMemory // fast in-memory tier for recently accessed chunks
	diskCaches []*ChunkCacheVolume // on-disk tier; kept ordered newest-first (writes go to index 0)
	sync.RWMutex
}
// NewChunkCache builds a two-tier chunk cache: an in-memory cache holding up
// to maxEntries chunks, plus on-disk cache volumes under dir totaling
// diskSizeMB, split into at least segmentCount volumes.
func NewChunkCache(maxEntries int64, dir string, diskSizeMB int64, segmentCount int) *ChunkCache {
	c := &ChunkCache{
		memCache: NewChunkCacheInMemory(maxEntries),
	}

	// default to ~30000MB per volume; when total space is small, fall back to
	// segmentCount equally sized volumes instead
	volumeCount, volumeSize := int(diskSizeMB/30000), int64(30000)
	if volumeCount < segmentCount {
		volumeCount, volumeSize = segmentCount, diskSizeMB/int64(segmentCount)
	}

	for i := 0; i < volumeCount; i++ {
		fileName := path.Join(dir, fmt.Sprintf("cache_%d", i))
		diskCache, err := LoadOrCreateChunkCacheVolume(fileName, volumeSize*1024*1024)
		if err != nil {
			glog.Errorf("failed to add cache %s : %v", fileName, err)
			continue
		}
		c.diskCaches = append(c.diskCaches, diskCache)
	}

	// keep newest cache to the front so writes land on the freshest volume
	sort.Slice(c.diskCaches, func(i, j int) bool {
		return c.diskCaches[i].lastModTime.After(c.diskCaches[j].lastModTime)
	})

	return c
}
// GetChunk looks a chunk up by file id, first in memory, then on disk.
// It returns nil when the chunk is not cached. Safe on a nil *ChunkCache.
func (c *ChunkCache) GetChunk(fileId string) (data []byte) {
	if c == nil {
		return nil
	}
	c.RLock()
	defer c.RUnlock()
	return c.doGetChunk(fileId)
}

// doGetChunk performs the actual lookup; the caller must hold the read lock.
func (c *ChunkCache) doGetChunk(fileId string) []byte {
	// memory tier first
	if hit := c.memCache.GetChunk(fileId); hit != nil {
		return hit
	}

	fid, err := needle.ParseFileIdFromString(fileId)
	if err != nil {
		glog.Errorf("failed to parse file id %s", fileId)
		return nil
	}

	// then each disk volume, newest first
	for _, dc := range c.diskCaches {
		chunk, readErr := dc.GetNeedle(fid.Key)
		if readErr == storage.ErrorNotFound {
			continue
		}
		if readErr != nil {
			glog.Errorf("failed to read cache file %s id %s", dc.fileName, fileId)
			continue
		}
		if len(chunk) != 0 {
			return chunk
		}
	}

	return nil
}
// SetChunk stores a chunk under fileId in the in-memory cache and, when disk
// volumes are configured, on disk as well. Safe on a nil *ChunkCache.
func (c *ChunkCache) SetChunk(fileId string, data []byte) {
	if c == nil {
		return
	}
	c.Lock()
	defer c.Unlock()

	c.doSetChunk(fileId, data)
}

// doSetChunk performs the actual write; the caller must hold the write lock.
func (c *ChunkCache) doSetChunk(fileId string, data []byte) {

	c.memCache.SetChunk(fileId, data)

	if len(c.diskCaches) == 0 {
		return
	}

	// rotate volumes: when the newest volume would overflow, reset the oldest
	// one and move it to the front to receive new writes
	if c.diskCaches[0].fileSize+int64(len(data)) > c.diskCaches[0].sizeLimit {
		t, resetErr := c.diskCaches[len(c.diskCaches)-1].Reset()
		if resetErr != nil {
			glog.Errorf("failed to reset cache file %s", c.diskCaches[len(c.diskCaches)-1].fileName)
			return
		}
		for i := len(c.diskCaches) - 1; i > 0; i-- {
			c.diskCaches[i] = c.diskCaches[i-1]
		}
		c.diskCaches[0] = t
	}

	fid, err := needle.ParseFileIdFromString(fileId)
	if err != nil {
		glog.Errorf("failed to parse file id %s", fileId)
		return
	}
	// fix: the write error was silently discarded; log it so disk-cache
	// failures are visible (the memory tier above still holds the chunk)
	if writeErr := c.diskCaches[0].WriteNeedle(fid.Key, data); writeErr != nil {
		glog.Errorf("failed to write cache file %s id %s: %v", c.diskCaches[0].fileName, fileId, writeErr)
	}
}
// Shutdown closes every disk cache volume. Safe on a nil *ChunkCache.
func (c *ChunkCache) Shutdown() {
	if c == nil {
		return
	}
	c.Lock()
	defer c.Unlock()
	for _, dc := range c.diskCaches {
		dc.Shutdown()
	}
}

12
weed/pb/pb_cache/chunk_cache.go → weed/util/chunk_cache/chunk_cache_in_memory.go

@ -1,4 +1,4 @@
package pb_cache
package chunk_cache
import ( import (
"time" "time"
@ -7,21 +7,21 @@ import (
) )
// a global cache for recently accessed file chunks // a global cache for recently accessed file chunks
type ChunkCache struct {
type ChunkCacheInMemory struct {
cache *ccache.Cache cache *ccache.Cache
} }
func NewChunkCache(maxEntries int64) *ChunkCache {
func NewChunkCacheInMemory(maxEntries int64) *ChunkCacheInMemory {
pruneCount := maxEntries >> 3 pruneCount := maxEntries >> 3
if pruneCount <= 0 { if pruneCount <= 0 {
pruneCount = 500 pruneCount = 500
} }
return &ChunkCache{
return &ChunkCacheInMemory{
cache: ccache.New(ccache.Configure().MaxSize(maxEntries).ItemsToPrune(uint32(pruneCount))), cache: ccache.New(ccache.Configure().MaxSize(maxEntries).ItemsToPrune(uint32(pruneCount))),
} }
} }
func (c *ChunkCache) GetChunk(fileId string) []byte {
func (c *ChunkCacheInMemory) GetChunk(fileId string) []byte {
item := c.cache.Get(fileId) item := c.cache.Get(fileId)
if item == nil { if item == nil {
return nil return nil
@ -31,6 +31,6 @@ func (c *ChunkCache) GetChunk(fileId string) []byte {
return data return data
} }
func (c *ChunkCache) SetChunk(fileId string, data []byte) {
func (c *ChunkCacheInMemory) SetChunk(fileId string, data []byte) {
c.cache.Set(fileId, data, time.Hour) c.cache.Set(fileId, data, time.Hour)
} }

145
weed/util/chunk_cache/chunk_cache_on_disk.go

@ -0,0 +1,145 @@
package chunk_cache
import (
"fmt"
"os"
"time"
"github.com/syndtr/goleveldb/leveldb/opt"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/storage"
"github.com/chrislusf/seaweedfs/weed/storage/backend"
"github.com/chrislusf/seaweedfs/weed/storage/types"
"github.com/chrislusf/seaweedfs/weed/util"
)
// This implements an on disk cache
// The entries are an FIFO with a size limit
type ChunkCacheVolume struct {
	DataBackend backend.BackendStorageFile // the .dat file holding needle data
	nm          storage.NeedleMapper       // key -> (offset, size) index, leveldb-backed
	fileName    string                     // base path; .dat/.idx/.ldb suffixes are appended
	smallBuffer []byte                     // zero scratch buffer of NeedlePaddingSize bytes, used for padding writes
	sizeLimit   int64                      // maximum size in bytes for the .dat file
	lastModTime time.Time                  // used to order volumes newest-first
	fileSize    int64                      // current .dat size, i.e. the next append offset
}
// LoadOrCreateChunkCacheVolume opens an existing cache volume (a .dat data
// file plus a .idx/.ldb needle index) rooted at fileName, or creates a fresh
// one with the .dat file preallocated to `preallocate` bytes.
func LoadOrCreateChunkCacheVolume(fileName string, preallocate int64) (*ChunkCacheVolume, error) {
	v := &ChunkCacheVolume{
		smallBuffer: make([]byte, types.NeedlePaddingSize),
		fileName:    fileName,
		sizeLimit:   preallocate,
	}
	var err error
	if exists, canRead, canWrite, modTime, fileSize := util.CheckFile(v.fileName + ".dat"); exists {
		if !canRead {
			return nil, fmt.Errorf("cannot read cache file %s.dat", v.fileName)
		}
		if !canWrite {
			return nil, fmt.Errorf("cannot write cache file %s.dat", v.fileName)
		}
		// reuse the existing data file and pick up its size/mtime;
		// note that err here intentionally shadows the outer err
		if dataFile, err := os.OpenFile(v.fileName+".dat", os.O_RDWR|os.O_CREATE, 0644); err != nil {
			return nil, fmt.Errorf("cannot create cache file %s.dat: %v", v.fileName, err)
		} else {
			v.DataBackend = backend.NewDiskFile(dataFile)
			v.lastModTime = modTime
			v.fileSize = fileSize
		}
	} else {
		// no existing data file: create a new preallocated one
		if v.DataBackend, err = backend.CreateVolumeFile(v.fileName+".dat", preallocate, 0); err != nil {
			return nil, fmt.Errorf("cannot create cache file %s.dat: %v", v.fileName, err)
		}
		v.lastModTime = time.Now()
	}
	var indexFile *os.File
	if indexFile, err = os.OpenFile(v.fileName+".idx", os.O_RDWR|os.O_CREATE, 0644); err != nil {
		return nil, fmt.Errorf("cannot write cache index %s.idx: %v", v.fileName, err)
	}
	glog.V(0).Infoln("loading leveldb", v.fileName+".ldb")
	// small leveldb footprint: this index only maps needle keys to offsets
	opts := &opt.Options{
		BlockCacheCapacity: 2 * 1024 * 1024, // default value is 8MiB
		WriteBuffer: 1 * 1024 * 1024, // default value is 4MiB
		CompactionTableSizeMultiplier: 10, // default value is 1
	}
	if v.nm, err = storage.NewLevelDbNeedleMap(v.fileName+".ldb", indexFile, opts); err != nil {
		return nil, fmt.Errorf("loading leveldb %s error: %v", v.fileName+".ldb", err)
	}
	return v, nil
}
// Shutdown closes the data backend and the needle map, leaving the files on
// disk so the volume can be reloaded later.
func (v *ChunkCacheVolume) Shutdown() {
	if backendFile := v.DataBackend; backendFile != nil {
		backendFile.Close()
		v.DataBackend = nil
	}
	if needleMap := v.nm; needleMap != nil {
		needleMap.Close()
		v.nm = nil
	}
}
// destroy shuts the volume down and removes its files from disk.
func (v *ChunkCacheVolume) destroy() {
	v.Shutdown()
	for _, suffix := range []string{".dat", ".idx"} {
		os.Remove(v.fileName + suffix)
	}
	os.RemoveAll(v.fileName + ".ldb")
}
// Reset destroys the on-disk files of this volume and recreates an empty one
// with the same file name and size limit, returning the fresh volume.
func (v *ChunkCacheVolume) Reset() (*ChunkCacheVolume, error) {
	v.destroy()
	return LoadOrCreateChunkCacheVolume(v.fileName, v.sizeLimit)
}
// GetNeedle reads the cached bytes stored under key, returning
// storage.ErrorNotFound when the key is not in the index.
func (v *ChunkCacheVolume) GetNeedle(key types.NeedleId) ([]byte, error) {
	nv, found := v.nm.Get(key)
	if !found {
		return nil, storage.ErrorNotFound
	}
	data := make([]byte, nv.Size)
	readSize, readErr := v.DataBackend.ReadAt(data, nv.Offset.ToAcutalOffset())
	if readErr != nil {
		return nil, fmt.Errorf("read %s.dat [%d,%d): %v",
			v.fileName, nv.Offset.ToAcutalOffset(), nv.Offset.ToAcutalOffset()+int64(nv.Size), readErr)
	}
	if readSize != int(nv.Size) {
		return nil, fmt.Errorf("read %d, expected %d", readSize, nv.Size)
	}
	return data, nil
}
// WriteNeedle appends data at the end of the .dat file, pads the write up to
// the next NeedlePaddingSize boundary, and records (offset, size) under key
// in the needle map. Index-save failures are logged but not returned,
// matching the best-effort nature of the cache.
func (v *ChunkCacheVolume) WriteNeedle(key types.NeedleId, data []byte) error {

	offset := v.fileSize

	written, err := v.DataBackend.WriteAt(data, offset)
	if err != nil {
		return err
	} else if written != len(data) {
		return fmt.Errorf("partial written %d, expected %d", written, len(data))
	}

	v.fileSize += int64(written)
	extraSize := written % types.NeedlePaddingSize
	if extraSize != 0 {
		// fix: the padding write error was previously ignored; a failed
		// padding write would silently misalign every subsequent needle
		if _, padErr := v.DataBackend.WriteAt(v.smallBuffer[:types.NeedlePaddingSize-extraSize], offset+int64(written)); padErr != nil {
			return fmt.Errorf("write padding: %v", padErr)
		}
		v.fileSize += int64(types.NeedlePaddingSize - extraSize)
	}

	if err := v.nm.Put(key, types.ToOffset(offset), uint32(len(data))); err != nil {
		glog.V(4).Infof("failed to save in needle map %d: %v", key, err)
	}

	return nil
}

58
weed/util/chunk_cache/chunk_cache_on_disk_test.go

@ -0,0 +1,58 @@
package chunk_cache
import (
"bytes"
"fmt"
"io/ioutil"
"math/rand"
"os"
"testing"
)
// TestOnDisk writes several 1MB chunks through the cache, verifies they read
// back intact, then reopens the cache from disk and verifies persistence.
func TestOnDisk(t *testing.T) {

	tmpDir, _ := ioutil.TempDir("", "c")
	defer os.RemoveAll(tmpDir)

	totalDiskSizeMb := int64(6)
	segmentCount := 2
	writeCount := 5

	cache := NewChunkCache(0, tmpDir, totalDiskSizeMb, segmentCount)

	type chunkEntry struct {
		data   []byte
		fileId string
	}
	entries := make([]*chunkEntry, writeCount)
	for i := 0; i < writeCount; i++ {
		payload := make([]byte, 1024*1024)
		rand.Read(payload)
		entries[i] = &chunkEntry{
			data:   payload,
			fileId: fmt.Sprintf("1,%daabbccdd", i+1),
		}
		cache.SetChunk(entries[i].fileId, entries[i].data)
	}

	verifyAll := func() {
		for i, entry := range entries {
			if !bytes.Equal(cache.GetChunk(entry.fileId), entry.data) {
				t.Errorf("failed to write to and read from cache: %d", i)
			}
		}
	}

	verifyAll()
	cache.Shutdown()

	// reopen from disk; the data must survive the restart
	cache = NewChunkCache(0, tmpDir, totalDiskSizeMb, segmentCount)
	verifyAll()
	cache.Shutdown()
}

2
weed/util/constants.go

@ -5,5 +5,5 @@ import (
) )
var ( var (
VERSION = fmt.Sprintf("%s %d.%d", sizeLimit, 1, 71)
VERSION = fmt.Sprintf("%s %d.%d", sizeLimit, 1, 73)
) )

52
weed/queue/log_buffer.go → weed/util/log_buffer/log_buffer.go

@ -1,4 +1,4 @@
package queue
package log_buffer
import ( import (
"sync" "sync"
@ -11,6 +11,9 @@ import (
"github.com/chrislusf/seaweedfs/weed/util" "github.com/chrislusf/seaweedfs/weed/util"
) )
const BufferSize = 4 * 1024 * 1024
const PreviousBufferCount = 3
type dataToFlush struct { type dataToFlush struct {
startTime time.Time startTime time.Time
stopTime time.Time stopTime time.Time
@ -18,6 +21,7 @@ type dataToFlush struct {
} }
type LogBuffer struct { type LogBuffer struct {
prevBuffers *SealedBuffers
buf []byte buf []byte
idx []int idx []int
pos int pos int
@ -34,7 +38,8 @@ type LogBuffer struct {
func NewLogBuffer(flushInterval time.Duration, flushFn func(startTime, stopTime time.Time, buf []byte), notifyFn func()) *LogBuffer { func NewLogBuffer(flushInterval time.Duration, flushFn func(startTime, stopTime time.Time, buf []byte), notifyFn func()) *LogBuffer {
lb := &LogBuffer{ lb := &LogBuffer{
buf: make([]byte, 4*1024*1024),
prevBuffers: newSealedBuffers(PreviousBufferCount),
buf: make([]byte, BufferSize),
sizeBuf: make([]byte, 4), sizeBuf: make([]byte, 4),
flushInterval: flushInterval, flushInterval: flushInterval,
flushFn: flushFn, flushFn: flushFn,
@ -46,8 +51,18 @@ func NewLogBuffer(flushInterval time.Duration, flushFn func(startTime, stopTime
return lb return lb
} }
func (m *LogBuffer) AddToBuffer(ts time.Time, key, data []byte) {
func (m *LogBuffer) AddToBuffer(key, data []byte) {
m.Lock()
defer func() {
m.Unlock()
if m.notifyFn != nil {
m.notifyFn()
}
}()
// need to put the timestamp inside the lock
ts := time.Now()
logEntry := &filer_pb.LogEntry{ logEntry := &filer_pb.LogEntry{
TsNs: ts.UnixNano(), TsNs: ts.UnixNano(),
PartitionKeyHash: util.HashToInt32(key), PartitionKeyHash: util.HashToInt32(key),
@ -58,14 +73,6 @@ func (m *LogBuffer) AddToBuffer(ts time.Time, key, data []byte) {
size := len(logEntryData) size := len(logEntryData)
m.Lock()
defer func() {
m.Unlock()
if m.notifyFn != nil {
m.notifyFn()
}
}()
if m.pos == 0 { if m.pos == 0 {
m.startTime = ts m.startTime = ts
} }
@ -125,6 +132,7 @@ func (m *LogBuffer) copyToFlush() *dataToFlush {
stopTime: m.stopTime, stopTime: m.stopTime,
data: copiedBytes(m.buf[:m.pos]), data: copiedBytes(m.buf[:m.pos]),
} }
m.buf = m.prevBuffers.SealBuffer(m.startTime, m.stopTime, m.buf)
m.pos = 0 m.pos = 0
m.idx = m.idx[:0] m.idx = m.idx[:0]
return d return d
@ -153,18 +161,18 @@ func (m *LogBuffer) ReadFromBuffer(lastReadTime time.Time) (ts time.Time, buffer
l, h := 0, len(m.idx)-1 l, h := 0, len(m.idx)-1
/* /*
for i, pos := range m.idx {
logEntry, ts := readTs(m.buf, pos)
event := &filer_pb.FullEventNotification{}
proto.Unmarshal(logEntry.Data, event)
entry := event.EventNotification.OldEntry
if entry == nil {
entry = event.EventNotification.NewEntry
for i, pos := range m.idx {
logEntry, ts := readTs(m.buf, pos)
event := &filer_pb.SubscribeMetadataResponse{}
proto.Unmarshal(logEntry.Data, event)
entry := event.EventNotification.OldEntry
if entry == nil {
entry = event.EventNotification.NewEntry
}
fmt.Printf("entry %d ts: %v offset:%d dir:%s name:%s\n", i, time.Unix(0, ts), pos, event.Directory, entry.Name)
} }
fmt.Printf("entry %d ts: %v offset:%d dir:%s name:%s\n", i, time.Unix(0, ts), pos, event.Directory, entry.Name)
}
fmt.Printf("l=%d, h=%d\n", l, h)
*/
fmt.Printf("l=%d, h=%d\n", l, h)
*/
for l <= h { for l <= h {
mid := (l + h) / 2 mid := (l + h) / 2

40
weed/util/log_buffer/sealed_buffer.go

@ -0,0 +1,40 @@
package log_buffer
import "time"
// MemBuffer is a fixed-size byte buffer together with the time range of the
// entries it holds.
type MemBuffer struct {
	buf       []byte
	startTime time.Time
	stopTime  time.Time
}

// SealedBuffers keeps the most recently sealed (read-only) log buffers so
// slightly-stale readers can still be served from memory.
type SealedBuffers struct {
	buffers []*MemBuffer
}

// newSealedBuffers allocates size empty buffers of BufferSize bytes each.
func newSealedBuffers(size int) *SealedBuffers {
	sbs := &SealedBuffers{}

	sbs.buffers = make([]*MemBuffer, size)
	for i := 0; i < size; i++ {
		sbs.buffers[i] = &MemBuffer{
			buf: make([]byte, BufferSize),
		}
	}

	return sbs
}

// SealBuffer stores buf (covering [startTime, stopTime]) as the newest sealed
// buffer, evicts the oldest one, and returns the evicted byte slice for reuse
// as the caller's next active write buffer.
func (sbs *SealedBuffers) SealBuffer(startTime, stopTime time.Time, buf []byte) (newBuf []byte) {
	// fix: capture the evicted slice BEFORE shifting. sbs.buffers[0] is
	// mutated by the loop below, so reading buffers[0].buf afterwards would
	// return the second-oldest slice — which stays inside buffers[0] and
	// would then alias the caller's new write buffer, corrupting sealed data.
	oldBuf := sbs.buffers[0].buf
	size := len(sbs.buffers)
	for i := 0; i < size-1; i++ {
		sbs.buffers[i].buf = sbs.buffers[i+1].buf
		sbs.buffers[i].startTime = sbs.buffers[i+1].startTime
		sbs.buffers[i].stopTime = sbs.buffers[i+1].stopTime
	}
	sbs.buffers[size-1].buf = buf
	sbs.buffers[size-1].startTime = startTime
	sbs.buffers[size-1].stopTime = stopTime
	return oldBuf
}

25
weed/util/network.go

@ -0,0 +1,25 @@
package util
import (
"net"
"github.com/chrislusf/seaweedfs/weed/glog"
)
// DetectedHostAddress returns this host's first non-loopback IPv4 address,
// "localhost" when no such address exists, or "" when the interface lookup
// itself fails.
func DetectedHostAddress() string {
	addrs, err := net.InterfaceAddrs()
	if err != nil {
		glog.V(0).Infof("failed to detect ip address: %v", err)
		return ""
	}

	for _, addr := range addrs {
		ipNet, ok := addr.(*net.IPNet)
		if !ok || ipNet.IP.IsLoopback() {
			continue
		}
		if ipNet.IP.To4() != nil {
			return ipNet.IP.String()
		}
	}

	return "localhost"
}

16
weed/util/parse.go

@ -1,6 +1,7 @@
package util package util
import ( import (
"fmt"
"net/url" "net/url"
"strconv" "strconv"
"strings" "strings"
@ -45,3 +46,18 @@ func ParseFilerUrl(entryPath string) (filerServer string, filerPort int64, path
path = u.Path path = u.Path
return return
} }
// ParseHostPort splits a "host:port" string into its host name and numeric
// port. It returns an error when the input is not exactly host:port or when
// the port is not a valid integer; filerServer is "" on any error.
func ParseHostPort(hostPort string) (filerServer string, filerPort int64, err error) {
	parts := strings.Split(hostPort, ":")
	if len(parts) != 2 {
		// fix: error strings must not end with a newline (go vet: the message
		// gets wrapped/concatenated by callers)
		err = fmt.Errorf("failed to parse %s", hostPort)
		return
	}

	filerPort, err = strconv.ParseInt(parts[1], 10, 64)
	if err == nil {
		filerServer = parts[0]
	}

	return
}

26
weed/wdclient/masterclient.go

@ -13,7 +13,7 @@ import (
) )
type MasterClient struct { type MasterClient struct {
name string
clientType string
grpcPort uint32 grpcPort uint32
currentMaster string currentMaster string
masters []string masters []string
@ -22,9 +22,9 @@ type MasterClient struct {
vidMap vidMap
} }
func NewMasterClient(grpcDialOption grpc.DialOption, clientName string, clientGrpcPort uint32, masters []string) *MasterClient {
func NewMasterClient(grpcDialOption grpc.DialOption, clientType string, clientGrpcPort uint32, masters []string) *MasterClient {
return &MasterClient{ return &MasterClient{
name: clientName,
clientType: clientType,
grpcPort: clientGrpcPort, grpcPort: clientGrpcPort,
masters: masters, masters: masters,
grpcDialOption: grpcDialOption, grpcDialOption: grpcDialOption,
@ -43,7 +43,7 @@ func (mc *MasterClient) WaitUntilConnected() {
} }
func (mc *MasterClient) KeepConnectedToMaster() { func (mc *MasterClient) KeepConnectedToMaster() {
glog.V(1).Infof("%s bootstraps with masters %v", mc.name, mc.masters)
glog.V(1).Infof("%s bootstraps with masters %v", mc.clientType, mc.masters)
for { for {
mc.tryAllMasters() mc.tryAllMasters()
time.Sleep(time.Second) time.Sleep(time.Second)
@ -65,27 +65,27 @@ func (mc *MasterClient) tryAllMasters() {
} }
func (mc *MasterClient) tryConnectToMaster(master string) (nextHintedLeader string) { func (mc *MasterClient) tryConnectToMaster(master string) (nextHintedLeader string) {
glog.V(1).Infof("%s Connecting to master %v", mc.name, master)
glog.V(1).Infof("%s Connecting to master %v", mc.clientType, master)
gprcErr := pb.WithMasterClient(master, mc.grpcDialOption, func(client master_pb.SeaweedClient) error { gprcErr := pb.WithMasterClient(master, mc.grpcDialOption, func(client master_pb.SeaweedClient) error {
stream, err := client.KeepConnected(context.Background()) stream, err := client.KeepConnected(context.Background())
if err != nil { if err != nil {
glog.V(0).Infof("%s failed to keep connected to %s: %v", mc.name, master, err)
glog.V(0).Infof("%s failed to keep connected to %s: %v", mc.clientType, master, err)
return err return err
} }
if err = stream.Send(&master_pb.KeepConnectedRequest{Name: mc.name, GrpcPort: mc.grpcPort}); err != nil {
glog.V(0).Infof("%s failed to send to %s: %v", mc.name, master, err)
if err = stream.Send(&master_pb.KeepConnectedRequest{Name: mc.clientType, GrpcPort: mc.grpcPort}); err != nil {
glog.V(0).Infof("%s failed to send to %s: %v", mc.clientType, master, err)
return err return err
} }
glog.V(1).Infof("%s Connected to %v", mc.name, master)
glog.V(1).Infof("%s Connected to %v", mc.clientType, master)
mc.currentMaster = master mc.currentMaster = master
for { for {
volumeLocation, err := stream.Recv() volumeLocation, err := stream.Recv()
if err != nil { if err != nil {
glog.V(0).Infof("%s failed to receive from %s: %v", mc.name, master, err)
glog.V(0).Infof("%s failed to receive from %s: %v", mc.clientType, master, err)
return err return err
} }
@ -102,18 +102,18 @@ func (mc *MasterClient) tryConnectToMaster(master string) (nextHintedLeader stri
PublicUrl: volumeLocation.PublicUrl, PublicUrl: volumeLocation.PublicUrl,
} }
for _, newVid := range volumeLocation.NewVids { for _, newVid := range volumeLocation.NewVids {
glog.V(1).Infof("%s: %s adds volume %d", mc.name, loc.Url, newVid)
glog.V(1).Infof("%s: %s adds volume %d", mc.clientType, loc.Url, newVid)
mc.addLocation(newVid, loc) mc.addLocation(newVid, loc)
} }
for _, deletedVid := range volumeLocation.DeletedVids { for _, deletedVid := range volumeLocation.DeletedVids {
glog.V(1).Infof("%s: %s removes volume %d", mc.name, loc.Url, deletedVid)
glog.V(1).Infof("%s: %s removes volume %d", mc.clientType, loc.Url, deletedVid)
mc.deleteLocation(deletedVid, loc) mc.deleteLocation(deletedVid, loc)
} }
} }
}) })
if gprcErr != nil { if gprcErr != nil {
glog.V(0).Infof("%s failed to connect with master %v: %v", mc.name, master, gprcErr)
glog.V(0).Infof("%s failed to connect with master %v: %v", mc.clientType, master, gprcErr)
} }
return return
} }

Loading…
Cancel
Save