From 12797ec3adfd883f729b492e5c633ee5f97ed616 Mon Sep 17 00:00:00 2001 From: Alan Pope Date: Tue, 30 Oct 2018 12:21:27 +0000 Subject: [PATCH 0001/2432] Create snapcraft.yaml --- snap/snapcraft.yaml | 51 +++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 51 insertions(+) create mode 100644 snap/snapcraft.yaml diff --git a/snap/snapcraft.yaml b/snap/snapcraft.yaml new file mode 100644 index 000000000..6198cfb3c --- /dev/null +++ b/snap/snapcraft.yaml @@ -0,0 +1,51 @@ +# Name of snap as registered in the store +name: seaweedfs +# Automatically derive snap version from git tags +version: git +# Short human readable name as seen in 'snap find $SNAPNAME' +summary: SeaweedFS +# Longer multi-line description found in 'snap info $SNAPNAME' +description: | + SeaweedFS is a simple and highly scalable distributed file system. + There are two objectives: to store billions of files! to serve the + files fast! SeaweedFS implements an object store with O(1) disk seek, + and an optional Filer with POSIX interface. + +# Grade is stable for snaps expected to land in the stable channel +grade: stable +# Uses the strict confinement model and uses interfaces to open up access to +# resources on the target host +confinement: strict + +# List of parts which comprise the snap +parts: + # The main part which defines how to build the application in the snap + seaweedfs: + # This part needs a newer version of golang, so we use a separate part + # which defines how to get a newer golang during the build + after: [go] + # The go plugin knows how to build go applications into a snap + plugin: go + # Snapcraft will look in this location for the source of the application + source: . + go-importpath: github.com/chrislusf/seaweedfs + go: + # Defines the version of golang which will be bootstrapped into the snap + source-tag: go1.10.4 + +# Apps exposes the binaries inside the snap to the host system once installed +apps: + # We expose the weed command. + # This differs from the snap name, so it will be namespaced as seaweedfs.weed + # An alias can be added to expose this as 'weed' if requested in the snapcraft forum + weed: + # The path to the binary inside the snap, relative to the $SNAP home + command: bin/weed + # Plugs connect the snap to resources on the host system. We enable network connectivity + # We also add home and removable-media (latter not autoconnected by default) + # so users can access files in their home or on removable disks + plugs: + - network + - network-bind + - home + - removable-media From 86d4b18eb5e7ebc813f2e11d6826a9ace384cd61 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Tue, 31 Dec 2019 11:52:54 -0800 Subject: [PATCH 0002/2432] filer: optional recursive deletion fix https://github.com/chrislusf/seaweedfs/issues/1176 --- weed/command/scaffold.go | 17 +++++++++++++---- weed/server/filer_server.go | 8 ++++++-- weed/server/filer_server_handlers_write.go | 5 +++++ 3 files changed, 24 insertions(+), 6 deletions(-) diff --git a/weed/command/scaffold.go b/weed/command/scaffold.go index a76466ed6..8519598b3 100644 --- a/weed/command/scaffold.go +++ b/weed/command/scaffold.go @@ -59,16 +59,25 @@ const ( # $HOME/.seaweedfs/filer.toml # /etc/seaweedfs/filer.toml +#################################################### +# Customizable filer server options +#################################################### +[filer.options] +# with http DELETE, by default the filer would check whether a folder is empty. 
+# recursive_delete will delete all sub folders and files, similar to "rm -Rf" +recursive_delete = false + + +#################################################### +# The following are filer store options +#################################################### + [leveldb2] # local on disk, mostly for simple single-machine setup, fairly scalable # faster than previous leveldb, recommended. enabled = true dir = "." # directory to store level db files -#################################################### -# multiple filers on shared storage, fairly scalable -#################################################### - [mysql] # or tidb # CREATE TABLE IF NOT EXISTS filemeta ( # dirhash BIGINT COMMENT 'first 64 bits of MD5 hash value of directory field', diff --git a/weed/server/filer_server.go b/weed/server/filer_server.go index 41ba81366..c703b8c6f 100644 --- a/weed/server/filer_server.go +++ b/weed/server/filer_server.go @@ -7,11 +7,14 @@ import ( "os" "time" + "google.golang.org/grpc" + "github.com/chrislusf/seaweedfs/weed/operation" "github.com/chrislusf/seaweedfs/weed/pb/master_pb" "github.com/chrislusf/seaweedfs/weed/stats" "github.com/chrislusf/seaweedfs/weed/util" - "google.golang.org/grpc" + + "github.com/spf13/viper" "github.com/chrislusf/seaweedfs/weed/filer2" _ "github.com/chrislusf/seaweedfs/weed/filer2/cassandra" @@ -30,7 +33,6 @@ import ( _ "github.com/chrislusf/seaweedfs/weed/notification/kafka" _ "github.com/chrislusf/seaweedfs/weed/notification/log" "github.com/chrislusf/seaweedfs/weed/security" - "github.com/spf13/viper" ) type FilerOption struct { @@ -45,6 +47,7 @@ type FilerOption struct { DefaultLevelDbDir string DisableHttp bool Port int + recursiveDelete bool } type FilerServer struct { @@ -80,6 +83,7 @@ func NewFilerServer(defaultMux, readonlyMux *http.ServeMux, option *FilerOption) } util.LoadConfiguration("notification", false) + fs.option.recursiveDelete = v.GetBool("filer.options.recursive_delete") fs.filer.LoadConfiguration(v) notification.LoadConfiguration(v.Sub("notification")) diff --git a/weed/server/filer_server_handlers_write.go b/weed/server/filer_server_handlers_write.go index 236e7027d..a7dcc39a0 100644 --- a/weed/server/filer_server_handlers_write.go +++ b/weed/server/filer_server_handlers_write.go @@ -284,6 +284,11 @@ func (fs *FilerServer) uploadToVolumeServer(r *http.Request, u *url.URL, auth se func (fs *FilerServer) DeleteHandler(w http.ResponseWriter, r *http.Request) { isRecursive := r.FormValue("recursive") == "true" + if !isRecursive && fs.option.recursiveDelete { + if r.FormValue("recursive") != "false" { + isRecursive = true + } + } ignoreRecursiveError := r.FormValue("ignoreRecursiveError") == "true" skipChunkDeletion := r.FormValue("skipChunkDeletion") == "true" From aa1807e0825999e83e12633357c7272f22a2495f Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Wed, 1 Jan 2020 09:27:02 -0800 Subject: [PATCH 0003/2432] 1.48 --- weed/util/constants.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/weed/util/constants.go b/weed/util/constants.go index 0916850ef..fc62c6950 100644 --- a/weed/util/constants.go +++ b/weed/util/constants.go @@ -5,5 +5,5 @@ import ( ) var ( - VERSION = fmt.Sprintf("%s %d.%d", sizeLimit, 1, 47) + VERSION = fmt.Sprintf("%s %d.%d", sizeLimit, 1, 48) ) From e40a051d4b986f295a17ae4e4894f01cb8925bea Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Wed, 1 Jan 2020 09:47:25 -0800 Subject: [PATCH 0004/2432] fix dev docker build --- docker/Dockerfile.go_build | 2 ++ 1 file changed, 2 insertions(+) diff --git 
a/docker/Dockerfile.go_build b/docker/Dockerfile.go_build index 85cbb6143..572f191b9 100644 --- a/docker/Dockerfile.go_build +++ b/docker/Dockerfile.go_build @@ -1,5 +1,7 @@ FROM golang:latest RUN go get github.com/chrislusf/seaweedfs/weed +RUN rm -rf /go/src/github.com/coreos/etcd/vendor/golang.org/x/net/trace +RUN rm -rf /go/src/go.etcd.io/etcd/vendor/golang.org/x/net/trace # volume server gprc port EXPOSE 18080 From 2401168c13883b1ccddc45775fbf7898041139e6 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Wed, 1 Jan 2020 11:00:02 -0800 Subject: [PATCH 0005/2432] fix dev build --- docker/Dockerfile.go_build | 1 + 1 file changed, 1 insertion(+) diff --git a/docker/Dockerfile.go_build b/docker/Dockerfile.go_build index 572f191b9..67d391fdc 100644 --- a/docker/Dockerfile.go_build +++ b/docker/Dockerfile.go_build @@ -2,6 +2,7 @@ FROM golang:latest RUN go get github.com/chrislusf/seaweedfs/weed RUN rm -rf /go/src/github.com/coreos/etcd/vendor/golang.org/x/net/trace RUN rm -rf /go/src/go.etcd.io/etcd/vendor/golang.org/x/net/trace +RUN go get github.com/chrislusf/seaweedfs/weed # volume server gprc port EXPOSE 18080 From 50d44480e7b77e87f676afb596e67ce3ee3fc74f Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Wed, 1 Jan 2020 12:00:08 -0800 Subject: [PATCH 0006/2432] Update README.md --- README.md | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index dd748878b..b5f78da7c 100644 --- a/README.md +++ b/README.md @@ -81,13 +81,15 @@ SeaweedFS is a simple and highly scalable distributed file system. There are two 1. to store billions of files! 2. to serve the files fast! -SeaweedFS started as an Object Store to handle small files efficiently. Instead of managing all file metadata in a central master, the central master only manages file volumes, and it lets these volume servers manage files and their metadata. This relieves concurrency pressure from the central master and spreads file metadata into volume servers, allowing faster file access (just one disk read operation). +SeaweedFS started as an Object Store to handle small files efficiently. Instead of managing all file metadata in a central master, the central master only manages file volumes, and it lets these volume servers manage files and their metadata. This relieves concurrency pressure from the central master and spreads file metadata into volume servers, allowing faster file access (O(1), usually just one disk read operation). + +SeaweedFS can be tiered to the cloud. With hot data on local cluster, and warm data on the cloud with O(1) access time, SeaweedFS can transparently achieve both fast local access time and elastic cloud storage capacity. There is only 40 bytes of disk storage overhead for each file's metadata. It is so simple with O(1) disk reads that you are welcome to challenge the performance with your actual use cases. SeaweedFS started by implementing [Facebook's Haystack design paper](http://www.usenix.org/event/osdi10/tech/full_papers/Beaver.pdf). Also, SeaweedFS implements erasure coding with ideas from [f4: Facebook’s Warm BLOB Storage System](https://www.usenix.org/system/files/conference/osdi14/osdi14-paper-muralidhar.pdf) -SeaweedFS can work very well with just the object store. [[Filer]] can then be added later to support directories and POSIX attributes. Filer is a separate linearly-scalable stateless server with customizable metadata stores, e.g., MySql/Postgres/Redis/Etcd/Cassandra/LevelDB/MemSql/TiDB/CockroachDB. 
+On top of the object store, optional [Filer] can support directories and POSIX attributes. Filer is a separate linearly-scalable stateless server with customizable metadata stores, e.g., MySql/Postgres/Redis/Etcd/Cassandra/LevelDB/MemSql/TiDB/CockroachDB/etc. [Back to TOC](#table-of-contents) From 5927f4f1b7cead64091104b144ce11532c13453e Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Wed, 1 Jan 2020 12:01:27 -0800 Subject: [PATCH 0007/2432] Update README.md --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index b5f78da7c..39494325a 100644 --- a/README.md +++ b/README.md @@ -83,7 +83,7 @@ SeaweedFS is a simple and highly scalable distributed file system. There are two SeaweedFS started as an Object Store to handle small files efficiently. Instead of managing all file metadata in a central master, the central master only manages file volumes, and it lets these volume servers manage files and their metadata. This relieves concurrency pressure from the central master and spreads file metadata into volume servers, allowing faster file access (O(1), usually just one disk read operation). -SeaweedFS can be tiered to the cloud. With hot data on local cluster, and warm data on the cloud with O(1) access time, SeaweedFS can transparently achieve both fast local access time and elastic cloud storage capacity. +SeaweedFS can transparently integrate with the cloud. With hot data on local cluster, and warm data on the cloud with O(1) access time, SeaweedFS can achieve both fast local access time and elastic cloud storage capacity, without any client side changes. There is only 40 bytes of disk storage overhead for each file's metadata. It is so simple with O(1) disk reads that you are welcome to challenge the performance with your actual use cases. From 8eca5037ccbee9be0f38f8de964316b19223fefe Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Wed, 1 Jan 2020 12:04:51 -0800 Subject: [PATCH 0008/2432] Update README.md --- README.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index 39494325a..d659b11b2 100644 --- a/README.md +++ b/README.md @@ -106,8 +106,9 @@ On top of the object store, optional [Filer] can support directories and POSIX a * Adding/Removing servers does **not** cause any data re-balancing. * Optionally fix the orientation for jpeg pictures. * Support ETag, Accept-Range, Last-Modified, etc. -* Support in-memory/leveldb/boltdb/btree mode tuning for memory/performance balance. +* Support in-memory/leveldb/readonly mode tuning for memory/performance balance. * Support rebalancing the writable and readonly volumes. +* [Transparent cloud integration](Cloud-Tier): store warm data on cloud storage. [Back to TOC](#table-of-contents) From 92d1219bb153cf6922481f21308ce203757bae6d Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Wed, 1 Jan 2020 12:18:16 -0800 Subject: [PATCH 0009/2432] Update README.md --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index d659b11b2..5c5a2d1b3 100644 --- a/README.md +++ b/README.md @@ -109,6 +109,7 @@ On top of the object store, optional [Filer] can support directories and POSIX a * Support in-memory/leveldb/readonly mode tuning for memory/performance balance. * Support rebalancing the writable and readonly volumes. * [Transparent cloud integration](Cloud-Tier): store warm data on cloud storage. +* [Erasure Coding for warm storage][ErasureCoding] Rack-Aware 10.4 erasure coding reduces storage cost and increases availability. 
[Back to TOC](#table-of-contents) @@ -116,7 +117,6 @@ On top of the object store, optional [Filer] can support directories and POSIX a * [filer server][Filer] provide "normal" directories and files via http. * [mount filer][Mount] to read and write files directly as a local directory via FUSE. * [Amazon S3 compatible API][AmazonS3API] to access files with S3 tooling. -* [Erasure Coding for warm storage][ErasureCoding] Rack-Aware 10.4 erasure coding reduces storage cost and increases availability. * [Hadoop Compatible File System][Hadoop] to access files from Hadoop/Spark/Flink/etc jobs. * [Async Backup To Cloud][BackupToCloud] has extremely fast local access and backups to Amazon S3, Google Cloud Storage, Azure, BackBlaze. * [WebDAV] access as a mapped drive on Mac and Windows, or from mobile devices. From 88a80ab557712128934b9bb90c26d5a8e205d99d Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Wed, 1 Jan 2020 12:37:38 -0800 Subject: [PATCH 0010/2432] fix help message --- weed/shell/command_volume_tier_download.go | 2 +- weed/shell/command_volume_tier_upload.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/weed/shell/command_volume_tier_download.go b/weed/shell/command_volume_tier_download.go index 88e2e8b92..4584289d7 100644 --- a/weed/shell/command_volume_tier_download.go +++ b/weed/shell/command_volume_tier_download.go @@ -26,7 +26,7 @@ func (c *commandVolumeTierDownload) Name() string { } func (c *commandVolumeTierDownload) Help() string { - return `move the dat file of a volume to a remote tier + return `download the dat file of a volume from a remote tier volume.tier.download [-collection=""] volume.tier.download [-collection=""] -volumeId= diff --git a/weed/shell/command_volume_tier_upload.go b/weed/shell/command_volume_tier_upload.go index b3a0d9fe8..0a9e6165f 100644 --- a/weed/shell/command_volume_tier_upload.go +++ b/weed/shell/command_volume_tier_upload.go @@ -26,7 +26,7 @@ func (c *commandVolumeTierUpload) Name() string { } func (c *commandVolumeTierUpload) Help() string { - return `move the dat file of a volume to a remote tier + return `upload the dat file of a volume to a remote tier volume.tier.upload [-collection=""] [-fullPercent=95] [-quietFor=1h] volume.tier.upload [-collection=""] -volumeId= -dest= [-keepLocalDatFile] From 9c139e060131344445d5e90c4ed0aa7d15857b0f Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Wed, 1 Jan 2020 12:38:29 -0800 Subject: [PATCH 0011/2432] update help message --- weed/shell/command_volume_move.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/weed/shell/command_volume_move.go b/weed/shell/command_volume_move.go index 08d87c988..e74b43ed4 100644 --- a/weed/shell/command_volume_move.go +++ b/weed/shell/command_volume_move.go @@ -25,7 +25,7 @@ func (c *commandVolumeMove) Name() string { } func (c *commandVolumeMove) Help() string { - return ` move a live volume from one volume server to another volume server + return `move a live volume from one volume server to another volume server volume.move From a21012fea1917274b36b225bfcc32fee366b5fd4 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Wed, 1 Jan 2020 13:06:16 -0800 Subject: [PATCH 0012/2432] Update README.md --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 5c5a2d1b3..b3b3db2b1 100644 --- a/README.md +++ b/README.md @@ -89,7 +89,7 @@ There is only 40 bytes of disk storage overhead for each file's metadata. 
It is SeaweedFS started by implementing [Facebook's Haystack design paper](http://www.usenix.org/event/osdi10/tech/full_papers/Beaver.pdf). Also, SeaweedFS implements erasure coding with ideas from [f4: Facebook’s Warm BLOB Storage System](https://www.usenix.org/system/files/conference/osdi14/osdi14-paper-muralidhar.pdf) -On top of the object store, optional [Filer] can support directories and POSIX attributes. Filer is a separate linearly-scalable stateless server with customizable metadata stores, e.g., MySql/Postgres/Redis/Etcd/Cassandra/LevelDB/MemSql/TiDB/CockroachDB/etc. +On top of the object store, optional [Filer] can support directories and POSIX attributes. Filer is a separate linearly-scalable stateless server with customizable metadata stores, e.g., MySql, Postgres, Redis, Etcd, Cassandra, LevelDB, MemSql, TiDB, TiKV, CockroachDB, etc. [Back to TOC](#table-of-contents) From 70ee8f26ac58e726c5b7f4c6b6e29af9f61164e4 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Wed, 1 Jan 2020 13:47:06 -0800 Subject: [PATCH 0013/2432] Update README.md --- README.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index b3b3db2b1..bee90a39d 100644 --- a/README.md +++ b/README.md @@ -108,7 +108,7 @@ On top of the object store, optional [Filer] can support directories and POSIX a * Support ETag, Accept-Range, Last-Modified, etc. * Support in-memory/leveldb/readonly mode tuning for memory/performance balance. * Support rebalancing the writable and readonly volumes. -* [Transparent cloud integration](Cloud-Tier): store warm data on cloud storage. +* [Transparent cloud integration][CloudTier]: store warm data on cloud storage. * [Erasure Coding for warm storage][ErasureCoding] Rack-Aware 10.4 erasure coding reduces storage cost and increases availability. [Back to TOC](#table-of-contents) @@ -128,6 +128,7 @@ On top of the object store, optional [Filer] can support directories and POSIX a [Hadoop]: https://github.com/chrislusf/seaweedfs/wiki/Hadoop-Compatible-File-System [WebDAV]: https://github.com/chrislusf/seaweedfs/wiki/WebDAV [ErasureCoding]: https://github.com/chrislusf/seaweedfs/wiki/Erasure-coding-for-warm-storage +[CloudTier]: https://github.com/chrislusf/seaweedfs/wiki/Cloud-Tier [Back to TOC](#table-of-contents) From d82fd7c7f32a9cd094a2a65aeb4bc0cbef47ed4a Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Thu, 2 Jan 2020 19:17:00 -0800 Subject: [PATCH 0014/2432] Update README.md --- README.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/README.md b/README.md index bee90a39d..73e42f162 100644 --- a/README.md +++ b/README.md @@ -517,6 +517,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. +The text of this page is available for modification and reuse under the terms of the Creative Commons Attribution-Sharealike 3.0 Unported License and the GNU Free Documentation License (unversioned, with no invariant sections, front-cover texts, or back-cover texts). + [Back to TOC](#table-of-contents) ## Stargazers over time ## From 0ef789fd5afd6611a525347e97350b689b4ebe4f Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Thu, 2 Jan 2020 20:08:57 -0800 Subject: [PATCH 0015/2432] Update README.md --- README.md | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/README.md b/README.md index 73e42f162..8ed89b4c7 100644 --- a/README.md +++ b/README.md @@ -322,6 +322,14 @@ Each individual file size is limited to the volume size. 
All file meta information stored on an volume server is readable from memory without disk access. Each file takes just a 16-byte map entry of <64bit key, 32bit offset, 32bit size>. Of course, each map entry has its own space cost for the map. But usually the disk space runs out before the memory does. +### Extensible to the cloud ### + +The local volume servers are much faster, while cloud storage has elastic capacity and more cost-efficient (usually free to upload, but relatively costly to access). SeaweedFS can take advantage of both local and cloud storage. + +Usually hot data are fresh and warm data are old. SeaweedFS puts the newly created volumes on local servers, and optionally upload the older volumes on the cloud. If the older data are accessed less often, this literally gives you unlimited capacity with limited local servers, and still fast for new data. + +If the hot~warm data is split as 20~80, with 20 servers, you can achieve storage capacity of 100 servers. That's a cost saving of 80%! Or you can repurpose the 80 servers to store new data also, and get 5X storage throughput. + [Back to TOC](#table-of-contents) ## Compared to Other File Systems ## From a3a2e69900290256646f24d9bbd2f204c188af2f Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Thu, 2 Jan 2020 20:10:18 -0800 Subject: [PATCH 0016/2432] Update README.md --- README.md | 4 ---- 1 file changed, 4 deletions(-) diff --git a/README.md b/README.md index 8ed89b4c7..5c6300635 100644 --- a/README.md +++ b/README.md @@ -93,10 +93,6 @@ On top of the object store, optional [Filer] can support directories and POSIX a [Back to TOC](#table-of-contents) -## Features ## - -[Back to TOC](#table-of-contents) - ## Additional Features ## * Can choose no replication or different replication levels, rack and data center aware. * Automatic master servers failover - no single point of failure (SPOF). 
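A rough sketch of the arithmetic behind the 20~80 claim in the README text added above (an illustration only, assuming equal-sized volume servers): if 20 local servers keep just the hot 20% of the data and the warm 80% is uploaded to cloud storage, the cluster addresses 20 / 0.2 = the capacity of 100 all-local servers, so local hardware cost is roughly 20% of an equivalent all-local deployment. Starting instead from 100 servers, offloading the warm 80% to the cloud frees the 80 servers that would have held it to accept new writes as well, giving about 100 / 20 = 5X the capacity for fresh data.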
From 3eafec4b29b9a5090eb6dd5f0ddee6ee1f713792 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Fri, 3 Jan 2020 00:37:24 -0800 Subject: [PATCH 0017/2432] volume: add option to limit file size --- weed/command/server.go | 1 + weed/command/volume.go | 11 ++++++--- weed/server/common.go | 2 +- weed/server/volume_server.go | 9 ++++++-- weed/server/volume_server_handlers_write.go | 2 +- weed/storage/needle/needle.go | 18 +++++++++------ weed/storage/needle/needle_parse_multipart.go | 23 ++++++++++++++----- 7 files changed, 46 insertions(+), 20 deletions(-) diff --git a/weed/command/server.go b/weed/command/server.go index 87f404ed3..6aa68b6d2 100644 --- a/weed/command/server.go +++ b/weed/command/server.go @@ -89,6 +89,7 @@ func init() { serverOptions.v.fixJpgOrientation = cmdServer.Flag.Bool("volume.images.fix.orientation", false, "Adjust jpg orientation when uploading.") serverOptions.v.readRedirect = cmdServer.Flag.Bool("volume.read.redirect", true, "Redirect moved or non-local volumes.") serverOptions.v.compactionMBPerSecond = cmdServer.Flag.Int("volume.compactionMBps", 0, "limit compaction speed in mega bytes per second") + serverOptions.v.fileSizeLimitMB = cmdServer.Flag.Int("volume.fileSizeLimitMB", 256, "limit file size to avoid out of memory") serverOptions.v.publicUrl = cmdServer.Flag.String("volume.publicUrl", "", "publicly accessible address") s3Options.filerBucketsPath = cmdServer.Flag.String("s3.filer.dir.buckets", "/buckets", "folder on filer to store all buckets") diff --git a/weed/command/volume.go b/weed/command/volume.go index 3e8341ef8..b0f46bbf3 100644 --- a/weed/command/volume.go +++ b/weed/command/volume.go @@ -10,17 +10,19 @@ import ( "strings" "time" - "github.com/chrislusf/seaweedfs/weed/security" - "github.com/chrislusf/seaweedfs/weed/util/httpdown" "github.com/spf13/viper" "google.golang.org/grpc" + "github.com/chrislusf/seaweedfs/weed/security" + "github.com/chrislusf/seaweedfs/weed/util/httpdown" + + "google.golang.org/grpc/reflection" + "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb" "github.com/chrislusf/seaweedfs/weed/server" "github.com/chrislusf/seaweedfs/weed/storage" "github.com/chrislusf/seaweedfs/weed/util" - "google.golang.org/grpc/reflection" ) var ( @@ -47,6 +49,7 @@ type VolumeServerOptions struct { cpuProfile *string memProfile *string compactionMBPerSecond *int + fileSizeLimitMB *int } func init() { @@ -67,6 +70,7 @@ func init() { v.cpuProfile = cmdVolume.Flag.String("cpuprofile", "", "cpu profile output file") v.memProfile = cmdVolume.Flag.String("memprofile", "", "memory profile output file") v.compactionMBPerSecond = cmdVolume.Flag.Int("compactionMBps", 0, "limit background compaction or copying speed in mega bytes per second") + v.fileSizeLimitMB = cmdVolume.Flag.Int("fileSizeLimitMB", 256, "limit file size to avoid out of memory") } var cmdVolume = &Command{ @@ -158,6 +162,7 @@ func (v VolumeServerOptions) startVolumeServer(volumeFolders, maxVolumeCounts, v v.whiteList, *v.fixJpgOrientation, *v.readRedirect, *v.compactionMBPerSecond, + *v.fileSizeLimitMB, ) // starting grpc server diff --git a/weed/server/common.go b/weed/server/common.go index 888ddec49..6828e9dc5 100644 --- a/weed/server/common.go +++ b/weed/server/common.go @@ -98,7 +98,7 @@ func submitForClientHandler(w http.ResponseWriter, r *http.Request, masterUrl st } debug("parsing upload file...") - fname, data, mimeType, pairMap, isGzipped, originalDataSize, lastModified, _, _, pe := needle.ParseUpload(r) + fname, data, mimeType, 
pairMap, isGzipped, originalDataSize, lastModified, _, _, pe := needle.ParseUpload(r, 256*1024*1024) if pe != nil { writeJsonError(w, r, http.StatusBadRequest, pe) return diff --git a/weed/server/volume_server.go b/weed/server/volume_server.go index 6cf654738..a406b36cc 100644 --- a/weed/server/volume_server.go +++ b/weed/server/volume_server.go @@ -4,13 +4,15 @@ import ( "fmt" "net/http" - "github.com/chrislusf/seaweedfs/weed/stats" "google.golang.org/grpc" + "github.com/chrislusf/seaweedfs/weed/stats" + + "github.com/spf13/viper" + "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/security" "github.com/chrislusf/seaweedfs/weed/storage" - "github.com/spf13/viper" ) type VolumeServer struct { @@ -29,6 +31,7 @@ type VolumeServer struct { compactionBytePerSecond int64 MetricsAddress string MetricsIntervalSec int + fileSizeLimitBytes int64 } func NewVolumeServer(adminMux, publicMux *http.ServeMux, ip string, @@ -41,6 +44,7 @@ func NewVolumeServer(adminMux, publicMux *http.ServeMux, ip string, fixJpgOrientation bool, readRedirect bool, compactionMBPerSecond int, + fileSizeLimitMB int, ) *VolumeServer { v := viper.GetViper() @@ -62,6 +66,7 @@ func NewVolumeServer(adminMux, publicMux *http.ServeMux, ip string, ReadRedirect: readRedirect, grpcDialOption: security.LoadClientTLS(viper.Sub("grpc"), "volume"), compactionBytePerSecond: int64(compactionMBPerSecond) * 1024 * 1024, + fileSizeLimitBytes: int64(fileSizeLimitMB) * 1024 * 1024, } vs.SeedMasterNodes = masterNodes vs.store = storage.NewStore(vs.grpcDialOption, port, ip, publicUrl, folders, maxCounts, vs.needleMapKind) diff --git a/weed/server/volume_server_handlers_write.go b/weed/server/volume_server_handlers_write.go index 05e21612b..cd35255e5 100644 --- a/weed/server/volume_server_handlers_write.go +++ b/weed/server/volume_server_handlers_write.go @@ -43,7 +43,7 @@ func (vs *VolumeServer) PostHandler(w http.ResponseWriter, r *http.Request) { return } - needle, originalSize, ne := needle.CreateNeedleFromRequest(r, vs.FixJpgOrientation) + needle, originalSize, ne := needle.CreateNeedleFromRequest(r, vs.FixJpgOrientation, vs.fileSizeLimitBytes) if ne != nil { writeJsonError(w, r, http.StatusBadRequest, ne) return diff --git a/weed/storage/needle/needle.go b/weed/storage/needle/needle.go index 2f03ba87b..494cc138e 100644 --- a/weed/storage/needle/needle.go +++ b/weed/storage/needle/needle.go @@ -3,13 +3,13 @@ package needle import ( "encoding/json" "fmt" + "io" + "io/ioutil" "net/http" "strconv" "strings" "time" - "io/ioutil" - "github.com/chrislusf/seaweedfs/weed/images" . 
"github.com/chrislusf/seaweedfs/weed/storage/types" ) @@ -50,7 +50,7 @@ func (n *Needle) String() (str string) { return } -func ParseUpload(r *http.Request) ( +func ParseUpload(r *http.Request, sizeLimit int64) ( fileName string, data []byte, mimeType string, pairMap map[string]string, isGzipped bool, originalDataSize int, modifiedTime uint64, ttl *TTL, isChunkedFile bool, e error) { pairMap = make(map[string]string) @@ -61,13 +61,17 @@ func ParseUpload(r *http.Request) ( } if r.Method == "POST" { - fileName, data, mimeType, isGzipped, originalDataSize, isChunkedFile, e = parseMultipart(r) + fileName, data, mimeType, isGzipped, originalDataSize, isChunkedFile, e = parseMultipart(r, sizeLimit) } else { isGzipped = false mimeType = r.Header.Get("Content-Type") fileName = "" - data, e = ioutil.ReadAll(r.Body) + data, e = ioutil.ReadAll(io.LimitReader(r.Body, sizeLimit+1)) originalDataSize = len(data) + if e == io.EOF || int64(originalDataSize) == sizeLimit+1 { + io.Copy(ioutil.Discard, r.Body) + } + r.Body.Close() } if e != nil { return @@ -78,11 +82,11 @@ func ParseUpload(r *http.Request) ( return } -func CreateNeedleFromRequest(r *http.Request, fixJpgOrientation bool) (n *Needle, originalSize int, e error) { +func CreateNeedleFromRequest(r *http.Request, fixJpgOrientation bool, sizeLimit int64) (n *Needle, originalSize int, e error) { var pairMap map[string]string fname, mimeType, isGzipped, isChunkedFile := "", "", false, false n = new(Needle) - fname, n.Data, mimeType, pairMap, isGzipped, originalSize, n.LastModified, n.Ttl, isChunkedFile, e = ParseUpload(r) + fname, n.Data, mimeType, pairMap, isGzipped, originalSize, n.LastModified, n.Ttl, isChunkedFile, e = ParseUpload(r, sizeLimit) if e != nil { return } diff --git a/weed/storage/needle/needle_parse_multipart.go b/weed/storage/needle/needle_parse_multipart.go index 8be1a1da4..f0a239f9f 100644 --- a/weed/storage/needle/needle_parse_multipart.go +++ b/weed/storage/needle/needle_parse_multipart.go @@ -1,9 +1,7 @@ package needle import ( - "github.com/chrislusf/seaweedfs/weed/glog" - "github.com/chrislusf/seaweedfs/weed/util" - + "fmt" "io" "io/ioutil" "mime" @@ -11,9 +9,12 @@ import ( "path" "strconv" "strings" + + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/util" ) -func parseMultipart(r *http.Request) ( +func parseMultipart(r *http.Request, sizeLimit int64) ( fileName string, data []byte, mimeType string, isGzipped bool, originalDataSize int, isChunkedFile bool, e error) { defer func() { if e != nil && r.Body != nil { @@ -41,11 +42,17 @@ func parseMultipart(r *http.Request) ( fileName = path.Base(fileName) } - data, e = ioutil.ReadAll(part) + println("reading part", sizeLimit) + + data, e = ioutil.ReadAll(io.LimitReader(part, sizeLimit+1)) if e != nil { glog.V(0).Infoln("Reading Content [ERROR]", e) return } + if len(data) == int(sizeLimit)+1 { + e = fmt.Errorf("file over the limited %d bytes", sizeLimit) + return + } //if the filename is empty string, do a search on the other multi-part items for fileName == "" { @@ -58,12 +65,16 @@ func parseMultipart(r *http.Request) ( //found the first multi-part has filename if fName != "" { - data2, fe2 := ioutil.ReadAll(part2) + data2, fe2 := ioutil.ReadAll(io.LimitReader(part2, sizeLimit+1)) if fe2 != nil { glog.V(0).Infoln("Reading Content [ERROR]", fe2) e = fe2 return } + if len(data) == int(sizeLimit)+1 { + e = fmt.Errorf("file over the limited %d bytes", sizeLimit) + return + } //update data = data2 From a057cd6c6135847c12de538879ca418ca875096d Mon Sep 
17 00:00:00 2001 From: Chris Lu Date: Fri, 3 Jan 2020 09:31:12 -0800 Subject: [PATCH 0018/2432] Update README.md --- README.md | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index 5c6300635..8c32a4e58 100644 --- a/README.md +++ b/README.md @@ -104,7 +104,7 @@ On top of the object store, optional [Filer] can support directories and POSIX a * Support ETag, Accept-Range, Last-Modified, etc. * Support in-memory/leveldb/readonly mode tuning for memory/performance balance. * Support rebalancing the writable and readonly volumes. -* [Transparent cloud integration][CloudTier]: store warm data on cloud storage. +* [Transparent cloud integration][CloudTier]: unlimited capacity via tiered cloud storage for warm data. * [Erasure Coding for warm storage][ErasureCoding] Rack-Aware 10.4 erasure coding reduces storage cost and increases availability. [Back to TOC](#table-of-contents) @@ -318,12 +318,14 @@ Each individual file size is limited to the volume size. All file meta information stored on an volume server is readable from memory without disk access. Each file takes just a 16-byte map entry of <64bit key, 32bit offset, 32bit size>. Of course, each map entry has its own space cost for the map. But usually the disk space runs out before the memory does. -### Extensible to the cloud ### +### Tiered Storage to the cloud ### -The local volume servers are much faster, while cloud storage has elastic capacity and more cost-efficient (usually free to upload, but relatively costly to access). SeaweedFS can take advantage of both local and cloud storage. +The local volume servers are much faster, while cloud storages have elastic capacity and are actually more cost-efficient if not accessed often (usually free to upload, but relatively costly to access). With the append-only structure and O(1) access time, SeaweedFS can take advantage of both local and cloud storage by offloading the warm data to the cloud. Usually hot data are fresh and warm data are old. SeaweedFS puts the newly created volumes on local servers, and optionally upload the older volumes on the cloud. If the older data are accessed less often, this literally gives you unlimited capacity with limited local servers, and still fast for new data. +With the O(1) access time, the network latency cost is kept at minimum. + If the hot~warm data is split as 20~80, with 20 servers, you can achieve storage capacity of 100 servers. That's a cost saving of 80%! Or you can repurpose the 80 servers to store new data also, and get 5X storage throughput. 
[Back to TOC](#table-of-contents) From df636e4c14e28603215a7f7b88ed6e8803d5381b Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Fri, 3 Jan 2020 12:46:39 -0800 Subject: [PATCH 0019/2432] remove .vif files --- weed/server/volume_grpc_erasure_coding.go | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/weed/server/volume_grpc_erasure_coding.go b/weed/server/volume_grpc_erasure_coding.go index 4bca9948e..67efc0f6d 100644 --- a/weed/server/volume_grpc_erasure_coding.go +++ b/weed/server/volume_grpc_erasure_coding.go @@ -173,6 +173,7 @@ func (vs *VolumeServer) VolumeEcShardsDelete(ctx context.Context, req *volume_se // check whether to delete the .ecx and .ecj file also hasEcxFile := false + hasIdxFile := false existingShardCount := 0 bName := filepath.Base(baseFilename) @@ -186,6 +187,10 @@ func (vs *VolumeServer) VolumeEcShardsDelete(ctx context.Context, req *volume_se hasEcxFile = true continue } + if fileInfo.Name() == bName+".idx" { + hasIdxFile = true + continue + } if strings.HasPrefix(fileInfo.Name(), bName+".ec") { existingShardCount++ } @@ -200,6 +205,10 @@ func (vs *VolumeServer) VolumeEcShardsDelete(ctx context.Context, req *volume_se return nil, err } } + if !hasIdxFile { + // .vif is used for ec volumes and normal volumes + os.Remove(baseFilename + ".vif") + } return &volume_server_pb.VolumeEcShardsDeleteResponse{}, nil } From 4e731f1c8baffc6f5231bb1d16fff103b8949768 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Sat, 4 Jan 2020 11:28:29 -0800 Subject: [PATCH 0020/2432] volume: copy volumes also include .vif file --- weed/server/volume_grpc_copy.go | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/weed/server/volume_grpc_copy.go b/weed/server/volume_grpc_copy.go index a54a1e343..0153d5efc 100644 --- a/weed/server/volume_grpc_copy.go +++ b/weed/server/volume_grpc_copy.go @@ -20,7 +20,7 @@ import ( const BufferSizeLimit = 1024 * 1024 * 2 -// VolumeCopy copy the .idx .dat files, and mount the volume +// VolumeCopy copy the .idx .dat .vif files, and mount the volume func (vs *VolumeServer) VolumeCopy(ctx context.Context, req *volume_server_pb.VolumeCopyRequest) (*volume_server_pb.VolumeCopyResponse, error) { v := vs.store.GetVolume(needle.VolumeId(req.VolumeId)) @@ -63,6 +63,10 @@ func (vs *VolumeServer) VolumeCopy(ctx context.Context, req *volume_server_pb.Vo return err } + if err := vs.doCopyFile(ctx, client, false, req.Collection, req.VolumeId, volFileInfoResp.CompactionRevision, volFileInfoResp.DatFileSize, volumeFileName, ".vif", false, true); err != nil { + return err + } + return nil }) @@ -70,12 +74,9 @@ func (vs *VolumeServer) VolumeCopy(ctx context.Context, req *volume_server_pb.Vo datFileName = volumeFileName + ".dat" if err != nil && volumeFileName != "" { - if idxFileName != "" { - os.Remove(idxFileName) - } - if datFileName != "" { - os.Remove(datFileName) - } + os.Remove(idxFileName) + os.Remove(datFileName) + os.Remove(volumeFileName + ".vif") return nil, err } From aa73364a246548c20557804fb9a9323a72950985 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Mon, 6 Jan 2020 16:29:59 -0800 Subject: [PATCH 0021/2432] weed fix: add back .idx file generation --- weed/command/fix.go | 15 +++++++-------- 1 file changed, 7 insertions(+), 8 deletions(-) diff --git a/weed/command/fix.go b/weed/command/fix.go index 76bc19f7e..e813efc32 100644 --- a/weed/command/fix.go +++ b/weed/command/fix.go @@ -68,11 +68,6 @@ func runFix(cmd *Command, args []string) bool { baseFileName = *fixVolumeCollection + "_" + baseFileName } indexFileName := 
path.Join(*fixVolumePath, baseFileName+".idx") - indexFile, err := os.OpenFile(indexFileName, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644) - if err != nil { - glog.Fatalf("Create Volume Index [ERROR] %s\n", err) - } - defer indexFile.Close() nm := needle_map.NewMemDb() @@ -81,9 +76,13 @@ func runFix(cmd *Command, args []string) bool { nm: nm, } - err = storage.ScanVolumeFile(*fixVolumePath, *fixVolumeCollection, vid, storage.NeedleMapInMemory, scanner) - if err != nil { - glog.Fatalf("Export Volume File [ERROR] %s\n", err) + if err = storage.ScanVolumeFile(*fixVolumePath, *fixVolumeCollection, vid, storage.NeedleMapInMemory, scanner); err != nil { + glog.Fatalf("scan .dat File: %v", err) + os.Remove(indexFileName) + } + + if err := nm.SaveToIdx(indexFileName); err != nil { + glog.Fatalf("save to .idx File: %v", err) os.Remove(indexFileName) } From dc9457fb8718f607a96ccc41d610063bf364f50a Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Tue, 7 Jan 2020 09:03:47 -0800 Subject: [PATCH 0022/2432] fix compilation error --- weed/command/fix.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/weed/command/fix.go b/weed/command/fix.go index e813efc32..8903595fa 100644 --- a/weed/command/fix.go +++ b/weed/command/fix.go @@ -76,7 +76,7 @@ func runFix(cmd *Command, args []string) bool { nm: nm, } - if err = storage.ScanVolumeFile(*fixVolumePath, *fixVolumeCollection, vid, storage.NeedleMapInMemory, scanner); err != nil { + if err := storage.ScanVolumeFile(*fixVolumePath, *fixVolumeCollection, vid, storage.NeedleMapInMemory, scanner); err != nil { glog.Fatalf("scan .dat File: %v", err) os.Remove(indexFileName) } From 9995d3bcb581d3d20e152ff2a359b22b6a8563fd Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Tue, 7 Jan 2020 21:49:28 -0800 Subject: [PATCH 0023/2432] remove println --- weed/storage/needle/needle_parse_multipart.go | 2 -- 1 file changed, 2 deletions(-) diff --git a/weed/storage/needle/needle_parse_multipart.go b/weed/storage/needle/needle_parse_multipart.go index f0a239f9f..8c9032f5f 100644 --- a/weed/storage/needle/needle_parse_multipart.go +++ b/weed/storage/needle/needle_parse_multipart.go @@ -42,8 +42,6 @@ func parseMultipart(r *http.Request, sizeLimit int64) ( fileName = path.Base(fileName) } - println("reading part", sizeLimit) - data, e = ioutil.ReadAll(io.LimitReader(part, sizeLimit+1)) if e != nil { glog.V(0).Infoln("Reading Content [ERROR]", e) From 943f4986ef27fda2487eff0669ecf79faaaacda1 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Wed, 8 Jan 2020 08:49:18 -0800 Subject: [PATCH 0024/2432] fix possible nil --- weed/storage/needle/volume_ttl.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/weed/storage/needle/volume_ttl.go b/weed/storage/needle/volume_ttl.go index 4a169870d..179057876 100644 --- a/weed/storage/needle/volume_ttl.go +++ b/weed/storage/needle/volume_ttl.go @@ -69,6 +69,9 @@ func (t *TTL) ToBytes(output []byte) { } func (t *TTL) ToUint32() (output uint32) { + if t == nil || t.Count == 0 { + return 0 + } output = uint32(t.Count) << 8 output += uint32(t.Unit) return output From acf7ca7b93e0a33e9fc987dec600d1b9f1dc1e32 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Wed, 8 Jan 2020 09:45:03 -0800 Subject: [PATCH 0025/2432] volume: fix compaction --- weed/storage/needle_map/memdb.go | 5 ++++- weed/storage/volume_vacuum.go | 38 ++++++++++++++------------------ 2 files changed, 20 insertions(+), 23 deletions(-) diff --git a/weed/storage/needle_map/memdb.go b/weed/storage/needle_map/memdb.go index 6aba6adeb..9eb4d9f56 100644 --- 
a/weed/storage/needle_map/memdb.go +++ b/weed/storage/needle_map/memdb.go @@ -89,6 +89,9 @@ func (cm *MemDb) SaveToIdx(idxName string) (ret error) { defer idxFile.Close() return cm.AscendingVisit(func(value NeedleValue) error { + if value.Offset.IsZero() || value.Size == TombstoneFileSize { + return nil + } _, err := idxFile.Write(value.ToBytes()) return err }) @@ -104,7 +107,7 @@ func (cm *MemDb) LoadFromIdx(idxName string) (ret error) { return idx.WalkIndexFile(idxFile, func(key NeedleId, offset Offset, size uint32) error { if offset.IsZero() || size == TombstoneFileSize { - return nil + return cm.Delete(key) } return cm.Set(key, offset, size) }) diff --git a/weed/storage/volume_vacuum.go b/weed/storage/volume_vacuum.go index 434b5989d..09bf36f7a 100644 --- a/weed/storage/volume_vacuum.go +++ b/weed/storage/volume_vacuum.go @@ -356,19 +356,17 @@ func (v *Volume) copyDataAndGenerateIndexFile(dstName, idxName string, prealloca func (v *Volume) copyDataBasedOnIndexFile(dstName, idxName string, preallocate int64) (err error) { var ( dstDatBackend backend.BackendStorageFile - oldIndexFile *os.File ) if dstDatBackend, err = createVolumeFile(dstName, preallocate, 0); err != nil { return } defer dstDatBackend.Close() - if oldIndexFile, err = os.OpenFile(v.FileName()+".idx", os.O_RDONLY, 0644); err != nil { + oldNm := needle_map.NewMemDb() + newNm := needle_map.NewMemDb() + if err = oldNm.LoadFromIdx(v.FileName()+".idx"); err != nil { return } - defer oldIndexFile.Close() - - nm := needle_map.NewMemDb() now := uint64(time.Now().Unix()) @@ -376,13 +374,11 @@ func (v *Volume) copyDataBasedOnIndexFile(dstName, idxName string, preallocate i dstDatBackend.WriteAt(v.SuperBlock.Bytes(), 0) newOffset := int64(v.SuperBlock.BlockSize()) - idx2.WalkIndexFile(oldIndexFile, func(key NeedleId, offset Offset, size uint32) error { - if offset.IsZero() || size == TombstoneFileSize { - return nil - } + oldNm.AscendingVisit(func(value needle_map.NeedleValue) error { + + offset, size := value.Offset, value.Size - nv, ok := v.nm.Get(key) - if !ok { + if offset.IsZero() || size == TombstoneFileSize { return nil } @@ -396,21 +392,19 @@ func (v *Volume) copyDataBasedOnIndexFile(dstName, idxName string, preallocate i return nil } - glog.V(4).Infoln("needle expected offset ", offset, "ok", ok, "nv", nv) - if nv.Offset == offset && nv.Size > 0 { - if err = nm.Set(n.Id, ToOffset(newOffset), n.Size); err != nil { - return fmt.Errorf("cannot put needle: %s", err) - } - if _, _, _, err = n.Append(dstDatBackend, v.Version()); err != nil { - return fmt.Errorf("cannot append needle: %s", err) - } - newOffset += n.DiskSize(v.Version()) - glog.V(3).Infoln("saving key", n.Id, "volume offset", offset, "=>", newOffset, "data_size", n.Size) + if err = newNm.Set(n.Id, ToOffset(newOffset), n.Size); err != nil { + return fmt.Errorf("cannot put needle: %s", err) } + if _, _, _, err = n.Append(dstDatBackend, v.Version()); err != nil { + return fmt.Errorf("cannot append needle: %s", err) + } + newOffset += n.DiskSize(v.Version()) + glog.V(3).Infoln("saving key", n.Id, "volume offset", offset, "=>", newOffset, "data_size", n.Size) + return nil }) - nm.SaveToIdx(idxName) + newNm.SaveToIdx(idxName) return } From 288baf37fd214d7ef9b40fae547976dff8357d8b Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Wed, 8 Jan 2020 09:45:26 -0800 Subject: [PATCH 0026/2432] saving .vif files correctly --- weed/storage/volume_loading.go | 9 ++++++--- weed/storage/volume_tier.go | 5 ++--- 2 files changed, 8 insertions(+), 6 deletions(-) diff --git 
a/weed/storage/volume_loading.go b/weed/storage/volume_loading.go index fa1f7d617..6b42fc452 100644 --- a/weed/storage/volume_loading.go +++ b/weed/storage/volume_loading.go @@ -26,9 +26,7 @@ func (v *Volume) load(alsoLoadIndex bool, createDatIfMissing bool, needleMapKind fileName := v.FileName() alreadyHasSuperBlock := false - if !v.maybeLoadVolumeInfo() { - v.SaveVolumeInfo() - } + hasVolumeInfoFile := v.maybeLoadVolumeInfo() && v.volumeInfo.Version != 0 if v.HasRemoteFile() { v.noWriteCanDelete = true @@ -141,6 +139,11 @@ func (v *Volume) load(alsoLoadIndex bool, createDatIfMissing bool, needleMapKind } } + if !hasVolumeInfoFile { + v.volumeInfo.Version = uint32(v.SuperBlock.Version) + v.SaveVolumeInfo() + } + stats.VolumeServerVolumeCounter.WithLabelValues(v.Collection, "volume").Inc() return err diff --git a/weed/storage/volume_tier.go b/weed/storage/volume_tier.go index 99071285f..85eafa848 100644 --- a/weed/storage/volume_tier.go +++ b/weed/storage/volume_tier.go @@ -14,12 +14,11 @@ func (v *Volume) GetVolumeInfo() *volume_server_pb.VolumeInfo { func (v *Volume) maybeLoadVolumeInfo() (found bool) { - v.volumeInfo, found = pb.MaybeLoadVolumeInfo(v.FileName() + ".vif") + v.volumeInfo, v.hasRemoteFile = pb.MaybeLoadVolumeInfo(v.FileName() + ".vif") - if found { + if v.hasRemoteFile { glog.V(0).Infof("volume %d is tiered to %s as %s and read only", v.Id, v.volumeInfo.Files[0].BackendName(), v.volumeInfo.Files[0].Key) - v.hasRemoteFile = true } return From f8a20ef35e58610028dc20fa14e78fd5e93ed4fb Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Wed, 8 Jan 2020 09:45:42 -0800 Subject: [PATCH 0027/2432] add the old way to compact as a comment --- weed/storage/store_vacuum.go | 1 + 1 file changed, 1 insertion(+) diff --git a/weed/storage/store_vacuum.go b/weed/storage/store_vacuum.go index 5dacb71bf..e94d9b516 100644 --- a/weed/storage/store_vacuum.go +++ b/weed/storage/store_vacuum.go @@ -17,6 +17,7 @@ func (s *Store) CheckCompactVolume(volumeId needle.VolumeId) (float64, error) { func (s *Store) CompactVolume(vid needle.VolumeId, preallocate int64, compactionBytePerSecond int64) error { if v := s.findVolume(vid); v != nil { return v.Compact2(preallocate) // compactionBytePerSecond + // return v.Compact(preallocate, compactionBytePerSecond) } return fmt.Errorf("volume id %d is not found during compact", vid) } From 66ab09aa4aec71fb4fc1abb64e1eea15a7cb4320 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Wed, 8 Jan 2020 09:51:35 -0800 Subject: [PATCH 0028/2432] 1.49 --- weed/util/constants.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/weed/util/constants.go b/weed/util/constants.go index fc62c6950..0bf794344 100644 --- a/weed/util/constants.go +++ b/weed/util/constants.go @@ -5,5 +5,5 @@ import ( ) var ( - VERSION = fmt.Sprintf("%s %d.%d", sizeLimit, 1, 48) + VERSION = fmt.Sprintf("%s %d.%d", sizeLimit, 1, 49) ) From 89e16bd2e8edabee4d5b09c29dcc31ae023dc18d Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Wed, 8 Jan 2020 18:07:07 -0800 Subject: [PATCH 0029/2432] skip error when draining reader fix https://github.com/chrislusf/seaweedfs/issues/1179 --- weed/util/http_util.go | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/weed/util/http_util.go b/weed/util/http_util.go index 667d0b4be..4aab90f5a 100644 --- a/weed/util/http_util.go +++ b/weed/util/http_util.go @@ -11,6 +11,8 @@ import ( "net/http" "net/url" "strings" + + "github.com/chrislusf/seaweedfs/weed/glog" ) var ( @@ -210,7 +212,8 @@ func ReadUrl(fileUrl string, offset int64, size int, buf []byte, 
isReadRange boo } var reader io.ReadCloser - switch r.Header.Get("Content-Encoding") { + contentEncoding := r.Header.Get("Content-Encoding") + switch contentEncoding { case "gzip": reader, err = gzip.NewReader(r.Body) defer reader.Close() @@ -242,7 +245,7 @@ func ReadUrl(fileUrl string, offset int64, size int, buf []byte, isReadRange boo // drains the response body to avoid memory leak data, _ := ioutil.ReadAll(reader) if len(data) != 0 { - err = fmt.Errorf("buffer size is too small. remains %d", len(data)) + glog.V(1).Infof("%s reader has remaining %d bytes", contentEncoding, len(data)) } return n, err } From 8dd260623e2b2e6452df53529f143a63eeec4676 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Wed, 8 Jan 2020 21:50:59 -0800 Subject: [PATCH 0030/2432] protect against nil deletion results fix https://github.com/chrislusf/seaweedfs/issues/1180 --- weed/operation/delete_content.go | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/weed/operation/delete_content.go b/weed/operation/delete_content.go index 358399324..e4aa6c6d3 100644 --- a/weed/operation/delete_content.go +++ b/weed/operation/delete_content.go @@ -4,12 +4,14 @@ import ( "context" "errors" "fmt" - "github.com/chrislusf/seaweedfs/weed/glog" - "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb" - "google.golang.org/grpc" "net/http" "strings" "sync" + + "google.golang.org/grpc" + + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb" ) type DeleteResult struct { @@ -94,7 +96,7 @@ func DeleteFilesWithLookupVolumeId(grpcDialOption grpc.DialOption, fileIds []str if deleteResults, deleteErr := DeleteFilesAtOneVolumeServer(server, grpcDialOption, fidList); deleteErr != nil { err = deleteErr - } else { + } else if deleteResults != nil { resultChan <- deleteResults } From 30c7148020822651703a449ea5dd2c4f813dd9a5 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Wed, 8 Jan 2020 22:08:35 -0800 Subject: [PATCH 0031/2432] 1.50 --- weed/util/constants.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/weed/util/constants.go b/weed/util/constants.go index 0bf794344..99cd29d87 100644 --- a/weed/util/constants.go +++ b/weed/util/constants.go @@ -5,5 +5,5 @@ import ( ) var ( - VERSION = fmt.Sprintf("%s %d.%d", sizeLimit, 1, 49) + VERSION = fmt.Sprintf("%s %d.%d", sizeLimit, 1, 50) ) From d1ab16b6e312b8a218b974ecf9caa3d4b0103eb6 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Fri, 10 Jan 2020 00:37:44 -0800 Subject: [PATCH 0032/2432] treat it as a single node cluster if empty raft server name possible fix for https://github.com/chrislusf/seaweedfs/issues/1118 --- weed/topology/topology.go | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/weed/topology/topology.go b/weed/topology/topology.go index e6cb44727..fbf998707 100644 --- a/weed/topology/topology.go +++ b/weed/topology/topology.go @@ -60,7 +60,12 @@ func NewTopology(id string, seq sequence.Sequencer, volumeSizeLimit uint64, puls func (t *Topology) IsLeader() bool { if t.RaftServer != nil { - return t.RaftServer.State() == raft.Leader + if t.RaftServer.State() == raft.Leader { + return true + } + if t.RaftServer.Leader() == "" { + return true + } } return false } @@ -75,7 +80,7 @@ func (t *Topology) Leader() (string, error) { if l == "" { // We are a single node cluster, we are the leader - return t.RaftServer.Name(), errors.New("Raft Server not initialized!") + return t.RaftServer.Name(), nil } return l, nil @@ -152,7 +157,7 @@ func (t *Topology) 
ListCollections(includeNormalVolumes, includeEcVolumes bool) t.ecShardMapLock.RUnlock() } - for k, _ := range mapOfCollections { + for k := range mapOfCollections { ret = append(ret, k) } return ret From e4b660699f0e49944c8e951cb552d215da29471f Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Sun, 12 Jan 2020 20:31:33 -0800 Subject: [PATCH 0033/2432] correct comments fix https://github.com/chrislusf/seaweedfs/issues/1181 --- weed/command/scaffold.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/weed/command/scaffold.go b/weed/command/scaffold.go index 8519598b3..78eec277c 100644 --- a/weed/command/scaffold.go +++ b/weed/command/scaffold.go @@ -145,9 +145,9 @@ addresses = [ "localhost:30006", ] password = "" -// allows reads from slave servers or the master, but all writes still go to the master +# allows reads from slave servers or the master, but all writes still go to the master readOnly = true -// automatically use the closest Redis server for reads +# automatically use the closest Redis server for reads routeByLatency = true [etcd] From 230a0e4a333699f9622a418ee7e4f91befa00bfa Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Wed, 15 Jan 2020 19:08:54 -0800 Subject: [PATCH 0034/2432] filer: no entry if not found --- weed/filesys/dir.go | 6 ++---- weed/filesys/xattr.go | 6 ++---- 2 files changed, 4 insertions(+), 8 deletions(-) diff --git a/weed/filesys/dir.go b/weed/filesys/dir.go index 7b24a1ec5..e31986c04 100644 --- a/weed/filesys/dir.go +++ b/weed/filesys/dir.go @@ -184,7 +184,8 @@ func (dir *Dir) Lookup(ctx context.Context, req *fuse.LookupRequest, resp *fuse. glog.V(3).Infof("dir Lookup cache miss %s", fullFilePath) entry, err = filer2.GetEntry(ctx, dir.wfs, fullFilePath) if err != nil { - return nil, err + glog.V(1).Infof("dir GetEntry %s: %v", fullFilePath, err) + return nil, fuse.ENOENT } if entry != nil { dir.wfs.listDirectoryEntriesCache.Set(fullFilePath, entry, 5*time.Minute) @@ -390,9 +391,6 @@ func (dir *Dir) maybeLoadEntry(ctx context.Context) error { if err != nil { return err } - if entry == nil { - return fuse.ENOENT - } dir.entry = entry } return nil diff --git a/weed/filesys/xattr.go b/weed/filesys/xattr.go index 3c0ba164a..23775cec9 100644 --- a/weed/filesys/xattr.go +++ b/weed/filesys/xattr.go @@ -125,15 +125,13 @@ func (wfs *WFS) maybeLoadEntry(ctx context.Context, dir, name string) (entry *fi } resp, err := client.LookupDirectoryEntry(ctx, request) - if err != nil { + if err != nil || resp == nil || resp.Entry == nil { glog.V(3).Infof("file attr read file %v: %v", request, err) return fuse.ENOENT } entry = resp.Entry - if entry != nil { - wfs.listDirectoryEntriesCache.Set(fullpath, entry, wfs.option.EntryCacheTtl) - } + wfs.listDirectoryEntriesCache.Set(fullpath, entry, wfs.option.EntryCacheTtl) return nil }) From 061a61e018c3fbfa5554bcea50d8ce9e6951945b Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Wed, 15 Jan 2020 19:09:00 -0800 Subject: [PATCH 0035/2432] logs --- weed/filesys/dir_rename.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/weed/filesys/dir_rename.go b/weed/filesys/dir_rename.go index e72a15758..7890d24d9 100644 --- a/weed/filesys/dir_rename.go +++ b/weed/filesys/dir_rename.go @@ -3,6 +3,8 @@ package filesys import ( "context" "fmt" + + "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" "github.com/seaweedfs/fuse" "github.com/seaweedfs/fuse/fs" @@ -11,6 +13,7 @@ import ( func (dir *Dir) Rename(ctx context.Context, req *fuse.RenameRequest, newDirectory fs.Node) error { newDir := 
newDirectory.(*Dir) + glog.V(4).Infof("dir Rename %s/%s => %s/%s", dir.Path, req.OldName, newDir.Path, req.NewName) return dir.wfs.WithFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { From e9bb0c179fff7016bc4150be8082383f1425a2c7 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Sat, 18 Jan 2020 13:25:17 -0800 Subject: [PATCH 0036/2432] adjust logs --- weed/filesys/dir.go | 9 +++++++-- weed/filesys/xattr.go | 7 ++++++- 2 files changed, 13 insertions(+), 3 deletions(-) diff --git a/weed/filesys/dir.go b/weed/filesys/dir.go index e31986c04..563581db9 100644 --- a/weed/filesys/dir.go +++ b/weed/filesys/dir.go @@ -69,6 +69,8 @@ func (dir *Dir) Getxattr(ctx context.Context, req *fuse.GetxattrRequest, resp *f } func (dir *Dir) setRootDirAttributes(attr *fuse.Attr) { + attr.Inode = 1 + attr.Valid = time.Hour attr.Uid = dir.wfs.option.MountUid attr.Gid = dir.wfs.option.MountGid attr.Mode = dir.wfs.option.MountMode @@ -76,6 +78,7 @@ func (dir *Dir) setRootDirAttributes(attr *fuse.Attr) { attr.Ctime = dir.wfs.option.MountCtime attr.Mtime = dir.wfs.option.MountMtime attr.Atime = dir.wfs.option.MountMtime + attr.BlockSize = 1024 * 1024 } func (dir *Dir) newFile(name string, entry *filer_pb.Entry) *File { @@ -202,6 +205,7 @@ func (dir *Dir) Lookup(ctx context.Context, req *fuse.LookupRequest, resp *fuse. } resp.EntryValid = time.Duration(0) + resp.Attr.Valid = time.Duration(0) resp.Attr.Mtime = time.Unix(entry.Attributes.Mtime, 0) resp.Attr.Ctime = time.Unix(entry.Attributes.Crtime, 0) resp.Attr.Mode = os.FileMode(entry.Attributes.FileMode) @@ -211,6 +215,7 @@ func (dir *Dir) Lookup(ctx context.Context, req *fuse.LookupRequest, resp *fuse. return node, nil } + glog.V(1).Infof("not found dir GetEntry %s: %v", fullFilePath, err) return nil, fuse.ENOENT } @@ -270,7 +275,7 @@ func (dir *Dir) removeOneFile(ctx context.Context, req *fuse.RemoveRequest) erro glog.V(3).Infof("remove file: %v", request) _, err := client.DeleteEntry(ctx, request) if err != nil { - glog.V(3).Infof("remove file %s/%s: %v", dir.Path, req.Name, err) + glog.V(3).Infof("not found remove file %s/%s: %v", dir.Path, req.Name, err) return fuse.ENOENT } @@ -294,7 +299,7 @@ func (dir *Dir) removeFolder(ctx context.Context, req *fuse.RemoveRequest) error glog.V(3).Infof("remove directory entry: %v", request) _, err := client.DeleteEntry(ctx, request) if err != nil { - glog.V(3).Infof("remove %s/%s: %v", dir.Path, req.Name, err) + glog.V(3).Infof("not found remove %s/%s: %v", dir.Path, req.Name, err) return fuse.ENOENT } diff --git a/weed/filesys/xattr.go b/weed/filesys/xattr.go index 23775cec9..8857c33d6 100644 --- a/weed/filesys/xattr.go +++ b/weed/filesys/xattr.go @@ -4,6 +4,7 @@ import ( "context" "path/filepath" + "github.com/chrislusf/seaweedfs/weed/filer2" "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" "github.com/seaweedfs/fuse" @@ -115,7 +116,7 @@ func (wfs *WFS) maybeLoadEntry(ctx context.Context, dir, name string) (entry *fi entry = item.Value().(*filer_pb.Entry) return } - glog.V(3).Infof("read entry cache miss %s", fullpath) + // glog.V(3).Infof("read entry cache miss %s", fullpath) err = wfs.WithFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { @@ -126,6 +127,10 @@ func (wfs *WFS) maybeLoadEntry(ctx context.Context, dir, name string) (entry *fi resp, err := client.LookupDirectoryEntry(ctx, request) if err != nil || resp == nil || resp.Entry == nil { + if err == filer2.ErrNotFound { + glog.V(3).Infof("file attr read not found file %v: %v", request, err) + 
return fuse.ENOENT + } glog.V(3).Infof("file attr read file %v: %v", request, err) return fuse.ENOENT } From 39edcfde4cdd867a5c36211f9e355e2a470f87d5 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Sun, 19 Jan 2020 12:06:19 -0800 Subject: [PATCH 0037/2432] filer: set inode avoid "getcwd: No such file or directory" --- .../filer2/abstract_sql/abstract_sql_store.go | 13 ++++---- weed/filer2/abstract_sql/hashing.go | 32 ------------------- weed/filesys/dir.go | 23 +++++++++---- weed/filesys/file.go | 4 ++- weed/util/bytes.go | 31 ++++++++++++++++++ 5 files changed, 57 insertions(+), 46 deletions(-) delete mode 100644 weed/filer2/abstract_sql/hashing.go diff --git a/weed/filer2/abstract_sql/abstract_sql_store.go b/weed/filer2/abstract_sql/abstract_sql_store.go index d512467c7..47fe507a1 100644 --- a/weed/filer2/abstract_sql/abstract_sql_store.go +++ b/weed/filer2/abstract_sql/abstract_sql_store.go @@ -7,6 +7,7 @@ import ( "github.com/chrislusf/seaweedfs/weed/filer2" "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/util" ) type AbstractSqlStore struct { @@ -65,7 +66,7 @@ func (store *AbstractSqlStore) InsertEntry(ctx context.Context, entry *filer2.En return fmt.Errorf("encode %s: %s", entry.FullPath, err) } - res, err := store.getTxOrDB(ctx).ExecContext(ctx, store.SqlInsert, hashToLong(dir), name, dir, meta) + res, err := store.getTxOrDB(ctx).ExecContext(ctx, store.SqlInsert, util.HashStringToLong(dir), name, dir, meta) if err != nil { return fmt.Errorf("insert %s: %s", entry.FullPath, err) } @@ -85,7 +86,7 @@ func (store *AbstractSqlStore) UpdateEntry(ctx context.Context, entry *filer2.En return fmt.Errorf("encode %s: %s", entry.FullPath, err) } - res, err := store.getTxOrDB(ctx).ExecContext(ctx, store.SqlUpdate, meta, hashToLong(dir), name, dir) + res, err := store.getTxOrDB(ctx).ExecContext(ctx, store.SqlUpdate, meta, util.HashStringToLong(dir), name, dir) if err != nil { return fmt.Errorf("update %s: %s", entry.FullPath, err) } @@ -100,7 +101,7 @@ func (store *AbstractSqlStore) UpdateEntry(ctx context.Context, entry *filer2.En func (store *AbstractSqlStore) FindEntry(ctx context.Context, fullpath filer2.FullPath) (*filer2.Entry, error) { dir, name := fullpath.DirAndName() - row := store.getTxOrDB(ctx).QueryRowContext(ctx, store.SqlFind, hashToLong(dir), name, dir) + row := store.getTxOrDB(ctx).QueryRowContext(ctx, store.SqlFind, util.HashStringToLong(dir), name, dir) var data []byte if err := row.Scan(&data); err != nil { return nil, filer2.ErrNotFound @@ -120,7 +121,7 @@ func (store *AbstractSqlStore) DeleteEntry(ctx context.Context, fullpath filer2. dir, name := fullpath.DirAndName() - res, err := store.getTxOrDB(ctx).ExecContext(ctx, store.SqlDelete, hashToLong(dir), name, dir) + res, err := store.getTxOrDB(ctx).ExecContext(ctx, store.SqlDelete, util.HashStringToLong(dir), name, dir) if err != nil { return fmt.Errorf("delete %s: %s", fullpath, err) } @@ -135,7 +136,7 @@ func (store *AbstractSqlStore) DeleteEntry(ctx context.Context, fullpath filer2. 
func (store *AbstractSqlStore) DeleteFolderChildren(ctx context.Context, fullpath filer2.FullPath) error { - res, err := store.getTxOrDB(ctx).ExecContext(ctx, store.SqlDeleteFolderChildren, hashToLong(string(fullpath)), fullpath) + res, err := store.getTxOrDB(ctx).ExecContext(ctx, store.SqlDeleteFolderChildren, util.HashStringToLong(string(fullpath)), fullpath) if err != nil { return fmt.Errorf("deleteFolderChildren %s: %s", fullpath, err) } @@ -155,7 +156,7 @@ func (store *AbstractSqlStore) ListDirectoryEntries(ctx context.Context, fullpat sqlText = store.SqlListInclusive } - rows, err := store.getTxOrDB(ctx).QueryContext(ctx, sqlText, hashToLong(string(fullpath)), startFileName, string(fullpath), limit) + rows, err := store.getTxOrDB(ctx).QueryContext(ctx, sqlText, util.HashStringToLong(string(fullpath)), startFileName, string(fullpath), limit) if err != nil { return nil, fmt.Errorf("list %s : %v", fullpath, err) } diff --git a/weed/filer2/abstract_sql/hashing.go b/weed/filer2/abstract_sql/hashing.go deleted file mode 100644 index 5c982c537..000000000 --- a/weed/filer2/abstract_sql/hashing.go +++ /dev/null @@ -1,32 +0,0 @@ -package abstract_sql - -import ( - "crypto/md5" - "io" -) - -// returns a 64 bit big int -func hashToLong(dir string) (v int64) { - h := md5.New() - io.WriteString(h, dir) - - b := h.Sum(nil) - - v += int64(b[0]) - v <<= 8 - v += int64(b[1]) - v <<= 8 - v += int64(b[2]) - v <<= 8 - v += int64(b[3]) - v <<= 8 - v += int64(b[4]) - v <<= 8 - v += int64(b[5]) - v <<= 8 - v += int64(b[6]) - v <<= 8 - v += int64(b[7]) - - return -} diff --git a/weed/filesys/dir.go b/weed/filesys/dir.go index 563581db9..95b26f409 100644 --- a/weed/filesys/dir.go +++ b/weed/filesys/dir.go @@ -9,6 +9,7 @@ import ( "github.com/chrislusf/seaweedfs/weed/filer2" "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/util" "github.com/seaweedfs/fuse" "github.com/seaweedfs/fuse/fs" ) @@ -34,26 +35,31 @@ var _ = fs.NodeListxattrer(&Dir{}) func (dir *Dir) Attr(ctx context.Context, attr *fuse.Attr) error { - glog.V(3).Infof("dir Attr %s", dir.Path) + glog.V(3).Infof("dir Attr %s, existing attr: %+v", dir.Path, attr) // https://github.com/bazil/fuse/issues/196 attr.Valid = time.Second if dir.Path == dir.wfs.option.FilerMountRootPath { dir.setRootDirAttributes(attr) + glog.V(3).Infof("root dir Attr %s, attr: %+v", dir.Path, attr) return nil } if err := dir.maybeLoadEntry(ctx); err != nil { + glog.V(3).Infof("dir Attr %s,err: %+v", dir.Path, err) return err } + attr.Inode = uint64(util.HashStringToLong(dir.Path)) attr.Mode = os.FileMode(dir.entry.Attributes.FileMode) | os.ModeDir attr.Mtime = time.Unix(dir.entry.Attributes.Mtime, 0) attr.Ctime = time.Unix(dir.entry.Attributes.Crtime, 0) attr.Gid = dir.entry.Attributes.Gid attr.Uid = dir.entry.Attributes.Uid + glog.V(3).Infof("dir Attr %s, attr: %+v", dir.Path, attr) + return nil } @@ -69,7 +75,7 @@ func (dir *Dir) Getxattr(ctx context.Context, req *fuse.GetxattrRequest, resp *f } func (dir *Dir) setRootDirAttributes(attr *fuse.Attr) { - attr.Inode = 1 + attr.Inode = uint64(util.HashStringToLong(dir.Path)) attr.Valid = time.Hour attr.Uid = dir.wfs.option.MountUid attr.Gid = dir.wfs.option.MountGid @@ -204,8 +210,9 @@ func (dir *Dir) Lookup(ctx context.Context, req *fuse.LookupRequest, resp *fuse. 
node = dir.newFile(req.Name, entry) } - resp.EntryValid = time.Duration(0) - resp.Attr.Valid = time.Duration(0) + resp.EntryValid = time.Second + resp.Attr.Inode = uint64(util.HashStringToLong(fullFilePath)) + resp.Attr.Valid = time.Second resp.Attr.Mtime = time.Unix(entry.Attributes.Mtime, 0) resp.Attr.Ctime = time.Unix(entry.Attributes.Crtime, 0) resp.Attr.Mode = os.FileMode(entry.Attributes.FileMode) @@ -226,14 +233,16 @@ func (dir *Dir) ReadDirAll(ctx context.Context) (ret []fuse.Dirent, err error) { cacheTtl := 5 * time.Minute readErr := filer2.ReadDirAllEntries(ctx, dir.wfs, dir.Path, "", func(entry *filer_pb.Entry, isLast bool) { + fullpath := path.Join(dir.Path, entry.Name) + inode := uint64(util.HashStringToLong(fullpath)) if entry.IsDirectory { - dirent := fuse.Dirent{Name: entry.Name, Type: fuse.DT_Dir} + dirent := fuse.Dirent{Inode: inode, Name: entry.Name, Type: fuse.DT_Dir} ret = append(ret, dirent) } else { - dirent := fuse.Dirent{Name: entry.Name, Type: fuse.DT_File} + dirent := fuse.Dirent{Inode: inode, Name: entry.Name, Type: fuse.DT_File} ret = append(ret, dirent) } - dir.wfs.listDirectoryEntriesCache.Set(path.Join(dir.Path, entry.Name), entry, cacheTtl) + dir.wfs.listDirectoryEntriesCache.Set(fullpath, entry, cacheTtl) }) if readErr != nil { glog.V(0).Infof("list %s: %v", dir.Path, err) diff --git a/weed/filesys/file.go b/weed/filesys/file.go index afe78ee0f..b0df344fa 100644 --- a/weed/filesys/file.go +++ b/weed/filesys/file.go @@ -10,6 +10,7 @@ import ( "github.com/chrislusf/seaweedfs/weed/filer2" "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/util" "github.com/seaweedfs/fuse" "github.com/seaweedfs/fuse/fs" ) @@ -40,12 +41,13 @@ func (file *File) fullpath() string { func (file *File) Attr(ctx context.Context, attr *fuse.Attr) error { - glog.V(4).Infof("file Attr %s", file.fullpath()) + glog.V(4).Infof("file Attr %s, open:%v, existing attr: %+v", file.fullpath(), file.isOpen, attr) if err := file.maybeLoadEntry(ctx); err != nil { return err } + attr.Inode = uint64(util.HashStringToLong(file.fullpath())) attr.Mode = os.FileMode(file.entry.Attributes.FileMode) attr.Size = filer2.TotalSize(file.entry.Chunks) attr.Mtime = time.Unix(file.entry.Attributes.Mtime, 0) diff --git a/weed/util/bytes.go b/weed/util/bytes.go index dfa4ae665..9c7e5e2cb 100644 --- a/weed/util/bytes.go +++ b/weed/util/bytes.go @@ -1,5 +1,10 @@ package util +import ( + "crypto/md5" + "io" +) + // big endian func BytesToUint64(b []byte) (v uint64) { @@ -43,3 +48,29 @@ func Uint16toBytes(b []byte, v uint16) { func Uint8toBytes(b []byte, v uint8) { b[0] = byte(v) } + +// returns a 64 bit big int +func HashStringToLong(dir string) (v int64) { + h := md5.New() + io.WriteString(h, dir) + + b := h.Sum(nil) + + v += int64(b[0]) + v <<= 8 + v += int64(b[1]) + v <<= 8 + v += int64(b[2]) + v <<= 8 + v += int64(b[3]) + v <<= 8 + v += int64(b[4]) + v <<= 8 + v += int64(b[5]) + v <<= 8 + v += int64(b[6]) + v <<= 8 + v += int64(b[7]) + + return +} From 6a40c688e00d9bd2cbee28aae18fface408a9742 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Sun, 19 Jan 2020 12:07:04 -0800 Subject: [PATCH 0038/2432] mount: modify file size --- weed/filesys/file.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/weed/filesys/file.go b/weed/filesys/file.go index b0df344fa..ca0550a87 100644 --- a/weed/filesys/file.go +++ b/weed/filesys/file.go @@ -50,6 +50,10 @@ func (file *File) Attr(ctx context.Context, attr *fuse.Attr) error { attr.Inode = 
uint64(util.HashStringToLong(file.fullpath())) attr.Mode = os.FileMode(file.entry.Attributes.FileMode) attr.Size = filer2.TotalSize(file.entry.Chunks) + if file.isOpen { + attr.Size = file.entry.Attributes.FileSize + } + attr.Crtime = time.Unix(file.entry.Attributes.Crtime, 0) attr.Mtime = time.Unix(file.entry.Attributes.Mtime, 0) attr.Gid = file.entry.Attributes.Gid attr.Uid = file.entry.Attributes.Uid From 04019aa161dcfe04511da41e198c9d3b5b1b66dc Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Sun, 19 Jan 2020 12:07:26 -0800 Subject: [PATCH 0039/2432] mount: deletion ignore old vid --- weed/filesys/wfs_deletion.go | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/weed/filesys/wfs_deletion.go b/weed/filesys/wfs_deletion.go index 6e586b7df..52c275e26 100644 --- a/weed/filesys/wfs_deletion.go +++ b/weed/filesys/wfs_deletion.go @@ -50,7 +50,10 @@ func deleteFileIds(ctx context.Context, grpcDialOption grpc.DialOption, client f VolumeId: vid, Locations: nil, } - locations := resp.LocationsMap[vid] + locations, found := resp.LocationsMap[vid] + if !found { + continue + } for _, loc := range locations.Locations { lr.Locations = append(lr.Locations, operation.Location{ Url: loc.Url, From 2f15e9346696d18032b9d2bfffe459635cb36171 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Sun, 19 Jan 2020 12:31:56 -0800 Subject: [PATCH 0040/2432] mount: rename also clear the cache fix https://github.com/chrislusf/seaweedfs/issues/1182 --- weed/filesys/dir.go | 6 +++--- weed/filesys/dir_rename.go | 11 ++++++++++- weed/filesys/xattr.go | 3 +-- 3 files changed, 14 insertions(+), 6 deletions(-) diff --git a/weed/filesys/dir.go b/weed/filesys/dir.go index 95b26f409..ac6291319 100644 --- a/weed/filesys/dir.go +++ b/weed/filesys/dir.go @@ -182,7 +182,7 @@ func (dir *Dir) Lookup(ctx context.Context, req *fuse.LookupRequest, resp *fuse. 
glog.V(4).Infof("dir Lookup %s: %s", dir.Path, req.Name) var entry *filer_pb.Entry - fullFilePath := path.Join(dir.Path, req.Name) + fullFilePath := string(filer2.NewFullPath(dir.Path, req.Name)) item := dir.wfs.listDirectoryEntriesCache.Get(fullFilePath) if item != nil && !item.Expired() { @@ -233,7 +233,7 @@ func (dir *Dir) ReadDirAll(ctx context.Context) (ret []fuse.Dirent, err error) { cacheTtl := 5 * time.Minute readErr := filer2.ReadDirAllEntries(ctx, dir.wfs, dir.Path, "", func(entry *filer_pb.Entry, isLast bool) { - fullpath := path.Join(dir.Path, entry.Name) + fullpath := string(filer2.NewFullPath(dir.Path, entry.Name)) inode := uint64(util.HashStringToLong(fullpath)) if entry.IsDirectory { dirent := fuse.Dirent{Inode: inode, Name: entry.Name, Type: fuse.DT_Dir} @@ -295,7 +295,7 @@ func (dir *Dir) removeOneFile(ctx context.Context, req *fuse.RemoveRequest) erro func (dir *Dir) removeFolder(ctx context.Context, req *fuse.RemoveRequest) error { - dir.wfs.listDirectoryEntriesCache.Delete(path.Join(dir.Path, req.Name)) + dir.wfs.listDirectoryEntriesCache.Delete(string(filer2.NewFullPath(dir.Path, req.Name))) return dir.wfs.WithFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { diff --git a/weed/filesys/dir_rename.go b/weed/filesys/dir_rename.go index 7890d24d9..8309b238a 100644 --- a/weed/filesys/dir_rename.go +++ b/weed/filesys/dir_rename.go @@ -4,6 +4,7 @@ import ( "context" "fmt" + "github.com/chrislusf/seaweedfs/weed/filer2" "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" "github.com/seaweedfs/fuse" @@ -15,7 +16,7 @@ func (dir *Dir) Rename(ctx context.Context, req *fuse.RenameRequest, newDirector newDir := newDirectory.(*Dir) glog.V(4).Infof("dir Rename %s/%s => %s/%s", dir.Path, req.OldName, newDir.Path, req.NewName) - return dir.wfs.WithFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { + err := dir.wfs.WithFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { request := &filer_pb.AtomicRenameEntryRequest{ OldDirectory: dir.Path, @@ -33,4 +34,12 @@ func (dir *Dir) Rename(ctx context.Context, req *fuse.RenameRequest, newDirector }) + if err == nil { + oldpath := string(filer2.NewFullPath(dir.Path, req.OldName)) + newpath := string(filer2.NewFullPath(newDir.Path, req.NewName)) + dir.wfs.listDirectoryEntriesCache.Delete(oldpath) + dir.wfs.listDirectoryEntriesCache.Delete(newpath) + } + + return err } diff --git a/weed/filesys/xattr.go b/weed/filesys/xattr.go index 8857c33d6..a81f74638 100644 --- a/weed/filesys/xattr.go +++ b/weed/filesys/xattr.go @@ -2,7 +2,6 @@ package filesys import ( "context" - "path/filepath" "github.com/chrislusf/seaweedfs/weed/filer2" "github.com/chrislusf/seaweedfs/weed/glog" @@ -110,7 +109,7 @@ func listxattr(entry *filer_pb.Entry, req *fuse.ListxattrRequest, resp *fuse.Lis func (wfs *WFS) maybeLoadEntry(ctx context.Context, dir, name string) (entry *filer_pb.Entry, err error) { - fullpath := filepath.Join(dir, name) + fullpath := string(filer2.NewFullPath(dir, name)) item := wfs.listDirectoryEntriesCache.Get(fullpath) if item != nil && !item.Expired() { entry = item.Value().(*filer_pb.Entry) From 1b0bfbaf59ba613ebae6b90021b2270b3ac34bc5 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Sun, 19 Jan 2020 23:59:46 -0800 Subject: [PATCH 0041/2432] refactoring --- weed/filer2/filer_client_util.go | 10 +++---- weed/filer2/fullpath.go | 6 +++++ weed/filesys/dir.go | 43 +++++++++++++----------------- weed/filesys/dir_rename.go | 18 +++++-------- weed/filesys/file.go | 21 
++++++++------- weed/filesys/wfs.go | 25 ++++++++++++++--- weed/filesys/xattr.go | 16 +++++------ weed/server/webdav_server.go | 23 +++++++--------- weed/shell/command_fs_du.go | 2 +- weed/shell/command_fs_ls.go | 2 +- weed/shell/command_fs_meta_save.go | 2 +- weed/shell/command_fs_tree.go | 9 +++---- 12 files changed, 92 insertions(+), 85 deletions(-) diff --git a/weed/filer2/filer_client_util.go b/weed/filer2/filer_client_util.go index 1a10f7c20..b1c579447 100644 --- a/weed/filer2/filer_client_util.go +++ b/weed/filer2/filer_client_util.go @@ -25,7 +25,7 @@ type FilerClient interface { WithFilerClient(ctx context.Context, fn func(filer_pb.SeaweedFilerClient) error) error } -func ReadIntoBuffer(ctx context.Context, filerClient FilerClient, fullFilePath string, buff []byte, chunkViews []*ChunkView, baseOffset int64) (totalRead int64, err error) { +func ReadIntoBuffer(ctx context.Context, filerClient FilerClient, fullFilePath FullPath, buff []byte, chunkViews []*ChunkView, baseOffset int64) (totalRead int64, err error) { var vids []string for _, chunkView := range chunkViews { vids = append(vids, VolumeId(chunkView.FileId)) @@ -93,9 +93,9 @@ func ReadIntoBuffer(ctx context.Context, filerClient FilerClient, fullFilePath s return } -func GetEntry(ctx context.Context, filerClient FilerClient, fullFilePath string) (entry *filer_pb.Entry, err error) { +func GetEntry(ctx context.Context, filerClient FilerClient, fullFilePath FullPath) (entry *filer_pb.Entry, err error) { - dir, name := FullPath(fullFilePath).DirAndName() + dir, name := fullFilePath.DirAndName() err = filerClient.WithFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { @@ -126,14 +126,14 @@ func GetEntry(ctx context.Context, filerClient FilerClient, fullFilePath string) return } -func ReadDirAllEntries(ctx context.Context, filerClient FilerClient, fullDirPath, prefix string, fn func(entry *filer_pb.Entry, isLast bool)) (err error) { +func ReadDirAllEntries(ctx context.Context, filerClient FilerClient, fullDirPath FullPath, prefix string, fn func(entry *filer_pb.Entry, isLast bool)) (err error) { err = filerClient.WithFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { lastEntryName := "" request := &filer_pb.ListEntriesRequest{ - Directory: fullDirPath, + Directory: string(fullDirPath), Prefix: prefix, StartFromFileName: lastEntryName, Limit: math.MaxUint32, diff --git a/weed/filer2/fullpath.go b/weed/filer2/fullpath.go index 191e51cf3..133069f93 100644 --- a/weed/filer2/fullpath.go +++ b/weed/filer2/fullpath.go @@ -3,6 +3,8 @@ package filer2 import ( "path/filepath" "strings" + + "github.com/chrislusf/seaweedfs/weed/util" ) type FullPath string @@ -34,3 +36,7 @@ func (fp FullPath) Child(name string) FullPath { } return FullPath(dir + "/" + name) } + +func (fp FullPath) AsInode() uint64 { + return uint64(util.HashStringToLong(string(fp))) +} diff --git a/weed/filesys/dir.go b/weed/filesys/dir.go index ac6291319..076252051 100644 --- a/weed/filesys/dir.go +++ b/weed/filesys/dir.go @@ -9,7 +9,6 @@ import ( "github.com/chrislusf/seaweedfs/weed/filer2" "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" - "github.com/chrislusf/seaweedfs/weed/util" "github.com/seaweedfs/fuse" "github.com/seaweedfs/fuse/fs" ) @@ -51,7 +50,7 @@ func (dir *Dir) Attr(ctx context.Context, attr *fuse.Attr) error { return err } - attr.Inode = uint64(util.HashStringToLong(dir.Path)) + attr.Inode = filer2.FullPath(dir.Path).AsInode() attr.Mode = os.FileMode(dir.entry.Attributes.FileMode) | 
os.ModeDir attr.Mtime = time.Unix(dir.entry.Attributes.Mtime, 0) attr.Ctime = time.Unix(dir.entry.Attributes.Crtime, 0) @@ -75,7 +74,7 @@ func (dir *Dir) Getxattr(ctx context.Context, req *fuse.GetxattrRequest, resp *f } func (dir *Dir) setRootDirAttributes(attr *fuse.Attr) { - attr.Inode = uint64(util.HashStringToLong(dir.Path)) + attr.Inode = 1 // filer2.FullPath(dir.Path).AsInode() attr.Valid = time.Hour attr.Uid = dir.wfs.option.MountUid attr.Gid = dir.wfs.option.MountGid @@ -181,13 +180,8 @@ func (dir *Dir) Lookup(ctx context.Context, req *fuse.LookupRequest, resp *fuse. glog.V(4).Infof("dir Lookup %s: %s", dir.Path, req.Name) - var entry *filer_pb.Entry - fullFilePath := string(filer2.NewFullPath(dir.Path, req.Name)) - - item := dir.wfs.listDirectoryEntriesCache.Get(fullFilePath) - if item != nil && !item.Expired() { - entry = item.Value().(*filer_pb.Entry) - } + fullFilePath := filer2.NewFullPath(dir.Path, req.Name) + entry := dir.wfs.cacheGet(fullFilePath) if entry == nil { glog.V(3).Infof("dir Lookup cache miss %s", fullFilePath) @@ -196,22 +190,20 @@ func (dir *Dir) Lookup(ctx context.Context, req *fuse.LookupRequest, resp *fuse. glog.V(1).Infof("dir GetEntry %s: %v", fullFilePath, err) return nil, fuse.ENOENT } - if entry != nil { - dir.wfs.listDirectoryEntriesCache.Set(fullFilePath, entry, 5*time.Minute) - } + dir.wfs.cacheSet(fullFilePath, entry, 5*time.Minute) } else { glog.V(4).Infof("dir Lookup cache hit %s", fullFilePath) } if entry != nil { if entry.IsDirectory { - node = &Dir{Path: path.Join(dir.Path, req.Name), wfs: dir.wfs, entry: entry} + node = &Dir{Path: string(fullFilePath), wfs: dir.wfs, entry: entry} } else { node = dir.newFile(req.Name, entry) } resp.EntryValid = time.Second - resp.Attr.Inode = uint64(util.HashStringToLong(fullFilePath)) + resp.Attr.Inode = fullFilePath.AsInode() resp.Attr.Valid = time.Second resp.Attr.Mtime = time.Unix(entry.Attributes.Mtime, 0) resp.Attr.Ctime = time.Unix(entry.Attributes.Crtime, 0) @@ -232,9 +224,9 @@ func (dir *Dir) ReadDirAll(ctx context.Context) (ret []fuse.Dirent, err error) { cacheTtl := 5 * time.Minute - readErr := filer2.ReadDirAllEntries(ctx, dir.wfs, dir.Path, "", func(entry *filer_pb.Entry, isLast bool) { - fullpath := string(filer2.NewFullPath(dir.Path, entry.Name)) - inode := uint64(util.HashStringToLong(fullpath)) + readErr := filer2.ReadDirAllEntries(ctx, dir.wfs, filer2.FullPath(dir.Path), "", func(entry *filer_pb.Entry, isLast bool) { + fullpath := filer2.NewFullPath(dir.Path, entry.Name) + inode := fullpath.AsInode() if entry.IsDirectory { dirent := fuse.Dirent{Inode: inode, Name: entry.Name, Type: fuse.DT_Dir} ret = append(ret, dirent) @@ -242,7 +234,7 @@ func (dir *Dir) ReadDirAll(ctx context.Context) (ret []fuse.Dirent, err error) { dirent := fuse.Dirent{Inode: inode, Name: entry.Name, Type: fuse.DT_File} ret = append(ret, dirent) } - dir.wfs.listDirectoryEntriesCache.Set(fullpath, entry, cacheTtl) + dir.wfs.cacheSet(fullpath, entry, cacheTtl) }) if readErr != nil { glog.V(0).Infof("list %s: %v", dir.Path, err) @@ -264,14 +256,15 @@ func (dir *Dir) Remove(ctx context.Context, req *fuse.RemoveRequest) error { func (dir *Dir) removeOneFile(ctx context.Context, req *fuse.RemoveRequest) error { - entry, err := filer2.GetEntry(ctx, dir.wfs, path.Join(dir.Path, req.Name)) + filePath := filer2.NewFullPath(dir.Path, req.Name) + entry, err := filer2.GetEntry(ctx, dir.wfs, filePath) if err != nil { return err } dir.wfs.deleteFileChunks(ctx, entry.Chunks) - dir.wfs.listDirectoryEntriesCache.Delete(path.Join(dir.Path, 
req.Name)) + dir.wfs.cacheDelete(filePath) return dir.wfs.WithFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { @@ -295,7 +288,7 @@ func (dir *Dir) removeOneFile(ctx context.Context, req *fuse.RemoveRequest) erro func (dir *Dir) removeFolder(ctx context.Context, req *fuse.RemoveRequest) error { - dir.wfs.listDirectoryEntriesCache.Delete(string(filer2.NewFullPath(dir.Path, req.Name))) + dir.wfs.cacheDelete(filer2.NewFullPath(dir.Path, req.Name)) return dir.wfs.WithFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { @@ -340,7 +333,7 @@ func (dir *Dir) Setattr(ctx context.Context, req *fuse.SetattrRequest, resp *fus dir.entry.Attributes.Mtime = req.Mtime.Unix() } - dir.wfs.listDirectoryEntriesCache.Delete(dir.Path) + dir.wfs.cacheDelete(filer2.FullPath(dir.Path)) return dir.saveEntry(ctx) @@ -358,7 +351,7 @@ func (dir *Dir) Setxattr(ctx context.Context, req *fuse.SetxattrRequest) error { return err } - dir.wfs.listDirectoryEntriesCache.Delete(dir.Path) + dir.wfs.cacheDelete(filer2.FullPath(dir.Path)) return dir.saveEntry(ctx) @@ -376,7 +369,7 @@ func (dir *Dir) Removexattr(ctx context.Context, req *fuse.RemovexattrRequest) e return err } - dir.wfs.listDirectoryEntriesCache.Delete(dir.Path) + dir.wfs.cacheDelete(filer2.FullPath(dir.Path)) return dir.saveEntry(ctx) diff --git a/weed/filesys/dir_rename.go b/weed/filesys/dir_rename.go index 8309b238a..6b68e4ee9 100644 --- a/weed/filesys/dir_rename.go +++ b/weed/filesys/dir_rename.go @@ -2,7 +2,6 @@ package filesys import ( "context" - "fmt" "github.com/chrislusf/seaweedfs/weed/filer2" "github.com/chrislusf/seaweedfs/weed/glog" @@ -16,7 +15,10 @@ func (dir *Dir) Rename(ctx context.Context, req *fuse.RenameRequest, newDirector newDir := newDirectory.(*Dir) glog.V(4).Infof("dir Rename %s/%s => %s/%s", dir.Path, req.OldName, newDir.Path, req.NewName) - err := dir.wfs.WithFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { + dir.wfs.cacheDelete(filer2.NewFullPath(newDir.Path, req.NewName)) + dir.wfs.cacheDelete(filer2.NewFullPath(dir.Path, req.OldName)) + + return dir.wfs.WithFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { request := &filer_pb.AtomicRenameEntryRequest{ OldDirectory: dir.Path, @@ -27,19 +29,11 @@ func (dir *Dir) Rename(ctx context.Context, req *fuse.RenameRequest, newDirector _, err := client.AtomicRenameEntry(ctx, request) if err != nil { - return fmt.Errorf("renaming %s/%s => %s/%s: %v", dir.Path, req.OldName, newDir.Path, req.NewName, err) + glog.V(0).Infof("dir Rename %s/%s => %s/%s : %v", dir.Path, req.OldName, newDir.Path, req.NewName, err) + return fuse.EIO } return nil }) - - if err == nil { - oldpath := string(filer2.NewFullPath(dir.Path, req.OldName)) - newpath := string(filer2.NewFullPath(newDir.Path, req.NewName)) - dir.wfs.listDirectoryEntriesCache.Delete(oldpath) - dir.wfs.listDirectoryEntriesCache.Delete(newpath) - } - - return err } diff --git a/weed/filesys/file.go b/weed/filesys/file.go index ca0550a87..7e562eabc 100644 --- a/weed/filesys/file.go +++ b/weed/filesys/file.go @@ -3,14 +3,12 @@ package filesys import ( "context" "os" - "path/filepath" "sort" "time" "github.com/chrislusf/seaweedfs/weed/filer2" "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" - "github.com/chrislusf/seaweedfs/weed/util" "github.com/seaweedfs/fuse" "github.com/seaweedfs/fuse/fs" ) @@ -35,19 +33,22 @@ type File struct { isOpen bool } -func (file *File) fullpath() string { - return filepath.Join(file.dir.Path, file.Name) +func (file *File) 
fullpath() filer2.FullPath { + return filer2.NewFullPath(file.dir.Path, file.Name) } func (file *File) Attr(ctx context.Context, attr *fuse.Attr) error { glog.V(4).Infof("file Attr %s, open:%v, existing attr: %+v", file.fullpath(), file.isOpen, attr) - if err := file.maybeLoadEntry(ctx); err != nil { - return err + if !file.isOpen { + if err := file.maybeLoadEntry(ctx); err != nil { + return err + } } - attr.Inode = uint64(util.HashStringToLong(file.fullpath())) + attr.Inode = file.fullpath().AsInode() + attr.Valid = time.Second attr.Mode = os.FileMode(file.entry.Attributes.FileMode) attr.Size = filer2.TotalSize(file.entry.Chunks) if file.isOpen { @@ -132,7 +133,7 @@ func (file *File) Setattr(ctx context.Context, req *fuse.SetattrRequest, resp *f return nil } - file.wfs.listDirectoryEntriesCache.Delete(file.fullpath()) + file.wfs.cacheDelete(file.fullpath()) return file.saveEntry(ctx) @@ -150,7 +151,7 @@ func (file *File) Setxattr(ctx context.Context, req *fuse.SetxattrRequest) error return err } - file.wfs.listDirectoryEntriesCache.Delete(file.fullpath()) + file.wfs.cacheDelete(file.fullpath()) return file.saveEntry(ctx) @@ -168,7 +169,7 @@ func (file *File) Removexattr(ctx context.Context, req *fuse.RemovexattrRequest) return err } - file.wfs.listDirectoryEntriesCache.Delete(file.fullpath()) + file.wfs.cacheDelete(file.fullpath()) return file.saveEntry(ctx) diff --git a/weed/filesys/wfs.go b/weed/filesys/wfs.go index e924783ec..d3cc6329d 100644 --- a/weed/filesys/wfs.go +++ b/weed/filesys/wfs.go @@ -11,6 +11,7 @@ import ( "github.com/karlseguin/ccache" "google.golang.org/grpc" + "github.com/chrislusf/seaweedfs/weed/filer2" "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" "github.com/chrislusf/seaweedfs/weed/util" @@ -47,7 +48,7 @@ type WFS struct { // contains all open handles handles []*FileHandle - pathToHandleIndex map[string]int + pathToHandleIndex map[filer2.FullPath]int pathToHandleLock sync.Mutex bufPool sync.Pool @@ -62,7 +63,7 @@ func NewSeaweedFileSystem(option *Option) *WFS { wfs := &WFS{ option: option, listDirectoryEntriesCache: ccache.New(ccache.Configure().MaxSize(option.DirListCacheLimit * 3).ItemsToPrune(100)), - pathToHandleIndex: make(map[string]int), + pathToHandleIndex: make(map[filer2.FullPath]int), bufPool: sync.Pool{ New: func() interface{} { return make([]byte, option.ChunkSizeLimit) @@ -117,7 +118,7 @@ func (wfs *WFS) AcquireHandle(file *File, uid, gid uint32) (fileHandle *FileHand return } -func (wfs *WFS) ReleaseHandle(fullpath string, handleId fuse.HandleID) { +func (wfs *WFS) ReleaseHandle(fullpath filer2.FullPath, handleId fuse.HandleID) { wfs.pathToHandleLock.Lock() defer wfs.pathToHandleLock.Unlock() @@ -191,3 +192,21 @@ func (wfs *WFS) Statfs(ctx context.Context, req *fuse.StatfsRequest, resp *fuse. 
return nil } + +func (wfs *WFS) cacheGet(path filer2.FullPath) *filer_pb.Entry { + item := wfs.listDirectoryEntriesCache.Get(string(path)) + if item != nil && !item.Expired() { + return item.Value().(*filer_pb.Entry) + } + return nil +} +func (wfs *WFS) cacheSet(path filer2.FullPath, entry *filer_pb.Entry, ttl time.Duration) { + if entry == nil { + wfs.listDirectoryEntriesCache.Delete(string(path)) + }else{ + wfs.listDirectoryEntriesCache.Set(string(path), entry, ttl) + } +} +func (wfs *WFS) cacheDelete(path filer2.FullPath) { + wfs.listDirectoryEntriesCache.Delete(string(path)) +} diff --git a/weed/filesys/xattr.go b/weed/filesys/xattr.go index a81f74638..52a447d95 100644 --- a/weed/filesys/xattr.go +++ b/weed/filesys/xattr.go @@ -2,6 +2,7 @@ package filesys import ( "context" + "strings" "github.com/chrislusf/seaweedfs/weed/filer2" "github.com/chrislusf/seaweedfs/weed/glog" @@ -109,13 +110,12 @@ func listxattr(entry *filer_pb.Entry, req *fuse.ListxattrRequest, resp *fuse.Lis func (wfs *WFS) maybeLoadEntry(ctx context.Context, dir, name string) (entry *filer_pb.Entry, err error) { - fullpath := string(filer2.NewFullPath(dir, name)) - item := wfs.listDirectoryEntriesCache.Get(fullpath) - if item != nil && !item.Expired() { - entry = item.Value().(*filer_pb.Entry) + fullpath := filer2.NewFullPath(dir, name) + entry = wfs.cacheGet(fullpath) + if entry != nil { return } - // glog.V(3).Infof("read entry cache miss %s", fullpath) + glog.V(3).Infof("read entry cache miss %s", fullpath) err = wfs.WithFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { @@ -126,16 +126,16 @@ func (wfs *WFS) maybeLoadEntry(ctx context.Context, dir, name string) (entry *fi resp, err := client.LookupDirectoryEntry(ctx, request) if err != nil || resp == nil || resp.Entry == nil { - if err == filer2.ErrNotFound { + if err == filer2.ErrNotFound || strings.Contains(err.Error(), filer2.ErrNotFound.Error()) { glog.V(3).Infof("file attr read not found file %v: %v", request, err) return fuse.ENOENT } glog.V(3).Infof("file attr read file %v: %v", request, err) - return fuse.ENOENT + return fuse.EIO } entry = resp.Entry - wfs.listDirectoryEntriesCache.Set(fullpath, entry, wfs.option.EntryCacheTtl) + wfs.cacheSet(fullpath, entry, wfs.option.EntryCacheTtl) return nil }) diff --git a/weed/server/webdav_server.go b/weed/server/webdav_server.go index abd0b66eb..17f814302 100644 --- a/weed/server/webdav_server.go +++ b/weed/server/webdav_server.go @@ -335,8 +335,10 @@ func (fs *WebDavFileSystem) stat(ctx context.Context, fullFilePath string) (os.F return nil, err } + fullpath := filer2.FullPath(fullFilePath) + var fi FileInfo - entry, err := filer2.GetEntry(ctx, fs, fullFilePath) + entry, err := filer2.GetEntry(ctx, fs, fullpath) if entry == nil { return nil, os.ErrNotExist } @@ -344,14 +346,12 @@ func (fs *WebDavFileSystem) stat(ctx context.Context, fullFilePath string) (os.F return nil, err } fi.size = int64(filer2.TotalSize(entry.GetChunks())) - fi.name = fullFilePath + fi.name = string(fullpath) fi.mode = os.FileMode(entry.Attributes.FileMode) fi.modifiledTime = time.Unix(entry.Attributes.Mtime, 0) fi.isDirectory = entry.IsDirectory - _, fi.name = path.Split(path.Clean(fi.name)) - if fi.name == "" { - fi.name = "/" + if fi.name == "/" { fi.modifiledTime = time.Now() fi.isDirectory = true } @@ -372,7 +372,7 @@ func (f *WebDavFile) Write(buf []byte) (int, error) { var err error ctx := context.Background() if f.entry == nil { - f.entry, err = filer2.GetEntry(ctx, f.fs, f.name) + f.entry, err = filer2.GetEntry(ctx, 
f.fs, filer2.FullPath(f.name)) } if f.entry == nil { @@ -470,7 +470,7 @@ func (f *WebDavFile) Read(p []byte) (readSize int, err error) { ctx := context.Background() if f.entry == nil { - f.entry, err = filer2.GetEntry(ctx, f.fs, f.name) + f.entry, err = filer2.GetEntry(ctx, f.fs, filer2.FullPath(f.name)) } if f.entry == nil { return 0, err @@ -486,7 +486,7 @@ func (f *WebDavFile) Read(p []byte) (readSize int, err error) { } chunkViews := filer2.ViewFromVisibleIntervals(f.entryViewCache, f.off, len(p)) - totalRead, err := filer2.ReadIntoBuffer(ctx, f.fs, f.name, p, chunkViews, f.off) + totalRead, err := filer2.ReadIntoBuffer(ctx, f.fs, filer2.FullPath(f.name), p, chunkViews, f.off) if err != nil { return 0, err } @@ -507,12 +507,9 @@ func (f *WebDavFile) Readdir(count int) (ret []os.FileInfo, err error) { glog.V(2).Infof("WebDavFileSystem.Readdir %v count %d", f.name, count) ctx := context.Background() - dir := f.name - if dir != "/" && strings.HasSuffix(dir, "/") { - dir = dir[:len(dir)-1] - } + dir, _ := filer2.FullPath(f.name).DirAndName() - err = filer2.ReadDirAllEntries(ctx, f.fs, dir, "", func(entry *filer_pb.Entry, isLast bool) { + err = filer2.ReadDirAllEntries(ctx, f.fs, filer2.FullPath(dir), "", func(entry *filer_pb.Entry, isLast bool) { fi := FileInfo{ size: int64(filer2.TotalSize(entry.GetChunks())), name: entry.Name, diff --git a/weed/shell/command_fs_du.go b/weed/shell/command_fs_du.go index 1d7d79686..2c46350b2 100644 --- a/weed/shell/command_fs_du.go +++ b/weed/shell/command_fs_du.go @@ -59,7 +59,7 @@ func (c *commandFsDu) Do(args []string, commandEnv *CommandEnv, writer io.Writer func duTraverseDirectory(ctx context.Context, writer io.Writer, filerClient filer2.FilerClient, dir, name string) (blockCount uint64, byteCount uint64, err error) { - err = filer2.ReadDirAllEntries(ctx, filerClient, dir, name, func(entry *filer_pb.Entry, isLast bool) { + err = filer2.ReadDirAllEntries(ctx, filerClient, filer2.FullPath(dir), name, func(entry *filer_pb.Entry, isLast bool) { if entry.IsDirectory { subDir := fmt.Sprintf("%s/%s", dir, entry.Name) if dir == "/" { diff --git a/weed/shell/command_fs_ls.go b/weed/shell/command_fs_ls.go index 01842083b..0c63f71fa 100644 --- a/weed/shell/command_fs_ls.go +++ b/weed/shell/command_fs_ls.go @@ -69,7 +69,7 @@ func (c *commandFsLs) Do(args []string, commandEnv *CommandEnv, writer io.Writer dir, name := filer2.FullPath(path).DirAndName() entryCount := 0 - err = filer2.ReadDirAllEntries(ctx, commandEnv.getFilerClient(filerServer, filerPort), dir, name, func(entry *filer_pb.Entry, isLast bool) { + err = filer2.ReadDirAllEntries(ctx, commandEnv.getFilerClient(filerServer, filerPort), filer2.FullPath(dir), name, func(entry *filer_pb.Entry, isLast bool) { if !showHidden && strings.HasPrefix(entry.Name, ".") { return diff --git a/weed/shell/command_fs_meta_save.go b/weed/shell/command_fs_meta_save.go index ed070350f..178c826d5 100644 --- a/weed/shell/command_fs_meta_save.go +++ b/weed/shell/command_fs_meta_save.go @@ -168,7 +168,7 @@ func processOneDirectory(ctx context.Context, writer io.Writer, filerClient file parentPath filer2.FullPath, queue *util.Queue, jobQueueWg *sync.WaitGroup, fn func(parentPath filer2.FullPath, entry *filer_pb.Entry)) (err error) { - return filer2.ReadDirAllEntries(ctx, filerClient, string(parentPath), "", func(entry *filer_pb.Entry, isLast bool) { + return filer2.ReadDirAllEntries(ctx, filerClient, parentPath, "", func(entry *filer_pb.Entry, isLast bool) { fn(parentPath, entry) diff --git a/weed/shell/command_fs_tree.go 
b/weed/shell/command_fs_tree.go index a4524f341..8660030e3 100644 --- a/weed/shell/command_fs_tree.go +++ b/weed/shell/command_fs_tree.go @@ -39,7 +39,7 @@ func (c *commandFsTree) Do(args []string, commandEnv *CommandEnv, writer io.Writ ctx := context.Background() - dirCount, fCount, terr := treeTraverseDirectory(ctx, writer, commandEnv.getFilerClient(filerServer, filerPort), dir, name, newPrefix(), -1) + dirCount, fCount, terr := treeTraverseDirectory(ctx, writer, commandEnv.getFilerClient(filerServer, filerPort), filer2.FullPath(dir), name, newPrefix(), -1) if terr == nil { fmt.Fprintf(writer, "%d directories, %d files\n", dirCount, fCount) @@ -49,7 +49,7 @@ func (c *commandFsTree) Do(args []string, commandEnv *CommandEnv, writer io.Writ } -func treeTraverseDirectory(ctx context.Context, writer io.Writer, filerClient filer2.FilerClient, dir, name string, prefix *Prefix, level int) (directoryCount, fileCount int64, err error) { +func treeTraverseDirectory(ctx context.Context, writer io.Writer, filerClient filer2.FilerClient, dir filer2.FullPath, name string, prefix *Prefix, level int) (directoryCount, fileCount int64, err error) { prefix.addMarker(level) @@ -64,10 +64,7 @@ func treeTraverseDirectory(ctx context.Context, writer io.Writer, filerClient fi if entry.IsDirectory { directoryCount++ - subDir := fmt.Sprintf("%s/%s", dir, entry.Name) - if dir == "/" { - subDir = "/" + entry.Name - } + subDir := dir.Child(entry.Name) dirCount, fCount, terr := treeTraverseDirectory(ctx, writer, filerClient, subDir, "", prefix, level+1) directoryCount += dirCount fileCount += fCount From 630f72f8c577fba9ca11fee7694e0748af03fadf Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Mon, 20 Jan 2020 00:00:08 -0800 Subject: [PATCH 0042/2432] mount: able to trim a file --- weed/filesys/file.go | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/weed/filesys/file.go b/weed/filesys/file.go index 7e562eabc..622ba6f57 100644 --- a/weed/filesys/file.go +++ b/weed/filesys/file.go @@ -102,9 +102,18 @@ func (file *File) Setattr(ctx context.Context, req *fuse.SetattrRequest, resp *f if req.Valid.Size() { glog.V(3).Infof("%v file setattr set size=%v", file.fullpath(), req.Size) - if req.Size == 0 { + if req.Size < filer2.TotalSize(file.entry.Chunks) { // fmt.Printf("truncate %v \n", fullPath) - file.entry.Chunks = nil + var chunks []*filer_pb.FileChunk + for _, chunk := range file.entry.Chunks { + if uint64(chunk.Offset)+chunk.Size > req.Size { + chunk.Size = req.Size - uint64(chunk.Offset) + } + if chunk.Size > 0 { + chunks = append(chunks, chunk) + } + } + file.entry.Chunks = chunks file.entryViewCache = nil } file.entry.Attributes.FileSize = req.Size From a990ef2106a2571d0e2578eecdd856ee74986944 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Mon, 20 Jan 2020 20:21:01 -0800 Subject: [PATCH 0043/2432] mount: fix problems found in issue 1182 fix https://github.com/chrislusf/seaweedfs/issues/1182 always use the non-duplicated fs.Node Forget() the fs.Node Rename will also use the right fs.Node Avoid using the same file handle for the same file --- weed/filesys/dir.go | 45 +++++++++++++++++-------- weed/filesys/dir_rename.go | 28 +++++++++++++--- weed/filesys/dirty_page.go | 8 +++-- weed/filesys/file.go | 11 +++++- weed/filesys/wfs.go | 69 +++++++++++++++++++++++++------------- 5 files changed, 117 insertions(+), 44 deletions(-) diff --git a/weed/filesys/dir.go b/weed/filesys/dir.go index 076252051..7ad141ea5 100644 --- a/weed/filesys/dir.go +++ b/weed/filesys/dir.go @@ -3,7 +3,6 @@ package filesys 
import ( "context" "os" - "path" "time" "github.com/chrislusf/seaweedfs/weed/filer2" @@ -31,6 +30,7 @@ var _ = fs.NodeGetxattrer(&Dir{}) var _ = fs.NodeSetxattrer(&Dir{}) var _ = fs.NodeRemovexattrer(&Dir{}) var _ = fs.NodeListxattrer(&Dir{}) +var _ = fs.NodeForgetter(&Dir{}) func (dir *Dir) Attr(ctx context.Context, attr *fuse.Attr) error { @@ -86,14 +86,22 @@ func (dir *Dir) setRootDirAttributes(attr *fuse.Attr) { attr.BlockSize = 1024 * 1024 } -func (dir *Dir) newFile(name string, entry *filer_pb.Entry) *File { - return &File{ - Name: name, - dir: dir, - wfs: dir.wfs, - entry: entry, - entryViewCache: nil, - } +func (dir *Dir) newFile(name string, entry *filer_pb.Entry) fs.Node { + return dir.wfs.getNode(filer2.NewFullPath(dir.Path, name), func() fs.Node { + return &File{ + Name: name, + dir: dir, + wfs: dir.wfs, + entry: entry, + entryViewCache: nil, + } + }) +} + +func (dir *Dir) newDirectory(fullpath filer2.FullPath, entry *filer_pb.Entry) fs.Node { + return dir.wfs.getNode(fullpath, func() fs.Node { + return &Dir{Path: string(fullpath), wfs: dir.wfs, entry: entry} + }) } func (dir *Dir) Create(ctx context.Context, req *fuse.CreateRequest, @@ -130,7 +138,8 @@ func (dir *Dir) Create(ctx context.Context, req *fuse.CreateRequest, } } - file := dir.newFile(req.Name, request.Entry) + node := dir.newFile(req.Name, request.Entry) + file := node.(*File) if !request.Entry.IsDirectory { file.isOpen = true } @@ -169,7 +178,7 @@ func (dir *Dir) Mkdir(ctx context.Context, req *fuse.MkdirRequest) (fs.Node, err }) if err == nil { - node := &Dir{Path: path.Join(dir.Path, req.Name), wfs: dir.wfs} + node := dir.newDirectory(filer2.NewFullPath(dir.Path, req.Name), nil) return node, nil } @@ -197,12 +206,12 @@ func (dir *Dir) Lookup(ctx context.Context, req *fuse.LookupRequest, resp *fuse. 
if entry != nil { if entry.IsDirectory { - node = &Dir{Path: string(fullFilePath), wfs: dir.wfs, entry: entry} + node = dir.newDirectory(fullFilePath, entry) } else { node = dir.newFile(req.Name, entry) } - resp.EntryValid = time.Second + // resp.EntryValid = time.Second resp.Attr.Inode = fullFilePath.AsInode() resp.Attr.Valid = time.Second resp.Attr.Mtime = time.Unix(entry.Attributes.Mtime, 0) @@ -234,6 +243,7 @@ func (dir *Dir) ReadDirAll(ctx context.Context) (ret []fuse.Dirent, err error) { dirent := fuse.Dirent{Inode: inode, Name: entry.Name, Type: fuse.DT_File} ret = append(ret, dirent) } + glog.V(4).Infof("dir ReadDirAll : %s %+v", fullpath, entry) dir.wfs.cacheSet(fullpath, entry, cacheTtl) }) if readErr != nil { @@ -312,11 +322,12 @@ func (dir *Dir) removeFolder(ctx context.Context, req *fuse.RemoveRequest) error func (dir *Dir) Setattr(ctx context.Context, req *fuse.SetattrRequest, resp *fuse.SetattrResponse) error { + glog.V(3).Infof("%v dir setattr %+v", dir.Path, req) + if err := dir.maybeLoadEntry(ctx); err != nil { return err } - glog.V(3).Infof("%v dir setattr %+v, fh=%d", dir.Path, req, req.Handle) if req.Valid.Mode() { dir.entry.Attributes.FileMode = uint32(req.Mode) } @@ -391,6 +402,12 @@ func (dir *Dir) Listxattr(ctx context.Context, req *fuse.ListxattrRequest, resp } +func (dir *Dir) Forget() { + glog.V(3).Infof("Forget dir %s/%s", dir.Path) + + dir.wfs.forgetNode(filer2.FullPath(dir.Path)) +} + func (dir *Dir) maybeLoadEntry(ctx context.Context) error { if dir.entry == nil { parentDirPath, name := filer2.FullPath(dir.Path).DirAndName() diff --git a/weed/filesys/dir_rename.go b/weed/filesys/dir_rename.go index 6b68e4ee9..1bd1a6470 100644 --- a/weed/filesys/dir_rename.go +++ b/weed/filesys/dir_rename.go @@ -15,10 +15,7 @@ func (dir *Dir) Rename(ctx context.Context, req *fuse.RenameRequest, newDirector newDir := newDirectory.(*Dir) glog.V(4).Infof("dir Rename %s/%s => %s/%s", dir.Path, req.OldName, newDir.Path, req.NewName) - dir.wfs.cacheDelete(filer2.NewFullPath(newDir.Path, req.NewName)) - dir.wfs.cacheDelete(filer2.NewFullPath(dir.Path, req.OldName)) - - return dir.wfs.WithFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { + err := dir.wfs.WithFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { request := &filer_pb.AtomicRenameEntryRequest{ OldDirectory: dir.Path, @@ -36,4 +33,27 @@ func (dir *Dir) Rename(ctx context.Context, req *fuse.RenameRequest, newDirector return nil }) + + if err == nil { + oldPath := filer2.NewFullPath(dir.Path, req.OldName) + dir.wfs.cacheDelete(filer2.NewFullPath(newDir.Path, req.NewName)) + dir.wfs.cacheDelete(oldPath) + + oldFileNode := dir.wfs.getNode(oldPath, func() fs.Node { + return nil + }) + newDirNode := dir.wfs.getNode(filer2.FullPath(dir.Path), func() fs.Node { + return nil + }) + if oldFileNode != nil { + oldFile := oldFileNode.(*File) + oldFile.Name = req.NewName + if newDirNode != nil { + oldFile.dir = newDirNode.(*Dir) + } + } + dir.wfs.forgetNode(oldPath) + } + + return err } diff --git a/weed/filesys/dirty_page.go b/weed/filesys/dirty_page.go index 35d8f249a..f83944678 100644 --- a/weed/filesys/dirty_page.go +++ b/weed/filesys/dirty_page.go @@ -65,7 +65,7 @@ func (pages *ContinuousDirtyPages) AddPage(ctx context.Context, offset int64, da // or buffer is full if adding new data, // flush current buffer and add new data - // println("offset", offset, "size", len(data), "existing offset", pages.Offset, "size", pages.Size) + glog.V(4).Infof("offset=%d, size=%d, existing pages offset=%d, pages 
size=%d, data=%d", offset, len(data), pages.Offset, pages.Size, len(pages.Data)) if chunk, err = pages.saveExistingPagesToStorage(ctx); err == nil { if chunk != nil { @@ -77,6 +77,7 @@ func (pages *ContinuousDirtyPages) AddPage(ctx context.Context, offset int64, da return } pages.Offset = offset + glog.V(4).Infof("copy data0: offset=%d, size=%d, existing pages offset=%d, pages size=%d, data=%d", offset, len(data), pages.Offset, pages.Size, len(pages.Data)) copy(pages.Data, data) pages.Size = int64(len(data)) return @@ -86,7 +87,7 @@ func (pages *ContinuousDirtyPages) AddPage(ctx context.Context, offset int64, da // when this happens, debug shows the data overlapping with existing data is empty // the data is not just append if offset == pages.Offset && int(pages.Size) < len(data) { - // glog.V(2).Infof("pages[%d,%d) pages.Data len=%v, data len=%d, pages.Size=%d", pages.Offset, pages.Offset+pages.Size, len(pages.Data), len(data), pages.Size) + glog.V(4).Infof("copy data1: offset=%d, size=%d, existing pages offset=%d, pages size=%d, data=%d", offset, len(data), pages.Offset, pages.Size, len(pages.Data)) copy(pages.Data[pages.Size:], data[pages.Size:]) } else { if pages.Size != 0 { @@ -95,6 +96,7 @@ func (pages *ContinuousDirtyPages) AddPage(ctx context.Context, offset int64, da return pages.flushAndSave(ctx, offset, data) } } else { + glog.V(4).Infof("copy data2: offset=%d, size=%d, existing pages offset=%d, pages size=%d, data=%d", offset, len(data), pages.Offset, pages.Size, len(pages.Data)) copy(pages.Data[offset-pages.Offset:], data) } @@ -159,6 +161,8 @@ func (pages *ContinuousDirtyPages) saveExistingPagesToStorage(ctx context.Contex return nil, nil } + glog.V(0).Infof("%s/%s saveExistingPagesToStorage [%d,%d): Data len=%d", pages.f.dir.Path, pages.f.Name, pages.Offset, pages.Size, len(pages.Data)) + return pages.saveToStorage(ctx, pages.Data[:pages.Size], pages.Offset) } diff --git a/weed/filesys/file.go b/weed/filesys/file.go index 622ba6f57..d811cb179 100644 --- a/weed/filesys/file.go +++ b/weed/filesys/file.go @@ -23,6 +23,7 @@ var _ = fs.NodeGetxattrer(&File{}) var _ = fs.NodeSetxattrer(&File{}) var _ = fs.NodeRemovexattrer(&File{}) var _ = fs.NodeListxattrer(&File{}) +var _ = fs.NodeForgetter(&File{}) type File struct { Name string @@ -94,11 +95,12 @@ func (file *File) Open(ctx context.Context, req *fuse.OpenRequest, resp *fuse.Op func (file *File) Setattr(ctx context.Context, req *fuse.SetattrRequest, resp *fuse.SetattrResponse) error { + glog.V(3).Infof("%v file setattr %+v, old:%+v", file.fullpath(), req, file.entry.Attributes) + if err := file.maybeLoadEntry(ctx); err != nil { return err } - glog.V(3).Infof("%v file setattr %+v, old:%+v", file.fullpath(), req, file.entry.Attributes) if req.Valid.Size() { glog.V(3).Infof("%v file setattr set size=%v", file.fullpath(), req.Size) @@ -208,6 +210,13 @@ func (file *File) Fsync(ctx context.Context, req *fuse.FsyncRequest) error { return nil } +func (file *File) Forget() { + glog.V(3).Infof("Forget file %s/%s", file.dir.Path, file.Name) + + file.wfs.forgetNode(filer2.NewFullPath(file.dir.Path, file.Name)) + +} + func (file *File) maybeLoadEntry(ctx context.Context) error { if file.entry == nil || !file.isOpen { entry, err := file.wfs.maybeLoadEntry(ctx, file.dir.Path, file.Name) diff --git a/weed/filesys/wfs.go b/weed/filesys/wfs.go index d3cc6329d..a2e5a9073 100644 --- a/weed/filesys/wfs.go +++ b/weed/filesys/wfs.go @@ -46,13 +46,18 @@ type WFS struct { option *Option listDirectoryEntriesCache *ccache.Cache - // contains all open 
handles - handles []*FileHandle - pathToHandleIndex map[filer2.FullPath]int - pathToHandleLock sync.Mutex - bufPool sync.Pool + // contains all open handles, protected by handlesLock + handlesLock sync.Mutex + handles []*FileHandle + + bufPool sync.Pool stats statsCache + + // nodes, protected by nodesLock + nodesLock sync.Mutex + nodes map[uint64]fs.Node + root fs.Node } type statsCache struct { filer_pb.StatisticsResponse @@ -63,19 +68,21 @@ func NewSeaweedFileSystem(option *Option) *WFS { wfs := &WFS{ option: option, listDirectoryEntriesCache: ccache.New(ccache.Configure().MaxSize(option.DirListCacheLimit * 3).ItemsToPrune(100)), - pathToHandleIndex: make(map[filer2.FullPath]int), bufPool: sync.Pool{ New: func() interface{} { return make([]byte, option.ChunkSizeLimit) }, }, + nodes: make(map[uint64]fs.Node), } + wfs.root = &Dir{Path: wfs.option.FilerMountRootPath, wfs: wfs} + return wfs } func (wfs *WFS) Root() (fs.Node, error) { - return &Dir{Path: wfs.option.FilerMountRootPath, wfs: wfs}, nil + return wfs.root, nil } func (wfs *WFS) WithFilerClient(ctx context.Context, fn func(filer_pb.SeaweedFilerClient) error) error { @@ -88,42 +95,35 @@ func (wfs *WFS) WithFilerClient(ctx context.Context, fn func(filer_pb.SeaweedFil } func (wfs *WFS) AcquireHandle(file *File, uid, gid uint32) (fileHandle *FileHandle) { - wfs.pathToHandleLock.Lock() - defer wfs.pathToHandleLock.Unlock() fullpath := file.fullpath() + glog.V(4).Infof("%s AcquireHandle uid=%d gid=%d", fullpath, uid, gid) - index, found := wfs.pathToHandleIndex[fullpath] - if found && wfs.handles[index] != nil { - glog.V(2).Infoln(fullpath, "found fileHandle id", index) - return wfs.handles[index] - } + wfs.handlesLock.Lock() + defer wfs.handlesLock.Unlock() fileHandle = newFileHandle(file, uid, gid) for i, h := range wfs.handles { if h == nil { wfs.handles[i] = fileHandle fileHandle.handle = uint64(i) - wfs.pathToHandleIndex[fullpath] = i - glog.V(4).Infoln(fullpath, "reuse fileHandle id", fileHandle.handle) + glog.V(4).Infof( "%s reuse fh %d", fullpath,fileHandle.handle) return } } wfs.handles = append(wfs.handles, fileHandle) fileHandle.handle = uint64(len(wfs.handles) - 1) - glog.V(2).Infoln(fullpath, "new fileHandle id", fileHandle.handle) - wfs.pathToHandleIndex[fullpath] = int(fileHandle.handle) + glog.V(4).Infof( "%s new fh %d", fullpath,fileHandle.handle) return } func (wfs *WFS) ReleaseHandle(fullpath filer2.FullPath, handleId fuse.HandleID) { - wfs.pathToHandleLock.Lock() - defer wfs.pathToHandleLock.Unlock() + wfs.handlesLock.Lock() + defer wfs.handlesLock.Unlock() - glog.V(4).Infof("%s releasing handle id %d current handles length %d", fullpath, handleId, len(wfs.handles)) - delete(wfs.pathToHandleIndex, fullpath) + glog.V(4).Infof("%s ReleaseHandle id %d current handles length %d", fullpath, handleId, len(wfs.handles)) if int(handleId) < len(wfs.handles) { wfs.handles[int(handleId)] = nil } @@ -203,10 +203,33 @@ func (wfs *WFS) cacheGet(path filer2.FullPath) *filer_pb.Entry { func (wfs *WFS) cacheSet(path filer2.FullPath, entry *filer_pb.Entry, ttl time.Duration) { if entry == nil { wfs.listDirectoryEntriesCache.Delete(string(path)) - }else{ + } else { wfs.listDirectoryEntriesCache.Set(string(path), entry, ttl) } } func (wfs *WFS) cacheDelete(path filer2.FullPath) { wfs.listDirectoryEntriesCache.Delete(string(path)) } + +func (wfs *WFS) getNode(fullpath filer2.FullPath, fn func() fs.Node) fs.Node { + wfs.nodesLock.Lock() + defer wfs.nodesLock.Unlock() + + node, found := wfs.nodes[fullpath.AsInode()] + if found { + return 
node + } + node = fn() + if node != nil { + wfs.nodes[fullpath.AsInode()] = node + } + return node +} + +func (wfs *WFS) forgetNode(fullpath filer2.FullPath) { + wfs.nodesLock.Lock() + defer wfs.nodesLock.Unlock() + + delete(wfs.nodes, fullpath.AsInode()) + +} From fca6152c3a4a55248fe37a2e06198d27422bef5f Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Mon, 20 Jan 2020 20:24:23 -0800 Subject: [PATCH 0044/2432] fix print format --- weed/filesys/dir.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/weed/filesys/dir.go b/weed/filesys/dir.go index 7ad141ea5..7ed638e0e 100644 --- a/weed/filesys/dir.go +++ b/weed/filesys/dir.go @@ -403,7 +403,7 @@ func (dir *Dir) Listxattr(ctx context.Context, req *fuse.ListxattrRequest, resp } func (dir *Dir) Forget() { - glog.V(3).Infof("Forget dir %s/%s", dir.Path) + glog.V(3).Infof("Forget dir %s", dir.Path) dir.wfs.forgetNode(filer2.FullPath(dir.Path)) } From bb1be616023ebfa4307b484ddcda3dc2720ce14c Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Mon, 20 Jan 2020 20:30:03 -0800 Subject: [PATCH 0045/2432] 1.51 --- weed/util/constants.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/weed/util/constants.go b/weed/util/constants.go index 99cd29d87..d06cfb8b8 100644 --- a/weed/util/constants.go +++ b/weed/util/constants.go @@ -5,5 +5,5 @@ import ( ) var ( - VERSION = fmt.Sprintf("%s %d.%d", sizeLimit, 1, 50) + VERSION = fmt.Sprintf("%s %d.%d", sizeLimit, 1, 51) ) From c8b2dac6c1d29bab2f96c82ad30b7045b61ab8e6 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Tue, 21 Jan 2020 21:18:01 -0800 Subject: [PATCH 0046/2432] volume: avoid sharing volume dat file handle possibly help on https://github.com/chrislusf/seaweedfs/issues/1184 --- weed/storage/volume_vacuum.go | 31 ++++++++++++++++++------------- 1 file changed, 18 insertions(+), 13 deletions(-) diff --git a/weed/storage/volume_vacuum.go b/weed/storage/volume_vacuum.go index 09bf36f7a..0ca9016c8 100644 --- a/weed/storage/volume_vacuum.go +++ b/weed/storage/volume_vacuum.go @@ -72,7 +72,7 @@ func (v *Volume) Compact2(preallocate int64) error { v.lastCompactIndexOffset = v.IndexFileSize() v.lastCompactRevision = v.SuperBlock.CompactionRevision glog.V(3).Infof("creating copies for volume %d ...", v.Id) - return v.copyDataBasedOnIndexFile(filePath+".cpd", filePath+".cpx", preallocate) + return copyDataBasedOnIndexFile(filePath+".dat", filePath+".idx", filePath+".cpd", filePath+".cpx", v.SuperBlock, v.Version(), preallocate) } func (v *Volume) CommitCompact() error { @@ -353,26 +353,31 @@ func (v *Volume) copyDataAndGenerateIndexFile(dstName, idxName string, prealloca return } -func (v *Volume) copyDataBasedOnIndexFile(dstName, idxName string, preallocate int64) (err error) { +func copyDataBasedOnIndexFile(srcDatName, srcIdxName, dstDatName, datIdxName string, sb super_block.SuperBlock, version needle.Version, preallocate int64) (err error) { var ( - dstDatBackend backend.BackendStorageFile + srcDatBackend, dstDatBackend backend.BackendStorageFile + dataFile *os.File ) - if dstDatBackend, err = createVolumeFile(dstName, preallocate, 0); err != nil { + if dstDatBackend, err = createVolumeFile(dstDatName, preallocate, 0); err != nil { return } defer dstDatBackend.Close() oldNm := needle_map.NewMemDb() newNm := needle_map.NewMemDb() - if err = oldNm.LoadFromIdx(v.FileName()+".idx"); err != nil { + if err = oldNm.LoadFromIdx(srcIdxName); err != nil { return } + if dataFile, err = os.Open(srcDatName); err != nil { + return err + } + srcDatBackend = backend.NewDiskFile(dataFile) now := 
uint64(time.Now().Unix()) - v.SuperBlock.CompactionRevision++ - dstDatBackend.WriteAt(v.SuperBlock.Bytes(), 0) - newOffset := int64(v.SuperBlock.BlockSize()) + sb.CompactionRevision++ + dstDatBackend.WriteAt(sb.Bytes(), 0) + newOffset := int64(sb.BlockSize()) oldNm.AscendingVisit(func(value needle_map.NeedleValue) error { @@ -383,28 +388,28 @@ func (v *Volume) copyDataBasedOnIndexFile(dstName, idxName string, preallocate i } n := new(needle.Needle) - err := n.ReadData(v.DataBackend, offset.ToAcutalOffset(), size, v.Version()) + err := n.ReadData(srcDatBackend, offset.ToAcutalOffset(), size, version) if err != nil { return nil } - if n.HasTtl() && now >= n.LastModified+uint64(v.Ttl.Minutes()*60) { + if n.HasTtl() && now >= n.LastModified+uint64(sb.Ttl.Minutes()*60) { return nil } if err = newNm.Set(n.Id, ToOffset(newOffset), n.Size); err != nil { return fmt.Errorf("cannot put needle: %s", err) } - if _, _, _, err = n.Append(dstDatBackend, v.Version()); err != nil { + if _, _, _, err = n.Append(dstDatBackend, sb.Version); err != nil { return fmt.Errorf("cannot append needle: %s", err) } - newOffset += n.DiskSize(v.Version()) + newOffset += n.DiskSize(version) glog.V(3).Infoln("saving key", n.Id, "volume offset", offset, "=>", newOffset, "data_size", n.Size) return nil }) - newNm.SaveToIdx(idxName) + newNm.SaveToIdx(datIdxName) return } From 9b01a99d9a93b2502746fe6870945823be48cc5f Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Tue, 21 Jan 2020 22:45:50 -0800 Subject: [PATCH 0047/2432] adjust logging --- weed/server/common.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/weed/server/common.go b/weed/server/common.go index 6828e9dc5..31a9a73b8 100644 --- a/weed/server/common.go +++ b/weed/server/common.go @@ -77,7 +77,8 @@ func writeJson(w http.ResponseWriter, r *http.Request, httpStatus int, obj inter // wrapper for writeJson - just logs errors func writeJsonQuiet(w http.ResponseWriter, r *http.Request, httpStatus int, obj interface{}) { if err := writeJson(w, r, httpStatus, obj); err != nil { - glog.V(0).Infof("error writing JSON %+v status %d: %v", obj, httpStatus, err) + glog.V(0).Infof("error writing JSON status %d: %v", httpStatus, err) + glog.V(1).Infof("JSON content: %+v", obj) } } func writeJsonError(w http.ResponseWriter, r *http.Request, httpStatus int, err error) { From d4e75a0d183b57180b2ff0be2531db540c0c9aa6 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Wed, 22 Jan 2020 11:42:40 -0800 Subject: [PATCH 0048/2432] filer: option to create file only if it is new, O_EXCL --- other/java/client/src/main/proto/filer.proto | 1 + weed/filer2/filer.go | 5 +- weed/filer2/leveldb/leveldb_store_test.go | 2 +- weed/filer2/leveldb2/leveldb2_store_test.go | 2 +- weed/filesys/dir.go | 30 +-- weed/pb/filer.proto | 1 + weed/pb/filer_pb/filer.pb.go | 213 +++++++++--------- weed/server/filer_grpc_server.go | 2 +- weed/server/filer_grpc_server_rename.go | 2 +- weed/server/filer_server_handlers_write.go | 2 +- .../filer_server_handlers_write_autochunk.go | 2 +- 11 files changed, 140 insertions(+), 122 deletions(-) diff --git a/other/java/client/src/main/proto/filer.proto b/other/java/client/src/main/proto/filer.proto index ef847cbe7..41c1650d4 100644 --- a/other/java/client/src/main/proto/filer.proto +++ b/other/java/client/src/main/proto/filer.proto @@ -123,6 +123,7 @@ message FuseAttributes { message CreateEntryRequest { string directory = 1; Entry entry = 2; + bool o_excl = 3; } message CreateEntryResponse { diff --git a/weed/filer2/filer.go b/weed/filer2/filer.go index 
b724e20fd..96a010fce 100644 --- a/weed/filer2/filer.go +++ b/weed/filer2/filer.go @@ -72,7 +72,7 @@ func (f *Filer) RollbackTransaction(ctx context.Context) error { return f.store.RollbackTransaction(ctx) } -func (f *Filer) CreateEntry(ctx context.Context, entry *Entry) error { +func (f *Filer) CreateEntry(ctx context.Context, entry *Entry, o_excl bool) error { if string(entry.FullPath) == "/" { return nil @@ -160,6 +160,9 @@ func (f *Filer) CreateEntry(ctx context.Context, entry *Entry) error { return fmt.Errorf("insert entry %s: %v", entry.FullPath, err) } } else { + if o_excl { + return fmt.Errorf("EEXIST: entry %s already exists", entry.FullPath) + } if err := f.UpdateEntry(ctx, oldEntry, entry); err != nil { glog.Errorf("update entry %s: %v", entry.FullPath, err) return fmt.Errorf("update entry %s: %v", entry.FullPath, err) diff --git a/weed/filer2/leveldb/leveldb_store_test.go b/weed/filer2/leveldb/leveldb_store_test.go index 904de8c97..983e1cbe9 100644 --- a/weed/filer2/leveldb/leveldb_store_test.go +++ b/weed/filer2/leveldb/leveldb_store_test.go @@ -30,7 +30,7 @@ func TestCreateAndFind(t *testing.T) { }, } - if err := filer.CreateEntry(ctx, entry1); err != nil { + if err := filer.CreateEntry(ctx, entry1, false); err != nil { t.Errorf("create entry %v: %v", entry1.FullPath, err) return } diff --git a/weed/filer2/leveldb2/leveldb2_store_test.go b/weed/filer2/leveldb2/leveldb2_store_test.go index e28ef7dac..58637b7b6 100644 --- a/weed/filer2/leveldb2/leveldb2_store_test.go +++ b/weed/filer2/leveldb2/leveldb2_store_test.go @@ -30,7 +30,7 @@ func TestCreateAndFind(t *testing.T) { }, } - if err := filer.CreateEntry(ctx, entry1); err != nil { + if err := filer.CreateEntry(ctx, entry1, false); err != nil { t.Errorf("create entry %v: %v", entry1.FullPath, err) return } diff --git a/weed/filesys/dir.go b/weed/filesys/dir.go index 7ed638e0e..3c1672911 100644 --- a/weed/filesys/dir.go +++ b/weed/filesys/dir.go @@ -3,6 +3,7 @@ package filesys import ( "context" "os" + "strings" "time" "github.com/chrislusf/seaweedfs/weed/filer2" @@ -123,26 +124,29 @@ func (dir *Dir) Create(ctx context.Context, req *fuse.CreateRequest, TtlSec: dir.wfs.option.TtlSec, }, }, + OExcl: req.Flags&fuse.OpenExclusive != 0, } - glog.V(1).Infof("create: %v", request) + glog.V(1).Infof("create: %v", req.String()) - if request.Entry.IsDirectory { - if err := dir.wfs.WithFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { - if _, err := client.CreateEntry(ctx, request); err != nil { - glog.V(0).Infof("create %s/%s: %v", dir.Path, req.Name, err) - return fuse.EIO + if err := dir.wfs.WithFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { + if _, err := client.CreateEntry(ctx, request); err != nil { + glog.V(0).Infof("create %s/%s: %v", dir.Path, req.Name, err) + if strings.Contains(err.Error(), "EEXIST") { + return fuse.EEXIST } - return nil - }); err != nil { - return nil, nil, err + return fuse.EIO } + return nil + }); err != nil { + return nil, nil, err } - node := dir.newFile(req.Name, request.Entry) - file := node.(*File) - if !request.Entry.IsDirectory { - file.isOpen = true + if request.Entry.IsDirectory { + return node, nil, nil } + + file := node.(*File) + file.isOpen = true fh := dir.wfs.AcquireHandle(file, req.Uid, req.Gid) fh.dirtyMetadata = true return file, fh, nil diff --git a/weed/pb/filer.proto b/weed/pb/filer.proto index ef847cbe7..41c1650d4 100644 --- a/weed/pb/filer.proto +++ b/weed/pb/filer.proto @@ -123,6 +123,7 @@ message FuseAttributes { message CreateEntryRequest { string 
directory = 1; Entry entry = 2; + bool o_excl = 3; } message CreateEntryResponse { diff --git a/weed/pb/filer_pb/filer.pb.go b/weed/pb/filer_pb/filer.pb.go index c8214aa94..043a46504 100644 --- a/weed/pb/filer_pb/filer.pb.go +++ b/weed/pb/filer_pb/filer.pb.go @@ -497,6 +497,7 @@ func (m *FuseAttributes) GetSymlinkTarget() string { type CreateEntryRequest struct { Directory string `protobuf:"bytes,1,opt,name=directory" json:"directory,omitempty"` Entry *Entry `protobuf:"bytes,2,opt,name=entry" json:"entry,omitempty"` + OExcl bool `protobuf:"varint,3,opt,name=o_excl,json=oExcl" json:"o_excl,omitempty"` } func (m *CreateEntryRequest) Reset() { *m = CreateEntryRequest{} } @@ -518,6 +519,13 @@ func (m *CreateEntryRequest) GetEntry() *Entry { return nil } +func (m *CreateEntryRequest) GetOExcl() bool { + if m != nil { + return m.OExcl + } + return false +} + type CreateEntryResponse struct { } @@ -1457,106 +1465,107 @@ var _SeaweedFiler_serviceDesc = grpc.ServiceDesc{ func init() { proto.RegisterFile("filer.proto", fileDescriptor0) } var fileDescriptor0 = []byte{ - // 1603 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xb4, 0x58, 0xcd, 0x6f, 0xdc, 0x44, - 0x14, 0x8f, 0xf7, 0xdb, 0x6f, 0x77, 0xdb, 0x64, 0x92, 0xb6, 0xdb, 0xcd, 0x07, 0xa9, 0x43, 0x4b, - 0x10, 0x55, 0xa8, 0x42, 0x0f, 0x2d, 0x85, 0x43, 0x9b, 0x0f, 0x14, 0x91, 0x7e, 0xc8, 0x69, 0x11, - 0x08, 0x09, 0xcb, 0xb1, 0x67, 0x37, 0x43, 0x6c, 0xcf, 0x32, 0x1e, 0x27, 0x29, 0x7f, 0x02, 0x47, - 0x8e, 0x48, 0x9c, 0xf9, 0x27, 0x10, 0x17, 0x84, 0xf8, 0x6f, 0x38, 0x72, 0x46, 0x33, 0x63, 0x7b, - 0xc7, 0xeb, 0x4d, 0xd2, 0x0a, 0xf5, 0xe6, 0x79, 0xdf, 0xef, 0xcd, 0x7b, 0xbf, 0x37, 0xbb, 0xd0, - 0x1e, 0x90, 0x00, 0xb3, 0x8d, 0x11, 0xa3, 0x9c, 0xa2, 0x96, 0x3c, 0x38, 0xa3, 0x43, 0xeb, 0x39, - 0x2c, 0xee, 0x53, 0x7a, 0x9c, 0x8c, 0xb6, 0x09, 0xc3, 0x1e, 0xa7, 0xec, 0xf5, 0x4e, 0xc4, 0xd9, - 0x6b, 0x1b, 0xff, 0x90, 0xe0, 0x98, 0xa3, 0x25, 0x30, 0xfd, 0x8c, 0xd1, 0x33, 0x56, 0x8d, 0x75, - 0xd3, 0x1e, 0x13, 0x10, 0x82, 0x5a, 0xe4, 0x86, 0xb8, 0x57, 0x91, 0x0c, 0xf9, 0x6d, 0xed, 0xc0, - 0xd2, 0x74, 0x83, 0xf1, 0x88, 0x46, 0x31, 0x46, 0xb7, 0xa1, 0x8e, 0x05, 0x41, 0x5a, 0x6b, 0x6f, - 0x5e, 0xdd, 0xc8, 0x42, 0xd9, 0x50, 0x72, 0x8a, 0x6b, 0xfd, 0x61, 0x00, 0xda, 0x27, 0x31, 0x17, - 0x44, 0x82, 0xe3, 0x37, 0x8b, 0xe7, 0x3a, 0x34, 0x46, 0x0c, 0x0f, 0xc8, 0x59, 0x1a, 0x51, 0x7a, - 0x42, 0x77, 0x61, 0x2e, 0xe6, 0x2e, 0xe3, 0xbb, 0x8c, 0x86, 0xbb, 0x24, 0xc0, 0xcf, 0x44, 0xd0, - 0x55, 0x29, 0x52, 0x66, 0xa0, 0x0d, 0x40, 0x24, 0xf2, 0x82, 0x24, 0x26, 0x27, 0xf8, 0x20, 0xe3, - 0xf6, 0x6a, 0xab, 0xc6, 0x7a, 0xcb, 0x9e, 0xc2, 0x41, 0x0b, 0x50, 0x0f, 0x48, 0x48, 0x78, 0xaf, - 0xbe, 0x6a, 0xac, 0x77, 0x6d, 0x75, 0xb0, 0x3e, 0x83, 0xf9, 0x42, 0xfc, 0x6f, 0x97, 0xfe, 0xaf, - 0x15, 0xa8, 0x4b, 0x42, 0x5e, 0x63, 0x63, 0x5c, 0x63, 0x74, 0x0b, 0x3a, 0x24, 0x76, 0xc6, 0x85, - 0xa8, 0xc8, 0xd8, 0xda, 0x24, 0xce, 0x6b, 0x8e, 0x3e, 0x82, 0x86, 0x77, 0x94, 0x44, 0xc7, 0x71, - 0xaf, 0xba, 0x5a, 0x5d, 0x6f, 0x6f, 0xce, 0x8f, 0x1d, 0x89, 0x44, 0xb7, 0x04, 0xcf, 0x4e, 0x45, - 0xd0, 0x03, 0x00, 0x97, 0x73, 0x46, 0x0e, 0x13, 0x8e, 0x63, 0x99, 0x69, 0x7b, 0xb3, 0xa7, 0x29, - 0x24, 0x31, 0x7e, 0x9c, 0xf3, 0x6d, 0x4d, 0x16, 0x3d, 0x84, 0x16, 0x3e, 0xe3, 0x38, 0xf2, 0xb1, - 0xdf, 0xab, 0x4b, 0x47, 0xcb, 0x13, 0x19, 0x6d, 0xec, 0xa4, 0x7c, 0x95, 0x5f, 0x2e, 0xde, 0x7f, - 0x04, 0xdd, 0x02, 0x0b, 0xcd, 0x42, 0xf5, 0x18, 0x67, 0xb7, 0x2a, 0x3e, 0x45, 0x65, 0x4f, 0xdc, - 0x20, 0x51, 0x0d, 0xd6, 0xb1, 0xd5, 0xe1, 0xd3, 0xca, 0x03, 0xc3, 0xda, 0x06, 
0x73, 0x37, 0x09, - 0x82, 0x5c, 0xd1, 0x27, 0x2c, 0x53, 0xf4, 0x09, 0x1b, 0x57, 0xb9, 0x72, 0x61, 0x95, 0x7f, 0x37, - 0x60, 0x6e, 0xe7, 0x04, 0x47, 0xfc, 0x19, 0xe5, 0x64, 0x40, 0x3c, 0x97, 0x13, 0x1a, 0xa1, 0xbb, - 0x60, 0xd2, 0xc0, 0x77, 0x2e, 0xbc, 0xa6, 0x16, 0x0d, 0xd2, 0xa8, 0xef, 0x82, 0x19, 0xe1, 0x53, - 0xe7, 0x42, 0x77, 0xad, 0x08, 0x9f, 0x2a, 0xe9, 0x35, 0xe8, 0xfa, 0x38, 0xc0, 0x1c, 0x3b, 0xf9, - 0xed, 0x88, 0xab, 0xeb, 0x28, 0xe2, 0x96, 0xba, 0x8e, 0x3b, 0x70, 0x55, 0x98, 0x1c, 0xb9, 0x0c, - 0x47, 0xdc, 0x19, 0xb9, 0xfc, 0x48, 0xde, 0x89, 0x69, 0x77, 0x23, 0x7c, 0xfa, 0x42, 0x52, 0x5f, - 0xb8, 0xfc, 0xc8, 0xfa, 0xd7, 0x00, 0x33, 0xbf, 0x4c, 0x74, 0x03, 0x9a, 0xc2, 0xad, 0x43, 0xfc, - 0xb4, 0x12, 0x0d, 0x71, 0xdc, 0xf3, 0xc5, 0x54, 0xd0, 0xc1, 0x20, 0xc6, 0x5c, 0x86, 0x57, 0xb5, - 0xd3, 0x93, 0xe8, 0xac, 0x98, 0xfc, 0xa8, 0x06, 0xa1, 0x66, 0xcb, 0x6f, 0x51, 0xf1, 0x90, 0x93, - 0x10, 0x4b, 0x87, 0x55, 0x5b, 0x1d, 0xd0, 0x3c, 0xd4, 0xb1, 0xc3, 0xdd, 0xa1, 0xec, 0x70, 0xd3, - 0xae, 0xe1, 0x97, 0xee, 0x10, 0xbd, 0x0f, 0x57, 0x62, 0x9a, 0x30, 0x0f, 0x3b, 0x99, 0xdb, 0x86, - 0xe4, 0x76, 0x14, 0x75, 0x57, 0x39, 0xb7, 0xa0, 0x3a, 0x20, 0x7e, 0xaf, 0x29, 0x0b, 0x33, 0x5b, - 0x6c, 0xc2, 0x3d, 0xdf, 0x16, 0x4c, 0xf4, 0x31, 0x40, 0x6e, 0xc9, 0xef, 0xb5, 0xce, 0x11, 0x35, - 0x33, 0xbb, 0xbe, 0xf5, 0x35, 0x34, 0x52, 0xf3, 0x8b, 0x60, 0x9e, 0xd0, 0x20, 0x09, 0xf3, 0xb4, - 0xbb, 0x76, 0x4b, 0x11, 0xf6, 0x7c, 0x74, 0x13, 0x24, 0xce, 0x39, 0xa2, 0xab, 0x2a, 0x32, 0x49, - 0x59, 0xa1, 0x2f, 0xb1, 0x44, 0x0a, 0x8f, 0xd2, 0x63, 0xa2, 0xb2, 0x6f, 0xda, 0xe9, 0xc9, 0xfa, - 0xa7, 0x02, 0x57, 0x8a, 0xed, 0x2e, 0x5c, 0x48, 0x2b, 0xb2, 0x56, 0x86, 0x34, 0x23, 0xcd, 0x1e, - 0x14, 0xea, 0x55, 0xd1, 0xeb, 0x95, 0xa9, 0x84, 0xd4, 0x57, 0x0e, 0xba, 0x4a, 0xe5, 0x29, 0xf5, - 0xb1, 0xe8, 0xd6, 0x84, 0xf8, 0xb2, 0xc0, 0x5d, 0x5b, 0x7c, 0x0a, 0xca, 0x90, 0xf8, 0x29, 0x7c, - 0x88, 0x4f, 0x19, 0x1e, 0x93, 0x76, 0x1b, 0xea, 0xca, 0xd4, 0x49, 0x5c, 0x59, 0x28, 0xa8, 0x4d, - 0x75, 0x0f, 0xe2, 0x1b, 0xad, 0x42, 0x9b, 0xe1, 0x51, 0x90, 0x76, 0xaf, 0x2c, 0x9f, 0x69, 0xeb, - 0x24, 0xb4, 0x02, 0xe0, 0xd1, 0x20, 0xc0, 0x9e, 0x14, 0x30, 0xa5, 0x80, 0x46, 0x11, 0x9d, 0xc3, - 0x79, 0xe0, 0xc4, 0xd8, 0xeb, 0xc1, 0xaa, 0xb1, 0x5e, 0xb7, 0x1b, 0x9c, 0x07, 0x07, 0xd8, 0x13, - 0x79, 0x24, 0x31, 0x66, 0x8e, 0x04, 0xa0, 0xb6, 0xd4, 0x6b, 0x09, 0x82, 0x84, 0xc9, 0x65, 0x80, - 0x21, 0xa3, 0xc9, 0x48, 0x71, 0x3b, 0xab, 0x55, 0x81, 0xc5, 0x92, 0x22, 0xd9, 0xb7, 0xe1, 0x4a, - 0xfc, 0x3a, 0x0c, 0x48, 0x74, 0xec, 0x70, 0x97, 0x0d, 0x31, 0xef, 0x75, 0x55, 0x0f, 0xa7, 0xd4, - 0x97, 0x92, 0x68, 0x7d, 0x03, 0x68, 0x8b, 0x61, 0x97, 0xe3, 0xb7, 0x58, 0x3b, 0x6f, 0x38, 0xdd, - 0xd7, 0x60, 0xbe, 0x60, 0x5a, 0x21, 0xb0, 0xf0, 0xf8, 0x6a, 0xe4, 0xbf, 0x2b, 0x8f, 0x05, 0xd3, - 0xa9, 0xc7, 0xbf, 0x0c, 0x40, 0xdb, 0x72, 0xc0, 0xff, 0xdf, 0x6e, 0x15, 0x23, 0x27, 0x70, 0x5f, - 0x01, 0x88, 0xef, 0x72, 0x37, 0xdd, 0x4a, 0x1d, 0x12, 0x2b, 0xfb, 0xdb, 0x2e, 0x77, 0xd3, 0xed, - 0xc0, 0xb0, 0x97, 0x30, 0xb1, 0xa8, 0x64, 0x5f, 0xc9, 0xed, 0x60, 0x67, 0x24, 0x74, 0x1f, 0xae, - 0x93, 0x61, 0x44, 0x19, 0x1e, 0x8b, 0x39, 0x98, 0x31, 0xca, 0x64, 0xbf, 0xb5, 0xec, 0x05, 0xc5, - 0xcd, 0x15, 0x76, 0x04, 0x4f, 0xa4, 0x57, 0x48, 0x23, 0x4d, 0xef, 0x17, 0x03, 0x7a, 0x8f, 0x39, - 0x0d, 0x89, 0x67, 0x63, 0x11, 0x66, 0x21, 0xc9, 0x35, 0xe8, 0x0a, 0x30, 0x9d, 0x4c, 0xb4, 0x43, - 0x03, 0x7f, 0xbc, 0xac, 0x6e, 0x82, 0xc0, 0x53, 0x47, 0xcb, 0xb7, 0x49, 0x03, 0x5f, 0xb6, 0xd1, - 0x1a, 0x08, 0xd0, 0xd3, 0xf4, 0xd5, 0xda, 0xee, 0x44, 0xf8, 0xb4, 0xa0, 0x2f, 0x84, 0xa4, 0xbe, - 0x42, 
0xca, 0x66, 0x84, 0x4f, 0x85, 0xbe, 0xb5, 0x08, 0x37, 0xa7, 0xc4, 0x96, 0x46, 0xfe, 0x9b, - 0x01, 0xf3, 0x8f, 0xe3, 0x98, 0x0c, 0xa3, 0xaf, 0x24, 0x66, 0x64, 0x41, 0x2f, 0x40, 0xdd, 0xa3, - 0x49, 0xc4, 0x65, 0xb0, 0x75, 0x5b, 0x1d, 0x26, 0xc6, 0xa8, 0x52, 0x1a, 0xa3, 0x89, 0x41, 0xac, - 0x96, 0x07, 0x51, 0x1b, 0xb4, 0x5a, 0x61, 0xd0, 0xde, 0x83, 0xb6, 0xb8, 0x4e, 0xc7, 0xc3, 0x11, - 0xc7, 0x2c, 0x85, 0x59, 0x10, 0xa4, 0x2d, 0x49, 0xb1, 0x7e, 0x32, 0x60, 0xa1, 0x18, 0x69, 0xfa, - 0x9e, 0x38, 0x17, 0xf5, 0x05, 0xcc, 0xb0, 0x20, 0x0d, 0x53, 0x7c, 0x8a, 0x81, 0x1d, 0x25, 0x87, - 0x01, 0xf1, 0x1c, 0xc1, 0x50, 0xe1, 0x99, 0x8a, 0xf2, 0x8a, 0x05, 0xe3, 0xa4, 0x6b, 0x7a, 0xd2, - 0x08, 0x6a, 0x6e, 0xc2, 0x8f, 0x32, 0xe4, 0x17, 0xdf, 0xd6, 0x7d, 0x98, 0x57, 0x4f, 0xbc, 0x62, - 0xd5, 0x96, 0x01, 0x72, 0x2c, 0x8e, 0x7b, 0x86, 0x02, 0x84, 0x0c, 0x8c, 0x63, 0xeb, 0x73, 0x30, - 0xf7, 0xa9, 0x2a, 0x44, 0x8c, 0xee, 0x81, 0x19, 0x64, 0x07, 0x29, 0xda, 0xde, 0x44, 0xe3, 0xa1, - 0xca, 0xe4, 0xec, 0xb1, 0x90, 0xf5, 0x08, 0x5a, 0x19, 0x39, 0xcb, 0xcd, 0x38, 0x2f, 0xb7, 0xca, - 0x44, 0x6e, 0xd6, 0x9f, 0x06, 0x2c, 0x14, 0x43, 0x4e, 0xcb, 0xf7, 0x0a, 0xba, 0xb9, 0x0b, 0x27, - 0x74, 0x47, 0x69, 0x2c, 0xf7, 0xf4, 0x58, 0xca, 0x6a, 0x79, 0x80, 0xf1, 0x53, 0x77, 0xa4, 0x5a, - 0xaa, 0x13, 0x68, 0xa4, 0xfe, 0x4b, 0x98, 0x2b, 0x89, 0x4c, 0x79, 0xdf, 0x7c, 0xa8, 0xbf, 0x6f, - 0x0a, 0x6f, 0xb4, 0x5c, 0x5b, 0x7f, 0xf4, 0x3c, 0x84, 0x1b, 0x6a, 0xfe, 0xb6, 0xf2, 0xa6, 0xcb, - 0x6a, 0x5f, 0xec, 0x4d, 0x63, 0xb2, 0x37, 0xad, 0x3e, 0xf4, 0xca, 0xaa, 0xe9, 0x14, 0x0c, 0x61, - 0xee, 0x80, 0xbb, 0x9c, 0xc4, 0x9c, 0x78, 0xf9, 0x43, 0x7b, 0xa2, 0x99, 0x8d, 0xcb, 0xb6, 0x4a, - 0x79, 0x1c, 0x66, 0xa1, 0xca, 0x79, 0xd6, 0x67, 0xe2, 0x53, 0xdc, 0x02, 0xd2, 0x3d, 0xa5, 0x77, - 0xf0, 0x0e, 0x5c, 0x89, 0x7e, 0xe0, 0x94, 0xbb, 0x81, 0xda, 0xda, 0x35, 0xb9, 0xb5, 0x4d, 0x49, - 0x91, 0x6b, 0x5b, 0x2d, 0x36, 0x5f, 0x71, 0xeb, 0x6a, 0xa7, 0x0b, 0x82, 0x64, 0x2e, 0x03, 0xc8, - 0x91, 0x52, 0xd3, 0xd0, 0x50, 0xba, 0x82, 0xb2, 0x25, 0x08, 0xd6, 0x0a, 0x2c, 0x7d, 0x81, 0xb9, - 0x78, 0x7f, 0xb0, 0x2d, 0x1a, 0x0d, 0xc8, 0x30, 0x61, 0xae, 0x76, 0x15, 0xd6, 0xcf, 0x06, 0x2c, - 0x9f, 0x23, 0x90, 0x26, 0xdc, 0x83, 0x66, 0xe8, 0xc6, 0x1c, 0xb3, 0x6c, 0x4a, 0xb2, 0xe3, 0x64, - 0x29, 0x2a, 0x97, 0x95, 0xa2, 0x5a, 0x2a, 0xc5, 0x35, 0x68, 0x84, 0xee, 0x99, 0x13, 0x1e, 0xa6, - 0x0f, 0x8c, 0x7a, 0xe8, 0x9e, 0x3d, 0x3d, 0xdc, 0xfc, 0xbb, 0x09, 0x9d, 0x03, 0xec, 0x9e, 0x62, - 0xec, 0xcb, 0xc0, 0xd0, 0x30, 0x1b, 0x88, 0xe2, 0xcf, 0x34, 0x74, 0x7b, 0xb2, 0xf3, 0xa7, 0xfe, - 0x2e, 0xec, 0xdf, 0xb9, 0x4c, 0x2c, 0xed, 0xad, 0x19, 0xf4, 0x0c, 0xda, 0xda, 0xef, 0x20, 0xb4, - 0xa4, 0x29, 0x96, 0x7e, 0xde, 0xf5, 0x97, 0xcf, 0xe1, 0x66, 0xd6, 0xee, 0x19, 0x68, 0x1f, 0xda, - 0xda, 0x56, 0xd7, 0xed, 0x95, 0xdf, 0x11, 0xba, 0xbd, 0x69, 0x4f, 0x81, 0x19, 0x61, 0x4d, 0xdb, - 0xd8, 0xba, 0xb5, 0xf2, 0x1b, 0x41, 0xb7, 0x36, 0x6d, 0xcd, 0x4b, 0x6b, 0xda, 0x82, 0xd4, 0xad, - 0x95, 0xd7, 0xbf, 0x6e, 0x6d, 0xda, 0x56, 0x9d, 0x41, 0xdf, 0xc1, 0x5c, 0x69, 0x75, 0x21, 0x6b, - 0xac, 0x75, 0xde, 0xce, 0xed, 0xaf, 0x5d, 0x28, 0x93, 0xdb, 0x7f, 0x0e, 0x1d, 0x7d, 0xa5, 0x20, - 0x2d, 0xa0, 0x29, 0x4b, 0xb1, 0xbf, 0x72, 0x1e, 0x5b, 0x37, 0xa8, 0xa3, 0xa5, 0x6e, 0x70, 0xca, - 0xbe, 0xd0, 0x0d, 0x4e, 0x03, 0x59, 0x6b, 0x06, 0x7d, 0x0b, 0xb3, 0x93, 0xa8, 0x85, 0x6e, 0x4d, - 0x96, 0xad, 0x04, 0x86, 0x7d, 0xeb, 0x22, 0x91, 0xdc, 0xf8, 0x1e, 0xc0, 0x18, 0x8c, 0xd0, 0xe2, - 0x58, 0xa7, 0x04, 0x86, 0xfd, 0xa5, 0xe9, 0xcc, 0xdc, 0xd4, 0xf7, 0x70, 0x6d, 0xea, 0xc4, 0x23, - 0x6d, 0x4c, 0x2e, 0xc2, 0x8c, 
0xfe, 0x07, 0x97, 0xca, 0x65, 0xbe, 0x9e, 0xac, 0xc0, 0x6c, 0xac, - 0x06, 0x79, 0x10, 0x6f, 0x78, 0x01, 0xc1, 0x11, 0x7f, 0x02, 0x52, 0xe3, 0x05, 0xa3, 0x9c, 0x1e, - 0x36, 0xe4, 0x3f, 0x3c, 0x9f, 0xfc, 0x17, 0x00, 0x00, 0xff, 0xff, 0x14, 0x43, 0x9d, 0xb9, 0xf0, - 0x11, 0x00, 0x00, + // 1624 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xb4, 0x58, 0x4b, 0x6f, 0xdb, 0xc6, + 0x16, 0x36, 0xf5, 0xe6, 0x91, 0x94, 0xd8, 0x63, 0x3b, 0x51, 0xe4, 0xc7, 0x75, 0xe8, 0x9b, 0x5c, + 0x5f, 0x34, 0x70, 0x03, 0x37, 0x8b, 0xa4, 0x69, 0x17, 0x89, 0x1f, 0x85, 0x51, 0xe7, 0x01, 0x3a, + 0x29, 0x5a, 0x14, 0x28, 0x41, 0x93, 0x23, 0x79, 0x6a, 0x92, 0xa3, 0x0e, 0x87, 0xb6, 0xd3, 0x9f, + 0xd0, 0x65, 0x97, 0x05, 0xba, 0xee, 0x9f, 0x28, 0xba, 0x29, 0x8a, 0xfe, 0x9b, 0x2e, 0xbb, 0x2e, + 0x66, 0x86, 0xa4, 0x86, 0xa2, 0x6c, 0x27, 0x28, 0xb2, 0xe3, 0x9c, 0xf7, 0x39, 0x73, 0xce, 0xf9, + 0x46, 0x82, 0xf6, 0x80, 0x04, 0x98, 0x6d, 0x8e, 0x18, 0xe5, 0x14, 0xb5, 0xe4, 0xc1, 0x19, 0x1d, + 0x59, 0x2f, 0x60, 0xe9, 0x80, 0xd2, 0x93, 0x64, 0xb4, 0x43, 0x18, 0xf6, 0x38, 0x65, 0x6f, 0x76, + 0x23, 0xce, 0xde, 0xd8, 0xf8, 0xbb, 0x04, 0xc7, 0x1c, 0x2d, 0x83, 0xe9, 0x67, 0x8c, 0x9e, 0xb1, + 0x66, 0x6c, 0x98, 0xf6, 0x98, 0x80, 0x10, 0xd4, 0x22, 0x37, 0xc4, 0xbd, 0x8a, 0x64, 0xc8, 0x6f, + 0x6b, 0x17, 0x96, 0xa7, 0x1b, 0x8c, 0x47, 0x34, 0x8a, 0x31, 0xba, 0x03, 0x75, 0x2c, 0x08, 0xd2, + 0x5a, 0x7b, 0xeb, 0xfa, 0x66, 0x16, 0xca, 0xa6, 0x92, 0x53, 0x5c, 0xeb, 0x37, 0x03, 0xd0, 0x01, + 0x89, 0xb9, 0x20, 0x12, 0x1c, 0xbf, 0x5d, 0x3c, 0x37, 0xa0, 0x31, 0x62, 0x78, 0x40, 0xce, 0xd3, + 0x88, 0xd2, 0x13, 0xba, 0x07, 0x73, 0x31, 0x77, 0x19, 0xdf, 0x63, 0x34, 0xdc, 0x23, 0x01, 0x7e, + 0x2e, 0x82, 0xae, 0x4a, 0x91, 0x32, 0x03, 0x6d, 0x02, 0x22, 0x91, 0x17, 0x24, 0x31, 0x39, 0xc5, + 0x87, 0x19, 0xb7, 0x57, 0x5b, 0x33, 0x36, 0x5a, 0xf6, 0x14, 0x0e, 0x5a, 0x80, 0x7a, 0x40, 0x42, + 0xc2, 0x7b, 0xf5, 0x35, 0x63, 0xa3, 0x6b, 0xab, 0x83, 0xf5, 0x09, 0xcc, 0x17, 0xe2, 0x7f, 0xb7, + 0xf4, 0x7f, 0xae, 0x40, 0x5d, 0x12, 0xf2, 0x1a, 0x1b, 0xe3, 0x1a, 0xa3, 0xdb, 0xd0, 0x21, 0xb1, + 0x33, 0x2e, 0x44, 0x45, 0xc6, 0xd6, 0x26, 0x71, 0x5e, 0x73, 0xf4, 0x01, 0x34, 0xbc, 0xe3, 0x24, + 0x3a, 0x89, 0x7b, 0xd5, 0xb5, 0xea, 0x46, 0x7b, 0x6b, 0x7e, 0xec, 0x48, 0x24, 0xba, 0x2d, 0x78, + 0x76, 0x2a, 0x82, 0x1e, 0x02, 0xb8, 0x9c, 0x33, 0x72, 0x94, 0x70, 0x1c, 0xcb, 0x4c, 0xdb, 0x5b, + 0x3d, 0x4d, 0x21, 0x89, 0xf1, 0x93, 0x9c, 0x6f, 0x6b, 0xb2, 0xe8, 0x11, 0xb4, 0xf0, 0x39, 0xc7, + 0x91, 0x8f, 0xfd, 0x5e, 0x5d, 0x3a, 0x5a, 0x99, 0xc8, 0x68, 0x73, 0x37, 0xe5, 0xab, 0xfc, 0x72, + 0xf1, 0xfe, 0x63, 0xe8, 0x16, 0x58, 0x68, 0x16, 0xaa, 0x27, 0x38, 0xbb, 0x55, 0xf1, 0x29, 0x2a, + 0x7b, 0xea, 0x06, 0x89, 0x6a, 0xb0, 0x8e, 0xad, 0x0e, 0x1f, 0x57, 0x1e, 0x1a, 0xd6, 0x0e, 0x98, + 0x7b, 0x49, 0x10, 0xe4, 0x8a, 0x3e, 0x61, 0x99, 0xa2, 0x4f, 0xd8, 0xb8, 0xca, 0x95, 0x4b, 0xab, + 0xfc, 0xab, 0x01, 0x73, 0xbb, 0xa7, 0x38, 0xe2, 0xcf, 0x29, 0x27, 0x03, 0xe2, 0xb9, 0x9c, 0xd0, + 0x08, 0xdd, 0x03, 0x93, 0x06, 0xbe, 0x73, 0xe9, 0x35, 0xb5, 0x68, 0x90, 0x46, 0x7d, 0x0f, 0xcc, + 0x08, 0x9f, 0x39, 0x97, 0xba, 0x6b, 0x45, 0xf8, 0x4c, 0x49, 0xaf, 0x43, 0xd7, 0xc7, 0x01, 0xe6, + 0xd8, 0xc9, 0x6f, 0x47, 0x5c, 0x5d, 0x47, 0x11, 0xb7, 0xd5, 0x75, 0xdc, 0x85, 0xeb, 0xc2, 0xe4, + 0xc8, 0x65, 0x38, 0xe2, 0xce, 0xc8, 0xe5, 0xc7, 0xf2, 0x4e, 0x4c, 0xbb, 0x1b, 0xe1, 0xb3, 0x97, + 0x92, 0xfa, 0xd2, 0xe5, 0xc7, 0xd6, 0xdf, 0x06, 0x98, 0xf9, 0x65, 0xa2, 0x9b, 0xd0, 0x14, 0x6e, + 0x1d, 0xe2, 0xa7, 0x95, 0x68, 0x88, 0xe3, 0xbe, 0x2f, 0xa6, 0x82, 0x0e, 0x06, 0x31, 
0xe6, 0x32, + 0xbc, 0xaa, 0x9d, 0x9e, 0x44, 0x67, 0xc5, 0xe4, 0x7b, 0x35, 0x08, 0x35, 0x5b, 0x7e, 0x8b, 0x8a, + 0x87, 0x9c, 0x84, 0x58, 0x3a, 0xac, 0xda, 0xea, 0x80, 0xe6, 0xa1, 0x8e, 0x1d, 0xee, 0x0e, 0x65, + 0x87, 0x9b, 0x76, 0x0d, 0xbf, 0x72, 0x87, 0xe8, 0xbf, 0x70, 0x2d, 0xa6, 0x09, 0xf3, 0xb0, 0x93, + 0xb9, 0x6d, 0x48, 0x6e, 0x47, 0x51, 0xf7, 0x94, 0x73, 0x0b, 0xaa, 0x03, 0xe2, 0xf7, 0x9a, 0xb2, + 0x30, 0xb3, 0xc5, 0x26, 0xdc, 0xf7, 0x6d, 0xc1, 0x44, 0x1f, 0x02, 0xe4, 0x96, 0xfc, 0x5e, 0xeb, + 0x02, 0x51, 0x33, 0xb3, 0xeb, 0x5b, 0x5f, 0x42, 0x23, 0x35, 0xbf, 0x04, 0xe6, 0x29, 0x0d, 0x92, + 0x30, 0x4f, 0xbb, 0x6b, 0xb7, 0x14, 0x61, 0xdf, 0x47, 0xb7, 0x40, 0xee, 0x39, 0x47, 0x74, 0x55, + 0x45, 0x26, 0x29, 0x2b, 0xf4, 0x39, 0x96, 0x9b, 0xc2, 0xa3, 0xf4, 0x84, 0xa8, 0xec, 0x9b, 0x76, + 0x7a, 0xb2, 0xfe, 0xaa, 0xc0, 0xb5, 0x62, 0xbb, 0x0b, 0x17, 0xd2, 0x8a, 0xac, 0x95, 0x21, 0xcd, + 0x48, 0xb3, 0x87, 0x85, 0x7a, 0x55, 0xf4, 0x7a, 0x65, 0x2a, 0x21, 0xf5, 0x95, 0x83, 0xae, 0x52, + 0x79, 0x46, 0x7d, 0x2c, 0xba, 0x35, 0x21, 0xbe, 0x2c, 0x70, 0xd7, 0x16, 0x9f, 0x82, 0x32, 0x24, + 0x7e, 0xba, 0x3e, 0xc4, 0xa7, 0x0c, 0x8f, 0x49, 0xbb, 0x0d, 0x75, 0x65, 0xea, 0x24, 0xae, 0x2c, + 0x14, 0xd4, 0xa6, 0xba, 0x07, 0xf1, 0x8d, 0xd6, 0xa0, 0xcd, 0xf0, 0x28, 0x48, 0xbb, 0x57, 0x96, + 0xcf, 0xb4, 0x75, 0x12, 0x5a, 0x05, 0xf0, 0x68, 0x10, 0x60, 0x4f, 0x0a, 0x98, 0x52, 0x40, 0xa3, + 0x88, 0xce, 0xe1, 0x3c, 0x70, 0x62, 0xec, 0xf5, 0x60, 0xcd, 0xd8, 0xa8, 0xdb, 0x0d, 0xce, 0x83, + 0x43, 0xec, 0x89, 0x3c, 0x92, 0x18, 0x33, 0x47, 0x2e, 0xa0, 0xb6, 0xd4, 0x6b, 0x09, 0x82, 0x5c, + 0x93, 0x2b, 0x00, 0x43, 0x46, 0x93, 0x91, 0xe2, 0x76, 0xd6, 0xaa, 0x62, 0x17, 0x4b, 0x8a, 0x64, + 0xdf, 0x81, 0x6b, 0xf1, 0x9b, 0x30, 0x20, 0xd1, 0x89, 0xc3, 0x5d, 0x36, 0xc4, 0xbc, 0xd7, 0x55, + 0x3d, 0x9c, 0x52, 0x5f, 0x49, 0xa2, 0x35, 0x02, 0xb4, 0xcd, 0xb0, 0xcb, 0xf1, 0x3b, 0xc0, 0xce, + 0xdb, 0x4d, 0x37, 0x5a, 0x84, 0x06, 0x75, 0xf0, 0xb9, 0x17, 0xa4, 0x43, 0x56, 0xa7, 0xbb, 0xe7, + 0x5e, 0x60, 0x2d, 0xc2, 0x7c, 0xc1, 0xa3, 0x5a, 0xcc, 0xd6, 0x57, 0x80, 0x5e, 0x8f, 0xfc, 0xf7, + 0x11, 0x88, 0xf0, 0x58, 0x30, 0x9d, 0x7a, 0xfc, 0xc3, 0x00, 0xb4, 0x23, 0xe7, 0xfe, 0xdf, 0x41, + 0xae, 0x98, 0x44, 0x01, 0x07, 0x6a, 0xaf, 0xf8, 0x2e, 0x77, 0x53, 0xb0, 0xea, 0x90, 0x58, 0xd9, + 0xdf, 0x71, 0xb9, 0x9b, 0x82, 0x06, 0xc3, 0x5e, 0xc2, 0x04, 0x7e, 0xc9, 0x76, 0x93, 0xa0, 0x61, + 0x67, 0x24, 0xf4, 0x00, 0x6e, 0x90, 0x61, 0x44, 0x19, 0x1e, 0x8b, 0x39, 0x98, 0x31, 0xca, 0x64, + 0x1b, 0xb6, 0xec, 0x05, 0xc5, 0xcd, 0x15, 0x76, 0x05, 0x4f, 0xa4, 0x57, 0x48, 0x23, 0x4d, 0xef, + 0x27, 0x03, 0x7a, 0x4f, 0x38, 0x0d, 0x89, 0x67, 0x63, 0x11, 0x66, 0x21, 0xc9, 0x75, 0xe8, 0x8a, + 0x1d, 0x3b, 0x99, 0x68, 0x87, 0x06, 0xfe, 0x18, 0xc3, 0x6e, 0x81, 0x58, 0xb3, 0x8e, 0x96, 0x6f, + 0x93, 0x06, 0xbe, 0xec, 0xae, 0x75, 0x10, 0xbb, 0x50, 0xd3, 0x57, 0x68, 0xde, 0x89, 0xf0, 0x59, + 0x41, 0x5f, 0x08, 0x49, 0x7d, 0xb5, 0x40, 0x9b, 0x11, 0x3e, 0x13, 0xfa, 0xd6, 0x12, 0xdc, 0x9a, + 0x12, 0x5b, 0x1a, 0xf9, 0x2f, 0x06, 0xcc, 0x3f, 0x89, 0x63, 0x32, 0x8c, 0xbe, 0x90, 0xab, 0x24, + 0x0b, 0x7a, 0x01, 0xea, 0x1e, 0x4d, 0x22, 0x2e, 0x83, 0xad, 0xdb, 0xea, 0x30, 0x31, 0x5d, 0x95, + 0xd2, 0x74, 0x4d, 0xcc, 0x67, 0xb5, 0x3c, 0x9f, 0xda, 0xfc, 0xd5, 0x0a, 0xf3, 0xf7, 0x1f, 0x68, + 0x8b, 0xeb, 0x74, 0x3c, 0x1c, 0x71, 0xcc, 0xd2, 0xed, 0x0b, 0x82, 0xb4, 0x2d, 0x29, 0xd6, 0x0f, + 0x06, 0x2c, 0x14, 0x23, 0x4d, 0x9f, 0x19, 0x17, 0x82, 0x81, 0xd8, 0x3e, 0x2c, 0x48, 0xc3, 0x14, + 0x9f, 0x62, 0x8e, 0x47, 0xc9, 0x51, 0x40, 0x3c, 0x47, 0x30, 0x54, 0x78, 0xa6, 0xa2, 0xbc, 0x66, + 0xc1, 0x38, 
0xe9, 0x9a, 0x9e, 0x34, 0x82, 0x9a, 0x9b, 0xf0, 0xe3, 0x0c, 0x10, 0xc4, 0xb7, 0xf5, + 0x00, 0xe6, 0xd5, 0xcb, 0xaf, 0x58, 0xb5, 0x15, 0x80, 0x7c, 0x45, 0xc7, 0x3d, 0x43, 0xed, 0x89, + 0x6c, 0x47, 0xc7, 0xd6, 0xa7, 0x60, 0x1e, 0x50, 0x55, 0x88, 0x18, 0xdd, 0x07, 0x33, 0xc8, 0x0e, + 0x52, 0xb4, 0xbd, 0x85, 0xc6, 0x43, 0x95, 0xc9, 0xd9, 0x63, 0x21, 0xeb, 0x31, 0xb4, 0x32, 0x72, + 0x96, 0x9b, 0x71, 0x51, 0x6e, 0x95, 0x89, 0xdc, 0xac, 0xdf, 0x0d, 0x58, 0x28, 0x86, 0x9c, 0x96, + 0xef, 0x35, 0x74, 0x73, 0x17, 0x4e, 0xe8, 0x8e, 0xd2, 0x58, 0xee, 0xeb, 0xb1, 0x94, 0xd5, 0xf2, + 0x00, 0xe3, 0x67, 0xee, 0x48, 0xb5, 0x54, 0x27, 0xd0, 0x48, 0xfd, 0x57, 0x30, 0x57, 0x12, 0x99, + 0xf2, 0xec, 0xf9, 0xbf, 0xfe, 0xec, 0x29, 0x3c, 0xdd, 0x72, 0x6d, 0xfd, 0x2d, 0xf4, 0x08, 0x6e, + 0xaa, 0xf9, 0xdb, 0xce, 0x9b, 0x2e, 0xab, 0x7d, 0xb1, 0x37, 0x8d, 0xc9, 0xde, 0xb4, 0xfa, 0xd0, + 0x2b, 0xab, 0xa6, 0x53, 0x30, 0x84, 0xb9, 0x43, 0xee, 0x72, 0x12, 0x73, 0xe2, 0xe5, 0xef, 0xef, + 0x89, 0x66, 0x36, 0xae, 0x02, 0x9b, 0xf2, 0x38, 0xcc, 0x42, 0x95, 0xf3, 0xac, 0xcf, 0xc4, 0xa7, + 0xb8, 0x05, 0xa4, 0x7b, 0x4a, 0xef, 0xe0, 0x3d, 0xb8, 0x12, 0xfd, 0xc0, 0x29, 0x77, 0x03, 0x05, + 0xe6, 0x35, 0x09, 0xe6, 0xa6, 0xa4, 0x48, 0x34, 0x57, 0x78, 0xe7, 0x2b, 0x6e, 0x5d, 0x41, 0xbd, + 0x20, 0x48, 0xe6, 0x0a, 0x80, 0x1c, 0x29, 0x35, 0x0d, 0x0d, 0xa5, 0x2b, 0x28, 0xdb, 0x82, 0x60, + 0xad, 0xc2, 0xf2, 0x67, 0x98, 0x8b, 0x67, 0x09, 0xdb, 0xa6, 0xd1, 0x80, 0x0c, 0x13, 0xe6, 0x6a, + 0x57, 0x61, 0xfd, 0x68, 0xc0, 0xca, 0x05, 0x02, 0x69, 0xc2, 0x3d, 0x68, 0x86, 0x6e, 0xcc, 0x31, + 0xcb, 0xa6, 0x24, 0x3b, 0x4e, 0x96, 0xa2, 0x72, 0x55, 0x29, 0xaa, 0xa5, 0x52, 0x2c, 0x42, 0x23, + 0x74, 0xcf, 0x9d, 0xf0, 0x28, 0x7d, 0x77, 0xd4, 0x43, 0xf7, 0xfc, 0xd9, 0xd1, 0xd6, 0x9f, 0x4d, + 0xe8, 0x1c, 0x62, 0xf7, 0x0c, 0x63, 0x5f, 0x06, 0x86, 0x86, 0xd9, 0x40, 0x14, 0x7f, 0xbd, 0xa1, + 0x3b, 0x93, 0x9d, 0x3f, 0xf5, 0xe7, 0x62, 0xff, 0xee, 0x55, 0x62, 0x69, 0x6f, 0xcd, 0xa0, 0xe7, + 0xd0, 0xd6, 0x7e, 0x1e, 0xa1, 0x65, 0x4d, 0xb1, 0xf4, 0xab, 0xaf, 0xbf, 0x72, 0x01, 0x37, 0xb3, + 0x76, 0xdf, 0x40, 0x07, 0xd0, 0xd6, 0x50, 0x5d, 0xb7, 0x57, 0x7e, 0x5e, 0xe8, 0xf6, 0xa6, 0x3d, + 0x05, 0x66, 0x84, 0x35, 0x0d, 0xb1, 0x75, 0x6b, 0xe5, 0x37, 0x82, 0x6e, 0x6d, 0x1a, 0xcc, 0x4b, + 0x6b, 0x1a, 0x40, 0xea, 0xd6, 0xca, 0xf0, 0xaf, 0x5b, 0x9b, 0x86, 0xaa, 0x33, 0xe8, 0x1b, 0x98, + 0x2b, 0x41, 0x17, 0xb2, 0xc6, 0x5a, 0x17, 0x61, 0x6e, 0x7f, 0xfd, 0x52, 0x99, 0xdc, 0xfe, 0x0b, + 0xe8, 0xe8, 0x90, 0x82, 0xb4, 0x80, 0xa6, 0x80, 0x62, 0x7f, 0xf5, 0x22, 0xb6, 0x6e, 0x50, 0xdf, + 0x96, 0xba, 0xc1, 0x29, 0x78, 0xa1, 0x1b, 0x9c, 0xb6, 0x64, 0xad, 0x19, 0xf4, 0x35, 0xcc, 0x4e, + 0x6e, 0x2d, 0x74, 0x7b, 0xb2, 0x6c, 0xa5, 0x65, 0xd8, 0xb7, 0x2e, 0x13, 0xc9, 0x8d, 0xef, 0x03, + 0x8c, 0x97, 0x11, 0x5a, 0x1a, 0xeb, 0x94, 0x96, 0x61, 0x7f, 0x79, 0x3a, 0x33, 0x37, 0xf5, 0x2d, + 0x2c, 0x4e, 0x9d, 0x78, 0xa4, 0x8d, 0xc9, 0x65, 0x3b, 0xa3, 0xff, 0xbf, 0x2b, 0xe5, 0x32, 0x5f, + 0x4f, 0x57, 0x61, 0x36, 0x56, 0x83, 0x3c, 0x88, 0x37, 0xbd, 0x80, 0xe0, 0x88, 0x3f, 0x05, 0xa9, + 0xf1, 0x92, 0x51, 0x4e, 0x8f, 0x1a, 0xf2, 0x8f, 0x9f, 0x8f, 0xfe, 0x09, 0x00, 0x00, 0xff, 0xff, + 0x32, 0x00, 0xf8, 0x2b, 0x07, 0x12, 0x00, 0x00, } diff --git a/weed/server/filer_grpc_server.go b/weed/server/filer_grpc_server.go index a84feec2d..d82ef9782 100644 --- a/weed/server/filer_grpc_server.go +++ b/weed/server/filer_grpc_server.go @@ -139,7 +139,7 @@ func (fs *FilerServer) CreateEntry(ctx context.Context, req *filer_pb.CreateEntr FullPath: fullpath, Attr: filer2.PbToEntryAttribute(req.Entry.Attributes), Chunks: 
chunks, - }) + }, req.OExcl) if err == nil { fs.filer.DeleteChunks(garbages) diff --git a/weed/server/filer_grpc_server_rename.go b/weed/server/filer_grpc_server_rename.go index dfa59e7fe..0669a26f1 100644 --- a/weed/server/filer_grpc_server_rename.go +++ b/weed/server/filer_grpc_server_rename.go @@ -107,7 +107,7 @@ func (fs *FilerServer) moveSelfEntry(ctx context.Context, oldParent filer2.FullP Attr: entry.Attr, Chunks: entry.Chunks, } - createErr := fs.filer.CreateEntry(ctx, newEntry) + createErr := fs.filer.CreateEntry(ctx, newEntry, false) if createErr != nil { return createErr } diff --git a/weed/server/filer_server_handlers_write.go b/weed/server/filer_server_handlers_write.go index a7dcc39a0..6b5d258bc 100644 --- a/weed/server/filer_server_handlers_write.go +++ b/weed/server/filer_server_handlers_write.go @@ -193,7 +193,7 @@ func (fs *FilerServer) updateFilerStore(ctx context.Context, r *http.Request, w entry.Attr.Mime = mime.TypeByExtension(ext) } // glog.V(4).Infof("saving %s => %+v", path, entry) - if dbErr := fs.filer.CreateEntry(ctx, entry); dbErr != nil { + if dbErr := fs.filer.CreateEntry(ctx, entry, false); dbErr != nil { fs.filer.DeleteChunks(entry.Chunks) glog.V(0).Infof("failing to write %s to filer server : %v", path, dbErr) writeJsonError(w, r, http.StatusInternalServerError, dbErr) diff --git a/weed/server/filer_server_handlers_write_autochunk.go b/weed/server/filer_server_handlers_write_autochunk.go index 8ff7ab2c0..5739c2d4e 100644 --- a/weed/server/filer_server_handlers_write_autochunk.go +++ b/weed/server/filer_server_handlers_write_autochunk.go @@ -176,7 +176,7 @@ func (fs *FilerServer) doAutoChunk(ctx context.Context, w http.ResponseWriter, r }, Chunks: fileChunks, } - if dbErr := fs.filer.CreateEntry(ctx, entry); dbErr != nil { + if dbErr := fs.filer.CreateEntry(ctx, entry, false); dbErr != nil { fs.filer.DeleteChunks(entry.Chunks) replyerr = dbErr filerResult.Error = dbErr.Error() From 09f4ceef3a5b3eca457fd74382f0391a1db4283e Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Wed, 22 Jan 2020 11:43:43 -0800 Subject: [PATCH 0049/2432] mount: reuse previous file node --- weed/filesys/dir_rename.go | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/weed/filesys/dir_rename.go b/weed/filesys/dir_rename.go index 1bd1a6470..8db879d2c 100644 --- a/weed/filesys/dir_rename.go +++ b/weed/filesys/dir_rename.go @@ -35,8 +35,9 @@ func (dir *Dir) Rename(ctx context.Context, req *fuse.RenameRequest, newDirector }) if err == nil { + newPath := filer2.NewFullPath(newDir.Path, req.NewName) oldPath := filer2.NewFullPath(dir.Path, req.OldName) - dir.wfs.cacheDelete(filer2.NewFullPath(newDir.Path, req.NewName)) + dir.wfs.cacheDelete(newPath) dir.wfs.cacheDelete(oldPath) oldFileNode := dir.wfs.getNode(oldPath, func() fs.Node { @@ -45,14 +46,17 @@ func (dir *Dir) Rename(ctx context.Context, req *fuse.RenameRequest, newDirector newDirNode := dir.wfs.getNode(filer2.FullPath(dir.Path), func() fs.Node { return nil }) - if oldFileNode != nil { + dir.wfs.forgetNode(newPath) + dir.wfs.forgetNode(oldPath) + if oldFileNode != nil && newDirNode != nil { oldFile := oldFileNode.(*File) oldFile.Name = req.NewName - if newDirNode != nil { - oldFile.dir = newDirNode.(*Dir) - } + oldFile.dir = newDirNode.(*Dir) + dir.wfs.getNode(newPath, func() fs.Node { + return oldFile + }) + } - dir.wfs.forgetNode(oldPath) } return err From 6b48d246a5d7943527d1948e352c9906e9d01a17 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Wed, 22 Jan 2020 13:42:03 -0800 Subject: [PATCH 0050/2432] 
mount: read data that is just written able read on data not flushed multiple file open shares the same file handle fix https://github.com/chrislusf/seaweedfs/issues/1182 on linux --- weed/filesys/dir.go | 3 +- weed/filesys/dirty_page.go | 23 +++++++++++++++ weed/filesys/file.go | 12 ++++---- weed/filesys/filehandle.go | 57 ++++++++++++++++++++++++++++---------- weed/filesys/wfs.go | 11 ++++++++ 5 files changed, 84 insertions(+), 22 deletions(-) diff --git a/weed/filesys/dir.go b/weed/filesys/dir.go index 3c1672911..91e42fc0a 100644 --- a/weed/filesys/dir.go +++ b/weed/filesys/dir.go @@ -146,9 +146,8 @@ func (dir *Dir) Create(ctx context.Context, req *fuse.CreateRequest, } file := node.(*File) - file.isOpen = true + file.isOpen++ fh := dir.wfs.AcquireHandle(file, req.Uid, req.Gid) - fh.dirtyMetadata = true return file, fh, nil } diff --git a/weed/filesys/dirty_page.go b/weed/filesys/dirty_page.go index f83944678..7a466b506 100644 --- a/weed/filesys/dirty_page.go +++ b/weed/filesys/dirty_page.go @@ -222,3 +222,26 @@ func max(x, y int64) int64 { } return y } +func min(x, y int64) int64 { + if x < y { + return x + } + return y +} + +func (pages *ContinuousDirtyPages) ReadDirtyData(ctx context.Context, data []byte, startOffset int64) (offset int64, size int, err error) { + bufSize := int64(len(data)) + if startOffset+bufSize < pages.Offset { + return + } + if startOffset >= pages.Offset+pages.Size { + return + } + + offset = max(pages.Offset, startOffset) + stopOffset := min(pages.Offset+pages.Size, startOffset+bufSize) + size = int(stopOffset - offset) + copy(data[offset-startOffset:], pages.Data[offset-pages.Offset:stopOffset-pages.Offset]) + + return +} diff --git a/weed/filesys/file.go b/weed/filesys/file.go index d811cb179..e15d55b5b 100644 --- a/weed/filesys/file.go +++ b/weed/filesys/file.go @@ -31,7 +31,7 @@ type File struct { wfs *WFS entry *filer_pb.Entry entryViewCache []filer2.VisibleInterval - isOpen bool + isOpen int } func (file *File) fullpath() filer2.FullPath { @@ -42,7 +42,7 @@ func (file *File) Attr(ctx context.Context, attr *fuse.Attr) error { glog.V(4).Infof("file Attr %s, open:%v, existing attr: %+v", file.fullpath(), file.isOpen, attr) - if !file.isOpen { + if file.isOpen <=0 { if err := file.maybeLoadEntry(ctx); err != nil { return err } @@ -52,7 +52,7 @@ func (file *File) Attr(ctx context.Context, attr *fuse.Attr) error { attr.Valid = time.Second attr.Mode = os.FileMode(file.entry.Attributes.FileMode) attr.Size = filer2.TotalSize(file.entry.Chunks) - if file.isOpen { + if file.isOpen > 0 { attr.Size = file.entry.Attributes.FileSize } attr.Crtime = time.Unix(file.entry.Attributes.Crtime, 0) @@ -81,7 +81,7 @@ func (file *File) Open(ctx context.Context, req *fuse.OpenRequest, resp *fuse.Op glog.V(4).Infof("file %v open %+v", file.fullpath(), req) - file.isOpen = true + file.isOpen++ handle := file.wfs.AcquireHandle(file, req.Uid, req.Gid) @@ -140,7 +140,7 @@ func (file *File) Setattr(ctx context.Context, req *fuse.SetattrRequest, resp *f file.entry.Attributes.Mtime = req.Mtime.Unix() } - if file.isOpen { + if file.isOpen > 0 { return nil } @@ -218,7 +218,7 @@ func (file *File) Forget() { } func (file *File) maybeLoadEntry(ctx context.Context) error { - if file.entry == nil || !file.isOpen { + if file.entry == nil || file.isOpen <= 0{ entry, err := file.wfs.maybeLoadEntry(ctx, file.dir.Path, file.Name) if err != nil { return err diff --git a/weed/filesys/filehandle.go b/weed/filesys/filehandle.go index 101f5c056..981de7ea2 100644 --- a/weed/filesys/filehandle.go +++ 
b/weed/filesys/filehandle.go @@ -7,10 +7,11 @@ import ( "path" "time" + "github.com/gabriel-vasile/mimetype" + "github.com/chrislusf/seaweedfs/weed/filer2" "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" - "github.com/gabriel-vasile/mimetype" "github.com/seaweedfs/fuse" "github.com/seaweedfs/fuse/fs" ) @@ -50,29 +51,50 @@ func (fh *FileHandle) Read(ctx context.Context, req *fuse.ReadRequest, resp *fus glog.V(4).Infof("%s read fh %d: [%d,%d)", fh.f.fullpath(), fh.handle, req.Offset, req.Offset+int64(req.Size)) + buff := make([]byte, req.Size) + + totalRead, err := fh.readFromChunks(ctx, buff, req.Offset) + if err == nil { + dirtyOffset, dirtySize, dirtyReadErr := fh.readFromDirtyPages(ctx, buff, req.Offset) + if dirtyReadErr == nil && totalRead+req.Offset < dirtyOffset+int64(dirtySize) { + totalRead = dirtyOffset + int64(dirtySize) - req.Offset + } + } + + resp.Data = buff[:totalRead] + + if err != nil { + glog.Errorf("file handle read %s: %v", fh.f.fullpath(), err) + } + + return err +} + +func (fh *FileHandle) readFromDirtyPages(ctx context.Context, buff []byte, startOffset int64) (offset int64, size int, err error) { + return fh.dirtyPages.ReadDirtyData(ctx, buff, startOffset) +} + +func (fh *FileHandle) readFromChunks(ctx context.Context, buff []byte, offset int64) (int64, error) { + // this value should come from the filer instead of the old f if len(fh.f.entry.Chunks) == 0 { glog.V(1).Infof("empty fh %v/%v", fh.f.dir.Path, fh.f.Name) - return nil + return 0, nil } - buff := make([]byte, req.Size) - if fh.f.entryViewCache == nil { fh.f.entryViewCache = filer2.NonOverlappingVisibleIntervals(fh.f.entry.Chunks) } - chunkViews := filer2.ViewFromVisibleIntervals(fh.f.entryViewCache, req.Offset, req.Size) - - totalRead, err := filer2.ReadIntoBuffer(ctx, fh.f.wfs, fh.f.fullpath(), buff, chunkViews, req.Offset) + chunkViews := filer2.ViewFromVisibleIntervals(fh.f.entryViewCache, offset, len(buff)) - resp.Data = buff[:totalRead] + totalRead, err := filer2.ReadIntoBuffer(ctx, fh.f.wfs, fh.f.fullpath(), buff, chunkViews, offset) if err != nil { glog.Errorf("file handle read %s: %v", fh.f.fullpath(), err) } - return err + return totalRead, err } // Write to the file handle @@ -115,11 +137,12 @@ func (fh *FileHandle) Release(ctx context.Context, req *fuse.ReleaseRequest) err glog.V(4).Infof("%v release fh %d", fh.f.fullpath(), fh.handle) - fh.dirtyPages.releaseResource() + fh.f.isOpen-- - fh.f.wfs.ReleaseHandle(fh.f.fullpath(), fuse.HandleID(fh.handle)) - - fh.f.isOpen = false + if fh.f.isOpen <= 0 { + fh.dirtyPages.releaseResource() + fh.f.wfs.ReleaseHandle(fh.f.fullpath(), fuse.HandleID(fh.handle)) + } return nil } @@ -141,7 +164,7 @@ func (fh *FileHandle) Flush(ctx context.Context, req *fuse.FlushRequest) error { return nil } - return fh.f.wfs.WithFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { + err = fh.f.wfs.WithFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { if fh.f.entry.Attributes != nil { fh.f.entry.Attributes.Mime = fh.contentType @@ -178,4 +201,10 @@ func (fh *FileHandle) Flush(ctx context.Context, req *fuse.FlushRequest) error { return nil }) + + if err == nil { + fh.dirtyMetadata = false + } + + return err } diff --git a/weed/filesys/wfs.go b/weed/filesys/wfs.go index a2e5a9073..4cfab811b 100644 --- a/weed/filesys/wfs.go +++ b/weed/filesys/wfs.go @@ -49,6 +49,7 @@ type WFS struct { // contains all open handles, protected by handlesLock handlesLock sync.Mutex handles []*FileHandle + pathToHandleIndex 
map[filer2.FullPath]int bufPool sync.Pool @@ -68,6 +69,7 @@ func NewSeaweedFileSystem(option *Option) *WFS { wfs := &WFS{ option: option, listDirectoryEntriesCache: ccache.New(ccache.Configure().MaxSize(option.DirListCacheLimit * 3).ItemsToPrune(100)), + pathToHandleIndex: make(map[filer2.FullPath]int), bufPool: sync.Pool{ New: func() interface{} { return make([]byte, option.ChunkSizeLimit) @@ -102,11 +104,18 @@ func (wfs *WFS) AcquireHandle(file *File, uid, gid uint32) (fileHandle *FileHand wfs.handlesLock.Lock() defer wfs.handlesLock.Unlock() + index, found := wfs.pathToHandleIndex[fullpath] + if found && wfs.handles[index] != nil { + glog.V(2).Infoln(fullpath, "found fileHandle id", index) + return wfs.handles[index] + } + fileHandle = newFileHandle(file, uid, gid) for i, h := range wfs.handles { if h == nil { wfs.handles[i] = fileHandle fileHandle.handle = uint64(i) + wfs.pathToHandleIndex[fullpath] = i glog.V(4).Infof( "%s reuse fh %d", fullpath,fileHandle.handle) return } @@ -114,6 +123,7 @@ func (wfs *WFS) AcquireHandle(file *File, uid, gid uint32) (fileHandle *FileHand wfs.handles = append(wfs.handles, fileHandle) fileHandle.handle = uint64(len(wfs.handles) - 1) + wfs.pathToHandleIndex[fullpath] = int(fileHandle.handle) glog.V(4).Infof( "%s new fh %d", fullpath,fileHandle.handle) return @@ -124,6 +134,7 @@ func (wfs *WFS) ReleaseHandle(fullpath filer2.FullPath, handleId fuse.HandleID) defer wfs.handlesLock.Unlock() glog.V(4).Infof("%s ReleaseHandle id %d current handles length %d", fullpath, handleId, len(wfs.handles)) + delete(wfs.pathToHandleIndex, fullpath) if int(handleId) < len(wfs.handles) { wfs.handles[int(handleId)] = nil } From 66ded8804aac639e62b2a5a9c45fd9b4ccf92567 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Wed, 22 Jan 2020 15:37:59 -0800 Subject: [PATCH 0051/2432] adjust logging --- weed/filesys/dir.go | 1 - weed/filesys/file.go | 2 +- 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/weed/filesys/dir.go b/weed/filesys/dir.go index 91e42fc0a..f4132cbf0 100644 --- a/weed/filesys/dir.go +++ b/weed/filesys/dir.go @@ -246,7 +246,6 @@ func (dir *Dir) ReadDirAll(ctx context.Context) (ret []fuse.Dirent, err error) { dirent := fuse.Dirent{Inode: inode, Name: entry.Name, Type: fuse.DT_File} ret = append(ret, dirent) } - glog.V(4).Infof("dir ReadDirAll : %s %+v", fullpath, entry) dir.wfs.cacheSet(fullpath, entry, cacheTtl) }) if readErr != nil { diff --git a/weed/filesys/file.go b/weed/filesys/file.go index e15d55b5b..b1d53507b 100644 --- a/weed/filesys/file.go +++ b/weed/filesys/file.go @@ -68,7 +68,7 @@ func (file *File) Attr(ctx context.Context, attr *fuse.Attr) error { func (file *File) Getxattr(ctx context.Context, req *fuse.GetxattrRequest, resp *fuse.GetxattrResponse) error { - glog.V(4).Infof("file Getxattr %s", file.fullpath()) + // glog.V(4).Infof("file Getxattr %s", file.fullpath()) if err := file.maybeLoadEntry(ctx); err != nil { return err From 72e1aff237511fff72e1455e1b31b1f55e6d0ceb Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Wed, 22 Jan 2020 15:38:25 -0800 Subject: [PATCH 0052/2432] add lock on dirty pages --- weed/filesys/dirty_page.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/weed/filesys/dirty_page.go b/weed/filesys/dirty_page.go index 7a466b506..3418dc1c9 100644 --- a/weed/filesys/dirty_page.go +++ b/weed/filesys/dirty_page.go @@ -231,6 +231,10 @@ func min(x, y int64) int64 { func (pages *ContinuousDirtyPages) ReadDirtyData(ctx context.Context, data []byte, startOffset int64) (offset int64, size int, err error) { bufSize := 
int64(len(data)) + + pages.lock.Lock() + defer pages.lock.Unlock() + if startOffset+bufSize < pages.Offset { return } From 6a5c0370995653621fa8b576ea149e91875938d6 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Wed, 22 Jan 2020 22:59:12 -0800 Subject: [PATCH 0053/2432] fix http range requests --- .../client/src/main/java/seaweedfs/client/SeaweedRead.java | 2 +- weed/util/http_util.go | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/other/java/client/src/main/java/seaweedfs/client/SeaweedRead.java b/other/java/client/src/main/java/seaweedfs/client/SeaweedRead.java index 2efa64580..b08c14467 100644 --- a/other/java/client/src/main/java/seaweedfs/client/SeaweedRead.java +++ b/other/java/client/src/main/java/seaweedfs/client/SeaweedRead.java @@ -63,7 +63,7 @@ public class SeaweedRead { if (!chunkView.isFullChunk) { request.setHeader(HttpHeaders.ACCEPT_ENCODING, ""); request.setHeader(HttpHeaders.RANGE, - String.format("bytes=%d-%d", chunkView.offset, chunkView.offset + chunkView.size)); + String.format("bytes=%d-%d", chunkView.offset, chunkView.offset + chunkView.size - 1)); } try { diff --git a/weed/util/http_util.go b/weed/util/http_util.go index 4aab90f5a..740d41967 100644 --- a/weed/util/http_util.go +++ b/weed/util/http_util.go @@ -196,7 +196,7 @@ func ReadUrl(fileUrl string, offset int64, size int, buf []byte, isReadRange boo return 0, err } if isReadRange { - req.Header.Add("Range", fmt.Sprintf("bytes=%d-%d", offset, offset+int64(size))) + req.Header.Add("Range", fmt.Sprintf("bytes=%d-%d", offset, offset+int64(size)-1)) } else { req.Header.Set("Accept-Encoding", "gzip") } @@ -256,7 +256,7 @@ func ReadUrlAsStream(fileUrl string, offset int64, size int, fn func(data []byte if err != nil { return 0, err } - req.Header.Add("Range", fmt.Sprintf("bytes=%d-%d", offset, offset+int64(size))) + req.Header.Add("Range", fmt.Sprintf("bytes=%d-%d", offset, offset+int64(size)-1)) r, err := client.Do(req) if err != nil { From c2e589f202b84b5beb98f1f5c243cc38b58f232b Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Wed, 22 Jan 2020 23:00:04 -0800 Subject: [PATCH 0054/2432] mount: better combines connected intervals to write to volume servers --- weed/filesys/dirty_page.go | 161 +++++++++-------------- weed/filesys/dirty_page_interval.go | 190 ++++++++++++++++++++++++++++ weed/filesys/file.go | 6 - weed/filesys/filehandle.go | 15 ++- 4 files changed, 259 insertions(+), 113 deletions(-) create mode 100644 weed/filesys/dirty_page_interval.go diff --git a/weed/filesys/dirty_page.go b/weed/filesys/dirty_page.go index 3418dc1c9..f1532a6a0 100644 --- a/weed/filesys/dirty_page.go +++ b/weed/filesys/dirty_page.go @@ -4,8 +4,8 @@ import ( "bytes" "context" "fmt" + "io" "sync" - "sync/atomic" "time" "github.com/chrislusf/seaweedfs/weed/glog" @@ -15,28 +15,19 @@ import ( ) type ContinuousDirtyPages struct { - hasData bool - Offset int64 - Size int64 - Data []byte - f *File - lock sync.Mutex + intervals *ContinuousIntervals + f *File + lock sync.Mutex } func newDirtyPages(file *File) *ContinuousDirtyPages { return &ContinuousDirtyPages{ - Data: nil, - f: file, + intervals: &ContinuousIntervals{}, + f: file, } } func (pages *ContinuousDirtyPages) releaseResource() { - if pages.Data != nil { - pages.f.wfs.bufPool.Put(pages.Data) - pages.Data = nil - atomic.AddInt32(&counter, -1) - glog.V(3).Infof("%s/%s releasing resource %d", pages.f.dir.Path, pages.f.Name, counter) - } } var counter = int32(0) @@ -46,84 +37,49 @@ func (pages *ContinuousDirtyPages) AddPage(ctx context.Context, offset int64, da 
pages.lock.Lock() defer pages.lock.Unlock() - var chunk *filer_pb.FileChunk + glog.V(3).Infof("%s AddPage [%d,%d)", pages.f.fullpath(), offset, offset+int64(len(data))) if len(data) > int(pages.f.wfs.option.ChunkSizeLimit) { // this is more than what buffer can hold. return pages.flushAndSave(ctx, offset, data) } - if pages.Data == nil { - pages.Data = pages.f.wfs.bufPool.Get().([]byte) - atomic.AddInt32(&counter, 1) - glog.V(3).Infof("%s/%s acquire resource %d", pages.f.dir.Path, pages.f.Name, counter) - } - - if offset < pages.Offset || offset >= pages.Offset+int64(len(pages.Data)) || - pages.Offset+int64(len(pages.Data)) < offset+int64(len(data)) { - // if the data is out of range, - // or buffer is full if adding new data, - // flush current buffer and add new data - - glog.V(4).Infof("offset=%d, size=%d, existing pages offset=%d, pages size=%d, data=%d", offset, len(data), pages.Offset, pages.Size, len(pages.Data)) - - if chunk, err = pages.saveExistingPagesToStorage(ctx); err == nil { - if chunk != nil { - glog.V(4).Infof("%s/%s add save [%d,%d)", pages.f.dir.Path, pages.f.Name, chunk.Offset, chunk.Offset+int64(chunk.Size)) - chunks = append(chunks, chunk) - } - } else { - glog.V(0).Infof("%s/%s add save [%d,%d): %v", pages.f.dir.Path, pages.f.Name, chunk.Offset, chunk.Offset+int64(chunk.Size), err) - return - } - pages.Offset = offset - glog.V(4).Infof("copy data0: offset=%d, size=%d, existing pages offset=%d, pages size=%d, data=%d", offset, len(data), pages.Offset, pages.Size, len(pages.Data)) - copy(pages.Data, data) - pages.Size = int64(len(data)) + hasOverlap := pages.intervals.AddInterval(data, offset) + if hasOverlap { + chunks, err = pages.saveExistingPagesToStorage(ctx) + pages.intervals.AddInterval(data, offset) return } - if offset != pages.Offset+pages.Size { - // when this happens, debug shows the data overlapping with existing data is empty - // the data is not just append - if offset == pages.Offset && int(pages.Size) < len(data) { - glog.V(4).Infof("copy data1: offset=%d, size=%d, existing pages offset=%d, pages size=%d, data=%d", offset, len(data), pages.Offset, pages.Size, len(pages.Data)) - copy(pages.Data[pages.Size:], data[pages.Size:]) - } else { - if pages.Size != 0 { - glog.V(1).Infof("%s/%s add page: pages [%d, %d) write [%d, %d)", pages.f.dir.Path, pages.f.Name, pages.Offset, pages.Offset+pages.Size, offset, offset+int64(len(data))) - } - return pages.flushAndSave(ctx, offset, data) + var chunk *filer_pb.FileChunk + var hasSavedData bool + + if pages.intervals.TotalSize() > pages.f.wfs.option.ChunkSizeLimit { + chunk, hasSavedData, err = pages.saveExistingLargestPageToStorage(ctx) + if hasSavedData { + chunks = append(chunks, chunk) } - } else { - glog.V(4).Infof("copy data2: offset=%d, size=%d, existing pages offset=%d, pages size=%d, data=%d", offset, len(data), pages.Offset, pages.Size, len(pages.Data)) - copy(pages.Data[offset-pages.Offset:], data) } - pages.Size = max(pages.Size, offset+int64(len(data))-pages.Offset) - return } func (pages *ContinuousDirtyPages) flushAndSave(ctx context.Context, offset int64, data []byte) (chunks []*filer_pb.FileChunk, err error) { var chunk *filer_pb.FileChunk + var newChunks []*filer_pb.FileChunk // flush existing - if chunk, err = pages.saveExistingPagesToStorage(ctx); err == nil { - if chunk != nil { - glog.V(4).Infof("%s/%s flush existing [%d,%d) to %s", pages.f.dir.Path, pages.f.Name, chunk.Offset, chunk.Offset+int64(chunk.Size), chunk.FileId) - chunks = append(chunks, chunk) + if newChunks, err = 
pages.saveExistingPagesToStorage(ctx); err == nil { + if newChunks != nil { + chunks = append(chunks, newChunks...) } } else { - glog.V(0).Infof("%s/%s failed to flush1 [%d,%d): %v", pages.f.dir.Path, pages.f.Name, chunk.Offset, chunk.Offset+int64(chunk.Size), err) return } - pages.Size = 0 - pages.Offset = 0 // flush the new page - if chunk, err = pages.saveToStorage(ctx, data, offset); err == nil { + if chunk, err = pages.saveToStorage(ctx, bytes.NewReader(data), offset, int64(len(data))); err == nil { if chunk != nil { glog.V(4).Infof("%s/%s flush big request [%d,%d) to %s", pages.f.dir.Path, pages.f.Name, chunk.Offset, chunk.Offset+int64(chunk.Size), chunk.FileId) chunks = append(chunks, chunk) @@ -136,37 +92,55 @@ func (pages *ContinuousDirtyPages) flushAndSave(ctx context.Context, offset int6 return } -func (pages *ContinuousDirtyPages) FlushToStorage(ctx context.Context) (chunk *filer_pb.FileChunk, err error) { +func (pages *ContinuousDirtyPages) FlushToStorage(ctx context.Context) (chunks []*filer_pb.FileChunk, err error) { pages.lock.Lock() defer pages.lock.Unlock() - if pages.Size == 0 { - return nil, nil - } + return pages.saveExistingPagesToStorage(ctx) +} - if chunk, err = pages.saveExistingPagesToStorage(ctx); err == nil { - pages.Size = 0 - pages.Offset = 0 - if chunk != nil { - glog.V(4).Infof("%s/%s flush [%d,%d)", pages.f.dir.Path, pages.f.Name, chunk.Offset, chunk.Offset+int64(chunk.Size)) +func (pages *ContinuousDirtyPages) saveExistingPagesToStorage(ctx context.Context) (chunks []*filer_pb.FileChunk, err error) { + + var hasSavedData bool + var chunk *filer_pb.FileChunk + + for { + + chunk, hasSavedData, err = pages.saveExistingLargestPageToStorage(ctx) + if !hasSavedData { + return chunks, err + } + + if err == nil { + chunks = append(chunks, chunk) + } else { + return } } - return + } -func (pages *ContinuousDirtyPages) saveExistingPagesToStorage(ctx context.Context) (*filer_pb.FileChunk, error) { +func (pages *ContinuousDirtyPages) saveExistingLargestPageToStorage(ctx context.Context) (chunk *filer_pb.FileChunk, hasSavedData bool, err error) { - if pages.Size == 0 { - return nil, nil + maxList := pages.intervals.RemoveLargestIntervalLinkedList() + if maxList == nil { + return nil, false, nil } - glog.V(0).Infof("%s/%s saveExistingPagesToStorage [%d,%d): Data len=%d", pages.f.dir.Path, pages.f.Name, pages.Offset, pages.Size, len(pages.Data)) + chunk, err = pages.saveToStorage(ctx, maxList.ToReader(), maxList.Offset(), maxList.Size()) + if err == nil { + hasSavedData = true + glog.V(3).Infof("%s saveToStorage [%d,%d) %s", pages.f.fullpath(), maxList.Offset(), maxList.Offset()+maxList.Size(), chunk.FileId) + } else { + glog.V(0).Infof("%s saveToStorage [%d,%d): %v", pages.f.fullpath(), maxList.Offset(), maxList.Offset()+maxList.Size(), err) + return + } - return pages.saveToStorage(ctx, pages.Data[:pages.Size], pages.Offset) + return } -func (pages *ContinuousDirtyPages) saveToStorage(ctx context.Context, buf []byte, offset int64) (*filer_pb.FileChunk, error) { +func (pages *ContinuousDirtyPages) saveToStorage(ctx context.Context, reader io.Reader, offset int64, size int64) (*filer_pb.FileChunk, error) { var fileId, host string var auth security.EncodedJwt @@ -195,8 +169,7 @@ func (pages *ContinuousDirtyPages) saveToStorage(ctx context.Context, buf []byte } fileUrl := fmt.Sprintf("http://%s/%s", host, fileId) - bufReader := bytes.NewReader(buf) - uploadResult, err := operation.Upload(fileUrl, pages.f.Name, bufReader, false, "", nil, auth) + uploadResult, err := 
operation.Upload(fileUrl, pages.f.Name, reader, false, "", nil, auth) if err != nil { glog.V(0).Infof("upload data %v to %s: %v", pages.f.Name, fileUrl, err) return nil, fmt.Errorf("upload data: %v", err) @@ -209,7 +182,7 @@ func (pages *ContinuousDirtyPages) saveToStorage(ctx context.Context, buf []byte return &filer_pb.FileChunk{ FileId: fileId, Offset: offset, - Size: uint64(len(buf)), + Size: uint64(size), Mtime: time.Now().UnixNano(), ETag: uploadResult.ETag, }, nil @@ -229,23 +202,11 @@ func min(x, y int64) int64 { return y } -func (pages *ContinuousDirtyPages) ReadDirtyData(ctx context.Context, data []byte, startOffset int64) (offset int64, size int, err error) { - bufSize := int64(len(data)) +func (pages *ContinuousDirtyPages) ReadDirtyData(ctx context.Context, data []byte, startOffset int64) (offset int64, size int) { pages.lock.Lock() defer pages.lock.Unlock() - if startOffset+bufSize < pages.Offset { - return - } - if startOffset >= pages.Offset+pages.Size { - return - } - - offset = max(pages.Offset, startOffset) - stopOffset := min(pages.Offset+pages.Size, startOffset+bufSize) - size = int(stopOffset - offset) - copy(data[offset-startOffset:], pages.Data[offset-pages.Offset:stopOffset-pages.Offset]) + return pages.intervals.ReadData(data, startOffset) - return } diff --git a/weed/filesys/dirty_page_interval.go b/weed/filesys/dirty_page_interval.go new file mode 100644 index 000000000..5c55268c7 --- /dev/null +++ b/weed/filesys/dirty_page_interval.go @@ -0,0 +1,190 @@ +package filesys + +import ( + "bytes" + "io" + "math" + + "github.com/chrislusf/seaweedfs/weed/glog" +) + +type IntervalNode struct { + Data []byte + Offset int64 + Size int64 + Next *IntervalNode +} + +type IntervalLinkedList struct { + Head *IntervalNode + Tail *IntervalNode +} + +type ContinuousIntervals struct { + lists []*IntervalLinkedList +} + +func (list *IntervalLinkedList) Offset() int64 { + return list.Head.Offset +} +func (list *IntervalLinkedList) Size() int64 { + return list.Tail.Offset + list.Tail.Size - list.Head.Offset +} +func (list *IntervalLinkedList) addNodeToTail(node *IntervalNode) { + // glog.V(0).Infof("add to tail [%d,%d) + [%d,%d) => [%d,%d)", list.Head.Offset, list.Tail.Offset+list.Tail.Size, node.Offset, node.Offset+node.Size, list.Head.Offset, node.Offset+node.Size) + list.Tail.Next = node + list.Tail = node +} +func (list *IntervalLinkedList) addNodeToHead(node *IntervalNode) { + // glog.V(0).Infof("add to head [%d,%d) + [%d,%d) => [%d,%d)", node.Offset, node.Offset+node.Size, list.Head.Offset, list.Tail.Offset+list.Tail.Size, node.Offset, list.Tail.Offset+list.Tail.Size) + node.Next = list.Head + list.Head = node +} + +func (list *IntervalLinkedList) ReadData(buf []byte, start, stop int64) { + t := list.Head + for { + + nodeStart, nodeStop := max(start, t.Offset), min(stop, t.Offset+t.Size) + if nodeStart < nodeStop { + glog.V(0).Infof("copying start=%d stop=%d t=[%d,%d) t.data=%d => bufSize=%d nodeStart=%d, nodeStop=%d", + start, stop, t.Offset, t.Offset+t.Size, len(t.Data), + len(buf), nodeStart, nodeStop) + copy(buf[nodeStart-start:], t.Data[nodeStart-t.Offset:nodeStop-t.Offset]) + } + + if t.Next == nil { + break + } + t = t.Next + } +} + +func (c *ContinuousIntervals) TotalSize() (total int64) { + for _, list := range c.lists { + total += list.Size() + } + return +} + +func (c *ContinuousIntervals) AddInterval(data []byte, offset int64) (hasOverlap bool) { + interval := &IntervalNode{Data: data, Offset: offset, Size: int64(len(data))} + + var prevList, nextList 
*IntervalLinkedList + + for _, list := range c.lists { + if list.Head.Offset == interval.Offset+interval.Size { + nextList = list + break + } + } + + for _, list := range c.lists { + if list.Head.Offset+list.Size() == offset { + list.addNodeToTail(interval) + prevList = list + break + } + if list.Head.Offset <= offset && offset < list.Head.Offset+list.Size() { + if list.Tail.Offset <= offset { + dataStartIndex := list.Tail.Offset + list.Tail.Size - offset + // glog.V(4).Infof("overlap data new [0,%d) same=%v", dataStartIndex, bytes.Compare(interval.Data[0:dataStartIndex], list.Tail.Data[len(list.Tail.Data)-int(dataStartIndex):])) + interval.Data = interval.Data[dataStartIndex:] + interval.Size -= dataStartIndex + interval.Offset = offset + dataStartIndex + // glog.V(4).Infof("overlapping append as [%d,%d) dataSize=%d", interval.Offset, interval.Offset+interval.Size, len(interval.Data)) + list.addNodeToTail(interval) + prevList = list + break + } + glog.V(4).Infof("overlapped! interval is [%d,%d) dataSize=%d", interval.Offset, interval.Offset+interval.Size, len(interval.Data)) + hasOverlap = true + return + } + } + + if prevList != nil && nextList != nil { + // glog.V(4).Infof("connecting [%d,%d) + [%d,%d) => [%d,%d)", prevList.Head.Offset, prevList.Tail.Offset+prevList.Tail.Size, nextList.Head.Offset, nextList.Tail.Offset+nextList.Tail.Size, prevList.Head.Offset, nextList.Tail.Offset+nextList.Tail.Size) + prevList.Tail.Next = nextList.Head + prevList.Tail = nextList.Tail + c.removeList(nextList) + } else if nextList != nil { + // add to head was not done when checking + nextList.addNodeToHead(interval) + } + if prevList == nil && nextList == nil { + c.lists = append(c.lists, &IntervalLinkedList{ + Head: interval, + Tail: interval, + }) + } + + return +} + +func (c *ContinuousIntervals) RemoveLargestIntervalLinkedList() *IntervalLinkedList { + var maxSize int64 + maxIndex := -1 + for k, list := range c.lists { + if maxSize <= list.Size() { + maxSize = list.Size() + maxIndex = k + } + } + if maxSize <= 0 { + return nil + } + + t := c.lists[maxIndex] + c.lists = append(c.lists[0:maxIndex], c.lists[maxIndex+1:]...) + return t + +} + +func (c *ContinuousIntervals) removeList(target *IntervalLinkedList) { + index := -1 + for k, list := range c.lists { + if list.Offset() == target.Offset() { + index = k + } + } + if index < 0 { + return + } + + c.lists = append(c.lists[0:index], c.lists[index+1:]...) + +} + +func (c *ContinuousIntervals) ReadData(data []byte, startOffset int64) (offset int64, size int) { + var minOffset int64 = math.MaxInt64 + var maxStop int64 + for _, list := range c.lists { + start := max(startOffset, list.Offset()) + stop := min(startOffset+int64(len(data)), list.Offset()+list.Size()) + if start <= stop { + list.ReadData(data[start-startOffset:], start, stop) + minOffset = min(minOffset, start) + maxStop = max(maxStop, stop) + } + } + + if minOffset == math.MaxInt64 { + return 0, 0 + } + + offset = minOffset + size = int(maxStop - offset) + return +} + +func (l *IntervalLinkedList) ToReader() io.Reader { + var readers []io.Reader + t := l.Head + readers = append(readers, bytes.NewReader(t.Data)) + for t.Next != nil { + t = t.Next + readers = append(readers, bytes.NewReader(t.Data)) + } + return io.MultiReader(readers...) 
+}
diff --git a/weed/filesys/file.go b/weed/filesys/file.go
index b1d53507b..5a823f516 100644
--- a/weed/filesys/file.go
+++ b/weed/filesys/file.go
@@ -230,12 +230,6 @@ func (file *File) maybeLoadEntry(ctx context.Context) error {
     return nil
 }
 
-func (file *File) addChunk(chunk *filer_pb.FileChunk) {
-    if chunk != nil {
-        file.addChunks([]*filer_pb.FileChunk{chunk})
-    }
-}
-
 func (file *File) addChunks(chunks []*filer_pb.FileChunk) {
 
     sort.Slice(chunks, func(i, j int) bool {
diff --git a/weed/filesys/filehandle.go b/weed/filesys/filehandle.go
index 981de7ea2..c3f06ae8a 100644
--- a/weed/filesys/filehandle.go
+++ b/weed/filesys/filehandle.go
@@ -55,8 +55,8 @@ func (fh *FileHandle) Read(ctx context.Context, req *fuse.ReadRequest, resp *fus
 
     totalRead, err := fh.readFromChunks(ctx, buff, req.Offset)
     if err == nil {
-        dirtyOffset, dirtySize, dirtyReadErr := fh.readFromDirtyPages(ctx, buff, req.Offset)
-        if dirtyReadErr == nil && totalRead+req.Offset < dirtyOffset+int64(dirtySize) {
+        dirtyOffset, dirtySize := fh.readFromDirtyPages(ctx, buff, req.Offset)
+        if totalRead+req.Offset < dirtyOffset+int64(dirtySize) {
             totalRead = dirtyOffset + int64(dirtySize) - req.Offset
         }
     }
@@ -70,7 +70,7 @@ func (fh *FileHandle) Read(ctx context.Context, req *fuse.ReadRequest, resp *fus
     return err
 }
 
-func (fh *FileHandle) readFromDirtyPages(ctx context.Context, buff []byte, startOffset int64) (offset int64, size int, err error) {
+func (fh *FileHandle) readFromDirtyPages(ctx context.Context, buff []byte, startOffset int64) (offset int64, size int) {
     return fh.dirtyPages.ReadDirtyData(ctx, buff, startOffset)
 }
 
@@ -102,8 +102,6 @@ func (fh *FileHandle) Write(ctx context.Context, req *fuse.WriteRequest, resp *f
 
     // write the request to volume servers
 
-    glog.V(4).Infof("%+v/%v write fh %d: [%d,%d)", fh.f.dir.Path, fh.f.Name, fh.handle, req.Offset, req.Offset+int64(len(req.Data)))
-
     chunks, err := fh.dirtyPages.AddPage(ctx, req.Offset, req.Data)
     if err != nil {
         glog.Errorf("%+v/%v write fh %d: [%d,%d): %v", fh.f.dir.Path, fh.f.Name, fh.handle, req.Offset, req.Offset+int64(len(req.Data)), err)
@@ -152,13 +150,16 @@ func (fh *FileHandle) Flush(ctx context.Context, req *fuse.FlushRequest) error {
     // send the data to the OS
     glog.V(4).Infof("%s fh %d flush %v", fh.f.fullpath(), fh.handle, req)
 
-    chunk, err := fh.dirtyPages.FlushToStorage(ctx)
+    chunks, err := fh.dirtyPages.FlushToStorage(ctx)
     if err != nil {
         glog.Errorf("flush %s/%s: %v", fh.f.dir.Path, fh.f.Name, err)
         return fmt.Errorf("flush %s/%s: %v", fh.f.dir.Path, fh.f.Name, err)
     }
 
-    fh.f.addChunk(chunk)
+    fh.f.addChunks(chunks)
+    if len(chunks) > 0 {
+        fh.dirtyMetadata = true
+    }
 
     if !fh.dirtyMetadata {
         return nil

From c936a12afad1632bbc9a3d72b4723ed86da81c33 Mon Sep 17 00:00:00 2001
From: Chris Lu
Date: Thu, 23 Jan 2020 21:59:58 -0800
Subject: [PATCH 0055/2432] mount: update the cached file size so file Attr()
 reads the latest size

If file Attr() returns a stale size during file writes, the OS, at least on
Mac, may try to resend the data to write, which is usually empty.
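A minimal sketch of the bookkeeping this commit describes, not code from the patch itself; the package and helper name below are made up for illustration. The idea is that a write only ever grows the cached size that Attr() reports, up to the end of the written range, while shrinking is left to truncate/Setattr handling:

package sketch

// extendFileSize returns the size Attr() should report after writing
// dataLen bytes at offset: the cached size grows to cover the end of the
// written range and is never reduced here.
func extendFileSize(currentSize uint64, offset int64, dataLen int) uint64 {
    writeEnd := offset + int64(dataLen) // first byte past the written range
    if writeEnd > int64(currentSize) {
        return uint64(writeEnd)
    }
    return currentSize
}

Applied to the fields the diff below touches, the new line in Write() would amount to
fh.f.entry.Attributes.FileSize = extendFileSize(fh.f.entry.Attributes.FileSize, req.Offset, len(req.Data)).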
--- weed/filesys/filehandle.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/weed/filesys/filehandle.go b/weed/filesys/filehandle.go index c3f06ae8a..bee153cc5 100644 --- a/weed/filesys/filehandle.go +++ b/weed/filesys/filehandle.go @@ -102,6 +102,8 @@ func (fh *FileHandle) Write(ctx context.Context, req *fuse.WriteRequest, resp *f // write the request to volume servers + fh.f.entry.Attributes.FileSize = uint64(max(req.Offset+int64(len(req.Data)), int64(fh.f.entry.Attributes.FileSize))) + chunks, err := fh.dirtyPages.AddPage(ctx, req.Offset, req.Data) if err != nil { glog.Errorf("%+v/%v write fh %d: [%d,%d): %v", fh.f.dir.Path, fh.f.Name, fh.handle, req.Offset, req.Offset+int64(len(req.Data)), err) From 2f75264ec7928e362f54d0be2453168d21c82834 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Thu, 23 Jan 2020 22:12:57 -0800 Subject: [PATCH 0056/2432] mount: avoid use uint64 for greater than zero test --- weed/filesys/file.go | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/weed/filesys/file.go b/weed/filesys/file.go index 5a823f516..c5f75ff4f 100644 --- a/weed/filesys/file.go +++ b/weed/filesys/file.go @@ -42,7 +42,7 @@ func (file *File) Attr(ctx context.Context, attr *fuse.Attr) error { glog.V(4).Infof("file Attr %s, open:%v, existing attr: %+v", file.fullpath(), file.isOpen, attr) - if file.isOpen <=0 { + if file.isOpen <= 0 { if err := file.maybeLoadEntry(ctx); err != nil { return err } @@ -108,10 +108,11 @@ func (file *File) Setattr(ctx context.Context, req *fuse.SetattrRequest, resp *f // fmt.Printf("truncate %v \n", fullPath) var chunks []*filer_pb.FileChunk for _, chunk := range file.entry.Chunks { - if uint64(chunk.Offset)+chunk.Size > req.Size { - chunk.Size = req.Size - uint64(chunk.Offset) + int64Size := int64(chunk.Size) + if chunk.Offset+int64Size > int64(req.Size) { + int64Size = int64(req.Size) - chunk.Offset } - if chunk.Size > 0 { + if int64Size > 0 { chunks = append(chunks, chunk) } } @@ -218,7 +219,7 @@ func (file *File) Forget() { } func (file *File) maybeLoadEntry(ctx context.Context) error { - if file.entry == nil || file.isOpen <= 0{ + if file.entry == nil || file.isOpen <= 0 { entry, err := file.wfs.maybeLoadEntry(ctx, file.dir.Path, file.Name) if err != nil { return err From 107e8a56ea0fee9eff996177003b32a0179d7651 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Fri, 24 Jan 2020 01:40:51 -0800 Subject: [PATCH 0057/2432] retry context canceled request --- weed/filesys/wfs.go | 24 +++++++++++++++++++----- weed/util/grpc_client_server.go | 9 ++++++++- 2 files changed, 27 insertions(+), 6 deletions(-) diff --git a/weed/filesys/wfs.go b/weed/filesys/wfs.go index 4cfab811b..bc78a0dbe 100644 --- a/weed/filesys/wfs.go +++ b/weed/filesys/wfs.go @@ -5,6 +5,7 @@ import ( "fmt" "math" "os" + "strings" "sync" "time" @@ -47,8 +48,8 @@ type WFS struct { listDirectoryEntriesCache *ccache.Cache // contains all open handles, protected by handlesLock - handlesLock sync.Mutex - handles []*FileHandle + handlesLock sync.Mutex + handles []*FileHandle pathToHandleIndex map[filer2.FullPath]int bufPool sync.Pool @@ -89,11 +90,24 @@ func (wfs *WFS) Root() (fs.Node, error) { func (wfs *WFS) WithFilerClient(ctx context.Context, fn func(filer_pb.SeaweedFilerClient) error) error { - return util.WithCachedGrpcClient(ctx, func(grpcConnection *grpc.ClientConn) error { + err := util.WithCachedGrpcClient(ctx, func(grpcConnection *grpc.ClientConn) error { client := filer_pb.NewSeaweedFilerClient(grpcConnection) return fn(client) }, wfs.option.FilerGrpcAddress, 
wfs.option.GrpcDialOption) + if err == nil { + return nil + } + if strings.Contains(err.Error(), "context canceled") { + time.Sleep(1337 * time.Millisecond) + glog.V(2).Infoln("retry context canceled request...") + return util.WithCachedGrpcClient(context.Background(), func(grpcConnection *grpc.ClientConn) error { + client := filer_pb.NewSeaweedFilerClient(grpcConnection) + return fn(client) + }, wfs.option.FilerGrpcAddress, wfs.option.GrpcDialOption) + } + return err + } func (wfs *WFS) AcquireHandle(file *File, uid, gid uint32) (fileHandle *FileHandle) { @@ -116,7 +130,7 @@ func (wfs *WFS) AcquireHandle(file *File, uid, gid uint32) (fileHandle *FileHand wfs.handles[i] = fileHandle fileHandle.handle = uint64(i) wfs.pathToHandleIndex[fullpath] = i - glog.V(4).Infof( "%s reuse fh %d", fullpath,fileHandle.handle) + glog.V(4).Infof("%s reuse fh %d", fullpath, fileHandle.handle) return } } @@ -124,7 +138,7 @@ func (wfs *WFS) AcquireHandle(file *File, uid, gid uint32) (fileHandle *FileHand wfs.handles = append(wfs.handles, fileHandle) fileHandle.handle = uint64(len(wfs.handles) - 1) wfs.pathToHandleIndex[fullpath] = int(fileHandle.handle) - glog.V(4).Infof( "%s new fh %d", fullpath,fileHandle.handle) + glog.V(4).Infof("%s new fh %d", fullpath, fileHandle.handle) return } diff --git a/weed/util/grpc_client_server.go b/weed/util/grpc_client_server.go index 31497ad35..63519d97a 100644 --- a/weed/util/grpc_client_server.go +++ b/weed/util/grpc_client_server.go @@ -64,7 +64,14 @@ func WithCachedGrpcClient(ctx context.Context, fn func(*grpc.ClientConn) error, existingConnection, found := grpcClients[address] if found { grpcClientsLock.Unlock() - return fn(existingConnection) + err := fn(existingConnection) + if err != nil { + grpcClientsLock.Lock() + delete(grpcClients, address) + grpcClientsLock.Unlock() + existingConnection.Close() + } + return err } grpcConnection, err := GrpcDial(ctx, address, opts...) 
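Patch 0057 combines two recovery steps: a cached gRPC connection whose call returns an error is evicted from the client cache and closed, and a call that failed only because the caller's context was canceled is retried once on a fresh background context. A minimal sketch of that retry shape, assuming hypothetical helpers (callWithConn stands in for WithCachedGrpcClient; none of these names are SeaweedFS APIs):

    package main

    import (
    	"context"
    	"fmt"
    	"strings"
    	"time"
    )

    // callWithConn stands in for a helper like WithCachedGrpcClient: it runs fn
    // against some (possibly cached) connection and returns fn's error.
    func callWithConn(ctx context.Context, fn func(ctx context.Context) error) error {
    	return fn(ctx)
    }

    // callWithRetryOnCancel retries exactly once, on a fresh background context,
    // when the first attempt failed only because the caller's context was canceled.
    func callWithRetryOnCancel(ctx context.Context, fn func(ctx context.Context) error) error {
    	err := callWithConn(ctx, fn)
    	if err == nil {
    		return nil
    	}
    	if strings.Contains(err.Error(), "context canceled") {
    		time.Sleep(100 * time.Millisecond) // brief pause before the retry, as in the patch
    		return callWithConn(context.Background(), fn)
    	}
    	return err
    }

    func main() {
    	attempts := 0
    	err := callWithRetryOnCancel(context.Background(), func(ctx context.Context) error {
    		attempts++
    		if attempts == 1 {
    			return fmt.Errorf("rpc error: code = Canceled desc = context canceled")
    		}
    		return nil
    	})
    	fmt.Println(attempts, err) // 2 <nil>
    }

Matching on the error string is a pragmatic choice here; the retry is deliberately limited to a single attempt so a persistently failing filer does not loop forever.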
From 9d93c836549629013739ec860c9e1adb20fafcf6 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Fri, 24 Jan 2020 01:41:31 -0800 Subject: [PATCH 0058/2432] mount: return IO error for mkdir --- weed/filesys/dir.go | 4 ++-- weed/filesys/filehandle.go | 15 +++++++++++---- 2 files changed, 13 insertions(+), 6 deletions(-) diff --git a/weed/filesys/dir.go b/weed/filesys/dir.go index f4132cbf0..4687d5c91 100644 --- a/weed/filesys/dir.go +++ b/weed/filesys/dir.go @@ -174,7 +174,7 @@ func (dir *Dir) Mkdir(ctx context.Context, req *fuse.MkdirRequest) (fs.Node, err glog.V(1).Infof("mkdir: %v", request) if _, err := client.CreateEntry(ctx, request); err != nil { glog.V(0).Infof("mkdir %s/%s: %v", dir.Path, req.Name, err) - return fuse.EIO + return err } return nil @@ -185,7 +185,7 @@ func (dir *Dir) Mkdir(ctx context.Context, req *fuse.MkdirRequest) (fs.Node, err return node, nil } - return nil, err + return nil, fuse.EIO } func (dir *Dir) Lookup(ctx context.Context, req *fuse.LookupRequest, resp *fuse.LookupResponse) (node fs.Node, err error) { diff --git a/weed/filesys/filehandle.go b/weed/filesys/filehandle.go index bee153cc5..9be290fe1 100644 --- a/weed/filesys/filehandle.go +++ b/weed/filesys/filehandle.go @@ -65,6 +65,7 @@ func (fh *FileHandle) Read(ctx context.Context, req *fuse.ReadRequest, resp *fus if err != nil { glog.Errorf("file handle read %s: %v", fh.f.fullpath(), err) + return fuse.EIO } return err @@ -103,11 +104,12 @@ func (fh *FileHandle) Write(ctx context.Context, req *fuse.WriteRequest, resp *f // write the request to volume servers fh.f.entry.Attributes.FileSize = uint64(max(req.Offset+int64(len(req.Data)), int64(fh.f.entry.Attributes.FileSize))) + glog.V(0).Infof("%v write [%d,%d)", fh.f.fullpath(), req.Offset, req.Offset+int64(len(req.Data))) chunks, err := fh.dirtyPages.AddPage(ctx, req.Offset, req.Data) if err != nil { - glog.Errorf("%+v/%v write fh %d: [%d,%d): %v", fh.f.dir.Path, fh.f.Name, fh.handle, req.Offset, req.Offset+int64(len(req.Data)), err) - return fmt.Errorf("write %s/%s at [%d,%d): %v", fh.f.dir.Path, fh.f.Name, req.Offset, req.Offset+int64(len(req.Data)), err) + glog.Errorf("%v write fh %d: [%d,%d): %v", fh.f.fullpath(), fh.handle, req.Offset, req.Offset+int64(len(req.Data)), err) + return fuse.EIO } resp.Size = len(req.Data) @@ -155,7 +157,7 @@ func (fh *FileHandle) Flush(ctx context.Context, req *fuse.FlushRequest) error { chunks, err := fh.dirtyPages.FlushToStorage(ctx) if err != nil { glog.Errorf("flush %s/%s: %v", fh.f.dir.Path, fh.f.Name, err) - return fmt.Errorf("flush %s/%s: %v", fh.f.dir.Path, fh.f.Name, err) + return fuse.EIO } fh.f.addChunks(chunks) @@ -209,5 +211,10 @@ func (fh *FileHandle) Flush(ctx context.Context, req *fuse.FlushRequest) error { fh.dirtyMetadata = false } - return err + if err != nil { + glog.Errorf("%v fh %d flush: %v", fh.f.fullpath(), fh.handle, err) + return fuse.EIO + } + + return nil } From 5f1109f143ea76a4f1e475acdb855a3714e9000d Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Fri, 24 Jan 2020 01:41:51 -0800 Subject: [PATCH 0059/2432] reduce log --- weed/filesys/dirty_page_interval.go | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/weed/filesys/dirty_page_interval.go b/weed/filesys/dirty_page_interval.go index 5c55268c7..47d329f82 100644 --- a/weed/filesys/dirty_page_interval.go +++ b/weed/filesys/dirty_page_interval.go @@ -47,9 +47,7 @@ func (list *IntervalLinkedList) ReadData(buf []byte, start, stop int64) { nodeStart, nodeStop := max(start, t.Offset), min(stop, t.Offset+t.Size) if nodeStart < nodeStop { 
- glog.V(0).Infof("copying start=%d stop=%d t=[%d,%d) t.data=%d => bufSize=%d nodeStart=%d, nodeStop=%d", - start, stop, t.Offset, t.Offset+t.Size, len(t.Data), - len(buf), nodeStart, nodeStop) + // glog.V(0).Infof("copying start=%d stop=%d t=[%d,%d) t.data=%d => bufSize=%d nodeStart=%d, nodeStop=%d", start, stop, t.Offset, t.Offset+t.Size, len(t.Data), len(buf), nodeStart, nodeStop) copy(buf[nodeStart-start:], t.Data[nodeStart-t.Offset:nodeStop-t.Offset]) } From 24f62ebe76bed4617395616c24378b129a9c1bc7 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Fri, 24 Jan 2020 10:02:53 -0800 Subject: [PATCH 0060/2432] minor changes --- weed/filesys/filehandle.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/weed/filesys/filehandle.go b/weed/filesys/filehandle.go index 9be290fe1..8269e159f 100644 --- a/weed/filesys/filehandle.go +++ b/weed/filesys/filehandle.go @@ -187,7 +187,7 @@ func (fh *FileHandle) Flush(ctx context.Context, req *fuse.FlushRequest) error { glog.V(3).Infof("%s/%s set chunks: %v", fh.f.dir.Path, fh.f.Name, len(fh.f.entry.Chunks)) for i, chunk := range fh.f.entry.Chunks { - glog.V(3).Infof("%s/%s chunks %d: %v [%d,%d)", fh.f.dir.Path, fh.f.Name, i, chunk.FileId, chunk.Offset, chunk.Offset+int64(chunk.Size)) + glog.V(3).Infof("%s chunks %d: %v [%d,%d)", fh.f.fullpath(), i, chunk.FileId, chunk.Offset, chunk.Offset+int64(chunk.Size)) } chunks, garbages := filer2.CompactFileChunks(fh.f.entry.Chunks) From e6f9f5da3a7fc9dfcd0363a98f1977c1578b00d2 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Fri, 24 Jan 2020 17:26:03 -0800 Subject: [PATCH 0061/2432] adjust logging --- weed/server/filer_grpc_server.go | 1 + weed/server/volume_server_handlers_read.go | 4 ++-- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/weed/server/filer_grpc_server.go b/weed/server/filer_grpc_server.go index d82ef9782..6f4d449ff 100644 --- a/weed/server/filer_grpc_server.go +++ b/weed/server/filer_grpc_server.go @@ -20,6 +20,7 @@ func (fs *FilerServer) LookupDirectoryEntry(ctx context.Context, req *filer_pb.L entry, err := fs.filer.FindEntry(ctx, filer2.FullPath(filepath.ToSlash(filepath.Join(req.Directory, req.Name)))) if err != nil { + glog.V(3).Infof("LookupDirectoryEntry %s: %+v, ", filepath.Join(req.Directory, req.Name), err) return nil, err } diff --git a/weed/server/volume_server_handlers_read.go b/weed/server/volume_server_handlers_read.go index cd11356b9..d89d13a0d 100644 --- a/weed/server/volume_server_handlers_read.go +++ b/weed/server/volume_server_handlers_read.go @@ -54,7 +54,7 @@ func (vs *VolumeServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request) return } - glog.V(4).Infoln("volume", volumeId, "reading", n) + // glog.V(4).Infoln("volume", volumeId, "reading", n) hasVolume := vs.store.HasVolume(volumeId) _, hasEcVolume := vs.store.FindEcVolume(volumeId) if !hasVolume && !hasEcVolume { @@ -88,7 +88,7 @@ func (vs *VolumeServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request) } else if hasEcVolume { count, err = vs.store.ReadEcShardNeedle(context.Background(), volumeId, n) } - glog.V(4).Infoln("read bytes", count, "error", err) + // glog.V(4).Infoln("read bytes", count, "error", err) if err != nil || count < 0 { glog.V(0).Infof("read %s isNormalVolume %v error: %v", r.URL.Path, hasVolume, err) w.WriteHeader(http.StatusNotFound) From 6107509c64adca478066fc881718d75b1e927465 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Fri, 24 Jan 2020 17:26:18 -0800 Subject: [PATCH 0062/2432] adjust logging --- weed/filer2/filer.go | 2 +- 1 file changed, 1 insertion(+), 1 
deletion(-) diff --git a/weed/filer2/filer.go b/weed/filer2/filer.go index 96a010fce..5afb2b255 100644 --- a/weed/filer2/filer.go +++ b/weed/filer2/filer.go @@ -96,7 +96,7 @@ func (f *Filer) CreateEntry(ctx context.Context, entry *Entry, o_excl bool) erro glog.V(4).Infof("find uncached directory: %s", dirPath) dirEntry, _ = f.FindEntry(ctx, FullPath(dirPath)) } else { - glog.V(4).Infof("found cached directory: %s", dirPath) + // glog.V(4).Infof("found cached directory: %s", dirPath) } // no such existing directory From 3cdb27fafccbeff0e8641e5531ae6ad9d746847a Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Fri, 24 Jan 2020 17:55:39 -0800 Subject: [PATCH 0063/2432] adjust logging --- weed/server/filer_grpc_server.go | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/weed/server/filer_grpc_server.go b/weed/server/filer_grpc_server.go index 6f4d449ff..bd0e25c80 100644 --- a/weed/server/filer_grpc_server.go +++ b/weed/server/filer_grpc_server.go @@ -133,6 +133,7 @@ func (fs *FilerServer) CreateEntry(ctx context.Context, req *filer_pb.CreateEntr chunks, garbages := filer2.CompactFileChunks(req.Entry.Chunks) if req.Entry.Attributes == nil { + glog.V(3).Infof("CreateEntry %s: nil attributes", filepath.Join(req.Directory, req.Entry.Name)) return nil, fmt.Errorf("can not create entry with empty attributes") } @@ -144,6 +145,8 @@ func (fs *FilerServer) CreateEntry(ctx context.Context, req *filer_pb.CreateEntr if err == nil { fs.filer.DeleteChunks(garbages) + } else { + glog.V(3).Infof("CreateEntry %s: %v", filepath.Join(req.Directory, req.Entry.Name), err) } return &filer_pb.CreateEntryResponse{}, err @@ -196,6 +199,8 @@ func (fs *FilerServer) UpdateEntry(ctx context.Context, req *filer_pb.UpdateEntr if err = fs.filer.UpdateEntry(ctx, entry, newEntry); err == nil { fs.filer.DeleteChunks(unusedChunks) fs.filer.DeleteChunks(garbages) + } else { + glog.V(3).Infof("UpdateEntry %s: %v", filepath.Join(req.Directory, req.Entry.Name), err) } fs.filer.NotifyUpdateEvent(entry, newEntry, true) @@ -240,9 +245,11 @@ func (fs *FilerServer) AssignVolume(ctx context.Context, req *filer_pb.AssignVol } assignResult, err := operation.Assign(fs.filer.GetMaster(), fs.grpcDialOption, assignRequest, altRequest) if err != nil { + glog.V(3).Infof("AssignVolume: %v", err) return nil, fmt.Errorf("assign volume: %v", err) } if assignResult.Error != "" { + glog.V(3).Infof("AssignVolume error: %v", assignResult.Error) return nil, fmt.Errorf("assign volume result: %v", assignResult.Error) } From 8e071c2fa65f7bb93188d22cd70db328889a2e66 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Fri, 24 Jan 2020 17:56:46 -0800 Subject: [PATCH 0064/2432] docker: tweaking docker file --- docker/Dockerfile.go_build | 12 ++++++------ docker/dev-compose.yml | 6 +++--- 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/docker/Dockerfile.go_build b/docker/Dockerfile.go_build index 67d391fdc..87f97dfc8 100644 --- a/docker/Dockerfile.go_build +++ b/docker/Dockerfile.go_build @@ -1,8 +1,9 @@ -FROM golang:latest -RUN go get github.com/chrislusf/seaweedfs/weed -RUN rm -rf /go/src/github.com/coreos/etcd/vendor/golang.org/x/net/trace -RUN rm -rf /go/src/go.etcd.io/etcd/vendor/golang.org/x/net/trace -RUN go get github.com/chrislusf/seaweedfs/weed +FROM frolvlad/alpine-glibc +RUN apk add git go g++ +RUN mkdir -p /go/src/github.com/chrislusf/ +RUN git clone https://github.com/chrislusf/seaweedfs /go/src/github.com/chrislusf/seaweedfs +RUN cd /go/src/github.com/chrislusf/seaweedfs/weed && go install +RUN cp /root/go/bin/weed /usr/bin/ # volume 
server gprc port EXPOSE 18080 @@ -27,6 +28,5 @@ RUN mkdir -p /etc/seaweedfs RUN cp /go/src/github.com/chrislusf/seaweedfs/docker/filer.toml /etc/seaweedfs/filer.toml RUN cp /go/src/github.com/chrislusf/seaweedfs/docker/entrypoint.sh /entrypoint.sh RUN chmod +x /entrypoint.sh -RUN cp /go/bin/weed /usr/bin/ ENTRYPOINT ["/entrypoint.sh"] diff --git a/docker/dev-compose.yml b/docker/dev-compose.yml index 1f44ff483..19331bebb 100644 --- a/docker/dev-compose.yml +++ b/docker/dev-compose.yml @@ -16,7 +16,7 @@ services: ports: - 8080:8080 - 18080:18080 - command: 'volume -max=5 -mserver="master:9333" -port=8080' + command: '-v=4 volume -max=5 -mserver="master:9333" -port=8080 -ip=volume' depends_on: - master filer: @@ -26,7 +26,7 @@ services: ports: - 8888:8888 - 18888:18888 - command: 'filer -master="master:9333"' + command: '-v=4 filer -master="master:9333"' depends_on: - master - volume @@ -36,7 +36,7 @@ services: dockerfile: Dockerfile.go_build ports: - 8333:8333 - command: 's3 -filer="filer:8888"' + command: '-v=4 s3 -filer="filer:8888"' depends_on: - master - volume From d4cde5df49b4165f5695f0f4d7a8e3e1aa8e50ab Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Fri, 24 Jan 2020 18:07:34 -0800 Subject: [PATCH 0065/2432] return empty response if not found --- weed/server/filer_grpc_server.go | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/weed/server/filer_grpc_server.go b/weed/server/filer_grpc_server.go index bd0e25c80..5145035d2 100644 --- a/weed/server/filer_grpc_server.go +++ b/weed/server/filer_grpc_server.go @@ -19,6 +19,9 @@ import ( func (fs *FilerServer) LookupDirectoryEntry(ctx context.Context, req *filer_pb.LookupDirectoryEntryRequest) (*filer_pb.LookupDirectoryEntryResponse, error) { entry, err := fs.filer.FindEntry(ctx, filer2.FullPath(filepath.ToSlash(filepath.Join(req.Directory, req.Name)))) + if err == filer2.ErrNotFound { + return &filer_pb.LookupDirectoryEntryResponse{}, nil + } if err != nil { glog.V(3).Infof("LookupDirectoryEntry %s: %+v, ", filepath.Join(req.Directory, req.Name), err) return nil, err @@ -245,11 +248,11 @@ func (fs *FilerServer) AssignVolume(ctx context.Context, req *filer_pb.AssignVol } assignResult, err := operation.Assign(fs.filer.GetMaster(), fs.grpcDialOption, assignRequest, altRequest) if err != nil { - glog.V(3).Infof("AssignVolume: %v", err) + glog.V(3).Infof("AssignVolume: %v", err) return nil, fmt.Errorf("assign volume: %v", err) } if assignResult.Error != "" { - glog.V(3).Infof("AssignVolume error: %v", assignResult.Error) + glog.V(3).Infof("AssignVolume error: %v", assignResult.Error) return nil, fmt.Errorf("assign volume result: %v", assignResult.Error) } From b6e6ca85954f27ed3a5f8a9cdb1eab26a4122fcd Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Fri, 24 Jan 2020 20:06:58 -0800 Subject: [PATCH 0066/2432] adjust logs --- weed/filer2/filer.go | 6 ++++++ weed/storage/volume_read_write.go | 2 +- 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/weed/filer2/filer.go b/weed/filer2/filer.go index 5afb2b255..f95dce4df 100644 --- a/weed/filer2/filer.go +++ b/weed/filer2/filer.go @@ -74,6 +74,8 @@ func (f *Filer) RollbackTransaction(ctx context.Context) error { func (f *Filer) CreateEntry(ctx context.Context, entry *Entry, o_excl bool) error { + glog.V(4).Infof("CreateEntry create %s", entry.FullPath) + if string(entry.FullPath) == "/" { return nil } @@ -127,6 +129,7 @@ func (f *Filer) CreateEntry(ctx context.Context, entry *Entry, o_excl bool) erro } } else if !dirEntry.IsDirectory() { + glog.Errorf("CreateEntry %s: %s 
should be a directory", entry.FullPath, dirPath) return fmt.Errorf("%s is a file", dirPath) } @@ -141,6 +144,7 @@ func (f *Filer) CreateEntry(ctx context.Context, entry *Entry, o_excl bool) erro } if lastDirectoryEntry == nil { + glog.Errorf("CreateEntry %s: lastDirectoryEntry is nil", entry.FullPath) return fmt.Errorf("parent folder not found: %v", entry.FullPath) } @@ -169,6 +173,8 @@ func (f *Filer) CreateEntry(ctx context.Context, entry *Entry, o_excl bool) erro } } + // glog.V(4).Infof("CreateEntry %s: created", entry.FullPath) + f.NotifyUpdateEvent(oldEntry, entry, true) f.deleteChunksIfNotNew(oldEntry, entry) diff --git a/weed/storage/volume_read_write.go b/weed/storage/volume_read_write.go index 0aa3f794b..ac6154cef 100644 --- a/weed/storage/volume_read_write.go +++ b/weed/storage/volume_read_write.go @@ -64,7 +64,7 @@ func (v *Volume) Destroy() (err error) { } func (v *Volume) writeNeedle(n *needle.Needle) (offset uint64, size uint32, isUnchanged bool, err error) { - glog.V(4).Infof("writing needle %s", needle.NewFileIdFromNeedle(v.Id, n).String()) + // glog.V(4).Infof("writing needle %s", needle.NewFileIdFromNeedle(v.Id, n).String()) v.dataFileAccessLock.Lock() defer v.dataFileAccessLock.Unlock() if v.isFileUnchanged(n) { From 09bf256fc6993bcd8eaf7983a7e335a6a37826ab Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Fri, 24 Jan 2020 20:07:12 -0800 Subject: [PATCH 0067/2432] less logs for volume servers --- docker/dev-compose.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docker/dev-compose.yml b/docker/dev-compose.yml index 19331bebb..c2f588a60 100644 --- a/docker/dev-compose.yml +++ b/docker/dev-compose.yml @@ -16,7 +16,7 @@ services: ports: - 8080:8080 - 18080:18080 - command: '-v=4 volume -max=5 -mserver="master:9333" -port=8080 -ip=volume' + command: '-v=2 volume -max=5 -mserver="master:9333" -port=8080 -ip=volume' depends_on: - master filer: From 6e25acc6818d95b8ea60cd6b3c314d589e724ceb Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Fri, 24 Jan 2020 22:01:51 -0800 Subject: [PATCH 0068/2432] add logging --- weed/filer2/filer.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/weed/filer2/filer.go b/weed/filer2/filer.go index f95dce4df..f7d782d62 100644 --- a/weed/filer2/filer.go +++ b/weed/filer2/filer.go @@ -122,6 +122,7 @@ func (f *Filer) CreateEntry(ctx context.Context, entry *Entry, o_excl bool) erro mkdirErr := f.store.InsertEntry(ctx, dirEntry) if mkdirErr != nil { if _, err := f.FindEntry(ctx, FullPath(dirPath)); err == ErrNotFound { + glog.V(3).Infof("mkdir %s: %v", dirPath, mkdirErr) return fmt.Errorf("mkdir %s: %v", dirPath, mkdirErr) } } else { @@ -165,6 +166,7 @@ func (f *Filer) CreateEntry(ctx context.Context, entry *Entry, o_excl bool) erro } } else { if o_excl { + glog.V(3).Infof("EEXIST: entry %s already exists", entry.FullPath) return fmt.Errorf("EEXIST: entry %s already exists", entry.FullPath) } if err := f.UpdateEntry(ctx, oldEntry, entry); err != nil { @@ -173,7 +175,7 @@ func (f *Filer) CreateEntry(ctx context.Context, entry *Entry, o_excl bool) erro } } - // glog.V(4).Infof("CreateEntry %s: created", entry.FullPath) + glog.V(4).Infof("CreateEntry %s: created", entry.FullPath) f.NotifyUpdateEvent(oldEntry, entry, true) From 28d1b348564b7aa0e61c894136c6b882367966e5 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Fri, 24 Jan 2020 22:13:06 -0800 Subject: [PATCH 0069/2432] logging --- weed/filer2/filer.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/weed/filer2/filer.go b/weed/filer2/filer.go index 
f7d782d62..ce6bb8133 100644 --- a/weed/filer2/filer.go +++ b/weed/filer2/filer.go @@ -175,12 +175,12 @@ func (f *Filer) CreateEntry(ctx context.Context, entry *Entry, o_excl bool) erro } } - glog.V(4).Infof("CreateEntry %s: created", entry.FullPath) - f.NotifyUpdateEvent(oldEntry, entry, true) f.deleteChunksIfNotNew(oldEntry, entry) + glog.V(4).Infof("CreateEntry %s: created", entry.FullPath) + return nil } From 8a42aa822176dc71795318374c359a2794bc2356 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Sat, 25 Jan 2020 00:31:53 -0800 Subject: [PATCH 0070/2432] adjust logging --- weed/filer2/filer.go | 3 +-- weed/filer2/filer_client_util.go | 6 +++--- weed/filesys/dir.go | 2 +- weed/filesys/xattr.go | 4 ++-- 4 files changed, 7 insertions(+), 8 deletions(-) diff --git a/weed/filer2/filer.go b/weed/filer2/filer.go index ce6bb8133..c603777bd 100644 --- a/weed/filer2/filer.go +++ b/weed/filer2/filer.go @@ -74,8 +74,6 @@ func (f *Filer) RollbackTransaction(ctx context.Context) error { func (f *Filer) CreateEntry(ctx context.Context, entry *Entry, o_excl bool) error { - glog.V(4).Infof("CreateEntry create %s", entry.FullPath) - if string(entry.FullPath) == "/" { return nil } @@ -159,6 +157,7 @@ func (f *Filer) CreateEntry(ctx context.Context, entry *Entry, o_excl bool) erro oldEntry, _ := f.FindEntry(ctx, entry.FullPath) + glog.V(4).Infof("CreateEntry %s: old entry : %v", entry.FullPath, oldEntry) if oldEntry == nil { if err := f.store.InsertEntry(ctx, entry); err != nil { glog.Errorf("insert entry %s: %v", entry.FullPath, err) diff --git a/weed/filer2/filer_client_util.go b/weed/filer2/filer_client_util.go index b1c579447..1c5af7fe2 100644 --- a/weed/filer2/filer_client_util.go +++ b/weed/filer2/filer_client_util.go @@ -104,18 +104,18 @@ func GetEntry(ctx context.Context, filerClient FilerClient, fullFilePath FullPat Name: name, } - glog.V(3).Infof("read %s request: %v", fullFilePath, request) + // glog.V(3).Infof("read %s request: %v", fullFilePath, request) resp, err := client.LookupDirectoryEntry(ctx, request) if err != nil { if err == ErrNotFound || strings.Contains(err.Error(), ErrNotFound.Error()) { return nil } - glog.V(3).Infof("read %s attr %v: %v", fullFilePath, request, err) + glog.V(3).Infof("read %s %v: %v", fullFilePath, resp, err) return err } if resp.Entry == nil { - glog.V(3).Infof("read %s entry: %v", fullFilePath, entry) + // glog.V(3).Infof("read %s entry: %v", fullFilePath, entry) return nil } diff --git a/weed/filesys/dir.go b/weed/filesys/dir.go index 4687d5c91..b783cbcbe 100644 --- a/weed/filesys/dir.go +++ b/weed/filesys/dir.go @@ -196,7 +196,7 @@ func (dir *Dir) Lookup(ctx context.Context, req *fuse.LookupRequest, resp *fuse. 
entry := dir.wfs.cacheGet(fullFilePath) if entry == nil { - glog.V(3).Infof("dir Lookup cache miss %s", fullFilePath) + // glog.V(3).Infof("dir Lookup cache miss %s", fullFilePath) entry, err = filer2.GetEntry(ctx, dir.wfs, fullFilePath) if err != nil { glog.V(1).Infof("dir GetEntry %s: %v", fullFilePath, err) diff --git a/weed/filesys/xattr.go b/weed/filesys/xattr.go index 52a447d95..75ba0f2ba 100644 --- a/weed/filesys/xattr.go +++ b/weed/filesys/xattr.go @@ -115,7 +115,7 @@ func (wfs *WFS) maybeLoadEntry(ctx context.Context, dir, name string) (entry *fi if entry != nil { return } - glog.V(3).Infof("read entry cache miss %s", fullpath) + // glog.V(3).Infof("read entry cache miss %s", fullpath) err = wfs.WithFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { @@ -130,7 +130,7 @@ func (wfs *WFS) maybeLoadEntry(ctx context.Context, dir, name string) (entry *fi glog.V(3).Infof("file attr read not found file %v: %v", request, err) return fuse.ENOENT } - glog.V(3).Infof("file attr read file %v: %v", request, err) + glog.V(3).Infof("attr read %v: %v", request, err) return fuse.EIO } From 90e30c01704219b86ebb8132b66b6c4adcf24d09 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Sat, 25 Jan 2020 00:32:18 -0800 Subject: [PATCH 0071/2432] log --- weed/filesys/file.go | 1 + 1 file changed, 1 insertion(+) diff --git a/weed/filesys/file.go b/weed/filesys/file.go index c5f75ff4f..cc0717f18 100644 --- a/weed/filesys/file.go +++ b/weed/filesys/file.go @@ -54,6 +54,7 @@ func (file *File) Attr(ctx context.Context, attr *fuse.Attr) error { attr.Size = filer2.TotalSize(file.entry.Chunks) if file.isOpen > 0 { attr.Size = file.entry.Attributes.FileSize + glog.V(4).Infof("file Attr %s, open:%v, size: %d", file.fullpath(), file.isOpen, attr.Size) } attr.Crtime = time.Unix(file.entry.Attributes.Crtime, 0) attr.Mtime = time.Unix(file.entry.Attributes.Mtime, 0) From 8cccccce9f0ae4d145b8da07ee7b615cb01ce23e Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Sat, 25 Jan 2020 01:15:54 -0800 Subject: [PATCH 0072/2432] mount: reuse the entry object for the new directory avoid mkdir and then query for the same directory reduces these "context canceled" issues attr read directory:"/seaweedfs/other/java/s3copier/src/main/java/com" name:"seaweedfs" : rpc error: code = Canceled desc = context canceled --- weed/filesys/dir.go | 26 ++++++++++++++------------ 1 file changed, 14 insertions(+), 12 deletions(-) diff --git a/weed/filesys/dir.go b/weed/filesys/dir.go index b783cbcbe..7af5544ff 100644 --- a/weed/filesys/dir.go +++ b/weed/filesys/dir.go @@ -154,21 +154,23 @@ func (dir *Dir) Create(ctx context.Context, req *fuse.CreateRequest, func (dir *Dir) Mkdir(ctx context.Context, req *fuse.MkdirRequest) (fs.Node, error) { + newEntry := &filer_pb.Entry{ + Name: req.Name, + IsDirectory: true, + Attributes: &filer_pb.FuseAttributes{ + Mtime: time.Now().Unix(), + Crtime: time.Now().Unix(), + FileMode: uint32(req.Mode &^ dir.wfs.option.Umask), + Uid: req.Uid, + Gid: req.Gid, + }, + } + err := dir.wfs.WithFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { request := &filer_pb.CreateEntryRequest{ Directory: dir.Path, - Entry: &filer_pb.Entry{ - Name: req.Name, - IsDirectory: true, - Attributes: &filer_pb.FuseAttributes{ - Mtime: time.Now().Unix(), - Crtime: time.Now().Unix(), - FileMode: uint32(req.Mode &^ dir.wfs.option.Umask), - Uid: req.Uid, - Gid: req.Gid, - }, - }, + Entry: newEntry, } glog.V(1).Infof("mkdir: %v", request) @@ -181,7 +183,7 @@ func (dir *Dir) Mkdir(ctx context.Context, req *fuse.MkdirRequest) 
(fs.Node, err }) if err == nil { - node := dir.newDirectory(filer2.NewFullPath(dir.Path, req.Name), nil) + node := dir.newDirectory(filer2.NewFullPath(dir.Path, req.Name), newEntry) return node, nil } From 4433c7bdca9708c043dab80073c04c508f289865 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Sat, 25 Jan 2020 07:34:09 -0800 Subject: [PATCH 0073/2432] logs --- weed/filesys/filehandle.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/weed/filesys/filehandle.go b/weed/filesys/filehandle.go index 8269e159f..9fe4201db 100644 --- a/weed/filesys/filehandle.go +++ b/weed/filesys/filehandle.go @@ -79,7 +79,7 @@ func (fh *FileHandle) readFromChunks(ctx context.Context, buff []byte, offset in // this value should come from the filer instead of the old f if len(fh.f.entry.Chunks) == 0 { - glog.V(1).Infof("empty fh %v/%v", fh.f.dir.Path, fh.f.Name) + glog.V(1).Infof("empty fh %v", fh.f.fullpath()) return 0, nil } @@ -156,7 +156,7 @@ func (fh *FileHandle) Flush(ctx context.Context, req *fuse.FlushRequest) error { chunks, err := fh.dirtyPages.FlushToStorage(ctx) if err != nil { - glog.Errorf("flush %s/%s: %v", fh.f.dir.Path, fh.f.Name, err) + glog.Errorf("flush %s: %v", fh.f.fullpath(), err) return fuse.EIO } @@ -185,7 +185,7 @@ func (fh *FileHandle) Flush(ctx context.Context, req *fuse.FlushRequest) error { Entry: fh.f.entry, } - glog.V(3).Infof("%s/%s set chunks: %v", fh.f.dir.Path, fh.f.Name, len(fh.f.entry.Chunks)) + glog.V(3).Infof("%s set chunks: %v", fh.f.fullpath(), len(fh.f.entry.Chunks)) for i, chunk := range fh.f.entry.Chunks { glog.V(3).Infof("%s chunks %d: %v [%d,%d)", fh.f.fullpath(), i, chunk.FileId, chunk.Offset, chunk.Offset+int64(chunk.Size)) } @@ -201,7 +201,7 @@ func (fh *FileHandle) Flush(ctx context.Context, req *fuse.FlushRequest) error { fh.f.wfs.deleteFileChunks(ctx, garbages) for i, chunk := range garbages { - glog.V(3).Infof("garbage %s/%s chunks %d: %v [%d,%d)", fh.f.dir.Path, fh.f.Name, i, chunk.FileId, chunk.Offset, chunk.Offset+int64(chunk.Size)) + glog.V(3).Infof("garbage %s chunks %d: %v [%d,%d)", fh.f.fullpath(), i, chunk.FileId, chunk.Offset, chunk.Offset+int64(chunk.Size)) } return nil From 94526f8e10a5fa9a22f38f46a82e411eaa991892 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Sat, 25 Jan 2020 07:50:54 -0800 Subject: [PATCH 0074/2432] 2-stage docker dev image build fix https://github.com/chrislusf/seaweedfs/issues/1047 --- docker/Dockerfile.go_build | 13 ++++--- ...{dev-compose.yml => local-dev-compose.yml} | 0 docker/seaweedfs-dev-compose.yml | 35 +++++++++++++++++++ 3 files changed, 43 insertions(+), 5 deletions(-) rename docker/{dev-compose.yml => local-dev-compose.yml} (100%) create mode 100644 docker/seaweedfs-dev-compose.yml diff --git a/docker/Dockerfile.go_build b/docker/Dockerfile.go_build index 87f97dfc8..306ce3aa1 100644 --- a/docker/Dockerfile.go_build +++ b/docker/Dockerfile.go_build @@ -1,9 +1,15 @@ -FROM frolvlad/alpine-glibc +FROM frolvlad/alpine-glibc as builder RUN apk add git go g++ RUN mkdir -p /go/src/github.com/chrislusf/ RUN git clone https://github.com/chrislusf/seaweedfs /go/src/github.com/chrislusf/seaweedfs RUN cd /go/src/github.com/chrislusf/seaweedfs/weed && go install -RUN cp /root/go/bin/weed /usr/bin/ + +FROM alpine AS final +LABEL author="Chris Lu" +COPY --from=builder /root/go/bin/weed /usr/bin/ +RUN mkdir -p /etc/seaweedfs +COPY --from=builder /go/src/github.com/chrislusf/seaweedfs/docker/filer.toml /etc/seaweedfs/filer.toml +COPY --from=builder /go/src/github.com/chrislusf/seaweedfs/docker/entrypoint.sh 
/entrypoint.sh # volume server gprc port EXPOSE 18080 @@ -24,9 +30,6 @@ RUN mkdir -p /data/filerldb2 VOLUME /data -RUN mkdir -p /etc/seaweedfs -RUN cp /go/src/github.com/chrislusf/seaweedfs/docker/filer.toml /etc/seaweedfs/filer.toml -RUN cp /go/src/github.com/chrislusf/seaweedfs/docker/entrypoint.sh /entrypoint.sh RUN chmod +x /entrypoint.sh ENTRYPOINT ["/entrypoint.sh"] diff --git a/docker/dev-compose.yml b/docker/local-dev-compose.yml similarity index 100% rename from docker/dev-compose.yml rename to docker/local-dev-compose.yml diff --git a/docker/seaweedfs-dev-compose.yml b/docker/seaweedfs-dev-compose.yml new file mode 100644 index 000000000..765770084 --- /dev/null +++ b/docker/seaweedfs-dev-compose.yml @@ -0,0 +1,35 @@ +version: '2' + +services: + master: + image: chrislusf/seaweedfs:dev # use a remote dev image + ports: + - 9333:9333 + - 19333:19333 + command: "master -ip=master" + volume: + image: chrislusf/seaweedfs:dev # use a remote dev image + ports: + - 8080:8080 + - 18080:18080 + command: '-v=2 volume -max=5 -mserver="master:9333" -port=8080 -ip=volume' + depends_on: + - master + filer: + image: chrislusf/seaweedfs:dev # use a remote dev image + ports: + - 8888:8888 + - 18888:18888 + command: '-v=4 filer -master="master:9333"' + depends_on: + - master + - volume + s3: + image: chrislusf/seaweedfs:dev # use a remote dev image + ports: + - 8333:8333 + command: '-v=4 s3 -filer="filer:8888"' + depends_on: + - master + - volume + - filer From 9863f51cc8280a613eb816f5b73bda0fcc60f459 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Sat, 25 Jan 2020 08:04:45 -0800 Subject: [PATCH 0075/2432] use docker built binaries for faster automated release --- docker/Dockerfile | 31 +++++++++++-------------------- docker/Dockerfile.go_build | 35 ----------------------------------- docker/local-dev-compose.yml | 8 ++++---- 3 files changed, 15 insertions(+), 59 deletions(-) delete mode 100644 docker/Dockerfile.go_build diff --git a/docker/Dockerfile b/docker/Dockerfile index 38117a3dc..306ce3aa1 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -1,22 +1,15 @@ -FROM frolvlad/alpine-glibc +FROM frolvlad/alpine-glibc as builder +RUN apk add git go g++ +RUN mkdir -p /go/src/github.com/chrislusf/ +RUN git clone https://github.com/chrislusf/seaweedfs /go/src/github.com/chrislusf/seaweedfs +RUN cd /go/src/github.com/chrislusf/seaweedfs/weed && go install -# Supercronic install settings -ENV SUPERCRONIC_URL=https://github.com/aptible/supercronic/releases/download/v0.1.8/supercronic-linux-amd64 \ - SUPERCRONIC=supercronic-linux-amd64 \ - SUPERCRONIC_SHA1SUM=be43e64c45acd6ec4fce5831e03759c89676a0ea - -# Install SeaweedFS and Supercronic ( for cron job mode ) -# Tried to use curl only (curl -o /tmp/linux_amd64.tar.gz ...), however it turned out that the following tar command failed with "gzip: stdin: not in gzip format" -RUN apk add --no-cache --virtual build-dependencies --update wget curl ca-certificates && \ - wget -P /tmp https://github.com/$(curl -s -L https://github.com/chrislusf/seaweedfs/releases/latest | egrep -o 'chrislusf/seaweedfs/releases/download/.*/linux_amd64.tar.gz') && \ - tar -C /usr/bin/ -xzvf /tmp/linux_amd64.tar.gz && \ - curl -fsSLO "$SUPERCRONIC_URL" && \ - echo "${SUPERCRONIC_SHA1SUM} ${SUPERCRONIC}" | sha1sum -c - && \ - chmod +x "$SUPERCRONIC" && \ - mv "$SUPERCRONIC" "/usr/local/bin/${SUPERCRONIC}" && \ - ln -s "/usr/local/bin/${SUPERCRONIC}" /usr/local/bin/supercronic && \ - apk del build-dependencies && \ - rm -rf /tmp/* +FROM alpine AS final +LABEL author="Chris Lu" 
+COPY --from=builder /root/go/bin/weed /usr/bin/ +RUN mkdir -p /etc/seaweedfs +COPY --from=builder /go/src/github.com/chrislusf/seaweedfs/docker/filer.toml /etc/seaweedfs/filer.toml +COPY --from=builder /go/src/github.com/chrislusf/seaweedfs/docker/entrypoint.sh /entrypoint.sh # volume server gprc port EXPOSE 18080 @@ -37,8 +30,6 @@ RUN mkdir -p /data/filerldb2 VOLUME /data -COPY filer.toml /etc/seaweedfs/filer.toml -COPY entrypoint.sh /entrypoint.sh RUN chmod +x /entrypoint.sh ENTRYPOINT ["/entrypoint.sh"] diff --git a/docker/Dockerfile.go_build b/docker/Dockerfile.go_build deleted file mode 100644 index 306ce3aa1..000000000 --- a/docker/Dockerfile.go_build +++ /dev/null @@ -1,35 +0,0 @@ -FROM frolvlad/alpine-glibc as builder -RUN apk add git go g++ -RUN mkdir -p /go/src/github.com/chrislusf/ -RUN git clone https://github.com/chrislusf/seaweedfs /go/src/github.com/chrislusf/seaweedfs -RUN cd /go/src/github.com/chrislusf/seaweedfs/weed && go install - -FROM alpine AS final -LABEL author="Chris Lu" -COPY --from=builder /root/go/bin/weed /usr/bin/ -RUN mkdir -p /etc/seaweedfs -COPY --from=builder /go/src/github.com/chrislusf/seaweedfs/docker/filer.toml /etc/seaweedfs/filer.toml -COPY --from=builder /go/src/github.com/chrislusf/seaweedfs/docker/entrypoint.sh /entrypoint.sh - -# volume server gprc port -EXPOSE 18080 -# volume server http port -EXPOSE 8080 -# filer server gprc port -EXPOSE 18888 -# filer server http port -EXPOSE 8888 -# master server shared gprc port -EXPOSE 19333 -# master server shared http port -EXPOSE 9333 -# s3 server http port -EXPOSE 8333 - -RUN mkdir -p /data/filerldb2 - -VOLUME /data - -RUN chmod +x /entrypoint.sh - -ENTRYPOINT ["/entrypoint.sh"] diff --git a/docker/local-dev-compose.yml b/docker/local-dev-compose.yml index c2f588a60..02ed1e6a3 100644 --- a/docker/local-dev-compose.yml +++ b/docker/local-dev-compose.yml @@ -4,7 +4,7 @@ services: master: build: context: . - dockerfile: Dockerfile.go_build + dockerfile: Dockerfile ports: - 9333:9333 - 19333:19333 @@ -12,7 +12,7 @@ services: volume: build: context: . - dockerfile: Dockerfile.go_build + dockerfile: Dockerfile ports: - 8080:8080 - 18080:18080 @@ -22,7 +22,7 @@ services: filer: build: context: . - dockerfile: Dockerfile.go_build + dockerfile: Dockerfile ports: - 8888:8888 - 18888:18888 @@ -33,7 +33,7 @@ services: s3: build: context: . 
- dockerfile: Dockerfile.go_build + dockerfile: Dockerfile ports: - 8333:8333 command: '-v=4 s3 -filer="filer:8888"' From ccf3859152908cdceaac35f8b59d54752c3d78b5 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Sat, 25 Jan 2020 08:16:43 -0800 Subject: [PATCH 0076/2432] log --- weed/filesys/dirty_page_interval.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/weed/filesys/dirty_page_interval.go b/weed/filesys/dirty_page_interval.go index 47d329f82..79b3af335 100644 --- a/weed/filesys/dirty_page_interval.go +++ b/weed/filesys/dirty_page_interval.go @@ -86,11 +86,11 @@ func (c *ContinuousIntervals) AddInterval(data []byte, offset int64) (hasOverlap if list.Head.Offset <= offset && offset < list.Head.Offset+list.Size() { if list.Tail.Offset <= offset { dataStartIndex := list.Tail.Offset + list.Tail.Size - offset - // glog.V(4).Infof("overlap data new [0,%d) same=%v", dataStartIndex, bytes.Compare(interval.Data[0:dataStartIndex], list.Tail.Data[len(list.Tail.Data)-int(dataStartIndex):])) + glog.V(4).Infof("overlap data new [0,%d) same=%v", dataStartIndex, bytes.Compare(interval.Data[0:dataStartIndex], list.Tail.Data[len(list.Tail.Data)-int(dataStartIndex):])) interval.Data = interval.Data[dataStartIndex:] interval.Size -= dataStartIndex interval.Offset = offset + dataStartIndex - // glog.V(4).Infof("overlapping append as [%d,%d) dataSize=%d", interval.Offset, interval.Offset+interval.Size, len(interval.Data)) + glog.V(4).Infof("overlapping append as [%d,%d) dataSize=%d", interval.Offset, interval.Offset+interval.Size, len(interval.Data)) list.addNodeToTail(interval) prevList = list break From 3f6e0e34cbb75fa440c8ccd182adb68ac7301847 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Sat, 25 Jan 2020 08:17:59 -0800 Subject: [PATCH 0077/2432] Revert "use docker built binaries for faster automated release" This reverts commit 9863f51cc8280a613eb816f5b73bda0fcc60f459. 
--- docker/Dockerfile | 31 ++++++++++++++++++++----------- docker/Dockerfile.go_build | 35 +++++++++++++++++++++++++++++++++++ docker/local-dev-compose.yml | 8 ++++---- 3 files changed, 59 insertions(+), 15 deletions(-) create mode 100644 docker/Dockerfile.go_build diff --git a/docker/Dockerfile b/docker/Dockerfile index 306ce3aa1..38117a3dc 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -1,15 +1,22 @@ -FROM frolvlad/alpine-glibc as builder -RUN apk add git go g++ -RUN mkdir -p /go/src/github.com/chrislusf/ -RUN git clone https://github.com/chrislusf/seaweedfs /go/src/github.com/chrislusf/seaweedfs -RUN cd /go/src/github.com/chrislusf/seaweedfs/weed && go install +FROM frolvlad/alpine-glibc -FROM alpine AS final -LABEL author="Chris Lu" -COPY --from=builder /root/go/bin/weed /usr/bin/ -RUN mkdir -p /etc/seaweedfs -COPY --from=builder /go/src/github.com/chrislusf/seaweedfs/docker/filer.toml /etc/seaweedfs/filer.toml -COPY --from=builder /go/src/github.com/chrislusf/seaweedfs/docker/entrypoint.sh /entrypoint.sh +# Supercronic install settings +ENV SUPERCRONIC_URL=https://github.com/aptible/supercronic/releases/download/v0.1.8/supercronic-linux-amd64 \ + SUPERCRONIC=supercronic-linux-amd64 \ + SUPERCRONIC_SHA1SUM=be43e64c45acd6ec4fce5831e03759c89676a0ea + +# Install SeaweedFS and Supercronic ( for cron job mode ) +# Tried to use curl only (curl -o /tmp/linux_amd64.tar.gz ...), however it turned out that the following tar command failed with "gzip: stdin: not in gzip format" +RUN apk add --no-cache --virtual build-dependencies --update wget curl ca-certificates && \ + wget -P /tmp https://github.com/$(curl -s -L https://github.com/chrislusf/seaweedfs/releases/latest | egrep -o 'chrislusf/seaweedfs/releases/download/.*/linux_amd64.tar.gz') && \ + tar -C /usr/bin/ -xzvf /tmp/linux_amd64.tar.gz && \ + curl -fsSLO "$SUPERCRONIC_URL" && \ + echo "${SUPERCRONIC_SHA1SUM} ${SUPERCRONIC}" | sha1sum -c - && \ + chmod +x "$SUPERCRONIC" && \ + mv "$SUPERCRONIC" "/usr/local/bin/${SUPERCRONIC}" && \ + ln -s "/usr/local/bin/${SUPERCRONIC}" /usr/local/bin/supercronic && \ + apk del build-dependencies && \ + rm -rf /tmp/* # volume server gprc port EXPOSE 18080 @@ -30,6 +37,8 @@ RUN mkdir -p /data/filerldb2 VOLUME /data +COPY filer.toml /etc/seaweedfs/filer.toml +COPY entrypoint.sh /entrypoint.sh RUN chmod +x /entrypoint.sh ENTRYPOINT ["/entrypoint.sh"] diff --git a/docker/Dockerfile.go_build b/docker/Dockerfile.go_build new file mode 100644 index 000000000..306ce3aa1 --- /dev/null +++ b/docker/Dockerfile.go_build @@ -0,0 +1,35 @@ +FROM frolvlad/alpine-glibc as builder +RUN apk add git go g++ +RUN mkdir -p /go/src/github.com/chrislusf/ +RUN git clone https://github.com/chrislusf/seaweedfs /go/src/github.com/chrislusf/seaweedfs +RUN cd /go/src/github.com/chrislusf/seaweedfs/weed && go install + +FROM alpine AS final +LABEL author="Chris Lu" +COPY --from=builder /root/go/bin/weed /usr/bin/ +RUN mkdir -p /etc/seaweedfs +COPY --from=builder /go/src/github.com/chrislusf/seaweedfs/docker/filer.toml /etc/seaweedfs/filer.toml +COPY --from=builder /go/src/github.com/chrislusf/seaweedfs/docker/entrypoint.sh /entrypoint.sh + +# volume server gprc port +EXPOSE 18080 +# volume server http port +EXPOSE 8080 +# filer server gprc port +EXPOSE 18888 +# filer server http port +EXPOSE 8888 +# master server shared gprc port +EXPOSE 19333 +# master server shared http port +EXPOSE 9333 +# s3 server http port +EXPOSE 8333 + +RUN mkdir -p /data/filerldb2 + +VOLUME /data + +RUN chmod +x /entrypoint.sh + +ENTRYPOINT 
["/entrypoint.sh"] diff --git a/docker/local-dev-compose.yml b/docker/local-dev-compose.yml index 02ed1e6a3..c2f588a60 100644 --- a/docker/local-dev-compose.yml +++ b/docker/local-dev-compose.yml @@ -4,7 +4,7 @@ services: master: build: context: . - dockerfile: Dockerfile + dockerfile: Dockerfile.go_build ports: - 9333:9333 - 19333:19333 @@ -12,7 +12,7 @@ services: volume: build: context: . - dockerfile: Dockerfile + dockerfile: Dockerfile.go_build ports: - 8080:8080 - 18080:18080 @@ -22,7 +22,7 @@ services: filer: build: context: . - dockerfile: Dockerfile + dockerfile: Dockerfile.go_build ports: - 8888:8888 - 18888:18888 @@ -33,7 +33,7 @@ services: s3: build: context: . - dockerfile: Dockerfile + dockerfile: Dockerfile.go_build ports: - 8333:8333 command: '-v=4 s3 -filer="filer:8888"' From c48fc8b4de5922c44d22da306699f789353ecdd4 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Sat, 25 Jan 2020 09:17:19 -0800 Subject: [PATCH 0078/2432] grpc send error via response instead of grpc error --- other/java/client/src/main/proto/filer.proto | 1 + weed/command/filer_copy.go | 4 +- weed/filesys/dir.go | 5 +- weed/filesys/dir_link.go | 2 +- weed/filesys/dirty_page_interval.go | 6 +- weed/filesys/filehandle.go | 2 +- weed/pb/filer.proto | 1 + weed/pb/filer_pb/filer.pb.go | 215 +++++++++--------- weed/pb/filer_pb/filer_pb_helper.go | 14 ++ weed/replication/sink/filersink/filer_sink.go | 2 +- weed/s3api/filer_util.go | 4 +- weed/server/filer_grpc_server.go | 14 +- weed/server/webdav_server.go | 4 +- weed/shell/command_fs_meta_load.go | 2 +- 14 files changed, 152 insertions(+), 124 deletions(-) diff --git a/other/java/client/src/main/proto/filer.proto b/other/java/client/src/main/proto/filer.proto index 41c1650d4..6357d971f 100644 --- a/other/java/client/src/main/proto/filer.proto +++ b/other/java/client/src/main/proto/filer.proto @@ -127,6 +127,7 @@ message CreateEntryRequest { } message CreateEntryResponse { + string error = 1; } message UpdateEntryRequest { diff --git a/weed/command/filer_copy.go b/weed/command/filer_copy.go index f14d18c52..71143f307 100644 --- a/weed/command/filer_copy.go +++ b/weed/command/filer_copy.go @@ -331,7 +331,7 @@ func (worker *FileCopyWorker) uploadFileAsOne(ctx context.Context, task FileCopy }, } - if _, err := client.CreateEntry(ctx, request); err != nil { + if err := filer_pb.CreateEntry(ctx, client, request); err != nil { return fmt.Errorf("update fh: %v", err) } return nil @@ -435,7 +435,7 @@ func (worker *FileCopyWorker) uploadFileInChunks(ctx context.Context, task FileC }, } - if _, err := client.CreateEntry(ctx, request); err != nil { + if err := filer_pb.CreateEntry(ctx, client, request); err != nil { return fmt.Errorf("update fh: %v", err) } return nil diff --git a/weed/filesys/dir.go b/weed/filesys/dir.go index 7af5544ff..fe6b30619 100644 --- a/weed/filesys/dir.go +++ b/weed/filesys/dir.go @@ -129,8 +129,7 @@ func (dir *Dir) Create(ctx context.Context, req *fuse.CreateRequest, glog.V(1).Infof("create: %v", req.String()) if err := dir.wfs.WithFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { - if _, err := client.CreateEntry(ctx, request); err != nil { - glog.V(0).Infof("create %s/%s: %v", dir.Path, req.Name, err) + if err := filer_pb.CreateEntry(ctx, client, request); err != nil { if strings.Contains(err.Error(), "EEXIST") { return fuse.EEXIST } @@ -174,7 +173,7 @@ func (dir *Dir) Mkdir(ctx context.Context, req *fuse.MkdirRequest) (fs.Node, err } glog.V(1).Infof("mkdir: %v", request) - if _, err := client.CreateEntry(ctx, request); err != nil { 
+ if err := filer_pb.CreateEntry(ctx, client, request); err != nil { glog.V(0).Infof("mkdir %s/%s: %v", dir.Path, req.Name, err) return err } diff --git a/weed/filesys/dir_link.go b/weed/filesys/dir_link.go index 8e60872d3..13be62670 100644 --- a/weed/filesys/dir_link.go +++ b/weed/filesys/dir_link.go @@ -36,7 +36,7 @@ func (dir *Dir) Symlink(ctx context.Context, req *fuse.SymlinkRequest) (fs.Node, } err := dir.wfs.WithFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { - if _, err := client.CreateEntry(ctx, request); err != nil { + if err := filer_pb.CreateEntry(ctx, client, request); err != nil { glog.V(0).Infof("symlink %s/%s: %v", dir.Path, req.NewName, err) return fuse.EIO } diff --git a/weed/filesys/dirty_page_interval.go b/weed/filesys/dirty_page_interval.go index 79b3af335..c64196cdf 100644 --- a/weed/filesys/dirty_page_interval.go +++ b/weed/filesys/dirty_page_interval.go @@ -31,12 +31,12 @@ func (list *IntervalLinkedList) Size() int64 { return list.Tail.Offset + list.Tail.Size - list.Head.Offset } func (list *IntervalLinkedList) addNodeToTail(node *IntervalNode) { - // glog.V(0).Infof("add to tail [%d,%d) + [%d,%d) => [%d,%d)", list.Head.Offset, list.Tail.Offset+list.Tail.Size, node.Offset, node.Offset+node.Size, list.Head.Offset, node.Offset+node.Size) + glog.V(4).Infof("add to tail [%d,%d) + [%d,%d) => [%d,%d)", list.Head.Offset, list.Tail.Offset+list.Tail.Size, node.Offset, node.Offset+node.Size, list.Head.Offset, node.Offset+node.Size) list.Tail.Next = node list.Tail = node } func (list *IntervalLinkedList) addNodeToHead(node *IntervalNode) { - // glog.V(0).Infof("add to head [%d,%d) + [%d,%d) => [%d,%d)", node.Offset, node.Offset+node.Size, list.Head.Offset, list.Tail.Offset+list.Tail.Size, node.Offset, list.Tail.Offset+list.Tail.Size) + glog.V(4).Infof("add to head [%d,%d) + [%d,%d) => [%d,%d)", node.Offset, node.Offset+node.Size, list.Head.Offset, list.Tail.Offset+list.Tail.Size, node.Offset, list.Tail.Offset+list.Tail.Size) node.Next = list.Head list.Head = node } @@ -102,7 +102,7 @@ func (c *ContinuousIntervals) AddInterval(data []byte, offset int64) (hasOverlap } if prevList != nil && nextList != nil { - // glog.V(4).Infof("connecting [%d,%d) + [%d,%d) => [%d,%d)", prevList.Head.Offset, prevList.Tail.Offset+prevList.Tail.Size, nextList.Head.Offset, nextList.Tail.Offset+nextList.Tail.Size, prevList.Head.Offset, nextList.Tail.Offset+nextList.Tail.Size) + glog.V(4).Infof("connecting [%d,%d) + [%d,%d) => [%d,%d)", prevList.Head.Offset, prevList.Tail.Offset+prevList.Tail.Size, nextList.Head.Offset, nextList.Tail.Offset+nextList.Tail.Size, prevList.Head.Offset, nextList.Tail.Offset+nextList.Tail.Size) prevList.Tail.Next = nextList.Head prevList.Tail = nextList.Tail c.removeList(nextList) diff --git a/weed/filesys/filehandle.go b/weed/filesys/filehandle.go index 9fe4201db..fad5418e2 100644 --- a/weed/filesys/filehandle.go +++ b/weed/filesys/filehandle.go @@ -194,7 +194,7 @@ func (fh *FileHandle) Flush(ctx context.Context, req *fuse.FlushRequest) error { fh.f.entry.Chunks = chunks // fh.f.entryViewCache = nil - if _, err := client.CreateEntry(ctx, request); err != nil { + if err := filer_pb.CreateEntry(ctx, client, request); err != nil { glog.Errorf("update fh: %v", err) return fmt.Errorf("update fh: %v", err) } diff --git a/weed/pb/filer.proto b/weed/pb/filer.proto index 41c1650d4..6357d971f 100644 --- a/weed/pb/filer.proto +++ b/weed/pb/filer.proto @@ -127,6 +127,7 @@ message CreateEntryRequest { } message CreateEntryResponse { + string error = 1; } message 
UpdateEntryRequest { diff --git a/weed/pb/filer_pb/filer.pb.go b/weed/pb/filer_pb/filer.pb.go index 043a46504..01b3e8d90 100644 --- a/weed/pb/filer_pb/filer.pb.go +++ b/weed/pb/filer_pb/filer.pb.go @@ -527,6 +527,7 @@ func (m *CreateEntryRequest) GetOExcl() bool { } type CreateEntryResponse struct { + Error string `protobuf:"bytes,1,opt,name=error" json:"error,omitempty"` } func (m *CreateEntryResponse) Reset() { *m = CreateEntryResponse{} } @@ -534,6 +535,13 @@ func (m *CreateEntryResponse) String() string { return proto.CompactT func (*CreateEntryResponse) ProtoMessage() {} func (*CreateEntryResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11} } +func (m *CreateEntryResponse) GetError() string { + if m != nil { + return m.Error + } + return "" +} + type UpdateEntryRequest struct { Directory string `protobuf:"bytes,1,opt,name=directory" json:"directory,omitempty"` Entry *Entry `protobuf:"bytes,2,opt,name=entry" json:"entry,omitempty"` @@ -1465,107 +1473,108 @@ var _SeaweedFiler_serviceDesc = grpc.ServiceDesc{ func init() { proto.RegisterFile("filer.proto", fileDescriptor0) } var fileDescriptor0 = []byte{ - // 1624 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xb4, 0x58, 0x4b, 0x6f, 0xdb, 0xc6, - 0x16, 0x36, 0xf5, 0xe6, 0x91, 0x94, 0xd8, 0x63, 0x3b, 0x51, 0xe4, 0xc7, 0x75, 0xe8, 0x9b, 0x5c, - 0x5f, 0x34, 0x70, 0x03, 0x37, 0x8b, 0xa4, 0x69, 0x17, 0x89, 0x1f, 0x85, 0x51, 0xe7, 0x01, 0x3a, - 0x29, 0x5a, 0x14, 0x28, 0x41, 0x93, 0x23, 0x79, 0x6a, 0x92, 0xa3, 0x0e, 0x87, 0xb6, 0xd3, 0x9f, - 0xd0, 0x65, 0x97, 0x05, 0xba, 0xee, 0x9f, 0x28, 0xba, 0x29, 0x8a, 0xfe, 0x9b, 0x2e, 0xbb, 0x2e, - 0x66, 0x86, 0xa4, 0x86, 0xa2, 0x6c, 0x27, 0x28, 0xb2, 0xe3, 0x9c, 0xf7, 0x39, 0x73, 0xce, 0xf9, - 0x46, 0x82, 0xf6, 0x80, 0x04, 0x98, 0x6d, 0x8e, 0x18, 0xe5, 0x14, 0xb5, 0xe4, 0xc1, 0x19, 0x1d, - 0x59, 0x2f, 0x60, 0xe9, 0x80, 0xd2, 0x93, 0x64, 0xb4, 0x43, 0x18, 0xf6, 0x38, 0x65, 0x6f, 0x76, - 0x23, 0xce, 0xde, 0xd8, 0xf8, 0xbb, 0x04, 0xc7, 0x1c, 0x2d, 0x83, 0xe9, 0x67, 0x8c, 0x9e, 0xb1, - 0x66, 0x6c, 0x98, 0xf6, 0x98, 0x80, 0x10, 0xd4, 0x22, 0x37, 0xc4, 0xbd, 0x8a, 0x64, 0xc8, 0x6f, - 0x6b, 0x17, 0x96, 0xa7, 0x1b, 0x8c, 0x47, 0x34, 0x8a, 0x31, 0xba, 0x03, 0x75, 0x2c, 0x08, 0xd2, - 0x5a, 0x7b, 0xeb, 0xfa, 0x66, 0x16, 0xca, 0xa6, 0x92, 0x53, 0x5c, 0xeb, 0x37, 0x03, 0xd0, 0x01, - 0x89, 0xb9, 0x20, 0x12, 0x1c, 0xbf, 0x5d, 0x3c, 0x37, 0xa0, 0x31, 0x62, 0x78, 0x40, 0xce, 0xd3, - 0x88, 0xd2, 0x13, 0xba, 0x07, 0x73, 0x31, 0x77, 0x19, 0xdf, 0x63, 0x34, 0xdc, 0x23, 0x01, 0x7e, - 0x2e, 0x82, 0xae, 0x4a, 0x91, 0x32, 0x03, 0x6d, 0x02, 0x22, 0x91, 0x17, 0x24, 0x31, 0x39, 0xc5, - 0x87, 0x19, 0xb7, 0x57, 0x5b, 0x33, 0x36, 0x5a, 0xf6, 0x14, 0x0e, 0x5a, 0x80, 0x7a, 0x40, 0x42, - 0xc2, 0x7b, 0xf5, 0x35, 0x63, 0xa3, 0x6b, 0xab, 0x83, 0xf5, 0x09, 0xcc, 0x17, 0xe2, 0x7f, 0xb7, - 0xf4, 0x7f, 0xae, 0x40, 0x5d, 0x12, 0xf2, 0x1a, 0x1b, 0xe3, 0x1a, 0xa3, 0xdb, 0xd0, 0x21, 0xb1, - 0x33, 0x2e, 0x44, 0x45, 0xc6, 0xd6, 0x26, 0x71, 0x5e, 0x73, 0xf4, 0x01, 0x34, 0xbc, 0xe3, 0x24, - 0x3a, 0x89, 0x7b, 0xd5, 0xb5, 0xea, 0x46, 0x7b, 0x6b, 0x7e, 0xec, 0x48, 0x24, 0xba, 0x2d, 0x78, - 0x76, 0x2a, 0x82, 0x1e, 0x02, 0xb8, 0x9c, 0x33, 0x72, 0x94, 0x70, 0x1c, 0xcb, 0x4c, 0xdb, 0x5b, - 0x3d, 0x4d, 0x21, 0x89, 0xf1, 0x93, 0x9c, 0x6f, 0x6b, 0xb2, 0xe8, 0x11, 0xb4, 0xf0, 0x39, 0xc7, - 0x91, 0x8f, 0xfd, 0x5e, 0x5d, 0x3a, 0x5a, 0x99, 0xc8, 0x68, 0x73, 0x37, 0xe5, 0xab, 0xfc, 0x72, - 0xf1, 0xfe, 0x63, 0xe8, 0x16, 0x58, 0x68, 0x16, 0xaa, 0x27, 0x38, 0xbb, 0x55, 0xf1, 0x29, 0x2a, - 0x7b, 
0xea, 0x06, 0x89, 0x6a, 0xb0, 0x8e, 0xad, 0x0e, 0x1f, 0x57, 0x1e, 0x1a, 0xd6, 0x0e, 0x98, - 0x7b, 0x49, 0x10, 0xe4, 0x8a, 0x3e, 0x61, 0x99, 0xa2, 0x4f, 0xd8, 0xb8, 0xca, 0x95, 0x4b, 0xab, - 0xfc, 0xab, 0x01, 0x73, 0xbb, 0xa7, 0x38, 0xe2, 0xcf, 0x29, 0x27, 0x03, 0xe2, 0xb9, 0x9c, 0xd0, - 0x08, 0xdd, 0x03, 0x93, 0x06, 0xbe, 0x73, 0xe9, 0x35, 0xb5, 0x68, 0x90, 0x46, 0x7d, 0x0f, 0xcc, - 0x08, 0x9f, 0x39, 0x97, 0xba, 0x6b, 0x45, 0xf8, 0x4c, 0x49, 0xaf, 0x43, 0xd7, 0xc7, 0x01, 0xe6, - 0xd8, 0xc9, 0x6f, 0x47, 0x5c, 0x5d, 0x47, 0x11, 0xb7, 0xd5, 0x75, 0xdc, 0x85, 0xeb, 0xc2, 0xe4, - 0xc8, 0x65, 0x38, 0xe2, 0xce, 0xc8, 0xe5, 0xc7, 0xf2, 0x4e, 0x4c, 0xbb, 0x1b, 0xe1, 0xb3, 0x97, - 0x92, 0xfa, 0xd2, 0xe5, 0xc7, 0xd6, 0xdf, 0x06, 0x98, 0xf9, 0x65, 0xa2, 0x9b, 0xd0, 0x14, 0x6e, - 0x1d, 0xe2, 0xa7, 0x95, 0x68, 0x88, 0xe3, 0xbe, 0x2f, 0xa6, 0x82, 0x0e, 0x06, 0x31, 0xe6, 0x32, - 0xbc, 0xaa, 0x9d, 0x9e, 0x44, 0x67, 0xc5, 0xe4, 0x7b, 0x35, 0x08, 0x35, 0x5b, 0x7e, 0x8b, 0x8a, - 0x87, 0x9c, 0x84, 0x58, 0x3a, 0xac, 0xda, 0xea, 0x80, 0xe6, 0xa1, 0x8e, 0x1d, 0xee, 0x0e, 0x65, - 0x87, 0x9b, 0x76, 0x0d, 0xbf, 0x72, 0x87, 0xe8, 0xbf, 0x70, 0x2d, 0xa6, 0x09, 0xf3, 0xb0, 0x93, - 0xb9, 0x6d, 0x48, 0x6e, 0x47, 0x51, 0xf7, 0x94, 0x73, 0x0b, 0xaa, 0x03, 0xe2, 0xf7, 0x9a, 0xb2, - 0x30, 0xb3, 0xc5, 0x26, 0xdc, 0xf7, 0x6d, 0xc1, 0x44, 0x1f, 0x02, 0xe4, 0x96, 0xfc, 0x5e, 0xeb, - 0x02, 0x51, 0x33, 0xb3, 0xeb, 0x5b, 0x5f, 0x42, 0x23, 0x35, 0xbf, 0x04, 0xe6, 0x29, 0x0d, 0x92, - 0x30, 0x4f, 0xbb, 0x6b, 0xb7, 0x14, 0x61, 0xdf, 0x47, 0xb7, 0x40, 0xee, 0x39, 0x47, 0x74, 0x55, - 0x45, 0x26, 0x29, 0x2b, 0xf4, 0x39, 0x96, 0x9b, 0xc2, 0xa3, 0xf4, 0x84, 0xa8, 0xec, 0x9b, 0x76, - 0x7a, 0xb2, 0xfe, 0xaa, 0xc0, 0xb5, 0x62, 0xbb, 0x0b, 0x17, 0xd2, 0x8a, 0xac, 0x95, 0x21, 0xcd, - 0x48, 0xb3, 0x87, 0x85, 0x7a, 0x55, 0xf4, 0x7a, 0x65, 0x2a, 0x21, 0xf5, 0x95, 0x83, 0xae, 0x52, - 0x79, 0x46, 0x7d, 0x2c, 0xba, 0x35, 0x21, 0xbe, 0x2c, 0x70, 0xd7, 0x16, 0x9f, 0x82, 0x32, 0x24, - 0x7e, 0xba, 0x3e, 0xc4, 0xa7, 0x0c, 0x8f, 0x49, 0xbb, 0x0d, 0x75, 0x65, 0xea, 0x24, 0xae, 0x2c, - 0x14, 0xd4, 0xa6, 0xba, 0x07, 0xf1, 0x8d, 0xd6, 0xa0, 0xcd, 0xf0, 0x28, 0x48, 0xbb, 0x57, 0x96, - 0xcf, 0xb4, 0x75, 0x12, 0x5a, 0x05, 0xf0, 0x68, 0x10, 0x60, 0x4f, 0x0a, 0x98, 0x52, 0x40, 0xa3, - 0x88, 0xce, 0xe1, 0x3c, 0x70, 0x62, 0xec, 0xf5, 0x60, 0xcd, 0xd8, 0xa8, 0xdb, 0x0d, 0xce, 0x83, - 0x43, 0xec, 0x89, 0x3c, 0x92, 0x18, 0x33, 0x47, 0x2e, 0xa0, 0xb6, 0xd4, 0x6b, 0x09, 0x82, 0x5c, - 0x93, 0x2b, 0x00, 0x43, 0x46, 0x93, 0x91, 0xe2, 0x76, 0xd6, 0xaa, 0x62, 0x17, 0x4b, 0x8a, 0x64, - 0xdf, 0x81, 0x6b, 0xf1, 0x9b, 0x30, 0x20, 0xd1, 0x89, 0xc3, 0x5d, 0x36, 0xc4, 0xbc, 0xd7, 0x55, - 0x3d, 0x9c, 0x52, 0x5f, 0x49, 0xa2, 0x35, 0x02, 0xb4, 0xcd, 0xb0, 0xcb, 0xf1, 0x3b, 0xc0, 0xce, - 0xdb, 0x4d, 0x37, 0x5a, 0x84, 0x06, 0x75, 0xf0, 0xb9, 0x17, 0xa4, 0x43, 0x56, 0xa7, 0xbb, 0xe7, - 0x5e, 0x60, 0x2d, 0xc2, 0x7c, 0xc1, 0xa3, 0x5a, 0xcc, 0xd6, 0x57, 0x80, 0x5e, 0x8f, 0xfc, 0xf7, - 0x11, 0x88, 0xf0, 0x58, 0x30, 0x9d, 0x7a, 0xfc, 0xc3, 0x00, 0xb4, 0x23, 0xe7, 0xfe, 0xdf, 0x41, - 0xae, 0x98, 0x44, 0x01, 0x07, 0x6a, 0xaf, 0xf8, 0x2e, 0x77, 0x53, 0xb0, 0xea, 0x90, 0x58, 0xd9, - 0xdf, 0x71, 0xb9, 0x9b, 0x82, 0x06, 0xc3, 0x5e, 0xc2, 0x04, 0x7e, 0xc9, 0x76, 0x93, 0xa0, 0x61, - 0x67, 0x24, 0xf4, 0x00, 0x6e, 0x90, 0x61, 0x44, 0x19, 0x1e, 0x8b, 0x39, 0x98, 0x31, 0xca, 0x64, - 0x1b, 0xb6, 0xec, 0x05, 0xc5, 0xcd, 0x15, 0x76, 0x05, 0x4f, 0xa4, 0x57, 0x48, 0x23, 0x4d, 0xef, - 0x27, 0x03, 0x7a, 0x4f, 0x38, 0x0d, 0x89, 0x67, 0x63, 0x11, 0x66, 0x21, 0xc9, 0x75, 0xe8, 0x8a, - 0x1d, 0x3b, 0x99, 0x68, 0x87, 
0x06, 0xfe, 0x18, 0xc3, 0x6e, 0x81, 0x58, 0xb3, 0x8e, 0x96, 0x6f, - 0x93, 0x06, 0xbe, 0xec, 0xae, 0x75, 0x10, 0xbb, 0x50, 0xd3, 0x57, 0x68, 0xde, 0x89, 0xf0, 0x59, - 0x41, 0x5f, 0x08, 0x49, 0x7d, 0xb5, 0x40, 0x9b, 0x11, 0x3e, 0x13, 0xfa, 0xd6, 0x12, 0xdc, 0x9a, - 0x12, 0x5b, 0x1a, 0xf9, 0x2f, 0x06, 0xcc, 0x3f, 0x89, 0x63, 0x32, 0x8c, 0xbe, 0x90, 0xab, 0x24, - 0x0b, 0x7a, 0x01, 0xea, 0x1e, 0x4d, 0x22, 0x2e, 0x83, 0xad, 0xdb, 0xea, 0x30, 0x31, 0x5d, 0x95, - 0xd2, 0x74, 0x4d, 0xcc, 0x67, 0xb5, 0x3c, 0x9f, 0xda, 0xfc, 0xd5, 0x0a, 0xf3, 0xf7, 0x1f, 0x68, - 0x8b, 0xeb, 0x74, 0x3c, 0x1c, 0x71, 0xcc, 0xd2, 0xed, 0x0b, 0x82, 0xb4, 0x2d, 0x29, 0xd6, 0x0f, - 0x06, 0x2c, 0x14, 0x23, 0x4d, 0x9f, 0x19, 0x17, 0x82, 0x81, 0xd8, 0x3e, 0x2c, 0x48, 0xc3, 0x14, - 0x9f, 0x62, 0x8e, 0x47, 0xc9, 0x51, 0x40, 0x3c, 0x47, 0x30, 0x54, 0x78, 0xa6, 0xa2, 0xbc, 0x66, - 0xc1, 0x38, 0xe9, 0x9a, 0x9e, 0x34, 0x82, 0x9a, 0x9b, 0xf0, 0xe3, 0x0c, 0x10, 0xc4, 0xb7, 0xf5, - 0x00, 0xe6, 0xd5, 0xcb, 0xaf, 0x58, 0xb5, 0x15, 0x80, 0x7c, 0x45, 0xc7, 0x3d, 0x43, 0xed, 0x89, - 0x6c, 0x47, 0xc7, 0xd6, 0xa7, 0x60, 0x1e, 0x50, 0x55, 0x88, 0x18, 0xdd, 0x07, 0x33, 0xc8, 0x0e, - 0x52, 0xb4, 0xbd, 0x85, 0xc6, 0x43, 0x95, 0xc9, 0xd9, 0x63, 0x21, 0xeb, 0x31, 0xb4, 0x32, 0x72, - 0x96, 0x9b, 0x71, 0x51, 0x6e, 0x95, 0x89, 0xdc, 0xac, 0xdf, 0x0d, 0x58, 0x28, 0x86, 0x9c, 0x96, - 0xef, 0x35, 0x74, 0x73, 0x17, 0x4e, 0xe8, 0x8e, 0xd2, 0x58, 0xee, 0xeb, 0xb1, 0x94, 0xd5, 0xf2, - 0x00, 0xe3, 0x67, 0xee, 0x48, 0xb5, 0x54, 0x27, 0xd0, 0x48, 0xfd, 0x57, 0x30, 0x57, 0x12, 0x99, - 0xf2, 0xec, 0xf9, 0xbf, 0xfe, 0xec, 0x29, 0x3c, 0xdd, 0x72, 0x6d, 0xfd, 0x2d, 0xf4, 0x08, 0x6e, - 0xaa, 0xf9, 0xdb, 0xce, 0x9b, 0x2e, 0xab, 0x7d, 0xb1, 0x37, 0x8d, 0xc9, 0xde, 0xb4, 0xfa, 0xd0, - 0x2b, 0xab, 0xa6, 0x53, 0x30, 0x84, 0xb9, 0x43, 0xee, 0x72, 0x12, 0x73, 0xe2, 0xe5, 0xef, 0xef, - 0x89, 0x66, 0x36, 0xae, 0x02, 0x9b, 0xf2, 0x38, 0xcc, 0x42, 0x95, 0xf3, 0xac, 0xcf, 0xc4, 0xa7, - 0xb8, 0x05, 0xa4, 0x7b, 0x4a, 0xef, 0xe0, 0x3d, 0xb8, 0x12, 0xfd, 0xc0, 0x29, 0x77, 0x03, 0x05, - 0xe6, 0x35, 0x09, 0xe6, 0xa6, 0xa4, 0x48, 0x34, 0x57, 0x78, 0xe7, 0x2b, 0x6e, 0x5d, 0x41, 0xbd, - 0x20, 0x48, 0xe6, 0x0a, 0x80, 0x1c, 0x29, 0x35, 0x0d, 0x0d, 0xa5, 0x2b, 0x28, 0xdb, 0x82, 0x60, - 0xad, 0xc2, 0xf2, 0x67, 0x98, 0x8b, 0x67, 0x09, 0xdb, 0xa6, 0xd1, 0x80, 0x0c, 0x13, 0xe6, 0x6a, - 0x57, 0x61, 0xfd, 0x68, 0xc0, 0xca, 0x05, 0x02, 0x69, 0xc2, 0x3d, 0x68, 0x86, 0x6e, 0xcc, 0x31, - 0xcb, 0xa6, 0x24, 0x3b, 0x4e, 0x96, 0xa2, 0x72, 0x55, 0x29, 0xaa, 0xa5, 0x52, 0x2c, 0x42, 0x23, - 0x74, 0xcf, 0x9d, 0xf0, 0x28, 0x7d, 0x77, 0xd4, 0x43, 0xf7, 0xfc, 0xd9, 0xd1, 0xd6, 0x9f, 0x4d, - 0xe8, 0x1c, 0x62, 0xf7, 0x0c, 0x63, 0x5f, 0x06, 0x86, 0x86, 0xd9, 0x40, 0x14, 0x7f, 0xbd, 0xa1, - 0x3b, 0x93, 0x9d, 0x3f, 0xf5, 0xe7, 0x62, 0xff, 0xee, 0x55, 0x62, 0x69, 0x6f, 0xcd, 0xa0, 0xe7, - 0xd0, 0xd6, 0x7e, 0x1e, 0xa1, 0x65, 0x4d, 0xb1, 0xf4, 0xab, 0xaf, 0xbf, 0x72, 0x01, 0x37, 0xb3, - 0x76, 0xdf, 0x40, 0x07, 0xd0, 0xd6, 0x50, 0x5d, 0xb7, 0x57, 0x7e, 0x5e, 0xe8, 0xf6, 0xa6, 0x3d, - 0x05, 0x66, 0x84, 0x35, 0x0d, 0xb1, 0x75, 0x6b, 0xe5, 0x37, 0x82, 0x6e, 0x6d, 0x1a, 0xcc, 0x4b, - 0x6b, 0x1a, 0x40, 0xea, 0xd6, 0xca, 0xf0, 0xaf, 0x5b, 0x9b, 0x86, 0xaa, 0x33, 0xe8, 0x1b, 0x98, - 0x2b, 0x41, 0x17, 0xb2, 0xc6, 0x5a, 0x17, 0x61, 0x6e, 0x7f, 0xfd, 0x52, 0x99, 0xdc, 0xfe, 0x0b, - 0xe8, 0xe8, 0x90, 0x82, 0xb4, 0x80, 0xa6, 0x80, 0x62, 0x7f, 0xf5, 0x22, 0xb6, 0x6e, 0x50, 0xdf, - 0x96, 0xba, 0xc1, 0x29, 0x78, 0xa1, 0x1b, 0x9c, 0xb6, 0x64, 0xad, 0x19, 0xf4, 0x35, 0xcc, 0x4e, - 0x6e, 0x2d, 0x74, 0x7b, 0xb2, 0x6c, 0xa5, 0x65, 0xd8, 
0xb7, 0x2e, 0x13, 0xc9, 0x8d, 0xef, 0x03, - 0x8c, 0x97, 0x11, 0x5a, 0x1a, 0xeb, 0x94, 0x96, 0x61, 0x7f, 0x79, 0x3a, 0x33, 0x37, 0xf5, 0x2d, - 0x2c, 0x4e, 0x9d, 0x78, 0xa4, 0x8d, 0xc9, 0x65, 0x3b, 0xa3, 0xff, 0xbf, 0x2b, 0xe5, 0x32, 0x5f, - 0x4f, 0x57, 0x61, 0x36, 0x56, 0x83, 0x3c, 0x88, 0x37, 0xbd, 0x80, 0xe0, 0x88, 0x3f, 0x05, 0xa9, - 0xf1, 0x92, 0x51, 0x4e, 0x8f, 0x1a, 0xf2, 0x8f, 0x9f, 0x8f, 0xfe, 0x09, 0x00, 0x00, 0xff, 0xff, - 0x32, 0x00, 0xf8, 0x2b, 0x07, 0x12, 0x00, 0x00, + // 1633 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xb4, 0x18, 0x4b, 0x6f, 0xdc, 0xc6, + 0x59, 0xdc, 0x37, 0xbf, 0xdd, 0xb5, 0xa5, 0x59, 0xc9, 0x5e, 0xaf, 0x1e, 0x95, 0xa9, 0xda, 0x55, + 0x61, 0x43, 0x35, 0x54, 0x1f, 0xec, 0xba, 0x3d, 0xd8, 0x7a, 0x14, 0x42, 0xe5, 0x07, 0x28, 0xbb, + 0x68, 0x11, 0x20, 0x04, 0x45, 0xce, 0xae, 0x26, 0x22, 0x39, 0x9b, 0xe1, 0x50, 0x92, 0xf3, 0x13, + 0x72, 0xcc, 0x31, 0x40, 0xce, 0xf9, 0x13, 0x41, 0x2e, 0x41, 0x90, 0x7f, 0x93, 0x63, 0xce, 0xc1, + 0xcc, 0x90, 0xdc, 0xe1, 0x72, 0x25, 0xd9, 0x08, 0x7c, 0x9b, 0xf9, 0xde, 0xef, 0x6f, 0x48, 0x68, + 0x0f, 0x49, 0x80, 0xd9, 0xd6, 0x98, 0x51, 0x4e, 0x51, 0x4b, 0x5e, 0x9c, 0xf1, 0xb1, 0xf5, 0x1a, + 0x96, 0x0f, 0x29, 0x3d, 0x4d, 0xc6, 0xbb, 0x84, 0x61, 0x8f, 0x53, 0xf6, 0x7e, 0x2f, 0xe2, 0xec, + 0xbd, 0x8d, 0xbf, 0x4c, 0x70, 0xcc, 0xd1, 0x0a, 0x98, 0x7e, 0x86, 0xe8, 0x1b, 0xeb, 0xc6, 0xa6, + 0x69, 0x4f, 0x00, 0x08, 0x41, 0x2d, 0x72, 0x43, 0xdc, 0xaf, 0x48, 0x84, 0x3c, 0x5b, 0x7b, 0xb0, + 0x32, 0x5b, 0x60, 0x3c, 0xa6, 0x51, 0x8c, 0xd1, 0x3d, 0xa8, 0x63, 0x01, 0x90, 0xd2, 0xda, 0xdb, + 0x37, 0xb7, 0x32, 0x53, 0xb6, 0x14, 0x9d, 0xc2, 0x5a, 0x3f, 0x1a, 0x80, 0x0e, 0x49, 0xcc, 0x05, + 0x90, 0xe0, 0xf8, 0xc3, 0xec, 0xb9, 0x05, 0x8d, 0x31, 0xc3, 0x43, 0x72, 0x91, 0x5a, 0x94, 0xde, + 0xd0, 0x43, 0x58, 0x88, 0xb9, 0xcb, 0xf8, 0x3e, 0xa3, 0xe1, 0x3e, 0x09, 0xf0, 0x2b, 0x61, 0x74, + 0x55, 0x92, 0x94, 0x11, 0x68, 0x0b, 0x10, 0x89, 0xbc, 0x20, 0x89, 0xc9, 0x19, 0x3e, 0xca, 0xb0, + 0xfd, 0xda, 0xba, 0xb1, 0xd9, 0xb2, 0x67, 0x60, 0xd0, 0x22, 0xd4, 0x03, 0x12, 0x12, 0xde, 0xaf, + 0xaf, 0x1b, 0x9b, 0x5d, 0x5b, 0x5d, 0xac, 0x7f, 0x42, 0xaf, 0x60, 0xff, 0xc7, 0xb9, 0xff, 0x5d, + 0x05, 0xea, 0x12, 0x90, 0xc7, 0xd8, 0x98, 0xc4, 0x18, 0xdd, 0x85, 0x0e, 0x89, 0x9d, 0x49, 0x20, + 0x2a, 0xd2, 0xb6, 0x36, 0x89, 0xf3, 0x98, 0xa3, 0x07, 0xd0, 0xf0, 0x4e, 0x92, 0xe8, 0x34, 0xee, + 0x57, 0xd7, 0xab, 0x9b, 0xed, 0xed, 0xde, 0x44, 0x91, 0x70, 0x74, 0x47, 0xe0, 0xec, 0x94, 0x04, + 0x3d, 0x01, 0x70, 0x39, 0x67, 0xe4, 0x38, 0xe1, 0x38, 0x96, 0x9e, 0xb6, 0xb7, 0xfb, 0x1a, 0x43, + 0x12, 0xe3, 0xe7, 0x39, 0xde, 0xd6, 0x68, 0xd1, 0x53, 0x68, 0xe1, 0x0b, 0x8e, 0x23, 0x1f, 0xfb, + 0xfd, 0xba, 0x54, 0xb4, 0x3a, 0xe5, 0xd1, 0xd6, 0x5e, 0x8a, 0x57, 0xfe, 0xe5, 0xe4, 0x83, 0x67, + 0xd0, 0x2d, 0xa0, 0xd0, 0x3c, 0x54, 0x4f, 0x71, 0x96, 0x55, 0x71, 0x14, 0x91, 0x3d, 0x73, 0x83, + 0x44, 0x15, 0x58, 0xc7, 0x56, 0x97, 0x7f, 0x54, 0x9e, 0x18, 0xd6, 0x2e, 0x98, 0xfb, 0x49, 0x10, + 0xe4, 0x8c, 0x3e, 0x61, 0x19, 0xa3, 0x4f, 0xd8, 0x24, 0xca, 0x95, 0x2b, 0xa3, 0xfc, 0x83, 0x01, + 0x0b, 0x7b, 0x67, 0x38, 0xe2, 0xaf, 0x28, 0x27, 0x43, 0xe2, 0xb9, 0x9c, 0xd0, 0x08, 0x3d, 0x04, + 0x93, 0x06, 0xbe, 0x73, 0x65, 0x9a, 0x5a, 0x34, 0x48, 0xad, 0x7e, 0x08, 0x66, 0x84, 0xcf, 0x9d, + 0x2b, 0xd5, 0xb5, 0x22, 0x7c, 0xae, 0xa8, 0x37, 0xa0, 0xeb, 0xe3, 0x00, 0x73, 0xec, 0xe4, 0xd9, + 0x11, 0xa9, 0xeb, 0x28, 0xe0, 0x8e, 0x4a, 0xc7, 0x7d, 0xb8, 0x29, 0x44, 0x8e, 0x5d, 0x86, 0x23, + 0xee, 0x8c, 0x5d, 0x7e, 0x22, 0x73, 0x62, 0xda, 0xdd, 0x08, 0x9f, 0xbf, 0x91, 
0xd0, 0x37, 0x2e, + 0x3f, 0xb1, 0x7e, 0x33, 0xc0, 0xcc, 0x93, 0x89, 0x6e, 0x43, 0x53, 0xa8, 0x75, 0x88, 0x9f, 0x46, + 0xa2, 0x21, 0xae, 0x07, 0xbe, 0xe8, 0x0a, 0x3a, 0x1c, 0xc6, 0x98, 0x4b, 0xf3, 0xaa, 0x76, 0x7a, + 0x13, 0x95, 0x15, 0x93, 0xaf, 0x54, 0x23, 0xd4, 0x6c, 0x79, 0x16, 0x11, 0x0f, 0x39, 0x09, 0xb1, + 0x54, 0x58, 0xb5, 0xd5, 0x05, 0xf5, 0xa0, 0x8e, 0x1d, 0xee, 0x8e, 0x64, 0x85, 0x9b, 0x76, 0x0d, + 0xbf, 0x75, 0x47, 0xe8, 0xcf, 0x70, 0x23, 0xa6, 0x09, 0xf3, 0xb0, 0x93, 0xa9, 0x6d, 0x48, 0x6c, + 0x47, 0x41, 0xf7, 0x95, 0x72, 0x0b, 0xaa, 0x43, 0xe2, 0xf7, 0x9b, 0x32, 0x30, 0xf3, 0xc5, 0x22, + 0x3c, 0xf0, 0x6d, 0x81, 0x44, 0x7f, 0x03, 0xc8, 0x25, 0xf9, 0xfd, 0xd6, 0x25, 0xa4, 0x66, 0x26, + 0xd7, 0xb7, 0xfe, 0x07, 0x8d, 0x54, 0xfc, 0x32, 0x98, 0x67, 0x34, 0x48, 0xc2, 0xdc, 0xed, 0xae, + 0xdd, 0x52, 0x80, 0x03, 0x1f, 0xdd, 0x01, 0x39, 0xe7, 0x1c, 0x51, 0x55, 0x15, 0xe9, 0xa4, 0x8c, + 0xd0, 0x7f, 0xb0, 0x9c, 0x14, 0x1e, 0xa5, 0xa7, 0x44, 0x79, 0xdf, 0xb4, 0xd3, 0x9b, 0xf5, 0x6b, + 0x05, 0x6e, 0x14, 0xcb, 0x5d, 0xa8, 0x90, 0x52, 0x64, 0xac, 0x0c, 0x29, 0x46, 0x8a, 0x3d, 0x2a, + 0xc4, 0xab, 0xa2, 0xc7, 0x2b, 0x63, 0x09, 0xa9, 0xaf, 0x14, 0x74, 0x15, 0xcb, 0x4b, 0xea, 0x63, + 0x51, 0xad, 0x09, 0xf1, 0x65, 0x80, 0xbb, 0xb6, 0x38, 0x0a, 0xc8, 0x88, 0xf8, 0xe9, 0xf8, 0x10, + 0x47, 0x69, 0x1e, 0x93, 0x72, 0x1b, 0x2a, 0x65, 0xea, 0x26, 0x52, 0x16, 0x0a, 0x68, 0x53, 0xe5, + 0x41, 0x9c, 0xd1, 0x3a, 0xb4, 0x19, 0x1e, 0x07, 0x69, 0xf5, 0xca, 0xf0, 0x99, 0xb6, 0x0e, 0x42, + 0x6b, 0x00, 0x1e, 0x0d, 0x02, 0xec, 0x49, 0x02, 0x53, 0x12, 0x68, 0x10, 0x51, 0x39, 0x9c, 0x07, + 0x4e, 0x8c, 0xbd, 0x3e, 0xac, 0x1b, 0x9b, 0x75, 0xbb, 0xc1, 0x79, 0x70, 0x84, 0x3d, 0xe1, 0x47, + 0x12, 0x63, 0xe6, 0xc8, 0x01, 0xd4, 0x96, 0x7c, 0x2d, 0x01, 0x90, 0x63, 0x72, 0x15, 0x60, 0xc4, + 0x68, 0x32, 0x56, 0xd8, 0xce, 0x7a, 0x55, 0xcc, 0x62, 0x09, 0x91, 0xe8, 0x7b, 0x70, 0x23, 0x7e, + 0x1f, 0x06, 0x24, 0x3a, 0x75, 0xb8, 0xcb, 0x46, 0x98, 0xf7, 0xbb, 0xaa, 0x86, 0x53, 0xe8, 0x5b, + 0x09, 0xb4, 0xc6, 0x80, 0x76, 0x18, 0x76, 0x39, 0xfe, 0x88, 0xb5, 0xf3, 0x61, 0xdd, 0x8d, 0x96, + 0xa0, 0x41, 0x1d, 0x7c, 0xe1, 0x05, 0x69, 0x93, 0xd5, 0xe9, 0xde, 0x85, 0x17, 0x58, 0x0f, 0xa0, + 0x57, 0xd0, 0x98, 0x0e, 0xe6, 0x45, 0xa8, 0x63, 0xc6, 0x68, 0x36, 0x46, 0xd4, 0xc5, 0xfa, 0x3f, + 0xa0, 0x77, 0x63, 0xff, 0x53, 0x98, 0x67, 0x2d, 0x41, 0xaf, 0x20, 0x5a, 0xd9, 0x61, 0xfd, 0x6c, + 0x00, 0xda, 0x95, 0xd3, 0xe0, 0x8f, 0x2d, 0x62, 0xd1, 0x9f, 0x62, 0x49, 0xa8, 0x69, 0xe3, 0xbb, + 0xdc, 0x4d, 0x57, 0x58, 0x87, 0xc4, 0x4a, 0xfe, 0xae, 0xcb, 0xdd, 0x74, 0x95, 0x30, 0xec, 0x25, + 0x4c, 0x6c, 0x35, 0x59, 0x84, 0x72, 0x95, 0xd8, 0x19, 0x08, 0x3d, 0x86, 0x5b, 0x64, 0x14, 0x51, + 0x86, 0x27, 0x64, 0x8e, 0x0a, 0x55, 0x43, 0x12, 0x2f, 0x2a, 0x6c, 0xce, 0xb0, 0x27, 0x23, 0xb7, + 0x04, 0xbd, 0x82, 0x1b, 0xa9, 0x7b, 0xdf, 0x1a, 0xd0, 0x7f, 0xce, 0x69, 0x48, 0x3c, 0x1b, 0x0b, + 0x33, 0x0b, 0x4e, 0x6e, 0x40, 0x57, 0x4c, 0xde, 0x69, 0x47, 0x3b, 0x34, 0xf0, 0x27, 0x9b, 0xed, + 0x0e, 0x88, 0xe1, 0xeb, 0x68, 0xfe, 0x36, 0x69, 0xe0, 0xcb, 0x9a, 0xdb, 0x00, 0x31, 0x21, 0x35, + 0x7e, 0xb5, 0xe3, 0x3b, 0x11, 0x3e, 0x2f, 0xf0, 0x0b, 0x22, 0xc9, 0xaf, 0xc6, 0x6a, 0x33, 0xc2, + 0xe7, 0x82, 0xdf, 0x5a, 0x86, 0x3b, 0x33, 0x6c, 0x4b, 0x2d, 0xff, 0xde, 0x80, 0xde, 0xf3, 0x38, + 0x26, 0xa3, 0xe8, 0xbf, 0x72, 0xc0, 0x64, 0x46, 0x2f, 0x42, 0xdd, 0xa3, 0x49, 0xc4, 0xa5, 0xb1, + 0x75, 0x5b, 0x5d, 0xa6, 0x7a, 0xae, 0x52, 0xea, 0xb9, 0xa9, 0xae, 0xad, 0x96, 0xbb, 0x56, 0xeb, + 0xca, 0x5a, 0xa1, 0x2b, 0xff, 0x04, 0x6d, 0x91, 0x4e, 0xc7, 0xc3, 0x11, 0xc7, 0x2c, 0x9d, 0xc9, + 0x20, 
0x40, 0x3b, 0x12, 0x62, 0x7d, 0x6d, 0xc0, 0x62, 0xd1, 0xd2, 0xb4, 0xc6, 0x2f, 0x5d, 0x11, + 0x62, 0x26, 0xb1, 0x20, 0x35, 0x53, 0x1c, 0x45, 0x77, 0x8f, 0x93, 0xe3, 0x80, 0x78, 0x8e, 0x40, + 0x28, 0xf3, 0x4c, 0x05, 0x79, 0xc7, 0x82, 0x89, 0xd3, 0x35, 0xdd, 0x69, 0x04, 0x35, 0x37, 0xe1, + 0x27, 0xd9, 0x9a, 0x10, 0x67, 0xeb, 0x31, 0xf4, 0xd4, 0x7b, 0xb0, 0x18, 0xb5, 0x55, 0x80, 0x7c, + 0x70, 0xc7, 0x7d, 0x43, 0x4d, 0x8f, 0x6c, 0x72, 0xc7, 0xd6, 0xbf, 0xc0, 0x3c, 0xa4, 0x2a, 0x10, + 0x31, 0x7a, 0x04, 0x66, 0x90, 0x5d, 0x24, 0x69, 0x7b, 0x1b, 0x4d, 0x9a, 0x2a, 0xa3, 0xb3, 0x27, + 0x44, 0xd6, 0x33, 0x68, 0x65, 0xe0, 0xcc, 0x37, 0xe3, 0x32, 0xdf, 0x2a, 0x53, 0xbe, 0x59, 0x3f, + 0x19, 0xb0, 0x58, 0x34, 0x39, 0x0d, 0xdf, 0x3b, 0xe8, 0xe6, 0x2a, 0x9c, 0xd0, 0x1d, 0xa7, 0xb6, + 0x3c, 0xd2, 0x6d, 0x29, 0xb3, 0xe5, 0x06, 0xc6, 0x2f, 0xdd, 0xb1, 0x2a, 0xa9, 0x4e, 0xa0, 0x81, + 0x06, 0x6f, 0x61, 0xa1, 0x44, 0x32, 0xe3, 0x31, 0xf4, 0x57, 0xfd, 0x31, 0x54, 0x78, 0xd0, 0xe5, + 0xdc, 0xfa, 0x0b, 0xe9, 0x29, 0xdc, 0x56, 0xfd, 0xb7, 0x93, 0x17, 0x5d, 0x16, 0xfb, 0x62, 0x6d, + 0x1a, 0xd3, 0xb5, 0x69, 0x0d, 0xa0, 0x5f, 0x66, 0x4d, 0xbb, 0x60, 0x04, 0x0b, 0x47, 0xdc, 0xe5, + 0x24, 0xe6, 0xc4, 0xcb, 0x5f, 0xe5, 0x53, 0xc5, 0x6c, 0x5c, 0xb7, 0x82, 0xca, 0xed, 0x30, 0x0f, + 0x55, 0xce, 0xb3, 0x3a, 0x13, 0x47, 0x91, 0x05, 0xa4, 0x6b, 0x4a, 0x73, 0xf0, 0x09, 0x54, 0x89, + 0x7a, 0xe0, 0x94, 0xbb, 0x81, 0x5a, 0xf1, 0x35, 0xb9, 0xe2, 0x4d, 0x09, 0x91, 0x3b, 0x5e, 0x6d, + 0x41, 0x5f, 0x61, 0xeb, 0xea, 0x01, 0x20, 0x00, 0x12, 0xb9, 0x0a, 0x20, 0x5b, 0x4a, 0x75, 0x43, + 0x43, 0xf1, 0x0a, 0xc8, 0x8e, 0x00, 0x58, 0x6b, 0xb0, 0xf2, 0x6f, 0xcc, 0xc5, 0x63, 0x85, 0xed, + 0xd0, 0x68, 0x48, 0x46, 0x09, 0x73, 0xb5, 0x54, 0x58, 0xdf, 0x18, 0xb0, 0x7a, 0x09, 0x41, 0xea, + 0x70, 0x1f, 0x9a, 0xa1, 0x1b, 0x73, 0xcc, 0xb2, 0x2e, 0xc9, 0xae, 0xd3, 0xa1, 0xa8, 0x5c, 0x17, + 0x8a, 0x6a, 0x29, 0x14, 0x4b, 0xd0, 0x08, 0xdd, 0x0b, 0x27, 0x3c, 0x4e, 0x5f, 0x23, 0xf5, 0xd0, + 0xbd, 0x78, 0x79, 0xbc, 0xfd, 0x4b, 0x13, 0x3a, 0x47, 0xd8, 0x3d, 0xc7, 0xd8, 0x97, 0x86, 0xa1, + 0x51, 0xd6, 0x10, 0xc5, 0x6f, 0x3a, 0x74, 0x6f, 0xba, 0xf2, 0x67, 0x7e, 0x44, 0x0e, 0xee, 0x5f, + 0x47, 0x96, 0xd6, 0xd6, 0x1c, 0x7a, 0x05, 0x6d, 0xed, 0xa3, 0x09, 0xad, 0x68, 0x8c, 0xa5, 0x6f, + 0xc1, 0xc1, 0xea, 0x25, 0xd8, 0x4c, 0xda, 0x23, 0x03, 0x1d, 0x42, 0x5b, 0xdb, 0xf5, 0xba, 0xbc, + 0xf2, 0xa3, 0x43, 0x97, 0x37, 0xe3, 0x81, 0x60, 0xcd, 0x09, 0x69, 0xda, 0xc6, 0xd6, 0xa5, 0x95, + 0xdf, 0x08, 0xba, 0xb4, 0x59, 0x6b, 0x5e, 0x4a, 0xd3, 0x16, 0xa4, 0x2e, 0xad, 0xbc, 0xfe, 0x75, + 0x69, 0xb3, 0xb6, 0xea, 0x1c, 0xfa, 0x1c, 0x16, 0x4a, 0xab, 0x0b, 0x59, 0x13, 0xae, 0xcb, 0x76, + 0xee, 0x60, 0xe3, 0x4a, 0x9a, 0x5c, 0xfe, 0x6b, 0xe8, 0xe8, 0x2b, 0x05, 0x69, 0x06, 0xcd, 0x58, + 0x8a, 0x83, 0xb5, 0xcb, 0xd0, 0xba, 0x40, 0x7d, 0x5a, 0xea, 0x02, 0x67, 0xec, 0x0b, 0x5d, 0xe0, + 0xac, 0x21, 0x6b, 0xcd, 0xa1, 0xcf, 0x60, 0x7e, 0x7a, 0x6a, 0xa1, 0xbb, 0xd3, 0x61, 0x2b, 0x0d, + 0xc3, 0x81, 0x75, 0x15, 0x49, 0x2e, 0xfc, 0x00, 0x60, 0x32, 0x8c, 0xd0, 0xf2, 0x84, 0xa7, 0x34, + 0x0c, 0x07, 0x2b, 0xb3, 0x91, 0xb9, 0xa8, 0x2f, 0x60, 0x69, 0x66, 0xc7, 0x23, 0xad, 0x4d, 0xae, + 0x9a, 0x19, 0x83, 0xbf, 0x5c, 0x4b, 0x97, 0xe9, 0x7a, 0xb1, 0x06, 0xf3, 0xb1, 0x6a, 0xe4, 0x61, + 0xbc, 0xe5, 0x05, 0x04, 0x47, 0xfc, 0x05, 0x48, 0x8e, 0x37, 0x8c, 0x72, 0x7a, 0xdc, 0x90, 0xbf, + 0x83, 0xfe, 0xfe, 0x7b, 0x00, 0x00, 0x00, 0xff, 0xff, 0xc5, 0xce, 0x15, 0x02, 0x1d, 0x12, 0x00, + 0x00, } diff --git a/weed/pb/filer_pb/filer_pb_helper.go b/weed/pb/filer_pb/filer_pb_helper.go index 5c40332e6..b2ffacc01 100644 
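The regenerated stubs above add an Error field to CreateEntryResponse, and the filer_pb_helper.go change just below wraps the RPC so that both transport errors and the in-band resp.Error string come back as a single Go error. A minimal sketch of the new calling convention, assuming an already-connected SeaweedFilerClient; the helper name createHello, the directory, and the attribute values are made up for illustration:

```go
package example

import (
	"context"
	"log"
	"time"

	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
)

// createHello creates one entry via the new filer_pb.CreateEntry wrapper.
// Any failure, whether a gRPC error or a server-side resp.Error, arrives
// here as a normal error value.
func createHello(ctx context.Context, client filer_pb.SeaweedFilerClient) error {
	err := filer_pb.CreateEntry(ctx, client, &filer_pb.CreateEntryRequest{
		Directory: "/buckets/demo", // illustrative path
		Entry: &filer_pb.Entry{
			Name: "hello.txt",
			// non-nil attributes are required by the server-side check
			Attributes: &filer_pb.FuseAttributes{
				Crtime:   time.Now().Unix(),
				Mtime:    time.Now().Unix(),
				FileMode: 0644,
			},
		},
	})
	if err != nil {
		log.Printf("create entry: %v", err)
	}
	return err
}
```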
--- a/weed/pb/filer_pb/filer_pb_helper.go +++ b/weed/pb/filer_pb/filer_pb_helper.go @@ -1,6 +1,9 @@ package filer_pb import ( + "context" + "fmt" + "github.com/chrislusf/seaweedfs/weed/storage/needle" ) @@ -67,3 +70,14 @@ func AfterEntryDeserialization(chunks []*FileChunk) { } } + +func CreateEntry(ctx context.Context, client SeaweedFilerClient, request *CreateEntryRequest) error { + resp, err := client.CreateEntry(ctx, request) + if err == nil && resp.Error != "" { + return fmt.Errorf("CreateEntry: %v", resp.Error) + } + if err != nil { + return fmt.Errorf("CreateEntry: %v", err) + } + return err +} diff --git a/weed/replication/sink/filersink/filer_sink.go b/weed/replication/sink/filersink/filer_sink.go index f99c7fdf6..58a3ab9c2 100644 --- a/weed/replication/sink/filersink/filer_sink.go +++ b/weed/replication/sink/filersink/filer_sink.go @@ -124,7 +124,7 @@ func (fs *FilerSink) CreateEntry(ctx context.Context, key string, entry *filer_p } glog.V(1).Infof("create: %v", request) - if _, err := client.CreateEntry(ctx, request); err != nil { + if err := filer_pb.CreateEntry(ctx, client, request); err != nil { glog.V(0).Infof("create entry %s: %v", key, err) return fmt.Errorf("create entry %s: %v", key, err) } diff --git a/weed/s3api/filer_util.go b/weed/s3api/filer_util.go index ed9612d35..2fceacd2a 100644 --- a/weed/s3api/filer_util.go +++ b/weed/s3api/filer_util.go @@ -37,7 +37,7 @@ func (s3a *S3ApiServer) mkdir(ctx context.Context, parentDirectoryPath string, d } glog.V(1).Infof("mkdir: %v", request) - if _, err := client.CreateEntry(ctx, request); err != nil { + if err := filer_pb.CreateEntry(ctx, client, request); err != nil { glog.V(0).Infof("mkdir %v: %v", request, err) return fmt.Errorf("mkdir %s/%s: %v", parentDirectoryPath, dirName, err) } @@ -68,7 +68,7 @@ func (s3a *S3ApiServer) mkFile(ctx context.Context, parentDirectoryPath string, } glog.V(1).Infof("create file: %s/%s", parentDirectoryPath, fileName) - if _, err := client.CreateEntry(ctx, request); err != nil { + if err := filer_pb.CreateEntry(ctx, client, request); err != nil { glog.V(0).Infof("create file %v:%v", request, err) return fmt.Errorf("create file %s/%s: %v", parentDirectoryPath, fileName, err) } diff --git a/weed/server/filer_grpc_server.go b/weed/server/filer_grpc_server.go index 5145035d2..03954a58c 100644 --- a/weed/server/filer_grpc_server.go +++ b/weed/server/filer_grpc_server.go @@ -132,27 +132,31 @@ func (fs *FilerServer) LookupVolume(ctx context.Context, req *filer_pb.LookupVol func (fs *FilerServer) CreateEntry(ctx context.Context, req *filer_pb.CreateEntryRequest) (resp *filer_pb.CreateEntryResponse, err error) { + resp = &filer_pb.CreateEntryResponse{} + fullpath := filer2.FullPath(filepath.ToSlash(filepath.Join(req.Directory, req.Entry.Name))) chunks, garbages := filer2.CompactFileChunks(req.Entry.Chunks) if req.Entry.Attributes == nil { glog.V(3).Infof("CreateEntry %s: nil attributes", filepath.Join(req.Directory, req.Entry.Name)) - return nil, fmt.Errorf("can not create entry with empty attributes") + resp.Error = fmt.Sprintf("can not create entry with empty attributes") + return } - err = fs.filer.CreateEntry(ctx, &filer2.Entry{ + createErr := fs.filer.CreateEntry(ctx, &filer2.Entry{ FullPath: fullpath, Attr: filer2.PbToEntryAttribute(req.Entry.Attributes), Chunks: chunks, }, req.OExcl) - if err == nil { + if createErr == nil { fs.filer.DeleteChunks(garbages) } else { - glog.V(3).Infof("CreateEntry %s: %v", filepath.Join(req.Directory, req.Entry.Name), err) + glog.V(3).Infof("CreateEntry %s: %v", 
filepath.Join(req.Directory, req.Entry.Name), createErr) + resp.Error = createErr.Error() } - return &filer_pb.CreateEntryResponse{}, err + return } func (fs *FilerServer) UpdateEntry(ctx context.Context, req *filer_pb.UpdateEntryRequest) (*filer_pb.UpdateEntryResponse, error) { diff --git a/weed/server/webdav_server.go b/weed/server/webdav_server.go index 17f814302..875cc2999 100644 --- a/weed/server/webdav_server.go +++ b/weed/server/webdav_server.go @@ -155,7 +155,7 @@ func (fs *WebDavFileSystem) Mkdir(ctx context.Context, fullDirPath string, perm } glog.V(1).Infof("mkdir: %v", request) - if _, err := client.CreateEntry(ctx, request); err != nil { + if err := filer_pb.CreateEntry(ctx, client, request); err != nil { return fmt.Errorf("mkdir %s/%s: %v", dir, name, err) } @@ -187,7 +187,7 @@ func (fs *WebDavFileSystem) OpenFile(ctx context.Context, fullFilePath string, f dir, name := filer2.FullPath(fullFilePath).DirAndName() err = fs.WithFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { - if _, err := client.CreateEntry(ctx, &filer_pb.CreateEntryRequest{ + if err := filer_pb.CreateEntry(ctx, client, &filer_pb.CreateEntryRequest{ Directory: dir, Entry: &filer_pb.Entry{ Name: name, diff --git a/weed/shell/command_fs_meta_load.go b/weed/shell/command_fs_meta_load.go index 5ea8de9f5..52dd0321b 100644 --- a/weed/shell/command_fs_meta_load.go +++ b/weed/shell/command_fs_meta_load.go @@ -80,7 +80,7 @@ func (c *commandFsMetaLoad) Do(args []string, commandEnv *CommandEnv, writer io. return err } - if _, err = client.CreateEntry(ctx, &filer_pb.CreateEntryRequest{ + if err := filer_pb.CreateEntry(ctx, client, &filer_pb.CreateEntryRequest{ Directory: fullEntry.Dir, Entry: fullEntry.Entry, }); err != nil { From 835da19c09465dd8aae51a5c9f1ed637a9818f56 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Sun, 26 Jan 2020 00:50:18 -0800 Subject: [PATCH 0079/2432] add logging --- weed/filer2/filer.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/weed/filer2/filer.go b/weed/filer2/filer.go index c603777bd..a0af942e0 100644 --- a/weed/filer2/filer.go +++ b/weed/filer2/filer.go @@ -157,7 +157,7 @@ func (f *Filer) CreateEntry(ctx context.Context, entry *Entry, o_excl bool) erro oldEntry, _ := f.FindEntry(ctx, entry.FullPath) - glog.V(4).Infof("CreateEntry %s: old entry : %v", entry.FullPath, oldEntry) + glog.V(4).Infof("CreateEntry %s: old entry: %v exclusive:%v", entry.FullPath, oldEntry, o_excl) if oldEntry == nil { if err := f.store.InsertEntry(ctx, entry); err != nil { glog.Errorf("insert entry %s: %v", entry.FullPath, err) From 19a05ad1746f7489a8e4981c01a4b8d2f5573654 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Sun, 26 Jan 2020 12:47:23 -0800 Subject: [PATCH 0080/2432] add test cases --- weed/filer2/filechunks_test.go | 36 ++++++++++++++++++++++++++++++++++ 1 file changed, 36 insertions(+) diff --git a/weed/filer2/filechunks_test.go b/weed/filer2/filechunks_test.go index e75e60753..ed30c2abc 100644 --- a/weed/filer2/filechunks_test.go +++ b/weed/filer2/filechunks_test.go @@ -331,6 +331,42 @@ func TestChunksReading(t *testing.T) { {Offset: 0, Size: 100, FileId: "asdf", LogicOffset: 100}, }, }, + // case 8: edge cases + { + Chunks: []*filer_pb.FileChunk{ + {Offset: 0, Size: 100, FileId: "abc", Mtime: 123}, + {Offset: 90, Size: 200, FileId: "asdf", Mtime: 134}, + {Offset: 190, Size: 300, FileId: "fsad", Mtime: 353}, + }, + Offset: 0, + Size: 300, + Expected: []*ChunkView{ + {Offset: 0, Size: 90, FileId: "abc", LogicOffset: 0}, + {Offset: 0, Size: 100, FileId: "asdf", 
LogicOffset: 90}, + {Offset: 0, Size: 110, FileId: "fsad", LogicOffset: 190}, + }, + }, + // case 9: edge cases + { + Chunks: []*filer_pb.FileChunk{ + {Offset: 0, Size: 43175947, FileId: "2,111fc2cbfac1", Mtime: 1}, + {Offset: 43175936, Size: 52981771-43175936, FileId: "2,112a36ea7f85", Mtime: 2}, + {Offset: 52981760, Size: 72564747-52981760, FileId: "4,112d5f31c5e7", Mtime: 3}, + {Offset: 72564736, Size: 133255179-72564736, FileId: "1,113245f0cdb6", Mtime: 4}, + {Offset: 133255168, Size: 137269259-133255168, FileId: "3,1141a70733b5", Mtime: 5}, + {Offset: 137269248, Size: 153578836-137269248, FileId: "1,114201d5bbdb", Mtime: 6}, + }, + Offset: 0, + Size: 153578836, + Expected: []*ChunkView{ + {Offset: 0, Size: 43175936, FileId: "2,111fc2cbfac1", LogicOffset: 0}, + {Offset: 0, Size: 52981760-43175936, FileId: "2,112a36ea7f85", LogicOffset: 43175936}, + {Offset: 0, Size: 72564736-52981760, FileId: "4,112d5f31c5e7", LogicOffset: 52981760}, + {Offset: 0, Size: 133255168-72564736, FileId: "1,113245f0cdb6", LogicOffset: 72564736}, + {Offset: 0, Size: 137269248-133255168, FileId: "3,1141a70733b5", LogicOffset: 133255168}, + {Offset: 0, Size: 153578836-137269248, FileId: "1,114201d5bbdb", LogicOffset: 137269248}, + }, + }, } for i, testcase := range testcases { From 08e4b56a8abaaadb0701979e4bd857fb8c50c776 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Sun, 26 Jan 2020 13:01:11 -0800 Subject: [PATCH 0081/2432] mount: able to handle large git clone --- weed/filesys/dirty_page.go | 7 +---- weed/filesys/dirty_page_interval.go | 34 +++++++++++--------- weed/filesys/dirty_page_interval_test.go | 40 ++++++++++++++++++++++++ 3 files changed, 60 insertions(+), 21 deletions(-) create mode 100644 weed/filesys/dirty_page_interval_test.go diff --git a/weed/filesys/dirty_page.go b/weed/filesys/dirty_page.go index f1532a6a0..7a41e371e 100644 --- a/weed/filesys/dirty_page.go +++ b/weed/filesys/dirty_page.go @@ -44,12 +44,7 @@ func (pages *ContinuousDirtyPages) AddPage(ctx context.Context, offset int64, da return pages.flushAndSave(ctx, offset, data) } - hasOverlap := pages.intervals.AddInterval(data, offset) - if hasOverlap { - chunks, err = pages.saveExistingPagesToStorage(ctx) - pages.intervals.AddInterval(data, offset) - return - } + pages.intervals.AddInterval(data, offset) var chunk *filer_pb.FileChunk var hasSavedData bool diff --git a/weed/filesys/dirty_page_interval.go b/weed/filesys/dirty_page_interval.go index c64196cdf..77fab75ef 100644 --- a/weed/filesys/dirty_page_interval.go +++ b/weed/filesys/dirty_page_interval.go @@ -65,7 +65,10 @@ func (c *ContinuousIntervals) TotalSize() (total int64) { return } -func (c *ContinuousIntervals) AddInterval(data []byte, offset int64) (hasOverlap bool) { +func (c *ContinuousIntervals) AddInterval(data []byte, offset int64) { + + // TODO AddInterval needs to handle all possible out of order writes + interval := &IntervalNode{Data: data, Offset: offset, Size: int64(len(data))} var prevList, nextList *IntervalLinkedList @@ -75,6 +78,10 @@ func (c *ContinuousIntervals) AddInterval(data []byte, offset int64) (hasOverlap nextList = list break } + if list.Head.Offset < interval.Offset+interval.Size && interval.Offset+interval.Size <= list.Head.Offset+list.Size() { + glog.V(0).Infof("unexpected [%d,%d) overlaps [%d,%d)", interval.Offset, interval.Offset+interval.Size, list.Head.Offset, list.Head.Offset+list.Size()) + break + } } for _, list := range c.lists { @@ -84,20 +91,17 @@ func (c *ContinuousIntervals) AddInterval(data []byte, offset int64) (hasOverlap break } if 
list.Head.Offset <= offset && offset < list.Head.Offset+list.Size() { - if list.Tail.Offset <= offset { - dataStartIndex := list.Tail.Offset + list.Tail.Size - offset - glog.V(4).Infof("overlap data new [0,%d) same=%v", dataStartIndex, bytes.Compare(interval.Data[0:dataStartIndex], list.Tail.Data[len(list.Tail.Data)-int(dataStartIndex):])) - interval.Data = interval.Data[dataStartIndex:] - interval.Size -= dataStartIndex - interval.Offset = offset + dataStartIndex - glog.V(4).Infof("overlapping append as [%d,%d) dataSize=%d", interval.Offset, interval.Offset+interval.Size, len(interval.Data)) - list.addNodeToTail(interval) - prevList = list - break - } - glog.V(4).Infof("overlapped! interval is [%d,%d) dataSize=%d", interval.Offset, interval.Offset+interval.Size, len(interval.Data)) - hasOverlap = true - return + + // the new interval overwrites the old tail + dataStartIndex := list.Tail.Offset + list.Tail.Size - offset + glog.V(4).Infof("overlap data new [0,%d) same=%v", dataStartIndex, bytes.Compare(interval.Data[0:dataStartIndex], list.Tail.Data[len(list.Tail.Data)-int(dataStartIndex):])) + list.Tail.Data = list.Tail.Data[:len(list.Tail.Data)-int(dataStartIndex)] + list.Tail.Size -= dataStartIndex + glog.V(4).Infof("overlapping append as [%d,%d) dataSize=%d", interval.Offset, interval.Offset+interval.Size, len(interval.Data)) + + list.addNodeToTail(interval) + prevList = list + break } } diff --git a/weed/filesys/dirty_page_interval_test.go b/weed/filesys/dirty_page_interval_test.go new file mode 100644 index 000000000..4f62f90c9 --- /dev/null +++ b/weed/filesys/dirty_page_interval_test.go @@ -0,0 +1,40 @@ +package filesys + +import ( + "bytes" + "testing" +) + +func TestContinuousIntervals_AddInterval(t *testing.T) { + + c := &ContinuousIntervals{} + + // 25, 25, 25 + c.AddInterval(getBytes(25, 3), 0) + // _, _, 23, 23, 23, 23 + c.AddInterval(getBytes(23, 4), 2) + + expectedData(t, c, 0, 25, 25, 23, 23, 23, 23) +} + +func expectedData(t *testing.T, c *ContinuousIntervals, offset int, data ...byte) { + start, stop := int64(offset), int64(offset+len(data)) + for _, list := range c.lists { + nodeStart, nodeStop := max(start, list.Head.Offset), min(stop, list.Head.Offset+list.Size()) + if nodeStart < nodeStop { + buf := make([]byte, nodeStop-nodeStart) + list.ReadData(buf, nodeStart, nodeStop) + if bytes.Compare(buf, data[nodeStart-start:nodeStop-start]) != 0 { + t.Errorf("expected %v actual %v", data[nodeStart-start:nodeStop-start], buf) + } + } + } +} + +func getBytes(content byte, length int) []byte { + data := make([]byte, length) + for i := 0; i < length; i++ { + data[i] = content + } + return data +} From 8d372f7394d81749569853e55c029c1d3cbb710c Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Sun, 26 Jan 2020 13:24:43 -0800 Subject: [PATCH 0082/2432] update docker readme --- docker/README.md | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/docker/README.md b/docker/README.md index cfe281e71..45b5a98a9 100644 --- a/docker/README.md +++ b/docker/README.md @@ -16,6 +16,10 @@ docker-compose -f seaweedfs-compose.yml -p seaweedfs up ```bash cd $GOPATH/src/github.com/chrislusf/seaweedfs/docker -docker-compose -f dev-compose.yml -p seaweedfs up +// use existing builds for git tip +docker-compose -f seaweedfs-dev-compose.yml -p seaweedfs up + +// use local repo +docker-compose -f local-dev-compose.yml -p seaweedfs up ``` From 0c298ef8906816b40b19db36be673af564af032a Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Sun, 26 Jan 2020 13:27:25 -0800 Subject: [PATCH 0083/2432] 
Update README.md --- docker/README.md | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/docker/README.md b/docker/README.md index 45b5a98a9..1a2833c7e 100644 --- a/docker/README.md +++ b/docker/README.md @@ -11,15 +11,21 @@ docker-compose -f seaweedfs-compose.yml -p seaweedfs up ``` -## Development +## Try latest tip ```bash -cd $GOPATH/src/github.com/chrislusf/seaweedfs/docker -// use existing builds for git tip +wget https://raw.githubusercontent.com/chrislusf/seaweedfs/master/docker/seaweedfs-dev-compose.yml + docker-compose -f seaweedfs-dev-compose.yml -p seaweedfs up -// use local repo +``` + +## Local Development + +```bash +cd $GOPATH/src/github.com/chrislusf/seaweedfs/docker + docker-compose -f local-dev-compose.yml -p seaweedfs up ``` From 72a64a5cf8c2a5adfe59665a746e013ca948e681 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Sun, 26 Jan 2020 14:42:11 -0800 Subject: [PATCH 0084/2432] use the same context object in order to retry --- weed/command/filer_copy.go | 2 +- weed/filer2/filer_client_util.go | 8 ++++---- weed/filesys/dir.go | 14 ++++++++------ weed/filesys/dir_link.go | 2 +- weed/filesys/dir_rename.go | 2 +- weed/filesys/dirty_page.go | 2 +- weed/filesys/file.go | 2 +- weed/filesys/filehandle.go | 2 +- weed/filesys/wfs.go | 14 +++++++------- weed/filesys/wfs_deletion.go | 2 +- weed/filesys/xattr.go | 2 +- weed/operation/assign_file_id.go | 2 +- weed/operation/delete_content.go | 2 +- weed/operation/grpc_client.go | 12 ++++++------ weed/operation/lookup.go | 4 ++-- weed/operation/stats.go | 4 ++-- weed/operation/sync_volume.go | 4 ++-- weed/operation/tail_volume.go | 4 ++-- weed/replication/sink/filersink/fetch_write.go | 8 ++++---- weed/replication/sink/filersink/filer_sink.go | 8 ++++---- weed/replication/source/filer_source.go | 8 ++++---- weed/s3api/s3api_handlers.go | 2 +- weed/server/filer_server.go | 4 ++-- weed/server/master_grpc_server_collection.go | 8 ++++---- weed/server/master_server_handlers_admin.go | 4 ++-- weed/server/volume_grpc_copy.go | 2 +- weed/server/volume_grpc_erasure_coding.go | 2 +- weed/server/webdav_server.go | 18 +++++++++--------- weed/shell/command_ec_common.go | 8 ++++---- weed/shell/command_ec_decode.go | 6 +++--- weed/shell/command_ec_encode.go | 4 ++-- weed/shell/command_ec_rebuild.go | 4 ++-- weed/shell/command_fs_cat.go | 2 +- weed/shell/command_fs_du.go | 8 ++++---- weed/shell/command_fs_meta_cat.go | 2 +- weed/shell/command_fs_meta_load.go | 2 +- weed/shell/command_fs_mv.go | 2 +- weed/shell/command_volume_fix_replication.go | 2 +- weed/shell/command_volume_mount.go | 2 +- weed/shell/command_volume_move.go | 6 +++--- weed/shell/command_volume_tier_download.go | 2 +- weed/shell/command_volume_tier_upload.go | 2 +- weed/shell/command_volume_unmount.go | 2 +- weed/shell/commands.go | 2 +- weed/storage/store_ec.go | 4 ++-- weed/storage/store_ec_delete.go | 2 +- weed/storage/volume_backup.go | 4 +--- weed/topology/allocate_volume.go | 2 +- weed/topology/topology_vacuum.go | 16 ++++++++-------- weed/util/grpc_client_server.go | 6 +++--- weed/wdclient/masterclient.go | 4 ++-- 51 files changed, 121 insertions(+), 121 deletions(-) diff --git a/weed/command/filer_copy.go b/weed/command/filer_copy.go index 71143f307..e74ea7d93 100644 --- a/weed/command/filer_copy.go +++ b/weed/command/filer_copy.go @@ -466,7 +466,7 @@ func detectMimeType(f *os.File) string { func withFilerClient(ctx context.Context, filerAddress string, grpcDialOption grpc.DialOption, fn func(filer_pb.SeaweedFilerClient) error) error { - return 
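Everything from here on applies one mechanical change: the gRPC helper callbacks now take a context.Context as their first argument, so the wrapper decides which context the callback runs under and can rerun the same callback with a fresh one when the original request was canceled. A rough sketch of that pattern, assuming the post-patch util.WithCachedGrpcClient signature visible in these hunks; the address parameter and the single-retry policy here are illustrative, not part of the patch:

```go
package example

import (
	"context"
	"strings"

	"google.golang.org/grpc"

	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
	"github.com/chrislusf/seaweedfs/weed/util"
)

// withFilerClient mirrors the shape of WFS.WithFilerClient after this patch:
// the callback receives the context chosen by the wrapper, so the retry path
// can hand it a background context after a "context canceled" failure.
func withFilerClient(ctx context.Context, filerGrpcAddress string, dialOption grpc.DialOption,
	fn func(context.Context, filer_pb.SeaweedFilerClient) error) error {

	call := func(ctx context.Context) error {
		return util.WithCachedGrpcClient(ctx, func(ctx2 context.Context, conn *grpc.ClientConn) error {
			return fn(ctx2, filer_pb.NewSeaweedFilerClient(conn))
		}, filerGrpcAddress, dialOption)
	}

	err := call(ctx)
	if err != nil && strings.Contains(err.Error(), "context canceled") {
		// rerun the same callback, but under a context that is still alive
		return call(context.Background())
	}
	return err
}
```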
util.WithCachedGrpcClient(ctx, func(clientConn *grpc.ClientConn) error { + return util.WithCachedGrpcClient(ctx, func(ctx context.Context, clientConn *grpc.ClientConn) error { client := filer_pb.NewSeaweedFilerClient(clientConn) return fn(client) }, filerAddress, grpcDialOption) diff --git a/weed/filer2/filer_client_util.go b/weed/filer2/filer_client_util.go index 1c5af7fe2..af804b909 100644 --- a/weed/filer2/filer_client_util.go +++ b/weed/filer2/filer_client_util.go @@ -22,7 +22,7 @@ func VolumeId(fileId string) string { } type FilerClient interface { - WithFilerClient(ctx context.Context, fn func(filer_pb.SeaweedFilerClient) error) error + WithFilerClient(ctx context.Context, fn func(context.Context, filer_pb.SeaweedFilerClient) error) error } func ReadIntoBuffer(ctx context.Context, filerClient FilerClient, fullFilePath FullPath, buff []byte, chunkViews []*ChunkView, baseOffset int64) (totalRead int64, err error) { @@ -33,7 +33,7 @@ func ReadIntoBuffer(ctx context.Context, filerClient FilerClient, fullFilePath F vid2Locations := make(map[string]*filer_pb.Locations) - err = filerClient.WithFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { + err = filerClient.WithFilerClient(ctx, func(ctx context.Context, client filer_pb.SeaweedFilerClient) error { glog.V(4).Infof("read fh lookup volume id locations: %v", vids) resp, err := client.LookupVolume(ctx, &filer_pb.LookupVolumeRequest{ @@ -97,7 +97,7 @@ func GetEntry(ctx context.Context, filerClient FilerClient, fullFilePath FullPat dir, name := fullFilePath.DirAndName() - err = filerClient.WithFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { + err = filerClient.WithFilerClient(ctx, func(ctx context.Context, client filer_pb.SeaweedFilerClient) error { request := &filer_pb.LookupDirectoryEntryRequest{ Directory: dir, @@ -128,7 +128,7 @@ func GetEntry(ctx context.Context, filerClient FilerClient, fullFilePath FullPat func ReadDirAllEntries(ctx context.Context, filerClient FilerClient, fullDirPath FullPath, prefix string, fn func(entry *filer_pb.Entry, isLast bool)) (err error) { - err = filerClient.WithFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { + err = filerClient.WithFilerClient(ctx, func(ctx context.Context, client filer_pb.SeaweedFilerClient) error { lastEntryName := "" diff --git a/weed/filesys/dir.go b/weed/filesys/dir.go index fe6b30619..abe5a21a6 100644 --- a/weed/filesys/dir.go +++ b/weed/filesys/dir.go @@ -128,7 +128,7 @@ func (dir *Dir) Create(ctx context.Context, req *fuse.CreateRequest, } glog.V(1).Infof("create: %v", req.String()) - if err := dir.wfs.WithFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { + if err := dir.wfs.WithFilerClient(ctx, func(ctx context.Context, client filer_pb.SeaweedFilerClient) error { if err := filer_pb.CreateEntry(ctx, client, request); err != nil { if strings.Contains(err.Error(), "EEXIST") { return fuse.EEXIST @@ -139,11 +139,13 @@ func (dir *Dir) Create(ctx context.Context, req *fuse.CreateRequest, }); err != nil { return nil, nil, err } - node := dir.newFile(req.Name, request.Entry) + var node fs.Node if request.Entry.IsDirectory { + node = dir.newDirectory(filer2.NewFullPath(dir.Path, req.Name), request.Entry) return node, nil, nil } + node = dir.newFile(req.Name, request.Entry) file := node.(*File) file.isOpen++ fh := dir.wfs.AcquireHandle(file, req.Uid, req.Gid) @@ -165,7 +167,7 @@ func (dir *Dir) Mkdir(ctx context.Context, req *fuse.MkdirRequest) (fs.Node, err }, } - err := dir.wfs.WithFilerClient(ctx, func(client 
filer_pb.SeaweedFilerClient) error { + err := dir.wfs.WithFilerClient(ctx, func(ctx context.Context, client filer_pb.SeaweedFilerClient) error { request := &filer_pb.CreateEntryRequest{ Directory: dir.Path, @@ -279,7 +281,7 @@ func (dir *Dir) removeOneFile(ctx context.Context, req *fuse.RemoveRequest) erro dir.wfs.cacheDelete(filePath) - return dir.wfs.WithFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { + return dir.wfs.WithFilerClient(ctx, func(ctx context.Context, client filer_pb.SeaweedFilerClient) error { request := &filer_pb.DeleteEntryRequest{ Directory: dir.Path, @@ -303,7 +305,7 @@ func (dir *Dir) removeFolder(ctx context.Context, req *fuse.RemoveRequest) error dir.wfs.cacheDelete(filer2.NewFullPath(dir.Path, req.Name)) - return dir.wfs.WithFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { + return dir.wfs.WithFilerClient(ctx, func(ctx context.Context, client filer_pb.SeaweedFilerClient) error { request := &filer_pb.DeleteEntryRequest{ Directory: dir.Path, @@ -427,7 +429,7 @@ func (dir *Dir) saveEntry(ctx context.Context) error { parentDir, name := filer2.FullPath(dir.Path).DirAndName() - return dir.wfs.WithFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { + return dir.wfs.WithFilerClient(ctx, func(ctx context.Context, client filer_pb.SeaweedFilerClient) error { request := &filer_pb.UpdateEntryRequest{ Directory: parentDir, diff --git a/weed/filesys/dir_link.go b/weed/filesys/dir_link.go index 13be62670..8b7ec7e89 100644 --- a/weed/filesys/dir_link.go +++ b/weed/filesys/dir_link.go @@ -35,7 +35,7 @@ func (dir *Dir) Symlink(ctx context.Context, req *fuse.SymlinkRequest) (fs.Node, }, } - err := dir.wfs.WithFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { + err := dir.wfs.WithFilerClient(ctx, func(ctx context.Context, client filer_pb.SeaweedFilerClient) error { if err := filer_pb.CreateEntry(ctx, client, request); err != nil { glog.V(0).Infof("symlink %s/%s: %v", dir.Path, req.NewName, err) return fuse.EIO diff --git a/weed/filesys/dir_rename.go b/weed/filesys/dir_rename.go index 8db879d2c..4eb3c15b5 100644 --- a/weed/filesys/dir_rename.go +++ b/weed/filesys/dir_rename.go @@ -15,7 +15,7 @@ func (dir *Dir) Rename(ctx context.Context, req *fuse.RenameRequest, newDirector newDir := newDirectory.(*Dir) glog.V(4).Infof("dir Rename %s/%s => %s/%s", dir.Path, req.OldName, newDir.Path, req.NewName) - err := dir.wfs.WithFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { + err := dir.wfs.WithFilerClient(ctx, func(ctx context.Context, client filer_pb.SeaweedFilerClient) error { request := &filer_pb.AtomicRenameEntryRequest{ OldDirectory: dir.Path, diff --git a/weed/filesys/dirty_page.go b/weed/filesys/dirty_page.go index 7a41e371e..5ff128323 100644 --- a/weed/filesys/dirty_page.go +++ b/weed/filesys/dirty_page.go @@ -140,7 +140,7 @@ func (pages *ContinuousDirtyPages) saveToStorage(ctx context.Context, reader io. 
var fileId, host string var auth security.EncodedJwt - if err := pages.f.wfs.WithFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { + if err := pages.f.wfs.WithFilerClient(ctx, func(ctx context.Context, client filer_pb.SeaweedFilerClient) error { request := &filer_pb.AssignVolumeRequest{ Count: 1, diff --git a/weed/filesys/file.go b/weed/filesys/file.go index cc0717f18..eccef4e58 100644 --- a/weed/filesys/file.go +++ b/weed/filesys/file.go @@ -257,7 +257,7 @@ func (file *File) setEntry(entry *filer_pb.Entry) { } func (file *File) saveEntry(ctx context.Context) error { - return file.wfs.WithFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { + return file.wfs.WithFilerClient(ctx, func(ctx context.Context, client filer_pb.SeaweedFilerClient) error { request := &filer_pb.UpdateEntryRequest{ Directory: file.dir.Path, diff --git a/weed/filesys/filehandle.go b/weed/filesys/filehandle.go index fad5418e2..ef0243090 100644 --- a/weed/filesys/filehandle.go +++ b/weed/filesys/filehandle.go @@ -169,7 +169,7 @@ func (fh *FileHandle) Flush(ctx context.Context, req *fuse.FlushRequest) error { return nil } - err = fh.f.wfs.WithFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { + err = fh.f.wfs.WithFilerClient(ctx, func(ctx context.Context, client filer_pb.SeaweedFilerClient) error { if fh.f.entry.Attributes != nil { fh.f.entry.Attributes.Mime = fh.contentType diff --git a/weed/filesys/wfs.go b/weed/filesys/wfs.go index bc78a0dbe..af1102b45 100644 --- a/weed/filesys/wfs.go +++ b/weed/filesys/wfs.go @@ -88,22 +88,22 @@ func (wfs *WFS) Root() (fs.Node, error) { return wfs.root, nil } -func (wfs *WFS) WithFilerClient(ctx context.Context, fn func(filer_pb.SeaweedFilerClient) error) error { +func (wfs *WFS) WithFilerClient(ctx context.Context, fn func(context.Context, filer_pb.SeaweedFilerClient) error) error { - err := util.WithCachedGrpcClient(ctx, func(grpcConnection *grpc.ClientConn) error { + err := util.WithCachedGrpcClient(ctx, func(ctx2 context.Context, grpcConnection *grpc.ClientConn) error { client := filer_pb.NewSeaweedFilerClient(grpcConnection) - return fn(client) + return fn(ctx2, client) }, wfs.option.FilerGrpcAddress, wfs.option.GrpcDialOption) if err == nil { return nil } if strings.Contains(err.Error(), "context canceled") { - time.Sleep(1337 * time.Millisecond) + time.Sleep(3337 * time.Millisecond) glog.V(2).Infoln("retry context canceled request...") - return util.WithCachedGrpcClient(context.Background(), func(grpcConnection *grpc.ClientConn) error { + return util.WithCachedGrpcClient(context.Background(), func(ctx2 context.Context, grpcConnection *grpc.ClientConn) error { client := filer_pb.NewSeaweedFilerClient(grpcConnection) - return fn(client) + return fn(ctx2, client) }, wfs.option.FilerGrpcAddress, wfs.option.GrpcDialOption) } return err @@ -163,7 +163,7 @@ func (wfs *WFS) Statfs(ctx context.Context, req *fuse.StatfsRequest, resp *fuse. 
if wfs.stats.lastChecked < time.Now().Unix()-20 { - err := wfs.WithFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { + err := wfs.WithFilerClient(ctx, func(ctx context.Context, client filer_pb.SeaweedFilerClient) error { request := &filer_pb.StatisticsRequest{ Collection: wfs.option.Collection, diff --git a/weed/filesys/wfs_deletion.go b/weed/filesys/wfs_deletion.go index 52c275e26..cce0c792c 100644 --- a/weed/filesys/wfs_deletion.go +++ b/weed/filesys/wfs_deletion.go @@ -20,7 +20,7 @@ func (wfs *WFS) deleteFileChunks(ctx context.Context, chunks []*filer_pb.FileChu fileIds = append(fileIds, chunk.GetFileIdString()) } - wfs.WithFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { + wfs.WithFilerClient(ctx, func(ctx context.Context, client filer_pb.SeaweedFilerClient) error { deleteFileIds(ctx, wfs.option.GrpcDialOption, client, fileIds) return nil }) diff --git a/weed/filesys/xattr.go b/weed/filesys/xattr.go index 75ba0f2ba..9dfb491fd 100644 --- a/weed/filesys/xattr.go +++ b/weed/filesys/xattr.go @@ -117,7 +117,7 @@ func (wfs *WFS) maybeLoadEntry(ctx context.Context, dir, name string) (entry *fi } // glog.V(3).Infof("read entry cache miss %s", fullpath) - err = wfs.WithFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { + err = wfs.WithFilerClient(ctx, func(ctx context.Context, client filer_pb.SeaweedFilerClient) error { request := &filer_pb.LookupDirectoryEntryRequest{ Name: name, diff --git a/weed/operation/assign_file_id.go b/weed/operation/assign_file_id.go index 2dfa44483..b67d8b708 100644 --- a/weed/operation/assign_file_id.go +++ b/weed/operation/assign_file_id.go @@ -44,7 +44,7 @@ func Assign(server string, grpcDialOption grpc.DialOption, primaryRequest *Volum continue } - lastError = WithMasterServerClient(server, grpcDialOption, func(masterClient master_pb.SeaweedClient) error { + lastError = WithMasterServerClient(server, grpcDialOption, func(ctx context.Context, masterClient master_pb.SeaweedClient) error { req := &master_pb.AssignRequest{ Count: primaryRequest.Count, diff --git a/weed/operation/delete_content.go b/weed/operation/delete_content.go index e4aa6c6d3..95bbde9f9 100644 --- a/weed/operation/delete_content.go +++ b/weed/operation/delete_content.go @@ -117,7 +117,7 @@ func DeleteFilesWithLookupVolumeId(grpcDialOption grpc.DialOption, fileIds []str // DeleteFilesAtOneVolumeServer deletes a list of files that is on one volume server via gRpc func DeleteFilesAtOneVolumeServer(volumeServer string, grpcDialOption grpc.DialOption, fileIds []string) (ret []*volume_server_pb.DeleteResult, err error) { - err = WithVolumeServerClient(volumeServer, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error { + err = WithVolumeServerClient(volumeServer, grpcDialOption, func(ctx context.Context, volumeServerClient volume_server_pb.VolumeServerClient) error { req := &volume_server_pb.BatchDeleteRequest{ FileIds: fileIds, diff --git a/weed/operation/grpc_client.go b/weed/operation/grpc_client.go index f6b2b69e9..e7ee2d2ba 100644 --- a/weed/operation/grpc_client.go +++ b/weed/operation/grpc_client.go @@ -12,7 +12,7 @@ import ( "strings" ) -func WithVolumeServerClient(volumeServer string, grpcDialOption grpc.DialOption, fn func(volume_server_pb.VolumeServerClient) error) error { +func WithVolumeServerClient(volumeServer string, grpcDialOption grpc.DialOption, fn func(context.Context, volume_server_pb.VolumeServerClient) error) error { ctx := context.Background() @@ -21,9 +21,9 @@ func WithVolumeServerClient(volumeServer 
string, grpcDialOption grpc.DialOption, return err } - return util.WithCachedGrpcClient(ctx, func(grpcConnection *grpc.ClientConn) error { + return util.WithCachedGrpcClient(ctx, func(ctx2 context.Context, grpcConnection *grpc.ClientConn) error { client := volume_server_pb.NewVolumeServerClient(grpcConnection) - return fn(client) + return fn(ctx2, client) }, grpcAddress, grpcDialOption) } @@ -38,7 +38,7 @@ func toVolumeServerGrpcAddress(volumeServer string) (grpcAddress string, err err return fmt.Sprintf("%s:%d", volumeServer[0:sepIndex], port+10000), nil } -func WithMasterServerClient(masterServer string, grpcDialOption grpc.DialOption, fn func(masterClient master_pb.SeaweedClient) error) error { +func WithMasterServerClient(masterServer string, grpcDialOption grpc.DialOption, fn func(ctx2 context.Context, masterClient master_pb.SeaweedClient) error) error { ctx := context.Background() @@ -47,9 +47,9 @@ func WithMasterServerClient(masterServer string, grpcDialOption grpc.DialOption, return fmt.Errorf("failed to parse master grpc %v: %v", masterServer, parseErr) } - return util.WithCachedGrpcClient(ctx, func(grpcConnection *grpc.ClientConn) error { + return util.WithCachedGrpcClient(ctx, func(ctx2 context.Context, grpcConnection *grpc.ClientConn) error { client := master_pb.NewSeaweedClient(grpcConnection) - return fn(client) + return fn(ctx2, client) }, masterGrpcAddress, grpcDialOption) } diff --git a/weed/operation/lookup.go b/weed/operation/lookup.go index d0773e7fd..78769ac5a 100644 --- a/weed/operation/lookup.go +++ b/weed/operation/lookup.go @@ -99,12 +99,12 @@ func LookupVolumeIds(server string, grpcDialOption grpc.DialOption, vids []strin //only query unknown_vids - err := WithMasterServerClient(server, grpcDialOption, func(masterClient master_pb.SeaweedClient) error { + err := WithMasterServerClient(server, grpcDialOption, func(ctx context.Context, masterClient master_pb.SeaweedClient) error { req := &master_pb.LookupVolumeRequest{ VolumeIds: unknown_vids, } - resp, grpcErr := masterClient.LookupVolume(context.Background(), req) + resp, grpcErr := masterClient.LookupVolume(ctx, req) if grpcErr != nil { return grpcErr } diff --git a/weed/operation/stats.go b/weed/operation/stats.go index b69a33750..3e6327f19 100644 --- a/weed/operation/stats.go +++ b/weed/operation/stats.go @@ -9,9 +9,9 @@ import ( func Statistics(server string, grpcDialOption grpc.DialOption, req *master_pb.StatisticsRequest) (resp *master_pb.StatisticsResponse, err error) { - err = WithMasterServerClient(server, grpcDialOption, func(masterClient master_pb.SeaweedClient) error { + err = WithMasterServerClient(server, grpcDialOption, func(ctx context.Context, masterClient master_pb.SeaweedClient) error { - grpcResponse, grpcErr := masterClient.Statistics(context.Background(), req) + grpcResponse, grpcErr := masterClient.Statistics(ctx, req) if grpcErr != nil { return grpcErr } diff --git a/weed/operation/sync_volume.go b/weed/operation/sync_volume.go index 5562f12ab..4b39ad544 100644 --- a/weed/operation/sync_volume.go +++ b/weed/operation/sync_volume.go @@ -8,9 +8,9 @@ import ( func GetVolumeSyncStatus(server string, grpcDialOption grpc.DialOption, vid uint32) (resp *volume_server_pb.VolumeSyncStatusResponse, err error) { - WithVolumeServerClient(server, grpcDialOption, func(client volume_server_pb.VolumeServerClient) error { + WithVolumeServerClient(server, grpcDialOption, func(ctx context.Context, client volume_server_pb.VolumeServerClient) error { - resp, err = client.VolumeSyncStatus(context.Background(), 
&volume_server_pb.VolumeSyncStatusRequest{ + resp, err = client.VolumeSyncStatus(ctx, &volume_server_pb.VolumeSyncStatusRequest{ VolumeId: vid, }) return nil diff --git a/weed/operation/tail_volume.go b/weed/operation/tail_volume.go index b53f18ce1..1e8b0a16e 100644 --- a/weed/operation/tail_volume.go +++ b/weed/operation/tail_volume.go @@ -26,9 +26,9 @@ func TailVolume(master string, grpcDialOption grpc.DialOption, vid needle.Volume } func TailVolumeFromSource(volumeServer string, grpcDialOption grpc.DialOption, vid needle.VolumeId, sinceNs uint64, idleTimeoutSeconds int, fn func(n *needle.Needle) error) error { - return WithVolumeServerClient(volumeServer, grpcDialOption, func(client volume_server_pb.VolumeServerClient) error { + return WithVolumeServerClient(volumeServer, grpcDialOption, func(ctx context.Context, client volume_server_pb.VolumeServerClient) error { - stream, err := client.VolumeTailSender(context.Background(), &volume_server_pb.VolumeTailSenderRequest{ + stream, err := client.VolumeTailSender(ctx, &volume_server_pb.VolumeTailSenderRequest{ VolumeId: uint32(vid), SinceNs: sinceNs, IdleTimeoutSeconds: uint32(idleTimeoutSeconds), diff --git a/weed/replication/sink/filersink/fetch_write.go b/weed/replication/sink/filersink/fetch_write.go index 97e9671a3..26c055da5 100644 --- a/weed/replication/sink/filersink/fetch_write.go +++ b/weed/replication/sink/filersink/fetch_write.go @@ -63,7 +63,7 @@ func (fs *FilerSink) fetchAndWrite(ctx context.Context, sourceChunk *filer_pb.Fi var host string var auth security.EncodedJwt - if err := fs.withFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { + if err := fs.withFilerClient(ctx, func(ctx context.Context, client filer_pb.SeaweedFilerClient) error { request := &filer_pb.AssignVolumeRequest{ Count: 1, @@ -104,11 +104,11 @@ func (fs *FilerSink) fetchAndWrite(ctx context.Context, sourceChunk *filer_pb.Fi return } -func (fs *FilerSink) withFilerClient(ctx context.Context, fn func(filer_pb.SeaweedFilerClient) error) error { +func (fs *FilerSink) withFilerClient(ctx context.Context, fn func(context.Context, filer_pb.SeaweedFilerClient) error) error { - return util.WithCachedGrpcClient(ctx, func(grpcConnection *grpc.ClientConn) error { + return util.WithCachedGrpcClient(ctx, func(ctx context.Context, grpcConnection *grpc.ClientConn) error { client := filer_pb.NewSeaweedFilerClient(grpcConnection) - return fn(client) + return fn(ctx, client) }, fs.grpcAddress, fs.grpcDialOption) } diff --git a/weed/replication/sink/filersink/filer_sink.go b/weed/replication/sink/filersink/filer_sink.go index 58a3ab9c2..4790d1562 100644 --- a/weed/replication/sink/filersink/filer_sink.go +++ b/weed/replication/sink/filersink/filer_sink.go @@ -64,7 +64,7 @@ func (fs *FilerSink) initialize(grpcAddress string, dir string, } func (fs *FilerSink) DeleteEntry(ctx context.Context, key string, isDirectory, deleteIncludeChunks bool) error { - return fs.withFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { + return fs.withFilerClient(ctx, func(ctx context.Context, client filer_pb.SeaweedFilerClient) error { dir, name := filer2.FullPath(key).DirAndName() @@ -87,7 +87,7 @@ func (fs *FilerSink) DeleteEntry(ctx context.Context, key string, isDirectory, d func (fs *FilerSink) CreateEntry(ctx context.Context, key string, entry *filer_pb.Entry) error { - return fs.withFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { + return fs.withFilerClient(ctx, func(ctx context.Context, client filer_pb.SeaweedFilerClient) error { dir, name := 
filer2.FullPath(key).DirAndName() @@ -139,7 +139,7 @@ func (fs *FilerSink) UpdateEntry(ctx context.Context, key string, oldEntry *file // read existing entry var existingEntry *filer_pb.Entry - err = fs.withFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { + err = fs.withFilerClient(ctx, func(ctx context.Context, client filer_pb.SeaweedFilerClient) error { request := &filer_pb.LookupDirectoryEntryRequest{ Directory: dir, @@ -191,7 +191,7 @@ func (fs *FilerSink) UpdateEntry(ctx context.Context, key string, oldEntry *file } // save updated meta data - return true, fs.withFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { + return true, fs.withFilerClient(ctx, func(ctx context.Context, client filer_pb.SeaweedFilerClient) error { request := &filer_pb.UpdateEntryRequest{ Directory: newParentPath, diff --git a/weed/replication/source/filer_source.go b/weed/replication/source/filer_source.go index d7b5ebc4d..aef13be75 100644 --- a/weed/replication/source/filer_source.go +++ b/weed/replication/source/filer_source.go @@ -45,7 +45,7 @@ func (fs *FilerSource) LookupFileId(ctx context.Context, part string) (fileUrl s vid := volumeId(part) - err = fs.withFilerClient(ctx, fs.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error { + err = fs.withFilerClient(ctx, fs.grpcDialOption, func(ctx context.Context, client filer_pb.SeaweedFilerClient) error { glog.V(4).Infof("read lookup volume id locations: %v", vid) resp, err := client.LookupVolume(ctx, &filer_pb.LookupVolumeRequest{ @@ -89,11 +89,11 @@ func (fs *FilerSource) ReadPart(ctx context.Context, part string) (filename stri return filename, header, readCloser, err } -func (fs *FilerSource) withFilerClient(ctx context.Context, grpcDialOption grpc.DialOption, fn func(filer_pb.SeaweedFilerClient) error) error { +func (fs *FilerSource) withFilerClient(ctx context.Context, grpcDialOption grpc.DialOption, fn func(context.Context, filer_pb.SeaweedFilerClient) error) error { - return util.WithCachedGrpcClient(ctx, func(grpcConnection *grpc.ClientConn) error { + return util.WithCachedGrpcClient(ctx, func(ctx2 context.Context, grpcConnection *grpc.ClientConn) error { client := filer_pb.NewSeaweedFilerClient(grpcConnection) - return fn(client) + return fn(ctx2, client) }, fs.grpcAddress, fs.grpcDialOption) } diff --git a/weed/s3api/s3api_handlers.go b/weed/s3api/s3api_handlers.go index 127be07e3..602f03e5c 100644 --- a/weed/s3api/s3api_handlers.go +++ b/weed/s3api/s3api_handlers.go @@ -39,7 +39,7 @@ func encodeResponse(response interface{}) []byte { func (s3a *S3ApiServer) withFilerClient(ctx context.Context, fn func(filer_pb.SeaweedFilerClient) error) error { - return util.WithCachedGrpcClient(ctx, func(grpcConnection *grpc.ClientConn) error { + return util.WithCachedGrpcClient(ctx, func(ctx context.Context, grpcConnection *grpc.ClientConn) error { client := filer_pb.NewSeaweedFilerClient(grpcConnection) return fn(client) }, s3a.option.FilerGrpcAddress, s3a.option.GrpcDialOption) diff --git a/weed/server/filer_server.go b/weed/server/filer_server.go index c703b8c6f..3a2eca6d4 100644 --- a/weed/server/filer_server.go +++ b/weed/server/filer_server.go @@ -124,8 +124,8 @@ func maybeStartMetrics(fs *FilerServer, option *FilerOption) { } func readFilerConfiguration(grpcDialOption grpc.DialOption, masterGrpcAddress string) (metricsAddress string, metricsIntervalSec int, err error) { - err = operation.WithMasterServerClient(masterGrpcAddress, grpcDialOption, func(masterClient master_pb.SeaweedClient) error { - resp, err := 
masterClient.GetMasterConfiguration(context.Background(), &master_pb.GetMasterConfigurationRequest{}) + err = operation.WithMasterServerClient(masterGrpcAddress, grpcDialOption, func(ctx context.Context, masterClient master_pb.SeaweedClient) error { + resp, err := masterClient.GetMasterConfiguration(ctx, &master_pb.GetMasterConfigurationRequest{}) if err != nil { return fmt.Errorf("get master %s configuration: %v", masterGrpcAddress, err) } diff --git a/weed/server/master_grpc_server_collection.go b/weed/server/master_grpc_server_collection.go index f8e0785f6..f02b0f242 100644 --- a/weed/server/master_grpc_server_collection.go +++ b/weed/server/master_grpc_server_collection.go @@ -57,8 +57,8 @@ func (ms *MasterServer) doDeleteNormalCollection(collectionName string) error { } for _, server := range collection.ListVolumeServers() { - err := operation.WithVolumeServerClient(server.Url(), ms.grpcDialOption, func(client volume_server_pb.VolumeServerClient) error { - _, deleteErr := client.DeleteCollection(context.Background(), &volume_server_pb.DeleteCollectionRequest{ + err := operation.WithVolumeServerClient(server.Url(), ms.grpcDialOption, func(ctx context.Context, client volume_server_pb.VolumeServerClient) error { + _, deleteErr := client.DeleteCollection(ctx, &volume_server_pb.DeleteCollectionRequest{ Collection: collectionName, }) return deleteErr @@ -77,8 +77,8 @@ func (ms *MasterServer) doDeleteEcCollection(collectionName string) error { listOfEcServers := ms.Topo.ListEcServersByCollection(collectionName) for _, server := range listOfEcServers { - err := operation.WithVolumeServerClient(server, ms.grpcDialOption, func(client volume_server_pb.VolumeServerClient) error { - _, deleteErr := client.DeleteCollection(context.Background(), &volume_server_pb.DeleteCollectionRequest{ + err := operation.WithVolumeServerClient(server, ms.grpcDialOption, func(ctx context.Context, client volume_server_pb.VolumeServerClient) error { + _, deleteErr := client.DeleteCollection(ctx, &volume_server_pb.DeleteCollectionRequest{ Collection: collectionName, }) return deleteErr diff --git a/weed/server/master_server_handlers_admin.go b/weed/server/master_server_handlers_admin.go index 2965a4863..44a04cb86 100644 --- a/weed/server/master_server_handlers_admin.go +++ b/weed/server/master_server_handlers_admin.go @@ -25,8 +25,8 @@ func (ms *MasterServer) collectionDeleteHandler(w http.ResponseWriter, r *http.R return } for _, server := range collection.ListVolumeServers() { - err := operation.WithVolumeServerClient(server.Url(), ms.grpcDialOption, func(client volume_server_pb.VolumeServerClient) error { - _, deleteErr := client.DeleteCollection(context.Background(), &volume_server_pb.DeleteCollectionRequest{ + err := operation.WithVolumeServerClient(server.Url(), ms.grpcDialOption, func(ctx context.Context, client volume_server_pb.VolumeServerClient) error { + _, deleteErr := client.DeleteCollection(ctx, &volume_server_pb.DeleteCollectionRequest{ Collection: collection.Name, }) return deleteErr diff --git a/weed/server/volume_grpc_copy.go b/weed/server/volume_grpc_copy.go index 0153d5efc..6d74f8171 100644 --- a/weed/server/volume_grpc_copy.go +++ b/weed/server/volume_grpc_copy.go @@ -41,7 +41,7 @@ func (vs *VolumeServer) VolumeCopy(ctx context.Context, req *volume_server_pb.Vo // confirm size and timestamp var volFileInfoResp *volume_server_pb.ReadVolumeFileStatusResponse var volumeFileName, idxFileName, datFileName string - err := operation.WithVolumeServerClient(req.SourceDataNode, vs.grpcDialOption, 
func(client volume_server_pb.VolumeServerClient) error { + err := operation.WithVolumeServerClient(req.SourceDataNode, vs.grpcDialOption, func(ctx context.Context, client volume_server_pb.VolumeServerClient) error { var err error volFileInfoResp, err = client.ReadVolumeFileStatus(ctx, &volume_server_pb.ReadVolumeFileStatusRequest{ diff --git a/weed/server/volume_grpc_erasure_coding.go b/weed/server/volume_grpc_erasure_coding.go index 67efc0f6d..256e7c447 100644 --- a/weed/server/volume_grpc_erasure_coding.go +++ b/weed/server/volume_grpc_erasure_coding.go @@ -106,7 +106,7 @@ func (vs *VolumeServer) VolumeEcShardsCopy(ctx context.Context, req *volume_serv baseFileName := storage.VolumeFileName(location.Directory, req.Collection, int(req.VolumeId)) - err := operation.WithVolumeServerClient(req.SourceDataNode, vs.grpcDialOption, func(client volume_server_pb.VolumeServerClient) error { + err := operation.WithVolumeServerClient(req.SourceDataNode, vs.grpcDialOption, func(ctx context.Context, client volume_server_pb.VolumeServerClient) error { // copy ec data slices for _, shardId := range req.ShardIds { diff --git a/weed/server/webdav_server.go b/weed/server/webdav_server.go index 875cc2999..bdb6b61a9 100644 --- a/weed/server/webdav_server.go +++ b/weed/server/webdav_server.go @@ -98,11 +98,11 @@ func NewWebDavFileSystem(option *WebDavOption) (webdav.FileSystem, error) { }, nil } -func (fs *WebDavFileSystem) WithFilerClient(ctx context.Context, fn func(filer_pb.SeaweedFilerClient) error) error { +func (fs *WebDavFileSystem) WithFilerClient(ctx context.Context, fn func(context.Context, filer_pb.SeaweedFilerClient) error) error { - return util.WithCachedGrpcClient(ctx, func(grpcConnection *grpc.ClientConn) error { + return util.WithCachedGrpcClient(ctx, func(ctx2 context.Context, grpcConnection *grpc.ClientConn) error { client := filer_pb.NewSeaweedFilerClient(grpcConnection) - return fn(client) + return fn(ctx2, client) }, fs.option.FilerGrpcAddress, fs.option.GrpcDialOption) } @@ -137,7 +137,7 @@ func (fs *WebDavFileSystem) Mkdir(ctx context.Context, fullDirPath string, perm return os.ErrExist } - return fs.WithFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { + return fs.WithFilerClient(ctx, func(ctx context.Context, client filer_pb.SeaweedFilerClient) error { dir, name := filer2.FullPath(fullDirPath).DirAndName() request := &filer_pb.CreateEntryRequest{ Directory: dir, @@ -186,7 +186,7 @@ func (fs *WebDavFileSystem) OpenFile(ctx context.Context, fullFilePath string, f } dir, name := filer2.FullPath(fullFilePath).DirAndName() - err = fs.WithFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { + err = fs.WithFilerClient(ctx, func(ctx context.Context, client filer_pb.SeaweedFilerClient) error { if err := filer_pb.CreateEntry(ctx, client, &filer_pb.CreateEntryRequest{ Directory: dir, Entry: &filer_pb.Entry{ @@ -251,7 +251,7 @@ func (fs *WebDavFileSystem) removeAll(ctx context.Context, fullFilePath string) //_, err = fs.db.Exec(`delete from filesystem where fullFilePath = ?`, fullFilePath) } dir, name := filer2.FullPath(fullFilePath).DirAndName() - err = fs.WithFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { + err = fs.WithFilerClient(ctx, func(ctx context.Context, client filer_pb.SeaweedFilerClient) error { request := &filer_pb.DeleteEntryRequest{ Directory: dir, @@ -310,7 +310,7 @@ func (fs *WebDavFileSystem) Rename(ctx context.Context, oldName, newName string) oldDir, oldBaseName := filer2.FullPath(oldName).DirAndName() newDir, newBaseName := 
filer2.FullPath(newName).DirAndName() - return fs.WithFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { + return fs.WithFilerClient(ctx, func(ctx context.Context, client filer_pb.SeaweedFilerClient) error { request := &filer_pb.AtomicRenameEntryRequest{ OldDirectory: oldDir, @@ -385,7 +385,7 @@ func (f *WebDavFile) Write(buf []byte) (int, error) { var fileId, host string var auth security.EncodedJwt - if err = f.fs.WithFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { + if err = f.fs.WithFilerClient(ctx, func(ctx context.Context, client filer_pb.SeaweedFilerClient) error { request := &filer_pb.AssignVolumeRequest{ Count: 1, @@ -429,7 +429,7 @@ func (f *WebDavFile) Write(buf []byte) (int, error) { f.entry.Chunks = append(f.entry.Chunks, chunk) dir, _ := filer2.FullPath(f.name).DirAndName() - err = f.fs.WithFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { + err = f.fs.WithFilerClient(ctx, func(ctx context.Context, client filer_pb.SeaweedFilerClient) error { f.entry.Attributes.Mtime = time.Now().Unix() request := &filer_pb.UpdateEntryRequest{ diff --git a/weed/shell/command_ec_common.go b/weed/shell/command_ec_common.go index 2beed4742..e187d5a3b 100644 --- a/weed/shell/command_ec_common.go +++ b/weed/shell/command_ec_common.go @@ -56,7 +56,7 @@ func oneServerCopyAndMountEcShardsFromSource(ctx context.Context, grpcDialOption fmt.Printf("allocate %d.%v %s => %s\n", volumeId, shardIdsToCopy, existingLocation, targetServer.info.Id) - err = operation.WithVolumeServerClient(targetServer.info.Id, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error { + err = operation.WithVolumeServerClient(targetServer.info.Id, grpcDialOption, func(ctx context.Context, volumeServerClient volume_server_pb.VolumeServerClient) error { if targetServer.info.Id != existingLocation { @@ -216,7 +216,7 @@ func sourceServerDeleteEcShards(ctx context.Context, grpcDialOption grpc.DialOpt fmt.Printf("delete %d.%v from %s\n", volumeId, toBeDeletedShardIds, sourceLocation) - return operation.WithVolumeServerClient(sourceLocation, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error { + return operation.WithVolumeServerClient(sourceLocation, grpcDialOption, func(ctx context.Context, volumeServerClient volume_server_pb.VolumeServerClient) error { _, deleteErr := volumeServerClient.VolumeEcShardsDelete(ctx, &volume_server_pb.VolumeEcShardsDeleteRequest{ VolumeId: uint32(volumeId), Collection: collection, @@ -232,7 +232,7 @@ func unmountEcShards(ctx context.Context, grpcDialOption grpc.DialOption, fmt.Printf("unmount %d.%v from %s\n", volumeId, toBeUnmountedhardIds, sourceLocation) - return operation.WithVolumeServerClient(sourceLocation, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error { + return operation.WithVolumeServerClient(sourceLocation, grpcDialOption, func(ctx context.Context, volumeServerClient volume_server_pb.VolumeServerClient) error { _, deleteErr := volumeServerClient.VolumeEcShardsUnmount(ctx, &volume_server_pb.VolumeEcShardsUnmountRequest{ VolumeId: uint32(volumeId), ShardIds: toBeUnmountedhardIds, @@ -246,7 +246,7 @@ func mountEcShards(ctx context.Context, grpcDialOption grpc.DialOption, fmt.Printf("mount %d.%v on %s\n", volumeId, toBeMountedhardIds, sourceLocation) - return operation.WithVolumeServerClient(sourceLocation, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error { + return operation.WithVolumeServerClient(sourceLocation, 
grpcDialOption, func(ctx context.Context, volumeServerClient volume_server_pb.VolumeServerClient) error { _, mountErr := volumeServerClient.VolumeEcShardsMount(ctx, &volume_server_pb.VolumeEcShardsMountRequest{ VolumeId: uint32(volumeId), Collection: collection, diff --git a/weed/shell/command_ec_decode.go b/weed/shell/command_ec_decode.go index 1f9ad2ff9..8a705a5ae 100644 --- a/weed/shell/command_ec_decode.go +++ b/weed/shell/command_ec_decode.go @@ -99,7 +99,7 @@ func doEcDecode(ctx context.Context, commandEnv *CommandEnv, topoInfo *master_pb func mountVolumeAndDeleteEcShards(ctx context.Context, grpcDialOption grpc.DialOption, collection, targetNodeLocation string, nodeToEcIndexBits map[string]erasure_coding.ShardBits, vid needle.VolumeId) error { // mount volume - if err := operation.WithVolumeServerClient(targetNodeLocation, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error { + if err := operation.WithVolumeServerClient(targetNodeLocation, grpcDialOption, func(ctx context.Context, volumeServerClient volume_server_pb.VolumeServerClient) error { _, mountErr := volumeServerClient.VolumeMount(ctx, &volume_server_pb.VolumeMountRequest{ VolumeId: uint32(vid), }) @@ -132,7 +132,7 @@ func generateNormalVolume(ctx context.Context, grpcDialOption grpc.DialOption, v fmt.Printf("generateNormalVolume from ec volume %d on %s\n", vid, sourceVolumeServer) - err := operation.WithVolumeServerClient(sourceVolumeServer, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error { + err := operation.WithVolumeServerClient(sourceVolumeServer, grpcDialOption, func(ctx context.Context, volumeServerClient volume_server_pb.VolumeServerClient) error { _, genErr := volumeServerClient.VolumeEcShardsToVolume(ctx, &volume_server_pb.VolumeEcShardsToVolumeRequest{ VolumeId: uint32(vid), Collection: collection, @@ -170,7 +170,7 @@ func collectEcShards(ctx context.Context, commandEnv *CommandEnv, nodeToEcIndexB continue } - err = operation.WithVolumeServerClient(targetNodeLocation, commandEnv.option.GrpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error { + err = operation.WithVolumeServerClient(targetNodeLocation, commandEnv.option.GrpcDialOption, func(ctx context.Context, volumeServerClient volume_server_pb.VolumeServerClient) error { fmt.Printf("copy %d.%v %s => %s\n", vid, needToCopyEcIndexBits.ShardIds(), loc, targetNodeLocation) diff --git a/weed/shell/command_ec_encode.go b/weed/shell/command_ec_encode.go index 58527abf2..587b59388 100644 --- a/weed/shell/command_ec_encode.go +++ b/weed/shell/command_ec_encode.go @@ -120,7 +120,7 @@ func markVolumeReadonly(ctx context.Context, grpcDialOption grpc.DialOption, vol for _, location := range locations { - err := operation.WithVolumeServerClient(location.Url, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error { + err := operation.WithVolumeServerClient(location.Url, grpcDialOption, func(ctx context.Context, volumeServerClient volume_server_pb.VolumeServerClient) error { _, markErr := volumeServerClient.VolumeMarkReadonly(ctx, &volume_server_pb.VolumeMarkReadonlyRequest{ VolumeId: uint32(volumeId), }) @@ -138,7 +138,7 @@ func markVolumeReadonly(ctx context.Context, grpcDialOption grpc.DialOption, vol func generateEcShards(ctx context.Context, grpcDialOption grpc.DialOption, volumeId needle.VolumeId, collection string, sourceVolumeServer string) error { - err := operation.WithVolumeServerClient(sourceVolumeServer, grpcDialOption, func(volumeServerClient 
volume_server_pb.VolumeServerClient) error { + err := operation.WithVolumeServerClient(sourceVolumeServer, grpcDialOption, func(ctx context.Context, volumeServerClient volume_server_pb.VolumeServerClient) error { _, genErr := volumeServerClient.VolumeEcShardsGenerate(ctx, &volume_server_pb.VolumeEcShardsGenerateRequest{ VolumeId: uint32(volumeId), Collection: collection, diff --git a/weed/shell/command_ec_rebuild.go b/weed/shell/command_ec_rebuild.go index 2e2fca743..600a8cb45 100644 --- a/weed/shell/command_ec_rebuild.go +++ b/weed/shell/command_ec_rebuild.go @@ -170,7 +170,7 @@ func rebuildOneEcVolume(ctx context.Context, commandEnv *CommandEnv, rebuilder * func generateMissingShards(ctx context.Context, grpcDialOption grpc.DialOption, collection string, volumeId needle.VolumeId, sourceLocation string) (rebuiltShardIds []uint32, err error) { - err = operation.WithVolumeServerClient(sourceLocation, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error { + err = operation.WithVolumeServerClient(sourceLocation, grpcDialOption, func(ctx context.Context, volumeServerClient volume_server_pb.VolumeServerClient) error { resp, rebultErr := volumeServerClient.VolumeEcShardsRebuild(ctx, &volume_server_pb.VolumeEcShardsRebuildRequest{ VolumeId: uint32(volumeId), Collection: collection, @@ -209,7 +209,7 @@ func prepareDataToRecover(ctx context.Context, commandEnv *CommandEnv, rebuilder var copyErr error if applyBalancing { - copyErr = operation.WithVolumeServerClient(rebuilder.info.Id, commandEnv.option.GrpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error { + copyErr = operation.WithVolumeServerClient(rebuilder.info.Id, commandEnv.option.GrpcDialOption, func(ctx context.Context, volumeServerClient volume_server_pb.VolumeServerClient) error { _, copyErr := volumeServerClient.VolumeEcShardsCopy(ctx, &volume_server_pb.VolumeEcShardsCopyRequest{ VolumeId: uint32(volumeId), Collection: collection, diff --git a/weed/shell/command_fs_cat.go b/weed/shell/command_fs_cat.go index 9db36e9d1..238dee7f9 100644 --- a/weed/shell/command_fs_cat.go +++ b/weed/shell/command_fs_cat.go @@ -46,7 +46,7 @@ func (c *commandFsCat) Do(args []string, commandEnv *CommandEnv, writer io.Write dir, name := filer2.FullPath(path).DirAndName() - return commandEnv.withFilerClient(ctx, filerServer, filerPort, func(client filer_pb.SeaweedFilerClient) error { + return commandEnv.withFilerClient(ctx, filerServer, filerPort, func(ctx context.Context, client filer_pb.SeaweedFilerClient) error { request := &filer_pb.LookupDirectoryEntryRequest{ Name: name, diff --git a/weed/shell/command_fs_du.go b/weed/shell/command_fs_du.go index 2c46350b2..d6ea51d0c 100644 --- a/weed/shell/command_fs_du.go +++ b/weed/shell/command_fs_du.go @@ -82,12 +82,12 @@ func duTraverseDirectory(ctx context.Context, writer io.Writer, filerClient file return } -func (env *CommandEnv) withFilerClient(ctx context.Context, filerServer string, filerPort int64, fn func(filer_pb.SeaweedFilerClient) error) error { +func (env *CommandEnv) withFilerClient(ctx context.Context, filerServer string, filerPort int64, fn func(context.Context, filer_pb.SeaweedFilerClient) error) error { filerGrpcAddress := fmt.Sprintf("%s:%d", filerServer, filerPort+10000) - return util.WithCachedGrpcClient(ctx, func(grpcConnection *grpc.ClientConn) error { + return util.WithCachedGrpcClient(ctx, func(ctx2 context.Context, grpcConnection *grpc.ClientConn) error { client := filer_pb.NewSeaweedFilerClient(grpcConnection) - return fn(client) + 
return fn(ctx2, client) }, filerGrpcAddress, env.option.GrpcDialOption) } @@ -105,6 +105,6 @@ func (env *CommandEnv) getFilerClient(filerServer string, filerPort int64) *comm filerPort: filerPort, } } -func (c *commandFilerClient) WithFilerClient(ctx context.Context, fn func(filer_pb.SeaweedFilerClient) error) error { +func (c *commandFilerClient) WithFilerClient(ctx context.Context, fn func(context.Context, filer_pb.SeaweedFilerClient) error) error { return c.env.withFilerClient(ctx, c.filerServer, c.filerPort, fn) } diff --git a/weed/shell/command_fs_meta_cat.go b/weed/shell/command_fs_meta_cat.go index 5908b0a3c..9980f67a2 100644 --- a/weed/shell/command_fs_meta_cat.go +++ b/weed/shell/command_fs_meta_cat.go @@ -45,7 +45,7 @@ func (c *commandFsMetaCat) Do(args []string, commandEnv *CommandEnv, writer io.W dir, name := filer2.FullPath(path).DirAndName() - return commandEnv.withFilerClient(ctx, filerServer, filerPort, func(client filer_pb.SeaweedFilerClient) error { + return commandEnv.withFilerClient(ctx, filerServer, filerPort, func(ctx context.Context, client filer_pb.SeaweedFilerClient) error { request := &filer_pb.LookupDirectoryEntryRequest{ Name: name, diff --git a/weed/shell/command_fs_meta_load.go b/weed/shell/command_fs_meta_load.go index 52dd0321b..8f2ef95e3 100644 --- a/weed/shell/command_fs_meta_load.go +++ b/weed/shell/command_fs_meta_load.go @@ -55,7 +55,7 @@ func (c *commandFsMetaLoad) Do(args []string, commandEnv *CommandEnv, writer io. ctx := context.Background() - err = commandEnv.withFilerClient(ctx, filerServer, filerPort, func(client filer_pb.SeaweedFilerClient) error { + err = commandEnv.withFilerClient(ctx, filerServer, filerPort, func(ctx context.Context, client filer_pb.SeaweedFilerClient) error { sizeBuf := make([]byte, 4) diff --git a/weed/shell/command_fs_mv.go b/weed/shell/command_fs_mv.go index 67606ab53..e77755921 100644 --- a/weed/shell/command_fs_mv.go +++ b/weed/shell/command_fs_mv.go @@ -53,7 +53,7 @@ func (c *commandFsMv) Do(args []string, commandEnv *CommandEnv, writer io.Writer destinationDir, destinationName := filer2.FullPath(destinationPath).DirAndName() - return commandEnv.withFilerClient(ctx, filerServer, filerPort, func(client filer_pb.SeaweedFilerClient) error { + return commandEnv.withFilerClient(ctx, filerServer, filerPort, func(ctx context.Context, client filer_pb.SeaweedFilerClient) error { // collect destination entry info destinationRequest := &filer_pb.LookupDirectoryEntryRequest{ diff --git a/weed/shell/command_volume_fix_replication.go b/weed/shell/command_volume_fix_replication.go index 6f35dd5d2..7a1a77cbe 100644 --- a/weed/shell/command_volume_fix_replication.go +++ b/weed/shell/command_volume_fix_replication.go @@ -113,7 +113,7 @@ func (c *commandVolumeFixReplication) Do(args []string, commandEnv *CommandEnv, break } - err := operation.WithVolumeServerClient(dst.dataNode.Id, commandEnv.option.GrpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error { + err := operation.WithVolumeServerClient(dst.dataNode.Id, commandEnv.option.GrpcDialOption, func(ctx context.Context, volumeServerClient volume_server_pb.VolumeServerClient) error { _, replicateErr := volumeServerClient.VolumeCopy(ctx, &volume_server_pb.VolumeCopyRequest{ VolumeId: volumeInfo.Id, SourceDataNode: sourceNode.dataNode.Id, diff --git a/weed/shell/command_volume_mount.go b/weed/shell/command_volume_mount.go index 50a307492..21bc342b4 100644 --- a/weed/shell/command_volume_mount.go +++ b/weed/shell/command_volume_mount.go @@ -51,7 +51,7 @@ func (c 
*commandVolumeMount) Do(args []string, commandEnv *CommandEnv, writer io } func mountVolume(ctx context.Context, grpcDialOption grpc.DialOption, volumeId needle.VolumeId, sourceVolumeServer string) (err error) { - return operation.WithVolumeServerClient(sourceVolumeServer, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error { + return operation.WithVolumeServerClient(sourceVolumeServer, grpcDialOption, func(ctx context.Context, volumeServerClient volume_server_pb.VolumeServerClient) error { _, mountErr := volumeServerClient.VolumeMount(ctx, &volume_server_pb.VolumeMountRequest{ VolumeId: uint32(volumeId), }) diff --git a/weed/shell/command_volume_move.go b/weed/shell/command_volume_move.go index e74b43ed4..2e39c0600 100644 --- a/weed/shell/command_volume_move.go +++ b/weed/shell/command_volume_move.go @@ -88,7 +88,7 @@ func LiveMoveVolume(ctx context.Context, grpcDialOption grpc.DialOption, volumeI func copyVolume(ctx context.Context, grpcDialOption grpc.DialOption, volumeId needle.VolumeId, sourceVolumeServer, targetVolumeServer string) (lastAppendAtNs uint64, err error) { - err = operation.WithVolumeServerClient(targetVolumeServer, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error { + err = operation.WithVolumeServerClient(targetVolumeServer, grpcDialOption, func(ctx context.Context, volumeServerClient volume_server_pb.VolumeServerClient) error { resp, replicateErr := volumeServerClient.VolumeCopy(ctx, &volume_server_pb.VolumeCopyRequest{ VolumeId: uint32(volumeId), SourceDataNode: sourceVolumeServer, @@ -104,7 +104,7 @@ func copyVolume(ctx context.Context, grpcDialOption grpc.DialOption, volumeId ne func tailVolume(ctx context.Context, grpcDialOption grpc.DialOption, volumeId needle.VolumeId, sourceVolumeServer, targetVolumeServer string, lastAppendAtNs uint64, idleTimeout time.Duration) (err error) { - return operation.WithVolumeServerClient(targetVolumeServer, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error { + return operation.WithVolumeServerClient(targetVolumeServer, grpcDialOption, func(ctx context.Context, volumeServerClient volume_server_pb.VolumeServerClient) error { _, replicateErr := volumeServerClient.VolumeTailReceiver(ctx, &volume_server_pb.VolumeTailReceiverRequest{ VolumeId: uint32(volumeId), SinceNs: lastAppendAtNs, @@ -117,7 +117,7 @@ func tailVolume(ctx context.Context, grpcDialOption grpc.DialOption, volumeId ne } func deleteVolume(ctx context.Context, grpcDialOption grpc.DialOption, volumeId needle.VolumeId, sourceVolumeServer string) (err error) { - return operation.WithVolumeServerClient(sourceVolumeServer, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error { + return operation.WithVolumeServerClient(sourceVolumeServer, grpcDialOption, func(ctx context.Context, volumeServerClient volume_server_pb.VolumeServerClient) error { _, deleteErr := volumeServerClient.VolumeDelete(ctx, &volume_server_pb.VolumeDeleteRequest{ VolumeId: uint32(volumeId), }) diff --git a/weed/shell/command_volume_tier_download.go b/weed/shell/command_volume_tier_download.go index 4584289d7..0f1a1bb6e 100644 --- a/weed/shell/command_volume_tier_download.go +++ b/weed/shell/command_volume_tier_download.go @@ -118,7 +118,7 @@ func doVolumeTierDownload(ctx context.Context, commandEnv *CommandEnv, writer io func downloadDatFromRemoteTier(ctx context.Context, grpcDialOption grpc.DialOption, writer io.Writer, volumeId needle.VolumeId, collection string, targetVolumeServer 
string) error { - err := operation.WithVolumeServerClient(targetVolumeServer, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error { + err := operation.WithVolumeServerClient(targetVolumeServer, grpcDialOption, func(ctx context.Context, volumeServerClient volume_server_pb.VolumeServerClient) error { stream, downloadErr := volumeServerClient.VolumeTierMoveDatFromRemote(ctx, &volume_server_pb.VolumeTierMoveDatFromRemoteRequest{ VolumeId: uint32(volumeId), Collection: collection, diff --git a/weed/shell/command_volume_tier_upload.go b/weed/shell/command_volume_tier_upload.go index 0a9e6165f..20da1187c 100644 --- a/weed/shell/command_volume_tier_upload.go +++ b/weed/shell/command_volume_tier_upload.go @@ -114,7 +114,7 @@ func doVolumeTierUpload(ctx context.Context, commandEnv *CommandEnv, writer io.W func uploadDatToRemoteTier(ctx context.Context, grpcDialOption grpc.DialOption, writer io.Writer, volumeId needle.VolumeId, collection string, sourceVolumeServer string, dest string, keepLocalDatFile bool) error { - err := operation.WithVolumeServerClient(sourceVolumeServer, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error { + err := operation.WithVolumeServerClient(sourceVolumeServer, grpcDialOption, func(ctx context.Context, volumeServerClient volume_server_pb.VolumeServerClient) error { stream, copyErr := volumeServerClient.VolumeTierMoveDatToRemote(ctx, &volume_server_pb.VolumeTierMoveDatToRemoteRequest{ VolumeId: uint32(volumeId), Collection: collection, diff --git a/weed/shell/command_volume_unmount.go b/weed/shell/command_volume_unmount.go index 8096f34d8..826258dfb 100644 --- a/weed/shell/command_volume_unmount.go +++ b/weed/shell/command_volume_unmount.go @@ -51,7 +51,7 @@ func (c *commandVolumeUnmount) Do(args []string, commandEnv *CommandEnv, writer } func unmountVolume(ctx context.Context, grpcDialOption grpc.DialOption, volumeId needle.VolumeId, sourceVolumeServer string) (err error) { - return operation.WithVolumeServerClient(sourceVolumeServer, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error { + return operation.WithVolumeServerClient(sourceVolumeServer, grpcDialOption, func(ctx context.Context, volumeServerClient volume_server_pb.VolumeServerClient) error { _, unmountErr := volumeServerClient.VolumeUnmount(ctx, &volume_server_pb.VolumeUnmountRequest{ VolumeId: uint32(volumeId), }) diff --git a/weed/shell/commands.go b/weed/shell/commands.go index a6a0f7dec..f1fcb62d4 100644 --- a/weed/shell/commands.go +++ b/weed/shell/commands.go @@ -70,7 +70,7 @@ func (ce *CommandEnv) checkDirectory(ctx context.Context, filerServer string, fi dir, name := filer2.FullPath(path).DirAndName() - return ce.withFilerClient(ctx, filerServer, filerPort, func(client filer_pb.SeaweedFilerClient) error { + return ce.withFilerClient(ctx, filerServer, filerPort, func(ctx context.Context, client filer_pb.SeaweedFilerClient) error { resp, lookupErr := client.LookupDirectoryEntry(ctx, &filer_pb.LookupDirectoryEntryRequest{ Directory: dir, diff --git a/weed/storage/store_ec.go b/weed/storage/store_ec.go index 27406451f..47e061d05 100644 --- a/weed/storage/store_ec.go +++ b/weed/storage/store_ec.go @@ -230,7 +230,7 @@ func (s *Store) cachedLookupEcShardLocations(ctx context.Context, ecVolume *eras glog.V(3).Infof("lookup and cache ec volume %d locations", ecVolume.VolumeId) - err = operation.WithMasterServerClient(s.MasterAddress, s.grpcDialOption, func(masterClient master_pb.SeaweedClient) error { + err = 
operation.WithMasterServerClient(s.MasterAddress, s.grpcDialOption, func(ctx context.Context, masterClient master_pb.SeaweedClient) error { req := &master_pb.LookupEcVolumeRequest{ VolumeId: uint32(ecVolume.VolumeId), } @@ -278,7 +278,7 @@ func (s *Store) readRemoteEcShardInterval(ctx context.Context, sourceDataNodes [ func (s *Store) doReadRemoteEcShardInterval(ctx context.Context, sourceDataNode string, needleId types.NeedleId, vid needle.VolumeId, shardId erasure_coding.ShardId, buf []byte, offset int64) (n int, is_deleted bool, err error) { - err = operation.WithVolumeServerClient(sourceDataNode, s.grpcDialOption, func(client volume_server_pb.VolumeServerClient) error { + err = operation.WithVolumeServerClient(sourceDataNode, s.grpcDialOption, func(ctx context.Context, client volume_server_pb.VolumeServerClient) error { // copy data slice shardReadClient, err := client.VolumeEcShardRead(ctx, &volume_server_pb.VolumeEcShardReadRequest{ diff --git a/weed/storage/store_ec_delete.go b/weed/storage/store_ec_delete.go index e027d2887..2ac907f6c 100644 --- a/weed/storage/store_ec_delete.go +++ b/weed/storage/store_ec_delete.go @@ -87,7 +87,7 @@ func (s *Store) doDeleteNeedleFromRemoteEcShardServers(ctx context.Context, shar func (s *Store) doDeleteNeedleFromRemoteEcShard(ctx context.Context, sourceDataNode string, vid needle.VolumeId, collection string, version needle.Version, needleId types.NeedleId) error { - return operation.WithVolumeServerClient(sourceDataNode, s.grpcDialOption, func(client volume_server_pb.VolumeServerClient) error { + return operation.WithVolumeServerClient(sourceDataNode, s.grpcDialOption, func(ctx context.Context, client volume_server_pb.VolumeServerClient) error { // copy data slice _, err := client.VolumeEcBlobDelete(ctx, &volume_server_pb.VolumeEcBlobDeleteRequest{ diff --git a/weed/storage/volume_backup.go b/weed/storage/volume_backup.go index ec29c895e..3763d5515 100644 --- a/weed/storage/volume_backup.go +++ b/weed/storage/volume_backup.go @@ -64,8 +64,6 @@ update needle map when receiving new .dat bytes. But seems not necessary now.) 
func (v *Volume) IncrementalBackup(volumeServer string, grpcDialOption grpc.DialOption) error { - ctx := context.Background() - startFromOffset, _, _ := v.FileStat() appendAtNs, err := v.findLastAppendAtNs() if err != nil { @@ -74,7 +72,7 @@ func (v *Volume) IncrementalBackup(volumeServer string, grpcDialOption grpc.Dial writeOffset := int64(startFromOffset) - err = operation.WithVolumeServerClient(volumeServer, grpcDialOption, func(client volume_server_pb.VolumeServerClient) error { + err = operation.WithVolumeServerClient(volumeServer, grpcDialOption, func(ctx context.Context, client volume_server_pb.VolumeServerClient) error { stream, err := client.VolumeIncrementalCopy(ctx, &volume_server_pb.VolumeIncrementalCopyRequest{ VolumeId: uint32(v.Id), diff --git a/weed/topology/allocate_volume.go b/weed/topology/allocate_volume.go index e5dc48652..6ca987bc5 100644 --- a/weed/topology/allocate_volume.go +++ b/weed/topology/allocate_volume.go @@ -15,7 +15,7 @@ type AllocateVolumeResult struct { func AllocateVolume(dn *DataNode, grpcDialOption grpc.DialOption, vid needle.VolumeId, option *VolumeGrowOption) error { - return operation.WithVolumeServerClient(dn.Url(), grpcDialOption, func(client volume_server_pb.VolumeServerClient) error { + return operation.WithVolumeServerClient(dn.Url(), grpcDialOption, func(ctx context.Context, client volume_server_pb.VolumeServerClient) error { _, deleteErr := client.AllocateVolume(context.Background(), &volume_server_pb.AllocateVolumeRequest{ VolumeId: uint32(vid), diff --git a/weed/topology/topology_vacuum.go b/weed/topology/topology_vacuum.go index ca626e973..e7dbf9b1e 100644 --- a/weed/topology/topology_vacuum.go +++ b/weed/topology/topology_vacuum.go @@ -19,8 +19,8 @@ func batchVacuumVolumeCheck(grpcDialOption grpc.DialOption, vl *VolumeLayout, vi errCount := int32(0) for index, dn := range locationlist.list { go func(index int, url string, vid needle.VolumeId) { - err := operation.WithVolumeServerClient(url, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error { - resp, err := volumeServerClient.VacuumVolumeCheck(context.Background(), &volume_server_pb.VacuumVolumeCheckRequest{ + err := operation.WithVolumeServerClient(url, grpcDialOption, func(ctx context.Context, volumeServerClient volume_server_pb.VolumeServerClient) error { + resp, err := volumeServerClient.VacuumVolumeCheck(ctx, &volume_server_pb.VacuumVolumeCheckRequest{ VolumeId: uint32(vid), }) if err != nil { @@ -63,8 +63,8 @@ func batchVacuumVolumeCompact(grpcDialOption grpc.DialOption, vl *VolumeLayout, for index, dn := range locationlist.list { go func(index int, url string, vid needle.VolumeId) { glog.V(0).Infoln(index, "Start vacuuming", vid, "on", url) - err := operation.WithVolumeServerClient(url, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error { - _, err := volumeServerClient.VacuumVolumeCompact(context.Background(), &volume_server_pb.VacuumVolumeCompactRequest{ + err := operation.WithVolumeServerClient(url, grpcDialOption, func(ctx context.Context, volumeServerClient volume_server_pb.VolumeServerClient) error { + _, err := volumeServerClient.VacuumVolumeCompact(ctx, &volume_server_pb.VacuumVolumeCompactRequest{ VolumeId: uint32(vid), }) return err @@ -93,8 +93,8 @@ func batchVacuumVolumeCommit(grpcDialOption grpc.DialOption, vl *VolumeLayout, v isCommitSuccess := true for _, dn := range locationlist.list { glog.V(0).Infoln("Start Committing vacuum", vid, "on", dn.Url()) - err := operation.WithVolumeServerClient(dn.Url(), 
grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error { - _, err := volumeServerClient.VacuumVolumeCommit(context.Background(), &volume_server_pb.VacuumVolumeCommitRequest{ + err := operation.WithVolumeServerClient(dn.Url(), grpcDialOption, func(ctx context.Context, volumeServerClient volume_server_pb.VolumeServerClient) error { + _, err := volumeServerClient.VacuumVolumeCommit(ctx, &volume_server_pb.VacuumVolumeCommitRequest{ VolumeId: uint32(vid), }) return err @@ -114,8 +114,8 @@ func batchVacuumVolumeCommit(grpcDialOption grpc.DialOption, vl *VolumeLayout, v func batchVacuumVolumeCleanup(grpcDialOption grpc.DialOption, vl *VolumeLayout, vid needle.VolumeId, locationlist *VolumeLocationList) { for _, dn := range locationlist.list { glog.V(0).Infoln("Start cleaning up", vid, "on", dn.Url()) - err := operation.WithVolumeServerClient(dn.Url(), grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error { - _, err := volumeServerClient.VacuumVolumeCleanup(context.Background(), &volume_server_pb.VacuumVolumeCleanupRequest{ + err := operation.WithVolumeServerClient(dn.Url(), grpcDialOption, func(ctx context.Context, volumeServerClient volume_server_pb.VolumeServerClient) error { + _, err := volumeServerClient.VacuumVolumeCleanup(ctx, &volume_server_pb.VacuumVolumeCleanupRequest{ VolumeId: uint32(vid), }) return err diff --git a/weed/util/grpc_client_server.go b/weed/util/grpc_client_server.go index 63519d97a..7e396342b 100644 --- a/weed/util/grpc_client_server.go +++ b/weed/util/grpc_client_server.go @@ -57,14 +57,14 @@ func GrpcDial(ctx context.Context, address string, opts ...grpc.DialOption) (*gr return grpc.DialContext(ctx, address, options...) } -func WithCachedGrpcClient(ctx context.Context, fn func(*grpc.ClientConn) error, address string, opts ...grpc.DialOption) error { +func WithCachedGrpcClient(ctx context.Context, fn func(context.Context, *grpc.ClientConn) error, address string, opts ...grpc.DialOption) error { grpcClientsLock.Lock() existingConnection, found := grpcClients[address] if found { grpcClientsLock.Unlock() - err := fn(existingConnection) + err := fn(ctx, existingConnection) if err != nil { grpcClientsLock.Lock() delete(grpcClients, address) @@ -83,7 +83,7 @@ func WithCachedGrpcClient(ctx context.Context, fn func(*grpc.ClientConn) error, grpcClients[address] = grpcConnection grpcClientsLock.Unlock() - err = fn(grpcConnection) + err = fn(ctx, grpcConnection) if err != nil { grpcClientsLock.Lock() delete(grpcClients, address) diff --git a/weed/wdclient/masterclient.go b/weed/wdclient/masterclient.go index 111514f5e..30b0cf160 100644 --- a/weed/wdclient/masterclient.go +++ b/weed/wdclient/masterclient.go @@ -125,9 +125,9 @@ func withMasterClient(ctx context.Context, master string, grpcDialOption grpc.Di return fmt.Errorf("failed to parse master grpc %v: %v", master, parseErr) } - return util.WithCachedGrpcClient(ctx, func(grpcConnection *grpc.ClientConn) error { + return util.WithCachedGrpcClient(ctx, func(ctx2 context.Context, grpcConnection *grpc.ClientConn) error { client := master_pb.NewSeaweedClient(grpcConnection) - return fn(ctx, client) + return fn(ctx2, client) }, masterGrpcAddress, grpcDialOption) } From 234f69452bbb141700b45d88ee456ae6f74f8081 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Sun, 26 Jan 2020 21:04:40 -0800 Subject: [PATCH 0085/2432] 1.52 --- weed/util/constants.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/weed/util/constants.go b/weed/util/constants.go index 
d06cfb8b8..4e89291dd 100644 --- a/weed/util/constants.go +++ b/weed/util/constants.go @@ -5,5 +5,5 @@ import ( ) var ( - VERSION = fmt.Sprintf("%s %d.%d", sizeLimit, 1, 51) + VERSION = fmt.Sprintf("%s %d.%d", sizeLimit, 1, 52) ) From 2f6bb57979b5e891bfa647cd416287ccdea7cfdb Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Mon, 27 Jan 2020 00:54:21 -0800 Subject: [PATCH 0086/2432] logging --- weed/filesys/filehandle.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/weed/filesys/filehandle.go b/weed/filesys/filehandle.go index ef0243090..cf253a7ed 100644 --- a/weed/filesys/filehandle.go +++ b/weed/filesys/filehandle.go @@ -104,7 +104,7 @@ func (fh *FileHandle) Write(ctx context.Context, req *fuse.WriteRequest, resp *f // write the request to volume servers fh.f.entry.Attributes.FileSize = uint64(max(req.Offset+int64(len(req.Data)), int64(fh.f.entry.Attributes.FileSize))) - glog.V(0).Infof("%v write [%d,%d)", fh.f.fullpath(), req.Offset, req.Offset+int64(len(req.Data))) + // glog.V(0).Infof("%v write [%d,%d)", fh.f.fullpath(), req.Offset, req.Offset+int64(len(req.Data))) chunks, err := fh.dirtyPages.AddPage(ctx, req.Offset, req.Data) if err != nil { From 081bc1ea25d8018a98aa45c9e25d3783d493ae42 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Mon, 27 Jan 2020 00:54:52 -0800 Subject: [PATCH 0087/2432] mount: able to write to any part of a file --- weed/filesys/dirty_page_interval.go | 66 ++++++++++++++++++------ weed/filesys/dirty_page_interval_test.go | 34 +++++++++++- 2 files changed, 82 insertions(+), 18 deletions(-) diff --git a/weed/filesys/dirty_page_interval.go b/weed/filesys/dirty_page_interval.go index 77fab75ef..ed95783a8 100644 --- a/weed/filesys/dirty_page_interval.go +++ b/weed/filesys/dirty_page_interval.go @@ -65,12 +65,61 @@ func (c *ContinuousIntervals) TotalSize() (total int64) { return } +func subList(list *IntervalLinkedList, start, stop int64) *IntervalLinkedList { + var nodes []*IntervalNode + for t := list.Head; t != nil; t = t.Next { + nodeStart, nodeStop := max(start, t.Offset), min(stop, t.Offset+t.Size) + if nodeStart >= nodeStop { + // skip non overlapping IntervalNode + continue + } + nodes = append(nodes, &IntervalNode{ + Data: t.Data[nodeStart-t.Offset : nodeStop-t.Offset], + Offset: nodeStart, + Size: nodeStop-nodeStart, + Next: nil, + }) + } + for i := 1; i < len(nodes); i++ { + nodes[i-1].Next = nodes[i] + } + return &IntervalLinkedList{ + Head: nodes[0], + Tail: nodes[len(nodes)-1], + } +} + func (c *ContinuousIntervals) AddInterval(data []byte, offset int64) { // TODO AddInterval needs to handle all possible out of order writes interval := &IntervalNode{Data: data, Offset: offset, Size: int64(len(data))} + var newLists []*IntervalLinkedList + for _, list := range c.lists { + // if list is to the left of new interval, add to the new list + if list.Tail.Offset+list.Tail.Size <= interval.Offset { + newLists = append(newLists, list) + } + // if list is to the right of new interval, add to the new list + if interval.Offset+interval.Size <= list.Head.Offset { + newLists = append(newLists, list) + } + // if new interval overwrite the right part of the list + if list.Head.Offset < interval.Offset && interval.Offset < list.Tail.Offset+list.Tail.Size { + // create a new list of the left part of existing list + newLists = append(newLists, subList(list, list.Offset(), interval.Offset)) + } + // if new interval overwrite the left part of the list + if list.Head.Offset < interval.Offset+interval.Size && interval.Offset+interval.Size < 
list.Tail.Offset+list.Tail.Size { + // create a new list of the right part of existing list + newLists = append(newLists, subList(list, interval.Offset+interval.Size, list.Tail.Offset+list.Tail.Size)) + } + // skip anything that is fully overwritten by the new interval + } + + c.lists = newLists + // add the new interval to the lists, connecting neighbor lists var prevList, nextList *IntervalLinkedList for _, list := range c.lists { @@ -78,10 +127,6 @@ func (c *ContinuousIntervals) AddInterval(data []byte, offset int64) { nextList = list break } - if list.Head.Offset < interval.Offset+interval.Size && interval.Offset+interval.Size <= list.Head.Offset+list.Size() { - glog.V(0).Infof("unexpected [%d,%d) overlaps [%d,%d)", interval.Offset, interval.Offset+interval.Size, list.Head.Offset, list.Head.Offset+list.Size()) - break - } } for _, list := range c.lists { @@ -90,19 +135,6 @@ func (c *ContinuousIntervals) AddInterval(data []byte, offset int64) { prevList = list break } - if list.Head.Offset <= offset && offset < list.Head.Offset+list.Size() { - - // the new interval overwrites the old tail - dataStartIndex := list.Tail.Offset + list.Tail.Size - offset - glog.V(4).Infof("overlap data new [0,%d) same=%v", dataStartIndex, bytes.Compare(interval.Data[0:dataStartIndex], list.Tail.Data[len(list.Tail.Data)-int(dataStartIndex):])) - list.Tail.Data = list.Tail.Data[:len(list.Tail.Data)-int(dataStartIndex)] - list.Tail.Size -= dataStartIndex - glog.V(4).Infof("overlapping append as [%d,%d) dataSize=%d", interval.Offset, interval.Offset+interval.Size, len(interval.Data)) - - list.addNodeToTail(interval) - prevList = list - break - } } if prevList != nil && nextList != nil { diff --git a/weed/filesys/dirty_page_interval_test.go b/weed/filesys/dirty_page_interval_test.go index 4f62f90c9..184be2f3b 100644 --- a/weed/filesys/dirty_page_interval_test.go +++ b/weed/filesys/dirty_page_interval_test.go @@ -5,7 +5,7 @@ import ( "testing" ) -func TestContinuousIntervals_AddInterval(t *testing.T) { +func TestContinuousIntervals_AddIntervalAppend(t *testing.T) { c := &ContinuousIntervals{} @@ -15,6 +15,38 @@ func TestContinuousIntervals_AddInterval(t *testing.T) { c.AddInterval(getBytes(23, 4), 2) expectedData(t, c, 0, 25, 25, 23, 23, 23, 23) + +} + +func TestContinuousIntervals_AddIntervalInnerOverwrite(t *testing.T) { + + c := &ContinuousIntervals{} + + // 25, 25, 25, 25, 25 + c.AddInterval(getBytes(25, 5), 0) + // _, _, 23, 23 + c.AddInterval(getBytes(23, 2), 2) + + expectedData(t, c, 0, 25, 25, 23, 23, 25) + +} + +func TestContinuousIntervals_AddIntervalFullOverwrite(t *testing.T) { + + c := &ContinuousIntervals{} + + // 25, + c.AddInterval(getBytes(25, 1), 0) + // _, _, _, _, 23, 23 + c.AddInterval(getBytes(23, 2), 4) + // _, _, _, 24, 24, 24, 24 + c.AddInterval(getBytes(24, 4), 3) + + // _, 22, 22 + c.AddInterval(getBytes(22, 2), 1) + + expectedData(t, c, 0, 25, 22, 22, 24, 24, 24, 24) + } func expectedData(t *testing.T, c *ContinuousIntervals, offset int, data ...byte) { From c8d543ef51647fc656d4f507382a7dd2adb8d554 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Mon, 27 Jan 2020 00:58:26 -0800 Subject: [PATCH 0088/2432] avoid retry delay --- weed/filesys/wfs.go | 1 - 1 file changed, 1 deletion(-) diff --git a/weed/filesys/wfs.go b/weed/filesys/wfs.go index af1102b45..4807e367b 100644 --- a/weed/filesys/wfs.go +++ b/weed/filesys/wfs.go @@ -99,7 +99,6 @@ func (wfs *WFS) WithFilerClient(ctx context.Context, fn func(context.Context, fi return nil } if strings.Contains(err.Error(), "context canceled") { - 
time.Sleep(3337 * time.Millisecond) glog.V(2).Infoln("retry context canceled request...") return util.WithCachedGrpcClient(context.Background(), func(ctx2 context.Context, grpcConnection *grpc.ClientConn) error { client := filer_pb.NewSeaweedFilerClient(grpcConnection) From f72405705192f531a0ee5da11361bc4300ccde08 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Mon, 27 Jan 2020 21:20:41 -0800 Subject: [PATCH 0089/2432] reduce logs --- weed/filesys/dirty_page_interval.go | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) diff --git a/weed/filesys/dirty_page_interval.go b/weed/filesys/dirty_page_interval.go index ed95783a8..162b9be64 100644 --- a/weed/filesys/dirty_page_interval.go +++ b/weed/filesys/dirty_page_interval.go @@ -4,8 +4,6 @@ import ( "bytes" "io" "math" - - "github.com/chrislusf/seaweedfs/weed/glog" ) type IntervalNode struct { @@ -31,12 +29,12 @@ func (list *IntervalLinkedList) Size() int64 { return list.Tail.Offset + list.Tail.Size - list.Head.Offset } func (list *IntervalLinkedList) addNodeToTail(node *IntervalNode) { - glog.V(4).Infof("add to tail [%d,%d) + [%d,%d) => [%d,%d)", list.Head.Offset, list.Tail.Offset+list.Tail.Size, node.Offset, node.Offset+node.Size, list.Head.Offset, node.Offset+node.Size) + // glog.V(4).Infof("add to tail [%d,%d) + [%d,%d) => [%d,%d)", list.Head.Offset, list.Tail.Offset+list.Tail.Size, node.Offset, node.Offset+node.Size, list.Head.Offset, node.Offset+node.Size) list.Tail.Next = node list.Tail = node } func (list *IntervalLinkedList) addNodeToHead(node *IntervalNode) { - glog.V(4).Infof("add to head [%d,%d) + [%d,%d) => [%d,%d)", node.Offset, node.Offset+node.Size, list.Head.Offset, list.Tail.Offset+list.Tail.Size, node.Offset, list.Tail.Offset+list.Tail.Size) + // glog.V(4).Infof("add to head [%d,%d) + [%d,%d) => [%d,%d)", node.Offset, node.Offset+node.Size, list.Head.Offset, list.Tail.Offset+list.Tail.Size, node.Offset, list.Tail.Offset+list.Tail.Size) node.Next = list.Head list.Head = node } @@ -91,8 +89,6 @@ func subList(list *IntervalLinkedList, start, stop int64) *IntervalLinkedList { func (c *ContinuousIntervals) AddInterval(data []byte, offset int64) { - // TODO AddInterval needs to handle all possible out of order writes - interval := &IntervalNode{Data: data, Offset: offset, Size: int64(len(data))} var newLists []*IntervalLinkedList @@ -138,7 +134,7 @@ func (c *ContinuousIntervals) AddInterval(data []byte, offset int64) { } if prevList != nil && nextList != nil { - glog.V(4).Infof("connecting [%d,%d) + [%d,%d) => [%d,%d)", prevList.Head.Offset, prevList.Tail.Offset+prevList.Tail.Size, nextList.Head.Offset, nextList.Tail.Offset+nextList.Tail.Size, prevList.Head.Offset, nextList.Tail.Offset+nextList.Tail.Size) + // glog.V(4).Infof("connecting [%d,%d) + [%d,%d) => [%d,%d)", prevList.Head.Offset, prevList.Tail.Offset+prevList.Tail.Size, nextList.Head.Offset, nextList.Tail.Offset+nextList.Tail.Size, prevList.Head.Offset, nextList.Tail.Offset+nextList.Tail.Size) prevList.Tail.Next = nextList.Head prevList.Tail = nextList.Tail c.removeList(nextList) From 27b94cb65b34c084790f0a1884956702ee51acc2 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Tue, 28 Jan 2020 00:49:47 -0800 Subject: [PATCH 0090/2432] fix wrong url fix https://github.com/chrislusf/seaweedfs/issues/1187 --- weed/util/config.go | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/weed/util/config.go b/weed/util/config.go index 4ba68b800..7b86b749e 100644 --- a/weed/util/config.go +++ b/weed/util/config.go @@ -27,10 +27,7 @@ func 
LoadConfiguration(configFileName string, required bool) (loaded bool) { glog.V(0).Infof("Reading %s: %v", viper.ConfigFileUsed(), err) if required { glog.Fatalf("Failed to load %s.toml file from current directory, or $HOME/.seaweedfs/, or /etc/seaweedfs/"+ - "\n\nPlease follow this example and add a filer.toml file to "+ - "current directory, or $HOME/.seaweedfs/, or /etc/seaweedfs/:\n"+ - " https://github.com/chrislusf/seaweedfs/blob/master/weed/%s.toml\n"+ - "\nOr use this command to generate the default toml file\n"+ + "\n\nPlease use this command to generate the default %s.toml file\n"+ " weed scaffold -config=%s -output=.\n\n\n", configFileName, configFileName, configFileName) } else { From d335f04de6861b571190c13bd7d65e9a0c02f187 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Wed, 29 Jan 2020 09:09:55 -0800 Subject: [PATCH 0091/2432] support env variables to overwrite toml file --- go.mod | 2 +- .../repeated_vacuum/repeated_vacuum.go | 6 ++--- unmaintained/volume_tailer/volume_tailer.go | 2 +- weed/command/backup.go | 4 +--- weed/command/benchmark.go | 3 +-- weed/command/filer.go | 7 +++--- weed/command/filer_copy.go | 6 ++--- weed/command/filer_replication.go | 17 ++++++------- weed/command/master.go | 12 +++++----- weed/command/mount_std.go | 3 +-- weed/command/s3.go | 7 +++--- weed/command/scaffold.go | 8 +++++++ weed/command/shell.go | 3 +-- weed/command/upload.go | 6 ++--- weed/command/volume.go | 2 +- weed/command/webdav.go | 3 +-- weed/filer2/cassandra/cassandra_store.go | 6 ++--- weed/filer2/configuration.go | 3 +-- weed/filer2/etcd/etcd_store.go | 6 ++--- weed/filer2/filechunks_test.go | 20 ++++++++-------- weed/filer2/filerstore.go | 6 ++--- weed/filer2/leveldb/leveldb_store.go | 4 ++-- weed/filer2/leveldb2/leveldb2_store.go | 4 ++-- weed/filer2/mysql/mysql_store.go | 18 +++++++------- weed/filer2/postgres/postgres_store.go | 18 +++++++------- weed/filer2/redis/redis_cluster_store.go | 14 +++++------ weed/filer2/redis/redis_store.go | 8 +++---- weed/filer2/tikv/tikv_store.go | 4 ++-- weed/filesys/dirty_page_interval.go | 2 +- weed/notification/aws_sqs/aws_sqs_pub.go | 14 +++++------ weed/notification/configuration.go | 9 ++++--- .../gocdk_pub_sub/gocdk_pub_sub.go | 11 +++++---- .../google_pub_sub/google_pub_sub.go | 12 +++++----- weed/notification/kafka/kafka_queue.go | 10 ++++---- weed/notification/log/log_queue.go | 2 +- weed/replication/replicator.go | 4 ++-- weed/replication/sink/azuresink/azure_sink.go | 10 ++++---- weed/replication/sink/b2sink/b2_sink.go | 10 ++++---- weed/replication/sink/filersink/filer_sink.go | 19 ++++++++------- weed/replication/sink/gcssink/gcs_sink.go | 8 +++---- weed/replication/sink/replication_sink.go | 2 +- weed/replication/sink/s3sink/s3_sink.go | 18 +++++++------- weed/replication/source/filer_source.go | 15 ++++++------ weed/replication/sub/notification_aws_sqs.go | 14 +++++------ .../sub/notification_gocdk_pub_sub.go | 4 ++-- .../sub/notification_google_pub_sub.go | 12 +++++----- weed/replication/sub/notification_kafka.go | 14 +++++------ weed/replication/sub/notifications.go | 2 +- weed/security/tls.go | 4 ++-- weed/server/filer_server.go | 8 +++---- weed/server/master_server.go | 16 ++++++------- weed/server/volume_grpc_client_to_master.go | 9 +++---- weed/server/volume_server.go | 7 +++--- weed/server/webdav_server.go | 4 +--- weed/shell/command_fs_meta_notify.go | 6 ++--- weed/storage/backend/backend.go | 14 +++++------ weed/storage/backend/s3_backend/s3_backend.go | 14 +++++------ weed/storage/volume_vacuum.go | 2 +- 
weed/util/config.go | 13 +++++++++- weed/util/config_test.go | 24 +++++++++++++++++++ 60 files changed, 268 insertions(+), 247 deletions(-) create mode 100644 weed/util/config_test.go diff --git a/go.mod b/go.mod index 8fab3bb84..48879fd8c 100644 --- a/go.mod +++ b/go.mod @@ -69,7 +69,7 @@ require ( github.com/spf13/jwalterweatherman v1.1.0 // indirect github.com/spf13/viper v1.4.0 github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271 // indirect - github.com/stretchr/testify v1.4.0 // indirect + github.com/stretchr/testify v1.4.0 github.com/syndtr/goleveldb v1.0.0 github.com/tidwall/gjson v1.3.2 github.com/tidwall/match v1.0.1 diff --git a/unmaintained/repeated_vacuum/repeated_vacuum.go b/unmaintained/repeated_vacuum/repeated_vacuum.go index 28bcabb9b..718b6faa1 100644 --- a/unmaintained/repeated_vacuum/repeated_vacuum.go +++ b/unmaintained/repeated_vacuum/repeated_vacuum.go @@ -7,10 +7,8 @@ import ( "log" "math/rand" - "github.com/chrislusf/seaweedfs/weed/security" - "github.com/spf13/viper" - "github.com/chrislusf/seaweedfs/weed/operation" + "github.com/chrislusf/seaweedfs/weed/security" "github.com/chrislusf/seaweedfs/weed/util" ) @@ -23,7 +21,7 @@ func main() { flag.Parse() util.LoadConfiguration("security", false) - grpcDialOption := security.LoadClientTLS(viper.Sub("grpc"), "client") + grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.client") for i := 0; i < *repeat; i++ { assignResult, err := operation.Assign(*master, grpcDialOption, &operation.VolumeAssignRequest{Count: 1}) diff --git a/unmaintained/volume_tailer/volume_tailer.go b/unmaintained/volume_tailer/volume_tailer.go index f0ef51c09..3c2d36d22 100644 --- a/unmaintained/volume_tailer/volume_tailer.go +++ b/unmaintained/volume_tailer/volume_tailer.go @@ -25,7 +25,7 @@ func main() { flag.Parse() util2.LoadConfiguration("security", false) - grpcDialOption := security.LoadClientTLS(viper.Sub("grpc"), "client") + grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.client") vid := needle.VolumeId(*volumeId) diff --git a/weed/command/backup.go b/weed/command/backup.go index 0f6bed225..eb2b5ba4a 100644 --- a/weed/command/backup.go +++ b/weed/command/backup.go @@ -3,8 +3,6 @@ package command import ( "fmt" - "github.com/spf13/viper" - "github.com/chrislusf/seaweedfs/weed/security" "github.com/chrislusf/seaweedfs/weed/storage/needle" "github.com/chrislusf/seaweedfs/weed/storage/super_block" @@ -66,7 +64,7 @@ var cmdBackup = &Command{ func runBackup(cmd *Command, args []string) bool { util.LoadConfiguration("security", false) - grpcDialOption := security.LoadClientTLS(viper.Sub("grpc"), "client") + grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.client") if *s.volumeId == -1 { return false diff --git a/weed/command/benchmark.go b/weed/command/benchmark.go index 26be1fe3a..382e7c850 100644 --- a/weed/command/benchmark.go +++ b/weed/command/benchmark.go @@ -15,7 +15,6 @@ import ( "sync" "time" - "github.com/spf13/viper" "google.golang.org/grpc" "github.com/chrislusf/seaweedfs/weed/glog" @@ -109,7 +108,7 @@ var ( func runBenchmark(cmd *Command, args []string) bool { util.LoadConfiguration("security", false) - b.grpcDialOption = security.LoadClientTLS(viper.Sub("grpc"), "client") + b.grpcDialOption = security.LoadClientTLS(util.GetViper(), "grpc.client") fmt.Printf("This is SeaweedFS version %s %s %s\n", util.VERSION, runtime.GOOS, runtime.GOARCH) if *b.maxCpu < 1 { diff --git a/weed/command/filer.go b/weed/command/filer.go index b1ceb46f5..ea8392fac 100644 --- a/weed/command/filer.go +++ 
b/weed/command/filer.go @@ -6,14 +6,13 @@ import ( "strings" "time" - "github.com/chrislusf/seaweedfs/weed/security" - "github.com/spf13/viper" + "google.golang.org/grpc/reflection" "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/security" "github.com/chrislusf/seaweedfs/weed/server" "github.com/chrislusf/seaweedfs/weed/util" - "google.golang.org/grpc/reflection" ) var ( @@ -145,7 +144,7 @@ func (fo *FilerOptions) startFiler() { if err != nil { glog.Fatalf("failed to listen on grpc port %d: %v", grpcPort, err) } - grpcS := util.NewGrpcServer(security.LoadServerTLS(viper.Sub("grpc"), "filer")) + grpcS := util.NewGrpcServer(security.LoadServerTLS(util.GetViper(), "grpc.filer")) filer_pb.RegisterSeaweedFilerServer(grpcS, fs) reflection.Register(grpcS) go grpcS.Serve(grpcL) diff --git a/weed/command/filer_copy.go b/weed/command/filer_copy.go index e74ea7d93..e5979d786 100644 --- a/weed/command/filer_copy.go +++ b/weed/command/filer_copy.go @@ -14,13 +14,13 @@ import ( "sync" "time" + "google.golang.org/grpc" + "github.com/chrislusf/seaweedfs/weed/operation" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" "github.com/chrislusf/seaweedfs/weed/security" "github.com/chrislusf/seaweedfs/weed/util" "github.com/chrislusf/seaweedfs/weed/wdclient" - "github.com/spf13/viper" - "google.golang.org/grpc" ) var ( @@ -105,7 +105,7 @@ func runCopy(cmd *Command, args []string) bool { filerGrpcPort := filerPort + 10000 filerGrpcAddress := fmt.Sprintf("%s:%d", filerUrl.Hostname(), filerGrpcPort) - copy.grpcDialOption = security.LoadClientTLS(viper.Sub("grpc"), "client") + copy.grpcDialOption = security.LoadClientTLS(util.GetViper(), "grpc.client") ctx := context.Background() diff --git a/weed/command/filer_replication.go b/weed/command/filer_replication.go index c6e7f5dba..737f0d24a 100644 --- a/weed/command/filer_replication.go +++ b/weed/command/filer_replication.go @@ -39,7 +39,7 @@ func runFilerReplicate(cmd *Command, args []string) bool { util.LoadConfiguration("security", false) util.LoadConfiguration("replication", true) util.LoadConfiguration("notification", true) - config := viper.GetViper() + config := util.GetViper() var notificationInput sub.NotificationInput @@ -47,8 +47,7 @@ func runFilerReplicate(cmd *Command, args []string) bool { for _, input := range sub.NotificationInputs { if config.GetBool("notification." + input.GetName() + ".enabled") { - viperSub := config.Sub("notification." + input.GetName()) - if err := input.Initialize(viperSub); err != nil { + if err := input.Initialize(config, "notification."+input.GetName()+"."); err != nil { glog.Fatalf("Failed to initialize notification input for %s: %+v", input.GetName(), err) } @@ -66,10 +65,9 @@ func runFilerReplicate(cmd *Command, args []string) bool { // avoid recursive replication if config.GetBool("notification.source.filer.enabled") && config.GetBool("notification.sink.filer.enabled") { - sourceConfig, sinkConfig := config.Sub("source.filer"), config.Sub("sink.filer") - if sourceConfig.GetString("grpcAddress") == sinkConfig.GetString("grpcAddress") { - fromDir := sourceConfig.GetString("directory") - toDir := sinkConfig.GetString("directory") + if config.GetString("source.filer.grpcAddress") == config.GetString("sink.filer.grpcAddress") { + fromDir := config.GetString("source.filer.directory") + toDir := config.GetString("sink.filer.directory") if strings.HasPrefix(toDir, fromDir) { glog.Fatalf("recursive replication! 
source directory %s includes the sink directory %s", fromDir, toDir) } @@ -79,8 +77,7 @@ func runFilerReplicate(cmd *Command, args []string) bool { var dataSink sink.ReplicationSink for _, sk := range sink.Sinks { if config.GetBool("sink." + sk.GetName() + ".enabled") { - viperSub := config.Sub("sink." + sk.GetName()) - if err := sk.Initialize(viperSub); err != nil { + if err := sk.Initialize(config, "sink."+sk.GetName()+"."); err != nil { glog.Fatalf("Failed to initialize sink for %s: %+v", sk.GetName(), err) } @@ -98,7 +95,7 @@ func runFilerReplicate(cmd *Command, args []string) bool { return true } - replicator := replication.NewReplicator(config.Sub("source.filer"), dataSink) + replicator := replication.NewReplicator(config, "source.filer.", dataSink) for { key, m, err := notificationInput.ReceiveMessage() diff --git a/weed/command/master.go b/weed/command/master.go index 8d0a3289c..c4b11119b 100644 --- a/weed/command/master.go +++ b/weed/command/master.go @@ -8,15 +8,15 @@ import ( "strings" "github.com/chrislusf/raft/protobuf" + "github.com/gorilla/mux" + "google.golang.org/grpc/reflection" + "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/pb/master_pb" "github.com/chrislusf/seaweedfs/weed/security" "github.com/chrislusf/seaweedfs/weed/server" "github.com/chrislusf/seaweedfs/weed/storage/backend" "github.com/chrislusf/seaweedfs/weed/util" - "github.com/gorilla/mux" - "github.com/spf13/viper" - "google.golang.org/grpc/reflection" ) var ( @@ -102,7 +102,7 @@ func runMaster(cmd *Command, args []string) bool { func startMaster(masterOption MasterOptions, masterWhiteList []string) { - backend.LoadConfiguration(viper.GetViper()) + backend.LoadConfiguration(util.GetViper()) myMasterAddress, peers := checkPeers(*masterOption.ip, *masterOption.port, *masterOption.peers) @@ -115,7 +115,7 @@ func startMaster(masterOption MasterOptions, masterWhiteList []string) { glog.Fatalf("Master startup error: %v", e) } // start raftServer - raftServer := weed_server.NewRaftServer(security.LoadClientTLS(viper.Sub("grpc"), "master"), + raftServer := weed_server.NewRaftServer(security.LoadClientTLS(util.GetViper(), "grpc.master"), peers, myMasterAddress, *masterOption.metaFolder, ms.Topo, *masterOption.pulseSeconds) if raftServer == nil { glog.Fatalf("please verify %s is writable, see https://github.com/chrislusf/seaweedfs/issues/717", *masterOption.metaFolder) @@ -129,7 +129,7 @@ func startMaster(masterOption MasterOptions, masterWhiteList []string) { glog.Fatalf("master failed to listen on grpc port %d: %v", grpcPort, err) } // Create your protocol servers. 
- grpcS := util.NewGrpcServer(security.LoadServerTLS(viper.Sub("grpc"), "master")) + grpcS := util.NewGrpcServer(security.LoadServerTLS(util.GetViper(), "grpc.master")) master_pb.RegisterSeaweedServer(grpcS, ms) protobuf.RegisterRaftServer(grpcS, raftServer) reflection.Register(grpcS) diff --git a/weed/command/mount_std.go b/weed/command/mount_std.go index 453531d00..891810e61 100644 --- a/weed/command/mount_std.go +++ b/weed/command/mount_std.go @@ -13,7 +13,6 @@ import ( "time" "github.com/jacobsa/daemonize" - "github.com/spf13/viper" "github.com/chrislusf/seaweedfs/weed/filesys" "github.com/chrislusf/seaweedfs/weed/glog" @@ -148,7 +147,7 @@ func RunMount(filer, filerMountRootPath, dir, collection, replication, dataCente err = fs.Serve(c, filesys.NewSeaweedFileSystem(&filesys.Option{ FilerGrpcAddress: filerGrpcAddress, - GrpcDialOption: security.LoadClientTLS(viper.Sub("grpc"), "client"), + GrpcDialOption: security.LoadClientTLS(util.GetViper(), "grpc.client"), FilerMountRootPath: mountRoot, Collection: collection, Replication: replication, diff --git a/weed/command/s3.go b/weed/command/s3.go index e004bb066..10a486657 100644 --- a/weed/command/s3.go +++ b/weed/command/s3.go @@ -1,18 +1,17 @@ package command import ( + "fmt" "net/http" "time" "github.com/chrislusf/seaweedfs/weed/security" - "github.com/spf13/viper" - "fmt" + "github.com/gorilla/mux" "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/s3api" "github.com/chrislusf/seaweedfs/weed/util" - "github.com/gorilla/mux" ) var ( @@ -69,7 +68,7 @@ func (s3opt *S3Options) startS3Server() bool { FilerGrpcAddress: filerGrpcAddress, DomainName: *s3opt.domainName, BucketsPath: *s3opt.filerBucketsPath, - GrpcDialOption: security.LoadClientTLS(viper.Sub("grpc"), "client"), + GrpcDialOption: security.LoadClientTLS(util.GetViper(), "grpc.client"), }) if s3ApiServer_err != nil { glog.Fatalf("S3 API Server startup error: %v", s3ApiServer_err) diff --git a/weed/command/scaffold.go b/weed/command/scaffold.go index 78eec277c..524bf5e13 100644 --- a/weed/command/scaffold.go +++ b/weed/command/scaffold.go @@ -14,6 +14,14 @@ var cmdScaffold = &Command{ Short: "generate basic configuration files", Long: `Generate filer.toml with all possible configurations for you to customize. + The options can also be overwritten by environment variables. + For example, the filer.toml mysql password can be overwritten by environment variable + export weed.mysql.password=some_password + Environment variable rules: + * Prefix fix with "WEED_" + * Upppercase the reset of variable name. + * Replace '.' 
with '_' + `, } diff --git a/weed/command/shell.go b/weed/command/shell.go index 34b5aef31..dcf70608f 100644 --- a/weed/command/shell.go +++ b/weed/command/shell.go @@ -6,7 +6,6 @@ import ( "github.com/chrislusf/seaweedfs/weed/security" "github.com/chrislusf/seaweedfs/weed/shell" "github.com/chrislusf/seaweedfs/weed/util" - "github.com/spf13/viper" ) var ( @@ -31,7 +30,7 @@ var cmdShell = &Command{ func runShell(command *Command, args []string) bool { util.LoadConfiguration("security", false) - shellOptions.GrpcDialOption = security.LoadClientTLS(viper.Sub("grpc"), "client") + shellOptions.GrpcDialOption = security.LoadClientTLS(util.GetViper(), "grpc.client") var filerPwdErr error shellOptions.FilerHost, shellOptions.FilerPort, shellOptions.Directory, filerPwdErr = util.ParseFilerUrl(*shellInitialFilerUrl) diff --git a/weed/command/upload.go b/weed/command/upload.go index 25e938d9b..d71046131 100644 --- a/weed/command/upload.go +++ b/weed/command/upload.go @@ -6,11 +6,9 @@ import ( "os" "path/filepath" + "github.com/chrislusf/seaweedfs/weed/operation" "github.com/chrislusf/seaweedfs/weed/security" "github.com/chrislusf/seaweedfs/weed/util" - "github.com/spf13/viper" - - "github.com/chrislusf/seaweedfs/weed/operation" ) var ( @@ -63,7 +61,7 @@ var cmdUpload = &Command{ func runUpload(cmd *Command, args []string) bool { util.LoadConfiguration("security", false) - grpcDialOption := security.LoadClientTLS(viper.Sub("grpc"), "client") + grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.client") if len(args) == 0 { if *upload.dir == "" { diff --git a/weed/command/volume.go b/weed/command/volume.go index b0f46bbf3..9d665d143 100644 --- a/weed/command/volume.go +++ b/weed/command/volume.go @@ -234,7 +234,7 @@ func (v VolumeServerOptions) startGrpcService(vs volume_server_pb.VolumeServerSe if err != nil { glog.Fatalf("failed to listen on grpc port %d: %v", grpcPort, err) } - grpcS := util.NewGrpcServer(security.LoadServerTLS(viper.Sub("grpc"), "volume")) + grpcS := util.NewGrpcServer(security.LoadServerTLS(util.GetViper(), "grpc.volume")) volume_server_pb.RegisterVolumeServerServer(grpcS, vs) reflection.Register(grpcS) go func() { diff --git a/weed/command/webdav.go b/weed/command/webdav.go index 371c4a9ad..0e6f89040 100644 --- a/weed/command/webdav.go +++ b/weed/command/webdav.go @@ -11,7 +11,6 @@ import ( "github.com/chrislusf/seaweedfs/weed/security" "github.com/chrislusf/seaweedfs/weed/server" "github.com/chrislusf/seaweedfs/weed/util" - "github.com/spf13/viper" ) var ( @@ -75,7 +74,7 @@ func (wo *WebDavOption) startWebDav() bool { ws, webdavServer_err := weed_server.NewWebDavServer(&weed_server.WebDavOption{ Filer: *wo.filer, FilerGrpcAddress: filerGrpcAddress, - GrpcDialOption: security.LoadClientTLS(viper.Sub("grpc"), "client"), + GrpcDialOption: security.LoadClientTLS(util.GetViper(), "grpc.client"), Collection: *wo.collection, Uid: uid, Gid: gid, diff --git a/weed/filer2/cassandra/cassandra_store.go b/weed/filer2/cassandra/cassandra_store.go index dcaab8bc4..f81ef946f 100644 --- a/weed/filer2/cassandra/cassandra_store.go +++ b/weed/filer2/cassandra/cassandra_store.go @@ -22,10 +22,10 @@ func (store *CassandraStore) GetName() string { return "cassandra" } -func (store *CassandraStore) Initialize(configuration util.Configuration) (err error) { +func (store *CassandraStore) Initialize(configuration util.Configuration, prefix string) (err error) { return store.initialize( - configuration.GetString("keyspace"), - configuration.GetStringSlice("hosts"), + 
configuration.GetString(prefix+"keyspace"), + configuration.GetStringSlice(prefix+"hosts"), ) } diff --git a/weed/filer2/configuration.go b/weed/filer2/configuration.go index 7b05b53dc..a174117ea 100644 --- a/weed/filer2/configuration.go +++ b/weed/filer2/configuration.go @@ -17,8 +17,7 @@ func (f *Filer) LoadConfiguration(config *viper.Viper) { for _, store := range Stores { if config.GetBool(store.GetName() + ".enabled") { - viperSub := config.Sub(store.GetName()) - if err := store.Initialize(viperSub); err != nil { + if err := store.Initialize(config, store.GetName()+"."); err != nil { glog.Fatalf("Failed to initialize store for %s: %+v", store.GetName(), err) } diff --git a/weed/filer2/etcd/etcd_store.go b/weed/filer2/etcd/etcd_store.go index 2eb9e3e86..0f0c01426 100644 --- a/weed/filer2/etcd/etcd_store.go +++ b/weed/filer2/etcd/etcd_store.go @@ -28,13 +28,13 @@ func (store *EtcdStore) GetName() string { return "etcd" } -func (store *EtcdStore) Initialize(configuration weed_util.Configuration) (err error) { - servers := configuration.GetString("servers") +func (store *EtcdStore) Initialize(configuration weed_util.Configuration, prefix string) (err error) { + servers := configuration.GetString(prefix + "servers") if servers == "" { servers = "localhost:2379" } - timeout := configuration.GetString("timeout") + timeout := configuration.GetString(prefix + "timeout") if timeout == "" { timeout = "3s" } diff --git a/weed/filer2/filechunks_test.go b/weed/filer2/filechunks_test.go index ed30c2abc..bb4a6c74d 100644 --- a/weed/filer2/filechunks_test.go +++ b/weed/filer2/filechunks_test.go @@ -350,21 +350,21 @@ func TestChunksReading(t *testing.T) { { Chunks: []*filer_pb.FileChunk{ {Offset: 0, Size: 43175947, FileId: "2,111fc2cbfac1", Mtime: 1}, - {Offset: 43175936, Size: 52981771-43175936, FileId: "2,112a36ea7f85", Mtime: 2}, - {Offset: 52981760, Size: 72564747-52981760, FileId: "4,112d5f31c5e7", Mtime: 3}, - {Offset: 72564736, Size: 133255179-72564736, FileId: "1,113245f0cdb6", Mtime: 4}, - {Offset: 133255168, Size: 137269259-133255168, FileId: "3,1141a70733b5", Mtime: 5}, - {Offset: 137269248, Size: 153578836-137269248, FileId: "1,114201d5bbdb", Mtime: 6}, + {Offset: 43175936, Size: 52981771 - 43175936, FileId: "2,112a36ea7f85", Mtime: 2}, + {Offset: 52981760, Size: 72564747 - 52981760, FileId: "4,112d5f31c5e7", Mtime: 3}, + {Offset: 72564736, Size: 133255179 - 72564736, FileId: "1,113245f0cdb6", Mtime: 4}, + {Offset: 133255168, Size: 137269259 - 133255168, FileId: "3,1141a70733b5", Mtime: 5}, + {Offset: 137269248, Size: 153578836 - 137269248, FileId: "1,114201d5bbdb", Mtime: 6}, }, Offset: 0, Size: 153578836, Expected: []*ChunkView{ {Offset: 0, Size: 43175936, FileId: "2,111fc2cbfac1", LogicOffset: 0}, - {Offset: 0, Size: 52981760-43175936, FileId: "2,112a36ea7f85", LogicOffset: 43175936}, - {Offset: 0, Size: 72564736-52981760, FileId: "4,112d5f31c5e7", LogicOffset: 52981760}, - {Offset: 0, Size: 133255168-72564736, FileId: "1,113245f0cdb6", LogicOffset: 72564736}, - {Offset: 0, Size: 137269248-133255168, FileId: "3,1141a70733b5", LogicOffset: 133255168}, - {Offset: 0, Size: 153578836-137269248, FileId: "1,114201d5bbdb", LogicOffset: 137269248}, + {Offset: 0, Size: 52981760 - 43175936, FileId: "2,112a36ea7f85", LogicOffset: 43175936}, + {Offset: 0, Size: 72564736 - 52981760, FileId: "4,112d5f31c5e7", LogicOffset: 52981760}, + {Offset: 0, Size: 133255168 - 72564736, FileId: "1,113245f0cdb6", LogicOffset: 72564736}, + {Offset: 0, Size: 137269248 - 133255168, FileId: "3,1141a70733b5", 
LogicOffset: 133255168}, + {Offset: 0, Size: 153578836 - 137269248, FileId: "1,114201d5bbdb", LogicOffset: 137269248}, }, }, } diff --git a/weed/filer2/filerstore.go b/weed/filer2/filerstore.go index 0bb0bd611..ae25534ed 100644 --- a/weed/filer2/filerstore.go +++ b/weed/filer2/filerstore.go @@ -14,7 +14,7 @@ type FilerStore interface { // GetName gets the name to locate the configuration in filer.toml file GetName() string // Initialize initializes the file store - Initialize(configuration util.Configuration) error + Initialize(configuration util.Configuration, prefix string) error InsertEntry(context.Context, *Entry) error UpdateEntry(context.Context, *Entry) (err error) // err == filer2.ErrNotFound if not found @@ -47,8 +47,8 @@ func (fsw *FilerStoreWrapper) GetName() string { return fsw.actualStore.GetName() } -func (fsw *FilerStoreWrapper) Initialize(configuration util.Configuration) error { - return fsw.actualStore.Initialize(configuration) +func (fsw *FilerStoreWrapper) Initialize(configuration util.Configuration, prefix string) error { + return fsw.actualStore.Initialize(configuration, prefix) } func (fsw *FilerStoreWrapper) InsertEntry(ctx context.Context, entry *Entry) error { diff --git a/weed/filer2/leveldb/leveldb_store.go b/weed/filer2/leveldb/leveldb_store.go index 4952b3b3a..44e6ac0eb 100644 --- a/weed/filer2/leveldb/leveldb_store.go +++ b/weed/filer2/leveldb/leveldb_store.go @@ -30,8 +30,8 @@ func (store *LevelDBStore) GetName() string { return "leveldb" } -func (store *LevelDBStore) Initialize(configuration weed_util.Configuration) (err error) { - dir := configuration.GetString("dir") +func (store *LevelDBStore) Initialize(configuration weed_util.Configuration, prefix string) (err error) { + dir := configuration.GetString(prefix + "dir") return store.initialize(dir) } diff --git a/weed/filer2/leveldb2/leveldb2_store.go b/weed/filer2/leveldb2/leveldb2_store.go index 8a16822ab..358d4d92a 100644 --- a/weed/filer2/leveldb2/leveldb2_store.go +++ b/weed/filer2/leveldb2/leveldb2_store.go @@ -30,8 +30,8 @@ func (store *LevelDB2Store) GetName() string { return "leveldb2" } -func (store *LevelDB2Store) Initialize(configuration weed_util.Configuration) (err error) { - dir := configuration.GetString("dir") +func (store *LevelDB2Store) Initialize(configuration weed_util.Configuration, prefix string) (err error) { + dir := configuration.GetString(prefix + "dir") return store.initialize(dir, 8) } diff --git a/weed/filer2/mysql/mysql_store.go b/weed/filer2/mysql/mysql_store.go index d1b06ece5..63d99cd9d 100644 --- a/weed/filer2/mysql/mysql_store.go +++ b/weed/filer2/mysql/mysql_store.go @@ -26,16 +26,16 @@ func (store *MysqlStore) GetName() string { return "mysql" } -func (store *MysqlStore) Initialize(configuration util.Configuration) (err error) { +func (store *MysqlStore) Initialize(configuration util.Configuration, prefix string) (err error) { return store.initialize( - configuration.GetString("username"), - configuration.GetString("password"), - configuration.GetString("hostname"), - configuration.GetInt("port"), - configuration.GetString("database"), - configuration.GetInt("connection_max_idle"), - configuration.GetInt("connection_max_open"), - configuration.GetBool("interpolateParams"), + configuration.GetString(prefix+"username"), + configuration.GetString(prefix+"password"), + configuration.GetString(prefix+"hostname"), + configuration.GetInt(prefix+"port"), + configuration.GetString(prefix+"database"), + configuration.GetInt(prefix+"connection_max_idle"), + 
configuration.GetInt(prefix+"connection_max_open"), + configuration.GetBool(prefix+"interpolateParams"), ) } diff --git a/weed/filer2/postgres/postgres_store.go b/weed/filer2/postgres/postgres_store.go index 3ec000fe0..27a0c2513 100644 --- a/weed/filer2/postgres/postgres_store.go +++ b/weed/filer2/postgres/postgres_store.go @@ -26,16 +26,16 @@ func (store *PostgresStore) GetName() string { return "postgres" } -func (store *PostgresStore) Initialize(configuration util.Configuration) (err error) { +func (store *PostgresStore) Initialize(configuration util.Configuration, prefix string) (err error) { return store.initialize( - configuration.GetString("username"), - configuration.GetString("password"), - configuration.GetString("hostname"), - configuration.GetInt("port"), - configuration.GetString("database"), - configuration.GetString("sslmode"), - configuration.GetInt("connection_max_idle"), - configuration.GetInt("connection_max_open"), + configuration.GetString(prefix+"username"), + configuration.GetString(prefix+"password"), + configuration.GetString(prefix+"hostname"), + configuration.GetInt(prefix+"port"), + configuration.GetString(prefix+"database"), + configuration.GetString(prefix+"sslmode"), + configuration.GetInt(prefix+"connection_max_idle"), + configuration.GetInt(prefix+"connection_max_open"), ) } diff --git a/weed/filer2/redis/redis_cluster_store.go b/weed/filer2/redis/redis_cluster_store.go index f1ad4b35c..eaaecb740 100644 --- a/weed/filer2/redis/redis_cluster_store.go +++ b/weed/filer2/redis/redis_cluster_store.go @@ -18,16 +18,16 @@ func (store *RedisClusterStore) GetName() string { return "redis_cluster" } -func (store *RedisClusterStore) Initialize(configuration util.Configuration) (err error) { +func (store *RedisClusterStore) Initialize(configuration util.Configuration, prefix string) (err error) { - configuration.SetDefault("useReadOnly", true) - configuration.SetDefault("routeByLatency", true) + configuration.SetDefault(prefix+"useReadOnly", true) + configuration.SetDefault(prefix+"routeByLatency", true) return store.initialize( - configuration.GetStringSlice("addresses"), - configuration.GetString("password"), - configuration.GetBool("useReadOnly"), - configuration.GetBool("routeByLatency"), + configuration.GetStringSlice(prefix+"addresses"), + configuration.GetString(prefix+"password"), + configuration.GetBool(prefix+"useReadOnly"), + configuration.GetBool(prefix+"routeByLatency"), ) } diff --git a/weed/filer2/redis/redis_store.go b/weed/filer2/redis/redis_store.go index c56fa014c..9debdb070 100644 --- a/weed/filer2/redis/redis_store.go +++ b/weed/filer2/redis/redis_store.go @@ -18,11 +18,11 @@ func (store *RedisStore) GetName() string { return "redis" } -func (store *RedisStore) Initialize(configuration util.Configuration) (err error) { +func (store *RedisStore) Initialize(configuration util.Configuration, prefix string) (err error) { return store.initialize( - configuration.GetString("address"), - configuration.GetString("password"), - configuration.GetInt("database"), + configuration.GetString(prefix+"address"), + configuration.GetString(prefix+"password"), + configuration.GetInt(prefix+"database"), ) } diff --git a/weed/filer2/tikv/tikv_store.go b/weed/filer2/tikv/tikv_store.go index 4eb8cb90d..24e05e3ad 100644 --- a/weed/filer2/tikv/tikv_store.go +++ b/weed/filer2/tikv/tikv_store.go @@ -30,8 +30,8 @@ func (store *TikvStore) GetName() string { return "tikv" } -func (store *TikvStore) Initialize(configuration weed_util.Configuration) (err error) { - pdAddr := 
configuration.GetString("pdAddress") +func (store *TikvStore) Initialize(configuration weed_util.Configuration, prefix string) (err error) { + pdAddr := configuration.GetString(prefix + "pdAddress") return store.initialize(pdAddr) } diff --git a/weed/filesys/dirty_page_interval.go b/weed/filesys/dirty_page_interval.go index 162b9be64..ec94c6df1 100644 --- a/weed/filesys/dirty_page_interval.go +++ b/weed/filesys/dirty_page_interval.go @@ -74,7 +74,7 @@ func subList(list *IntervalLinkedList, start, stop int64) *IntervalLinkedList { nodes = append(nodes, &IntervalNode{ Data: t.Data[nodeStart-t.Offset : nodeStop-t.Offset], Offset: nodeStart, - Size: nodeStop-nodeStart, + Size: nodeStop - nodeStart, Next: nil, }) } diff --git a/weed/notification/aws_sqs/aws_sqs_pub.go b/weed/notification/aws_sqs/aws_sqs_pub.go index 4c1302abb..d881049dd 100644 --- a/weed/notification/aws_sqs/aws_sqs_pub.go +++ b/weed/notification/aws_sqs/aws_sqs_pub.go @@ -27,14 +27,14 @@ func (k *AwsSqsPub) GetName() string { return "aws_sqs" } -func (k *AwsSqsPub) Initialize(configuration util.Configuration) (err error) { - glog.V(0).Infof("filer.notification.aws_sqs.region: %v", configuration.GetString("region")) - glog.V(0).Infof("filer.notification.aws_sqs.sqs_queue_name: %v", configuration.GetString("sqs_queue_name")) +func (k *AwsSqsPub) Initialize(configuration util.Configuration, prefix string) (err error) { + glog.V(0).Infof("filer.notification.aws_sqs.region: %v", configuration.GetString(prefix+"region")) + glog.V(0).Infof("filer.notification.aws_sqs.sqs_queue_name: %v", configuration.GetString(prefix+"sqs_queue_name")) return k.initialize( - configuration.GetString("aws_access_key_id"), - configuration.GetString("aws_secret_access_key"), - configuration.GetString("region"), - configuration.GetString("sqs_queue_name"), + configuration.GetString(prefix+"aws_access_key_id"), + configuration.GetString(prefix+"aws_secret_access_key"), + configuration.GetString(prefix+"region"), + configuration.GetString(prefix+"sqs_queue_name"), ) } diff --git a/weed/notification/configuration.go b/weed/notification/configuration.go index 7f8765cc3..36211692c 100644 --- a/weed/notification/configuration.go +++ b/weed/notification/configuration.go @@ -11,7 +11,7 @@ type MessageQueue interface { // GetName gets the name to locate the configuration in filer.toml file GetName() string // Initialize initializes the file store - Initialize(configuration util.Configuration) error + Initialize(configuration util.Configuration, prefix string) error SendMessage(key string, message proto.Message) error } @@ -21,7 +21,7 @@ var ( Queue MessageQueue ) -func LoadConfiguration(config *viper.Viper) { +func LoadConfiguration(config *viper.Viper, prefix string) { if config == nil { return @@ -30,9 +30,8 @@ func LoadConfiguration(config *viper.Viper) { validateOneEnabledQueue(config) for _, queue := range MessageQueues { - if config.GetBool(queue.GetName() + ".enabled") { - viperSub := config.Sub(queue.GetName()) - if err := queue.Initialize(viperSub); err != nil { + if config.GetBool(prefix + queue.GetName() + ".enabled") { + if err := queue.Initialize(config, prefix+queue.GetName()+"."); err != nil { glog.Fatalf("Failed to initialize notification for %s: %+v", queue.GetName(), err) } diff --git a/weed/notification/gocdk_pub_sub/gocdk_pub_sub.go b/weed/notification/gocdk_pub_sub/gocdk_pub_sub.go index ebf44ea6f..706261b3a 100644 --- a/weed/notification/gocdk_pub_sub/gocdk_pub_sub.go +++ b/weed/notification/gocdk_pub_sub/gocdk_pub_sub.go @@ -18,12 +18,13 @@ 
import ( "context" "fmt" - "github.com/chrislusf/seaweedfs/weed/glog" - "github.com/chrislusf/seaweedfs/weed/notification" - "github.com/chrislusf/seaweedfs/weed/util" "github.com/golang/protobuf/proto" "gocloud.dev/pubsub" _ "gocloud.dev/pubsub/awssnssqs" + + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/notification" + "github.com/chrislusf/seaweedfs/weed/util" // _ "gocloud.dev/pubsub/azuresb" _ "gocloud.dev/pubsub/gcppubsub" _ "gocloud.dev/pubsub/natspubsub" @@ -43,8 +44,8 @@ func (k *GoCDKPubSub) GetName() string { return "gocdk_pub_sub" } -func (k *GoCDKPubSub) Initialize(config util.Configuration) error { - k.topicURL = config.GetString("topic_url") +func (k *GoCDKPubSub) Initialize(configuration util.Configuration, prefix string) error { + k.topicURL = configuration.GetString(prefix + "topic_url") glog.V(0).Infof("notification.gocdk_pub_sub.topic_url: %v", k.topicURL) topic, err := pubsub.OpenTopic(context.Background(), k.topicURL) if err != nil { diff --git a/weed/notification/google_pub_sub/google_pub_sub.go b/weed/notification/google_pub_sub/google_pub_sub.go index 7b26bfe38..363a86eb6 100644 --- a/weed/notification/google_pub_sub/google_pub_sub.go +++ b/weed/notification/google_pub_sub/google_pub_sub.go @@ -25,13 +25,13 @@ func (k *GooglePubSub) GetName() string { return "google_pub_sub" } -func (k *GooglePubSub) Initialize(configuration util.Configuration) (err error) { - glog.V(0).Infof("notification.google_pub_sub.project_id: %v", configuration.GetString("project_id")) - glog.V(0).Infof("notification.google_pub_sub.topic: %v", configuration.GetString("topic")) +func (k *GooglePubSub) Initialize(configuration util.Configuration, prefix string) (err error) { + glog.V(0).Infof("notification.google_pub_sub.project_id: %v", configuration.GetString(prefix+"project_id")) + glog.V(0).Infof("notification.google_pub_sub.topic: %v", configuration.GetString(prefix+"topic")) return k.initialize( - configuration.GetString("google_application_credentials"), - configuration.GetString("project_id"), - configuration.GetString("topic"), + configuration.GetString(prefix+"google_application_credentials"), + configuration.GetString(prefix+"project_id"), + configuration.GetString(prefix+"topic"), ) } diff --git a/weed/notification/kafka/kafka_queue.go b/weed/notification/kafka/kafka_queue.go index fd545722b..8d83b5892 100644 --- a/weed/notification/kafka/kafka_queue.go +++ b/weed/notification/kafka/kafka_queue.go @@ -21,12 +21,12 @@ func (k *KafkaQueue) GetName() string { return "kafka" } -func (k *KafkaQueue) Initialize(configuration util.Configuration) (err error) { - glog.V(0).Infof("filer.notification.kafka.hosts: %v\n", configuration.GetStringSlice("hosts")) - glog.V(0).Infof("filer.notification.kafka.topic: %v\n", configuration.GetString("topic")) +func (k *KafkaQueue) Initialize(configuration util.Configuration, prefix string) (err error) { + glog.V(0).Infof("filer.notification.kafka.hosts: %v\n", configuration.GetStringSlice(prefix+"hosts")) + glog.V(0).Infof("filer.notification.kafka.topic: %v\n", configuration.GetString(prefix+"topic")) return k.initialize( - configuration.GetStringSlice("hosts"), - configuration.GetString("topic"), + configuration.GetStringSlice(prefix+"hosts"), + configuration.GetString(prefix+"topic"), ) } diff --git a/weed/notification/log/log_queue.go b/weed/notification/log/log_queue.go index dcc038dfc..1ca4786a1 100644 --- a/weed/notification/log/log_queue.go +++ b/weed/notification/log/log_queue.go @@ -18,7 +18,7 @@ func (k 
*LogQueue) GetName() string { return "log" } -func (k *LogQueue) Initialize(configuration util.Configuration) (err error) { +func (k *LogQueue) Initialize(configuration util.Configuration, prefix string) (err error) { return nil } diff --git a/weed/replication/replicator.go b/weed/replication/replicator.go index 7353cdc91..a0ef6591c 100644 --- a/weed/replication/replicator.go +++ b/weed/replication/replicator.go @@ -18,10 +18,10 @@ type Replicator struct { source *source.FilerSource } -func NewReplicator(sourceConfig util.Configuration, dataSink sink.ReplicationSink) *Replicator { +func NewReplicator(sourceConfig util.Configuration, configPrefix string, dataSink sink.ReplicationSink) *Replicator { source := &source.FilerSource{} - source.Initialize(sourceConfig) + source.Initialize(sourceConfig, configPrefix) dataSink.SetSourceFiler(source) diff --git a/weed/replication/sink/azuresink/azure_sink.go b/weed/replication/sink/azuresink/azure_sink.go index 6381908a1..a0b1a41ab 100644 --- a/weed/replication/sink/azuresink/azure_sink.go +++ b/weed/replication/sink/azuresink/azure_sink.go @@ -35,12 +35,12 @@ func (g *AzureSink) GetSinkToDirectory() string { return g.dir } -func (g *AzureSink) Initialize(configuration util.Configuration) error { +func (g *AzureSink) Initialize(configuration util.Configuration, prefix string) error { return g.initialize( - configuration.GetString("account_name"), - configuration.GetString("account_key"), - configuration.GetString("container"), - configuration.GetString("directory"), + configuration.GetString(prefix+"account_name"), + configuration.GetString(prefix+"account_key"), + configuration.GetString(prefix+"container"), + configuration.GetString(prefix+"directory"), ) } diff --git a/weed/replication/sink/b2sink/b2_sink.go b/weed/replication/sink/b2sink/b2_sink.go index 35c2230fa..8c80a64bd 100644 --- a/weed/replication/sink/b2sink/b2_sink.go +++ b/weed/replication/sink/b2sink/b2_sink.go @@ -31,12 +31,12 @@ func (g *B2Sink) GetSinkToDirectory() string { return g.dir } -func (g *B2Sink) Initialize(configuration util.Configuration) error { +func (g *B2Sink) Initialize(configuration util.Configuration, prefix string) error { return g.initialize( - configuration.GetString("b2_account_id"), - configuration.GetString("b2_master_application_key"), - configuration.GetString("bucket"), - configuration.GetString("directory"), + configuration.GetString(prefix+"b2_account_id"), + configuration.GetString(prefix+"b2_master_application_key"), + configuration.GetString(prefix+"bucket"), + configuration.GetString(prefix+"directory"), ) } diff --git a/weed/replication/sink/filersink/filer_sink.go b/weed/replication/sink/filersink/filer_sink.go index 4790d1562..de99fbe1c 100644 --- a/weed/replication/sink/filersink/filer_sink.go +++ b/weed/replication/sink/filersink/filer_sink.go @@ -3,10 +3,11 @@ package filersink import ( "context" "fmt" - "github.com/chrislusf/seaweedfs/weed/security" - "github.com/spf13/viper" + "google.golang.org/grpc" + "github.com/chrislusf/seaweedfs/weed/security" + "github.com/chrislusf/seaweedfs/weed/filer2" "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" @@ -38,13 +39,13 @@ func (fs *FilerSink) GetSinkToDirectory() string { return fs.dir } -func (fs *FilerSink) Initialize(configuration util.Configuration) error { +func (fs *FilerSink) Initialize(configuration util.Configuration, prefix string) error { return fs.initialize( - configuration.GetString("grpcAddress"), - configuration.GetString("directory"), - 
configuration.GetString("replication"), - configuration.GetString("collection"), - configuration.GetInt("ttlSec"), + configuration.GetString(prefix+"grpcAddress"), + configuration.GetString(prefix+"directory"), + configuration.GetString(prefix+"replication"), + configuration.GetString(prefix+"collection"), + configuration.GetInt(prefix+"ttlSec"), ) } @@ -59,7 +60,7 @@ func (fs *FilerSink) initialize(grpcAddress string, dir string, fs.replication = replication fs.collection = collection fs.ttlSec = int32(ttlSec) - fs.grpcDialOption = security.LoadClientTLS(viper.Sub("grpc"), "client") + fs.grpcDialOption = security.LoadClientTLS(util.GetViper(), "grpc.client") return nil } diff --git a/weed/replication/sink/gcssink/gcs_sink.go b/weed/replication/sink/gcssink/gcs_sink.go index abd7c49b9..5aa978ab8 100644 --- a/weed/replication/sink/gcssink/gcs_sink.go +++ b/weed/replication/sink/gcssink/gcs_sink.go @@ -34,11 +34,11 @@ func (g *GcsSink) GetSinkToDirectory() string { return g.dir } -func (g *GcsSink) Initialize(configuration util.Configuration) error { +func (g *GcsSink) Initialize(configuration util.Configuration, prefix string) error { return g.initialize( - configuration.GetString("google_application_credentials"), - configuration.GetString("bucket"), - configuration.GetString("directory"), + configuration.GetString(prefix+"google_application_credentials"), + configuration.GetString(prefix+"bucket"), + configuration.GetString(prefix+"directory"), ) } diff --git a/weed/replication/sink/replication_sink.go b/weed/replication/sink/replication_sink.go index dd54f0005..208bbdf87 100644 --- a/weed/replication/sink/replication_sink.go +++ b/weed/replication/sink/replication_sink.go @@ -9,7 +9,7 @@ import ( type ReplicationSink interface { GetName() string - Initialize(configuration util.Configuration) error + Initialize(configuration util.Configuration, prefix string) error DeleteEntry(ctx context.Context, key string, isDirectory, deleteIncludeChunks bool) error CreateEntry(ctx context.Context, key string, entry *filer_pb.Entry) error UpdateEntry(ctx context.Context, key string, oldEntry *filer_pb.Entry, newParentPath string, newEntry *filer_pb.Entry, deleteIncludeChunks bool) (foundExistingEntry bool, err error) diff --git a/weed/replication/sink/s3sink/s3_sink.go b/weed/replication/sink/s3sink/s3_sink.go index 4cff341d0..e4e097c0f 100644 --- a/weed/replication/sink/s3sink/s3_sink.go +++ b/weed/replication/sink/s3sink/s3_sink.go @@ -39,16 +39,16 @@ func (s3sink *S3Sink) GetSinkToDirectory() string { return s3sink.dir } -func (s3sink *S3Sink) Initialize(configuration util.Configuration) error { - glog.V(0).Infof("sink.s3.region: %v", configuration.GetString("region")) - glog.V(0).Infof("sink.s3.bucket: %v", configuration.GetString("bucket")) - glog.V(0).Infof("sink.s3.directory: %v", configuration.GetString("directory")) +func (s3sink *S3Sink) Initialize(configuration util.Configuration, prefix string) error { + glog.V(0).Infof("sink.s3.region: %v", configuration.GetString(prefix+"region")) + glog.V(0).Infof("sink.s3.bucket: %v", configuration.GetString(prefix+"bucket")) + glog.V(0).Infof("sink.s3.directory: %v", configuration.GetString(prefix+"directory")) return s3sink.initialize( - configuration.GetString("aws_access_key_id"), - configuration.GetString("aws_secret_access_key"), - configuration.GetString("region"), - configuration.GetString("bucket"), - configuration.GetString("directory"), + configuration.GetString(prefix+"aws_access_key_id"), + 
configuration.GetString(prefix+"aws_secret_access_key"), + configuration.GetString(prefix+"region"), + configuration.GetString(prefix+"bucket"), + configuration.GetString(prefix+"directory"), ) } diff --git a/weed/replication/source/filer_source.go b/weed/replication/source/filer_source.go index aef13be75..c3ea44671 100644 --- a/weed/replication/source/filer_source.go +++ b/weed/replication/source/filer_source.go @@ -3,13 +3,14 @@ package source import ( "context" "fmt" - "github.com/chrislusf/seaweedfs/weed/security" - "github.com/spf13/viper" - "google.golang.org/grpc" "io" "net/http" "strings" + "google.golang.org/grpc" + + "github.com/chrislusf/seaweedfs/weed/security" + "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" "github.com/chrislusf/seaweedfs/weed/util" @@ -25,17 +26,17 @@ type FilerSource struct { Dir string } -func (fs *FilerSource) Initialize(configuration util.Configuration) error { +func (fs *FilerSource) Initialize(configuration util.Configuration, prefix string) error { return fs.initialize( - configuration.GetString("grpcAddress"), - configuration.GetString("directory"), + configuration.GetString(prefix+"grpcAddress"), + configuration.GetString(prefix+"directory"), ) } func (fs *FilerSource) initialize(grpcAddress string, dir string) (err error) { fs.grpcAddress = grpcAddress fs.Dir = dir - fs.grpcDialOption = security.LoadClientTLS(viper.Sub("grpc"), "client") + fs.grpcDialOption = security.LoadClientTLS(util.GetViper(), "grpc.client") return nil } diff --git a/weed/replication/sub/notification_aws_sqs.go b/weed/replication/sub/notification_aws_sqs.go index bed26c79c..06869e619 100644 --- a/weed/replication/sub/notification_aws_sqs.go +++ b/weed/replication/sub/notification_aws_sqs.go @@ -27,14 +27,14 @@ func (k *AwsSqsInput) GetName() string { return "aws_sqs" } -func (k *AwsSqsInput) Initialize(configuration util.Configuration) error { - glog.V(0).Infof("replication.notification.aws_sqs.region: %v", configuration.GetString("region")) - glog.V(0).Infof("replication.notification.aws_sqs.sqs_queue_name: %v", configuration.GetString("sqs_queue_name")) +func (k *AwsSqsInput) Initialize(configuration util.Configuration, prefix string) error { + glog.V(0).Infof("replication.notification.aws_sqs.region: %v", configuration.GetString(prefix+"region")) + glog.V(0).Infof("replication.notification.aws_sqs.sqs_queue_name: %v", configuration.GetString(prefix+"sqs_queue_name")) return k.initialize( - configuration.GetString("aws_access_key_id"), - configuration.GetString("aws_secret_access_key"), - configuration.GetString("region"), - configuration.GetString("sqs_queue_name"), + configuration.GetString(prefix+"aws_access_key_id"), + configuration.GetString(prefix+"aws_secret_access_key"), + configuration.GetString(prefix+"region"), + configuration.GetString(prefix+"sqs_queue_name"), ) } diff --git a/weed/replication/sub/notification_gocdk_pub_sub.go b/weed/replication/sub/notification_gocdk_pub_sub.go index eddba9ff8..9726096e5 100644 --- a/weed/replication/sub/notification_gocdk_pub_sub.go +++ b/weed/replication/sub/notification_gocdk_pub_sub.go @@ -27,8 +27,8 @@ func (k *GoCDKPubSubInput) GetName() string { return "gocdk_pub_sub" } -func (k *GoCDKPubSubInput) Initialize(config util.Configuration) error { - subURL := config.GetString("sub_url") +func (k *GoCDKPubSubInput) Initialize(configuration util.Configuration, prefix string) error { + subURL := configuration.GetString(prefix + "sub_url") 
glog.V(0).Infof("notification.gocdk_pub_sub.sub_url: %v", subURL) sub, err := pubsub.OpenSubscription(context.Background(), subURL) if err != nil { diff --git a/weed/replication/sub/notification_google_pub_sub.go b/weed/replication/sub/notification_google_pub_sub.go index ad6b42a2e..a950bb42b 100644 --- a/weed/replication/sub/notification_google_pub_sub.go +++ b/weed/replication/sub/notification_google_pub_sub.go @@ -27,13 +27,13 @@ func (k *GooglePubSubInput) GetName() string { return "google_pub_sub" } -func (k *GooglePubSubInput) Initialize(configuration util.Configuration) error { - glog.V(0).Infof("notification.google_pub_sub.project_id: %v", configuration.GetString("project_id")) - glog.V(0).Infof("notification.google_pub_sub.topic: %v", configuration.GetString("topic")) +func (k *GooglePubSubInput) Initialize(configuration util.Configuration, prefix string) error { + glog.V(0).Infof("notification.google_pub_sub.project_id: %v", configuration.GetString(prefix+"project_id")) + glog.V(0).Infof("notification.google_pub_sub.topic: %v", configuration.GetString(prefix+"topic")) return k.initialize( - configuration.GetString("google_application_credentials"), - configuration.GetString("project_id"), - configuration.GetString("topic"), + configuration.GetString(prefix+"google_application_credentials"), + configuration.GetString(prefix+"project_id"), + configuration.GetString(prefix+"topic"), ) } diff --git a/weed/replication/sub/notification_kafka.go b/weed/replication/sub/notification_kafka.go index 1a86a8307..fa9cfad9b 100644 --- a/weed/replication/sub/notification_kafka.go +++ b/weed/replication/sub/notification_kafka.go @@ -28,14 +28,14 @@ func (k *KafkaInput) GetName() string { return "kafka" } -func (k *KafkaInput) Initialize(configuration util.Configuration) error { - glog.V(0).Infof("replication.notification.kafka.hosts: %v\n", configuration.GetStringSlice("hosts")) - glog.V(0).Infof("replication.notification.kafka.topic: %v\n", configuration.GetString("topic")) +func (k *KafkaInput) Initialize(configuration util.Configuration, prefix string) error { + glog.V(0).Infof("replication.notification.kafka.hosts: %v\n", configuration.GetStringSlice(prefix+"hosts")) + glog.V(0).Infof("replication.notification.kafka.topic: %v\n", configuration.GetString(prefix+"topic")) return k.initialize( - configuration.GetStringSlice("hosts"), - configuration.GetString("topic"), - configuration.GetString("offsetFile"), - configuration.GetInt("offsetSaveIntervalSeconds"), + configuration.GetStringSlice(prefix+"hosts"), + configuration.GetString(prefix+"topic"), + configuration.GetString(prefix+"offsetFile"), + configuration.GetInt(prefix+"offsetSaveIntervalSeconds"), ) } diff --git a/weed/replication/sub/notifications.go b/weed/replication/sub/notifications.go index 66fbef824..8a2668f98 100644 --- a/weed/replication/sub/notifications.go +++ b/weed/replication/sub/notifications.go @@ -9,7 +9,7 @@ type NotificationInput interface { // GetName gets the name to locate the configuration in sync.toml file GetName() string // Initialize initializes the file store - Initialize(configuration util.Configuration) error + Initialize(configuration util.Configuration, prefix string) error ReceiveMessage() (key string, message *filer_pb.EventNotification, err error) } diff --git a/weed/security/tls.go b/weed/security/tls.go index e81ba4831..f4f525ede 100644 --- a/weed/security/tls.go +++ b/weed/security/tls.go @@ -22,7 +22,7 @@ func LoadServerTLS(config *viper.Viper, component string) grpc.ServerOption { 
glog.Errorf("load cert/key error: %v", err) return nil } - caCert, err := ioutil.ReadFile(config.GetString("ca")) + caCert, err := ioutil.ReadFile(config.GetString(component + ".ca")) if err != nil { glog.Errorf("read ca cert file error: %v", err) return nil @@ -49,7 +49,7 @@ func LoadClientTLS(config *viper.Viper, component string) grpc.DialOption { glog.Errorf("load cert/key error: %v", err) return grpc.WithInsecure() } - caCert, err := ioutil.ReadFile(config.GetString("ca")) + caCert, err := ioutil.ReadFile(config.GetString(component + ".ca")) if err != nil { glog.Errorf("read ca cert file error: %v", err) return grpc.WithInsecure() diff --git a/weed/server/filer_server.go b/weed/server/filer_server.go index 3a2eca6d4..72cca1f6f 100644 --- a/weed/server/filer_server.go +++ b/weed/server/filer_server.go @@ -14,8 +14,6 @@ import ( "github.com/chrislusf/seaweedfs/weed/stats" "github.com/chrislusf/seaweedfs/weed/util" - "github.com/spf13/viper" - "github.com/chrislusf/seaweedfs/weed/filer2" _ "github.com/chrislusf/seaweedfs/weed/filer2/cassandra" _ "github.com/chrislusf/seaweedfs/weed/filer2/etcd" @@ -61,7 +59,7 @@ func NewFilerServer(defaultMux, readonlyMux *http.ServeMux, option *FilerOption) fs = &FilerServer{ option: option, - grpcDialOption: security.LoadClientTLS(viper.Sub("grpc"), "filer"), + grpcDialOption: security.LoadClientTLS(util.GetViper(), "grpc.filer"), } if len(option.Masters) == 0 { @@ -72,7 +70,7 @@ func NewFilerServer(defaultMux, readonlyMux *http.ServeMux, option *FilerOption) go fs.filer.KeepConnectedToMaster() - v := viper.GetViper() + v := util.GetViper() if !util.LoadConfiguration("filer", false) { v.Set("leveldb2.enabled", true) v.Set("leveldb2.dir", option.DefaultLevelDbDir) @@ -86,7 +84,7 @@ func NewFilerServer(defaultMux, readonlyMux *http.ServeMux, option *FilerOption) fs.option.recursiveDelete = v.GetBool("filer.options.recursive_delete") fs.filer.LoadConfiguration(v) - notification.LoadConfiguration(v.Sub("notification")) + notification.LoadConfiguration(v, "notification.") handleStaticResources(defaultMux) if !option.DisableHttp { diff --git a/weed/server/master_server.go b/weed/server/master_server.go index 33a5129da..b3cc310e6 100644 --- a/weed/server/master_server.go +++ b/weed/server/master_server.go @@ -14,6 +14,9 @@ import ( "time" "github.com/chrislusf/raft" + "github.com/gorilla/mux" + "google.golang.org/grpc" + "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/pb/master_pb" "github.com/chrislusf/seaweedfs/weed/security" @@ -22,9 +25,6 @@ import ( "github.com/chrislusf/seaweedfs/weed/topology" "github.com/chrislusf/seaweedfs/weed/util" "github.com/chrislusf/seaweedfs/weed/wdclient" - "github.com/gorilla/mux" - "github.com/spf13/viper" - "google.golang.org/grpc" ) const ( @@ -69,7 +69,7 @@ type MasterServer struct { func NewMasterServer(r *mux.Router, option *MasterOption, peers []string) *MasterServer { - v := viper.GetViper() + v := util.GetViper() signingKey := v.GetString("jwt.signing.key") v.SetDefault("jwt.signing.expires_after_seconds", 10) expiresAfterSec := v.GetInt("jwt.signing.expires_after_seconds") @@ -83,7 +83,7 @@ func NewMasterServer(r *mux.Router, option *MasterOption, peers []string) *Maste preallocateSize = int64(option.VolumeSizeLimitMB) * (1 << 20) } - grpcDialOption := security.LoadClientTLS(v.Sub("grpc"), "master") + grpcDialOption := security.LoadClientTLS(v, "grpc.master") ms := &MasterServer{ option: option, preallocateSize: preallocateSize, @@ -183,7 +183,7 @@ func (ms *MasterServer) 
proxyToLeader(f func(w http.ResponseWriter, r *http.Requ func (ms *MasterServer) startAdminScripts() { var err error - v := viper.GetViper() + v := util.GetViper() adminScripts := v.GetString("master.maintenance.scripts") glog.V(0).Infof("adminScripts:\n%v", adminScripts) if adminScripts == "" { @@ -201,7 +201,7 @@ func (ms *MasterServer) startAdminScripts() { masterAddress := "localhost:" + strconv.Itoa(ms.option.Port) var shellOptions shell.ShellOptions - shellOptions.GrpcDialOption = security.LoadClientTLS(viper.Sub("grpc"), "master") + shellOptions.GrpcDialOption = security.LoadClientTLS(v, "grpc.master") shellOptions.Masters = &masterAddress shellOptions.FilerHost, shellOptions.FilerPort, shellOptions.Directory, err = util.ParseFilerUrl(filerURL) @@ -250,7 +250,7 @@ func (ms *MasterServer) startAdminScripts() { func (ms *MasterServer) createSequencer(option *MasterOption) sequence.Sequencer { var seq sequence.Sequencer - v := viper.GetViper() + v := util.GetViper() seqType := strings.ToLower(v.GetString(SequencerType)) glog.V(1).Infof("[%s] : [%s]", SequencerType, seqType) switch strings.ToLower(seqType) { diff --git a/weed/server/volume_grpc_client_to_master.go b/weed/server/volume_grpc_client_to_master.go index 6038752d2..dc47c2884 100644 --- a/weed/server/volume_grpc_client_to_master.go +++ b/weed/server/volume_grpc_client_to_master.go @@ -5,16 +5,17 @@ import ( "net" "time" + "google.golang.org/grpc" + "github.com/chrislusf/seaweedfs/weed/security" "github.com/chrislusf/seaweedfs/weed/storage/backend" "github.com/chrislusf/seaweedfs/weed/storage/erasure_coding" - "github.com/spf13/viper" - "google.golang.org/grpc" + + "golang.org/x/net/context" "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/pb/master_pb" "github.com/chrislusf/seaweedfs/weed/util" - "golang.org/x/net/context" ) func (vs *VolumeServer) GetMaster() string { @@ -26,7 +27,7 @@ func (vs *VolumeServer) heartbeat() { vs.store.SetDataCenter(vs.dataCenter) vs.store.SetRack(vs.rack) - grpcDialOption := security.LoadClientTLS(viper.Sub("grpc"), "volume") + grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.volume") var err error var newLeader string diff --git a/weed/server/volume_server.go b/weed/server/volume_server.go index a406b36cc..0fdcf662a 100644 --- a/weed/server/volume_server.go +++ b/weed/server/volume_server.go @@ -7,8 +7,7 @@ import ( "google.golang.org/grpc" "github.com/chrislusf/seaweedfs/weed/stats" - - "github.com/spf13/viper" + "github.com/chrislusf/seaweedfs/weed/util" "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/security" @@ -47,7 +46,7 @@ func NewVolumeServer(adminMux, publicMux *http.ServeMux, ip string, fileSizeLimitMB int, ) *VolumeServer { - v := viper.GetViper() + v := util.GetViper() signingKey := v.GetString("jwt.signing.key") v.SetDefault("jwt.signing.expires_after_seconds", 10) expiresAfterSec := v.GetInt("jwt.signing.expires_after_seconds") @@ -64,7 +63,7 @@ func NewVolumeServer(adminMux, publicMux *http.ServeMux, ip string, needleMapKind: needleMapKind, FixJpgOrientation: fixJpgOrientation, ReadRedirect: readRedirect, - grpcDialOption: security.LoadClientTLS(viper.Sub("grpc"), "volume"), + grpcDialOption: security.LoadClientTLS(util.GetViper(), "grpc.volume"), compactionBytePerSecond: int64(compactionMBPerSecond) * 1024 * 1024, fileSizeLimitBytes: int64(fileSizeLimitMB) * 1024 * 1024, } diff --git a/weed/server/webdav_server.go b/weed/server/webdav_server.go index bdb6b61a9..d75869f30 100644 --- 
a/weed/server/webdav_server.go +++ b/weed/server/webdav_server.go @@ -17,8 +17,6 @@ import ( "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" "github.com/chrislusf/seaweedfs/weed/util" - "github.com/spf13/viper" - "github.com/chrislusf/seaweedfs/weed/filer2" "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/security" @@ -49,7 +47,7 @@ func NewWebDavServer(option *WebDavOption) (ws *WebDavServer, err error) { ws = &WebDavServer{ option: option, - grpcDialOption: security.LoadClientTLS(viper.Sub("grpc"), "filer"), + grpcDialOption: security.LoadClientTLS(util.GetViper(), "grpc.filer"), Handler: &webdav.Handler{ FileSystem: fs, LockSystem: webdav.NewMemLS(), diff --git a/weed/shell/command_fs_meta_notify.go b/weed/shell/command_fs_meta_notify.go index a898df7a0..e2b2d22cc 100644 --- a/weed/shell/command_fs_meta_notify.go +++ b/weed/shell/command_fs_meta_notify.go @@ -5,8 +5,6 @@ import ( "fmt" "io" - "github.com/spf13/viper" - "github.com/chrislusf/seaweedfs/weed/filer2" "github.com/chrislusf/seaweedfs/weed/notification" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" @@ -42,8 +40,8 @@ func (c *commandFsMetaNotify) Do(args []string, commandEnv *CommandEnv, writer i } util.LoadConfiguration("notification", true) - v := viper.GetViper() - notification.LoadConfiguration(v.Sub("notification")) + v := util.GetViper() + notification.LoadConfiguration(v, "notification.") ctx := context.Background() diff --git a/weed/storage/backend/backend.go b/weed/storage/backend/backend.go index 6ea850543..6941ca5a1 100644 --- a/weed/storage/backend/backend.go +++ b/weed/storage/backend/backend.go @@ -35,7 +35,7 @@ type StringProperties interface { type StorageType string type BackendStorageFactory interface { StorageType() StorageType - BuildStorage(configuration StringProperties, id string) (BackendStorage, error) + BuildStorage(configuration StringProperties, configPrefix string, id string) (BackendStorage, error) } var ( @@ -48,19 +48,17 @@ func LoadConfiguration(config *viper.Viper) { StorageBackendPrefix := "storage.backend" - backendSub := config.Sub(StorageBackendPrefix) - for backendTypeName := range config.GetStringMap(StorageBackendPrefix) { backendStorageFactory, found := BackendStorageFactories[StorageType(backendTypeName)] if !found { glog.Fatalf("backend storage type %s not found", backendTypeName) } - backendTypeSub := backendSub.Sub(backendTypeName) - for backendStorageId := range backendSub.GetStringMap(backendTypeName) { - if !backendTypeSub.GetBool(backendStorageId + ".enabled") { + for backendStorageId := range config.GetStringMap(StorageBackendPrefix + "." + backendTypeName) { + if !config.GetBool(StorageBackendPrefix + "." + backendTypeName + "." 
+ backendStorageId + ".enabled") { continue } - backendStorage, buildErr := backendStorageFactory.BuildStorage(backendTypeSub.Sub(backendStorageId), backendStorageId) + backendStorage, buildErr := backendStorageFactory.BuildStorage(config, + StorageBackendPrefix+"."+backendTypeName+"."+backendStorageId+".", backendStorageId) if buildErr != nil { glog.Fatalf("fail to create backend storage %s.%s", backendTypeName, backendStorageId) } @@ -82,7 +80,7 @@ func LoadFromPbStorageBackends(storageBackends []*master_pb.StorageBackend) { glog.Warningf("storage type %s not found", storageBackend.Type) continue } - backendStorage, buildErr := backendStorageFactory.BuildStorage(newProperties(storageBackend.Properties), storageBackend.Id) + backendStorage, buildErr := backendStorageFactory.BuildStorage(newProperties(storageBackend.Properties), "", storageBackend.Id) if buildErr != nil { glog.Fatalf("fail to create backend storage %s.%s", storageBackend.Type, storageBackend.Id) } diff --git a/weed/storage/backend/s3_backend/s3_backend.go b/weed/storage/backend/s3_backend/s3_backend.go index 9f03cfa81..8d71861c2 100644 --- a/weed/storage/backend/s3_backend/s3_backend.go +++ b/weed/storage/backend/s3_backend/s3_backend.go @@ -26,8 +26,8 @@ type S3BackendFactory struct { func (factory *S3BackendFactory) StorageType() backend.StorageType { return backend.StorageType("s3") } -func (factory *S3BackendFactory) BuildStorage(configuration backend.StringProperties, id string) (backend.BackendStorage, error) { - return newS3BackendStorage(configuration, id) +func (factory *S3BackendFactory) BuildStorage(configuration backend.StringProperties, configPrefix string, id string) (backend.BackendStorage, error) { + return newS3BackendStorage(configuration, configPrefix, id) } type S3BackendStorage struct { @@ -39,13 +39,13 @@ type S3BackendStorage struct { conn s3iface.S3API } -func newS3BackendStorage(configuration backend.StringProperties, id string) (s *S3BackendStorage, err error) { +func newS3BackendStorage(configuration backend.StringProperties, configPrefix string, id string) (s *S3BackendStorage, err error) { s = &S3BackendStorage{} s.id = id - s.aws_access_key_id = configuration.GetString("aws_access_key_id") - s.aws_secret_access_key = configuration.GetString("aws_secret_access_key") - s.region = configuration.GetString("region") - s.bucket = configuration.GetString("bucket") + s.aws_access_key_id = configuration.GetString(configPrefix + "aws_access_key_id") + s.aws_secret_access_key = configuration.GetString(configPrefix + "aws_secret_access_key") + s.region = configuration.GetString(configPrefix + "region") + s.bucket = configuration.GetString(configPrefix + "bucket") s.conn, err = createSession(s.aws_access_key_id, s.aws_secret_access_key, s.region) glog.V(0).Infof("created backend storage s3.%s for region %s bucket %s", s.id, s.region, s.bucket) diff --git a/weed/storage/volume_vacuum.go b/weed/storage/volume_vacuum.go index 0ca9016c8..523b37e34 100644 --- a/weed/storage/volume_vacuum.go +++ b/weed/storage/volume_vacuum.go @@ -356,7 +356,7 @@ func (v *Volume) copyDataAndGenerateIndexFile(dstName, idxName string, prealloca func copyDataBasedOnIndexFile(srcDatName, srcIdxName, dstDatName, datIdxName string, sb super_block.SuperBlock, version needle.Version, preallocate int64) (err error) { var ( srcDatBackend, dstDatBackend backend.BackendStorageFile - dataFile *os.File + dataFile *os.File ) if dstDatBackend, err = createVolumeFile(dstDatName, preallocate, 0); err != nil { return diff --git 
a/weed/util/config.go b/weed/util/config.go index 7b86b749e..dfbfdbd82 100644 --- a/weed/util/config.go +++ b/weed/util/config.go @@ -1,8 +1,11 @@ package util import ( - "github.com/chrislusf/seaweedfs/weed/glog" + "strings" + "github.com/spf13/viper" + + "github.com/chrislusf/seaweedfs/weed/glog" ) type Configuration interface { @@ -37,3 +40,11 @@ func LoadConfiguration(configFileName string, required bool) (loaded bool) { return true } + +func GetViper() *viper.Viper { + v := viper.GetViper() + v.AutomaticEnv() + v.SetEnvPrefix("weed") + v.SetEnvKeyReplacer(strings.NewReplacer(".", "_")) + return v +} diff --git a/weed/util/config_test.go b/weed/util/config_test.go new file mode 100644 index 000000000..659814a4a --- /dev/null +++ b/weed/util/config_test.go @@ -0,0 +1,24 @@ +package util + +import ( + "os" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestAllKeysWithEnv(t *testing.T) { + + v := GetViper() + v.BindEnv("id") + v.BindEnv("foo", "foo") + + // bind and define environment variables (including a nested one) + os.Setenv("WEED_ID", "13") + os.Setenv("WEED_FOO_BAR", "baz") + + sub := v.Sub("foo") + + assert.Equal(t, "13", v.GetString("id")) + assert.Equal(t, "baz", sub.GetString("bar")) +} From 8925f3305d6dc14e9750bd8eab92d614eb984edb Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Wed, 29 Jan 2020 09:11:07 -0800 Subject: [PATCH 0092/2432] adjust example --- weed/command/scaffold.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/weed/command/scaffold.go b/weed/command/scaffold.go index 524bf5e13..3aebff396 100644 --- a/weed/command/scaffold.go +++ b/weed/command/scaffold.go @@ -16,7 +16,7 @@ var cmdScaffold = &Command{ The options can also be overwritten by environment variables. For example, the filer.toml mysql password can be overwritten by environment variable - export weed.mysql.password=some_password + export WEED_MYSQL_PASSWORD=some_password Environment variable rules: * Prefix fix with "WEED_" * Upppercase the reset of variable name. 
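The configuration refactor in the series above swaps viper.Sub("...") sub-trees for the shared util.GetViper() plus explicit key prefixes ("grpc.client", "mysql.", "notification.", ...), because a sub-tree returned by Sub() does not carry over values that exist only as environment overrides. Below is a minimal, self-contained sketch of the resulting lookup pattern, not code from the patches: it assumes only the spf13/viper calls shown in weed/util/config.go, and the "mysql." prefix, the WEED_MYSQL_PASSWORD value, and the standalone main() are illustrative.

    package main

    import (
        "fmt"
        "os"
        "strings"

        "github.com/spf13/viper"
    )

    func main() {
        // Illustrative override: an operator sets a filer.toml key from the
        // environment instead of editing the file.
        os.Setenv("WEED_MYSQL_PASSWORD", "secret_from_env")

        // Same setup as util.GetViper() in the diff above.
        v := viper.GetViper()
        v.AutomaticEnv()
        v.SetEnvPrefix("weed")
        v.SetEnvKeyReplacer(strings.NewReplacer(".", "_"))

        // Stores and sinks now receive the root viper plus a key prefix instead
        // of a Sub() tree, so "mysql.password" maps to WEED_MYSQL_PASSWORD and
        // the environment value is picked up here.
        prefix := "mysql."
        fmt.Println(v.GetString(prefix + "password")) // prints: secret_from_env
    }

The same prefix convention explains the "grpc.client"-style component names now passed to security.LoadClientTLS and LoadServerTLS: the component is simply another key prefix (for example "grpc.client.ca") resolved against the root configuration.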
From 2a870875901d0ce1269e644bbf02056d5d7e848f Mon Sep 17 00:00:00 2001 From: eryx67 Date: Thu, 30 Jan 2020 09:51:58 +0500 Subject: [PATCH 0093/2432] support wav files compression --- weed/util/compression.go | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/weed/util/compression.go b/weed/util/compression.go index c6c9423e2..6072df632 100644 --- a/weed/util/compression.go +++ b/weed/util/compression.go @@ -60,7 +60,7 @@ func UnGzipData(input []byte) ([]byte, error) { // images switch ext { - case ".svg", ".bmp": + case ".svg", ".bmp", ".wav": return true, true } if strings.HasPrefix(mtype, "image/") { @@ -87,6 +87,14 @@ func UnGzipData(input []byte) ([]byte, error) { if strings.HasSuffix(mtype, "script") { return true, true } + + } + + if strings.HasPrefix(mtype, "audio/") { + switch strings.TrimPrefix(mtype, "audio/") { + case "wave", "wav", "x-wav", "x-pn-wav": + return true, true + } } return false, false From 20b1fb9ab73866c03f2bbaf4a286d3cd7e46a541 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Wed, 29 Jan 2020 21:24:06 -0800 Subject: [PATCH 0094/2432] fix test --- weed/util/config_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/weed/util/config_test.go b/weed/util/config_test.go index 659814a4a..b14ec2fa9 100644 --- a/weed/util/config_test.go +++ b/weed/util/config_test.go @@ -20,5 +20,5 @@ func TestAllKeysWithEnv(t *testing.T) { sub := v.Sub("foo") assert.Equal(t, "13", v.GetString("id")) - assert.Equal(t, "baz", sub.GetString("bar")) + assert.Equal(t, nil, sub) } From abf90ad7b794b37672e68dd7b10689c5b3b41e29 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Wed, 29 Jan 2020 21:38:53 -0800 Subject: [PATCH 0095/2432] remove testing for viper --- weed/util/config_test.go | 24 ------------------------ 1 file changed, 24 deletions(-) delete mode 100644 weed/util/config_test.go diff --git a/weed/util/config_test.go b/weed/util/config_test.go deleted file mode 100644 index b14ec2fa9..000000000 --- a/weed/util/config_test.go +++ /dev/null @@ -1,24 +0,0 @@ -package util - -import ( - "os" - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestAllKeysWithEnv(t *testing.T) { - - v := GetViper() - v.BindEnv("id") - v.BindEnv("foo", "foo") - - // bind and define environment variables (including a nested one) - os.Setenv("WEED_ID", "13") - os.Setenv("WEED_FOO_BAR", "baz") - - sub := v.Sub("foo") - - assert.Equal(t, "13", v.GetString("id")) - assert.Equal(t, nil, sub) -} From a80ecbfe84486487c868c90fc28d14ab337524d3 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Fri, 31 Jan 2020 00:11:08 -0800 Subject: [PATCH 0096/2432] s3: add s3 copy fix https://github.com/chrislusf/seaweedfs/issues/1190 --- weed/s3api/s3api_errors.go | 14 +++ weed/s3api/s3api_object_copy_handlers.go | 152 +++++++++++++++++++++++ weed/s3api/s3api_object_handlers.go | 3 +- weed/s3api/s3api_server.go | 9 +- weed/s3api/s3api_test.go | 32 +++++ weed/util/http_util.go | 21 ++++ 6 files changed, 225 insertions(+), 6 deletions(-) create mode 100644 weed/s3api/s3api_object_copy_handlers.go create mode 100644 weed/s3api/s3api_test.go diff --git a/weed/s3api/s3api_errors.go b/weed/s3api/s3api_errors.go index 7ba55ed28..96f8d9fd6 100644 --- a/weed/s3api/s3api_errors.go +++ b/weed/s3api/s3api_errors.go @@ -41,6 +41,8 @@ const ( ErrInvalidPartNumberMarker ErrInvalidPart ErrInternalError + ErrInvalidCopyDest + ErrInvalidCopySource ErrNotImplemented ) @@ -118,6 +120,18 @@ var errorCodeResponse = map[ErrorCode]APIError{ Description: "One or more of the specified parts could not be 
found. The part may not have been uploaded, or the specified entity tag may not match the part's entity tag.", HTTPStatusCode: http.StatusBadRequest, }, + + ErrInvalidCopyDest: { + Code: "InvalidRequest", + Description: "This copy request is illegal because it is trying to copy an object to itself without changing the object's metadata, storage class, website redirect location or encryption attributes.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrInvalidCopySource: { + Code: "InvalidArgument", + Description: "Copy Source must mention the source bucket and key: sourcebucket/sourcekey.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrNotImplemented: { Code: "NotImplemented", Description: "A header you provided implies functionality that is not implemented", diff --git a/weed/s3api/s3api_object_copy_handlers.go b/weed/s3api/s3api_object_copy_handlers.go new file mode 100644 index 000000000..374e7ec9f --- /dev/null +++ b/weed/s3api/s3api_object_copy_handlers.go @@ -0,0 +1,152 @@ +package s3api + +import ( + "fmt" + "net/http" + "net/url" + "strconv" + "strings" + "time" + + "github.com/gorilla/mux" + + "github.com/chrislusf/seaweedfs/weed/util" +) + +func (s3a *S3ApiServer) CopyObjectHandler(w http.ResponseWriter, r *http.Request) { + + vars := mux.Vars(r) + dstBucket := vars["bucket"] + dstObject := getObject(vars) + + // Copy source path. + cpSrcPath, err := url.QueryUnescape(r.Header.Get("X-Amz-Copy-Source")) + if err != nil { + // Save unescaped string as is. + cpSrcPath = r.Header.Get("X-Amz-Copy-Source") + } + + srcBucket, srcObject := pathToBucketAndObject(cpSrcPath) + // If source object is empty or bucket is empty, reply back invalid copy source. + if srcObject == "" || srcBucket == "" { + writeErrorResponse(w, ErrInvalidCopySource, r.URL) + return + } + + if srcBucket == dstBucket && srcObject == dstObject { + writeErrorResponse(w, ErrInvalidCopySource, r.URL) + return + } + + dstUrl := fmt.Sprintf("http://%s%s/%s%s?collection=%s", + s3a.option.Filer, s3a.option.BucketsPath, dstBucket, dstObject, dstBucket) + srcUrl := fmt.Sprintf("http://%s%s/%s%s", + s3a.option.Filer, s3a.option.BucketsPath, srcBucket, srcObject) + + _, _, dataReader, err := util.DownloadFile(srcUrl) + if err != nil { + writeErrorResponse(w, ErrInvalidCopySource, r.URL) + return + } + + etag, errCode := s3a.putToFiler(r, dstUrl, dataReader) + + println("srcUrl:", srcUrl) + println("dstUrl:", dstUrl) + + if errCode != ErrNone { + writeErrorResponse(w, errCode, r.URL) + return + } + + setEtag(w, etag) + + response := CopyObjectResult{ + ETag: etag, + LastModified: time.Now(), + } + + writeSuccessResponseXML(w, encodeResponse(response)) + +} + +func pathToBucketAndObject(path string) (bucket, object string) { + path = strings.TrimPrefix(path, "/") + parts := strings.SplitN(path, "/", 2) + if len(parts) == 2 { + return parts[0], "/" + parts[1] + } + return parts[0], "/" +} + +type CopyPartResult struct { + LastModified time.Time `xml:"LastModified"` + ETag string `xml:"ETag"` +} + +func (s3a *S3ApiServer) CopyObjectPartHandler(w http.ResponseWriter, r *http.Request) { + // https://docs.aws.amazon.com/AmazonS3/latest/dev/CopyingObjctsUsingRESTMPUapi.html + // https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html + vars := mux.Vars(r) + dstBucket := vars["bucket"] + // dstObject := getObject(vars) + + // Copy source path. + cpSrcPath, err := url.QueryUnescape(r.Header.Get("X-Amz-Copy-Source")) + if err != nil { + // Save unescaped string as is. 
+ cpSrcPath = r.Header.Get("X-Amz-Copy-Source") + } + + srcBucket, srcObject := pathToBucketAndObject(cpSrcPath) + // If source object is empty or bucket is empty, reply back invalid copy source. + if srcObject == "" || srcBucket == "" { + writeErrorResponse(w, ErrInvalidCopySource, r.URL) + return + } + + uploadID := r.URL.Query().Get("uploadId") + partIDString := r.URL.Query().Get("partNumber") + + partID, err := strconv.Atoi(partIDString) + if err != nil { + writeErrorResponse(w, ErrInvalidPart, r.URL) + return + } + + // check partID with maximum part ID for multipart objects + if partID > 10000 { + writeErrorResponse(w, ErrInvalidMaxParts, r.URL) + return + } + + rangeHeader := r.Header.Get("x-amz-copy-source-range") + + dstUrl := fmt.Sprintf("http://%s%s/%s/%04d.part?collection=%s", + s3a.option.Filer, s3a.genUploadsFolder(dstBucket), uploadID, partID-1, dstBucket) + srcUrl := fmt.Sprintf("http://%s%s/%s%s", + s3a.option.Filer, s3a.option.BucketsPath, srcBucket, srcObject) + + dataReader, err := util.ReadUrlAsReaderCloser(srcUrl, rangeHeader) + if err != nil { + writeErrorResponse(w, ErrInvalidCopySource, r.URL) + return + } + + etag, errCode := s3a.putToFiler(r, dstUrl, dataReader) + + if errCode != ErrNone { + writeErrorResponse(w, errCode, r.URL) + return + } + + setEtag(w, etag) + + response := CopyPartResult{ + ETag: etag, + LastModified: time.Now(), + } + + writeSuccessResponseXML(w, encodeResponse(response)) + +} diff --git a/weed/s3api/s3api_object_handlers.go b/weed/s3api/s3api_object_handlers.go index 44e93d297..8dc733eb9 100644 --- a/weed/s3api/s3api_object_handlers.go +++ b/weed/s3api/s3api_object_handlers.go @@ -9,9 +9,10 @@ import ( "net/http" "strings" + "github.com/gorilla/mux" + "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/server" - "github.com/gorilla/mux" ) var ( diff --git a/weed/s3api/s3api_server.go b/weed/s3api/s3api_server.go index edf634444..2233c8384 100644 --- a/weed/s3api/s3api_server.go +++ b/weed/s3api/s3api_server.go @@ -44,6 +44,8 @@ func (s3a *S3ApiServer) registerRouter(router *mux.Router) { // HeadBucket bucket.Methods("HEAD").HandlerFunc(s3a.HeadBucketHandler) + // CopyObjectPart + bucket.Methods("PUT").Path("/{object:.+}").HeadersRegexp("X-Amz-Copy-Source", ".*?(\\/|%2F).*?").HandlerFunc(s3a.CopyObjectPartHandler).Queries("partNumber", "{partNumber:[0-9]+}", "uploadId", "{uploadId:.*}") // PutObjectPart bucket.Methods("PUT").Path("/{object:.+}").HandlerFunc(s3a.PutObjectPartHandler).Queries("partNumber", "{partNumber:[0-9]+}", "uploadId", "{uploadId:.*}") // CompleteMultipartUpload @@ -57,6 +59,8 @@ func (s3a *S3ApiServer) registerRouter(router *mux.Router) { // ListMultipartUploads bucket.Methods("GET").HandlerFunc(s3a.ListMultipartUploadsHandler).Queries("uploads", "") + // CopyObject + bucket.Methods("PUT").Path("/{object:.+}").HeadersRegexp("X-Amz-Copy-Source", ".*?(\\/|%2F).*?").HandlerFunc(s3a.CopyObjectHandler) // PutObject bucket.Methods("PUT").Path("/{object:.+}").HandlerFunc(s3a.PutObjectHandler) // PutBucket @@ -77,11 +81,6 @@ func (s3a *S3ApiServer) registerRouter(router *mux.Router) { // DeleteMultipleObjects bucket.Methods("POST").HandlerFunc(s3a.DeleteMultipleObjectsHandler).Queries("delete", "") /* - // CopyObject - bucket.Methods("PUT").Path("/{object:.+}").HeadersRegexp("X-Amz-Copy-Source", ".*?(\\/|%2F).*?").HandlerFunc(s3a.CopyObjectHandler) - - // CopyObjectPart - bucket.Methods("PUT").Path("/{object:.+}").HeadersRegexp("X-Amz-Copy-Source", 
".*?(\\/|%2F).*?").HandlerFunc(s3a.CopyObjectPartHandler).Queries("partNumber", "{partNumber:[0-9]+}", "uploadId", "{uploadId:.*}") // not implemented // GetBucketLocation diff --git a/weed/s3api/s3api_test.go b/weed/s3api/s3api_test.go new file mode 100644 index 000000000..026766beb --- /dev/null +++ b/weed/s3api/s3api_test.go @@ -0,0 +1,32 @@ +package s3api + +import ( + "testing" + "time" +) + +func TestCopyObjectResponse(t *testing.T) { + + // https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html + + response := CopyObjectResult{ + ETag: "12345678", + LastModified: time.Now(), + } + + println(string(encodeResponse(response))) + +} + +func TestCopyPartResponse(t *testing.T) { + + // https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html + + response := CopyPartResult{ + ETag: "12345678", + LastModified: time.Now(), + } + + println(string(encodeResponse(response))) + +} diff --git a/weed/util/http_util.go b/weed/util/http_util.go index 740d41967..08007a038 100644 --- a/weed/util/http_util.go +++ b/weed/util/http_util.go @@ -286,3 +286,24 @@ func ReadUrlAsStream(fileUrl string, offset int64, size int, fn func(data []byte } } + +func ReadUrlAsReaderCloser(fileUrl string, rangeHeader string) (io.ReadCloser, error) { + + req, err := http.NewRequest("GET", fileUrl, nil) + if err != nil { + return nil, err + } + if rangeHeader != "" { + req.Header.Add("Range", rangeHeader) + } + + r, err := client.Do(req) + if err != nil { + return nil, err + } + if r.StatusCode >= 400 { + return nil, fmt.Errorf("%s: %s", fileUrl, r.Status) + } + + return r.Body, nil +} From b2743afaee2b73c403688c798c4b4d1f30603d17 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Fri, 31 Jan 2020 00:21:18 -0800 Subject: [PATCH 0097/2432] remove println --- weed/s3api/s3api_object_copy_handlers.go | 3 --- 1 file changed, 3 deletions(-) diff --git a/weed/s3api/s3api_object_copy_handlers.go b/weed/s3api/s3api_object_copy_handlers.go index 374e7ec9f..5e0fa5de1 100644 --- a/weed/s3api/s3api_object_copy_handlers.go +++ b/weed/s3api/s3api_object_copy_handlers.go @@ -51,9 +51,6 @@ func (s3a *S3ApiServer) CopyObjectHandler(w http.ResponseWriter, r *http.Request etag, errCode := s3a.putToFiler(r, dstUrl, dataReader) - println("srcUrl:", srcUrl) - println("dstUrl:", dstUrl) - if errCode != ErrNone { writeErrorResponse(w, errCode, r.URL) return From beb0a1599ee5595b3f3314176cfbf66e63495cda Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Fri, 31 Jan 2020 00:32:09 -0800 Subject: [PATCH 0098/2432] 1.53 --- weed/util/constants.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/weed/util/constants.go b/weed/util/constants.go index 4e89291dd..3d61b2006 100644 --- a/weed/util/constants.go +++ b/weed/util/constants.go @@ -5,5 +5,5 @@ import ( ) var ( - VERSION = fmt.Sprintf("%s %d.%d", sizeLimit, 1, 52) + VERSION = fmt.Sprintf("%s %d.%d", sizeLimit, 1, 53) ) From fb19263a719b07d14f59cf8906c03a2a7d7ca3b8 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Fri, 31 Jan 2020 00:59:48 -0800 Subject: [PATCH 0099/2432] fix build --- weed/filer2/tikv/tikv_store_unsupported.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/weed/filer2/tikv/tikv_store_unsupported.go b/weed/filer2/tikv/tikv_store_unsupported.go index 36de2d974..daf29612e 100644 --- a/weed/filer2/tikv/tikv_store_unsupported.go +++ b/weed/filer2/tikv/tikv_store_unsupported.go @@ -21,7 +21,7 @@ func (store *TikvStore) GetName() string { return "tikv" } -func (store *TikvStore) Initialize(configuration weed_util.Configuration) (err error) 
{ +func (store *TikvStore) Initialize(configuration weed_util.Configuration, prefix string) (err error) { return fmt.Errorf("not implemented for 32 bit computers") } From 40ae533fa34ae6e40fa31d6007e533a23391a5d8 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Sun, 2 Feb 2020 15:37:23 -0800 Subject: [PATCH 0100/2432] shell: add volume.configure.replication to change replication for a volume fix https://github.com/chrislusf/seaweedfs/issues/1192 --- weed/pb/volume_info.go | 15 +- weed/pb/volume_server.proto | 11 + weed/pb/volume_server_pb/volume_server.pb.go | 550 ++++++++++-------- weed/server/volume_grpc_admin.go | 37 ++ .../command_volume_configure_replication.go | 105 ++++ weed/storage/disk_location.go | 30 +- weed/storage/erasure_coding/ec_volume.go | 2 +- weed/storage/store.go | 28 + weed/storage/volume_super_block.go | 8 + weed/storage/volume_tier.go | 2 +- 10 files changed, 536 insertions(+), 252 deletions(-) create mode 100644 weed/shell/command_volume_configure_replication.go diff --git a/weed/pb/volume_info.go b/weed/pb/volume_info.go index b2edf9c5e..c4f733f5c 100644 --- a/weed/pb/volume_info.go +++ b/weed/pb/volume_info.go @@ -15,39 +15,40 @@ import ( ) // MaybeLoadVolumeInfo load the file data as *volume_server_pb.VolumeInfo, the returned volumeInfo will not be nil -func MaybeLoadVolumeInfo(fileName string) (*volume_server_pb.VolumeInfo, bool) { +func MaybeLoadVolumeInfo(fileName string) (*volume_server_pb.VolumeInfo, bool, error) { volumeInfo := &volume_server_pb.VolumeInfo{} glog.V(1).Infof("maybeLoadVolumeInfo checks %s", fileName) if exists, canRead, _, _, _ := util.CheckFile(fileName); !exists || !canRead { if !exists { - return volumeInfo, false + return volumeInfo, false, nil } if !canRead { glog.Warningf("can not read %s", fileName) + return volumeInfo, false, fmt.Errorf("can not read %s", fileName) } - return volumeInfo, false + return volumeInfo, false, nil } glog.V(1).Infof("maybeLoadVolumeInfo reads %s", fileName) tierData, readErr := ioutil.ReadFile(fileName) if readErr != nil { glog.Warningf("fail to read %s : %v", fileName, readErr) - return volumeInfo, false + return volumeInfo, false, fmt.Errorf("fail to read %s : %v", fileName, readErr) } glog.V(1).Infof("maybeLoadVolumeInfo Unmarshal volume info %v", fileName) if err := jsonpb.Unmarshal(bytes.NewReader(tierData), volumeInfo); err != nil { glog.Warningf("unmarshal error: %v", err) - return volumeInfo, false + return volumeInfo, false, fmt.Errorf("unmarshal error: %v", err) } if len(volumeInfo.GetFiles()) == 0 { - return volumeInfo, false + return volumeInfo, false, nil } - return volumeInfo, true + return volumeInfo, true, nil } func SaveVolumeInfo(fileName string, volumeInfo *volume_server_pb.VolumeInfo) error { diff --git a/weed/pb/volume_server.proto b/weed/pb/volume_server.proto index 9cf7272ef..405d41e9c 100644 --- a/weed/pb/volume_server.proto +++ b/weed/pb/volume_server.proto @@ -35,6 +35,8 @@ service VolumeServer { } rpc VolumeMarkReadonly (VolumeMarkReadonlyRequest) returns (VolumeMarkReadonlyResponse) { } + rpc VolumeConfigure (VolumeConfigureRequest) returns (VolumeConfigureResponse) { + } // copy the .idx .dat files, and mount this volume rpc VolumeCopy (VolumeCopyRequest) returns (VolumeCopyResponse) { @@ -189,6 +191,14 @@ message VolumeMarkReadonlyRequest { message VolumeMarkReadonlyResponse { } +message VolumeConfigureRequest { + uint32 volume_id = 1; + string replication = 2; +} +message VolumeConfigureResponse { + string error = 1; +} + message VolumeCopyRequest { uint32 volume_id = 1; string 
collection = 2; @@ -355,6 +365,7 @@ message RemoteFile { message VolumeInfo { repeated RemoteFile files = 1; uint32 version = 2; + string replication = 3; } message VolumeTierMoveDatToRemoteRequest { diff --git a/weed/pb/volume_server_pb/volume_server.pb.go b/weed/pb/volume_server_pb/volume_server.pb.go index 1c2e10d8e..2a8f91bc5 100644 --- a/weed/pb/volume_server_pb/volume_server.pb.go +++ b/weed/pb/volume_server_pb/volume_server.pb.go @@ -37,6 +37,8 @@ It has these top-level messages: VolumeDeleteResponse VolumeMarkReadonlyRequest VolumeMarkReadonlyResponse + VolumeConfigureRequest + VolumeConfigureResponse VolumeCopyRequest VolumeCopyResponse CopyFileRequest @@ -602,6 +604,46 @@ func (m *VolumeMarkReadonlyResponse) String() string { return proto.C func (*VolumeMarkReadonlyResponse) ProtoMessage() {} func (*VolumeMarkReadonlyResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{27} } +type VolumeConfigureRequest struct { + VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` + Replication string `protobuf:"bytes,2,opt,name=replication" json:"replication,omitempty"` +} + +func (m *VolumeConfigureRequest) Reset() { *m = VolumeConfigureRequest{} } +func (m *VolumeConfigureRequest) String() string { return proto.CompactTextString(m) } +func (*VolumeConfigureRequest) ProtoMessage() {} +func (*VolumeConfigureRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{28} } + +func (m *VolumeConfigureRequest) GetVolumeId() uint32 { + if m != nil { + return m.VolumeId + } + return 0 +} + +func (m *VolumeConfigureRequest) GetReplication() string { + if m != nil { + return m.Replication + } + return "" +} + +type VolumeConfigureResponse struct { + Error string `protobuf:"bytes,1,opt,name=error" json:"error,omitempty"` +} + +func (m *VolumeConfigureResponse) Reset() { *m = VolumeConfigureResponse{} } +func (m *VolumeConfigureResponse) String() string { return proto.CompactTextString(m) } +func (*VolumeConfigureResponse) ProtoMessage() {} +func (*VolumeConfigureResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{29} } + +func (m *VolumeConfigureResponse) GetError() string { + if m != nil { + return m.Error + } + return "" +} + type VolumeCopyRequest struct { VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` Collection string `protobuf:"bytes,2,opt,name=collection" json:"collection,omitempty"` @@ -613,7 +655,7 @@ type VolumeCopyRequest struct { func (m *VolumeCopyRequest) Reset() { *m = VolumeCopyRequest{} } func (m *VolumeCopyRequest) String() string { return proto.CompactTextString(m) } func (*VolumeCopyRequest) ProtoMessage() {} -func (*VolumeCopyRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{28} } +func (*VolumeCopyRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{30} } func (m *VolumeCopyRequest) GetVolumeId() uint32 { if m != nil { @@ -657,7 +699,7 @@ type VolumeCopyResponse struct { func (m *VolumeCopyResponse) Reset() { *m = VolumeCopyResponse{} } func (m *VolumeCopyResponse) String() string { return proto.CompactTextString(m) } func (*VolumeCopyResponse) ProtoMessage() {} -func (*VolumeCopyResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{29} } +func (*VolumeCopyResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{31} } func (m *VolumeCopyResponse) GetLastAppendAtNs() uint64 { if m != nil { @@ -679,7 +721,7 @@ type CopyFileRequest struct { func (m *CopyFileRequest) Reset() { 
*m = CopyFileRequest{} } func (m *CopyFileRequest) String() string { return proto.CompactTextString(m) } func (*CopyFileRequest) ProtoMessage() {} -func (*CopyFileRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{30} } +func (*CopyFileRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{32} } func (m *CopyFileRequest) GetVolumeId() uint32 { if m != nil { @@ -737,7 +779,7 @@ type CopyFileResponse struct { func (m *CopyFileResponse) Reset() { *m = CopyFileResponse{} } func (m *CopyFileResponse) String() string { return proto.CompactTextString(m) } func (*CopyFileResponse) ProtoMessage() {} -func (*CopyFileResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{31} } +func (*CopyFileResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{33} } func (m *CopyFileResponse) GetFileContent() []byte { if m != nil { @@ -755,7 +797,7 @@ type VolumeTailSenderRequest struct { func (m *VolumeTailSenderRequest) Reset() { *m = VolumeTailSenderRequest{} } func (m *VolumeTailSenderRequest) String() string { return proto.CompactTextString(m) } func (*VolumeTailSenderRequest) ProtoMessage() {} -func (*VolumeTailSenderRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{32} } +func (*VolumeTailSenderRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{34} } func (m *VolumeTailSenderRequest) GetVolumeId() uint32 { if m != nil { @@ -787,7 +829,7 @@ type VolumeTailSenderResponse struct { func (m *VolumeTailSenderResponse) Reset() { *m = VolumeTailSenderResponse{} } func (m *VolumeTailSenderResponse) String() string { return proto.CompactTextString(m) } func (*VolumeTailSenderResponse) ProtoMessage() {} -func (*VolumeTailSenderResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{33} } +func (*VolumeTailSenderResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{35} } func (m *VolumeTailSenderResponse) GetNeedleHeader() []byte { if m != nil { @@ -820,7 +862,7 @@ type VolumeTailReceiverRequest struct { func (m *VolumeTailReceiverRequest) Reset() { *m = VolumeTailReceiverRequest{} } func (m *VolumeTailReceiverRequest) String() string { return proto.CompactTextString(m) } func (*VolumeTailReceiverRequest) ProtoMessage() {} -func (*VolumeTailReceiverRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{34} } +func (*VolumeTailReceiverRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{36} } func (m *VolumeTailReceiverRequest) GetVolumeId() uint32 { if m != nil { @@ -856,7 +898,7 @@ type VolumeTailReceiverResponse struct { func (m *VolumeTailReceiverResponse) Reset() { *m = VolumeTailReceiverResponse{} } func (m *VolumeTailReceiverResponse) String() string { return proto.CompactTextString(m) } func (*VolumeTailReceiverResponse) ProtoMessage() {} -func (*VolumeTailReceiverResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{35} } +func (*VolumeTailReceiverResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{37} } type VolumeEcShardsGenerateRequest struct { VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` @@ -866,7 +908,7 @@ type VolumeEcShardsGenerateRequest struct { func (m *VolumeEcShardsGenerateRequest) Reset() { *m = VolumeEcShardsGenerateRequest{} } func (m *VolumeEcShardsGenerateRequest) String() string { return proto.CompactTextString(m) } func (*VolumeEcShardsGenerateRequest) ProtoMessage() {} -func (*VolumeEcShardsGenerateRequest) Descriptor() ([]byte, []int) { 
return fileDescriptor0, []int{36} } +func (*VolumeEcShardsGenerateRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{38} } func (m *VolumeEcShardsGenerateRequest) GetVolumeId() uint32 { if m != nil { @@ -888,7 +930,7 @@ type VolumeEcShardsGenerateResponse struct { func (m *VolumeEcShardsGenerateResponse) Reset() { *m = VolumeEcShardsGenerateResponse{} } func (m *VolumeEcShardsGenerateResponse) String() string { return proto.CompactTextString(m) } func (*VolumeEcShardsGenerateResponse) ProtoMessage() {} -func (*VolumeEcShardsGenerateResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{37} } +func (*VolumeEcShardsGenerateResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{39} } type VolumeEcShardsRebuildRequest struct { VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` @@ -898,7 +940,7 @@ type VolumeEcShardsRebuildRequest struct { func (m *VolumeEcShardsRebuildRequest) Reset() { *m = VolumeEcShardsRebuildRequest{} } func (m *VolumeEcShardsRebuildRequest) String() string { return proto.CompactTextString(m) } func (*VolumeEcShardsRebuildRequest) ProtoMessage() {} -func (*VolumeEcShardsRebuildRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{38} } +func (*VolumeEcShardsRebuildRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{40} } func (m *VolumeEcShardsRebuildRequest) GetVolumeId() uint32 { if m != nil { @@ -921,7 +963,7 @@ type VolumeEcShardsRebuildResponse struct { func (m *VolumeEcShardsRebuildResponse) Reset() { *m = VolumeEcShardsRebuildResponse{} } func (m *VolumeEcShardsRebuildResponse) String() string { return proto.CompactTextString(m) } func (*VolumeEcShardsRebuildResponse) ProtoMessage() {} -func (*VolumeEcShardsRebuildResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{39} } +func (*VolumeEcShardsRebuildResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{41} } func (m *VolumeEcShardsRebuildResponse) GetRebuiltShardIds() []uint32 { if m != nil { @@ -943,7 +985,7 @@ type VolumeEcShardsCopyRequest struct { func (m *VolumeEcShardsCopyRequest) Reset() { *m = VolumeEcShardsCopyRequest{} } func (m *VolumeEcShardsCopyRequest) String() string { return proto.CompactTextString(m) } func (*VolumeEcShardsCopyRequest) ProtoMessage() {} -func (*VolumeEcShardsCopyRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{40} } +func (*VolumeEcShardsCopyRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{42} } func (m *VolumeEcShardsCopyRequest) GetVolumeId() uint32 { if m != nil { @@ -1000,7 +1042,7 @@ type VolumeEcShardsCopyResponse struct { func (m *VolumeEcShardsCopyResponse) Reset() { *m = VolumeEcShardsCopyResponse{} } func (m *VolumeEcShardsCopyResponse) String() string { return proto.CompactTextString(m) } func (*VolumeEcShardsCopyResponse) ProtoMessage() {} -func (*VolumeEcShardsCopyResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{41} } +func (*VolumeEcShardsCopyResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{43} } type VolumeEcShardsDeleteRequest struct { VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` @@ -1011,7 +1053,7 @@ type VolumeEcShardsDeleteRequest struct { func (m *VolumeEcShardsDeleteRequest) Reset() { *m = VolumeEcShardsDeleteRequest{} } func (m *VolumeEcShardsDeleteRequest) String() string { return proto.CompactTextString(m) } func (*VolumeEcShardsDeleteRequest) ProtoMessage() 
{} -func (*VolumeEcShardsDeleteRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{42} } +func (*VolumeEcShardsDeleteRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{44} } func (m *VolumeEcShardsDeleteRequest) GetVolumeId() uint32 { if m != nil { @@ -1040,7 +1082,7 @@ type VolumeEcShardsDeleteResponse struct { func (m *VolumeEcShardsDeleteResponse) Reset() { *m = VolumeEcShardsDeleteResponse{} } func (m *VolumeEcShardsDeleteResponse) String() string { return proto.CompactTextString(m) } func (*VolumeEcShardsDeleteResponse) ProtoMessage() {} -func (*VolumeEcShardsDeleteResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{43} } +func (*VolumeEcShardsDeleteResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{45} } type VolumeEcShardsMountRequest struct { VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` @@ -1051,7 +1093,7 @@ type VolumeEcShardsMountRequest struct { func (m *VolumeEcShardsMountRequest) Reset() { *m = VolumeEcShardsMountRequest{} } func (m *VolumeEcShardsMountRequest) String() string { return proto.CompactTextString(m) } func (*VolumeEcShardsMountRequest) ProtoMessage() {} -func (*VolumeEcShardsMountRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{44} } +func (*VolumeEcShardsMountRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{46} } func (m *VolumeEcShardsMountRequest) GetVolumeId() uint32 { if m != nil { @@ -1080,7 +1122,7 @@ type VolumeEcShardsMountResponse struct { func (m *VolumeEcShardsMountResponse) Reset() { *m = VolumeEcShardsMountResponse{} } func (m *VolumeEcShardsMountResponse) String() string { return proto.CompactTextString(m) } func (*VolumeEcShardsMountResponse) ProtoMessage() {} -func (*VolumeEcShardsMountResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{45} } +func (*VolumeEcShardsMountResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{47} } type VolumeEcShardsUnmountRequest struct { VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` @@ -1090,7 +1132,7 @@ type VolumeEcShardsUnmountRequest struct { func (m *VolumeEcShardsUnmountRequest) Reset() { *m = VolumeEcShardsUnmountRequest{} } func (m *VolumeEcShardsUnmountRequest) String() string { return proto.CompactTextString(m) } func (*VolumeEcShardsUnmountRequest) ProtoMessage() {} -func (*VolumeEcShardsUnmountRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{46} } +func (*VolumeEcShardsUnmountRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{48} } func (m *VolumeEcShardsUnmountRequest) GetVolumeId() uint32 { if m != nil { @@ -1112,7 +1154,7 @@ type VolumeEcShardsUnmountResponse struct { func (m *VolumeEcShardsUnmountResponse) Reset() { *m = VolumeEcShardsUnmountResponse{} } func (m *VolumeEcShardsUnmountResponse) String() string { return proto.CompactTextString(m) } func (*VolumeEcShardsUnmountResponse) ProtoMessage() {} -func (*VolumeEcShardsUnmountResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{47} } +func (*VolumeEcShardsUnmountResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{49} } type VolumeEcShardReadRequest struct { VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` @@ -1125,7 +1167,7 @@ type VolumeEcShardReadRequest struct { func (m *VolumeEcShardReadRequest) Reset() { *m = VolumeEcShardReadRequest{} } func (m 
*VolumeEcShardReadRequest) String() string { return proto.CompactTextString(m) } func (*VolumeEcShardReadRequest) ProtoMessage() {} -func (*VolumeEcShardReadRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{48} } +func (*VolumeEcShardReadRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{50} } func (m *VolumeEcShardReadRequest) GetVolumeId() uint32 { if m != nil { @@ -1170,7 +1212,7 @@ type VolumeEcShardReadResponse struct { func (m *VolumeEcShardReadResponse) Reset() { *m = VolumeEcShardReadResponse{} } func (m *VolumeEcShardReadResponse) String() string { return proto.CompactTextString(m) } func (*VolumeEcShardReadResponse) ProtoMessage() {} -func (*VolumeEcShardReadResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{49} } +func (*VolumeEcShardReadResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{51} } func (m *VolumeEcShardReadResponse) GetData() []byte { if m != nil { @@ -1196,7 +1238,7 @@ type VolumeEcBlobDeleteRequest struct { func (m *VolumeEcBlobDeleteRequest) Reset() { *m = VolumeEcBlobDeleteRequest{} } func (m *VolumeEcBlobDeleteRequest) String() string { return proto.CompactTextString(m) } func (*VolumeEcBlobDeleteRequest) ProtoMessage() {} -func (*VolumeEcBlobDeleteRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{50} } +func (*VolumeEcBlobDeleteRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{52} } func (m *VolumeEcBlobDeleteRequest) GetVolumeId() uint32 { if m != nil { @@ -1232,7 +1274,7 @@ type VolumeEcBlobDeleteResponse struct { func (m *VolumeEcBlobDeleteResponse) Reset() { *m = VolumeEcBlobDeleteResponse{} } func (m *VolumeEcBlobDeleteResponse) String() string { return proto.CompactTextString(m) } func (*VolumeEcBlobDeleteResponse) ProtoMessage() {} -func (*VolumeEcBlobDeleteResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{51} } +func (*VolumeEcBlobDeleteResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{53} } type VolumeEcShardsToVolumeRequest struct { VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` @@ -1242,7 +1284,7 @@ type VolumeEcShardsToVolumeRequest struct { func (m *VolumeEcShardsToVolumeRequest) Reset() { *m = VolumeEcShardsToVolumeRequest{} } func (m *VolumeEcShardsToVolumeRequest) String() string { return proto.CompactTextString(m) } func (*VolumeEcShardsToVolumeRequest) ProtoMessage() {} -func (*VolumeEcShardsToVolumeRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{52} } +func (*VolumeEcShardsToVolumeRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{54} } func (m *VolumeEcShardsToVolumeRequest) GetVolumeId() uint32 { if m != nil { @@ -1264,7 +1306,7 @@ type VolumeEcShardsToVolumeResponse struct { func (m *VolumeEcShardsToVolumeResponse) Reset() { *m = VolumeEcShardsToVolumeResponse{} } func (m *VolumeEcShardsToVolumeResponse) String() string { return proto.CompactTextString(m) } func (*VolumeEcShardsToVolumeResponse) ProtoMessage() {} -func (*VolumeEcShardsToVolumeResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{53} } +func (*VolumeEcShardsToVolumeResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{55} } type ReadVolumeFileStatusRequest struct { VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` @@ -1273,7 +1315,7 @@ type ReadVolumeFileStatusRequest struct { func (m *ReadVolumeFileStatusRequest) Reset() { *m = 
ReadVolumeFileStatusRequest{} } func (m *ReadVolumeFileStatusRequest) String() string { return proto.CompactTextString(m) } func (*ReadVolumeFileStatusRequest) ProtoMessage() {} -func (*ReadVolumeFileStatusRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{54} } +func (*ReadVolumeFileStatusRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{56} } func (m *ReadVolumeFileStatusRequest) GetVolumeId() uint32 { if m != nil { @@ -1296,7 +1338,7 @@ type ReadVolumeFileStatusResponse struct { func (m *ReadVolumeFileStatusResponse) Reset() { *m = ReadVolumeFileStatusResponse{} } func (m *ReadVolumeFileStatusResponse) String() string { return proto.CompactTextString(m) } func (*ReadVolumeFileStatusResponse) ProtoMessage() {} -func (*ReadVolumeFileStatusResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{55} } +func (*ReadVolumeFileStatusResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{57} } func (m *ReadVolumeFileStatusResponse) GetVolumeId() uint32 { if m != nil { @@ -1364,7 +1406,7 @@ type DiskStatus struct { func (m *DiskStatus) Reset() { *m = DiskStatus{} } func (m *DiskStatus) String() string { return proto.CompactTextString(m) } func (*DiskStatus) ProtoMessage() {} -func (*DiskStatus) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{56} } +func (*DiskStatus) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{58} } func (m *DiskStatus) GetDir() string { if m != nil { @@ -1407,7 +1449,7 @@ type MemStatus struct { func (m *MemStatus) Reset() { *m = MemStatus{} } func (m *MemStatus) String() string { return proto.CompactTextString(m) } func (*MemStatus) ProtoMessage() {} -func (*MemStatus) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{57} } +func (*MemStatus) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{59} } func (m *MemStatus) GetGoroutines() int32 { if m != nil { @@ -1472,7 +1514,7 @@ type RemoteFile struct { func (m *RemoteFile) Reset() { *m = RemoteFile{} } func (m *RemoteFile) String() string { return proto.CompactTextString(m) } func (*RemoteFile) ProtoMessage() {} -func (*RemoteFile) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{58} } +func (*RemoteFile) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{60} } func (m *RemoteFile) GetBackendType() string { if m != nil { @@ -1524,14 +1566,15 @@ func (m *RemoteFile) GetExtension() string { } type VolumeInfo struct { - Files []*RemoteFile `protobuf:"bytes,1,rep,name=files" json:"files,omitempty"` - Version uint32 `protobuf:"varint,2,opt,name=version" json:"version,omitempty"` + Files []*RemoteFile `protobuf:"bytes,1,rep,name=files" json:"files,omitempty"` + Version uint32 `protobuf:"varint,2,opt,name=version" json:"version,omitempty"` + Replication string `protobuf:"bytes,3,opt,name=replication" json:"replication,omitempty"` } func (m *VolumeInfo) Reset() { *m = VolumeInfo{} } func (m *VolumeInfo) String() string { return proto.CompactTextString(m) } func (*VolumeInfo) ProtoMessage() {} -func (*VolumeInfo) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{59} } +func (*VolumeInfo) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{61} } func (m *VolumeInfo) GetFiles() []*RemoteFile { if m != nil { @@ -1547,6 +1590,13 @@ func (m *VolumeInfo) GetVersion() uint32 { return 0 } +func (m *VolumeInfo) GetReplication() string { + if m != nil { + return m.Replication + } + return "" +} + type VolumeTierMoveDatToRemoteRequest struct { VolumeId uint32 
`protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` Collection string `protobuf:"bytes,2,opt,name=collection" json:"collection,omitempty"` @@ -1558,7 +1608,7 @@ func (m *VolumeTierMoveDatToRemoteRequest) Reset() { *m = VolumeTierMove func (m *VolumeTierMoveDatToRemoteRequest) String() string { return proto.CompactTextString(m) } func (*VolumeTierMoveDatToRemoteRequest) ProtoMessage() {} func (*VolumeTierMoveDatToRemoteRequest) Descriptor() ([]byte, []int) { - return fileDescriptor0, []int{60} + return fileDescriptor0, []int{62} } func (m *VolumeTierMoveDatToRemoteRequest) GetVolumeId() uint32 { @@ -1598,7 +1648,7 @@ func (m *VolumeTierMoveDatToRemoteResponse) Reset() { *m = VolumeTierMov func (m *VolumeTierMoveDatToRemoteResponse) String() string { return proto.CompactTextString(m) } func (*VolumeTierMoveDatToRemoteResponse) ProtoMessage() {} func (*VolumeTierMoveDatToRemoteResponse) Descriptor() ([]byte, []int) { - return fileDescriptor0, []int{61} + return fileDescriptor0, []int{63} } func (m *VolumeTierMoveDatToRemoteResponse) GetProcessed() int64 { @@ -1625,7 +1675,7 @@ func (m *VolumeTierMoveDatFromRemoteRequest) Reset() { *m = VolumeTierMo func (m *VolumeTierMoveDatFromRemoteRequest) String() string { return proto.CompactTextString(m) } func (*VolumeTierMoveDatFromRemoteRequest) ProtoMessage() {} func (*VolumeTierMoveDatFromRemoteRequest) Descriptor() ([]byte, []int) { - return fileDescriptor0, []int{62} + return fileDescriptor0, []int{64} } func (m *VolumeTierMoveDatFromRemoteRequest) GetVolumeId() uint32 { @@ -1658,7 +1708,7 @@ func (m *VolumeTierMoveDatFromRemoteResponse) Reset() { *m = VolumeTierM func (m *VolumeTierMoveDatFromRemoteResponse) String() string { return proto.CompactTextString(m) } func (*VolumeTierMoveDatFromRemoteResponse) ProtoMessage() {} func (*VolumeTierMoveDatFromRemoteResponse) Descriptor() ([]byte, []int) { - return fileDescriptor0, []int{63} + return fileDescriptor0, []int{65} } func (m *VolumeTierMoveDatFromRemoteResponse) GetProcessed() int64 { @@ -1687,7 +1737,7 @@ type QueryRequest struct { func (m *QueryRequest) Reset() { *m = QueryRequest{} } func (m *QueryRequest) String() string { return proto.CompactTextString(m) } func (*QueryRequest) ProtoMessage() {} -func (*QueryRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{64} } +func (*QueryRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{66} } func (m *QueryRequest) GetSelections() []string { if m != nil { @@ -1733,7 +1783,7 @@ type QueryRequest_Filter struct { func (m *QueryRequest_Filter) Reset() { *m = QueryRequest_Filter{} } func (m *QueryRequest_Filter) String() string { return proto.CompactTextString(m) } func (*QueryRequest_Filter) ProtoMessage() {} -func (*QueryRequest_Filter) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{64, 0} } +func (*QueryRequest_Filter) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{66, 0} } func (m *QueryRequest_Filter) GetField() string { if m != nil { @@ -1768,7 +1818,7 @@ func (m *QueryRequest_InputSerialization) Reset() { *m = QueryRequest_In func (m *QueryRequest_InputSerialization) String() string { return proto.CompactTextString(m) } func (*QueryRequest_InputSerialization) ProtoMessage() {} func (*QueryRequest_InputSerialization) Descriptor() ([]byte, []int) { - return fileDescriptor0, []int{64, 1} + return fileDescriptor0, []int{66, 1} } func (m *QueryRequest_InputSerialization) GetCompressionType() string { @@ -1816,7 +1866,7 @@ func (m 
*QueryRequest_InputSerialization_CSVInput) Reset() { func (m *QueryRequest_InputSerialization_CSVInput) String() string { return proto.CompactTextString(m) } func (*QueryRequest_InputSerialization_CSVInput) ProtoMessage() {} func (*QueryRequest_InputSerialization_CSVInput) Descriptor() ([]byte, []int) { - return fileDescriptor0, []int{64, 1, 0} + return fileDescriptor0, []int{66, 1, 0} } func (m *QueryRequest_InputSerialization_CSVInput) GetFileHeaderInfo() string { @@ -1878,7 +1928,7 @@ func (m *QueryRequest_InputSerialization_JSONInput) Reset() { func (m *QueryRequest_InputSerialization_JSONInput) String() string { return proto.CompactTextString(m) } func (*QueryRequest_InputSerialization_JSONInput) ProtoMessage() {} func (*QueryRequest_InputSerialization_JSONInput) Descriptor() ([]byte, []int) { - return fileDescriptor0, []int{64, 1, 1} + return fileDescriptor0, []int{66, 1, 1} } func (m *QueryRequest_InputSerialization_JSONInput) GetType() string { @@ -1899,7 +1949,7 @@ func (m *QueryRequest_InputSerialization_ParquetInput) String() string { } func (*QueryRequest_InputSerialization_ParquetInput) ProtoMessage() {} func (*QueryRequest_InputSerialization_ParquetInput) Descriptor() ([]byte, []int) { - return fileDescriptor0, []int{64, 1, 2} + return fileDescriptor0, []int{66, 1, 2} } type QueryRequest_OutputSerialization struct { @@ -1911,7 +1961,7 @@ func (m *QueryRequest_OutputSerialization) Reset() { *m = QueryRequest_O func (m *QueryRequest_OutputSerialization) String() string { return proto.CompactTextString(m) } func (*QueryRequest_OutputSerialization) ProtoMessage() {} func (*QueryRequest_OutputSerialization) Descriptor() ([]byte, []int) { - return fileDescriptor0, []int{64, 2} + return fileDescriptor0, []int{66, 2} } func (m *QueryRequest_OutputSerialization) GetCsvOutput() *QueryRequest_OutputSerialization_CSVOutput { @@ -1944,7 +1994,7 @@ func (m *QueryRequest_OutputSerialization_CSVOutput) String() string { } func (*QueryRequest_OutputSerialization_CSVOutput) ProtoMessage() {} func (*QueryRequest_OutputSerialization_CSVOutput) Descriptor() ([]byte, []int) { - return fileDescriptor0, []int{64, 2, 0} + return fileDescriptor0, []int{66, 2, 0} } func (m *QueryRequest_OutputSerialization_CSVOutput) GetQuoteFields() string { @@ -1994,7 +2044,7 @@ func (m *QueryRequest_OutputSerialization_JSONOutput) String() string { } func (*QueryRequest_OutputSerialization_JSONOutput) ProtoMessage() {} func (*QueryRequest_OutputSerialization_JSONOutput) Descriptor() ([]byte, []int) { - return fileDescriptor0, []int{64, 2, 1} + return fileDescriptor0, []int{66, 2, 1} } func (m *QueryRequest_OutputSerialization_JSONOutput) GetRecordDelimiter() string { @@ -2011,7 +2061,7 @@ type QueriedStripe struct { func (m *QueriedStripe) Reset() { *m = QueriedStripe{} } func (m *QueriedStripe) String() string { return proto.CompactTextString(m) } func (*QueriedStripe) ProtoMessage() {} -func (*QueriedStripe) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{65} } +func (*QueriedStripe) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{67} } func (m *QueriedStripe) GetRecords() []byte { if m != nil { @@ -2049,6 +2099,8 @@ func init() { proto.RegisterType((*VolumeDeleteResponse)(nil), "volume_server_pb.VolumeDeleteResponse") proto.RegisterType((*VolumeMarkReadonlyRequest)(nil), "volume_server_pb.VolumeMarkReadonlyRequest") proto.RegisterType((*VolumeMarkReadonlyResponse)(nil), "volume_server_pb.VolumeMarkReadonlyResponse") + proto.RegisterType((*VolumeConfigureRequest)(nil), 
"volume_server_pb.VolumeConfigureRequest") + proto.RegisterType((*VolumeConfigureResponse)(nil), "volume_server_pb.VolumeConfigureResponse") proto.RegisterType((*VolumeCopyRequest)(nil), "volume_server_pb.VolumeCopyRequest") proto.RegisterType((*VolumeCopyResponse)(nil), "volume_server_pb.VolumeCopyResponse") proto.RegisterType((*CopyFileRequest)(nil), "volume_server_pb.CopyFileRequest") @@ -2122,6 +2174,7 @@ type VolumeServerClient interface { VolumeUnmount(ctx context.Context, in *VolumeUnmountRequest, opts ...grpc.CallOption) (*VolumeUnmountResponse, error) VolumeDelete(ctx context.Context, in *VolumeDeleteRequest, opts ...grpc.CallOption) (*VolumeDeleteResponse, error) VolumeMarkReadonly(ctx context.Context, in *VolumeMarkReadonlyRequest, opts ...grpc.CallOption) (*VolumeMarkReadonlyResponse, error) + VolumeConfigure(ctx context.Context, in *VolumeConfigureRequest, opts ...grpc.CallOption) (*VolumeConfigureResponse, error) // copy the .idx .dat files, and mount this volume VolumeCopy(ctx context.Context, in *VolumeCopyRequest, opts ...grpc.CallOption) (*VolumeCopyResponse, error) ReadVolumeFileStatus(ctx context.Context, in *ReadVolumeFileStatusRequest, opts ...grpc.CallOption) (*ReadVolumeFileStatusResponse, error) @@ -2293,6 +2346,15 @@ func (c *volumeServerClient) VolumeMarkReadonly(ctx context.Context, in *VolumeM return out, nil } +func (c *volumeServerClient) VolumeConfigure(ctx context.Context, in *VolumeConfigureRequest, opts ...grpc.CallOption) (*VolumeConfigureResponse, error) { + out := new(VolumeConfigureResponse) + err := grpc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeConfigure", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + func (c *volumeServerClient) VolumeCopy(ctx context.Context, in *VolumeCopyRequest, opts ...grpc.CallOption) (*VolumeCopyResponse, error) { out := new(VolumeCopyResponse) err := grpc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeCopy", in, out, c.cc, opts...) 
@@ -2601,6 +2663,7 @@ type VolumeServerServer interface { VolumeUnmount(context.Context, *VolumeUnmountRequest) (*VolumeUnmountResponse, error) VolumeDelete(context.Context, *VolumeDeleteRequest) (*VolumeDeleteResponse, error) VolumeMarkReadonly(context.Context, *VolumeMarkReadonlyRequest) (*VolumeMarkReadonlyResponse, error) + VolumeConfigure(context.Context, *VolumeConfigureRequest) (*VolumeConfigureResponse, error) // copy the .idx .dat files, and mount this volume VolumeCopy(context.Context, *VolumeCopyRequest) (*VolumeCopyResponse, error) ReadVolumeFileStatus(context.Context, *ReadVolumeFileStatusRequest) (*ReadVolumeFileStatusResponse, error) @@ -2865,6 +2928,24 @@ func _VolumeServer_VolumeMarkReadonly_Handler(srv interface{}, ctx context.Conte return interceptor(ctx, in, info, handler) } +func _VolumeServer_VolumeConfigure_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(VolumeConfigureRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(VolumeServerServer).VolumeConfigure(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/volume_server_pb.VolumeServer/VolumeConfigure", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(VolumeServerServer).VolumeConfigure(ctx, req.(*VolumeConfigureRequest)) + } + return interceptor(ctx, in, info, handler) +} + func _VolumeServer_VolumeCopy_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(VolumeCopyRequest) if err := dec(in); err != nil { @@ -3241,6 +3322,10 @@ var _VolumeServer_serviceDesc = grpc.ServiceDesc{ MethodName: "VolumeMarkReadonly", Handler: _VolumeServer_VolumeMarkReadonly_Handler, }, + { + MethodName: "VolumeConfigure", + Handler: _VolumeServer_VolumeConfigure_Handler, + }, { MethodName: "VolumeCopy", Handler: _VolumeServer_VolumeCopy_Handler, @@ -3329,187 +3414,190 @@ var _VolumeServer_serviceDesc = grpc.ServiceDesc{ func init() { proto.RegisterFile("volume_server.proto", fileDescriptor0) } var fileDescriptor0 = []byte{ - // 2905 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xd4, 0x3a, 0x4d, 0x73, 0xdc, 0xc6, - 0xb1, 0x5c, 0x2e, 0x3f, 0x76, 0x7b, 0x49, 0x91, 0x1a, 0xd2, 0xd4, 0x1a, 0xa2, 0x24, 0x1a, 0xf2, - 0x87, 0x24, 0x5b, 0x94, 0x4c, 0xdb, 0xcf, 0x7e, 0xf6, 0xb3, 0xdf, 0x93, 0x28, 0xe9, 0x45, 0xb1, - 0x45, 0xd9, 0xa0, 0xac, 0x38, 0xb6, 0x2b, 0xa8, 0x21, 0x30, 0x2b, 0xc2, 0x04, 0x30, 0x10, 0x30, - 0x4b, 0x6b, 0x55, 0xc9, 0xc9, 0x39, 0xa4, 0x2a, 0x95, 0x1c, 0x52, 0xb9, 0xe4, 0x9c, 0x7b, 0xae, - 0xf9, 0x03, 0x39, 0xf8, 0x0f, 0xa4, 0x2a, 0xa7, 0x5c, 0x72, 0xce, 0x21, 0xb7, 0x54, 0xe5, 0x92, - 0x9a, 0x2f, 0x2c, 0x3e, 0xb9, 0xa0, 0xc5, 0x54, 0x2a, 0xb7, 0x41, 0x4f, 0x7f, 0x4c, 0xf7, 0x74, - 0xf7, 0x4c, 0x4f, 0x03, 0x56, 0x0e, 0xa9, 0x3f, 0x0c, 0x88, 0x9d, 0x90, 0xf8, 0x90, 0xc4, 0x9b, - 0x51, 0x4c, 0x19, 0x45, 0xcb, 0x39, 0xa0, 0x1d, 0xed, 0x99, 0xd7, 0x00, 0xdd, 0xc4, 0xcc, 0xd9, - 0xbf, 0x45, 0x7c, 0xc2, 0x88, 0x45, 0x1e, 0x0f, 0x49, 0xc2, 0xd0, 0xf3, 0xd0, 0x19, 0x78, 0x3e, - 0xb1, 0x3d, 0x37, 0xe9, 0xb7, 0x36, 0xda, 0x97, 0xba, 0xd6, 0x3c, 0xff, 0xbe, 0xeb, 0x26, 0xe6, - 0x7d, 0x58, 0xc9, 0x11, 0x24, 0x11, 0x0d, 0x13, 0x82, 0xde, 0x81, 0xf9, 0x98, 0x24, 0x43, 0x9f, - 0x49, 0x82, 0xde, 0xd6, 0xf9, 0xcd, 0xa2, 0xac, 0xcd, 0x94, 0x64, 0xe8, 0x33, 0x4b, 0xa3, 0x9b, 
- 0xdf, 0xb4, 0x60, 0x21, 0x3b, 0x83, 0xce, 0xc0, 0xbc, 0x12, 0xde, 0x6f, 0x6d, 0xb4, 0x2e, 0x75, - 0xad, 0x39, 0x29, 0x1b, 0xad, 0xc1, 0x5c, 0xc2, 0x30, 0x1b, 0x26, 0xfd, 0xe9, 0x8d, 0xd6, 0xa5, - 0x59, 0x4b, 0x7d, 0xa1, 0x55, 0x98, 0x25, 0x71, 0x4c, 0xe3, 0x7e, 0x5b, 0xa0, 0xcb, 0x0f, 0x84, - 0x60, 0x26, 0xf1, 0x9e, 0x92, 0xfe, 0xcc, 0x46, 0xeb, 0xd2, 0xa2, 0x25, 0xc6, 0xa8, 0x0f, 0xf3, - 0x87, 0x24, 0x4e, 0x3c, 0x1a, 0xf6, 0x67, 0x05, 0x58, 0x7f, 0x9a, 0xf3, 0x30, 0x7b, 0x3b, 0x88, - 0xd8, 0xc8, 0x7c, 0x1b, 0xfa, 0x0f, 0xb1, 0x33, 0x1c, 0x06, 0x0f, 0xc5, 0xf2, 0xb7, 0xf7, 0x89, - 0x73, 0xa0, 0xcd, 0x72, 0x16, 0xba, 0x4a, 0x29, 0xb5, 0xb6, 0x45, 0xab, 0x23, 0x01, 0x77, 0x5d, - 0xf3, 0xff, 0xe0, 0xf9, 0x0a, 0x42, 0x65, 0x9e, 0x8b, 0xb0, 0xf8, 0x08, 0xc7, 0x7b, 0xf8, 0x11, - 0xb1, 0x63, 0xcc, 0x3c, 0x2a, 0xa8, 0x5b, 0xd6, 0x82, 0x02, 0x5a, 0x1c, 0x66, 0x7e, 0x01, 0x46, - 0x8e, 0x03, 0x0d, 0x22, 0xec, 0xb0, 0x26, 0xc2, 0xd1, 0x06, 0xf4, 0xa2, 0x98, 0x60, 0xdf, 0xa7, - 0x0e, 0x66, 0x44, 0xd8, 0xa7, 0x6d, 0x65, 0x41, 0xe6, 0x39, 0x38, 0x5b, 0xc9, 0x5c, 0x2e, 0xd0, - 0x7c, 0xa7, 0xb0, 0x7a, 0x1a, 0x04, 0x5e, 0x23, 0xd1, 0xe6, 0x7a, 0x69, 0xd5, 0x82, 0x52, 0xf1, - 0xfd, 0xef, 0xc2, 0xac, 0x4f, 0x70, 0x38, 0x8c, 0x1a, 0x31, 0x2e, 0xae, 0x58, 0x93, 0xa6, 0x9c, - 0xcf, 0x48, 0xb7, 0xd9, 0xa6, 0xbe, 0x4f, 0x1c, 0xe6, 0xd1, 0x50, 0xb3, 0x3d, 0x0f, 0xe0, 0xa4, - 0x40, 0xe5, 0x44, 0x19, 0x88, 0x69, 0x40, 0xbf, 0x4c, 0xaa, 0xd8, 0xfe, 0xb9, 0x05, 0xcf, 0xdd, - 0x50, 0x46, 0x93, 0x82, 0x1b, 0x6d, 0x40, 0x5e, 0xe4, 0x74, 0x51, 0x64, 0x71, 0x83, 0xda, 0xa5, - 0x0d, 0xe2, 0x18, 0x31, 0x89, 0x7c, 0xcf, 0xc1, 0x82, 0xc5, 0x8c, 0x60, 0x91, 0x05, 0xa1, 0x65, - 0x68, 0x33, 0xe6, 0x0b, 0xcf, 0xed, 0x5a, 0x7c, 0x88, 0xb6, 0x60, 0x2d, 0x20, 0x01, 0x8d, 0x47, - 0x76, 0x80, 0x23, 0x3b, 0xc0, 0x4f, 0x6c, 0xee, 0xe6, 0x76, 0xb0, 0xd7, 0x9f, 0x13, 0xeb, 0x43, - 0x72, 0xf6, 0x1e, 0x8e, 0xee, 0xe1, 0x27, 0xbb, 0xde, 0x53, 0x72, 0x6f, 0xcf, 0xec, 0xc3, 0x5a, - 0x51, 0x3f, 0xa5, 0xfa, 0x7f, 0xc1, 0x19, 0x09, 0xd9, 0x1d, 0x85, 0xce, 0xae, 0x88, 0xad, 0x46, - 0x1b, 0xf5, 0x8f, 0x16, 0xf4, 0xcb, 0x84, 0xca, 0xf3, 0x9f, 0xd5, 0x6a, 0xc7, 0xb6, 0xc9, 0x05, - 0xe8, 0x31, 0xec, 0xf9, 0x36, 0x1d, 0x0c, 0x12, 0xc2, 0x84, 0x21, 0x66, 0x2c, 0xe0, 0xa0, 0xfb, - 0x02, 0x82, 0x2e, 0xc3, 0xb2, 0x23, 0xbd, 0xdf, 0x8e, 0xc9, 0xa1, 0x27, 0xb2, 0xc1, 0xbc, 0x58, - 0xd8, 0x92, 0xa3, 0xa3, 0x42, 0x82, 0x91, 0x09, 0x8b, 0x9e, 0xfb, 0xc4, 0x16, 0xe9, 0x48, 0x24, - 0x93, 0x8e, 0xe0, 0xd6, 0xf3, 0xdc, 0x27, 0x77, 0x3c, 0x9f, 0x70, 0x8b, 0x9a, 0x0f, 0x61, 0x5d, - 0x2a, 0x7f, 0x37, 0x74, 0x62, 0x12, 0x90, 0x90, 0x61, 0x7f, 0x9b, 0x46, 0xa3, 0x46, 0x6e, 0xf3, - 0x3c, 0x74, 0x12, 0x2f, 0x74, 0x88, 0x1d, 0xca, 0xa4, 0x36, 0x63, 0xcd, 0x8b, 0xef, 0x9d, 0xc4, - 0xbc, 0x09, 0xe7, 0x6a, 0xf8, 0x2a, 0xcb, 0xbe, 0x00, 0x0b, 0x62, 0x61, 0x0e, 0x0d, 0x19, 0x09, - 0x99, 0xe0, 0xbd, 0x60, 0xf5, 0x38, 0x6c, 0x5b, 0x82, 0xcc, 0xd7, 0x01, 0x49, 0x1e, 0xf7, 0xe8, - 0x30, 0x6c, 0x16, 0xce, 0xcf, 0xc1, 0x4a, 0x8e, 0x44, 0xf9, 0xc6, 0x1b, 0xb0, 0x2a, 0xc1, 0x9f, - 0x86, 0x41, 0x63, 0x5e, 0x67, 0xe0, 0xb9, 0x02, 0x91, 0xe2, 0xb6, 0xa5, 0x85, 0xe4, 0x8f, 0x9d, - 0x23, 0x99, 0xad, 0xe9, 0x15, 0xe4, 0x4f, 0x1e, 0x91, 0xb9, 0xe4, 0x82, 0x71, 0x7c, 0x60, 0x11, - 0xec, 0xd2, 0xd0, 0x1f, 0x35, 0xce, 0x5c, 0x15, 0x94, 0x8a, 0xef, 0xef, 0x5a, 0x70, 0x5a, 0xa7, - 0xb4, 0x86, 0xbb, 0x79, 0x4c, 0x77, 0x6e, 0xd7, 0xba, 0xf3, 0xcc, 0xd8, 0x9d, 0x2f, 0xc1, 0x72, - 0x42, 0x87, 0xb1, 0x43, 0x6c, 0x17, 0x33, 0x6c, 0x87, 0xd4, 0x25, 0xca, 0xdb, 0x4f, 0x49, 0xf8, - 0x2d, 0xcc, 0xf0, 0x0e, 
0x75, 0x89, 0xf9, 0xbf, 0x7a, 0xb3, 0x73, 0x5e, 0x72, 0x19, 0x4e, 0xfb, - 0x38, 0x61, 0x36, 0x8e, 0x22, 0x12, 0xba, 0x36, 0x66, 0xdc, 0xd5, 0x5a, 0xc2, 0xd5, 0x4e, 0xf1, - 0x89, 0x1b, 0x02, 0x7e, 0x83, 0xed, 0x24, 0xe6, 0xaf, 0xa7, 0x61, 0x89, 0xd3, 0x72, 0xd7, 0x6e, - 0xa4, 0xef, 0x32, 0xb4, 0xc9, 0x13, 0xa6, 0x14, 0xe5, 0x43, 0x74, 0x0d, 0x56, 0x54, 0x0c, 0x79, - 0x34, 0x1c, 0x87, 0x57, 0x5b, 0x66, 0xa3, 0xf1, 0x54, 0x1a, 0x61, 0x17, 0xa0, 0x97, 0x30, 0x1a, - 0xe9, 0x68, 0x9d, 0x91, 0xd1, 0xca, 0x41, 0x2a, 0x5a, 0xf3, 0x36, 0x9d, 0xad, 0xb0, 0xe9, 0x82, - 0x97, 0xd8, 0xc4, 0xb1, 0xe5, 0xaa, 0x44, 0xbc, 0x77, 0x2c, 0xf0, 0x92, 0xdb, 0x8e, 0xb4, 0x06, - 0xfa, 0x00, 0xd6, 0xbd, 0x47, 0x21, 0x8d, 0x89, 0xad, 0x0c, 0x29, 0xa2, 0x26, 0xa4, 0xcc, 0x1e, - 0xd0, 0x61, 0xe8, 0x8a, 0xd8, 0xef, 0x58, 0x7d, 0x89, 0xb3, 0x2b, 0x50, 0xb8, 0x05, 0x76, 0x28, - 0xbb, 0xc3, 0xe7, 0xcd, 0xb7, 0x60, 0x79, 0x6c, 0x95, 0xe6, 0xb1, 0xf7, 0x4d, 0x4b, 0xa7, 0xd3, - 0x07, 0xd8, 0xf3, 0x77, 0x49, 0xe8, 0x92, 0xf8, 0x19, 0x73, 0x02, 0xba, 0x0e, 0xab, 0x9e, 0xeb, - 0x13, 0x9b, 0x79, 0x01, 0xa1, 0x43, 0x66, 0x27, 0xc4, 0xa1, 0xa1, 0x9b, 0x68, 0xfb, 0xf2, 0xb9, - 0x07, 0x72, 0x6a, 0x57, 0xce, 0x98, 0x3f, 0x4d, 0x73, 0x73, 0x76, 0x15, 0xe3, 0x5b, 0x49, 0x48, - 0x08, 0x67, 0xb8, 0x4f, 0xb0, 0x4b, 0x62, 0xa5, 0xc6, 0x82, 0x04, 0x7e, 0x4f, 0xc0, 0xf8, 0x0e, - 0x29, 0xa4, 0x3d, 0xea, 0x8e, 0xc4, 0x8a, 0x16, 0x2c, 0x90, 0xa0, 0x9b, 0xd4, 0x1d, 0x89, 0x24, - 0x99, 0xd8, 0xc2, 0xc9, 0x9c, 0xfd, 0x61, 0x78, 0x20, 0x56, 0xd3, 0xb1, 0x7a, 0x5e, 0xf2, 0x11, - 0x4e, 0xd8, 0x36, 0x07, 0x99, 0xbf, 0x6f, 0xe9, 0x28, 0xe5, 0xcb, 0xb0, 0x88, 0x43, 0xbc, 0xc3, - 0x7f, 0x83, 0x39, 0x38, 0x85, 0x72, 0x82, 0xdc, 0xed, 0x54, 0x05, 0x1c, 0x92, 0x73, 0xea, 0x2c, - 0x13, 0x33, 0xe3, 0x24, 0x91, 0x5f, 0xb8, 0x4a, 0x12, 0x5f, 0xea, 0x24, 0x7d, 0xdb, 0xd9, 0xdd, - 0xc7, 0xb1, 0x9b, 0xfc, 0x3f, 0x09, 0x49, 0x8c, 0xd9, 0x89, 0x5c, 0x1a, 0xcc, 0x0d, 0x38, 0x5f, - 0xc7, 0x5d, 0xc9, 0xff, 0x42, 0x1f, 0x3e, 0x1a, 0xc3, 0x22, 0x7b, 0x43, 0xcf, 0x77, 0x4f, 0x44, - 0xfc, 0x87, 0x45, 0xe5, 0x52, 0xe6, 0xca, 0x7f, 0xae, 0xc0, 0xe9, 0x58, 0x80, 0x98, 0x9d, 0x70, - 0x84, 0xb4, 0x5e, 0x58, 0xb4, 0x96, 0xd4, 0x84, 0x20, 0xe4, 0x75, 0xc3, 0xcf, 0xa7, 0xb5, 0x07, - 0x68, 0x6e, 0x27, 0x96, 0x56, 0xcf, 0x42, 0x77, 0x2c, 0xbe, 0x2d, 0xc4, 0x77, 0x12, 0x25, 0x97, - 0x7b, 0xa7, 0x43, 0xa3, 0x91, 0x4d, 0x1c, 0x79, 0x8e, 0x8b, 0xad, 0xee, 0x58, 0x3d, 0x0e, 0xbc, - 0xed, 0x88, 0x63, 0xbc, 0x79, 0x8e, 0xcd, 0x70, 0xfb, 0x4a, 0x72, 0x9b, 0xcb, 0x72, 0xfb, 0x4a, - 0x70, 0xd3, 0x38, 0x87, 0xde, 0x40, 0xe2, 0xcc, 0x8f, 0x71, 0x1e, 0x7a, 0x03, 0x8e, 0x33, 0xf6, - 0xaa, 0xbc, 0x31, 0xd4, 0xae, 0x7e, 0x0d, 0x67, 0xf3, 0xb3, 0xcd, 0x8f, 0xc9, 0x67, 0x32, 0x96, - 0x79, 0xbe, 0xe8, 0x4e, 0x85, 0xb3, 0xf6, 0xb0, 0xb8, 0xec, 0xc6, 0xf7, 0x8a, 0x67, 0x5b, 0xd7, - 0xb9, 0xa2, 0x41, 0xf2, 0x97, 0x93, 0xcf, 0x8a, 0xcb, 0x3e, 0xc6, 0x25, 0xe5, 0x68, 0xc1, 0x17, - 0x8a, 0x21, 0x50, 0xbc, 0xc9, 0xfc, 0x26, 0xcd, 0xaf, 0x0a, 0x83, 0xdf, 0x23, 0x1a, 0xe7, 0x35, - 0x25, 0x57, 0x98, 0x63, 0xd1, 0x9a, 0x57, 0x62, 0x79, 0xa1, 0xab, 0xce, 0x43, 0x59, 0x27, 0xa8, - 0xaf, 0x5c, 0x49, 0xdb, 0x56, 0x25, 0xad, 0x2e, 0xd5, 0x0f, 0xc8, 0x48, 0xf8, 0xec, 0x8c, 0x2c, - 0xd5, 0x3f, 0x24, 0x23, 0x73, 0xa7, 0x10, 0x71, 0x72, 0x69, 0x2a, 0x76, 0x11, 0xcc, 0x70, 0x67, - 0x57, 0x29, 0x5f, 0x8c, 0xd1, 0x39, 0x00, 0x2f, 0xb1, 0x5d, 0xb1, 0xe7, 0x72, 0x51, 0x1d, 0xab, - 0xeb, 0x29, 0x27, 0x70, 0xcd, 0x5f, 0xb4, 0xc6, 0x0c, 0x6f, 0xfa, 0x74, 0xef, 0x04, 0xbd, 0x32, - 0xab, 0x45, 0x3b, 0xa7, 0x45, 0xb6, 0x66, 0x9f, 
0xc9, 0xd7, 0xec, 0x99, 0x20, 0xca, 0x2e, 0xa7, - 0x2e, 0x35, 0x3f, 0xa0, 0x27, 0x57, 0xcf, 0x95, 0x53, 0xf3, 0x98, 0xbb, 0x92, 0xff, 0x2e, 0x9c, - 0xe5, 0x06, 0x97, 0x50, 0x51, 0x2d, 0x34, 0xaf, 0xa8, 0xfe, 0x3a, 0x0d, 0xeb, 0xd5, 0xc4, 0x4d, - 0xaa, 0xaa, 0xf7, 0xc0, 0x48, 0xab, 0x16, 0x7e, 0x34, 0x26, 0x0c, 0x07, 0x51, 0x7a, 0x38, 0xca, - 0x33, 0xf4, 0x8c, 0x2a, 0x61, 0x1e, 0xe8, 0x79, 0x7d, 0x42, 0x96, 0x4a, 0x9e, 0x76, 0xa9, 0xe4, - 0xe1, 0x02, 0x5c, 0xcc, 0xea, 0x04, 0xc8, 0x3b, 0xdc, 0x19, 0x17, 0xb3, 0x3a, 0x01, 0x29, 0xb1, - 0x10, 0x20, 0xbd, 0xb6, 0xa7, 0xf0, 0x85, 0x80, 0x73, 0x00, 0xea, 0x7a, 0x35, 0x0c, 0x75, 0x09, - 0xd7, 0x95, 0x97, 0xab, 0x61, 0x58, 0x7b, 0xcb, 0x9c, 0xaf, 0xbd, 0x65, 0xe6, 0x77, 0xb3, 0x53, - 0xda, 0xcd, 0xcf, 0x00, 0x6e, 0x79, 0xc9, 0x81, 0x34, 0x32, 0xbf, 0xd6, 0xba, 0x5e, 0xac, 0xde, - 0x0d, 0xf8, 0x90, 0x43, 0xb0, 0xef, 0x2b, 0xd3, 0xf1, 0x21, 0x0f, 0x9f, 0x61, 0x42, 0x5c, 0x65, - 0x1d, 0x31, 0xe6, 0xb0, 0x41, 0x4c, 0x88, 0x32, 0x80, 0x18, 0x9b, 0xbf, 0x6d, 0x41, 0xf7, 0x1e, - 0x09, 0x14, 0xe7, 0xf3, 0x00, 0x8f, 0x68, 0x4c, 0x87, 0xcc, 0x0b, 0x89, 0xbc, 0x85, 0xcf, 0x5a, - 0x19, 0xc8, 0x77, 0x97, 0x23, 0x52, 0x03, 0xf1, 0x07, 0xca, 0x98, 0x62, 0xcc, 0x61, 0xfb, 0x04, - 0x47, 0xca, 0x7e, 0x62, 0x8c, 0x56, 0x61, 0x36, 0x61, 0xd8, 0x39, 0x10, 0xc6, 0x9a, 0xb1, 0xe4, - 0x87, 0xf9, 0xa7, 0x16, 0x80, 0x45, 0x02, 0xca, 0x84, 0xaf, 0xf1, 0xdb, 0xed, 0x1e, 0x76, 0x0e, - 0x78, 0xbd, 0xc0, 0x46, 0x11, 0x51, 0x96, 0xe8, 0x29, 0xd8, 0x83, 0x51, 0x24, 0x76, 0x48, 0xa3, - 0xa8, 0xfc, 0xd5, 0xb5, 0xba, 0x0a, 0x22, 0x2b, 0x03, 0x1d, 0xca, 0x5d, 0x8b, 0x0f, 0x33, 0x39, - 0x4d, 0x2e, 0x5b, 0xe7, 0xb4, 0xb3, 0xd0, 0x2d, 0xba, 0x82, 0x48, 0x05, 0xc2, 0x0f, 0x2e, 0xc2, - 0x62, 0x40, 0x5d, 0x6f, 0xe0, 0x11, 0x57, 0x38, 0x9a, 0x52, 0x65, 0x41, 0x03, 0xb9, 0x73, 0xa1, - 0x75, 0xe8, 0x92, 0x27, 0x8c, 0x84, 0xa9, 0x0f, 0x74, 0xad, 0x31, 0xc0, 0xfc, 0x1c, 0x40, 0x97, - 0xd1, 0x03, 0x8a, 0xb6, 0x60, 0x96, 0x33, 0xd7, 0x8f, 0x94, 0xeb, 0xe5, 0x47, 0xca, 0xb1, 0x19, - 0x2c, 0x89, 0x9a, 0x4d, 0x40, 0xd3, 0xf9, 0x04, 0xf4, 0x6d, 0x0b, 0x36, 0xd4, 0xe5, 0xd0, 0x23, - 0xf1, 0x3d, 0x7a, 0xc8, 0x2f, 0x0a, 0x0f, 0xa8, 0x64, 0x71, 0x22, 0x79, 0xf1, 0x1d, 0xe8, 0xbb, - 0x24, 0x61, 0x5e, 0x28, 0xca, 0x43, 0x5b, 0x9b, 0x3c, 0xc4, 0x01, 0x51, 0xc6, 0x5d, 0xcb, 0xcc, - 0xdf, 0x94, 0xd3, 0x3b, 0x38, 0x20, 0xe8, 0x2a, 0xac, 0x1c, 0x10, 0x12, 0xd9, 0x3e, 0x75, 0xb0, - 0x6f, 0xeb, 0x88, 0x53, 0xb7, 0x9f, 0x65, 0x3e, 0xf5, 0x11, 0x9f, 0xb9, 0x25, 0xa3, 0xce, 0x4c, - 0xe0, 0x85, 0x23, 0x34, 0x51, 0x59, 0x67, 0x1d, 0xba, 0x51, 0x4c, 0x1d, 0x92, 0x70, 0x8f, 0x6c, - 0x89, 0x43, 0x68, 0x0c, 0x40, 0xd7, 0x61, 0x25, 0xfd, 0xf8, 0x98, 0xc4, 0x0e, 0x09, 0x19, 0x7e, - 0x24, 0xdf, 0x22, 0xa7, 0xad, 0xaa, 0x29, 0xf3, 0x57, 0x2d, 0x30, 0x4b, 0x52, 0xef, 0xc4, 0x34, - 0x38, 0x41, 0x0b, 0x5e, 0x83, 0x55, 0x61, 0x87, 0x58, 0xb0, 0x1c, 0x1b, 0x42, 0x16, 0x29, 0xa7, - 0xf9, 0x9c, 0x94, 0xa6, 0x2d, 0x31, 0x84, 0x8b, 0x47, 0xae, 0xe9, 0x5f, 0x64, 0x8b, 0xbf, 0x2f, - 0xc0, 0xc2, 0x27, 0x43, 0x12, 0x8f, 0x32, 0x8f, 0x98, 0x09, 0x51, 0x5a, 0xe8, 0x57, 0xf8, 0x0c, - 0x84, 0xe7, 0xd1, 0x41, 0x4c, 0x03, 0x3b, 0x7d, 0xa8, 0x9f, 0x16, 0x28, 0x3d, 0x0e, 0xbc, 0x23, - 0x1f, 0xeb, 0xd1, 0xfb, 0x30, 0x37, 0xf0, 0x7c, 0x46, 0xe4, 0xd3, 0x78, 0x6f, 0xeb, 0xa5, 0xb2, - 0xbf, 0x67, 0x65, 0x6e, 0xde, 0x11, 0xc8, 0x96, 0x22, 0x42, 0x7b, 0xb0, 0xe2, 0x85, 0x91, 0x28, - 0xac, 0x62, 0x0f, 0xfb, 0xde, 0xd3, 0xf1, 0x33, 0x5c, 0x6f, 0xeb, 0xf5, 0x09, 0xbc, 0xee, 0x72, - 0xca, 0xdd, 0x2c, 0xa1, 0x85, 0xbc, 0x12, 0x0c, 0x11, 0x58, 0xa5, 0x43, 
0x56, 0x16, 0x32, 0x2b, - 0x84, 0x6c, 0x4d, 0x10, 0x72, 0x5f, 0x90, 0xe6, 0xa5, 0xac, 0xd0, 0x32, 0xd0, 0xd8, 0x81, 0x39, - 0xa9, 0x1c, 0xcf, 0x80, 0x03, 0x8f, 0xf8, 0xba, 0xb9, 0x20, 0x3f, 0x78, 0x90, 0xd3, 0x88, 0xc4, - 0x38, 0xd4, 0xc9, 0x4c, 0x7f, 0x72, 0xfc, 0x43, 0xec, 0x0f, 0x75, 0xbc, 0xc9, 0x0f, 0xe3, 0x8f, - 0xb3, 0x80, 0xca, 0x1a, 0xea, 0xb7, 0xc5, 0x98, 0x24, 0x3c, 0x41, 0x64, 0xb3, 0xe7, 0x52, 0x06, - 0x2e, 0x32, 0xe8, 0x0f, 0xa0, 0xeb, 0x24, 0x87, 0xb6, 0x30, 0x89, 0x90, 0xd9, 0xdb, 0x7a, 0xf7, - 0xd8, 0x26, 0xdd, 0xdc, 0xde, 0x7d, 0x28, 0xa0, 0x56, 0xc7, 0x49, 0x0e, 0xc5, 0x08, 0x7d, 0x0e, - 0xf0, 0x55, 0x42, 0x43, 0xc5, 0x59, 0x6e, 0xfc, 0x7b, 0xc7, 0xe7, 0xfc, 0xfd, 0xdd, 0xfb, 0x3b, - 0x92, 0x75, 0x97, 0xb3, 0x93, 0xbc, 0x1d, 0x58, 0x8c, 0x70, 0xfc, 0x78, 0x48, 0x98, 0x62, 0x2f, - 0x7d, 0xe1, 0x83, 0xe3, 0xb3, 0xff, 0x58, 0xb2, 0x91, 0x12, 0x16, 0xa2, 0xcc, 0x97, 0xf1, 0xed, - 0x34, 0x74, 0xb4, 0x5e, 0xbc, 0x36, 0x13, 0x1e, 0x2e, 0x5f, 0x28, 0x6c, 0x2f, 0x1c, 0x50, 0x65, - 0xd1, 0x53, 0x1c, 0x2e, 0x1f, 0x29, 0x44, 0x6e, 0xbf, 0x0c, 0xcb, 0x31, 0x71, 0x68, 0xec, 0xf2, - 0x1b, 0xac, 0x17, 0x78, 0xdc, 0xed, 0xe5, 0x5e, 0x2e, 0x49, 0xf8, 0x2d, 0x0d, 0x46, 0xaf, 0xc0, - 0x92, 0xd8, 0xf6, 0x0c, 0x66, 0x5b, 0xf3, 0x24, 0x7e, 0x06, 0xf1, 0x32, 0x2c, 0x3f, 0x1e, 0xf2, - 0xbc, 0xe1, 0xec, 0xe3, 0x18, 0x3b, 0x8c, 0xa6, 0x6f, 0x05, 0x4b, 0x02, 0xbe, 0x9d, 0x82, 0xd1, - 0x9b, 0xb0, 0x26, 0x51, 0x49, 0xe2, 0xe0, 0x28, 0xa5, 0x20, 0xb1, 0x2a, 0x25, 0x57, 0xc5, 0xec, - 0x6d, 0x31, 0xb9, 0xad, 0xe7, 0x90, 0x01, 0x1d, 0x87, 0x06, 0x01, 0x09, 0x59, 0x22, 0x0e, 0xb7, - 0xae, 0x95, 0x7e, 0xa3, 0x1b, 0x70, 0x0e, 0xfb, 0x3e, 0xfd, 0xda, 0x16, 0x94, 0xae, 0x5d, 0xd2, - 0x4e, 0x16, 0x96, 0x86, 0x40, 0xfa, 0x44, 0xe0, 0x58, 0x79, 0x45, 0x8d, 0x0b, 0xd0, 0x4d, 0xf7, - 0x91, 0xdf, 0x07, 0x32, 0x0e, 0x29, 0xc6, 0xc6, 0x29, 0x58, 0xc8, 0xee, 0x84, 0xf1, 0xb7, 0x36, - 0xac, 0x54, 0x04, 0x15, 0xfa, 0x02, 0x80, 0x7b, 0xab, 0x0c, 0x2d, 0xe5, 0xae, 0xff, 0x73, 0xfc, - 0xe0, 0xe4, 0xfe, 0x2a, 0xc1, 0x16, 0xf7, 0x7e, 0x39, 0x44, 0x3f, 0x82, 0x9e, 0xf0, 0x58, 0xc5, - 0x5d, 0xba, 0xec, 0xfb, 0xdf, 0x81, 0x3b, 0xd7, 0x55, 0xb1, 0x17, 0x31, 0x20, 0xc7, 0xc6, 0x5f, - 0x5a, 0xd0, 0x4d, 0x05, 0xf3, 0xdb, 0x8d, 0xdc, 0x28, 0xb1, 0xd7, 0x89, 0xbe, 0xdd, 0x08, 0xd8, - 0x1d, 0x01, 0xfa, 0x8f, 0x74, 0x25, 0xe3, 0x6d, 0x80, 0xb1, 0xfe, 0x95, 0x2a, 0xb4, 0x2a, 0x55, - 0x30, 0x2f, 0xc3, 0x22, 0xb7, 0xac, 0x47, 0xdc, 0x5d, 0x16, 0x7b, 0x91, 0x68, 0x93, 0x4a, 0x9c, - 0x44, 0x95, 0x87, 0xfa, 0x73, 0xeb, 0x0f, 0x06, 0x2c, 0x64, 0x9f, 0xc7, 0xd0, 0x97, 0xd0, 0xcb, - 0xb4, 0x83, 0xd1, 0x8b, 0xe5, 0x4d, 0x2b, 0xb7, 0x97, 0x8d, 0x97, 0x26, 0x60, 0xa9, 0x0a, 0x6a, - 0x0a, 0x85, 0x70, 0xba, 0xd4, 0x53, 0x45, 0x57, 0xca, 0xd4, 0x75, 0x1d, 0x5b, 0xe3, 0xd5, 0x46, - 0xb8, 0xa9, 0x3c, 0x06, 0x2b, 0x15, 0x4d, 0x52, 0xf4, 0xda, 0x04, 0x2e, 0xb9, 0x46, 0xad, 0x71, - 0xb5, 0x21, 0x76, 0x2a, 0xf5, 0x31, 0xa0, 0x72, 0x07, 0x15, 0xbd, 0x3a, 0x91, 0xcd, 0xb8, 0x43, - 0x6b, 0xbc, 0xd6, 0x0c, 0xb9, 0x56, 0x51, 0xd9, 0x5b, 0x9d, 0xa8, 0x68, 0xae, 0x7b, 0x3b, 0x51, - 0xd1, 0x42, 0xc3, 0x76, 0x0a, 0x1d, 0xc0, 0x72, 0xb1, 0xef, 0x8a, 0x2e, 0xd7, 0xfd, 0x27, 0x50, - 0x6a, 0xeb, 0x1a, 0x57, 0x9a, 0xa0, 0xa6, 0xc2, 0x08, 0x9c, 0xca, 0xf7, 0x39, 0xd1, 0x2b, 0x65, - 0xfa, 0xca, 0x4e, 0xaf, 0x71, 0x69, 0x32, 0x62, 0x56, 0xa7, 0x62, 0xef, 0xb3, 0x4a, 0xa7, 0x9a, - 0xc6, 0x6a, 0x95, 0x4e, 0x75, 0xad, 0x54, 0x73, 0x0a, 0xfd, 0x58, 0x37, 0xd4, 0x0a, 0x3d, 0x41, - 0xb4, 0x59, 0xc7, 0xa6, 0xba, 0x29, 0x69, 0x5c, 0x6b, 0x8c, 0xaf, 0x65, 0x5f, 0x6f, 0xf1, 0x58, - 
0xcf, 0xb4, 0x06, 0xab, 0x62, 0xbd, 0xdc, 0x6c, 0xac, 0x8a, 0xf5, 0xaa, 0xfe, 0xe2, 0x14, 0xda, - 0x83, 0xc5, 0x5c, 0xb3, 0x10, 0xbd, 0x5c, 0x47, 0x99, 0x7f, 0xdd, 0x33, 0x5e, 0x99, 0x88, 0x97, - 0xca, 0xb0, 0x75, 0xf6, 0x52, 0xe9, 0xaa, 0x76, 0x71, 0xf9, 0x7c, 0xf5, 0xf2, 0x24, 0xb4, 0x5c, - 0x28, 0x97, 0x5a, 0x8a, 0x95, 0xa1, 0x5c, 0xd7, 0xb2, 0xac, 0x0c, 0xe5, 0xfa, 0x2e, 0xe5, 0x14, - 0xfa, 0xa1, 0x2e, 0x70, 0x85, 0x23, 0x5c, 0xac, 0xa3, 0xce, 0xee, 0xfe, 0x8b, 0x47, 0x23, 0xa5, - 0xac, 0xbf, 0x86, 0xd5, 0xaa, 0x57, 0x28, 0x74, 0xb5, 0xaa, 0x6c, 0xae, 0x7d, 0xea, 0x32, 0x36, - 0x9b, 0xa2, 0xa7, 0x82, 0x3f, 0x85, 0x8e, 0x6e, 0xb9, 0xa1, 0x17, 0xca, 0xd4, 0x85, 0x26, 0xa5, - 0x61, 0x1e, 0x85, 0x92, 0x71, 0xe0, 0x40, 0xc7, 0xea, 0xb8, 0x17, 0x56, 0x1f, 0xab, 0xa5, 0xae, - 0x5d, 0x7d, 0xac, 0x96, 0x5b, 0x6b, 0x42, 0x5c, 0xea, 0x0c, 0xd9, 0xd6, 0x51, 0xbd, 0x33, 0x54, - 0x74, 0xc6, 0xea, 0x9d, 0xa1, 0xb2, 0x1b, 0x35, 0x85, 0x7e, 0x02, 0x6b, 0xd5, 0x1d, 0x23, 0x54, - 0x1b, 0xf1, 0x35, 0x9d, 0x2b, 0xe3, 0x7a, 0x73, 0x82, 0x54, 0xfc, 0x53, 0x9d, 0x9f, 0x0a, 0x1d, - 0xa3, 0xfa, 0xfc, 0x54, 0xdd, 0xb7, 0x32, 0xae, 0x35, 0xc6, 0x2f, 0x87, 0x5e, 0xb6, 0xa5, 0x52, - 0x6f, 0xed, 0x8a, 0x2e, 0x54, 0xbd, 0xb5, 0x2b, 0xbb, 0x34, 0x22, 0x3e, 0xaa, 0xda, 0x25, 0x55, - 0xf1, 0x71, 0x44, 0x3f, 0xc7, 0xd8, 0x6c, 0x8a, 0x9e, 0x3b, 0xbe, 0xcb, 0xfd, 0x10, 0x34, 0x71, - 0xfd, 0xb9, 0xcc, 0x7c, 0xb5, 0x21, 0x76, 0xfd, 0xee, 0xea, 0x4c, 0x3d, 0x51, 0x81, 0x42, 0xc6, - 0xbe, 0xd6, 0x18, 0x3f, 0x95, 0x1d, 0xe9, 0x9f, 0x31, 0x32, 0xbd, 0x0c, 0x74, 0x65, 0x02, 0x9f, - 0x4c, 0x2f, 0xc6, 0x78, 0xb5, 0x11, 0x6e, 0x55, 0xf4, 0x66, 0xbb, 0x0b, 0x47, 0xf9, 0x53, 0xa9, - 0x25, 0x72, 0x94, 0x3f, 0x55, 0x34, 0x2c, 0x2a, 0xa2, 0x57, 0x37, 0x15, 0x26, 0x47, 0x6f, 0xa1, - 0xb9, 0x31, 0x39, 0x7a, 0x4b, 0xfd, 0x8a, 0x29, 0xf4, 0xb3, 0x71, 0x93, 0xbe, 0xfc, 0x08, 0x88, - 0xb6, 0x6a, 0x53, 0x51, 0xed, 0xdb, 0xa7, 0xf1, 0xc6, 0xb1, 0x68, 0x32, 0xc6, 0xff, 0x65, 0x4b, - 0x77, 0xfc, 0x2a, 0x5f, 0xe1, 0xd0, 0x9b, 0x0d, 0x18, 0x97, 0x1e, 0x12, 0x8d, 0xb7, 0x8e, 0x49, - 0x95, 0x59, 0xd0, 0x47, 0x30, 0x2b, 0xaa, 0x4f, 0x74, 0xfe, 0xe8, 0xb2, 0xd4, 0xb8, 0x50, 0x3d, - 0x9f, 0x16, 0x57, 0x9c, 0xdb, 0xde, 0x9c, 0xf8, 0x1d, 0xf7, 0x8d, 0x7f, 0x06, 0x00, 0x00, 0xff, - 0xff, 0x96, 0x31, 0x6f, 0x58, 0xa5, 0x2b, 0x00, 0x00, + // 2959 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xd4, 0x5a, 0x4b, 0x73, 0xdc, 0xc6, + 0x11, 0xe6, 0x72, 0xf9, 0xd8, 0xed, 0x5d, 0x8a, 0xd4, 0x90, 0xa6, 0xd6, 0x20, 0x25, 0xd1, 0x90, + 0x1f, 0xa4, 0x6c, 0x91, 0x32, 0x6d, 0xc7, 0x8e, 0x1d, 0x3b, 0x91, 0x28, 0x29, 0x51, 0x6c, 0x51, + 0x36, 0x28, 0xcb, 0x4e, 0xec, 0x0a, 0x0a, 0x04, 0x66, 0x45, 0x98, 0x00, 0x06, 0x02, 0x66, 0x69, + 0xae, 0xca, 0x39, 0x39, 0x87, 0x54, 0xa5, 0x92, 0x43, 0x2a, 0x97, 0x9c, 0x73, 0xf7, 0x35, 0x7f, + 0xc1, 0x7f, 0x20, 0x55, 0x39, 0xe5, 0x92, 0x73, 0x0e, 0xb9, 0xa5, 0x2a, 0x97, 0xd4, 0xbc, 0xb0, + 0x78, 0x72, 0x41, 0x8b, 0xa9, 0x54, 0x6e, 0x83, 0x9e, 0x9e, 0xee, 0x99, 0x9e, 0xee, 0x9e, 0xe9, + 0xf9, 0x00, 0x8b, 0x47, 0xc4, 0x1b, 0xf8, 0xd8, 0x8c, 0x71, 0x74, 0x84, 0xa3, 0xcd, 0x30, 0x22, + 0x94, 0xa0, 0x85, 0x0c, 0xd1, 0x0c, 0xf7, 0xf5, 0x2d, 0x40, 0x37, 0x2d, 0x6a, 0x1f, 0xdc, 0xc2, + 0x1e, 0xa6, 0xd8, 0xc0, 0x8f, 0x07, 0x38, 0xa6, 0xe8, 0x59, 0x68, 0xf5, 0x5d, 0x0f, 0x9b, 0xae, + 0x13, 0xf7, 0x1a, 0x6b, 0xcd, 0xf5, 0xb6, 0x31, 0xcb, 0xbe, 0xef, 0x3a, 0xb1, 0x7e, 0x1f, 0x16, + 0x33, 0x03, 0xe2, 0x90, 0x04, 0x31, 0x46, 0x6f, 0xc1, 0x6c, 0x84, 0xe3, 0x81, 0x47, 0xc5, 0x80, + 0xce, 0xf6, 0xa5, 
0xcd, 0xbc, 0xae, 0xcd, 0x64, 0xc8, 0xc0, 0xa3, 0x86, 0x62, 0xd7, 0xbf, 0x6e, + 0x40, 0x37, 0xdd, 0x83, 0x2e, 0xc0, 0xac, 0x54, 0xde, 0x6b, 0xac, 0x35, 0xd6, 0xdb, 0xc6, 0x8c, + 0xd0, 0x8d, 0x96, 0x61, 0x26, 0xa6, 0x16, 0x1d, 0xc4, 0xbd, 0xc9, 0xb5, 0xc6, 0xfa, 0xb4, 0x21, + 0xbf, 0xd0, 0x12, 0x4c, 0xe3, 0x28, 0x22, 0x51, 0xaf, 0xc9, 0xd9, 0xc5, 0x07, 0x42, 0x30, 0x15, + 0xbb, 0x4f, 0x70, 0x6f, 0x6a, 0xad, 0xb1, 0x3e, 0x67, 0xf0, 0x36, 0xea, 0xc1, 0xec, 0x11, 0x8e, + 0x62, 0x97, 0x04, 0xbd, 0x69, 0x4e, 0x56, 0x9f, 0xfa, 0x2c, 0x4c, 0xdf, 0xf6, 0x43, 0x3a, 0xd4, + 0xdf, 0x84, 0xde, 0x43, 0xcb, 0x1e, 0x0c, 0xfc, 0x87, 0x7c, 0xfa, 0x3b, 0x07, 0xd8, 0x3e, 0x54, + 0x66, 0x59, 0x81, 0xb6, 0x5c, 0x94, 0x9c, 0xdb, 0x9c, 0xd1, 0x12, 0x84, 0xbb, 0x8e, 0xfe, 0x23, + 0x78, 0xb6, 0x64, 0xa0, 0x34, 0xcf, 0x15, 0x98, 0x7b, 0x64, 0x45, 0xfb, 0xd6, 0x23, 0x6c, 0x46, + 0x16, 0x75, 0x09, 0x1f, 0xdd, 0x30, 0xba, 0x92, 0x68, 0x30, 0x9a, 0xfe, 0x19, 0x68, 0x19, 0x09, + 0xc4, 0x0f, 0x2d, 0x9b, 0xd6, 0x51, 0x8e, 0xd6, 0xa0, 0x13, 0x46, 0xd8, 0xf2, 0x3c, 0x62, 0x5b, + 0x14, 0x73, 0xfb, 0x34, 0x8d, 0x34, 0x49, 0xbf, 0x08, 0x2b, 0xa5, 0xc2, 0xc5, 0x04, 0xf5, 0xb7, + 0x72, 0xb3, 0x27, 0xbe, 0xef, 0xd6, 0x52, 0xad, 0xaf, 0x16, 0x66, 0xcd, 0x47, 0x4a, 0xb9, 0xdf, + 0xcf, 0xf5, 0x7a, 0xd8, 0x0a, 0x06, 0x61, 0x2d, 0xc1, 0xf9, 0x19, 0xab, 0xa1, 0x89, 0xe4, 0x0b, + 0xc2, 0x6d, 0x76, 0x88, 0xe7, 0x61, 0x9b, 0xba, 0x24, 0x50, 0x62, 0x2f, 0x01, 0xd8, 0x09, 0x51, + 0x3a, 0x51, 0x8a, 0xa2, 0x6b, 0xd0, 0x2b, 0x0e, 0x95, 0x62, 0xff, 0xd6, 0x80, 0x67, 0x6e, 0x48, + 0xa3, 0x09, 0xc5, 0xb5, 0x36, 0x20, 0xab, 0x72, 0x32, 0xaf, 0x32, 0xbf, 0x41, 0xcd, 0xc2, 0x06, + 0x31, 0x8e, 0x08, 0x87, 0x9e, 0x6b, 0x5b, 0x5c, 0xc4, 0x14, 0x17, 0x91, 0x26, 0xa1, 0x05, 0x68, + 0x52, 0xea, 0x71, 0xcf, 0x6d, 0x1b, 0xac, 0x89, 0xb6, 0x61, 0xd9, 0xc7, 0x3e, 0x89, 0x86, 0xa6, + 0x6f, 0x85, 0xa6, 0x6f, 0x1d, 0x9b, 0xcc, 0xcd, 0x4d, 0x7f, 0xbf, 0x37, 0xc3, 0xe7, 0x87, 0x44, + 0xef, 0x3d, 0x2b, 0xbc, 0x67, 0x1d, 0xef, 0xb9, 0x4f, 0xf0, 0xbd, 0x7d, 0xbd, 0x07, 0xcb, 0xf9, + 0xf5, 0xc9, 0xa5, 0x7f, 0x0f, 0x2e, 0x08, 0xca, 0xde, 0x30, 0xb0, 0xf7, 0x78, 0x6c, 0xd5, 0xda, + 0xa8, 0x7f, 0x37, 0xa0, 0x57, 0x1c, 0x28, 0x3d, 0xff, 0x69, 0xad, 0x76, 0x6a, 0x9b, 0x5c, 0x86, + 0x0e, 0xb5, 0x5c, 0xcf, 0x24, 0xfd, 0x7e, 0x8c, 0x29, 0x37, 0xc4, 0x94, 0x01, 0x8c, 0x74, 0x9f, + 0x53, 0xd0, 0x06, 0x2c, 0xd8, 0xc2, 0xfb, 0xcd, 0x08, 0x1f, 0xb9, 0x3c, 0x1b, 0xcc, 0xf2, 0x89, + 0xcd, 0xdb, 0x2a, 0x2a, 0x04, 0x19, 0xe9, 0x30, 0xe7, 0x3a, 0xc7, 0x26, 0x4f, 0x47, 0x3c, 0x99, + 0xb4, 0xb8, 0xb4, 0x8e, 0xeb, 0x1c, 0xdf, 0x71, 0x3d, 0xcc, 0x2c, 0xaa, 0x3f, 0x84, 0x55, 0xb1, + 0xf8, 0xbb, 0x81, 0x1d, 0x61, 0x1f, 0x07, 0xd4, 0xf2, 0x76, 0x48, 0x38, 0xac, 0xe5, 0x36, 0xcf, + 0x42, 0x2b, 0x76, 0x03, 0x1b, 0x9b, 0x81, 0x48, 0x6a, 0x53, 0xc6, 0x2c, 0xff, 0xde, 0x8d, 0xf5, + 0x9b, 0x70, 0xb1, 0x42, 0xae, 0xb4, 0xec, 0x73, 0xd0, 0xe5, 0x13, 0xb3, 0x49, 0x40, 0x71, 0x40, + 0xb9, 0xec, 0xae, 0xd1, 0x61, 0xb4, 0x1d, 0x41, 0xd2, 0x5f, 0x05, 0x24, 0x64, 0xdc, 0x23, 0x83, + 0xa0, 0x5e, 0x38, 0x3f, 0x03, 0x8b, 0x99, 0x21, 0xd2, 0x37, 0x5e, 0x83, 0x25, 0x41, 0xfe, 0x38, + 0xf0, 0x6b, 0xcb, 0xba, 0x00, 0xcf, 0xe4, 0x06, 0x49, 0x69, 0xdb, 0x4a, 0x49, 0xf6, 0xd8, 0x39, + 0x51, 0xd8, 0xb2, 0x9a, 0x41, 0xf6, 0xe4, 0xe1, 0x99, 0x4b, 0x4c, 0xd8, 0x8a, 0x0e, 0x0d, 0x6c, + 0x39, 0x24, 0xf0, 0x86, 0xb5, 0x33, 0x57, 0xc9, 0x48, 0x29, 0xf7, 0x13, 0x58, 0x56, 0x19, 0x2d, + 0xe8, 0xbb, 0x8f, 0x06, 0x11, 0xae, 0x9b, 0x89, 0xd3, 0x2e, 0x3b, 0x59, 0x70, 0x59, 0x7d, 0x4b, + 0x85, 0x59, 0x4a, 0xb0, 0xdc, 0xd2, 0xe4, 
0x24, 0x6b, 0xa4, 0x4e, 0x32, 0xfd, 0x9b, 0x06, 0x9c, + 0x57, 0x23, 0x6a, 0xfa, 0xd5, 0x29, 0x03, 0xab, 0x59, 0x19, 0x58, 0x53, 0xa3, 0xc0, 0x5a, 0x87, + 0x85, 0x98, 0x0c, 0x22, 0x1b, 0x9b, 0x8e, 0x45, 0x2d, 0x33, 0x20, 0x0e, 0x96, 0x71, 0x77, 0x4e, + 0xd0, 0x6f, 0x59, 0xd4, 0xda, 0x25, 0x0e, 0xd6, 0x7f, 0xa8, 0xdc, 0x2e, 0xe3, 0xaf, 0x1b, 0x70, + 0xde, 0xb3, 0x62, 0x6a, 0x5a, 0x61, 0x88, 0x03, 0xc7, 0xb4, 0x28, 0x73, 0xfa, 0x06, 0x77, 0xfa, + 0x73, 0xac, 0xe3, 0x06, 0xa7, 0xdf, 0xa0, 0xbb, 0xb1, 0xfe, 0x87, 0x49, 0x98, 0x67, 0x63, 0x59, + 0x90, 0xd5, 0x5a, 0xef, 0x02, 0x34, 0xf1, 0x31, 0x95, 0x0b, 0x65, 0x4d, 0xb4, 0x05, 0x8b, 0x32, + 0x9a, 0x5d, 0x12, 0x8c, 0x02, 0xbd, 0x29, 0xf2, 0xe2, 0xa8, 0x2b, 0x89, 0xf5, 0xcb, 0xd0, 0x89, + 0x29, 0x09, 0x55, 0xde, 0x98, 0x12, 0x79, 0x83, 0x91, 0x64, 0xde, 0xc8, 0xda, 0x74, 0xba, 0xc4, + 0xa6, 0x5d, 0x37, 0x36, 0xb1, 0x6d, 0x8a, 0x59, 0xf1, 0xcc, 0xd3, 0x32, 0xc0, 0x8d, 0x6f, 0xdb, + 0xc2, 0x1a, 0xe8, 0x3d, 0x58, 0x75, 0x1f, 0x05, 0x24, 0xc2, 0xa6, 0x34, 0x24, 0x8f, 0xdf, 0x80, + 0x50, 0xb3, 0x4f, 0x06, 0x81, 0xc3, 0xb3, 0x50, 0xcb, 0xe8, 0x09, 0x9e, 0x3d, 0xce, 0xc2, 0x2c, + 0xb0, 0x4b, 0xe8, 0x1d, 0xd6, 0xaf, 0xbf, 0x01, 0x0b, 0x23, 0xab, 0xd4, 0xcf, 0x02, 0x5f, 0x37, + 0x94, 0xc7, 0x3d, 0xb0, 0x5c, 0x6f, 0x0f, 0x07, 0x0e, 0x8e, 0x9e, 0x32, 0x3b, 0xa1, 0xeb, 0xb0, + 0xe4, 0x3a, 0x1e, 0x36, 0xa9, 0xeb, 0x63, 0x32, 0xa0, 0x66, 0x8c, 0x6d, 0x12, 0x38, 0xb1, 0xb2, + 0x2f, 0xeb, 0x7b, 0x20, 0xba, 0xf6, 0x44, 0x8f, 0xfe, 0xab, 0xe4, 0x94, 0x48, 0xcf, 0x62, 0x74, + 0x3f, 0x0a, 0x30, 0x66, 0x02, 0x0f, 0xb0, 0xe5, 0xe0, 0x48, 0x2e, 0xa3, 0x2b, 0x88, 0x3f, 0xe1, + 0x34, 0xb6, 0x43, 0x92, 0x69, 0x9f, 0x38, 0x43, 0x3e, 0xa3, 0xae, 0x01, 0x82, 0x74, 0x93, 0x38, + 0x43, 0x9e, 0xae, 0x63, 0x93, 0x3b, 0x99, 0x7d, 0x30, 0x08, 0x0e, 0xf9, 0x6c, 0x5a, 0x46, 0xc7, + 0x8d, 0x3f, 0xb0, 0x62, 0xba, 0xc3, 0x48, 0xfa, 0x9f, 0x1b, 0x2a, 0x5f, 0xb0, 0x69, 0x18, 0xd8, + 0xc6, 0xee, 0xd1, 0xff, 0xc0, 0x1c, 0x6c, 0x84, 0x74, 0x82, 0xcc, 0x3d, 0x59, 0x06, 0x1c, 0x12, + 0x7d, 0xf2, 0x54, 0xe5, 0x3d, 0xa3, 0x74, 0x95, 0x9d, 0xb8, 0x4c, 0x57, 0x9f, 0xab, 0xe3, 0xe2, + 0xb6, 0xbd, 0x77, 0x60, 0x45, 0x4e, 0xfc, 0x63, 0x1c, 0xe0, 0xc8, 0xa2, 0x67, 0x72, 0x7d, 0xd1, + 0xd7, 0xe0, 0x52, 0x95, 0x74, 0xa9, 0xff, 0x33, 0x75, 0x0c, 0x2a, 0x0e, 0x03, 0xef, 0x0f, 0x5c, + 0xcf, 0x39, 0x13, 0xf5, 0xef, 0xe7, 0x17, 0x97, 0x08, 0x97, 0xfe, 0x73, 0x15, 0xce, 0x47, 0x9c, + 0x44, 0xcd, 0x98, 0x31, 0x24, 0x95, 0xcb, 0x9c, 0x31, 0x2f, 0x3b, 0xf8, 0x40, 0x56, 0xc1, 0xfc, + 0x66, 0x52, 0x79, 0x80, 0x92, 0x76, 0x66, 0x69, 0x75, 0x05, 0xda, 0x23, 0xf5, 0x4d, 0xae, 0xbe, + 0x15, 0x4b, 0xbd, 0xcc, 0x3b, 0x6d, 0x12, 0x0e, 0x4d, 0x6c, 0x8b, 0x1b, 0x05, 0xdf, 0xea, 0x96, + 0xd1, 0x61, 0xc4, 0xdb, 0x36, 0xbf, 0x50, 0xd4, 0xcf, 0xb1, 0x29, 0x69, 0x5f, 0x08, 0x69, 0x33, + 0x69, 0x69, 0x5f, 0x70, 0x69, 0x8a, 0xe7, 0xc8, 0xed, 0x0b, 0x9e, 0xd9, 0x11, 0xcf, 0x43, 0xb7, + 0xcf, 0x78, 0x46, 0x5e, 0x95, 0x35, 0x86, 0xdc, 0xd5, 0x2f, 0x61, 0x25, 0xdb, 0x5b, 0xff, 0xc0, + 0x7e, 0x2a, 0x63, 0xe9, 0x97, 0xf2, 0xee, 0x94, 0x3b, 0xf5, 0x8f, 0xf2, 0xd3, 0xae, 0x7d, 0xc3, + 0x79, 0xba, 0x79, 0x5d, 0xcc, 0x1b, 0x24, 0x7b, 0x4d, 0xfa, 0x34, 0x3f, 0xed, 0x53, 0x5c, 0x97, + 0x4e, 0x56, 0x7c, 0x39, 0x1f, 0x02, 0xf9, 0x3b, 0xd5, 0x1f, 0x93, 0xfc, 0x2a, 0x39, 0xd8, 0x8d, + 0xa6, 0x76, 0x5e, 0x93, 0x7a, 0xb9, 0x39, 0xe6, 0x8c, 0x59, 0xa9, 0x96, 0x95, 0xdc, 0xf2, 0x3c, + 0x14, 0x15, 0x8b, 0xfc, 0xca, 0x14, 0xd7, 0x4d, 0x59, 0x5c, 0xab, 0x47, 0x83, 0x43, 0x3c, 0xe4, + 0x3e, 0x3b, 0x25, 0x1e, 0x0d, 0xde, 0xc7, 0x43, 0x7d, 0x37, 0x17, 
0x71, 0x62, 0x6a, 0x32, 0x76, + 0x11, 0x4c, 0x31, 0x67, 0x97, 0x29, 0x9f, 0xb7, 0xd1, 0x45, 0x00, 0x37, 0x36, 0x1d, 0xbe, 0xe7, + 0x62, 0x52, 0x2d, 0xa3, 0xed, 0x4a, 0x27, 0x70, 0xf4, 0xdf, 0x36, 0x46, 0x02, 0x6f, 0x7a, 0x64, + 0xff, 0x0c, 0xbd, 0x32, 0xbd, 0x8a, 0x66, 0x66, 0x15, 0xe9, 0xd7, 0x83, 0xa9, 0xec, 0xeb, 0x41, + 0x2a, 0x88, 0xd2, 0xd3, 0xa9, 0x4a, 0xcd, 0x0f, 0xc8, 0xd9, 0x55, 0x96, 0xc5, 0xd4, 0x3c, 0x92, + 0x2e, 0xf5, 0xbf, 0x0d, 0x2b, 0xcc, 0xe0, 0x82, 0xca, 0xeb, 0x96, 0xfa, 0xb5, 0xdd, 0x3f, 0x26, + 0x61, 0xb5, 0x7c, 0x70, 0x9d, 0xfa, 0xee, 0x1d, 0xd0, 0x92, 0xfa, 0x89, 0x1d, 0x8d, 0x31, 0xb5, + 0xfc, 0x30, 0x39, 0x1c, 0xc5, 0x19, 0x7a, 0x41, 0x16, 0x53, 0x0f, 0x54, 0xbf, 0x3a, 0x21, 0x0b, + 0xc5, 0x57, 0xb3, 0x50, 0x7c, 0x31, 0x05, 0x8e, 0x45, 0xab, 0x14, 0x88, 0x3b, 0xdc, 0x05, 0xc7, + 0xa2, 0x55, 0x0a, 0x92, 0xc1, 0x5c, 0x81, 0xf0, 0xda, 0x8e, 0xe4, 0xe7, 0x0a, 0x2e, 0x02, 0xc8, + 0xeb, 0xd5, 0x20, 0x50, 0xc5, 0x64, 0x5b, 0x5c, 0xae, 0x06, 0x41, 0xe5, 0x2d, 0x73, 0xb6, 0xf2, + 0x96, 0x99, 0xdd, 0xcd, 0x56, 0x61, 0x37, 0x3f, 0x05, 0xb8, 0xe5, 0xc6, 0x87, 0xc2, 0xc8, 0xec, + 0x5a, 0xeb, 0xb8, 0xaa, 0x1a, 0x60, 0x4d, 0x46, 0xb1, 0x3c, 0x4f, 0x9a, 0x8e, 0x35, 0x59, 0xf8, + 0x0c, 0x62, 0xec, 0x48, 0xeb, 0xf0, 0x36, 0xa3, 0xf5, 0x23, 0x8c, 0xa5, 0x01, 0x78, 0x5b, 0xff, + 0x53, 0x03, 0xda, 0xf7, 0xb0, 0x2f, 0x25, 0x5f, 0x02, 0x78, 0x44, 0x22, 0x32, 0xa0, 0x6e, 0x80, + 0xc5, 0x2d, 0x7c, 0xda, 0x48, 0x51, 0xbe, 0xbb, 0x1e, 0x9e, 0x1a, 0xb0, 0xd7, 0x97, 0xc6, 0xe4, + 0x6d, 0x46, 0x3b, 0xc0, 0x56, 0x28, 0xed, 0xc7, 0xdb, 0xac, 0xd6, 0x89, 0xa9, 0x65, 0x1f, 0x72, + 0x63, 0x4d, 0x19, 0xe2, 0x43, 0xff, 0x6b, 0x03, 0xc0, 0xc0, 0x3e, 0xa1, 0xdc, 0xd7, 0xd8, 0xed, + 0x76, 0xdf, 0xb2, 0x0f, 0x59, 0xbd, 0x40, 0x87, 0x21, 0x96, 0x96, 0xe8, 0x48, 0xda, 0x83, 0x61, + 0xc8, 0x77, 0x48, 0xb1, 0xc8, 0xfc, 0xd5, 0x36, 0xda, 0x92, 0x22, 0x2a, 0x03, 0x15, 0xca, 0x6d, + 0x83, 0x35, 0x53, 0x39, 0x4d, 0x4c, 0x5b, 0xe5, 0xb4, 0x15, 0x68, 0xe7, 0x5d, 0x81, 0xa7, 0x02, + 0xee, 0x07, 0x57, 0x60, 0xce, 0x27, 0x8e, 0xdb, 0x77, 0xb1, 0xc3, 0x1d, 0x4d, 0x2e, 0xa5, 0xab, + 0x88, 0xcc, 0xb9, 0xd0, 0x2a, 0xb4, 0xf1, 0x31, 0xc5, 0x41, 0xe2, 0x03, 0x6d, 0x63, 0x44, 0xd0, + 0xbf, 0x02, 0x50, 0x05, 0x7d, 0x9f, 0xa0, 0x6d, 0x98, 0x66, 0xc2, 0xd5, 0x73, 0xe9, 0x6a, 0xf1, + 0xb9, 0x74, 0x64, 0x06, 0x43, 0xb0, 0xa6, 0x13, 0xd0, 0x64, 0x26, 0x01, 0x8d, 0xaf, 0xe7, 0xf4, + 0x6f, 0x1b, 0xb0, 0x26, 0xaf, 0x8f, 0x2e, 0x8e, 0xee, 0x91, 0x23, 0x76, 0x95, 0x78, 0x40, 0x84, + 0x92, 0x33, 0xc9, 0x9c, 0x6f, 0x41, 0xcf, 0xc1, 0x31, 0x75, 0x03, 0xae, 0xd0, 0x54, 0x9b, 0x12, + 0x58, 0x3e, 0x96, 0x13, 0x5a, 0x4e, 0xf5, 0xdf, 0x14, 0xdd, 0xbb, 0x96, 0x8f, 0xd1, 0x35, 0x58, + 0x3c, 0xc4, 0x38, 0x34, 0x3d, 0x62, 0x5b, 0x9e, 0xa9, 0x62, 0x52, 0xde, 0x8f, 0x16, 0x58, 0xd7, + 0x07, 0xac, 0xe7, 0x96, 0x88, 0x4b, 0x3d, 0x86, 0xe7, 0x4e, 0x58, 0x89, 0xcc, 0x4b, 0xab, 0xd0, + 0x0e, 0x23, 0x62, 0xe3, 0x98, 0xf9, 0x6c, 0x83, 0x1f, 0x53, 0x23, 0x02, 0xba, 0x0e, 0x8b, 0xc9, + 0xc7, 0x87, 0x38, 0xb2, 0x71, 0x40, 0xad, 0x47, 0xe2, 0xdd, 0x74, 0xd2, 0x28, 0xeb, 0xd2, 0x7f, + 0xdf, 0x00, 0xbd, 0xa0, 0xf5, 0x4e, 0x44, 0xfc, 0x33, 0xb4, 0xe0, 0x16, 0x2c, 0x71, 0x3b, 0x44, + 0x5c, 0xe4, 0xc8, 0x10, 0xa2, 0x8c, 0x39, 0xcf, 0xfa, 0x84, 0x36, 0x65, 0x89, 0x01, 0x5c, 0x39, + 0x71, 0x4e, 0xff, 0x25, 0x5b, 0xfc, 0xab, 0x0b, 0xdd, 0x8f, 0x06, 0x38, 0x1a, 0xa6, 0x1e, 0x5c, + 0x63, 0x2c, 0x57, 0xa1, 0x10, 0x83, 0x14, 0x85, 0x65, 0xda, 0x7e, 0x44, 0x7c, 0x33, 0x01, 0x15, + 0x26, 0x39, 0x4b, 0x87, 0x11, 0xef, 0x08, 0x60, 0x01, 0xbd, 0x0b, 0x33, 0x7d, 0xd7, 0xa3, 
0x58, + 0x3c, 0xe3, 0x77, 0xb6, 0x5f, 0x28, 0x46, 0x44, 0x5a, 0xe7, 0xe6, 0x1d, 0xce, 0x6c, 0xc8, 0x41, + 0x68, 0x1f, 0x16, 0xdd, 0x20, 0xe4, 0xa5, 0x57, 0xe4, 0x5a, 0x9e, 0xfb, 0x64, 0xf4, 0x64, 0xd8, + 0xd9, 0x7e, 0x75, 0x8c, 0xac, 0xbb, 0x6c, 0xe4, 0x5e, 0x7a, 0xa0, 0x81, 0xdc, 0x02, 0x0d, 0x61, + 0x58, 0x22, 0x03, 0x5a, 0x54, 0x32, 0xcd, 0x95, 0x6c, 0x8f, 0x51, 0x72, 0x9f, 0x0f, 0xcd, 0x6a, + 0x59, 0x24, 0x45, 0xa2, 0xb6, 0x0b, 0x33, 0x62, 0x71, 0x2c, 0x47, 0xf6, 0x5d, 0xec, 0x29, 0x20, + 0x44, 0x7c, 0xb0, 0x34, 0x40, 0x42, 0x1c, 0x59, 0x81, 0x4a, 0x77, 0xea, 0x93, 0xf1, 0x1f, 0x59, + 0xde, 0x40, 0xc5, 0x9b, 0xf8, 0xd0, 0xfe, 0x32, 0x0d, 0xa8, 0xb8, 0x42, 0xf5, 0x0e, 0x1a, 0xe1, + 0x98, 0xa5, 0x90, 0x74, 0x7e, 0x9d, 0x4f, 0xd1, 0x79, 0x8e, 0xfd, 0x04, 0xda, 0x76, 0x7c, 0x64, + 0x72, 0x93, 0x70, 0x9d, 0x9d, 0xed, 0xb7, 0x4f, 0x6d, 0xd2, 0xcd, 0x9d, 0xbd, 0x87, 0x9c, 0x6a, + 0xb4, 0xec, 0xf8, 0x88, 0xb7, 0xd0, 0xcf, 0x01, 0xbe, 0x88, 0x49, 0x20, 0x25, 0x8b, 0x8d, 0x7f, + 0xe7, 0xf4, 0x92, 0x7f, 0xba, 0x77, 0x7f, 0x57, 0x88, 0x6e, 0x33, 0x71, 0x42, 0xb6, 0x0d, 0x73, + 0xa1, 0x15, 0x3d, 0x1e, 0x60, 0x2a, 0xc5, 0x0b, 0x5f, 0x78, 0xef, 0xf4, 0xe2, 0x3f, 0x14, 0x62, + 0x84, 0x86, 0x6e, 0x98, 0xfa, 0xd2, 0xbe, 0x9d, 0x84, 0x96, 0x5a, 0x17, 0xab, 0xde, 0xb8, 0x87, + 0x8b, 0x37, 0x0c, 0xd3, 0x0d, 0xfa, 0x44, 0x5a, 0xf4, 0x1c, 0xa3, 0x8b, 0x67, 0x0c, 0x9e, 0xfd, + 0x37, 0x60, 0x21, 0xc2, 0x36, 0x89, 0x1c, 0x76, 0xc7, 0x75, 0x7d, 0x97, 0xb9, 0xbd, 0xd8, 0xcb, + 0x79, 0x41, 0xbf, 0xa5, 0xc8, 0xe8, 0x25, 0x98, 0xe7, 0xdb, 0x9e, 0xe2, 0x6c, 0x2a, 0x99, 0xd8, + 0x4b, 0x31, 0x6e, 0xc0, 0xc2, 0xe3, 0x01, 0xcb, 0x1b, 0xf6, 0x81, 0x15, 0x59, 0x36, 0x25, 0xc9, + 0x6b, 0xc2, 0x3c, 0xa7, 0xef, 0x24, 0x64, 0xf4, 0x3a, 0x2c, 0x0b, 0x56, 0x1c, 0xdb, 0x56, 0x98, + 0x8c, 0xc0, 0x91, 0x2c, 0x36, 0x97, 0x78, 0xef, 0x6d, 0xde, 0xb9, 0xa3, 0xfa, 0x90, 0x06, 0x2d, + 0x9b, 0xf8, 0x3e, 0x0e, 0x68, 0xcc, 0x8f, 0xbf, 0xb6, 0x91, 0x7c, 0xa3, 0x1b, 0x70, 0xd1, 0xf2, + 0x3c, 0xf2, 0xa5, 0xc9, 0x47, 0x3a, 0x66, 0x61, 0x75, 0xa2, 0xf4, 0xd4, 0x38, 0xd3, 0x47, 0x9c, + 0xc7, 0xc8, 0x2e, 0x54, 0xbb, 0x0c, 0xed, 0x64, 0x1f, 0xd9, 0x8d, 0x21, 0xe5, 0x90, 0xbc, 0xad, + 0x9d, 0x83, 0x6e, 0x7a, 0x27, 0xb4, 0x7f, 0x36, 0x61, 0xb1, 0x24, 0xa8, 0xd0, 0x67, 0x00, 0xcc, + 0x5b, 0x45, 0x68, 0x49, 0x77, 0xfd, 0xc1, 0xe9, 0x83, 0x93, 0xf9, 0xab, 0x20, 0x1b, 0xcc, 0xfb, + 0x45, 0x13, 0xfd, 0x02, 0x3a, 0xdc, 0x63, 0xa5, 0x74, 0xe1, 0xb2, 0xef, 0x7e, 0x07, 0xe9, 0x6c, + 0xad, 0x52, 0x3c, 0x8f, 0x01, 0xd1, 0xd6, 0xfe, 0xde, 0x80, 0x76, 0xa2, 0x98, 0xdd, 0x7f, 0xc4, + 0x46, 0xf1, 0xbd, 0x8e, 0xd5, 0xfd, 0x87, 0xd3, 0xee, 0x70, 0xd2, 0xff, 0xa5, 0x2b, 0x69, 0x6f, + 0x02, 0x8c, 0xd6, 0x5f, 0xba, 0x84, 0x46, 0xe9, 0x12, 0xf4, 0x0d, 0x98, 0x63, 0x96, 0x75, 0xb1, + 0xb3, 0x47, 0x23, 0x37, 0xe4, 0x90, 0xae, 0xe0, 0x89, 0x65, 0x01, 0xa9, 0x3e, 0xb7, 0xbf, 0x59, + 0x81, 0x6e, 0xfa, 0x01, 0x0d, 0x7d, 0x0e, 0x9d, 0x14, 0x74, 0x8d, 0x9e, 0x2f, 0x6e, 0x5a, 0x11, + 0x0a, 0xd7, 0x5e, 0x18, 0xc3, 0x25, 0x6b, 0xac, 0x09, 0x14, 0xc0, 0xf9, 0x02, 0xfe, 0x8b, 0xae, + 0x16, 0x47, 0x57, 0xa1, 0xcb, 0xda, 0xcb, 0xb5, 0x78, 0x13, 0x7d, 0x14, 0x16, 0x4b, 0x00, 0x5d, + 0xf4, 0xca, 0x18, 0x29, 0x19, 0x50, 0x59, 0xbb, 0x56, 0x93, 0x3b, 0xd1, 0xfa, 0x18, 0x50, 0x11, + 0xed, 0x45, 0x2f, 0x8f, 0x15, 0x33, 0x42, 0x93, 0xb5, 0x57, 0xea, 0x31, 0x57, 0x2e, 0x54, 0xe0, + 0xc0, 0x63, 0x17, 0x9a, 0x41, 0x9a, 0xc7, 0x2e, 0x34, 0x07, 0x2e, 0x4f, 0xa0, 0x43, 0x58, 0xc8, + 0x63, 0xc4, 0x68, 0xa3, 0xea, 0x9f, 0x86, 0x02, 0x04, 0xad, 0x5d, 0xad, 0xc3, 0x9a, 0x28, 0xc3, + 0x70, 0x2e, 0x8b, 
0xc9, 0xa2, 0x97, 0x8a, 0xe3, 0x4b, 0x51, 0x69, 0x6d, 0x7d, 0x3c, 0x63, 0x7a, + 0x4d, 0x79, 0x9c, 0xb6, 0x6c, 0x4d, 0x15, 0x20, 0x70, 0xd9, 0x9a, 0xaa, 0x60, 0x5f, 0x7d, 0x02, + 0x7d, 0xa5, 0xc0, 0xbf, 0x1c, 0x7e, 0x89, 0x36, 0xab, 0xc4, 0x94, 0x03, 0xa8, 0xda, 0x56, 0x6d, + 0x7e, 0xa5, 0xfb, 0x7a, 0x83, 0xc5, 0x7a, 0x0a, 0xc6, 0x2c, 0x8b, 0xf5, 0x22, 0x30, 0x5a, 0x16, + 0xeb, 0x65, 0x58, 0xe8, 0x04, 0xda, 0x87, 0xb9, 0x0c, 0xb0, 0x89, 0x5e, 0xac, 0x1a, 0x99, 0x7d, + 0xff, 0xd3, 0x5e, 0x1a, 0xcb, 0x97, 0xe8, 0x30, 0x55, 0xf6, 0x92, 0xe9, 0xaa, 0x72, 0x72, 0xd9, + 0x7c, 0xf5, 0xe2, 0x38, 0xb6, 0x4c, 0x28, 0x17, 0xe0, 0xcf, 0xd2, 0x50, 0xae, 0x82, 0x57, 0x4b, + 0x43, 0xb9, 0x1a, 0x51, 0x9d, 0x40, 0x07, 0x30, 0x9f, 0x83, 0x3e, 0xd1, 0x7a, 0x95, 0x88, 0x3c, + 0xec, 0xaa, 0x6d, 0xd4, 0xe0, 0x4c, 0x34, 0xfd, 0x4c, 0x15, 0xdb, 0xdc, 0xe5, 0xae, 0x54, 0x0f, + 0x1d, 0xf9, 0xd9, 0xf3, 0x27, 0x33, 0x25, 0xa2, 0xbf, 0x84, 0xa5, 0xb2, 0x17, 0x31, 0x74, 0xad, + 0xac, 0x84, 0xaf, 0x7c, 0x76, 0xd3, 0x36, 0xeb, 0xb2, 0x27, 0x8a, 0x3f, 0x86, 0x96, 0x82, 0xff, + 0xd0, 0x73, 0xc5, 0xd1, 0x39, 0xc0, 0x54, 0xd3, 0x4f, 0x62, 0x49, 0x85, 0x8a, 0xaf, 0xb2, 0xc2, + 0x08, 0x97, 0xab, 0xce, 0x0a, 0x05, 0x04, 0xb1, 0x3a, 0x2b, 0x14, 0x61, 0x3e, 0xae, 0x2e, 0x71, + 0xbb, 0x34, 0x8c, 0x55, 0xed, 0x76, 0x25, 0x28, 0x5d, 0xb5, 0xdb, 0x95, 0x22, 0x63, 0x13, 0xe8, + 0x97, 0x0a, 0xca, 0xcf, 0xa3, 0x57, 0xa8, 0x32, 0xb7, 0x54, 0xa0, 0x68, 0xda, 0xf5, 0xfa, 0x03, + 0x12, 0xf5, 0x4f, 0x54, 0x26, 0xcc, 0xa1, 0x57, 0xd5, 0x99, 0xb0, 0x1c, 0x43, 0xd3, 0xb6, 0x6a, + 0xf3, 0x17, 0x83, 0x3c, 0x0d, 0xef, 0x54, 0x5b, 0xbb, 0x04, 0x11, 0xab, 0xb6, 0x76, 0x29, 0x62, + 0xc4, 0xe3, 0xa3, 0x0c, 0xba, 0x29, 0x8b, 0x8f, 0x13, 0xb0, 0x25, 0x6d, 0xb3, 0x2e, 0x7b, 0xe6, + 0xa2, 0x50, 0xc4, 0x66, 0xd0, 0xd8, 0xf9, 0x67, 0xce, 0x80, 0x6b, 0x35, 0xb9, 0xab, 0x77, 0x57, + 0x9d, 0x09, 0x63, 0x17, 0x90, 0x3b, 0x1b, 0xb6, 0x6a, 0xf3, 0x27, 0xba, 0x43, 0xf5, 0x63, 0x48, + 0x0a, 0x57, 0x41, 0x57, 0xc7, 0xc8, 0x49, 0xe1, 0x42, 0xda, 0xcb, 0xb5, 0x78, 0xcb, 0xa2, 0x37, + 0x8d, 0x74, 0x9c, 0xe4, 0x4f, 0x05, 0x78, 0xe6, 0x24, 0x7f, 0x2a, 0x01, 0x4f, 0x4a, 0xa2, 0x57, + 0x01, 0x1c, 0xe3, 0xa3, 0x37, 0x07, 0xb4, 0x8c, 0x8f, 0xde, 0x02, 0x76, 0x32, 0x81, 0x7e, 0x3d, + 0xfa, 0x61, 0xa0, 0xf8, 0xdc, 0x88, 0xb6, 0x2b, 0x53, 0x51, 0xe5, 0x2b, 0xab, 0xf6, 0xda, 0xa9, + 0xc6, 0xa4, 0x8c, 0xff, 0xbb, 0x86, 0x42, 0x1f, 0x4b, 0xdf, 0xfb, 0xd0, 0xeb, 0x35, 0x04, 0x17, + 0x9e, 0x2c, 0xb5, 0x37, 0x4e, 0x39, 0x2a, 0x35, 0xa1, 0x0f, 0x60, 0x9a, 0xd7, 0xb9, 0xe8, 0xd2, + 0xc9, 0x05, 0xb0, 0x76, 0xb9, 0xbc, 0x3f, 0x29, 0xe3, 0x98, 0xb4, 0xfd, 0x19, 0xfe, 0x93, 0xf2, + 0x6b, 0xff, 0x09, 0x00, 0x00, 0xff, 0xff, 0x66, 0x23, 0x9f, 0xad, 0xbb, 0x2c, 0x00, 0x00, } diff --git a/weed/server/volume_grpc_admin.go b/weed/server/volume_grpc_admin.go index c631d2535..43987b748 100644 --- a/weed/server/volume_grpc_admin.go +++ b/weed/server/volume_grpc_admin.go @@ -2,10 +2,12 @@ package weed_server import ( "context" + "fmt" "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb" "github.com/chrislusf/seaweedfs/weed/storage/needle" + "github.com/chrislusf/seaweedfs/weed/storage/super_block" ) func (vs *VolumeServer) DeleteCollection(ctx context.Context, req *volume_server_pb.DeleteCollectionRequest) (*volume_server_pb.DeleteCollectionResponse, error) { @@ -96,6 +98,41 @@ func (vs *VolumeServer) VolumeDelete(ctx context.Context, req *volume_server_pb. 
} +func (vs *VolumeServer) VolumeConfigure(ctx context.Context, req *volume_server_pb.VolumeConfigureRequest) (*volume_server_pb.VolumeConfigureResponse, error) { + + resp := &volume_server_pb.VolumeConfigureResponse{} + + // check replication format + if _, err := super_block.NewReplicaPlacementFromString(req.Replication); err != nil { + resp.Error = fmt.Sprintf("volume configure replication %v: %v", req, err) + return resp, nil + } + + // unmount + if err := vs.store.UnmountVolume(needle.VolumeId(req.VolumeId)); err != nil { + glog.Errorf("volume configure unmount %v: %v", req, err) + resp.Error = fmt.Sprintf("volume configure unmount %v: %v", req, err) + return resp, nil + } + + // modify the volume info file + if err := vs.store.ConfigureVolume(needle.VolumeId(req.VolumeId), req.Replication); err != nil { + glog.Errorf("volume configure %v: %v", req, err) + resp.Error = fmt.Sprintf("volume configure %v: %v", req, err) + return resp, nil + } + + // mount + if err := vs.store.MountVolume(needle.VolumeId(req.VolumeId)); err != nil { + glog.Errorf("volume configure mount %v: %v", req, err) + resp.Error = fmt.Sprintf("volume configure mount %v: %v", req, err) + return resp, nil + } + + return resp, nil + +} + func (vs *VolumeServer) VolumeMarkReadonly(ctx context.Context, req *volume_server_pb.VolumeMarkReadonlyRequest) (*volume_server_pb.VolumeMarkReadonlyResponse, error) { resp := &volume_server_pb.VolumeMarkReadonlyResponse{} diff --git a/weed/shell/command_volume_configure_replication.go b/weed/shell/command_volume_configure_replication.go new file mode 100644 index 000000000..6000d0de0 --- /dev/null +++ b/weed/shell/command_volume_configure_replication.go @@ -0,0 +1,105 @@ +package shell + +import ( + "context" + "errors" + "flag" + "fmt" + "io" + + "github.com/chrislusf/seaweedfs/weed/operation" + "github.com/chrislusf/seaweedfs/weed/pb/master_pb" + "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb" + "github.com/chrislusf/seaweedfs/weed/storage/needle" + "github.com/chrislusf/seaweedfs/weed/storage/super_block" +) + +func init() { + Commands = append(Commands, &commandVolumeConfigureReplication{}) +} + +type commandVolumeConfigureReplication struct { +} + +func (c *commandVolumeConfigureReplication) Name() string { + return "volume.configure.replication" +} + +func (c *commandVolumeConfigureReplication) Help() string { + return `change volume replication value + + This command changes a volume replication value. It should be followed by volume.fix.replication. 
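The VolumeConfigure handler above applies a replication change as unmount, rewrite of the .vif volume info file, then remount, and the volume.configure.replication shell command below drives that RPC against every volume server holding the volume. As a rough sketch of calling the new RPC directly (the volume server address and the insecure dial option are placeholder assumptions, not part of this patch):

package main

import (
	"context"
	"errors"
	"fmt"

	"google.golang.org/grpc"

	"github.com/chrislusf/seaweedfs/weed/operation"
	"github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
)

// configureReplication asks one volume server to change the replication
// recorded for a single volume, mirroring what the shell command does.
func configureReplication(volumeServer string, volumeId uint32, replication string) error {
	grpcDialOption := grpc.WithInsecure() // assumption: no TLS between client and volume server
	return operation.WithVolumeServerClient(volumeServer, grpcDialOption, func(ctx context.Context, client volume_server_pb.VolumeServerClient) error {
		resp, err := client.VolumeConfigure(ctx, &volume_server_pb.VolumeConfigureRequest{
			VolumeId:    volumeId,
			Replication: replication, // e.g. "001"
		})
		if err != nil {
			return err
		}
		if resp.Error != "" {
			return errors.New(resp.Error)
		}
		return nil
	})
}

func main() {
	// example volume server address and volume id
	if err := configureReplication("localhost:8080", 3, "001"); err != nil {
		fmt.Println("volume configure:", err)
	}
}

From weed shell, the equivalent is volume.configure.replication -volumeId=3 -replication=001, followed by volume.fix.replication as the Help text above suggests.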
+ +` +} + +func (c *commandVolumeConfigureReplication) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) { + + configureReplicationCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError) + volumeIdInt := configureReplicationCommand.Int("volumeId", 0, "the volume id") + replicationString := configureReplicationCommand.String("replication", "", "the intended replication value") + if err = configureReplicationCommand.Parse(args); err != nil { + return nil + } + + if *replicationString == "" { + return fmt.Errorf("empty replication value") + } + + replicaPlacement, err := super_block.NewReplicaPlacementFromString(*replicationString) + if err != nil { + return fmt.Errorf("replication format: %v", err) + } + replicaPlacementInt32 := uint32(replicaPlacement.Byte()) + + var resp *master_pb.VolumeListResponse + ctx := context.Background() + err = commandEnv.MasterClient.WithClient(ctx, func(client master_pb.SeaweedClient) error { + resp, err = client.VolumeList(ctx, &master_pb.VolumeListRequest{}) + return err + }) + if err != nil { + return err + } + + vid := needle.VolumeId(*volumeIdInt) + + // find all data nodes with volumes that needs replication change + var allLocations []location + eachDataNode(resp.TopologyInfo, func(dc string, rack RackId, dn *master_pb.DataNodeInfo) { + loc := newLocation(dc, string(rack), dn) + for _, v := range dn.VolumeInfos { + if v.Id == uint32(vid) && v.ReplicaPlacement != replicaPlacementInt32 { + allLocations = append(allLocations, loc) + continue + } + } + }) + + if len(allLocations) == 0 { + return fmt.Errorf("no volume needs change") + } + + for _, dst := range allLocations { + err := operation.WithVolumeServerClient(dst.dataNode.Id, commandEnv.option.GrpcDialOption, func(ctx context.Context, volumeServerClient volume_server_pb.VolumeServerClient) error { + resp, configureErr := volumeServerClient.VolumeConfigure(ctx, &volume_server_pb.VolumeConfigureRequest{ + VolumeId: uint32(vid), + Replication: replicaPlacement.String(), + }) + if configureErr != nil { + return configureErr + } + if resp.Error != "" { + return errors.New(resp.Error) + } + return nil + }) + + if err != nil { + return err + } + + } + + return nil +} diff --git a/weed/storage/disk_location.go b/weed/storage/disk_location.go index e116fc715..a12a68cbc 100644 --- a/weed/storage/disk_location.go +++ b/weed/storage/disk_location.go @@ -1,13 +1,12 @@ package storage import ( + "fmt" "io/ioutil" "os" "strings" "sync" - "fmt" - "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/storage/erasure_coding" "github.com/chrislusf/seaweedfs/weed/storage/needle" @@ -172,16 +171,10 @@ func (l *DiskLocation) deleteVolumeById(vid needle.VolumeId) (e error) { } func (l *DiskLocation) LoadVolume(vid needle.VolumeId, needleMapKind NeedleMapType) bool { - if fileInfos, err := ioutil.ReadDir(l.Directory); err == nil { - for _, fileInfo := range fileInfos { - volId, _, err := l.volumeIdFromPath(fileInfo) - if vid == volId && err == nil { - l.loadExistingVolume(fileInfo, needleMapKind) - return true - } - } + if fileInfo, found := l.LocateVolume(vid); found { + l.loadExistingVolume(fileInfo, needleMapKind) + return true } - return false } @@ -217,7 +210,7 @@ func (l *DiskLocation) unmountVolumeByCollection(collectionName string) map[need } } - for k, _ := range deltaVols { + for k := range deltaVols { delete(l.volumes, k) } return deltaVols @@ -260,3 +253,16 @@ func (l *DiskLocation) Close() { return } + +func (l *DiskLocation) LocateVolume(vid needle.VolumeId) 
(os.FileInfo, bool) { + if fileInfos, err := ioutil.ReadDir(l.Directory); err == nil { + for _, fileInfo := range fileInfos { + volId, _, err := l.volumeIdFromPath(fileInfo) + if vid == volId && err == nil { + return fileInfo, true + } + } + } + + return nil, false +} diff --git a/weed/storage/erasure_coding/ec_volume.go b/weed/storage/erasure_coding/ec_volume.go index 579f037fb..3d9aa2cff 100644 --- a/weed/storage/erasure_coding/ec_volume.go +++ b/weed/storage/erasure_coding/ec_volume.go @@ -60,7 +60,7 @@ func NewEcVolume(dir string, collection string, vid needle.VolumeId) (ev *EcVolu // read volume info ev.Version = needle.Version3 - if volumeInfo, found := pb.MaybeLoadVolumeInfo(baseFileName + ".vif"); found { + if volumeInfo, found, _ := pb.MaybeLoadVolumeInfo(baseFileName + ".vif"); found { ev.Version = needle.Version(volumeInfo.Version) } else { pb.SaveVolumeInfo(baseFileName+".vif", &volume_server_pb.VolumeInfo{Version: uint32(ev.Version)}) diff --git a/weed/storage/store.go b/weed/storage/store.go index 512f72ceb..2d02e2f80 100644 --- a/weed/storage/store.go +++ b/weed/storage/store.go @@ -2,11 +2,14 @@ package storage import ( "fmt" + "path/filepath" + "strings" "sync/atomic" "google.golang.org/grpc" "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb" "github.com/chrislusf/seaweedfs/weed/pb/master_pb" "github.com/chrislusf/seaweedfs/weed/stats" "github.com/chrislusf/seaweedfs/weed/storage/needle" @@ -346,6 +349,31 @@ func (s *Store) DeleteVolume(i needle.VolumeId) error { return fmt.Errorf("volume %d not found on disk", i) } +func (s *Store) ConfigureVolume(i needle.VolumeId, replication string) error { + + for _, location := range s.Locations { + fileInfo, found := location.LocateVolume(i) + if !found { + continue + } + // load, modify, save + baseFileName := strings.TrimSuffix(fileInfo.Name(), filepath.Ext(fileInfo.Name())) + vifFile := filepath.Join(location.Directory, baseFileName + ".vif") + volumeInfo, _, err := pb.MaybeLoadVolumeInfo(vifFile) + if err != nil { + return fmt.Errorf("volume %d fail to load vif", i) + } + volumeInfo.Replication = replication + err = pb.SaveVolumeInfo(vifFile, volumeInfo) + if err != nil { + return fmt.Errorf("volume %d fail to save vif", i) + } + return nil + } + + return fmt.Errorf("volume %d not found on disk", i) +} + func (s *Store) SetVolumeSizeLimit(x uint64) { atomic.StoreUint64(&s.volumeSizeLimit, x) } diff --git a/weed/storage/volume_super_block.go b/weed/storage/volume_super_block.go index 61c09d85a..1d7f35595 100644 --- a/weed/storage/volume_super_block.go +++ b/weed/storage/volume_super_block.go @@ -1,6 +1,7 @@ package storage import ( + "fmt" "os" "github.com/chrislusf/seaweedfs/weed/glog" @@ -36,5 +37,12 @@ func (v *Volume) maybeWriteSuperBlock() error { func (v *Volume) readSuperBlock() (err error) { v.SuperBlock, err = super_block.ReadSuperBlock(v.DataBackend) + if v.volumeInfo != nil && v.volumeInfo.Replication != ""{ + if replication, err := super_block.NewReplicaPlacementFromString(v.volumeInfo.Replication); err != nil { + return fmt.Errorf("Error parse volume %d replication %s : %v", v.Id, v.volumeInfo.Replication, err) + } else { + v.SuperBlock.ReplicaPlacement = replication + } + } return err } diff --git a/weed/storage/volume_tier.go b/weed/storage/volume_tier.go index 85eafa848..fd7b08654 100644 --- a/weed/storage/volume_tier.go +++ b/weed/storage/volume_tier.go @@ -14,7 +14,7 @@ func (v *Volume) GetVolumeInfo() *volume_server_pb.VolumeInfo { func (v *Volume) maybeLoadVolumeInfo() (found 
bool) { - v.volumeInfo, v.hasRemoteFile = pb.MaybeLoadVolumeInfo(v.FileName() + ".vif") + v.volumeInfo, v.hasRemoteFile, _ = pb.MaybeLoadVolumeInfo(v.FileName() + ".vif") if v.hasRemoteFile { glog.V(0).Infof("volume %d is tiered to %s as %s and read only", v.Id, From 382ff218d33e07eda91491e87cac51445c8f8fdb Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Mon, 3 Feb 2020 17:04:06 -0800 Subject: [PATCH 0101/2432] filer: set file size, streaming chunk file uploading fix https://github.com/chrislusf/seaweedfs/issues/1193 --- weed/server/filer_server_handlers_write.go | 4 +- .../filer_server_handlers_write_autochunk.go | 101 +++++++----------- 2 files changed, 43 insertions(+), 62 deletions(-) diff --git a/weed/server/filer_server_handlers_write.go b/weed/server/filer_server_handlers_write.go index 6b5d258bc..4707f1011 100644 --- a/weed/server/filer_server_handlers_write.go +++ b/weed/server/filer_server_handlers_write.go @@ -32,7 +32,7 @@ var ( type FilerPostResult struct { Name string `json:"name,omitempty"` - Size uint32 `json:"size,omitempty"` + Size int64 `json:"size,omitempty"` Error string `json:"error,omitempty"` Fid string `json:"fid,omitempty"` Url string `json:"url,omitempty"` @@ -130,7 +130,7 @@ func (fs *FilerServer) PostHandler(w http.ResponseWriter, r *http.Request) { // send back post result reply := FilerPostResult{ Name: ret.Name, - Size: ret.Size, + Size: int64(ret.Size), Error: ret.Error, Fid: fileId, Url: urlLocation, diff --git a/weed/server/filer_server_handlers_write_autochunk.go b/weed/server/filer_server_handlers_write_autochunk.go index 5739c2d4e..25c0a4b4d 100644 --- a/weed/server/filer_server_handlers_write_autochunk.go +++ b/weed/server/filer_server_handlers_write_autochunk.go @@ -1,10 +1,8 @@ package weed_server import ( - "bytes" "context" "io" - "io/ioutil" "net/http" "path" "strconv" @@ -92,66 +90,47 @@ func (fs *FilerServer) doAutoChunk(ctx context.Context, w http.ResponseWriter, r var fileChunks []*filer_pb.FileChunk - totalBytesRead := int64(0) - tmpBufferSize := int32(1024 * 1024) - tmpBuffer := bytes.NewBuffer(make([]byte, 0, tmpBufferSize)) - chunkBuf := make([]byte, chunkSize+tmpBufferSize, chunkSize+tmpBufferSize) // chunk size plus a little overflow - chunkBufOffset := int32(0) chunkOffset := int64(0) - writtenChunks := 0 - filerResult = &FilerPostResult{ - Name: fileName, - } + for chunkOffset < contentLength { + limitedReader := io.LimitReader(part1, int64(chunkSize)) - for totalBytesRead < contentLength { - tmpBuffer.Reset() - bytesRead, readErr := io.CopyN(tmpBuffer, part1, int64(tmpBufferSize)) - readFully := readErr != nil && readErr == io.EOF - tmpBuf := tmpBuffer.Bytes() - bytesToCopy := tmpBuf[0:int(bytesRead)] - - copy(chunkBuf[chunkBufOffset:chunkBufOffset+int32(bytesRead)], bytesToCopy) - chunkBufOffset = chunkBufOffset + int32(bytesRead) - - if chunkBufOffset >= chunkSize || readFully || (chunkBufOffset > 0 && bytesRead == 0) { - writtenChunks = writtenChunks + 1 - fileId, urlLocation, auth, assignErr := fs.assignNewFileInfo(w, r, replication, collection, dataCenter) - if assignErr != nil { - return nil, assignErr - } - - // upload the chunk to the volume server - chunkName := fileName + "_chunk_" + strconv.FormatInt(int64(len(fileChunks)+1), 10) - uploadErr := fs.doUpload(urlLocation, w, r, chunkBuf[0:chunkBufOffset], chunkName, "", fileId, auth) - if uploadErr != nil { - return nil, uploadErr - } - - // Save to chunk manifest structure - fileChunks = append(fileChunks, - &filer_pb.FileChunk{ - FileId: fileId, - Offset: chunkOffset, - Size: 
uint64(chunkBufOffset), - Mtime: time.Now().UnixNano(), - }, - ) - - // reset variables for the next chunk - chunkBufOffset = 0 - chunkOffset = totalBytesRead + int64(bytesRead) + // assign one file id for one chunk + fileId, urlLocation, auth, assignErr := fs.assignNewFileInfo(w, r, replication, collection, dataCenter) + if assignErr != nil { + return nil, assignErr } - totalBytesRead = totalBytesRead + int64(bytesRead) + // upload the chunk to the volume server + chunkName := fileName + "_chunk_" + strconv.FormatInt(int64(len(fileChunks)+1), 10) + uploadedSize, uploadErr := fs.doUpload(urlLocation, w, r, limitedReader, chunkName, "", fileId, auth) + if uploadErr != nil { + return nil, uploadErr + } - if bytesRead == 0 || readFully { + // if last chunk exhausted the reader exactly at the border + if uploadedSize == 0 { break } - if readErr != nil { - return nil, readErr + // Save to chunk manifest structure + fileChunks = append(fileChunks, + &filer_pb.FileChunk{ + FileId: fileId, + Offset: chunkOffset, + Size: uint64(uploadedSize), + Mtime: time.Now().UnixNano(), + }, + ) + + glog.V(4).Infof("uploaded %s chunk %d to %s [%d,%d) of %d", fileName, len(fileChunks), fileId, chunkOffset, chunkOffset+int64(uploadedSize), contentLength) + + // if last chunk was not at full chunk size, but already exhausted the reader + if uploadedSize < int64(chunkSize) { + break } + // reset variables for the next chunk + chunkOffset = chunkOffset + int64(uploadedSize) } path := r.URL.Path @@ -176,6 +155,12 @@ func (fs *FilerServer) doAutoChunk(ctx context.Context, w http.ResponseWriter, r }, Chunks: fileChunks, } + + filerResult = &FilerPostResult{ + Name: fileName, + Size: chunkOffset, + } + if dbErr := fs.filer.CreateEntry(ctx, entry, false); dbErr != nil { fs.filer.DeleteChunks(entry.Chunks) replyerr = dbErr @@ -188,7 +173,7 @@ func (fs *FilerServer) doAutoChunk(ctx context.Context, w http.ResponseWriter, r } func (fs *FilerServer) doUpload(urlLocation string, w http.ResponseWriter, r *http.Request, - chunkBuf []byte, fileName string, contentType string, fileId string, auth security.EncodedJwt) (err error) { + limitedReader io.Reader, fileName string, contentType string, fileId string, auth security.EncodedJwt) (size int64, err error) { stats.FilerRequestCounter.WithLabelValues("postAutoChunkUpload").Inc() start := time.Now() @@ -196,13 +181,9 @@ func (fs *FilerServer) doUpload(urlLocation string, w http.ResponseWriter, r *ht stats.FilerRequestHistogram.WithLabelValues("postAutoChunkUpload").Observe(time.Since(start).Seconds()) }() - ioReader := ioutil.NopCloser(bytes.NewBuffer(chunkBuf)) - uploadResult, uploadError := operation.Upload(urlLocation, fileName, ioReader, false, contentType, nil, auth) - if uploadResult != nil { - glog.V(0).Infoln("Chunk upload result. 
Name:", uploadResult.Name, "Fid:", fileId, "Size:", uploadResult.Size) - } + uploadResult, uploadError := operation.Upload(urlLocation, fileName, limitedReader, false, contentType, nil, auth) if uploadError != nil { - err = uploadError + return 0, uploadError } - return + return int64(uploadResult.Size), nil } From dc786a63ac658c6b527d3b5a8e426cb9b8969a20 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Mon, 3 Feb 2020 18:15:16 -0800 Subject: [PATCH 0102/2432] master: add configurable volume growth toml setting --- weed/command/scaffold.go | 9 +++++++++ weed/topology/volume_growth.go | 14 ++++++++++---- 2 files changed, 19 insertions(+), 4 deletions(-) diff --git a/weed/command/scaffold.go b/weed/command/scaffold.go index 3aebff396..ab658735f 100644 --- a/weed/command/scaffold.go +++ b/weed/command/scaffold.go @@ -369,6 +369,8 @@ type = "memory" # Choose [memory|etcd] type for storing the file id sequence sequencer_etcd_urls = "http://127.0.0.1:2379" +# configurations for tiered cloud storage +# old volumes are transparently moved to cloud for cost efficiency [storage.backend] [storage.backend.s3.default] enabled = false @@ -377,5 +379,12 @@ sequencer_etcd_urls = "http://127.0.0.1:2379" region = "us-east-2" bucket = "your_bucket_name" # an existing bucket +# create this number of logical volumes if no more writable volumes +[master.volume_growth] +count_1 = 7 # create 1 x 7 = 7 actual volumes +count_2 = 6 # create 2 x 6 = 12 actual volumes +count_3 = 3 # create 3 x 3 = 9 actual volumes +count_other = 1 # create n x 1 = n actual volumes + ` ) diff --git a/weed/topology/volume_growth.go b/weed/topology/volume_growth.go index 80fbc86cd..781a34ba3 100644 --- a/weed/topology/volume_growth.go +++ b/weed/topology/volume_growth.go @@ -7,6 +7,7 @@ import ( "github.com/chrislusf/seaweedfs/weed/storage/needle" "github.com/chrislusf/seaweedfs/weed/storage/super_block" + "github.com/chrislusf/seaweedfs/weed/util" "google.golang.org/grpc" @@ -48,15 +49,20 @@ func NewDefaultVolumeGrowth() *VolumeGrowth { // one replication type may need rp.GetCopyCount() actual volumes // given copyCount, how many logical volumes to create func (vg *VolumeGrowth) findVolumeCount(copyCount int) (count int) { + v := util.GetViper() + v.SetDefault("master.volume_growth.copy_1", 7) + v.SetDefault("master.volume_growth.copy_2", 6) + v.SetDefault("master.volume_growth.copy_3", 3) + v.SetDefault("master.volume_growth.copy_other", 1) switch copyCount { case 1: - count = 7 + count = v.GetInt("master.volume_growth.copy_1") case 2: - count = 6 + count = v.GetInt("master.volume_growth.copy_2") case 3: - count = 3 + count = v.GetInt("master.volume_growth.copy_3") default: - count = 1 + count = v.GetInt("master.volume_growth.copy_other") } return } From 3a35632d586a9a12115ff7204db6e2b8ee855e15 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Tue, 4 Feb 2020 10:37:14 -0800 Subject: [PATCH 0103/2432] sync before closing file merge changes from https://github.com/stlpmo-jn/seaweedfs/commit/1747fc2d523a42af6cade446c56317d840bd2c67 --- weed/storage/needle_map_leveldb.go | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/weed/storage/needle_map_leveldb.go b/weed/storage/needle_map_leveldb.go index ef8571e83..3bb258559 100644 --- a/weed/storage/needle_map_leveldb.go +++ b/weed/storage/needle_map_leveldb.go @@ -128,8 +128,17 @@ func (m *LevelDbNeedleMap) Delete(key NeedleId, offset Offset) error { } func (m *LevelDbNeedleMap) Close() { - m.indexFile.Close() - m.db.Close() + indexFileName := m.indexFile.Name() + if err 
:= m.indexFile.Sync(); err != nil { + glog.Warningf("sync file %s failed: %v", indexFileName, err) + } + if err := m.indexFile.Close(); err != nil { + glog.Warningf("close index file %s failed: %v", indexFileName, err) + } + + if err := m.db.Close(); err != nil { + glog.Warningf("close levelDB failed: %v", err) + } } func (m *LevelDbNeedleMap) Destroy() error { From 08e47025427bf2764cf06853e169398ee84b7b09 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Tue, 4 Feb 2020 10:38:12 -0800 Subject: [PATCH 0104/2432] sync before closing index file merge from https://github.com/stlpmo-jn/seaweedfs/commit/0181f87d9142852f981541cb05689df9fc9ef6a0 --- weed/storage/needle_map_memory.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/weed/storage/needle_map_memory.go b/weed/storage/needle_map_memory.go index 37dee7889..84197912f 100644 --- a/weed/storage/needle_map_memory.go +++ b/weed/storage/needle_map_memory.go @@ -64,6 +64,10 @@ func (nm *NeedleMap) Delete(key NeedleId, offset Offset) error { return nm.appendToIndexFile(key, offset, TombstoneFileSize) } func (nm *NeedleMap) Close() { + indexFileName := nm.indexFile.Name() + if err := nm.indexFile.Sync(); err != nil { + glog.Warningf("sync file %s failed, %v", indexFileName, err) + } _ = nm.indexFile.Close() } func (nm *NeedleMap) Destroy() error { From c1288e9eb464b7b455a2a868597126f3fa1ff5b9 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Tue, 4 Feb 2020 21:12:09 -0800 Subject: [PATCH 0105/2432] volume: sdx generation uses memdb instead of compactMap fix https://github.com/chrislusf/seaweedfs/issues/1194 --- weed/storage/erasure_coding/ec_encoder.go | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/weed/storage/erasure_coding/ec_encoder.go b/weed/storage/erasure_coding/ec_encoder.go index eeb384b91..7cb8064ac 100644 --- a/weed/storage/erasure_coding/ec_encoder.go +++ b/weed/storage/erasure_coding/ec_encoder.go @@ -5,12 +5,13 @@ import ( "io" "os" + "github.com/klauspost/reedsolomon" + "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/storage/idx" "github.com/chrislusf/seaweedfs/weed/storage/needle_map" "github.com/chrislusf/seaweedfs/weed/storage/types" "github.com/chrislusf/seaweedfs/weed/util" - "github.com/klauspost/reedsolomon" ) const ( @@ -280,14 +281,14 @@ func rebuildEcFiles(shardHasData []bool, inputFiles []*os.File, outputFiles []*o } -func readCompactMap(baseFileName string) (*needle_map.CompactMap, error) { +func readCompactMap(baseFileName string) (*needle_map.MemDb, error) { indexFile, err := os.OpenFile(baseFileName+".idx", os.O_RDONLY, 0644) if err != nil { return nil, fmt.Errorf("cannot read Volume Index %s.idx: %v", baseFileName, err) } defer indexFile.Close() - cm := needle_map.NewCompactMap() + cm := needle_map.NewMemDb() err = idx.WalkIndexFile(indexFile, func(key types.NeedleId, offset types.Offset, size uint32) error { if !offset.IsZero() && size != types.TombstoneFileSize { cm.Set(key, offset, size) From 8d94564f4152cd890d5896a3dedf5e7589c5023e Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Tue, 4 Feb 2020 21:16:34 -0800 Subject: [PATCH 0106/2432] refactor --- weed/storage/erasure_coding/ec_encoder.go | 8 ++++---- weed/storage/erasure_coding/ec_test.go | 6 +++--- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/weed/storage/erasure_coding/ec_encoder.go b/weed/storage/erasure_coding/ec_encoder.go index 7cb8064ac..9e2edf57d 100644 --- a/weed/storage/erasure_coding/ec_encoder.go +++ b/weed/storage/erasure_coding/ec_encoder.go @@ -26,9 +26,9 @@ const ( // all 
keys are sorted in ascending order func WriteSortedFileFromIdx(baseFileName string, ext string) (e error) { - cm, err := readCompactMap(baseFileName) + nm, err := readNeedleMap(baseFileName) if err != nil { - return fmt.Errorf("readCompactMap: %v", err) + return fmt.Errorf("readNeedleMap: %v", err) } ecxFile, err := os.OpenFile(baseFileName+ext, os.O_TRUNC|os.O_CREATE|os.O_WRONLY, 0644) @@ -37,7 +37,7 @@ func WriteSortedFileFromIdx(baseFileName string, ext string) (e error) { } defer ecxFile.Close() - err = cm.AscendingVisit(func(value needle_map.NeedleValue) error { + err = nm.AscendingVisit(func(value needle_map.NeedleValue) error { bytes := value.ToBytes() _, writeErr := ecxFile.Write(bytes) return writeErr @@ -281,7 +281,7 @@ func rebuildEcFiles(shardHasData []bool, inputFiles []*os.File, outputFiles []*o } -func readCompactMap(baseFileName string) (*needle_map.MemDb, error) { +func readNeedleMap(baseFileName string) (*needle_map.MemDb, error) { indexFile, err := os.OpenFile(baseFileName+".idx", os.O_RDONLY, 0644) if err != nil { return nil, fmt.Errorf("cannot read Volume Index %s.idx: %v", baseFileName, err) diff --git a/weed/storage/erasure_coding/ec_test.go b/weed/storage/erasure_coding/ec_test.go index 0e4aaa27c..b2c94cfd7 100644 --- a/weed/storage/erasure_coding/ec_test.go +++ b/weed/storage/erasure_coding/ec_test.go @@ -41,9 +41,9 @@ func TestEncodingDecoding(t *testing.T) { } func validateFiles(baseFileName string) error { - cm, err := readCompactMap(baseFileName) + nm, err := readNeedleMap(baseFileName) if err != nil { - return fmt.Errorf("readCompactMap: %v", err) + return fmt.Errorf("readNeedleMap: %v", err) } datFile, err := os.OpenFile(baseFileName+".dat", os.O_RDONLY, 0) @@ -60,7 +60,7 @@ func validateFiles(baseFileName string) error { ecFiles, err := openEcFiles(baseFileName, true) defer closeEcFiles(ecFiles) - err = cm.AscendingVisit(func(value needle_map.NeedleValue) error { + err = nm.AscendingVisit(func(value needle_map.NeedleValue) error { return assertSame(datFile, fi.Size(), ecFiles, value.Offset, value.Size) }) if err != nil { From 9ed364f053ff3a5c8675d33358a4cc7f63a79985 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Sun, 9 Feb 2020 14:30:02 -0800 Subject: [PATCH 0107/2432] support acl --- weed/command/s3.go | 52 +- weed/pb/Makefile | 1 + weed/pb/iam.proto | 50 ++ weed/pb/iam_pb/iam.pb.go | 173 +++++++ weed/s3api/auth_credentials.go | 150 ++++++ weed/s3api/auth_credentials_test.go | 68 +++ weed/s3api/auth_signature_v4.go | 719 +++++++++++++++++++++++++++ weed/s3api/auto_signature_v4_test.go | 419 ++++++++++++++++ weed/s3api/chunked_reader_v4.go | 5 - weed/s3api/s3api_auth.go | 6 +- weed/s3api/s3api_errors.go | 139 ++++++ weed/s3api/s3api_server.go | 44 +- 12 files changed, 1798 insertions(+), 28 deletions(-) create mode 100644 weed/pb/iam.proto create mode 100644 weed/pb/iam_pb/iam.pb.go create mode 100644 weed/s3api/auth_credentials.go create mode 100644 weed/s3api/auth_credentials_test.go create mode 100644 weed/s3api/auth_signature_v4.go create mode 100644 weed/s3api/auto_signature_v4_test.go diff --git a/weed/command/s3.go b/weed/command/s3.go index 10a486657..4538f2135 100644 --- a/weed/command/s3.go +++ b/weed/command/s3.go @@ -22,6 +22,7 @@ type S3Options struct { filer *string filerBucketsPath *string port *int + config *string domainName *string tlsPrivateKey *string tlsCertificate *string @@ -33,15 +34,63 @@ func init() { s3StandaloneOptions.filerBucketsPath = cmdS3.Flag.String("filer.dir.buckets", "/buckets", "folder on filer to store all buckets") 
s3StandaloneOptions.port = cmdS3.Flag.Int("port", 8333, "s3 server http listen port") s3StandaloneOptions.domainName = cmdS3.Flag.String("domainName", "", "suffix of the host name, {bucket}.{domainName}") + s3StandaloneOptions.config = cmdS3.Flag.String("config", "", "path to the config file") s3StandaloneOptions.tlsPrivateKey = cmdS3.Flag.String("key.file", "", "path to the TLS private key file") s3StandaloneOptions.tlsCertificate = cmdS3.Flag.String("cert.file", "", "path to the TLS certificate file") } var cmdS3 = &Command{ - UsageLine: "s3 -port=8333 -filer=", + UsageLine: "s3 [-port=8333] [-filer=] [-config=]", Short: "start a s3 API compatible server that is backed by a filer", Long: `start a s3 API compatible server that is backed by a filer. + By default, you can use any access key and secret key to access the S3 APIs. + To enable credential based access, create a config.json file similar to this: + +{ + "identities": [ + { + "name": "some_name", + "credentials": [ + { + "accessKey": "some_access_key1", + "secretKey": "some_secret_key2" + } + ], + "actions": [ + "Admin", + "Read", + "Write" + ] + }, + { + "name": "some_read_only_user", + "credentials": [ + { + "accessKey": "some_access_key1", + "secretKey": "some_secret_key1" + } + ], + "actions": [ + "Read" + ] + }, + { + "name": "some_normal_user", + "credentials": [ + { + "accessKey": "some_access_key2", + "secretKey": "some_secret_key2" + } + ], + "actions": [ + "Read", + "Write" + ] + } + ] +} + `, } @@ -66,6 +115,7 @@ func (s3opt *S3Options) startS3Server() bool { _, s3ApiServer_err := s3api.NewS3ApiServer(router, &s3api.S3ApiServerOption{ Filer: *s3opt.filer, FilerGrpcAddress: filerGrpcAddress, + Config: *s3opt.config, DomainName: *s3opt.domainName, BucketsPath: *s3opt.filerBucketsPath, GrpcDialOption: security.LoadClientTLS(util.GetViper(), "grpc.client"), diff --git a/weed/pb/Makefile b/weed/pb/Makefile index c50410574..edfcd9a72 100644 --- a/weed/pb/Makefile +++ b/weed/pb/Makefile @@ -6,5 +6,6 @@ gen: protoc master.proto --go_out=plugins=grpc:./master_pb protoc volume_server.proto --go_out=plugins=grpc:./volume_server_pb protoc filer.proto --go_out=plugins=grpc:./filer_pb + protoc iam.proto --go_out=plugins=grpc:./iam_pb # protoc filer.proto --java_out=../../other/java/client/src/main/java cp filer.proto ../../other/java/client/src/main/proto diff --git a/weed/pb/iam.proto b/weed/pb/iam.proto new file mode 100644 index 000000000..ddb4e5e5d --- /dev/null +++ b/weed/pb/iam.proto @@ -0,0 +1,50 @@ +syntax = "proto3"; + +package iam_pb; + +option java_package = "seaweedfs.client"; +option java_outer_classname = "IamProto"; + +////////////////////////////////////////////////// + +service SeaweedIdentityAccessManagement { + +} + +////////////////////////////////////////////////// + +message Identities { + repeated Identity identities = 1; +} + +message Identity { + string name = 1; + repeated Credential credentials = 2; + repeated string actions = 3; +} + +message Credential { + string access_key = 1; + string secret_key = 2; + // uint64 expiration = 3; + // bool is_disabled = 4; +} + +/* +message Policy { + repeated Statement statements = 1; +} + +message Statement { + repeated Action action = 1; + repeated Resource resource = 2; +} + +message Action { + string action = 1; +} +message Resource { + string bucket = 1; + // string path = 2; +} +*/ \ No newline at end of file diff --git a/weed/pb/iam_pb/iam.pb.go b/weed/pb/iam_pb/iam.pb.go new file mode 100644 index 000000000..53cef9cc1 --- /dev/null +++ b/weed/pb/iam_pb/iam.pb.go 
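The identities file that the new -config flag points at is just the JSON encoding of these iam_pb messages, which the s3 server later parses with jsonpb. A minimal sketch of producing such a file from the generated Go types (the output path is an arbitrary example):

package main

import (
	"os"

	"github.com/golang/protobuf/jsonpb"

	"github.com/chrislusf/seaweedfs/weed/pb/iam_pb"
)

func main() {
	// one identity with full access, matching the example in the s3 command help
	identities := &iam_pb.Identities{
		Identities: []*iam_pb.Identity{
			{
				Name: "some_name",
				Credentials: []*iam_pb.Credential{
					{AccessKey: "some_access_key1", SecretKey: "some_secret_key2"},
				},
				Actions: []string{"Admin", "Read", "Write"},
			},
		},
	}

	f, err := os.Create("config.json") // arbitrary output path
	if err != nil {
		panic(err)
	}
	defer f.Close()

	m := jsonpb.Marshaler{Indent: "  "}
	if err := m.Marshal(f, identities); err != nil {
		panic(err)
	}
}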
@@ -0,0 +1,173 @@ +// Code generated by protoc-gen-go. +// source: iam.proto +// DO NOT EDIT! + +/* +Package iam_pb is a generated protocol buffer package. + +It is generated from these files: + iam.proto + +It has these top-level messages: + Identities + Identity + Credential +*/ +package iam_pb + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type Identities struct { + Identities []*Identity `protobuf:"bytes,1,rep,name=identities" json:"identities,omitempty"` +} + +func (m *Identities) Reset() { *m = Identities{} } +func (m *Identities) String() string { return proto.CompactTextString(m) } +func (*Identities) ProtoMessage() {} +func (*Identities) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +func (m *Identities) GetIdentities() []*Identity { + if m != nil { + return m.Identities + } + return nil +} + +type Identity struct { + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Credentials []*Credential `protobuf:"bytes,2,rep,name=credentials" json:"credentials,omitempty"` + Actions []string `protobuf:"bytes,3,rep,name=actions" json:"actions,omitempty"` +} + +func (m *Identity) Reset() { *m = Identity{} } +func (m *Identity) String() string { return proto.CompactTextString(m) } +func (*Identity) ProtoMessage() {} +func (*Identity) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } + +func (m *Identity) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Identity) GetCredentials() []*Credential { + if m != nil { + return m.Credentials + } + return nil +} + +func (m *Identity) GetActions() []string { + if m != nil { + return m.Actions + } + return nil +} + +type Credential struct { + AccessKey string `protobuf:"bytes,1,opt,name=access_key,json=accessKey" json:"access_key,omitempty"` + SecretKey string `protobuf:"bytes,2,opt,name=secret_key,json=secretKey" json:"secret_key,omitempty"` +} + +func (m *Credential) Reset() { *m = Credential{} } +func (m *Credential) String() string { return proto.CompactTextString(m) } +func (*Credential) ProtoMessage() {} +func (*Credential) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } + +func (m *Credential) GetAccessKey() string { + if m != nil { + return m.AccessKey + } + return "" +} + +func (m *Credential) GetSecretKey() string { + if m != nil { + return m.SecretKey + } + return "" +} + +func init() { + proto.RegisterType((*Identities)(nil), "iam_pb.Identities") + proto.RegisterType((*Identity)(nil), "iam_pb.Identity") + proto.RegisterType((*Credential)(nil), "iam_pb.Credential") +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. 
+const _ = grpc.SupportPackageIsVersion4 + +// Client API for SeaweedIdentityAccessManagement service + +type SeaweedIdentityAccessManagementClient interface { +} + +type seaweedIdentityAccessManagementClient struct { + cc *grpc.ClientConn +} + +func NewSeaweedIdentityAccessManagementClient(cc *grpc.ClientConn) SeaweedIdentityAccessManagementClient { + return &seaweedIdentityAccessManagementClient{cc} +} + +// Server API for SeaweedIdentityAccessManagement service + +type SeaweedIdentityAccessManagementServer interface { +} + +func RegisterSeaweedIdentityAccessManagementServer(s *grpc.Server, srv SeaweedIdentityAccessManagementServer) { + s.RegisterService(&_SeaweedIdentityAccessManagement_serviceDesc, srv) +} + +var _SeaweedIdentityAccessManagement_serviceDesc = grpc.ServiceDesc{ + ServiceName: "iam_pb.SeaweedIdentityAccessManagement", + HandlerType: (*SeaweedIdentityAccessManagementServer)(nil), + Methods: []grpc.MethodDesc{}, + Streams: []grpc.StreamDesc{}, + Metadata: "iam.proto", +} + +func init() { proto.RegisterFile("iam.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 238 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x4c, 0x90, 0xc1, 0x4a, 0xc3, 0x40, + 0x10, 0x86, 0x49, 0x23, 0xb5, 0x99, 0x5e, 0xca, 0x9c, 0xf6, 0xa0, 0x18, 0x73, 0xca, 0x29, 0x48, + 0xf5, 0x2c, 0xa8, 0xa7, 0x5a, 0x04, 0x89, 0x0f, 0x50, 0xa6, 0xdb, 0x51, 0x16, 0xbb, 0x9b, 0x92, + 0x59, 0x90, 0xbc, 0xbd, 0x64, 0xb7, 0xdb, 0xf6, 0xb6, 0xf3, 0x7f, 0xdf, 0xcc, 0x32, 0x03, 0x85, + 0x21, 0xdb, 0x1c, 0xfa, 0xce, 0x77, 0x38, 0x35, 0x64, 0x37, 0x87, 0x6d, 0xf5, 0x0c, 0xb0, 0xda, + 0xb1, 0xf3, 0xc6, 0x1b, 0x16, 0x7c, 0x00, 0x30, 0xa7, 0x4a, 0x65, 0x65, 0x5e, 0xcf, 0x97, 0x8b, + 0x26, 0xaa, 0xcd, 0xd1, 0x1b, 0xda, 0x0b, 0xa7, 0x72, 0x30, 0x4b, 0x39, 0x22, 0x5c, 0x39, 0xb2, + 0xac, 0xb2, 0x32, 0xab, 0x8b, 0x36, 0xbc, 0xf1, 0x09, 0xe6, 0xba, 0xe7, 0x60, 0xd0, 0x5e, 0xd4, + 0x24, 0x8c, 0xc4, 0x34, 0xf2, 0xed, 0x84, 0xda, 0x4b, 0x0d, 0x15, 0x5c, 0x93, 0xf6, 0xa6, 0x73, + 0xa2, 0xf2, 0x32, 0xaf, 0x8b, 0x36, 0x95, 0xd5, 0x3b, 0xc0, 0xb9, 0x09, 0x6f, 0x01, 0x48, 0x6b, + 0x16, 0xd9, 0xfc, 0xf2, 0x70, 0xfc, 0xb7, 0x88, 0xc9, 0x9a, 0x87, 0x11, 0x0b, 0xeb, 0x9e, 0x7d, + 0xc0, 0x93, 0x88, 0x63, 0xb2, 0xe6, 0x61, 0x79, 0x0f, 0x77, 0x5f, 0x4c, 0x7f, 0xcc, 0xbb, 0xb4, + 0xc2, 0x4b, 0x68, 0xfd, 0x20, 0x47, 0x3f, 0x6c, 0xd9, 0xf9, 0xd7, 0x1b, 0x58, 0x48, 0x54, 0xbe, + 0xa5, 0xd1, 0x7b, 0x33, 0x66, 0xb3, 0x15, 0xd9, 0xcf, 0xf1, 0x88, 0xdb, 0x69, 0xb8, 0xe5, 0xe3, + 0x7f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x65, 0xb9, 0x71, 0x4e, 0x58, 0x01, 0x00, 0x00, +} diff --git a/weed/s3api/auth_credentials.go b/weed/s3api/auth_credentials.go new file mode 100644 index 000000000..9df1fdd29 --- /dev/null +++ b/weed/s3api/auth_credentials.go @@ -0,0 +1,150 @@ +package s3api + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + + "github.com/golang/protobuf/jsonpb" + + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/iam_pb" +) + +type Action string + +const ( + ACTION_READ = "Read" + ACTION_WRITE = "Write" + ACTION_ADMIN = "Admin" +) + +type Iam interface { + Check(f http.HandlerFunc, actions ...Action) http.HandlerFunc +} + +type IdentityAccessManagement struct { + identities []*Identity +} + +type Identity struct { + Name string + Credentials []*Credential + Actions []Action +} + +type Credential struct { + AccessKey string + SecretKey string +} + +func NewIdentityAccessManagement(fileName string) *IdentityAccessManagement { + iam := 
&IdentityAccessManagement{} + if fileName == "" { + return iam + } + if err := iam.loadIdentities(fileName); err != nil { + glog.Fatalf("fail to load config file %s: %v", fileName, err) + } + return iam +} + +func (iam *IdentityAccessManagement) loadIdentities(fileName string) error { + + identities := &iam_pb.Identities{} + + rawData, readErr := ioutil.ReadFile(fileName) + if readErr != nil { + glog.Warningf("fail to read %s : %v", fileName, readErr) + return fmt.Errorf("fail to read %s : %v", fileName, readErr) + } + + glog.V(1).Infof("maybeLoadVolumeInfo Unmarshal volume info %v", fileName) + if err := jsonpb.Unmarshal(bytes.NewReader(rawData), identities); err != nil { + glog.Warningf("unmarshal error: %v", err) + return fmt.Errorf("unmarshal %s error: %v", fileName, err) + } + + for _, ident := range identities.Identities { + t := &Identity{ + Name: ident.Name, + Credentials: nil, + Actions: nil, + } + for _, action := range ident.Actions { + t.Actions = append(t.Actions, Action(action)) + } + for _, cred := range ident.Credentials { + t.Credentials = append(t.Credentials, &Credential{ + AccessKey: cred.AccessKey, + SecretKey: cred.SecretKey, + }) + } + iam.identities = append(iam.identities, t) + } + + return nil +} + +func (iam *IdentityAccessManagement) lookupByAccessKey(accessKey string) (identity *Identity, cred *Credential, found bool) { + for _, ident := range iam.identities { + for _, cred := range ident.Credentials { + if cred.AccessKey == accessKey { + return ident, cred, true + } + } + } + return nil, nil, false +} + +func (iam *IdentityAccessManagement) Auth(f http.HandlerFunc, actions ...Action) http.HandlerFunc { + + if len(iam.identities) == 0 { + return f + } + + return func(w http.ResponseWriter, r *http.Request) { + errCode := iam.authRequest(r, actions) + if errCode == ErrNone { + f(w, r) + return + } + writeErrorResponse(w, errCode, r.URL) + } +} + +// check whether the request has valid access keys +func (iam *IdentityAccessManagement) authRequest(r *http.Request, actions []Action) ErrorCode { + var identity *Identity + var s3Err ErrorCode + switch getRequestAuthType(r) { + case authTypeUnknown, authTypeStreamingSigned: + return ErrAccessDenied + case authTypePresignedV2, authTypeSignedV2: + return ErrNotImplemented + case authTypeSigned, authTypePresigned: + identity, s3Err = iam.reqSignatureV4Verify(r) + if s3Err != ErrNone { + return s3Err + } + } + + if !identity.canDo(actions) { + return ErrAccessDenied + } + + return ErrNone + +} + +func (identity *Identity) canDo(actions []Action) bool { + for _, a := range identity.Actions { + for _, b := range actions { + if a == b { + return true + } + } + } + return false +} diff --git a/weed/s3api/auth_credentials_test.go b/weed/s3api/auth_credentials_test.go new file mode 100644 index 000000000..5e605bdd5 --- /dev/null +++ b/weed/s3api/auth_credentials_test.go @@ -0,0 +1,68 @@ +package s3api + +import ( + "testing" + + "github.com/golang/protobuf/jsonpb" + + "github.com/chrislusf/seaweedfs/weed/pb/iam_pb" +) + +func TestIdentityListFileFormat(t *testing.T) { + + identities := &iam_pb.Identities{} + + identity1 := &iam_pb.Identity{ + Name: "some_name", + Credentials: []*iam_pb.Credential{ + { + AccessKey: "some_access_key1", + SecretKey: "some_secret_key2", + }, + }, + Actions: []string{ + ACTION_ADMIN, + ACTION_READ, + ACTION_WRITE, + }, + } + identity2 := &iam_pb.Identity{ + Name: "some_read_only_user", + Credentials: []*iam_pb.Credential{ + { + AccessKey: "some_access_key1", + SecretKey: "some_secret_key1", + }, + 
}, + Actions: []string{ + ACTION_READ, + }, + } + identity3 := &iam_pb.Identity{ + Name: "some_normal_user", + Credentials: []*iam_pb.Credential{ + { + AccessKey: "some_access_key2", + SecretKey: "some_secret_key2", + }, + }, + Actions: []string{ + ACTION_READ, + ACTION_WRITE, + }, + } + + identities.Identities = append(identities.Identities, identity1) + identities.Identities = append(identities.Identities, identity2) + identities.Identities = append(identities.Identities, identity3) + + m := jsonpb.Marshaler{ + EmitDefaults: true, + Indent: " ", + } + + text, _ := m.MarshalToString(identities) + + println(text) + +} diff --git a/weed/s3api/auth_signature_v4.go b/weed/s3api/auth_signature_v4.go new file mode 100644 index 000000000..6da316abc --- /dev/null +++ b/weed/s3api/auth_signature_v4.go @@ -0,0 +1,719 @@ +/* + * The following code tries to reverse engineer the Amazon S3 APIs, + * and is mostly copied from minio implementation. + */ + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +// implied. See the License for the specific language governing +// permissions and limitations under the License. + +package s3api + +import ( + "bytes" + "crypto/hmac" + "crypto/sha256" + "crypto/subtle" + "encoding/hex" + "net/http" + "net/url" + "regexp" + "sort" + "strconv" + "strings" + "time" + "unicode/utf8" +) + +func (iam *IdentityAccessManagement) reqSignatureV4Verify(r *http.Request) (*Identity, ErrorCode) { + sha256sum := getContentSha256Cksum(r) + switch { + case isRequestSignatureV4(r): + return iam.doesSignatureMatch(sha256sum, r) + case isRequestPresignedSignatureV4(r): + return iam.doesPresignedSignatureMatch(sha256sum, r) + } + return nil, ErrAccessDenied +} + +// Streaming AWS Signature Version '4' constants. +const ( + emptySHA256 = "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" + streamingContentSHA256 = "STREAMING-AWS4-HMAC-SHA256-PAYLOAD" + + // http Header "x-amz-content-sha256" == "UNSIGNED-PAYLOAD" indicates that the + // client did not calculate sha256 of the payload. + unsignedPayload = "UNSIGNED-PAYLOAD" +) + +// Returns SHA256 for calculating canonical-request. +func getContentSha256Cksum(r *http.Request) string { + var ( + defaultSha256Cksum string + v []string + ok bool + ) + + // For a presigned request we look at the query param for sha256. + if isRequestPresignedSignatureV4(r) { + // X-Amz-Content-Sha256, if not set in presigned requests, checksum + // will default to 'UNSIGNED-PAYLOAD'. + defaultSha256Cksum = unsignedPayload + v, ok = r.URL.Query()["X-Amz-Content-Sha256"] + if !ok { + v, ok = r.Header["X-Amz-Content-Sha256"] + } + } else { + // X-Amz-Content-Sha256, if not set in signed requests, checksum + // will default to sha256([]byte("")). + defaultSha256Cksum = emptySHA256 + v, ok = r.Header["X-Amz-Content-Sha256"] + } + + // We found 'X-Amz-Content-Sha256' return the captured value. + if ok { + return v[0] + } + + // We couldn't find 'X-Amz-Content-Sha256'. 
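+	// Fall back to the default computed above: unsignedPayload for presigned requests,
+	// emptySHA256 (the sha256 of an empty body) for signed requests.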
+ return defaultSha256Cksum +} + +// Verify authorization header - http://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html +func (iam *IdentityAccessManagement) doesSignatureMatch(hashedPayload string, r *http.Request) (*Identity, ErrorCode) { + + // Copy request. + req := *r + + // Save authorization header. + v4Auth := req.Header.Get("Authorization") + + // Parse signature version '4' header. + signV4Values, err := parseSignV4(v4Auth) + if err != ErrNone { + return nil, err + } + + // Extract all the signed headers along with its values. + extractedSignedHeaders, errCode := extractSignedHeaders(signV4Values.SignedHeaders, r) + if errCode != ErrNone { + return nil, errCode + } + + // Verify if the access key id matches. + identity, cred, found := iam.lookupByAccessKey(signV4Values.Credential.accessKey) + if !found { + return nil, ErrInvalidAccessKeyID + } + + // Extract date, if not present throw error. + var date string + if date = req.Header.Get(http.CanonicalHeaderKey("X-Amz-Date")); date == "" { + if date = r.Header.Get("Date"); date == "" { + return nil, ErrMissingDateHeader + } + } + // Parse date header. + t, e := time.Parse(iso8601Format, date) + if e != nil { + return nil, ErrMalformedDate + } + + // Query string. + queryStr := req.URL.Query().Encode() + + // Get canonical request. + canonicalRequest := getCanonicalRequest(extractedSignedHeaders, hashedPayload, queryStr, req.URL.Path, req.Method) + + // Get string to sign from canonical request. + stringToSign := getStringToSign(canonicalRequest, t, signV4Values.Credential.getScope()) + + // Get hmac signing key. + signingKey := getSigningKey(cred.SecretKey, signV4Values.Credential.scope.date, signV4Values.Credential.scope.region) + + // Calculate signature. + newSignature := getSignature(signingKey, stringToSign) + + // Verify if signature match. + if !compareSignatureV4(newSignature, signV4Values.Signature) { + return nil, ErrSignatureDoesNotMatch + } + + // Return error none. + return identity, ErrNone +} + +// credentialHeader data type represents structured form of Credential +// string from authorization header. +type credentialHeader struct { + accessKey string + scope struct { + date time.Time + region string + service string + request string + } +} + +// signValues data type represents structured form of AWS Signature V4 header. +type signValues struct { + Credential credentialHeader + SignedHeaders []string + Signature string +} + +// Return scope string. +func (c credentialHeader) getScope() string { + return strings.Join([]string{ + c.scope.date.Format(yyyymmdd), + c.scope.region, + c.scope.service, + c.scope.request, + }, "/") +} + +// Authorization: algorithm Credential=accessKeyID/credScope, \ +// SignedHeaders=signedHeaders, Signature=signature +// +func parseSignV4(v4Auth string) (sv signValues, aec ErrorCode) { + // Replace all spaced strings, some clients can send spaced + // parameters and some won't. So we pro-actively remove any spaces + // to make parsing easier. + v4Auth = strings.Replace(v4Auth, " ", "", -1) + if v4Auth == "" { + return sv, ErrAuthHeaderEmpty + } + + // Verify if the header algorithm is supported or not. + if !strings.HasPrefix(v4Auth, signV4Algorithm) { + return sv, ErrSignatureVersionNotSupported + } + + // Strip off the Algorithm prefix. 
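+	// At this point all spaces have been stripped, so an Authorization header such as
+	// (illustrative values only)
+	//   AWS4-HMAC-SHA256Credential=AKIDEXAMPLE/20200209/us-east-1/s3/aws4_request,SignedHeaders=host;x-amz-date,Signature=0123abcd...
+	// reduces to three comma-separated fields once the algorithm prefix is trimmed.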
+ v4Auth = strings.TrimPrefix(v4Auth, signV4Algorithm) + authFields := strings.Split(strings.TrimSpace(v4Auth), ",") + if len(authFields) != 3 { + return sv, ErrMissingFields + } + + // Initialize signature version '4' structured header. + signV4Values := signValues{} + + var err ErrorCode + // Save credentail values. + signV4Values.Credential, err = parseCredentialHeader(authFields[0]) + if err != ErrNone { + return sv, err + } + + // Save signed headers. + signV4Values.SignedHeaders, err = parseSignedHeader(authFields[1]) + if err != ErrNone { + return sv, err + } + + // Save signature. + signV4Values.Signature, err = parseSignature(authFields[2]) + if err != ErrNone { + return sv, err + } + + // Return the structure here. + return signV4Values, ErrNone +} + +// parse credentialHeader string into its structured form. +func parseCredentialHeader(credElement string) (ch credentialHeader, aec ErrorCode) { + creds := strings.Split(strings.TrimSpace(credElement), "=") + if len(creds) != 2 { + return ch, ErrMissingFields + } + if creds[0] != "Credential" { + return ch, ErrMissingCredTag + } + credElements := strings.Split(strings.TrimSpace(creds[1]), "/") + if len(credElements) != 5 { + return ch, ErrCredMalformed + } + // Save access key id. + cred := credentialHeader{ + accessKey: credElements[0], + } + var e error + cred.scope.date, e = time.Parse(yyyymmdd, credElements[1]) + if e != nil { + return ch, ErrMalformedCredentialDate + } + + cred.scope.region = credElements[2] + cred.scope.service = credElements[3] // "s3" + cred.scope.request = credElements[4] // "aws4_request" + return cred, ErrNone +} + +// Parse slice of signed headers from signed headers tag. +func parseSignedHeader(signedHdrElement string) ([]string, ErrorCode) { + signedHdrFields := strings.Split(strings.TrimSpace(signedHdrElement), "=") + if len(signedHdrFields) != 2 { + return nil, ErrMissingFields + } + if signedHdrFields[0] != "SignedHeaders" { + return nil, ErrMissingSignHeadersTag + } + if signedHdrFields[1] == "" { + return nil, ErrMissingFields + } + signedHeaders := strings.Split(signedHdrFields[1], ";") + return signedHeaders, ErrNone +} + +// Parse signature from signature tag. +func parseSignature(signElement string) (string, ErrorCode) { + signFields := strings.Split(strings.TrimSpace(signElement), "=") + if len(signFields) != 2 { + return "", ErrMissingFields + } + if signFields[0] != "Signature" { + return "", ErrMissingSignTag + } + if signFields[1] == "" { + return "", ErrMissingFields + } + signature := signFields[1] + return signature, ErrNone +} + +// check query headers with presigned signature +// - http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-query-string-auth.html +func (iam *IdentityAccessManagement) doesPresignedSignatureMatch(hashedPayload string, r *http.Request) (*Identity, ErrorCode) { + + // Copy request + req := *r + + // Parse request query string. + pSignValues, err := parsePreSignV4(req.URL.Query()) + if err != ErrNone { + return nil, err + } + + // Verify if the access key id matches. + identity, cred, found := iam.lookupByAccessKey(pSignValues.Credential.accessKey) + if !found { + return nil, ErrInvalidAccessKeyID + } + + // Extract all the signed headers along with its values. + extractedSignedHeaders, errCode := extractSignedHeaders(pSignValues.SignedHeaders, r) + if errCode != ErrNone { + return nil, errCode + } + // Construct new query. 
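+	// The checks below rebuild the X-Amz-* query parameters from the parsed values, compare them
+	// field by field with what the client sent, and finally recompute the signature over the
+	// rebuilt query and compare it in constant time.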
+ query := make(url.Values) + if req.URL.Query().Get("X-Amz-Content-Sha256") != "" { + query.Set("X-Amz-Content-Sha256", hashedPayload) + } + + query.Set("X-Amz-Algorithm", signV4Algorithm) + + now := time.Now().UTC() + + // If the host which signed the request is slightly ahead in time (by less than globalMaxSkewTime) the + // request should still be allowed. + if pSignValues.Date.After(now.Add(15 * time.Minute)) { + return nil, ErrRequestNotReadyYet + } + + if now.Sub(pSignValues.Date) > pSignValues.Expires { + return nil, ErrExpiredPresignRequest + } + + // Save the date and expires. + t := pSignValues.Date + expireSeconds := int(pSignValues.Expires / time.Second) + + // Construct the query. + query.Set("X-Amz-Date", t.Format(iso8601Format)) + query.Set("X-Amz-Expires", strconv.Itoa(expireSeconds)) + query.Set("X-Amz-SignedHeaders", getSignedHeaders(extractedSignedHeaders)) + query.Set("X-Amz-Credential", cred.AccessKey+"/"+getScope(t, pSignValues.Credential.scope.region)) + + // Save other headers available in the request parameters. + for k, v := range req.URL.Query() { + + // Handle the metadata in presigned put query string + if strings.Contains(strings.ToLower(k), "x-amz-meta-") { + query.Set(k, v[0]) + } + + if strings.HasPrefix(strings.ToLower(k), "x-amz") { + continue + } + query[k] = v + } + + // Get the encoded query. + encodedQuery := query.Encode() + + // Verify if date query is same. + if req.URL.Query().Get("X-Amz-Date") != query.Get("X-Amz-Date") { + return nil, ErrSignatureDoesNotMatch + } + // Verify if expires query is same. + if req.URL.Query().Get("X-Amz-Expires") != query.Get("X-Amz-Expires") { + return nil, ErrSignatureDoesNotMatch + } + // Verify if signed headers query is same. + if req.URL.Query().Get("X-Amz-SignedHeaders") != query.Get("X-Amz-SignedHeaders") { + return nil, ErrSignatureDoesNotMatch + } + // Verify if credential query is same. + if req.URL.Query().Get("X-Amz-Credential") != query.Get("X-Amz-Credential") { + return nil, ErrSignatureDoesNotMatch + } + // Verify if sha256 payload query is same. + if req.URL.Query().Get("X-Amz-Content-Sha256") != "" { + if req.URL.Query().Get("X-Amz-Content-Sha256") != query.Get("X-Amz-Content-Sha256") { + return nil, ErrContentSHA256Mismatch + } + } + + /// Verify finally if signature is same. + + // Get canonical request. + presignedCanonicalReq := getCanonicalRequest(extractedSignedHeaders, hashedPayload, encodedQuery, req.URL.Path, req.Method) + + // Get string to sign from canonical request. + presignedStringToSign := getStringToSign(presignedCanonicalReq, t, pSignValues.Credential.getScope()) + + // Get hmac presigned signing key. + presignedSigningKey := getSigningKey(cred.SecretKey, pSignValues.Credential.scope.date, pSignValues.Credential.scope.region) + + // Get new signature. + newSignature := getSignature(presignedSigningKey, presignedStringToSign) + + // Verify signature. + if !compareSignatureV4(req.URL.Query().Get("X-Amz-Signature"), newSignature) { + return nil, ErrSignatureDoesNotMatch + } + return identity, ErrNone +} + +func contains(list []string, elem string) bool { + for _, t := range list { + if t == elem { + return true + } + } + return false +} + +// preSignValues data type represents structued form of AWS Signature V4 query string. +type preSignValues struct { + signValues + Date time.Time + Expires time.Duration +} + +// Parses signature version '4' query string of the following form. 
+// +// querystring = X-Amz-Algorithm=algorithm +// querystring += &X-Amz-Credential= urlencode(accessKey + '/' + credential_scope) +// querystring += &X-Amz-Date=date +// querystring += &X-Amz-Expires=timeout interval +// querystring += &X-Amz-SignedHeaders=signed_headers +// querystring += &X-Amz-Signature=signature +// +// verifies if any of the necessary query params are missing in the presigned request. +func doesV4PresignParamsExist(query url.Values) ErrorCode { + v4PresignQueryParams := []string{"X-Amz-Algorithm", "X-Amz-Credential", "X-Amz-Signature", "X-Amz-Date", "X-Amz-SignedHeaders", "X-Amz-Expires"} + for _, v4PresignQueryParam := range v4PresignQueryParams { + if _, ok := query[v4PresignQueryParam]; !ok { + return ErrInvalidQueryParams + } + } + return ErrNone +} + +// Parses all the presigned signature values into separate elements. +func parsePreSignV4(query url.Values) (psv preSignValues, aec ErrorCode) { + var err ErrorCode + // verify whether the required query params exist. + err = doesV4PresignParamsExist(query) + if err != ErrNone { + return psv, err + } + + // Verify if the query algorithm is supported or not. + if query.Get("X-Amz-Algorithm") != signV4Algorithm { + return psv, ErrInvalidQuerySignatureAlgo + } + + // Initialize signature version '4' structured header. + preSignV4Values := preSignValues{} + + // Save credential. + preSignV4Values.Credential, err = parseCredentialHeader("Credential=" + query.Get("X-Amz-Credential")) + if err != ErrNone { + return psv, err + } + + var e error + // Save date in native time.Time. + preSignV4Values.Date, e = time.Parse(iso8601Format, query.Get("X-Amz-Date")) + if e != nil { + return psv, ErrMalformedPresignedDate + } + + // Save expires in native time.Duration. + preSignV4Values.Expires, e = time.ParseDuration(query.Get("X-Amz-Expires") + "s") + if e != nil { + return psv, ErrMalformedExpires + } + + if preSignV4Values.Expires < 0 { + return psv, ErrNegativeExpires + } + + // Check if Expiry time is less than 7 days (value in seconds). + if preSignV4Values.Expires.Seconds() > 604800 { + return psv, ErrMaximumExpires + } + + // Save signed headers. + preSignV4Values.SignedHeaders, err = parseSignedHeader("SignedHeaders=" + query.Get("X-Amz-SignedHeaders")) + if err != ErrNone { + return psv, err + } + + // Save signature. + preSignV4Values.Signature, err = parseSignature("Signature=" + query.Get("X-Amz-Signature")) + if err != ErrNone { + return psv, err + } + + // Return structed form of signature query string. + return preSignV4Values, ErrNone +} + +// extractSignedHeaders extract signed headers from Authorization header +func extractSignedHeaders(signedHeaders []string, r *http.Request) (http.Header, ErrorCode) { + reqHeaders := r.Header + // find whether "host" is part of list of signed headers. + // if not return ErrUnsignedHeaders. "host" is mandatory. + if !contains(signedHeaders, "host") { + return nil, ErrUnsignedHeaders + } + extractedSignedHeaders := make(http.Header) + for _, header := range signedHeaders { + // `host` will not be found in the headers, can be found in r.Host. + // but its alway necessary that the list of signed headers containing host in it. 
+ val, ok := reqHeaders[http.CanonicalHeaderKey(header)] + if ok { + for _, enc := range val { + extractedSignedHeaders.Add(header, enc) + } + continue + } + switch header { + case "expect": + // Golang http server strips off 'Expect' header, if the + // client sent this as part of signed headers we need to + // handle otherwise we would see a signature mismatch. + // `aws-cli` sets this as part of signed headers. + // + // According to + // http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.20 + // Expect header is always of form: + // + // Expect = "Expect" ":" 1#expectation + // expectation = "100-continue" | expectation-extension + // + // So it safe to assume that '100-continue' is what would + // be sent, for the time being keep this work around. + // Adding a *TODO* to remove this later when Golang server + // doesn't filter out the 'Expect' header. + extractedSignedHeaders.Set(header, "100-continue") + case "host": + // Go http server removes "host" from Request.Header + extractedSignedHeaders.Set(header, r.Host) + case "transfer-encoding": + for _, enc := range r.TransferEncoding { + extractedSignedHeaders.Add(header, enc) + } + case "content-length": + // Signature-V4 spec excludes Content-Length from signed headers list for signature calculation. + // But some clients deviate from this rule. Hence we consider Content-Length for signature + // calculation to be compatible with such clients. + extractedSignedHeaders.Set(header, strconv.FormatInt(r.ContentLength, 10)) + default: + return nil, ErrUnsignedHeaders + } + } + return extractedSignedHeaders, ErrNone +} + +// getSignedHeaders generate a string i.e alphabetically sorted, semicolon-separated list of lowercase request header names +func getSignedHeaders(signedHeaders http.Header) string { + var headers []string + for k := range signedHeaders { + headers = append(headers, strings.ToLower(k)) + } + sort.Strings(headers) + return strings.Join(headers, ";") +} + +// getScope generate a string of a specific date, an AWS region, and a service. +func getScope(t time.Time, region string) string { + scope := strings.Join([]string{ + t.Format(yyyymmdd), + region, + "s3", + "aws4_request", + }, "/") + return scope +} + +// getCanonicalRequest generate a canonical request of style +// +// canonicalRequest = +// \n +// \n +// \n +// \n +// \n +// +// +func getCanonicalRequest(extractedSignedHeaders http.Header, payload, queryStr, urlPath, method string) string { + rawQuery := strings.Replace(queryStr, "+", "%20", -1) + encodedPath := encodePath(urlPath) + canonicalRequest := strings.Join([]string{ + method, + encodedPath, + rawQuery, + getCanonicalHeaders(extractedSignedHeaders), + getSignedHeaders(extractedSignedHeaders), + payload, + }, "\n") + return canonicalRequest +} + +// getStringToSign a string based on selected query values. +func getStringToSign(canonicalRequest string, t time.Time, scope string) string { + stringToSign := signV4Algorithm + "\n" + t.Format(iso8601Format) + "\n" + stringToSign = stringToSign + scope + "\n" + canonicalRequestBytes := sha256.Sum256([]byte(canonicalRequest)) + stringToSign = stringToSign + hex.EncodeToString(canonicalRequestBytes[:]) + return stringToSign +} + +// sumHMAC calculate hmac between two input byte array. +func sumHMAC(key []byte, data []byte) []byte { + hash := hmac.New(sha256.New, key) + hash.Write(data) + return hash.Sum(nil) +} + +// getSigningKey hmac seed to calculate final signature. 
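+// The AWS4 derivation chain implemented below is (date and region values are illustrative):
+//   kDate    = HMAC-SHA256("AWS4" + secretKey, "20200209")
+//   kRegion  = HMAC-SHA256(kDate, "us-east-1")
+//   kService = HMAC-SHA256(kRegion, "s3")
+//   kSigning = HMAC-SHA256(kService, "aws4_request")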
+func getSigningKey(secretKey string, t time.Time, region string) []byte { + date := sumHMAC([]byte("AWS4"+secretKey), []byte(t.Format(yyyymmdd))) + regionBytes := sumHMAC(date, []byte(region)) + service := sumHMAC(regionBytes, []byte("s3")) + signingKey := sumHMAC(service, []byte("aws4_request")) + return signingKey +} + +// getSignature final signature in hexadecimal form. +func getSignature(signingKey []byte, stringToSign string) string { + return hex.EncodeToString(sumHMAC(signingKey, []byte(stringToSign))) +} + +// getCanonicalHeaders generate a list of request headers with their values +func getCanonicalHeaders(signedHeaders http.Header) string { + var headers []string + vals := make(http.Header) + for k, vv := range signedHeaders { + headers = append(headers, strings.ToLower(k)) + vals[strings.ToLower(k)] = vv + } + sort.Strings(headers) + + var buf bytes.Buffer + for _, k := range headers { + buf.WriteString(k) + buf.WriteByte(':') + for idx, v := range vals[k] { + if idx > 0 { + buf.WriteByte(',') + } + buf.WriteString(signV4TrimAll(v)) + } + buf.WriteByte('\n') + } + return buf.String() +} + +// Trim leading and trailing spaces and replace sequential spaces with one space, following Trimall() +// in http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html +func signV4TrimAll(input string) string { + // Compress adjacent spaces (a space is determined by + // unicode.IsSpace() internally here) to one space and return + return strings.Join(strings.Fields(input), " ") +} + +// if object matches reserved string, no need to encode them +var reservedObjectNames = regexp.MustCompile("^[a-zA-Z0-9-_.~/]+$") + +// EncodePath encode the strings from UTF-8 byte representations to HTML hex escape sequences +// +// This is necessary since regular url.Parse() and url.Encode() functions do not support UTF-8 +// non english characters cannot be parsed due to the nature in which url.Encode() is written +// +// This function on the other hand is a direct replacement for url.Encode() technique to support +// pretty much every UTF-8 character. +func encodePath(pathName string) string { + if reservedObjectNames.MatchString(pathName) { + return pathName + } + var encodedPathname string + for _, s := range pathName { + if 'A' <= s && s <= 'Z' || 'a' <= s && s <= 'z' || '0' <= s && s <= '9' { // §2.3 Unreserved characters (mark) + encodedPathname = encodedPathname + string(s) + continue + } + switch s { + case '-', '_', '.', '~', '/': // §2.3 Unreserved characters (mark) + encodedPathname = encodedPathname + string(s) + continue + default: + len := utf8.RuneLen(s) + if len < 0 { + // if utf8 cannot convert return the same string as is + return pathName + } + u := make([]byte, len) + utf8.EncodeRune(u, s) + for _, r := range u { + hex := hex.EncodeToString([]byte{r}) + encodedPathname = encodedPathname + "%" + strings.ToUpper(hex) + } + } + } + return encodedPathname +} + +// compareSignatureV4 returns true if and only if both signatures +// are equal. The signatures are expected to be HEX encoded strings +// according to the AWS S3 signature V4 spec. +func compareSignatureV4(sig1, sig2 string) bool { + // The CTC using []byte(str) works because the hex encoding + // is unique for a sequence of bytes. See also compareSignatureV2. 
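+	// (CTC above refers to subtle.ConstantTimeCompare, which avoids leaking signature bytes via timing.)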
+ return subtle.ConstantTimeCompare([]byte(sig1), []byte(sig2)) == 1 +} diff --git a/weed/s3api/auto_signature_v4_test.go b/weed/s3api/auto_signature_v4_test.go new file mode 100644 index 000000000..7073814a2 --- /dev/null +++ b/weed/s3api/auto_signature_v4_test.go @@ -0,0 +1,419 @@ +package s3api + +import ( + "bytes" + "crypto/md5" + "crypto/sha256" + "encoding/base64" + "encoding/hex" + "errors" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "sort" + "strconv" + "strings" + "testing" + "time" + "unicode/utf8" +) + +// TestIsRequestPresignedSignatureV4 - Test validates the logic for presign signature verision v4 detection. +func TestIsRequestPresignedSignatureV4(t *testing.T) { + testCases := []struct { + inputQueryKey string + inputQueryValue string + expectedResult bool + }{ + // Test case - 1. + // Test case with query key ""X-Amz-Credential" set. + {"", "", false}, + // Test case - 2. + {"X-Amz-Credential", "", true}, + // Test case - 3. + {"X-Amz-Content-Sha256", "", false}, + } + + for i, testCase := range testCases { + // creating an input HTTP request. + // Only the query parameters are relevant for this particular test. + inputReq, err := http.NewRequest("GET", "http://example.com", nil) + if err != nil { + t.Fatalf("Error initializing input HTTP request: %v", err) + } + q := inputReq.URL.Query() + q.Add(testCase.inputQueryKey, testCase.inputQueryValue) + inputReq.URL.RawQuery = q.Encode() + + actualResult := isRequestPresignedSignatureV4(inputReq) + if testCase.expectedResult != actualResult { + t.Errorf("Test %d: Expected the result to `%v`, but instead got `%v`", i+1, testCase.expectedResult, actualResult) + } + } +} + + +// Tests is requested authenticated function, tests replies for s3 errors. +func TestIsReqAuthenticated(t *testing.T) { + iam := NewIdentityAccessManagement("") + iam.identities = []*Identity{ + { + Name: "someone", + Credentials: []*Credential{ + { + AccessKey: "access_key_1", + SecretKey: "secret_key_1", + }, + }, + Actions: nil, + }, + } + + // List of test cases for validating http request authentication. + testCases := []struct { + req *http.Request + s3Error ErrorCode + }{ + // When request is unsigned, access denied is returned. + {mustNewRequest("GET", "http://127.0.0.1:9000", 0, nil, t), ErrAccessDenied}, + // When request is properly signed, error is none. + {mustNewSignedRequest("GET", "http://127.0.0.1:9000", 0, nil, t), ErrNone}, + } + + // Validates all testcases. 
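+	// Note: mustNewSignedRequest below signs with access_key_1/secret_key_1, matching the identity
+	// configured above, so the signed request is expected to verify as ErrNone.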
+ for i, testCase := range testCases { + if _, s3Error := iam.reqSignatureV4Verify(testCase.req); s3Error != testCase.s3Error { + ioutil.ReadAll(testCase.req.Body) + t.Fatalf("Test %d: Unexpected S3 error: want %d - got %d", i, testCase.s3Error, s3Error) + } + } +} + +func TestCheckAdminRequestAuthType(t *testing.T) { + iam := NewIdentityAccessManagement("") + iam.identities = []*Identity{ + { + Name: "someone", + Credentials: []*Credential{ + { + AccessKey: "access_key_1", + SecretKey: "secret_key_1", + }, + }, + Actions: nil, + }, + } + + testCases := []struct { + Request *http.Request + ErrCode ErrorCode + }{ + {Request: mustNewRequest("GET", "http://127.0.0.1:9000", 0, nil, t), ErrCode: ErrAccessDenied}, + {Request: mustNewSignedRequest("GET", "http://127.0.0.1:9000", 0, nil, t), ErrCode: ErrNone}, + {Request: mustNewPresignedRequest("GET", "http://127.0.0.1:9000", 0, nil, t), ErrCode: ErrNone}, + } + for i, testCase := range testCases { + if _, s3Error := iam.reqSignatureV4Verify(testCase.Request); s3Error != testCase.ErrCode { + t.Errorf("Test %d: Unexpected s3error returned wanted %d, got %d", i, testCase.ErrCode, s3Error) + } + } +} + +// Provides a fully populated http request instance, fails otherwise. +func mustNewRequest(method string, urlStr string, contentLength int64, body io.ReadSeeker, t *testing.T) *http.Request { + req, err := newTestRequest(method, urlStr, contentLength, body) + if err != nil { + t.Fatalf("Unable to initialize new http request %s", err) + } + return req +} + +// This is similar to mustNewRequest but additionally the request +// is signed with AWS Signature V4, fails if not able to do so. +func mustNewSignedRequest(method string, urlStr string, contentLength int64, body io.ReadSeeker, t *testing.T) *http.Request { + req := mustNewRequest(method, urlStr, contentLength, body, t) + cred := &Credential{"access_key_1", "secret_key_1"} + if err := signRequestV4(req, cred.AccessKey, cred.SecretKey); err != nil { + t.Fatalf("Unable to inititalized new signed http request %s", err) + } + return req +} + +// This is similar to mustNewRequest but additionally the request +// is presigned with AWS Signature V4, fails if not able to do so. +func mustNewPresignedRequest(method string, urlStr string, contentLength int64, body io.ReadSeeker, t *testing.T) *http.Request { + req := mustNewRequest(method, urlStr, contentLength, body, t) + cred := &Credential{"access_key_1", "secret_key_1"} + if err := preSignV4(req, cred.AccessKey, cred.SecretKey, int64(10*time.Minute.Seconds())); err != nil { + t.Fatalf("Unable to inititalized new signed http request %s", err) + } + return req +} + +// Returns new HTTP request object. +func newTestRequest(method, urlStr string, contentLength int64, body io.ReadSeeker) (*http.Request, error) { + if method == "" { + method = "POST" + } + + // Save for subsequent use + var hashedPayload string + var md5Base64 string + switch { + case body == nil: + hashedPayload = getSHA256Hash([]byte{}) + default: + payloadBytes, err := ioutil.ReadAll(body) + if err != nil { + return nil, err + } + hashedPayload = getSHA256Hash(payloadBytes) + md5Base64 = getMD5HashBase64(payloadBytes) + } + // Seek back to beginning. 
+ if body != nil { + body.Seek(0, 0) + } else { + body = bytes.NewReader([]byte("")) + } + req, err := http.NewRequest(method, urlStr, body) + if err != nil { + return nil, err + } + if md5Base64 != "" { + req.Header.Set("Content-Md5", md5Base64) + } + req.Header.Set("x-amz-content-sha256", hashedPayload) + + // Add Content-Length + req.ContentLength = contentLength + + return req, nil +} + +// getSHA256Hash returns SHA-256 hash in hex encoding of given data. +func getSHA256Hash(data []byte) string { + return hex.EncodeToString(getSHA256Sum(data)) +} + +// getMD5HashBase64 returns MD5 hash in base64 encoding of given data. +func getMD5HashBase64(data []byte) string { + return base64.StdEncoding.EncodeToString(getMD5Sum(data)) +} + +// getSHA256Hash returns SHA-256 sum of given data. +func getSHA256Sum(data []byte) []byte { + hash := sha256.New() + hash.Write(data) + return hash.Sum(nil) +} + +// getMD5Sum returns MD5 sum of given data. +func getMD5Sum(data []byte) []byte { + hash := md5.New() + hash.Write(data) + return hash.Sum(nil) +} + +// getMD5Hash returns MD5 hash in hex encoding of given data. +func getMD5Hash(data []byte) string { + return hex.EncodeToString(getMD5Sum(data)) +} + +var ignoredHeaders = map[string]bool{ + "Authorization": true, + "Content-Type": true, + "Content-Length": true, + "User-Agent": true, +} + +// Sign given request using Signature V4. +func signRequestV4(req *http.Request, accessKey, secretKey string) error { + // Get hashed payload. + hashedPayload := req.Header.Get("x-amz-content-sha256") + if hashedPayload == "" { + return fmt.Errorf("Invalid hashed payload") + } + + currTime := time.Now() + + // Set x-amz-date. + req.Header.Set("x-amz-date", currTime.Format(iso8601Format)) + + // Get header map. + headerMap := make(map[string][]string) + for k, vv := range req.Header { + // If request header key is not in ignored headers, then add it. + if _, ok := ignoredHeaders[http.CanonicalHeaderKey(k)]; !ok { + headerMap[strings.ToLower(k)] = vv + } + } + + // Get header keys. + headers := []string{"host"} + for k := range headerMap { + headers = append(headers, k) + } + sort.Strings(headers) + + region := "us-east-1" + + // Get canonical headers. + var buf bytes.Buffer + for _, k := range headers { + buf.WriteString(k) + buf.WriteByte(':') + switch { + case k == "host": + buf.WriteString(req.URL.Host) + fallthrough + default: + for idx, v := range headerMap[k] { + if idx > 0 { + buf.WriteByte(',') + } + buf.WriteString(v) + } + buf.WriteByte('\n') + } + } + canonicalHeaders := buf.String() + + // Get signed headers. + signedHeaders := strings.Join(headers, ";") + + // Get canonical query string. + req.URL.RawQuery = strings.Replace(req.URL.Query().Encode(), "+", "%20", -1) + + // Get canonical URI. + canonicalURI := EncodePath(req.URL.Path) + + // Get canonical request. + // canonicalRequest = + // \n + // \n + // \n + // \n + // \n + // + // + canonicalRequest := strings.Join([]string{ + req.Method, + canonicalURI, + req.URL.RawQuery, + canonicalHeaders, + signedHeaders, + hashedPayload, + }, "\n") + + // Get scope. 
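+	// e.g. "20200209/us-east-1/s3/aws4_request" (the date shown is illustrative)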
+ scope := strings.Join([]string{ + currTime.Format(yyyymmdd), + region, + "s3", + "aws4_request", + }, "/") + + stringToSign := "AWS4-HMAC-SHA256" + "\n" + currTime.Format(iso8601Format) + "\n" + stringToSign = stringToSign + scope + "\n" + stringToSign = stringToSign + getSHA256Hash([]byte(canonicalRequest)) + + date := sumHMAC([]byte("AWS4"+secretKey), []byte(currTime.Format(yyyymmdd))) + regionHMAC := sumHMAC(date, []byte(region)) + service := sumHMAC(regionHMAC, []byte("s3")) + signingKey := sumHMAC(service, []byte("aws4_request")) + + signature := hex.EncodeToString(sumHMAC(signingKey, []byte(stringToSign))) + + // final Authorization header + parts := []string{ + "AWS4-HMAC-SHA256" + " Credential=" + accessKey + "/" + scope, + "SignedHeaders=" + signedHeaders, + "Signature=" + signature, + } + auth := strings.Join(parts, ", ") + req.Header.Set("Authorization", auth) + + return nil +} + +// preSignV4 presign the request, in accordance with +// http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-query-string-auth.html. +func preSignV4(req *http.Request, accessKeyID, secretAccessKey string, expires int64) error { + // Presign is not needed for anonymous credentials. + if accessKeyID == "" || secretAccessKey == "" { + return errors.New("Presign cannot be generated without access and secret keys") + } + + region := "us-east-1" + date := time.Now().UTC() + scope := getScope(date, region) + credential := fmt.Sprintf("%s/%s", accessKeyID, scope) + + // Set URL query. + query := req.URL.Query() + query.Set("X-Amz-Algorithm", signV4Algorithm) + query.Set("X-Amz-Date", date.Format(iso8601Format)) + query.Set("X-Amz-Expires", strconv.FormatInt(expires, 10)) + query.Set("X-Amz-SignedHeaders", "host") + query.Set("X-Amz-Credential", credential) + query.Set("X-Amz-Content-Sha256", unsignedPayload) + + // "host" is the only header required to be signed for Presigned URLs. + extractedSignedHeaders := make(http.Header) + extractedSignedHeaders.Set("host", req.Host) + + queryStr := strings.Replace(query.Encode(), "+", "%20", -1) + canonicalRequest := getCanonicalRequest(extractedSignedHeaders, unsignedPayload, queryStr, req.URL.Path, req.Method) + stringToSign := getStringToSign(canonicalRequest, date, scope) + signingKey := getSigningKey(secretAccessKey, date, region) + signature := getSignature(signingKey, stringToSign) + + req.URL.RawQuery = query.Encode() + + // Add signature header to RawQuery. + req.URL.RawQuery += "&X-Amz-Signature=" + url.QueryEscape(signature) + + // Construct the final presigned URL. + return nil +} + +// EncodePath encode the strings from UTF-8 byte representations to HTML hex escape sequences +// +// This is necessary since regular url.Parse() and url.Encode() functions do not support UTF-8 +// non english characters cannot be parsed due to the nature in which url.Encode() is written +// +// This function on the other hand is a direct replacement for url.Encode() technique to support +// pretty much every UTF-8 character. 
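+// For example (illustrative): EncodePath("photos/ünïcode.jpg") returns "photos/%C3%BCn%C3%AFcode.jpg".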
+func EncodePath(pathName string) string { + if reservedObjectNames.MatchString(pathName) { + return pathName + } + var encodedPathname string + for _, s := range pathName { + if 'A' <= s && s <= 'Z' || 'a' <= s && s <= 'z' || '0' <= s && s <= '9' { // §2.3 Unreserved characters (mark) + encodedPathname = encodedPathname + string(s) + continue + } + switch s { + case '-', '_', '.', '~', '/': // §2.3 Unreserved characters (mark) + encodedPathname = encodedPathname + string(s) + continue + default: + len := utf8.RuneLen(s) + if len < 0 { + // if utf8 cannot convert return the same string as is + return pathName + } + u := make([]byte, len) + utf8.EncodeRune(u, s) + for _, r := range u { + hex := hex.EncodeToString([]byte{r}) + encodedPathname = encodedPathname + "%" + strings.ToUpper(hex) + } + } + } + return encodedPathname +} diff --git a/weed/s3api/chunked_reader_v4.go b/weed/s3api/chunked_reader_v4.go index 061fd4a92..ce9dad90c 100644 --- a/weed/s3api/chunked_reader_v4.go +++ b/weed/s3api/chunked_reader_v4.go @@ -27,11 +27,6 @@ import ( "net/http" ) -// Streaming AWS Signature Version '4' constants. -const ( - streamingContentSHA256 = "STREAMING-AWS4-HMAC-SHA256-PAYLOAD" -) - const maxLineLength = 4 * humanize.KiByte // assumed <= bufio.defaultBufSize 4KiB // lineTooLong is generated as chunk header is bigger than 4KiB. diff --git a/weed/s3api/s3api_auth.go b/weed/s3api/s3api_auth.go index b680fe1e1..43afbaae5 100644 --- a/weed/s3api/s3api_auth.go +++ b/weed/s3api/s3api_auth.go @@ -9,6 +9,8 @@ import ( const ( signV4Algorithm = "AWS4-HMAC-SHA256" signV2Algorithm = "AWS" + iso8601Format = "20060102T150405Z" + yyyymmdd = "20060102" ) // Verify if request has JWT. @@ -23,8 +25,8 @@ func isRequestSignatureV4(r *http.Request) bool { // Verify if request has AWS Signature Version '2'. func isRequestSignatureV2(r *http.Request) bool { - return (!strings.HasPrefix(r.Header.Get("Authorization"), signV4Algorithm) && - strings.HasPrefix(r.Header.Get("Authorization"), signV2Algorithm)) + return !strings.HasPrefix(r.Header.Get("Authorization"), signV4Algorithm) && + strings.HasPrefix(r.Header.Get("Authorization"), signV2Algorithm) } // Verify if request has AWS PreSign Version '4'. diff --git a/weed/s3api/s3api_errors.go b/weed/s3api/s3api_errors.go index 96f8d9fd6..e4f551ac7 100644 --- a/weed/s3api/s3api_errors.go +++ b/weed/s3api/s3api_errors.go @@ -27,6 +27,7 @@ type ErrorCode int // Error codes, see full list at http://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html const ( ErrNone ErrorCode = iota + ErrAccessDenied ErrMethodNotAllowed ErrBucketNotEmpty ErrBucketAlreadyExists @@ -43,12 +44,39 @@ const ( ErrInternalError ErrInvalidCopyDest ErrInvalidCopySource + ErrAuthHeaderEmpty + ErrSignatureVersionNotSupported + ErrMissingFields + ErrMissingCredTag + ErrCredMalformed + ErrMalformedDate + ErrMalformedPresignedDate + ErrMalformedCredentialDate + ErrMissingSignHeadersTag + ErrMissingSignTag + ErrUnsignedHeaders + ErrInvalidQueryParams + ErrInvalidQuerySignatureAlgo + ErrExpiredPresignRequest + ErrMalformedExpires + ErrNegativeExpires + ErrMaximumExpires + ErrSignatureDoesNotMatch + ErrContentSHA256Mismatch + ErrInvalidAccessKeyID + ErrRequestNotReadyYet + ErrMissingDateHeader ErrNotImplemented ) // error code to APIError structure, these fields carry respective // descriptions for all the error responses. 
var errorCodeResponse = map[ErrorCode]APIError{ + ErrAccessDenied: { + Code: "AccessDenied", + Description: "Access Denied.", + HTTPStatusCode: http.StatusForbidden, + }, ErrMethodNotAllowed: { Code: "MethodNotAllowed", Description: "The specified method is not allowed against this resource.", @@ -132,6 +160,117 @@ var errorCodeResponse = map[ErrorCode]APIError{ HTTPStatusCode: http.StatusBadRequest, }, + ErrAuthHeaderEmpty: { + Code: "InvalidArgument", + Description: "Authorization header is invalid -- one and only one ' ' (space) required.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrSignatureVersionNotSupported: { + Code: "InvalidRequest", + Description: "The authorization mechanism you have provided is not supported. Please use AWS4-HMAC-SHA256.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrMissingFields: { + Code: "MissingFields", + Description: "Missing fields in request.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrMissingCredTag: { + Code: "InvalidRequest", + Description: "Missing Credential field for this request.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrCredMalformed: { + Code: "AuthorizationQueryParametersError", + Description: "Error parsing the X-Amz-Credential parameter; the Credential is mal-formed; expecting \"/YYYYMMDD/REGION/SERVICE/aws4_request\".", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrMalformedDate: { + Code: "MalformedDate", + Description: "Invalid date format header, expected to be in ISO8601, RFC1123 or RFC1123Z time format.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrMalformedPresignedDate: { + Code: "AuthorizationQueryParametersError", + Description: "X-Amz-Date must be in the ISO8601 Long Format \"yyyyMMdd'T'HHmmss'Z'\"", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrMissingSignHeadersTag: { + Code: "InvalidArgument", + Description: "Signature header missing SignedHeaders field.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrMissingSignTag: { + Code: "AccessDenied", + Description: "Signature header missing Signature field.", + HTTPStatusCode: http.StatusBadRequest, + }, + + ErrUnsignedHeaders: { + Code: "AccessDenied", + Description: "There were headers present in the request which were not signed", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrInvalidQueryParams: { + Code: "AuthorizationQueryParametersError", + Description: "Query-string authentication version 4 requires the X-Amz-Algorithm, X-Amz-Credential, X-Amz-Signature, X-Amz-Date, X-Amz-SignedHeaders, and X-Amz-Expires parameters.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrInvalidQuerySignatureAlgo: { + Code: "AuthorizationQueryParametersError", + Description: "X-Amz-Algorithm only supports \"AWS4-HMAC-SHA256\".", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrExpiredPresignRequest: { + Code: "AccessDenied", + Description: "Request has expired", + HTTPStatusCode: http.StatusForbidden, + }, + ErrMalformedExpires: { + Code: "AuthorizationQueryParametersError", + Description: "X-Amz-Expires should be a number", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrNegativeExpires: { + Code: "AuthorizationQueryParametersError", + Description: "X-Amz-Expires must be non-negative", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrMaximumExpires: { + Code: "AuthorizationQueryParametersError", + Description: "X-Amz-Expires must be less than a week (in seconds); that is, the given X-Amz-Expires must be less than 604800 seconds", + HTTPStatusCode: http.StatusBadRequest, + }, + + ErrInvalidAccessKeyID: { + Code: 
"InvalidAccessKeyId", + Description: "The access key ID you provided does not exist in our records.", + HTTPStatusCode: http.StatusForbidden, + }, + + ErrRequestNotReadyYet: { + Code: "AccessDenied", + Description: "Request is not valid yet", + HTTPStatusCode: http.StatusForbidden, + }, + + ErrSignatureDoesNotMatch: { + Code: "SignatureDoesNotMatch", + Description: "The request signature we calculated does not match the signature you provided. Check your key and signing method.", + HTTPStatusCode: http.StatusForbidden, + }, + + ErrContentSHA256Mismatch: { + Code: "XAmzContentSHA256Mismatch", + Description: "The provided 'x-amz-content-sha256' header does not match what was computed.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrMissingDateHeader: { + Code: "AccessDenied", + Description: "AWS authentication requires a valid Date or x-amz-date header", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrNotImplemented: { Code: "NotImplemented", Description: "A header you provided implies functionality that is not implemented", diff --git a/weed/s3api/s3api_server.go b/weed/s3api/s3api_server.go index 2233c8384..34e527960 100644 --- a/weed/s3api/s3api_server.go +++ b/weed/s3api/s3api_server.go @@ -1,14 +1,16 @@ package s3api import ( + "net/http" + "github.com/gorilla/mux" "google.golang.org/grpc" - "net/http" ) type S3ApiServerOption struct { Filer string FilerGrpcAddress string + Config string DomainName string BucketsPath string GrpcDialOption grpc.DialOption @@ -16,11 +18,13 @@ type S3ApiServerOption struct { type S3ApiServer struct { option *S3ApiServerOption + iam *IdentityAccessManagement } func NewS3ApiServer(router *mux.Router, option *S3ApiServerOption) (s3ApiServer *S3ApiServer, err error) { s3ApiServer = &S3ApiServer{ option: option, + iam: NewIdentityAccessManagement(option.Config), } s3ApiServer.registerRouter(router) @@ -40,46 +44,46 @@ func (s3a *S3ApiServer) registerRouter(router *mux.Router) { for _, bucket := range routers { // HeadObject - bucket.Methods("HEAD").Path("/{object:.+}").HandlerFunc(s3a.HeadObjectHandler) + bucket.Methods("HEAD").Path("/{object:.+}").HandlerFunc(s3a.iam.Auth(s3a.HeadObjectHandler, ACTION_READ)) // HeadBucket - bucket.Methods("HEAD").HandlerFunc(s3a.HeadBucketHandler) + bucket.Methods("HEAD").HandlerFunc(s3a.iam.Auth(s3a.HeadBucketHandler, ACTION_ADMIN)) // CopyObjectPart - bucket.Methods("PUT").Path("/{object:.+}").HeadersRegexp("X-Amz-Copy-Source", ".*?(\\/|%2F).*?").HandlerFunc(s3a.CopyObjectPartHandler).Queries("partNumber", "{partNumber:[0-9]+}", "uploadId", "{uploadId:.*}") + bucket.Methods("PUT").Path("/{object:.+}").HeadersRegexp("X-Amz-Copy-Source", ".*?(\\/|%2F).*?").HandlerFunc(s3a.iam.Auth(s3a.CopyObjectPartHandler, ACTION_WRITE)).Queries("partNumber", "{partNumber:[0-9]+}", "uploadId", "{uploadId:.*}") // PutObjectPart - bucket.Methods("PUT").Path("/{object:.+}").HandlerFunc(s3a.PutObjectPartHandler).Queries("partNumber", "{partNumber:[0-9]+}", "uploadId", "{uploadId:.*}") + bucket.Methods("PUT").Path("/{object:.+}").HandlerFunc(s3a.iam.Auth(s3a.PutObjectPartHandler, ACTION_WRITE)).Queries("partNumber", "{partNumber:[0-9]+}", "uploadId", "{uploadId:.*}") // CompleteMultipartUpload - bucket.Methods("POST").Path("/{object:.+}").HandlerFunc(s3a.CompleteMultipartUploadHandler).Queries("uploadId", "{uploadId:.*}") + bucket.Methods("POST").Path("/{object:.+}").HandlerFunc(s3a.iam.Auth(s3a.CompleteMultipartUploadHandler, ACTION_WRITE)).Queries("uploadId", "{uploadId:.*}") // NewMultipartUpload - 
bucket.Methods("POST").Path("/{object:.+}").HandlerFunc(s3a.NewMultipartUploadHandler).Queries("uploads", "") + bucket.Methods("POST").Path("/{object:.+}").HandlerFunc(s3a.iam.Auth(s3a.NewMultipartUploadHandler, ACTION_WRITE)).Queries("uploads", "") // AbortMultipartUpload - bucket.Methods("DELETE").Path("/{object:.+}").HandlerFunc(s3a.AbortMultipartUploadHandler).Queries("uploadId", "{uploadId:.*}") + bucket.Methods("DELETE").Path("/{object:.+}").HandlerFunc(s3a.iam.Auth(s3a.AbortMultipartUploadHandler, ACTION_WRITE)).Queries("uploadId", "{uploadId:.*}") // ListObjectParts - bucket.Methods("GET").Path("/{object:.+}").HandlerFunc(s3a.ListObjectPartsHandler).Queries("uploadId", "{uploadId:.*}") + bucket.Methods("GET").Path("/{object:.+}").HandlerFunc(s3a.iam.Auth(s3a.ListObjectPartsHandler, ACTION_WRITE)).Queries("uploadId", "{uploadId:.*}") // ListMultipartUploads - bucket.Methods("GET").HandlerFunc(s3a.ListMultipartUploadsHandler).Queries("uploads", "") + bucket.Methods("GET").HandlerFunc(s3a.iam.Auth(s3a.ListMultipartUploadsHandler, ACTION_WRITE)).Queries("uploads", "") // CopyObject - bucket.Methods("PUT").Path("/{object:.+}").HeadersRegexp("X-Amz-Copy-Source", ".*?(\\/|%2F).*?").HandlerFunc(s3a.CopyObjectHandler) + bucket.Methods("PUT").Path("/{object:.+}").HeadersRegexp("X-Amz-Copy-Source", ".*?(\\/|%2F).*?").HandlerFunc(s3a.iam.Auth(s3a.CopyObjectHandler, ACTION_WRITE)) // PutObject - bucket.Methods("PUT").Path("/{object:.+}").HandlerFunc(s3a.PutObjectHandler) + bucket.Methods("PUT").Path("/{object:.+}").HandlerFunc(s3a.iam.Auth(s3a.PutObjectHandler, ACTION_WRITE)) // PutBucket - bucket.Methods("PUT").HandlerFunc(s3a.PutBucketHandler) + bucket.Methods("PUT").HandlerFunc(s3a.iam.Auth(s3a.PutBucketHandler, ACTION_ADMIN)) // DeleteObject - bucket.Methods("DELETE").Path("/{object:.+}").HandlerFunc(s3a.DeleteObjectHandler) + bucket.Methods("DELETE").Path("/{object:.+}").HandlerFunc(s3a.iam.Auth(s3a.DeleteObjectHandler, ACTION_WRITE)) // DeleteBucket - bucket.Methods("DELETE").HandlerFunc(s3a.DeleteBucketHandler) + bucket.Methods("DELETE").HandlerFunc(s3a.iam.Auth(s3a.DeleteBucketHandler, ACTION_WRITE)) // ListObjectsV2 - bucket.Methods("GET").HandlerFunc(s3a.ListObjectsV2Handler).Queries("list-type", "2") + bucket.Methods("GET").HandlerFunc(s3a.iam.Auth(s3a.ListObjectsV2Handler, ACTION_READ)).Queries("list-type", "2") // GetObject, but directory listing is not supported - bucket.Methods("GET").Path("/{object:.+}").HandlerFunc(s3a.GetObjectHandler) + bucket.Methods("GET").Path("/{object:.+}").HandlerFunc(s3a.iam.Auth(s3a.GetObjectHandler, ACTION_READ)) // ListObjectsV1 (Legacy) - bucket.Methods("GET").HandlerFunc(s3a.ListObjectsV1Handler) + bucket.Methods("GET").HandlerFunc(s3a.iam.Auth(s3a.ListObjectsV1Handler, ACTION_READ)) // DeleteMultipleObjects - bucket.Methods("POST").HandlerFunc(s3a.DeleteMultipleObjectsHandler).Queries("delete", "") + bucket.Methods("POST").HandlerFunc(s3a.iam.Auth(s3a.DeleteMultipleObjectsHandler, ACTION_WRITE)).Queries("delete", "") /* // not implemented @@ -102,7 +106,7 @@ func (s3a *S3ApiServer) registerRouter(router *mux.Router) { } // ListBuckets - apiRouter.Methods("GET").Path("/").HandlerFunc(s3a.ListBucketsHandler) + apiRouter.Methods("GET").Path("/").HandlerFunc(s3a.iam.Auth(s3a.ListBucketsHandler, ACTION_ADMIN)) // NotFound apiRouter.NotFoundHandler = http.HandlerFunc(notFoundHandler) From e7b63d50b16e435dab31eda172896976c30d252c Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Sun, 9 Feb 2020 14:31:51 -0800 Subject: [PATCH 0108/2432] add s3.config to 
server.go --- weed/command/server.go | 1 + 1 file changed, 1 insertion(+) diff --git a/weed/command/server.go b/weed/command/server.go index 6aa68b6d2..d7d768df1 100644 --- a/weed/command/server.go +++ b/weed/command/server.go @@ -97,6 +97,7 @@ func init() { s3Options.domainName = cmdServer.Flag.String("s3.domainName", "", "suffix of the host name, {bucket}.{domainName}") s3Options.tlsPrivateKey = cmdServer.Flag.String("s3.key.file", "", "path to the TLS private key file") s3Options.tlsCertificate = cmdServer.Flag.String("s3.cert.file", "", "path to the TLS certificate file") + s3Options.config = cmdServer.Flag.String("s3.config", "", "path to the config file") } From b90ad6f452381f5064c37b639588fb46377a7b15 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Sun, 9 Feb 2020 16:02:05 -0800 Subject: [PATCH 0109/2432] add v2 support --- weed/command/s3.go | 10 +- weed/s3api/auth_credentials.go | 26 +- weed/s3api/auth_signature_v2.go | 412 +++++++++++++++++++++++++++ weed/s3api/auto_signature_v4_test.go | 4 +- weed/s3api/s3api_errors.go | 7 +- weed/s3api/s3api_server.go | 2 +- 6 files changed, 445 insertions(+), 16 deletions(-) create mode 100644 weed/s3api/auth_signature_v2.go diff --git a/weed/command/s3.go b/weed/command/s3.go index 4538f2135..4dc4b82f6 100644 --- a/weed/command/s3.go +++ b/weed/command/s3.go @@ -54,7 +54,7 @@ var cmdS3 = &Command{ "credentials": [ { "accessKey": "some_access_key1", - "secretKey": "some_secret_key2" + "secretKey": "some_secret_key1" } ], "actions": [ @@ -67,8 +67,8 @@ var cmdS3 = &Command{ "name": "some_read_only_user", "credentials": [ { - "accessKey": "some_access_key1", - "secretKey": "some_secret_key1" + "accessKey": "some_access_key2", + "secretKey": "some_secret_key2" } ], "actions": [ @@ -79,8 +79,8 @@ var cmdS3 = &Command{ "name": "some_normal_user", "credentials": [ { - "accessKey": "some_access_key2", - "secretKey": "some_secret_key2" + "accessKey": "some_access_key3", + "secretKey": "some_secret_key3" } ], "actions": [ diff --git a/weed/s3api/auth_credentials.go b/weed/s3api/auth_credentials.go index 9df1fdd29..e5d693951 100644 --- a/weed/s3api/auth_credentials.go +++ b/weed/s3api/auth_credentials.go @@ -26,6 +26,7 @@ type Iam interface { type IdentityAccessManagement struct { identities []*Identity + domain string } type Identity struct { @@ -39,8 +40,10 @@ type Credential struct { SecretKey string } -func NewIdentityAccessManagement(fileName string) *IdentityAccessManagement { - iam := &IdentityAccessManagement{} +func NewIdentityAccessManagement(fileName string, domain string) *IdentityAccessManagement { + iam := &IdentityAccessManagement{ + domain: domain, + } if fileName == "" { return iam } @@ -119,17 +122,26 @@ func (iam *IdentityAccessManagement) authRequest(r *http.Request, actions []Acti var identity *Identity var s3Err ErrorCode switch getRequestAuthType(r) { - case authTypeUnknown, authTypeStreamingSigned: + case authTypeStreamingSigned: + return ErrNone + case authTypeUnknown: + glog.V(3).Infof("unknown auth type") return ErrAccessDenied case authTypePresignedV2, authTypeSignedV2: - return ErrNotImplemented + glog.V(3).Infof("v2 auth type") + identity, s3Err = iam.isReqAuthenticatedV2(r) case authTypeSigned, authTypePresigned: + glog.V(3).Infof("v4 auth type") identity, s3Err = iam.reqSignatureV4Verify(r) - if s3Err != ErrNone { - return s3Err - } } + glog.V(3).Infof("auth error: %v", s3Err) + if s3Err != ErrNone { + return s3Err + } + + glog.V(3).Infof("user name: %v actions: %v", identity.Name, identity.Actions) + if 
!identity.canDo(actions) { return ErrAccessDenied } diff --git a/weed/s3api/auth_signature_v2.go b/weed/s3api/auth_signature_v2.go new file mode 100644 index 000000000..151a9ec26 --- /dev/null +++ b/weed/s3api/auth_signature_v2.go @@ -0,0 +1,412 @@ +/* + * The following code tries to reverse engineer the Amazon S3 APIs, + * and is mostly copied from minio implementation. + */ + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +// implied. See the License for the specific language governing +// permissions and limitations under the License. + +package s3api + +import ( + "crypto/hmac" + "crypto/sha1" + "crypto/subtle" + "encoding/base64" + "fmt" + "net" + "net/http" + "net/url" + "path" + "sort" + "strconv" + "strings" + "time" +) + +// Whitelist resource list that will be used in query string for signature-V2 calculation. +// The list should be alphabetically sorted +var resourceList = []string{ + "acl", + "delete", + "lifecycle", + "location", + "logging", + "notification", + "partNumber", + "policy", + "requestPayment", + "response-cache-control", + "response-content-disposition", + "response-content-encoding", + "response-content-language", + "response-content-type", + "response-expires", + "torrent", + "uploadId", + "uploads", + "versionId", + "versioning", + "versions", + "website", +} + +// Verify if request has valid AWS Signature Version '2'. +func (iam *IdentityAccessManagement) isReqAuthenticatedV2(r *http.Request) (*Identity, ErrorCode) { + if isRequestSignatureV2(r) { + return iam.doesSignV2Match(r) + } + return iam.doesPresignV2SignatureMatch(r) +} + +// Authorization = "AWS" + " " + AWSAccessKeyId + ":" + Signature; +// Signature = Base64( HMAC-SHA1( YourSecretKey, UTF-8-Encoding-Of( StringToSign ) ) ); +// +// StringToSign = HTTP-Verb + "\n" + +// Content-Md5 + "\n" + +// Content-Type + "\n" + +// Date + "\n" + +// CanonicalizedProtocolHeaders + +// CanonicalizedResource; +// +// CanonicalizedResource = [ "/" + Bucket ] + +// + +// [ subresource, if present. For example "?acl", "?location", "?logging", or "?torrent"]; +// +// CanonicalizedProtocolHeaders = + +// doesSignV2Match - Verify authorization header with calculated header in accordance with +// - http://docs.aws.amazon.com/AmazonS3/latest/dev/auth-request-sig-v2.html +// returns true if matches, false otherwise. if error is not nil then it is always false + +func validateV2AuthHeader(v2Auth string) (accessKey string, errCode ErrorCode) { + if v2Auth == "" { + return "", ErrAuthHeaderEmpty + } + // Verify if the header algorithm is supported or not. + if !strings.HasPrefix(v2Auth, signV2Algorithm) { + return "", ErrSignatureVersionNotSupported + } + + // below is V2 Signed Auth header format, splitting on `space` (after the `AWS` string). + // Authorization = "AWS" + " " + AWSAccessKeyId + ":" + Signature + authFields := strings.Split(v2Auth, " ") + if len(authFields) != 2 { + return "", ErrMissingFields + } + + // Then will be splitting on ":", this will seprate `AWSAccessKeyId` and `Signature` string. 
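+	// For illustration only (hypothetical credentials), a complete V2 header looks like
+	//   Authorization: AWS AKIAIOSFODNN7EXAMPLE:frJIUN8DYpKDtOLCwo//yllqDzg=
+	// so authFields[1] carries "accessKey:signature", which is split on ":" below.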
+ keySignFields := strings.Split(strings.TrimSpace(authFields[1]), ":") + if len(keySignFields) != 2 { + return "", ErrMissingFields + } + + return keySignFields[0], ErrNone +} + +func (iam *IdentityAccessManagement) doesSignV2Match(r *http.Request) (*Identity, ErrorCode) { + v2Auth := r.Header.Get("Authorization") + + accessKey, apiError := validateV2AuthHeader(v2Auth) + if apiError != ErrNone { + return nil, apiError + } + + // Access credentials. + // Validate if access key id same. + ident, cred, found := iam.lookupByAccessKey(accessKey) + if !found { + return nil, ErrInvalidAccessKeyID + } + + // r.RequestURI will have raw encoded URI as sent by the client. + tokens := strings.SplitN(r.RequestURI, "?", 2) + encodedResource := tokens[0] + encodedQuery := "" + if len(tokens) == 2 { + encodedQuery = tokens[1] + } + + unescapedQueries, err := unescapeQueries(encodedQuery) + if err != nil { + return nil, ErrInvalidQueryParams + } + + encodedResource, err = getResource(encodedResource, r.Host, iam.domain) + if err != nil { + return nil, ErrInvalidRequest + } + + prefix := fmt.Sprintf("%s %s:", signV2Algorithm, cred.AccessKey) + if !strings.HasPrefix(v2Auth, prefix) { + return nil, ErrSignatureDoesNotMatch + } + v2Auth = v2Auth[len(prefix):] + expectedAuth := signatureV2(cred, r.Method, encodedResource, strings.Join(unescapedQueries, "&"), r.Header) + if !compareSignatureV2(v2Auth, expectedAuth) { + return nil, ErrSignatureDoesNotMatch + } + return ident, ErrNone +} + +// doesPresignV2SignatureMatch - Verify query headers with presigned signature +// - http://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html#RESTAuthenticationQueryStringAuth +// returns ErrNone if matches. S3 errors otherwise. +func (iam *IdentityAccessManagement) doesPresignV2SignatureMatch(r *http.Request) (*Identity, ErrorCode) { + + // r.RequestURI will have raw encoded URI as sent by the client. + tokens := strings.SplitN(r.RequestURI, "?", 2) + encodedResource := tokens[0] + encodedQuery := "" + if len(tokens) == 2 { + encodedQuery = tokens[1] + } + + var ( + filteredQueries []string + gotSignature string + expires string + accessKey string + err error + ) + + var unescapedQueries []string + unescapedQueries, err = unescapeQueries(encodedQuery) + if err != nil { + return nil, ErrInvalidQueryParams + } + + // Extract the necessary values from presigned query, construct a list of new filtered queries. + for _, query := range unescapedQueries { + keyval := strings.SplitN(query, "=", 2) + if len(keyval) != 2 { + return nil, ErrInvalidQueryParams + } + switch keyval[0] { + case "AWSAccessKeyId": + accessKey = keyval[1] + case "Signature": + gotSignature = keyval[1] + case "Expires": + expires = keyval[1] + default: + filteredQueries = append(filteredQueries, query) + } + } + + // Invalid values returns error. + if accessKey == "" || gotSignature == "" || expires == "" { + return nil, ErrInvalidQueryParams + } + + // Validate if access key id same. + ident, cred, found := iam.lookupByAccessKey(accessKey) + if !found { + return nil, ErrInvalidAccessKeyID + } + + // Make sure the request has not expired. + expiresInt, err := strconv.ParseInt(expires, 10, 64) + if err != nil { + return nil, ErrMalformedExpires + } + + // Check if the presigned URL has expired. 
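+	// For illustration only (hypothetical values), a presigned V2 URL looks like
+	//   /bucket/key?AWSAccessKeyId=AKIAIOSFODNN7EXAMPLE&Expires=1175139620&Signature=vjbyPxybdZaNmGa%2ByT272YEAiv4%3D
+	// where Expires is an absolute Unix timestamp in seconds, checked below.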
+ if expiresInt < time.Now().UTC().Unix() { + return nil, ErrExpiredPresignRequest + } + + encodedResource, err = getResource(encodedResource, r.Host, iam.domain) + if err != nil { + return nil, ErrInvalidRequest + } + + expectedSignature := preSignatureV2(cred, r.Method, encodedResource, strings.Join(filteredQueries, "&"), r.Header, expires) + if !compareSignatureV2(gotSignature, expectedSignature) { + return nil, ErrSignatureDoesNotMatch + } + + return ident, ErrNone +} + +// Escape encodedQuery string into unescaped list of query params, returns error +// if any while unescaping the values. +func unescapeQueries(encodedQuery string) (unescapedQueries []string, err error) { + for _, query := range strings.Split(encodedQuery, "&") { + var unescapedQuery string + unescapedQuery, err = url.QueryUnescape(query) + if err != nil { + return nil, err + } + unescapedQueries = append(unescapedQueries, unescapedQuery) + } + return unescapedQueries, nil +} + +// Returns "/bucketName/objectName" for path-style or virtual-host-style requests. +func getResource(path string, host string, domain string) (string, error) { + if domain == "" { + return path, nil + } + // If virtual-host-style is enabled construct the "resource" properly. + if strings.Contains(host, ":") { + // In bucket.mydomain.com:9000, strip out :9000 + var err error + if host, _, err = net.SplitHostPort(host); err != nil { + return "", err + } + } + if !strings.HasSuffix(host, "."+domain) { + return path, nil + } + bucket := strings.TrimSuffix(host, "."+domain) + return "/" + pathJoin(bucket, path), nil +} + +// pathJoin - like path.Join() but retains trailing "/" of the last element +func pathJoin(elem ...string) string { + trailingSlash := "" + if len(elem) > 0 { + if strings.HasSuffix(elem[len(elem)-1], "/") { + trailingSlash = "/" + } + } + return path.Join(elem...) + trailingSlash +} + +// Return the signature v2 of a given request. +func signatureV2(cred *Credential, method string, encodedResource string, encodedQuery string, headers http.Header) string { + stringToSign := getStringToSignV2(method, encodedResource, encodedQuery, headers, "") + signature := calculateSignatureV2(stringToSign, cred.SecretKey) + return signature +} + +// Return string to sign under two different conditions. +// - if expires string is set then string to sign includes date instead of the Date header. +// - if expires string is empty then string to sign includes date header instead. +func getStringToSignV2(method string, encodedResource, encodedQuery string, headers http.Header, expires string) string { + canonicalHeaders := canonicalizedAmzHeadersV2(headers) + if len(canonicalHeaders) > 0 { + canonicalHeaders += "\n" + } + + date := expires // Date is set to expires date for presign operations. + if date == "" { + // If expires date is empty then request header Date is used. + date = headers.Get("Date") + } + + // From the Amazon docs: + // + // StringToSign = HTTP-Verb + "\n" + + // Content-Md5 + "\n" + + // Content-Type + "\n" + + // Date/Expires + "\n" + + // CanonicalizedProtocolHeaders + + // CanonicalizedResource; + stringToSign := strings.Join([]string{ + method, + headers.Get("Content-MD5"), + headers.Get("Content-Type"), + date, + canonicalHeaders, + }, "\n") + + return stringToSign + canonicalizedResourceV2(encodedResource, encodedQuery) +} + +// Return canonical resource string. 
+func canonicalizedResourceV2(encodedResource, encodedQuery string) string { + queries := strings.Split(encodedQuery, "&") + keyval := make(map[string]string) + for _, query := range queries { + key := query + val := "" + index := strings.Index(query, "=") + if index != -1 { + key = query[:index] + val = query[index+1:] + } + keyval[key] = val + } + + var canonicalQueries []string + for _, key := range resourceList { + val, ok := keyval[key] + if !ok { + continue + } + if val == "" { + canonicalQueries = append(canonicalQueries, key) + continue + } + canonicalQueries = append(canonicalQueries, key+"="+val) + } + + // The queries will be already sorted as resourceList is sorted, if canonicalQueries + // is empty strings.Join returns empty. + canonicalQuery := strings.Join(canonicalQueries, "&") + if canonicalQuery != "" { + return encodedResource + "?" + canonicalQuery + } + return encodedResource +} + +// Return canonical headers. +func canonicalizedAmzHeadersV2(headers http.Header) string { + var keys []string + keyval := make(map[string]string) + for key := range headers { + lkey := strings.ToLower(key) + if !strings.HasPrefix(lkey, "x-amz-") { + continue + } + keys = append(keys, lkey) + keyval[lkey] = strings.Join(headers[key], ",") + } + sort.Strings(keys) + var canonicalHeaders []string + for _, key := range keys { + canonicalHeaders = append(canonicalHeaders, key+":"+keyval[key]) + } + return strings.Join(canonicalHeaders, "\n") +} + +func calculateSignatureV2(stringToSign string, secret string) string { + hm := hmac.New(sha1.New, []byte(secret)) + hm.Write([]byte(stringToSign)) + return base64.StdEncoding.EncodeToString(hm.Sum(nil)) +} + +// compareSignatureV2 returns true if and only if both signatures +// are equal. The signatures are expected to be base64 encoded strings +// according to the AWS S3 signature V2 spec. +func compareSignatureV2(sig1, sig2 string) bool { + // Decode signature string to binary byte-sequence representation is required + // as Base64 encoding of a value is not unique: + // For example "aGVsbG8=" and "aGVsbG8=\r" will result in the same byte slice. + signature1, err := base64.StdEncoding.DecodeString(sig1) + if err != nil { + return false + } + signature2, err := base64.StdEncoding.DecodeString(sig2) + if err != nil { + return false + } + return subtle.ConstantTimeCompare(signature1, signature2) == 1 +} + +// Return signature-v2 for the presigned request. +func preSignatureV2(cred *Credential, method string, encodedResource string, encodedQuery string, headers http.Header, expires string) string { + stringToSign := getStringToSignV2(method, encodedResource, encodedQuery, headers, expires) + return calculateSignatureV2(stringToSign, cred.SecretKey) +} diff --git a/weed/s3api/auto_signature_v4_test.go b/weed/s3api/auto_signature_v4_test.go index 7073814a2..0502d105c 100644 --- a/weed/s3api/auto_signature_v4_test.go +++ b/weed/s3api/auto_signature_v4_test.go @@ -57,7 +57,7 @@ func TestIsRequestPresignedSignatureV4(t *testing.T) { // Tests is requested authenticated function, tests replies for s3 errors. 
func TestIsReqAuthenticated(t *testing.T) { - iam := NewIdentityAccessManagement("") + iam := NewIdentityAccessManagement("", "") iam.identities = []*Identity{ { Name: "someone", @@ -92,7 +92,7 @@ func TestIsReqAuthenticated(t *testing.T) { } func TestCheckAdminRequestAuthType(t *testing.T) { - iam := NewIdentityAccessManagement("") + iam := NewIdentityAccessManagement("", "") iam.identities = []*Identity{ { Name: "someone", diff --git a/weed/s3api/s3api_errors.go b/weed/s3api/s3api_errors.go index e4f551ac7..e0f89c2b0 100644 --- a/weed/s3api/s3api_errors.go +++ b/weed/s3api/s3api_errors.go @@ -66,6 +66,7 @@ const ( ErrInvalidAccessKeyID ErrRequestNotReadyYet ErrMissingDateHeader + ErrInvalidRequest ErrNotImplemented ) @@ -270,7 +271,11 @@ var errorCodeResponse = map[ErrorCode]APIError{ Description: "AWS authentication requires a valid Date or x-amz-date header", HTTPStatusCode: http.StatusBadRequest, }, - + ErrInvalidRequest: { + Code: "InvalidRequest", + Description: "Invalid Request", + HTTPStatusCode: http.StatusBadRequest, + }, ErrNotImplemented: { Code: "NotImplemented", Description: "A header you provided implies functionality that is not implemented", diff --git a/weed/s3api/s3api_server.go b/weed/s3api/s3api_server.go index 34e527960..773094a5f 100644 --- a/weed/s3api/s3api_server.go +++ b/weed/s3api/s3api_server.go @@ -24,7 +24,7 @@ type S3ApiServer struct { func NewS3ApiServer(router *mux.Router, option *S3ApiServerOption) (s3ApiServer *S3ApiServer, err error) { s3ApiServer = &S3ApiServer{ option: option, - iam: NewIdentityAccessManagement(option.Config), + iam: NewIdentityAccessManagement(option.Config, option.DomainName), } s3ApiServer.registerRouter(router) From f3ce3166ad08b08b916efaf4833f4ff1e140325b Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Sun, 9 Feb 2020 17:42:17 -0800 Subject: [PATCH 0110/2432] add streaming v4 --- weed/s3api/auth_signature_v4.go | 2 + weed/s3api/chunked_reader_v4.go | 155 ++++++++++++++++-- weed/s3api/s3api_object_handlers.go | 7 +- weed/s3api/s3api_object_multipart_handlers.go | 14 +- 4 files changed, 162 insertions(+), 16 deletions(-) diff --git a/weed/s3api/auth_signature_v4.go b/weed/s3api/auth_signature_v4.go index 6da316abc..3bc5f6457 100644 --- a/weed/s3api/auth_signature_v4.go +++ b/weed/s3api/auth_signature_v4.go @@ -48,6 +48,8 @@ func (iam *IdentityAccessManagement) reqSignatureV4Verify(r *http.Request) (*Ide const ( emptySHA256 = "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" streamingContentSHA256 = "STREAMING-AWS4-HMAC-SHA256-PAYLOAD" + signV4ChunkedAlgorithm = "AWS4-HMAC-SHA256-PAYLOAD" + streamingContentEncoding = "aws-chunked" // http Header "x-amz-content-sha256" == "UNSIGNED-PAYLOAD" indicates that the // client did not calculate sha256 of the payload. diff --git a/weed/s3api/chunked_reader_v4.go b/weed/s3api/chunked_reader_v4.go index ce9dad90c..76c4394c2 100644 --- a/weed/s3api/chunked_reader_v4.go +++ b/weed/s3api/chunked_reader_v4.go @@ -21,12 +21,115 @@ package s3api import ( "bufio" "bytes" + "crypto/sha256" + "encoding/hex" "errors" - "github.com/dustin/go-humanize" + "hash" "io" "net/http" + "time" + + "github.com/dustin/go-humanize" ) +// getChunkSignature - get chunk signature. +func getChunkSignature(secretKey string, seedSignature string, region string, date time.Time, hashedChunk string) string { + + // Calculate string to sign. 
+ stringToSign := signV4ChunkedAlgorithm + "\n" + + date.Format(iso8601Format) + "\n" + + getScope(date, region) + "\n" + + seedSignature + "\n" + + emptySHA256 + "\n" + + hashedChunk + + // Get hmac signing key. + signingKey := getSigningKey(secretKey, date, region) + + // Calculate signature. + newSignature := getSignature(signingKey, stringToSign) + + return newSignature +} + +// calculateSeedSignature - Calculate seed signature in accordance with +// - http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-streaming.html +// returns signature, error otherwise if the signature mismatches or any other +// error while parsing and validating. +func (iam *IdentityAccessManagement) calculateSeedSignature(r *http.Request) (cred *Credential, signature string, region string, date time.Time, errCode ErrorCode) { + + // Copy request. + req := *r + + // Save authorization header. + v4Auth := req.Header.Get("Authorization") + + // Parse signature version '4' header. + signV4Values, errCode := parseSignV4(v4Auth) + if errCode != ErrNone { + return nil, "", "", time.Time{}, errCode + } + + // Payload streaming. + payload := streamingContentSHA256 + + // Payload for STREAMING signature should be 'STREAMING-AWS4-HMAC-SHA256-PAYLOAD' + if payload != req.Header.Get("X-Amz-Content-Sha256") { + return nil, "", "", time.Time{}, ErrContentSHA256Mismatch + } + + // Extract all the signed headers along with its values. + extractedSignedHeaders, errCode := extractSignedHeaders(signV4Values.SignedHeaders, r) + if errCode != ErrNone { + return nil, "", "", time.Time{}, errCode + } + // Verify if the access key id matches. + _, cred, found := iam.lookupByAccessKey(signV4Values.Credential.accessKey) + if !found { + return nil, "", "", time.Time{}, ErrInvalidAccessKeyID + } + + // Verify if region is valid. + region = signV4Values.Credential.scope.region + + // Extract date, if not present throw error. + var dateStr string + if dateStr = req.Header.Get(http.CanonicalHeaderKey("x-amz-date")); dateStr == "" { + if dateStr = r.Header.Get("Date"); dateStr == "" { + return nil, "", "", time.Time{}, ErrMissingDateHeader + } + } + // Parse date header. + var err error + date, err = time.Parse(iso8601Format, dateStr) + if err != nil { + return nil, "", "", time.Time{}, ErrMalformedDate + } + + // Query string. + queryStr := req.URL.Query().Encode() + + // Get canonical request. + canonicalRequest := getCanonicalRequest(extractedSignedHeaders, payload, queryStr, req.URL.Path, req.Method) + + // Get string to sign from canonical request. + stringToSign := getStringToSign(canonicalRequest, date, signV4Values.Credential.getScope()) + + // Get hmac signing key. + signingKey := getSigningKey(cred.SecretKey, signV4Values.Credential.scope.date, region) + + // Calculate signature. + newSignature := getSignature(signingKey, stringToSign) + + // Verify if signature match. + if !compareSignatureV4(newSignature, signV4Values.Signature) { + return nil, "", "", time.Time{}, ErrSignatureDoesNotMatch + } + + // Return caculated signature. + return cred, newSignature, region, date, ErrNone +} + const maxLineLength = 4 * humanize.KiByte // assumed <= bufio.defaultBufSize 4KiB // lineTooLong is generated as chunk header is bigger than 4KiB. @@ -38,22 +141,36 @@ var errMalformedEncoding = errors.New("malformed chunked encoding") // newSignV4ChunkedReader returns a new s3ChunkedReader that translates the data read from r // out of HTTP "chunked" format before returning it. 
// The s3ChunkedReader returns io.EOF when the final 0-length chunk is read. -func newSignV4ChunkedReader(req *http.Request) io.ReadCloser { - return &s3ChunkedReader{ - reader: bufio.NewReader(req.Body), - state: readChunkHeader, +func (iam *IdentityAccessManagement) newSignV4ChunkedReader(req *http.Request) (io.ReadCloser, ErrorCode) { + ident, seedSignature, region, seedDate, errCode := iam.calculateSeedSignature(req) + if errCode != ErrNone { + return nil, errCode } + return &s3ChunkedReader{ + cred: ident, + reader: bufio.NewReader(req.Body), + seedSignature: seedSignature, + seedDate: seedDate, + region: region, + chunkSHA256Writer: sha256.New(), + state: readChunkHeader, + }, ErrNone } // Represents the overall state that is required for decoding a // AWS Signature V4 chunked reader. type s3ChunkedReader struct { - reader *bufio.Reader - state chunkState - lastChunk bool - chunkSignature string - n uint64 // Unread bytes in chunk - err error + cred *Credential + reader *bufio.Reader + seedSignature string + seedDate time.Time + region string + state chunkState + lastChunk bool + chunkSignature string + chunkSHA256Writer hash.Hash // Calculates sha256 of chunk data. + n uint64 // Unread bytes in chunk + err error } // Read chunk reads the chunk token signature portion. @@ -152,6 +269,9 @@ func (cr *s3ChunkedReader) Read(buf []byte) (n int, err error) { return 0, cr.err } + // Calculate sha256. + cr.chunkSHA256Writer.Write(rbuf[:n0]) + // Update the bytes read into request buffer so far. n += n0 buf = buf[n0:] @@ -164,6 +284,19 @@ func (cr *s3ChunkedReader) Read(buf []byte) (n int, err error) { continue } case verifyChunk: + // Calculate the hashed chunk. + hashedChunk := hex.EncodeToString(cr.chunkSHA256Writer.Sum(nil)) + // Calculate the chunk signature. + newSignature := getChunkSignature(cr.cred.SecretKey, cr.seedSignature, cr.region, cr.seedDate, hashedChunk) + if !compareSignatureV4(cr.chunkSignature, newSignature) { + // Chunk signature doesn't match we return signature does not match. + cr.err = errors.New("chunk signature does not match") + return 0, cr.err + } + // Newly calculated signature becomes the seed for the next chunk + // this follows the chaining. 
+ cr.seedSignature = newSignature + cr.chunkSHA256Writer.Reset() if cr.lastChunk { cr.state = eofChunk } else { diff --git a/weed/s3api/s3api_object_handlers.go b/weed/s3api/s3api_object_handlers.go index 8dc733eb9..ccfcc0109 100644 --- a/weed/s3api/s3api_object_handlers.go +++ b/weed/s3api/s3api_object_handlers.go @@ -41,8 +41,13 @@ func (s3a *S3ApiServer) PutObjectHandler(w http.ResponseWriter, r *http.Request) rAuthType := getRequestAuthType(r) dataReader := r.Body + var s3ErrCode ErrorCode if rAuthType == authTypeStreamingSigned { - dataReader = newSignV4ChunkedReader(r) + dataReader, s3ErrCode = s3a.iam.newSignV4ChunkedReader(r) + } + if s3ErrCode != ErrNone { + writeErrorResponse(w, s3ErrCode, r.URL) + return } uploadUrl := fmt.Sprintf("http://%s%s/%s%s?collection=%s", diff --git a/weed/s3api/s3api_object_multipart_handlers.go b/weed/s3api/s3api_object_multipart_handlers.go index 72a25e4a5..c59fccbfa 100644 --- a/weed/s3api/s3api_object_multipart_handlers.go +++ b/weed/s3api/s3api_object_multipart_handlers.go @@ -3,13 +3,14 @@ package s3api import ( "context" "fmt" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/s3" - "github.com/gorilla/mux" "net/http" "net/url" "strconv" "strings" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/s3" + "github.com/gorilla/mux" ) const ( @@ -195,9 +196,14 @@ func (s3a *S3ApiServer) PutObjectPartHandler(w http.ResponseWriter, r *http.Requ return } + var s3ErrCode ErrorCode dataReader := r.Body if rAuthType == authTypeStreamingSigned { - dataReader = newSignV4ChunkedReader(r) + dataReader, s3ErrCode = s3a.iam.newSignV4ChunkedReader(r) + } + if s3ErrCode != ErrNone { + writeErrorResponse(w, s3ErrCode, r.URL) + return } uploadUrl := fmt.Sprintf("http://%s%s/%s/%04d.part?collection=%s", From b4abe3c0814f95a08dfb0b57ac0fa6dae71dde5c Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Sun, 9 Feb 2020 18:02:17 -0800 Subject: [PATCH 0111/2432] unused --- weed/s3api/auth_signature_v4.go | 1 - 1 file changed, 1 deletion(-) diff --git a/weed/s3api/auth_signature_v4.go b/weed/s3api/auth_signature_v4.go index 3bc5f6457..757016a55 100644 --- a/weed/s3api/auth_signature_v4.go +++ b/weed/s3api/auth_signature_v4.go @@ -49,7 +49,6 @@ const ( emptySHA256 = "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" streamingContentSHA256 = "STREAMING-AWS4-HMAC-SHA256-PAYLOAD" signV4ChunkedAlgorithm = "AWS4-HMAC-SHA256-PAYLOAD" - streamingContentEncoding = "aws-chunked" // http Header "x-amz-content-sha256" == "UNSIGNED-PAYLOAD" indicates that the // client did not calculate sha256 of the payload. 
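The streaming V4 patches above verify aws-chunked uploads by re-deriving each chunk signature from the previous one, so a client has to chain its signatures the same way. The following is a rough, self-contained sketch of that client-side chaining, not SeaweedFS code; the helper names, the example secret and seed values, and the "20060102T150405Z" time layout are assumptions, while the AWS4-HMAC-SHA256-PAYLOAD string-to-sign and the standard SigV4 key derivation mirror getChunkSignature and getSigningKey as used in chunked_reader_v4.go:

package main

import (
	"crypto/hmac"
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"time"
)

// hmacSHA256 is a small local helper used by the sketch below.
func hmacSHA256(key []byte, data string) []byte {
	h := hmac.New(sha256.New, key)
	h.Write([]byte(data))
	return h.Sum(nil)
}

// signingKey derives the SigV4 signing key: an HMAC chain over date, region, service, "aws4_request".
func signingKey(secretKey, dateStamp, region string) []byte {
	k := hmacSHA256([]byte("AWS4"+secretKey), dateStamp)
	k = hmacSHA256(k, region)
	k = hmacSHA256(k, "s3")
	return hmacSHA256(k, "aws4_request")
}

// chunkSignature mirrors the server-side getChunkSignature: the previous signature
// (the seed signature for the first chunk) is folded into the string-to-sign,
// which chains every chunk to the one before it.
func chunkSignature(secretKey, region, prevSignature string, t time.Time, chunk []byte) string {
	const emptySHA256 = "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
	dateStamp := t.UTC().Format("20060102")
	scope := dateStamp + "/" + region + "/s3/aws4_request"
	hashedChunk := sha256.Sum256(chunk)

	stringToSign := "AWS4-HMAC-SHA256-PAYLOAD\n" +
		t.UTC().Format("20060102T150405Z") + "\n" +
		scope + "\n" +
		prevSignature + "\n" +
		emptySHA256 + "\n" +
		hex.EncodeToString(hashedChunk[:])

	return hex.EncodeToString(hmacSHA256(signingKey(secretKey, dateStamp, region), stringToSign))
}

func main() {
	// Hypothetical values; the seed signature normally comes from signing the request headers.
	seed := "4f232c4386841ef735655705268965c44a0e4690baa4adea153f7db9fa80a0a9"
	t := time.Date(2020, 2, 9, 0, 0, 0, 0, time.UTC)

	// Each chunk's signature becomes the "previous signature" for the next chunk;
	// the final zero-length chunk closes the chain, matching what s3ChunkedReader verifies.
	sig1 := chunkSignature("secret", "us-east-1", seed, t, []byte("hello "))
	sig2 := chunkSignature("secret", "us-east-1", sig1, t, []byte("world"))
	last := chunkSignature("secret", "us-east-1", sig2, t, nil)
	fmt.Println(sig1, sig2, last)
}

Per the AWS streaming-payload format, each chunk is framed on the wire as <hex-size>;chunk-signature=<sig>\r\n<data>\r\n with a final zero-length chunk closing the stream; that framing is what the chunk-header parsing in chunked_reader_v4.go consumes before comparing signatures.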
From 29945fad51320deb7c72f57d1c7a84bcc51429da Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Sun, 9 Feb 2020 18:07:17 -0800 Subject: [PATCH 0112/2432] 1.54 --- weed/util/constants.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/weed/util/constants.go b/weed/util/constants.go index 3d61b2006..25b986653 100644 --- a/weed/util/constants.go +++ b/weed/util/constants.go @@ -5,5 +5,5 @@ import ( ) var ( - VERSION = fmt.Sprintf("%s %d.%d", sizeLimit, 1, 53) + VERSION = fmt.Sprintf("%s %d.%d", sizeLimit, 1, 54) ) From 58f126fd27bb2f366f76f42223b93ba3b31a0bd8 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Mon, 10 Feb 2020 09:13:29 -0800 Subject: [PATCH 0113/2432] HEAD operation changes to fasthttp --- go.mod | 2 +- go.sum | 8 ++++++++ weed/operation/assign_file_id.go | 20 ++++++++++++++------ weed/util/http_util.go | 25 +++++++++++++++++-------- weed/util/http_util_test.go | 19 +++++++++++++++++++ 5 files changed, 59 insertions(+), 15 deletions(-) create mode 100644 weed/util/http_util_test.go diff --git a/go.mod b/go.mod index 48879fd8c..0bf76980e 100644 --- a/go.mod +++ b/go.mod @@ -36,7 +36,6 @@ require ( github.com/jcmturner/gofork v1.0.0 // indirect github.com/karlseguin/ccache v2.0.3+incompatible github.com/karlseguin/expect v1.0.1 // indirect - github.com/klauspost/cpuid v1.2.1 // indirect github.com/klauspost/crc32 v1.2.0 github.com/klauspost/reedsolomon v1.9.2 github.com/konsorten/go-windows-terminal-sequences v1.0.2 // indirect @@ -76,6 +75,7 @@ require ( github.com/uber-go/atomic v1.4.0 // indirect github.com/uber/jaeger-client-go v2.17.0+incompatible // indirect github.com/uber/jaeger-lib v2.0.0+incompatible // indirect + github.com/valyala/fasthttp v1.9.0 github.com/willf/bitset v1.1.10 // indirect github.com/willf/bloom v2.0.3+incompatible github.com/wsxiaoys/terminal v0.0.0-20160513160801-0940f3fc43a0 // indirect diff --git a/go.sum b/go.sum index d16280568..132119abd 100644 --- a/go.sum +++ b/go.sum @@ -282,6 +282,8 @@ github.com/karlseguin/expect v1.0.1/go.mod h1:zNBxMY8P21owkeogJELCLeHIt+voOSduHY github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/compress v1.8.2 h1:Bx0qjetmNjdFXASH02NSAREKpiaDwkO1DRZ3dV2KCcs= +github.com/klauspost/compress v1.8.2/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= github.com/klauspost/cpuid v0.0.0-20170728055534-ae7887de9fa5/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= github.com/klauspost/cpuid v1.2.1 h1:vJi+O/nMdFt0vqm8NZBI6wzALWdA2X+egi0ogNyrC/w= github.com/klauspost/cpuid v1.2.1/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= @@ -545,6 +547,11 @@ github.com/unrolled/render v0.0.0-20180914162206-b9786414de4d h1:ggUgChAeyge4NZ4 github.com/unrolled/render v0.0.0-20180914162206-b9786414de4d/go.mod h1:tu82oB5W2ykJRVioYsB+IQKcft7ryBr7w12qMBUPyXg= github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= github.com/urfave/negroni v0.3.0/go.mod h1:Meg73S6kFm/4PpbYdq35yYWoCZ9mS/YSx+lKnmiohz4= +github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw= +github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= +github.com/valyala/fasthttp v1.9.0 h1:hNpmUdy/+ZXYpGy0OBfm7K0UQTzb73W0T0U4iJIVrMw= +github.com/valyala/fasthttp v1.9.0/go.mod h1:FstJa9V+Pj9vQ7OJie2qMHdwemEDaDiSdBnvPM1Su9w= 
+github.com/valyala/tcplisten v0.0.0-20161114210144-ceec8f93295a/go.mod h1:v3UYOV9WzVtRmSR+PDvWpU/qWl4Wa5LApYYX4ZtKbio= github.com/willf/bitset v1.1.10 h1:NotGKqX0KwQ72NUzqrjZq5ipPNDQex9lo3WpaS8L2sc= github.com/willf/bitset v1.1.10/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4= github.com/willf/bloom v2.0.3+incompatible h1:QDacWdqcAUI1MPOwIQZRy9kOR7yxfyEmxX8Wdm2/JPA= @@ -626,6 +633,7 @@ golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190619014844-b5b0513f8c1b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190909003024-a7b16738d86b h1:XfVGCX+0T4WOStkaOsJRllbsiImhB2jgVBGc9L0lPGc= golang.org/x/net v0.0.0-20190909003024-a7b16738d86b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= diff --git a/weed/operation/assign_file_id.go b/weed/operation/assign_file_id.go index b67d8b708..3f3bb13e0 100644 --- a/weed/operation/assign_file_id.go +++ b/weed/operation/assign_file_id.go @@ -1,13 +1,17 @@ package operation import ( + "bytes" "context" "fmt" + + "github.com/valyala/fasthttp" + "google.golang.org/grpc" + + "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/pb/master_pb" "github.com/chrislusf/seaweedfs/weed/security" "github.com/chrislusf/seaweedfs/weed/util" - "google.golang.org/grpc" - "strings" ) type VolumeAssignRequest struct { @@ -89,12 +93,16 @@ func Assign(server string, grpcDialOption grpc.DialOption, primaryRequest *Volum func LookupJwt(master string, fileId string) security.EncodedJwt { tokenStr := "" + lookupUrl := fmt.Sprintf("http://%s/dir/lookup?fileId=%s", master, fileId) - if h, e := util.Head(fmt.Sprintf("http://%s/dir/lookup?fileId=%s", master, fileId)); e == nil { - bearer := h.Get("Authorization") - if len(bearer) > 7 && strings.ToUpper(bearer[0:6]) == "BEARER" { - tokenStr = bearer[7:] + err := util.Head(lookupUrl, func(header fasthttp.ResponseHeader) { + bearer := header.Peek("Authorization") + if len(bearer) > 7 && string(bytes.ToUpper(bearer[0:6])) == "BEARER" { + tokenStr = string(bearer[7:]) } + }) + if err != nil { + glog.V(0).Infof("failed to lookup jwt %s: %v", lookupUrl, err) } return security.EncodedJwt(tokenStr) diff --git a/weed/util/http_util.go b/weed/util/http_util.go index 08007a038..b74e30ad7 100644 --- a/weed/util/http_util.go +++ b/weed/util/http_util.go @@ -12,6 +12,8 @@ import ( "net/url" "strings" + "github.com/valyala/fasthttp" + "github.com/chrislusf/seaweedfs/weed/glog" ) @@ -83,16 +85,23 @@ func Get(url string) ([]byte, error) { return b, nil } -func Head(url string) (http.Header, error) { - r, err := client.Head(url) - if err != nil { - return nil, err +func Head(url string, fn func(header fasthttp.ResponseHeader)) error { + req := fasthttp.AcquireRequest() + resp := fasthttp.AcquireResponse() + defer fasthttp.ReleaseRequest(req) // <- do not forget to release + defer fasthttp.ReleaseResponse(resp) // <- do not forget to release + + c := fasthttp.Client{} + req.SetRequestURI(url) + req.Header.SetMethod(fasthttp.MethodHead) + if err := c.Do(req, resp); err != nil { + return err } - defer 
r.Body.Close() - if r.StatusCode >= 400 { - return nil, fmt.Errorf("%s: %s", url, r.Status) + if resp.StatusCode() >= 400 { + return fmt.Errorf("%s: %d", url, resp.StatusCode()) } - return r.Header, nil + fn(resp.Header) + return nil } func Delete(url string, jwt string) error { diff --git a/weed/util/http_util_test.go b/weed/util/http_util_test.go new file mode 100644 index 000000000..a8a1172d2 --- /dev/null +++ b/weed/util/http_util_test.go @@ -0,0 +1,19 @@ +package util + +import ( + "testing" + + "github.com/valyala/fasthttp" +) + +func TestFasthttpClientHead(t *testing.T) { + err := Head("https://www.google.com", func(header fasthttp.ResponseHeader) { + header.VisitAll(func(key, value []byte) { + println(string(key) + ": " + string(value)) + }) + }) + if err != nil { + println(err.Error()) + } + +} From 33b3bd467cfa222a910d0298bf3e2a77428ce843 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Mon, 10 Feb 2020 13:43:53 -0800 Subject: [PATCH 0114/2432] Revert "HEAD operation changes to fasthttp" This reverts commit 58f126fd27bb2f366f76f42223b93ba3b31a0bd8. --- go.mod | 2 +- go.sum | 8 -------- weed/operation/assign_file_id.go | 20 ++++++-------------- weed/util/http_util.go | 25 ++++++++----------------- weed/util/http_util_test.go | 19 ------------------- 5 files changed, 15 insertions(+), 59 deletions(-) delete mode 100644 weed/util/http_util_test.go diff --git a/go.mod b/go.mod index 0bf76980e..48879fd8c 100644 --- a/go.mod +++ b/go.mod @@ -36,6 +36,7 @@ require ( github.com/jcmturner/gofork v1.0.0 // indirect github.com/karlseguin/ccache v2.0.3+incompatible github.com/karlseguin/expect v1.0.1 // indirect + github.com/klauspost/cpuid v1.2.1 // indirect github.com/klauspost/crc32 v1.2.0 github.com/klauspost/reedsolomon v1.9.2 github.com/konsorten/go-windows-terminal-sequences v1.0.2 // indirect @@ -75,7 +76,6 @@ require ( github.com/uber-go/atomic v1.4.0 // indirect github.com/uber/jaeger-client-go v2.17.0+incompatible // indirect github.com/uber/jaeger-lib v2.0.0+incompatible // indirect - github.com/valyala/fasthttp v1.9.0 github.com/willf/bitset v1.1.10 // indirect github.com/willf/bloom v2.0.3+incompatible github.com/wsxiaoys/terminal v0.0.0-20160513160801-0940f3fc43a0 // indirect diff --git a/go.sum b/go.sum index 132119abd..d16280568 100644 --- a/go.sum +++ b/go.sum @@ -282,8 +282,6 @@ github.com/karlseguin/expect v1.0.1/go.mod h1:zNBxMY8P21owkeogJELCLeHIt+voOSduHY github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/compress v1.8.2 h1:Bx0qjetmNjdFXASH02NSAREKpiaDwkO1DRZ3dV2KCcs= -github.com/klauspost/compress v1.8.2/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= github.com/klauspost/cpuid v0.0.0-20170728055534-ae7887de9fa5/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= github.com/klauspost/cpuid v1.2.1 h1:vJi+O/nMdFt0vqm8NZBI6wzALWdA2X+egi0ogNyrC/w= github.com/klauspost/cpuid v1.2.1/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= @@ -547,11 +545,6 @@ github.com/unrolled/render v0.0.0-20180914162206-b9786414de4d h1:ggUgChAeyge4NZ4 github.com/unrolled/render v0.0.0-20180914162206-b9786414de4d/go.mod h1:tu82oB5W2ykJRVioYsB+IQKcft7ryBr7w12qMBUPyXg= github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= github.com/urfave/negroni v0.3.0/go.mod h1:Meg73S6kFm/4PpbYdq35yYWoCZ9mS/YSx+lKnmiohz4= 
-github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw= -github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= -github.com/valyala/fasthttp v1.9.0 h1:hNpmUdy/+ZXYpGy0OBfm7K0UQTzb73W0T0U4iJIVrMw= -github.com/valyala/fasthttp v1.9.0/go.mod h1:FstJa9V+Pj9vQ7OJie2qMHdwemEDaDiSdBnvPM1Su9w= -github.com/valyala/tcplisten v0.0.0-20161114210144-ceec8f93295a/go.mod h1:v3UYOV9WzVtRmSR+PDvWpU/qWl4Wa5LApYYX4ZtKbio= github.com/willf/bitset v1.1.10 h1:NotGKqX0KwQ72NUzqrjZq5ipPNDQex9lo3WpaS8L2sc= github.com/willf/bitset v1.1.10/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4= github.com/willf/bloom v2.0.3+incompatible h1:QDacWdqcAUI1MPOwIQZRy9kOR7yxfyEmxX8Wdm2/JPA= @@ -633,7 +626,6 @@ golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190619014844-b5b0513f8c1b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190909003024-a7b16738d86b h1:XfVGCX+0T4WOStkaOsJRllbsiImhB2jgVBGc9L0lPGc= golang.org/x/net v0.0.0-20190909003024-a7b16738d86b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= diff --git a/weed/operation/assign_file_id.go b/weed/operation/assign_file_id.go index 3f3bb13e0..b67d8b708 100644 --- a/weed/operation/assign_file_id.go +++ b/weed/operation/assign_file_id.go @@ -1,17 +1,13 @@ package operation import ( - "bytes" "context" "fmt" - - "github.com/valyala/fasthttp" - "google.golang.org/grpc" - - "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/pb/master_pb" "github.com/chrislusf/seaweedfs/weed/security" "github.com/chrislusf/seaweedfs/weed/util" + "google.golang.org/grpc" + "strings" ) type VolumeAssignRequest struct { @@ -93,16 +89,12 @@ func Assign(server string, grpcDialOption grpc.DialOption, primaryRequest *Volum func LookupJwt(master string, fileId string) security.EncodedJwt { tokenStr := "" - lookupUrl := fmt.Sprintf("http://%s/dir/lookup?fileId=%s", master, fileId) - err := util.Head(lookupUrl, func(header fasthttp.ResponseHeader) { - bearer := header.Peek("Authorization") - if len(bearer) > 7 && string(bytes.ToUpper(bearer[0:6])) == "BEARER" { - tokenStr = string(bearer[7:]) + if h, e := util.Head(fmt.Sprintf("http://%s/dir/lookup?fileId=%s", master, fileId)); e == nil { + bearer := h.Get("Authorization") + if len(bearer) > 7 && strings.ToUpper(bearer[0:6]) == "BEARER" { + tokenStr = bearer[7:] } - }) - if err != nil { - glog.V(0).Infof("failed to lookup jwt %s: %v", lookupUrl, err) } return security.EncodedJwt(tokenStr) diff --git a/weed/util/http_util.go b/weed/util/http_util.go index b74e30ad7..08007a038 100644 --- a/weed/util/http_util.go +++ b/weed/util/http_util.go @@ -12,8 +12,6 @@ import ( "net/url" "strings" - "github.com/valyala/fasthttp" - "github.com/chrislusf/seaweedfs/weed/glog" ) @@ -85,23 +83,16 @@ func Get(url string) ([]byte, error) { return b, nil } -func Head(url string, fn func(header fasthttp.ResponseHeader)) error { - req := fasthttp.AcquireRequest() - resp := fasthttp.AcquireResponse() - defer fasthttp.ReleaseRequest(req) // <- do not 
forget to release - defer fasthttp.ReleaseResponse(resp) // <- do not forget to release - - c := fasthttp.Client{} - req.SetRequestURI(url) - req.Header.SetMethod(fasthttp.MethodHead) - if err := c.Do(req, resp); err != nil { - return err +func Head(url string) (http.Header, error) { + r, err := client.Head(url) + if err != nil { + return nil, err } - if resp.StatusCode() >= 400 { - return fmt.Errorf("%s: %d", url, resp.StatusCode()) + defer r.Body.Close() + if r.StatusCode >= 400 { + return nil, fmt.Errorf("%s: %s", url, r.Status) } - fn(resp.Header) - return nil + return r.Header, nil } func Delete(url string, jwt string) error { diff --git a/weed/util/http_util_test.go b/weed/util/http_util_test.go deleted file mode 100644 index a8a1172d2..000000000 --- a/weed/util/http_util_test.go +++ /dev/null @@ -1,19 +0,0 @@ -package util - -import ( - "testing" - - "github.com/valyala/fasthttp" -) - -func TestFasthttpClientHead(t *testing.T) { - err := Head("https://www.google.com", func(header fasthttp.ResponseHeader) { - header.VisitAll(func(key, value []byte) { - println(string(key) + ": " + string(value)) - }) - }) - if err != nil { - println(err.Error()) - } - -} From 2a6db0fd436a62afdadbb03d55e8acd51d67791a Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Tue, 11 Feb 2020 09:45:02 -0800 Subject: [PATCH 0115/2432] volume: return 204 for unchanged file uploads fix https://github.com/chrislusf/seaweedfs/issues/1196 --- weed/server/volume_server_handlers_write.go | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/weed/server/volume_server_handlers_write.go b/weed/server/volume_server_handlers_write.go index cd35255e5..ac8fa4f42 100644 --- a/weed/server/volume_server_handlers_write.go +++ b/weed/server/volume_server_handlers_write.go @@ -52,9 +52,10 @@ func (vs *VolumeServer) PostHandler(w http.ResponseWriter, r *http.Request) { ret := operation.UploadResult{} _, isUnchanged, writeError := topology.ReplicatedWrite(vs.GetMaster(), vs.store, volumeId, needle, r) - // http 304 status code does not allow body + // http 204 status code does not allow body if writeError == nil && isUnchanged { - w.WriteHeader(http.StatusNotModified) + setEtag(w, needle.Etag()) + w.WriteHeader(http.StatusNoContent) return } From 83836f4299ddc53eef79745602b9bc61ff52cf0d Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Tue, 11 Feb 2020 22:54:10 -0800 Subject: [PATCH 0116/2432] filer: recursively batch delete file chunks fix https://github.com/chrislusf/seaweedfs/issues/1197 --- weed/filer2/filer_delete_entry.go | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/weed/filer2/filer_delete_entry.go b/weed/filer2/filer_delete_entry.go index 75a09e7ef..ab7119042 100644 --- a/weed/filer2/filer_delete_entry.go +++ b/weed/filer2/filer_delete_entry.go @@ -63,13 +63,13 @@ func (f *Filer) doBatchDeleteFolderMetaAndData(ctx context.Context, entry *Entry var dirChunks []*filer_pb.FileChunk if sub.IsDirectory() { dirChunks, err = f.doBatchDeleteFolderMetaAndData(ctx, sub, isRecursive, ignoreRecursiveError, shouldDeleteChunks) + chunks = append(chunks, dirChunks...) + } else { + chunks = append(chunks, sub.Chunks...) } if err != nil && !ignoreRecursiveError { return nil, err } - if shouldDeleteChunks { - chunks = append(chunks, dirChunks...) 
- } } if len(entries) < PaginationSize { @@ -79,7 +79,7 @@ func (f *Filer) doBatchDeleteFolderMetaAndData(ctx context.Context, entry *Entry f.cacheDelDirectory(string(entry.FullPath)) - glog.V(3).Infof("deleting directory %v", entry.FullPath) + glog.V(3).Infof("deleting directory %v delete %d chunks: %v", entry.FullPath, len(chunks), shouldDeleteChunks) if storeDeletionErr := f.store.DeleteFolderChildren(ctx, entry.FullPath); storeDeletionErr != nil { return nil, fmt.Errorf("filer store delete: %v", storeDeletionErr) @@ -91,7 +91,7 @@ func (f *Filer) doBatchDeleteFolderMetaAndData(ctx context.Context, entry *Entry func (f *Filer) doDeleteEntryMetaAndData(ctx context.Context, entry *Entry, shouldDeleteChunks bool) (err error) { - glog.V(3).Infof("deleting entry %v", entry.FullPath) + glog.V(3).Infof("deleting entry %v, delete chunks: %v", entry.FullPath, shouldDeleteChunks) if storeDeletionErr := f.store.DeleteEntry(ctx, entry.FullPath); storeDeletionErr != nil { return fmt.Errorf("filer store delete: %v", storeDeletionErr) From 62c34454d84dd012ffbc150932cc3476efcf88ea Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Tue, 11 Feb 2020 23:00:31 -0800 Subject: [PATCH 0117/2432] 1.55 --- weed/util/constants.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/weed/util/constants.go b/weed/util/constants.go index 25b986653..646a7d96d 100644 --- a/weed/util/constants.go +++ b/weed/util/constants.go @@ -5,5 +5,5 @@ import ( ) var ( - VERSION = fmt.Sprintf("%s %d.%d", sizeLimit, 1, 54) + VERSION = fmt.Sprintf("%s %d.%d", sizeLimit, 1, 55) ) From 7b3764fd9e2308bddb3373420958ddc3d470f08c Mon Sep 17 00:00:00 2001 From: panyc16 Date: Thu, 13 Feb 2020 15:19:00 +0800 Subject: [PATCH 0118/2432] make loadExistingVolume() return bool --- weed/storage/disk_location.go | 51 ++++++++++++++++++++--------------- 1 file changed, 30 insertions(+), 21 deletions(-) diff --git a/weed/storage/disk_location.go b/weed/storage/disk_location.go index a12a68cbc..f15303282 100644 --- a/weed/storage/disk_location.go +++ b/weed/storage/disk_location.go @@ -50,29 +50,39 @@ func parseCollectionVolumeId(base string) (collection string, vid needle.VolumeI return collection, vol, err } -func (l *DiskLocation) loadExistingVolume(fileInfo os.FileInfo, needleMapKind NeedleMapType) { +func (l *DiskLocation) loadExistingVolume(fileInfo os.FileInfo, needleMapKind NeedleMapType) bool { name := fileInfo.Name() if !fileInfo.IsDir() && strings.HasSuffix(name, ".idx") { vid, collection, err := l.volumeIdFromPath(fileInfo) - if err == nil { - l.volumesLock.RLock() - _, found := l.volumes[vid] - l.volumesLock.RUnlock() - if !found { - if v, e := NewVolume(l.Directory, collection, vid, needleMapKind, nil, nil, 0, 0); e == nil { - l.volumesLock.Lock() - l.volumes[vid] = v - l.volumesLock.Unlock() - size, _, _ := v.FileStat() - glog.V(0).Infof("data file %s, replicaPlacement=%s v=%d size=%d ttl=%s", - l.Directory+"/"+name, v.ReplicaPlacement, v.Version(), size, v.Ttl.String()) - // println("volume", vid, "last append at", v.lastAppendAtNs) - } else { - glog.V(0).Infof("new volume %s error %s", name, e) - } - } + if err != nil { + glog.Warningf("get volume id failed, %s, err : %s", name, err) + return false + } + + // void loading one volume more than once + l.volumesLock.RLock() + _, found := l.volumes[vid] + l.volumesLock.RUnlock() + if found { + glog.V(1).Infof("loaded volume, %v", vid) + return true + } + + v, e := NewVolume(l.Directory, collection, vid, needleMapKind, nil, nil, 0, 0) + if e != nil { + glog.V(0).Infof("new volume 
%s error %s", name, e) + return false } + + l.volumesLock.Lock() + l.volumes[vid] = v + l.volumesLock.Unlock() + size, _, _ := v.FileStat() + glog.V(0).Infof("data file %s, replicaPlacement=%s v=%d size=%d ttl=%s", + l.Directory+"/"+name, v.ReplicaPlacement, v.Version(), size, v.Ttl.String()) + return true } + return false } func (l *DiskLocation) concurrentLoadingVolumes(needleMapKind NeedleMapType, concurrency int) { @@ -93,7 +103,7 @@ func (l *DiskLocation) concurrentLoadingVolumes(needleMapKind NeedleMapType, con go func() { defer wg.Done() for dir := range task_queue { - l.loadExistingVolume(dir, needleMapKind) + _ = l.loadExistingVolume(dir, needleMapKind) } }() } @@ -172,8 +182,7 @@ func (l *DiskLocation) deleteVolumeById(vid needle.VolumeId) (e error) { func (l *DiskLocation) LoadVolume(vid needle.VolumeId, needleMapKind NeedleMapType) bool { if fileInfo, found := l.LocateVolume(vid); found { - l.loadExistingVolume(fileInfo, needleMapKind) - return true + return l.loadExistingVolume(fileInfo, needleMapKind) } return false } From 9b6296e77a66533664507c833209aefd5de56337 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Fri, 14 Feb 2020 00:37:32 -0800 Subject: [PATCH 0119/2432] volume: add grpc file read operation This is added more for performance benchmarking. --- weed/command/benchmark.go | 57 +- weed/pb/volume_server.proto | 20 + weed/pb/volume_server_pb/volume_server.pb.go | 714 ++++++++++++------- weed/server/volume_grpc_file.go | 130 ++++ 4 files changed, 647 insertions(+), 274 deletions(-) create mode 100644 weed/server/volume_grpc_file.go diff --git a/weed/command/benchmark.go b/weed/command/benchmark.go index 382e7c850..9adcb6f33 100644 --- a/weed/command/benchmark.go +++ b/weed/command/benchmark.go @@ -19,6 +19,7 @@ import ( "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/operation" + "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb" "github.com/chrislusf/seaweedfs/weed/security" "github.com/chrislusf/seaweedfs/weed/util" "github.com/chrislusf/seaweedfs/weed/wdclient" @@ -40,6 +41,7 @@ type BenchmarkOptions struct { maxCpu *int grpcDialOption grpc.DialOption masterClient *wdclient.MasterClient + grpcRead *bool } var ( @@ -64,6 +66,7 @@ func init() { b.replication = cmdBenchmark.Flag.String("replication", "000", "replication type") b.cpuprofile = cmdBenchmark.Flag.String("cpuprofile", "", "cpu profile output file") b.maxCpu = cmdBenchmark.Flag.Int("maxCpu", 0, "maximum number of CPUs. 0 means all available CPUs") + b.grpcRead = cmdBenchmark.Flag.Bool("grpcRead", false, "use grpc API to read") sharedBytes = make([]byte, 1024) } @@ -278,23 +281,61 @@ func readFiles(fileIdLineChan chan string, s *stat) { fmt.Printf("reading file %s\n", fid) } start := time.Now() - url, err := b.masterClient.LookupFileId(fid) - if err != nil { - s.failed++ - println("!!!! ", fid, " location not found!!!!!") - continue + var bytesRead int + var err error + if *b.grpcRead { + volumeServer, err := b.masterClient.LookupVolumeServer(fid) + if err != nil { + s.failed++ + println("!!!! ", fid, " location not found!!!!!") + continue + } + bytesRead, err = grpcFileGet(volumeServer, fid, b.grpcDialOption) + } else { + url, err := b.masterClient.LookupFileId(fid) + if err != nil { + s.failed++ + println("!!!! 
", fid, " location not found!!!!!") + continue + } + var bytes []byte + bytes, err = util.Get(url) + bytesRead = len(bytes) } - if bytesRead, err := util.Get(url); err == nil { + if err == nil { s.completed++ - s.transferred += int64(len(bytesRead)) + s.transferred += int64(bytesRead) readStats.addSample(time.Now().Sub(start)) } else { s.failed++ - fmt.Printf("Failed to read %s error:%v\n", url, err) + fmt.Printf("Failed to read %s error:%v\n", fid, err) } } } +func grpcFileGet(volumeServer, fid string, grpcDialOption grpc.DialOption) (bytesRead int, err error) { + err = operation.WithVolumeServerClient(volumeServer, grpcDialOption, func(ctx context.Context, client volume_server_pb.VolumeServerClient) error { + fileGetClient, err := client.FileGet(ctx, &volume_server_pb.FileGetRequest{FileId: fid}) + if err != nil { + return err + } + + for { + resp, respErr := fileGetClient.Recv() + if resp != nil { + bytesRead += len(resp.Data) + } + if respErr != nil { + if respErr == io.EOF { + return nil + } + return respErr + } + } + }) + return +} + func writeFileIds(fileName string, fileIdLineChan chan string, finishChan chan bool) { file, err := os.OpenFile(fileName, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644) if err != nil { diff --git a/weed/pb/volume_server.proto b/weed/pb/volume_server.proto index 405d41e9c..284e00633 100644 --- a/weed/pb/volume_server.proto +++ b/weed/pb/volume_server.proto @@ -8,6 +8,10 @@ service VolumeServer { //Experts only: takes multiple fid parameters. This function does not propagate deletes to replicas. rpc BatchDelete (BatchDeleteRequest) returns (BatchDeleteResponse) { } + + rpc FileGet (FileGetRequest) returns (stream FileGetResponse) { + } + rpc VacuumVolumeCheck (VacuumVolumeCheckRequest) returns (VacuumVolumeCheckResponse) { } rpc VacuumVolumeCompact (VacuumVolumeCompactRequest) returns (VacuumVolumeCompactResponse) { @@ -100,6 +104,22 @@ message DeleteResult { uint32 version = 5; } +message FileGetRequest { + string file_id = 1; + bool accept_gzip = 2; +} +message FileGetResponse { + bytes data = 1; + uint32 content_length = 2; + string content_type = 3; + uint64 last_modified = 4; + string filename = 5; + string etag = 6; + bool is_gzipped = 7; + map headers = 8; + int32 errorCode = 9; +} + message Empty { } diff --git a/weed/pb/volume_server_pb/volume_server.pb.go b/weed/pb/volume_server_pb/volume_server.pb.go index 2a8f91bc5..ec196a1d9 100644 --- a/weed/pb/volume_server_pb/volume_server.pb.go +++ b/weed/pb/volume_server_pb/volume_server.pb.go @@ -12,6 +12,8 @@ It has these top-level messages: BatchDeleteRequest BatchDeleteResponse DeleteResult + FileGetRequest + FileGetResponse Empty VacuumVolumeCheckRequest VacuumVolumeCheckResponse @@ -180,13 +182,117 @@ func (m *DeleteResult) GetVersion() uint32 { return 0 } +type FileGetRequest struct { + FileId string `protobuf:"bytes,1,opt,name=file_id,json=fileId" json:"file_id,omitempty"` + AcceptGzip bool `protobuf:"varint,2,opt,name=accept_gzip,json=acceptGzip" json:"accept_gzip,omitempty"` +} + +func (m *FileGetRequest) Reset() { *m = FileGetRequest{} } +func (m *FileGetRequest) String() string { return proto.CompactTextString(m) } +func (*FileGetRequest) ProtoMessage() {} +func (*FileGetRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } + +func (m *FileGetRequest) GetFileId() string { + if m != nil { + return m.FileId + } + return "" +} + +func (m *FileGetRequest) GetAcceptGzip() bool { + if m != nil { + return m.AcceptGzip + } + return false +} + +type FileGetResponse struct { + Data 
[]byte `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"` + ContentLength uint32 `protobuf:"varint,2,opt,name=content_length,json=contentLength" json:"content_length,omitempty"` + ContentType string `protobuf:"bytes,3,opt,name=content_type,json=contentType" json:"content_type,omitempty"` + LastModified uint64 `protobuf:"varint,4,opt,name=last_modified,json=lastModified" json:"last_modified,omitempty"` + Filename string `protobuf:"bytes,5,opt,name=filename" json:"filename,omitempty"` + Etag string `protobuf:"bytes,6,opt,name=etag" json:"etag,omitempty"` + IsGzipped bool `protobuf:"varint,7,opt,name=is_gzipped,json=isGzipped" json:"is_gzipped,omitempty"` + Headers map[string]string `protobuf:"bytes,8,rep,name=headers" json:"headers,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + ErrorCode int32 `protobuf:"varint,9,opt,name=errorCode" json:"errorCode,omitempty"` +} + +func (m *FileGetResponse) Reset() { *m = FileGetResponse{} } +func (m *FileGetResponse) String() string { return proto.CompactTextString(m) } +func (*FileGetResponse) ProtoMessage() {} +func (*FileGetResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } + +func (m *FileGetResponse) GetData() []byte { + if m != nil { + return m.Data + } + return nil +} + +func (m *FileGetResponse) GetContentLength() uint32 { + if m != nil { + return m.ContentLength + } + return 0 +} + +func (m *FileGetResponse) GetContentType() string { + if m != nil { + return m.ContentType + } + return "" +} + +func (m *FileGetResponse) GetLastModified() uint64 { + if m != nil { + return m.LastModified + } + return 0 +} + +func (m *FileGetResponse) GetFilename() string { + if m != nil { + return m.Filename + } + return "" +} + +func (m *FileGetResponse) GetEtag() string { + if m != nil { + return m.Etag + } + return "" +} + +func (m *FileGetResponse) GetIsGzipped() bool { + if m != nil { + return m.IsGzipped + } + return false +} + +func (m *FileGetResponse) GetHeaders() map[string]string { + if m != nil { + return m.Headers + } + return nil +} + +func (m *FileGetResponse) GetErrorCode() int32 { + if m != nil { + return m.ErrorCode + } + return 0 +} + type Empty struct { } func (m *Empty) Reset() { *m = Empty{} } func (m *Empty) String() string { return proto.CompactTextString(m) } func (*Empty) ProtoMessage() {} -func (*Empty) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } +func (*Empty) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} } type VacuumVolumeCheckRequest struct { VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` @@ -195,7 +301,7 @@ type VacuumVolumeCheckRequest struct { func (m *VacuumVolumeCheckRequest) Reset() { *m = VacuumVolumeCheckRequest{} } func (m *VacuumVolumeCheckRequest) String() string { return proto.CompactTextString(m) } func (*VacuumVolumeCheckRequest) ProtoMessage() {} -func (*VacuumVolumeCheckRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } +func (*VacuumVolumeCheckRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} } func (m *VacuumVolumeCheckRequest) GetVolumeId() uint32 { if m != nil { @@ -211,7 +317,7 @@ type VacuumVolumeCheckResponse struct { func (m *VacuumVolumeCheckResponse) Reset() { *m = VacuumVolumeCheckResponse{} } func (m *VacuumVolumeCheckResponse) String() string { return proto.CompactTextString(m) } func (*VacuumVolumeCheckResponse) ProtoMessage() {} -func (*VacuumVolumeCheckResponse) Descriptor() ([]byte, []int) { 
return fileDescriptor0, []int{5} } +func (*VacuumVolumeCheckResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} } func (m *VacuumVolumeCheckResponse) GetGarbageRatio() float64 { if m != nil { @@ -228,7 +334,7 @@ type VacuumVolumeCompactRequest struct { func (m *VacuumVolumeCompactRequest) Reset() { *m = VacuumVolumeCompactRequest{} } func (m *VacuumVolumeCompactRequest) String() string { return proto.CompactTextString(m) } func (*VacuumVolumeCompactRequest) ProtoMessage() {} -func (*VacuumVolumeCompactRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} } +func (*VacuumVolumeCompactRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} } func (m *VacuumVolumeCompactRequest) GetVolumeId() uint32 { if m != nil { @@ -250,7 +356,7 @@ type VacuumVolumeCompactResponse struct { func (m *VacuumVolumeCompactResponse) Reset() { *m = VacuumVolumeCompactResponse{} } func (m *VacuumVolumeCompactResponse) String() string { return proto.CompactTextString(m) } func (*VacuumVolumeCompactResponse) ProtoMessage() {} -func (*VacuumVolumeCompactResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} } +func (*VacuumVolumeCompactResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} } type VacuumVolumeCommitRequest struct { VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` @@ -259,7 +365,7 @@ type VacuumVolumeCommitRequest struct { func (m *VacuumVolumeCommitRequest) Reset() { *m = VacuumVolumeCommitRequest{} } func (m *VacuumVolumeCommitRequest) String() string { return proto.CompactTextString(m) } func (*VacuumVolumeCommitRequest) ProtoMessage() {} -func (*VacuumVolumeCommitRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} } +func (*VacuumVolumeCommitRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10} } func (m *VacuumVolumeCommitRequest) GetVolumeId() uint32 { if m != nil { @@ -274,7 +380,7 @@ type VacuumVolumeCommitResponse struct { func (m *VacuumVolumeCommitResponse) Reset() { *m = VacuumVolumeCommitResponse{} } func (m *VacuumVolumeCommitResponse) String() string { return proto.CompactTextString(m) } func (*VacuumVolumeCommitResponse) ProtoMessage() {} -func (*VacuumVolumeCommitResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} } +func (*VacuumVolumeCommitResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11} } type VacuumVolumeCleanupRequest struct { VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` @@ -283,7 +389,7 @@ type VacuumVolumeCleanupRequest struct { func (m *VacuumVolumeCleanupRequest) Reset() { *m = VacuumVolumeCleanupRequest{} } func (m *VacuumVolumeCleanupRequest) String() string { return proto.CompactTextString(m) } func (*VacuumVolumeCleanupRequest) ProtoMessage() {} -func (*VacuumVolumeCleanupRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10} } +func (*VacuumVolumeCleanupRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{12} } func (m *VacuumVolumeCleanupRequest) GetVolumeId() uint32 { if m != nil { @@ -298,7 +404,7 @@ type VacuumVolumeCleanupResponse struct { func (m *VacuumVolumeCleanupResponse) Reset() { *m = VacuumVolumeCleanupResponse{} } func (m *VacuumVolumeCleanupResponse) String() string { return proto.CompactTextString(m) } func (*VacuumVolumeCleanupResponse) ProtoMessage() {} -func (*VacuumVolumeCleanupResponse) Descriptor() ([]byte, []int) { return 
fileDescriptor0, []int{11} } +func (*VacuumVolumeCleanupResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{13} } type DeleteCollectionRequest struct { Collection string `protobuf:"bytes,1,opt,name=collection" json:"collection,omitempty"` @@ -307,7 +413,7 @@ type DeleteCollectionRequest struct { func (m *DeleteCollectionRequest) Reset() { *m = DeleteCollectionRequest{} } func (m *DeleteCollectionRequest) String() string { return proto.CompactTextString(m) } func (*DeleteCollectionRequest) ProtoMessage() {} -func (*DeleteCollectionRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{12} } +func (*DeleteCollectionRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{14} } func (m *DeleteCollectionRequest) GetCollection() string { if m != nil { @@ -322,7 +428,7 @@ type DeleteCollectionResponse struct { func (m *DeleteCollectionResponse) Reset() { *m = DeleteCollectionResponse{} } func (m *DeleteCollectionResponse) String() string { return proto.CompactTextString(m) } func (*DeleteCollectionResponse) ProtoMessage() {} -func (*DeleteCollectionResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{13} } +func (*DeleteCollectionResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{15} } type AllocateVolumeRequest struct { VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` @@ -336,7 +442,7 @@ type AllocateVolumeRequest struct { func (m *AllocateVolumeRequest) Reset() { *m = AllocateVolumeRequest{} } func (m *AllocateVolumeRequest) String() string { return proto.CompactTextString(m) } func (*AllocateVolumeRequest) ProtoMessage() {} -func (*AllocateVolumeRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{14} } +func (*AllocateVolumeRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{16} } func (m *AllocateVolumeRequest) GetVolumeId() uint32 { if m != nil { @@ -386,7 +492,7 @@ type AllocateVolumeResponse struct { func (m *AllocateVolumeResponse) Reset() { *m = AllocateVolumeResponse{} } func (m *AllocateVolumeResponse) String() string { return proto.CompactTextString(m) } func (*AllocateVolumeResponse) ProtoMessage() {} -func (*AllocateVolumeResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{15} } +func (*AllocateVolumeResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{17} } type VolumeSyncStatusRequest struct { VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` @@ -395,7 +501,7 @@ type VolumeSyncStatusRequest struct { func (m *VolumeSyncStatusRequest) Reset() { *m = VolumeSyncStatusRequest{} } func (m *VolumeSyncStatusRequest) String() string { return proto.CompactTextString(m) } func (*VolumeSyncStatusRequest) ProtoMessage() {} -func (*VolumeSyncStatusRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{16} } +func (*VolumeSyncStatusRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{18} } func (m *VolumeSyncStatusRequest) GetVolumeId() uint32 { if m != nil { @@ -417,7 +523,7 @@ type VolumeSyncStatusResponse struct { func (m *VolumeSyncStatusResponse) Reset() { *m = VolumeSyncStatusResponse{} } func (m *VolumeSyncStatusResponse) String() string { return proto.CompactTextString(m) } func (*VolumeSyncStatusResponse) ProtoMessage() {} -func (*VolumeSyncStatusResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{17} } +func (*VolumeSyncStatusResponse) Descriptor() ([]byte, []int) { return 
fileDescriptor0, []int{19} } func (m *VolumeSyncStatusResponse) GetVolumeId() uint32 { if m != nil { @@ -476,7 +582,7 @@ type VolumeIncrementalCopyRequest struct { func (m *VolumeIncrementalCopyRequest) Reset() { *m = VolumeIncrementalCopyRequest{} } func (m *VolumeIncrementalCopyRequest) String() string { return proto.CompactTextString(m) } func (*VolumeIncrementalCopyRequest) ProtoMessage() {} -func (*VolumeIncrementalCopyRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{18} } +func (*VolumeIncrementalCopyRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{20} } func (m *VolumeIncrementalCopyRequest) GetVolumeId() uint32 { if m != nil { @@ -499,7 +605,7 @@ type VolumeIncrementalCopyResponse struct { func (m *VolumeIncrementalCopyResponse) Reset() { *m = VolumeIncrementalCopyResponse{} } func (m *VolumeIncrementalCopyResponse) String() string { return proto.CompactTextString(m) } func (*VolumeIncrementalCopyResponse) ProtoMessage() {} -func (*VolumeIncrementalCopyResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{19} } +func (*VolumeIncrementalCopyResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{21} } func (m *VolumeIncrementalCopyResponse) GetFileContent() []byte { if m != nil { @@ -515,7 +621,7 @@ type VolumeMountRequest struct { func (m *VolumeMountRequest) Reset() { *m = VolumeMountRequest{} } func (m *VolumeMountRequest) String() string { return proto.CompactTextString(m) } func (*VolumeMountRequest) ProtoMessage() {} -func (*VolumeMountRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{20} } +func (*VolumeMountRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{22} } func (m *VolumeMountRequest) GetVolumeId() uint32 { if m != nil { @@ -530,7 +636,7 @@ type VolumeMountResponse struct { func (m *VolumeMountResponse) Reset() { *m = VolumeMountResponse{} } func (m *VolumeMountResponse) String() string { return proto.CompactTextString(m) } func (*VolumeMountResponse) ProtoMessage() {} -func (*VolumeMountResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{21} } +func (*VolumeMountResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{23} } type VolumeUnmountRequest struct { VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` @@ -539,7 +645,7 @@ type VolumeUnmountRequest struct { func (m *VolumeUnmountRequest) Reset() { *m = VolumeUnmountRequest{} } func (m *VolumeUnmountRequest) String() string { return proto.CompactTextString(m) } func (*VolumeUnmountRequest) ProtoMessage() {} -func (*VolumeUnmountRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{22} } +func (*VolumeUnmountRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{24} } func (m *VolumeUnmountRequest) GetVolumeId() uint32 { if m != nil { @@ -554,7 +660,7 @@ type VolumeUnmountResponse struct { func (m *VolumeUnmountResponse) Reset() { *m = VolumeUnmountResponse{} } func (m *VolumeUnmountResponse) String() string { return proto.CompactTextString(m) } func (*VolumeUnmountResponse) ProtoMessage() {} -func (*VolumeUnmountResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{23} } +func (*VolumeUnmountResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{25} } type VolumeDeleteRequest struct { VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` @@ -563,7 +669,7 @@ type VolumeDeleteRequest struct { func (m 
*VolumeDeleteRequest) Reset() { *m = VolumeDeleteRequest{} } func (m *VolumeDeleteRequest) String() string { return proto.CompactTextString(m) } func (*VolumeDeleteRequest) ProtoMessage() {} -func (*VolumeDeleteRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{24} } +func (*VolumeDeleteRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{26} } func (m *VolumeDeleteRequest) GetVolumeId() uint32 { if m != nil { @@ -578,7 +684,7 @@ type VolumeDeleteResponse struct { func (m *VolumeDeleteResponse) Reset() { *m = VolumeDeleteResponse{} } func (m *VolumeDeleteResponse) String() string { return proto.CompactTextString(m) } func (*VolumeDeleteResponse) ProtoMessage() {} -func (*VolumeDeleteResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{25} } +func (*VolumeDeleteResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{27} } type VolumeMarkReadonlyRequest struct { VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` @@ -587,7 +693,7 @@ type VolumeMarkReadonlyRequest struct { func (m *VolumeMarkReadonlyRequest) Reset() { *m = VolumeMarkReadonlyRequest{} } func (m *VolumeMarkReadonlyRequest) String() string { return proto.CompactTextString(m) } func (*VolumeMarkReadonlyRequest) ProtoMessage() {} -func (*VolumeMarkReadonlyRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{26} } +func (*VolumeMarkReadonlyRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{28} } func (m *VolumeMarkReadonlyRequest) GetVolumeId() uint32 { if m != nil { @@ -602,7 +708,7 @@ type VolumeMarkReadonlyResponse struct { func (m *VolumeMarkReadonlyResponse) Reset() { *m = VolumeMarkReadonlyResponse{} } func (m *VolumeMarkReadonlyResponse) String() string { return proto.CompactTextString(m) } func (*VolumeMarkReadonlyResponse) ProtoMessage() {} -func (*VolumeMarkReadonlyResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{27} } +func (*VolumeMarkReadonlyResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{29} } type VolumeConfigureRequest struct { VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` @@ -612,7 +718,7 @@ type VolumeConfigureRequest struct { func (m *VolumeConfigureRequest) Reset() { *m = VolumeConfigureRequest{} } func (m *VolumeConfigureRequest) String() string { return proto.CompactTextString(m) } func (*VolumeConfigureRequest) ProtoMessage() {} -func (*VolumeConfigureRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{28} } +func (*VolumeConfigureRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{30} } func (m *VolumeConfigureRequest) GetVolumeId() uint32 { if m != nil { @@ -635,7 +741,7 @@ type VolumeConfigureResponse struct { func (m *VolumeConfigureResponse) Reset() { *m = VolumeConfigureResponse{} } func (m *VolumeConfigureResponse) String() string { return proto.CompactTextString(m) } func (*VolumeConfigureResponse) ProtoMessage() {} -func (*VolumeConfigureResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{29} } +func (*VolumeConfigureResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{31} } func (m *VolumeConfigureResponse) GetError() string { if m != nil { @@ -655,7 +761,7 @@ type VolumeCopyRequest struct { func (m *VolumeCopyRequest) Reset() { *m = VolumeCopyRequest{} } func (m *VolumeCopyRequest) String() string { return proto.CompactTextString(m) } func (*VolumeCopyRequest) ProtoMessage() {} -func 
(*VolumeCopyRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{30} } +func (*VolumeCopyRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{32} } func (m *VolumeCopyRequest) GetVolumeId() uint32 { if m != nil { @@ -699,7 +805,7 @@ type VolumeCopyResponse struct { func (m *VolumeCopyResponse) Reset() { *m = VolumeCopyResponse{} } func (m *VolumeCopyResponse) String() string { return proto.CompactTextString(m) } func (*VolumeCopyResponse) ProtoMessage() {} -func (*VolumeCopyResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{31} } +func (*VolumeCopyResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{33} } func (m *VolumeCopyResponse) GetLastAppendAtNs() uint64 { if m != nil { @@ -721,7 +827,7 @@ type CopyFileRequest struct { func (m *CopyFileRequest) Reset() { *m = CopyFileRequest{} } func (m *CopyFileRequest) String() string { return proto.CompactTextString(m) } func (*CopyFileRequest) ProtoMessage() {} -func (*CopyFileRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{32} } +func (*CopyFileRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{34} } func (m *CopyFileRequest) GetVolumeId() uint32 { if m != nil { @@ -779,7 +885,7 @@ type CopyFileResponse struct { func (m *CopyFileResponse) Reset() { *m = CopyFileResponse{} } func (m *CopyFileResponse) String() string { return proto.CompactTextString(m) } func (*CopyFileResponse) ProtoMessage() {} -func (*CopyFileResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{33} } +func (*CopyFileResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{35} } func (m *CopyFileResponse) GetFileContent() []byte { if m != nil { @@ -797,7 +903,7 @@ type VolumeTailSenderRequest struct { func (m *VolumeTailSenderRequest) Reset() { *m = VolumeTailSenderRequest{} } func (m *VolumeTailSenderRequest) String() string { return proto.CompactTextString(m) } func (*VolumeTailSenderRequest) ProtoMessage() {} -func (*VolumeTailSenderRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{34} } +func (*VolumeTailSenderRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{36} } func (m *VolumeTailSenderRequest) GetVolumeId() uint32 { if m != nil { @@ -829,7 +935,7 @@ type VolumeTailSenderResponse struct { func (m *VolumeTailSenderResponse) Reset() { *m = VolumeTailSenderResponse{} } func (m *VolumeTailSenderResponse) String() string { return proto.CompactTextString(m) } func (*VolumeTailSenderResponse) ProtoMessage() {} -func (*VolumeTailSenderResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{35} } +func (*VolumeTailSenderResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{37} } func (m *VolumeTailSenderResponse) GetNeedleHeader() []byte { if m != nil { @@ -862,7 +968,7 @@ type VolumeTailReceiverRequest struct { func (m *VolumeTailReceiverRequest) Reset() { *m = VolumeTailReceiverRequest{} } func (m *VolumeTailReceiverRequest) String() string { return proto.CompactTextString(m) } func (*VolumeTailReceiverRequest) ProtoMessage() {} -func (*VolumeTailReceiverRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{36} } +func (*VolumeTailReceiverRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{38} } func (m *VolumeTailReceiverRequest) GetVolumeId() uint32 { if m != nil { @@ -898,7 +1004,7 @@ type VolumeTailReceiverResponse struct { func (m *VolumeTailReceiverResponse) Reset() { *m = VolumeTailReceiverResponse{} } func (m 
*VolumeTailReceiverResponse) String() string { return proto.CompactTextString(m) } func (*VolumeTailReceiverResponse) ProtoMessage() {} -func (*VolumeTailReceiverResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{37} } +func (*VolumeTailReceiverResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{39} } type VolumeEcShardsGenerateRequest struct { VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` @@ -908,7 +1014,7 @@ type VolumeEcShardsGenerateRequest struct { func (m *VolumeEcShardsGenerateRequest) Reset() { *m = VolumeEcShardsGenerateRequest{} } func (m *VolumeEcShardsGenerateRequest) String() string { return proto.CompactTextString(m) } func (*VolumeEcShardsGenerateRequest) ProtoMessage() {} -func (*VolumeEcShardsGenerateRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{38} } +func (*VolumeEcShardsGenerateRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{40} } func (m *VolumeEcShardsGenerateRequest) GetVolumeId() uint32 { if m != nil { @@ -930,7 +1036,7 @@ type VolumeEcShardsGenerateResponse struct { func (m *VolumeEcShardsGenerateResponse) Reset() { *m = VolumeEcShardsGenerateResponse{} } func (m *VolumeEcShardsGenerateResponse) String() string { return proto.CompactTextString(m) } func (*VolumeEcShardsGenerateResponse) ProtoMessage() {} -func (*VolumeEcShardsGenerateResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{39} } +func (*VolumeEcShardsGenerateResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{41} } type VolumeEcShardsRebuildRequest struct { VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` @@ -940,7 +1046,7 @@ type VolumeEcShardsRebuildRequest struct { func (m *VolumeEcShardsRebuildRequest) Reset() { *m = VolumeEcShardsRebuildRequest{} } func (m *VolumeEcShardsRebuildRequest) String() string { return proto.CompactTextString(m) } func (*VolumeEcShardsRebuildRequest) ProtoMessage() {} -func (*VolumeEcShardsRebuildRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{40} } +func (*VolumeEcShardsRebuildRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{42} } func (m *VolumeEcShardsRebuildRequest) GetVolumeId() uint32 { if m != nil { @@ -963,7 +1069,7 @@ type VolumeEcShardsRebuildResponse struct { func (m *VolumeEcShardsRebuildResponse) Reset() { *m = VolumeEcShardsRebuildResponse{} } func (m *VolumeEcShardsRebuildResponse) String() string { return proto.CompactTextString(m) } func (*VolumeEcShardsRebuildResponse) ProtoMessage() {} -func (*VolumeEcShardsRebuildResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{41} } +func (*VolumeEcShardsRebuildResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{43} } func (m *VolumeEcShardsRebuildResponse) GetRebuiltShardIds() []uint32 { if m != nil { @@ -985,7 +1091,7 @@ type VolumeEcShardsCopyRequest struct { func (m *VolumeEcShardsCopyRequest) Reset() { *m = VolumeEcShardsCopyRequest{} } func (m *VolumeEcShardsCopyRequest) String() string { return proto.CompactTextString(m) } func (*VolumeEcShardsCopyRequest) ProtoMessage() {} -func (*VolumeEcShardsCopyRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{42} } +func (*VolumeEcShardsCopyRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{44} } func (m *VolumeEcShardsCopyRequest) GetVolumeId() uint32 { if m != nil { @@ -1042,7 +1148,7 @@ type VolumeEcShardsCopyResponse struct { 
func (m *VolumeEcShardsCopyResponse) Reset() { *m = VolumeEcShardsCopyResponse{} } func (m *VolumeEcShardsCopyResponse) String() string { return proto.CompactTextString(m) } func (*VolumeEcShardsCopyResponse) ProtoMessage() {} -func (*VolumeEcShardsCopyResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{43} } +func (*VolumeEcShardsCopyResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{45} } type VolumeEcShardsDeleteRequest struct { VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` @@ -1053,7 +1159,7 @@ type VolumeEcShardsDeleteRequest struct { func (m *VolumeEcShardsDeleteRequest) Reset() { *m = VolumeEcShardsDeleteRequest{} } func (m *VolumeEcShardsDeleteRequest) String() string { return proto.CompactTextString(m) } func (*VolumeEcShardsDeleteRequest) ProtoMessage() {} -func (*VolumeEcShardsDeleteRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{44} } +func (*VolumeEcShardsDeleteRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{46} } func (m *VolumeEcShardsDeleteRequest) GetVolumeId() uint32 { if m != nil { @@ -1082,7 +1188,7 @@ type VolumeEcShardsDeleteResponse struct { func (m *VolumeEcShardsDeleteResponse) Reset() { *m = VolumeEcShardsDeleteResponse{} } func (m *VolumeEcShardsDeleteResponse) String() string { return proto.CompactTextString(m) } func (*VolumeEcShardsDeleteResponse) ProtoMessage() {} -func (*VolumeEcShardsDeleteResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{45} } +func (*VolumeEcShardsDeleteResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{47} } type VolumeEcShardsMountRequest struct { VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` @@ -1093,7 +1199,7 @@ type VolumeEcShardsMountRequest struct { func (m *VolumeEcShardsMountRequest) Reset() { *m = VolumeEcShardsMountRequest{} } func (m *VolumeEcShardsMountRequest) String() string { return proto.CompactTextString(m) } func (*VolumeEcShardsMountRequest) ProtoMessage() {} -func (*VolumeEcShardsMountRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{46} } +func (*VolumeEcShardsMountRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{48} } func (m *VolumeEcShardsMountRequest) GetVolumeId() uint32 { if m != nil { @@ -1122,7 +1228,7 @@ type VolumeEcShardsMountResponse struct { func (m *VolumeEcShardsMountResponse) Reset() { *m = VolumeEcShardsMountResponse{} } func (m *VolumeEcShardsMountResponse) String() string { return proto.CompactTextString(m) } func (*VolumeEcShardsMountResponse) ProtoMessage() {} -func (*VolumeEcShardsMountResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{47} } +func (*VolumeEcShardsMountResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{49} } type VolumeEcShardsUnmountRequest struct { VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` @@ -1132,7 +1238,7 @@ type VolumeEcShardsUnmountRequest struct { func (m *VolumeEcShardsUnmountRequest) Reset() { *m = VolumeEcShardsUnmountRequest{} } func (m *VolumeEcShardsUnmountRequest) String() string { return proto.CompactTextString(m) } func (*VolumeEcShardsUnmountRequest) ProtoMessage() {} -func (*VolumeEcShardsUnmountRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{48} } +func (*VolumeEcShardsUnmountRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{50} } func (m 
*VolumeEcShardsUnmountRequest) GetVolumeId() uint32 { if m != nil { @@ -1154,7 +1260,7 @@ type VolumeEcShardsUnmountResponse struct { func (m *VolumeEcShardsUnmountResponse) Reset() { *m = VolumeEcShardsUnmountResponse{} } func (m *VolumeEcShardsUnmountResponse) String() string { return proto.CompactTextString(m) } func (*VolumeEcShardsUnmountResponse) ProtoMessage() {} -func (*VolumeEcShardsUnmountResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{49} } +func (*VolumeEcShardsUnmountResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{51} } type VolumeEcShardReadRequest struct { VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` @@ -1167,7 +1273,7 @@ type VolumeEcShardReadRequest struct { func (m *VolumeEcShardReadRequest) Reset() { *m = VolumeEcShardReadRequest{} } func (m *VolumeEcShardReadRequest) String() string { return proto.CompactTextString(m) } func (*VolumeEcShardReadRequest) ProtoMessage() {} -func (*VolumeEcShardReadRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{50} } +func (*VolumeEcShardReadRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{52} } func (m *VolumeEcShardReadRequest) GetVolumeId() uint32 { if m != nil { @@ -1212,7 +1318,7 @@ type VolumeEcShardReadResponse struct { func (m *VolumeEcShardReadResponse) Reset() { *m = VolumeEcShardReadResponse{} } func (m *VolumeEcShardReadResponse) String() string { return proto.CompactTextString(m) } func (*VolumeEcShardReadResponse) ProtoMessage() {} -func (*VolumeEcShardReadResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{51} } +func (*VolumeEcShardReadResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{53} } func (m *VolumeEcShardReadResponse) GetData() []byte { if m != nil { @@ -1238,7 +1344,7 @@ type VolumeEcBlobDeleteRequest struct { func (m *VolumeEcBlobDeleteRequest) Reset() { *m = VolumeEcBlobDeleteRequest{} } func (m *VolumeEcBlobDeleteRequest) String() string { return proto.CompactTextString(m) } func (*VolumeEcBlobDeleteRequest) ProtoMessage() {} -func (*VolumeEcBlobDeleteRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{52} } +func (*VolumeEcBlobDeleteRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{54} } func (m *VolumeEcBlobDeleteRequest) GetVolumeId() uint32 { if m != nil { @@ -1274,7 +1380,7 @@ type VolumeEcBlobDeleteResponse struct { func (m *VolumeEcBlobDeleteResponse) Reset() { *m = VolumeEcBlobDeleteResponse{} } func (m *VolumeEcBlobDeleteResponse) String() string { return proto.CompactTextString(m) } func (*VolumeEcBlobDeleteResponse) ProtoMessage() {} -func (*VolumeEcBlobDeleteResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{53} } +func (*VolumeEcBlobDeleteResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{55} } type VolumeEcShardsToVolumeRequest struct { VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` @@ -1284,7 +1390,7 @@ type VolumeEcShardsToVolumeRequest struct { func (m *VolumeEcShardsToVolumeRequest) Reset() { *m = VolumeEcShardsToVolumeRequest{} } func (m *VolumeEcShardsToVolumeRequest) String() string { return proto.CompactTextString(m) } func (*VolumeEcShardsToVolumeRequest) ProtoMessage() {} -func (*VolumeEcShardsToVolumeRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{54} } +func (*VolumeEcShardsToVolumeRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{56} } 
func (m *VolumeEcShardsToVolumeRequest) GetVolumeId() uint32 { if m != nil { @@ -1306,7 +1412,7 @@ type VolumeEcShardsToVolumeResponse struct { func (m *VolumeEcShardsToVolumeResponse) Reset() { *m = VolumeEcShardsToVolumeResponse{} } func (m *VolumeEcShardsToVolumeResponse) String() string { return proto.CompactTextString(m) } func (*VolumeEcShardsToVolumeResponse) ProtoMessage() {} -func (*VolumeEcShardsToVolumeResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{55} } +func (*VolumeEcShardsToVolumeResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{57} } type ReadVolumeFileStatusRequest struct { VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` @@ -1315,7 +1421,7 @@ type ReadVolumeFileStatusRequest struct { func (m *ReadVolumeFileStatusRequest) Reset() { *m = ReadVolumeFileStatusRequest{} } func (m *ReadVolumeFileStatusRequest) String() string { return proto.CompactTextString(m) } func (*ReadVolumeFileStatusRequest) ProtoMessage() {} -func (*ReadVolumeFileStatusRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{56} } +func (*ReadVolumeFileStatusRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{58} } func (m *ReadVolumeFileStatusRequest) GetVolumeId() uint32 { if m != nil { @@ -1338,7 +1444,7 @@ type ReadVolumeFileStatusResponse struct { func (m *ReadVolumeFileStatusResponse) Reset() { *m = ReadVolumeFileStatusResponse{} } func (m *ReadVolumeFileStatusResponse) String() string { return proto.CompactTextString(m) } func (*ReadVolumeFileStatusResponse) ProtoMessage() {} -func (*ReadVolumeFileStatusResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{57} } +func (*ReadVolumeFileStatusResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{59} } func (m *ReadVolumeFileStatusResponse) GetVolumeId() uint32 { if m != nil { @@ -1406,7 +1512,7 @@ type DiskStatus struct { func (m *DiskStatus) Reset() { *m = DiskStatus{} } func (m *DiskStatus) String() string { return proto.CompactTextString(m) } func (*DiskStatus) ProtoMessage() {} -func (*DiskStatus) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{58} } +func (*DiskStatus) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{60} } func (m *DiskStatus) GetDir() string { if m != nil { @@ -1449,7 +1555,7 @@ type MemStatus struct { func (m *MemStatus) Reset() { *m = MemStatus{} } func (m *MemStatus) String() string { return proto.CompactTextString(m) } func (*MemStatus) ProtoMessage() {} -func (*MemStatus) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{59} } +func (*MemStatus) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{61} } func (m *MemStatus) GetGoroutines() int32 { if m != nil { @@ -1514,7 +1620,7 @@ type RemoteFile struct { func (m *RemoteFile) Reset() { *m = RemoteFile{} } func (m *RemoteFile) String() string { return proto.CompactTextString(m) } func (*RemoteFile) ProtoMessage() {} -func (*RemoteFile) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{60} } +func (*RemoteFile) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{62} } func (m *RemoteFile) GetBackendType() string { if m != nil { @@ -1574,7 +1680,7 @@ type VolumeInfo struct { func (m *VolumeInfo) Reset() { *m = VolumeInfo{} } func (m *VolumeInfo) String() string { return proto.CompactTextString(m) } func (*VolumeInfo) ProtoMessage() {} -func (*VolumeInfo) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{61} } +func (*VolumeInfo) Descriptor() 
([]byte, []int) { return fileDescriptor0, []int{63} } func (m *VolumeInfo) GetFiles() []*RemoteFile { if m != nil { @@ -1608,7 +1714,7 @@ func (m *VolumeTierMoveDatToRemoteRequest) Reset() { *m = VolumeTierMove func (m *VolumeTierMoveDatToRemoteRequest) String() string { return proto.CompactTextString(m) } func (*VolumeTierMoveDatToRemoteRequest) ProtoMessage() {} func (*VolumeTierMoveDatToRemoteRequest) Descriptor() ([]byte, []int) { - return fileDescriptor0, []int{62} + return fileDescriptor0, []int{64} } func (m *VolumeTierMoveDatToRemoteRequest) GetVolumeId() uint32 { @@ -1648,7 +1754,7 @@ func (m *VolumeTierMoveDatToRemoteResponse) Reset() { *m = VolumeTierMov func (m *VolumeTierMoveDatToRemoteResponse) String() string { return proto.CompactTextString(m) } func (*VolumeTierMoveDatToRemoteResponse) ProtoMessage() {} func (*VolumeTierMoveDatToRemoteResponse) Descriptor() ([]byte, []int) { - return fileDescriptor0, []int{63} + return fileDescriptor0, []int{65} } func (m *VolumeTierMoveDatToRemoteResponse) GetProcessed() int64 { @@ -1675,7 +1781,7 @@ func (m *VolumeTierMoveDatFromRemoteRequest) Reset() { *m = VolumeTierMo func (m *VolumeTierMoveDatFromRemoteRequest) String() string { return proto.CompactTextString(m) } func (*VolumeTierMoveDatFromRemoteRequest) ProtoMessage() {} func (*VolumeTierMoveDatFromRemoteRequest) Descriptor() ([]byte, []int) { - return fileDescriptor0, []int{64} + return fileDescriptor0, []int{66} } func (m *VolumeTierMoveDatFromRemoteRequest) GetVolumeId() uint32 { @@ -1708,7 +1814,7 @@ func (m *VolumeTierMoveDatFromRemoteResponse) Reset() { *m = VolumeTierM func (m *VolumeTierMoveDatFromRemoteResponse) String() string { return proto.CompactTextString(m) } func (*VolumeTierMoveDatFromRemoteResponse) ProtoMessage() {} func (*VolumeTierMoveDatFromRemoteResponse) Descriptor() ([]byte, []int) { - return fileDescriptor0, []int{65} + return fileDescriptor0, []int{67} } func (m *VolumeTierMoveDatFromRemoteResponse) GetProcessed() int64 { @@ -1737,7 +1843,7 @@ type QueryRequest struct { func (m *QueryRequest) Reset() { *m = QueryRequest{} } func (m *QueryRequest) String() string { return proto.CompactTextString(m) } func (*QueryRequest) ProtoMessage() {} -func (*QueryRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{66} } +func (*QueryRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{68} } func (m *QueryRequest) GetSelections() []string { if m != nil { @@ -1783,7 +1889,7 @@ type QueryRequest_Filter struct { func (m *QueryRequest_Filter) Reset() { *m = QueryRequest_Filter{} } func (m *QueryRequest_Filter) String() string { return proto.CompactTextString(m) } func (*QueryRequest_Filter) ProtoMessage() {} -func (*QueryRequest_Filter) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{66, 0} } +func (*QueryRequest_Filter) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{68, 0} } func (m *QueryRequest_Filter) GetField() string { if m != nil { @@ -1818,7 +1924,7 @@ func (m *QueryRequest_InputSerialization) Reset() { *m = QueryRequest_In func (m *QueryRequest_InputSerialization) String() string { return proto.CompactTextString(m) } func (*QueryRequest_InputSerialization) ProtoMessage() {} func (*QueryRequest_InputSerialization) Descriptor() ([]byte, []int) { - return fileDescriptor0, []int{66, 1} + return fileDescriptor0, []int{68, 1} } func (m *QueryRequest_InputSerialization) GetCompressionType() string { @@ -1866,7 +1972,7 @@ func (m *QueryRequest_InputSerialization_CSVInput) Reset() { func (m 
*QueryRequest_InputSerialization_CSVInput) String() string { return proto.CompactTextString(m) } func (*QueryRequest_InputSerialization_CSVInput) ProtoMessage() {} func (*QueryRequest_InputSerialization_CSVInput) Descriptor() ([]byte, []int) { - return fileDescriptor0, []int{66, 1, 0} + return fileDescriptor0, []int{68, 1, 0} } func (m *QueryRequest_InputSerialization_CSVInput) GetFileHeaderInfo() string { @@ -1928,7 +2034,7 @@ func (m *QueryRequest_InputSerialization_JSONInput) Reset() { func (m *QueryRequest_InputSerialization_JSONInput) String() string { return proto.CompactTextString(m) } func (*QueryRequest_InputSerialization_JSONInput) ProtoMessage() {} func (*QueryRequest_InputSerialization_JSONInput) Descriptor() ([]byte, []int) { - return fileDescriptor0, []int{66, 1, 1} + return fileDescriptor0, []int{68, 1, 1} } func (m *QueryRequest_InputSerialization_JSONInput) GetType() string { @@ -1949,7 +2055,7 @@ func (m *QueryRequest_InputSerialization_ParquetInput) String() string { } func (*QueryRequest_InputSerialization_ParquetInput) ProtoMessage() {} func (*QueryRequest_InputSerialization_ParquetInput) Descriptor() ([]byte, []int) { - return fileDescriptor0, []int{66, 1, 2} + return fileDescriptor0, []int{68, 1, 2} } type QueryRequest_OutputSerialization struct { @@ -1961,7 +2067,7 @@ func (m *QueryRequest_OutputSerialization) Reset() { *m = QueryRequest_O func (m *QueryRequest_OutputSerialization) String() string { return proto.CompactTextString(m) } func (*QueryRequest_OutputSerialization) ProtoMessage() {} func (*QueryRequest_OutputSerialization) Descriptor() ([]byte, []int) { - return fileDescriptor0, []int{66, 2} + return fileDescriptor0, []int{68, 2} } func (m *QueryRequest_OutputSerialization) GetCsvOutput() *QueryRequest_OutputSerialization_CSVOutput { @@ -1994,7 +2100,7 @@ func (m *QueryRequest_OutputSerialization_CSVOutput) String() string { } func (*QueryRequest_OutputSerialization_CSVOutput) ProtoMessage() {} func (*QueryRequest_OutputSerialization_CSVOutput) Descriptor() ([]byte, []int) { - return fileDescriptor0, []int{66, 2, 0} + return fileDescriptor0, []int{68, 2, 0} } func (m *QueryRequest_OutputSerialization_CSVOutput) GetQuoteFields() string { @@ -2044,7 +2150,7 @@ func (m *QueryRequest_OutputSerialization_JSONOutput) String() string { } func (*QueryRequest_OutputSerialization_JSONOutput) ProtoMessage() {} func (*QueryRequest_OutputSerialization_JSONOutput) Descriptor() ([]byte, []int) { - return fileDescriptor0, []int{66, 2, 1} + return fileDescriptor0, []int{68, 2, 1} } func (m *QueryRequest_OutputSerialization_JSONOutput) GetRecordDelimiter() string { @@ -2061,7 +2167,7 @@ type QueriedStripe struct { func (m *QueriedStripe) Reset() { *m = QueriedStripe{} } func (m *QueriedStripe) String() string { return proto.CompactTextString(m) } func (*QueriedStripe) ProtoMessage() {} -func (*QueriedStripe) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{67} } +func (*QueriedStripe) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{69} } func (m *QueriedStripe) GetRecords() []byte { if m != nil { @@ -2074,6 +2180,8 @@ func init() { proto.RegisterType((*BatchDeleteRequest)(nil), "volume_server_pb.BatchDeleteRequest") proto.RegisterType((*BatchDeleteResponse)(nil), "volume_server_pb.BatchDeleteResponse") proto.RegisterType((*DeleteResult)(nil), "volume_server_pb.DeleteResult") + proto.RegisterType((*FileGetRequest)(nil), "volume_server_pb.FileGetRequest") + proto.RegisterType((*FileGetResponse)(nil), "volume_server_pb.FileGetResponse") 
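The two registrations above introduce the new FileGetRequest and FileGetResponse messages; the client and server streaming plumbing for the FileGet RPC follows in the rest of this hunk. As a minimal sketch of how a caller might consume this server-streaming RPC — assuming the usual protoc-gen-go NewVolumeServerClient constructor and a FileId field on FileGetRequest, neither of which appears in this hunk:

package example

import (
	"context"
	"fmt"
	"io"

	"google.golang.org/grpc"

	"github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
)

// fetchFile streams one file from a volume server over the new FileGet RPC
// and writes the received chunks to out. NewVolumeServerClient and the
// FileId field on FileGetRequest are assumed from standard generated code.
func fetchFile(ctx context.Context, conn *grpc.ClientConn, fid string, out io.Writer) error {
	client := volume_server_pb.NewVolumeServerClient(conn)
	stream, err := client.FileGet(ctx, &volume_server_pb.FileGetRequest{FileId: fid})
	if err != nil {
		return err
	}
	for {
		resp, err := stream.Recv()
		if err == io.EOF {
			// The server closed the stream; the file has been fully received.
			return nil
		}
		if err != nil {
			return err
		}
		if resp.ErrorCode != 0 {
			return fmt.Errorf("FileGet returned error code %d", resp.ErrorCode)
		}
		// Each FileGetResponse chunk carries a slice of the file body plus
		// metadata such as ContentType, Etag and Headers.
		if _, err := out.Write(resp.Data); err != nil {
			return err
		}
	}
}

Because FileGet is declared with ServerStreams only, the client sends a single request and then loops on Recv until io.EOF, which keeps large file bodies from being buffered in one response message.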
proto.RegisterType((*Empty)(nil), "volume_server_pb.Empty") proto.RegisterType((*VacuumVolumeCheckRequest)(nil), "volume_server_pb.VacuumVolumeCheckRequest") proto.RegisterType((*VacuumVolumeCheckResponse)(nil), "volume_server_pb.VacuumVolumeCheckResponse") @@ -2162,6 +2270,7 @@ const _ = grpc.SupportPackageIsVersion4 type VolumeServerClient interface { // Experts only: takes multiple fid parameters. This function does not propagate deletes to replicas. BatchDelete(ctx context.Context, in *BatchDeleteRequest, opts ...grpc.CallOption) (*BatchDeleteResponse, error) + FileGet(ctx context.Context, in *FileGetRequest, opts ...grpc.CallOption) (VolumeServer_FileGetClient, error) VacuumVolumeCheck(ctx context.Context, in *VacuumVolumeCheckRequest, opts ...grpc.CallOption) (*VacuumVolumeCheckResponse, error) VacuumVolumeCompact(ctx context.Context, in *VacuumVolumeCompactRequest, opts ...grpc.CallOption) (*VacuumVolumeCompactResponse, error) VacuumVolumeCommit(ctx context.Context, in *VacuumVolumeCommitRequest, opts ...grpc.CallOption) (*VacuumVolumeCommitResponse, error) @@ -2215,6 +2324,38 @@ func (c *volumeServerClient) BatchDelete(ctx context.Context, in *BatchDeleteReq return out, nil } +func (c *volumeServerClient) FileGet(ctx context.Context, in *FileGetRequest, opts ...grpc.CallOption) (VolumeServer_FileGetClient, error) { + stream, err := grpc.NewClientStream(ctx, &_VolumeServer_serviceDesc.Streams[0], c.cc, "/volume_server_pb.VolumeServer/FileGet", opts...) + if err != nil { + return nil, err + } + x := &volumeServerFileGetClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type VolumeServer_FileGetClient interface { + Recv() (*FileGetResponse, error) + grpc.ClientStream +} + +type volumeServerFileGetClient struct { + grpc.ClientStream +} + +func (x *volumeServerFileGetClient) Recv() (*FileGetResponse, error) { + m := new(FileGetResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + func (c *volumeServerClient) VacuumVolumeCheck(ctx context.Context, in *VacuumVolumeCheckRequest, opts ...grpc.CallOption) (*VacuumVolumeCheckResponse, error) { out := new(VacuumVolumeCheckResponse) err := grpc.Invoke(ctx, "/volume_server_pb.VolumeServer/VacuumVolumeCheck", in, out, c.cc, opts...) @@ -2279,7 +2420,7 @@ func (c *volumeServerClient) VolumeSyncStatus(ctx context.Context, in *VolumeSyn } func (c *volumeServerClient) VolumeIncrementalCopy(ctx context.Context, in *VolumeIncrementalCopyRequest, opts ...grpc.CallOption) (VolumeServer_VolumeIncrementalCopyClient, error) { - stream, err := grpc.NewClientStream(ctx, &_VolumeServer_serviceDesc.Streams[0], c.cc, "/volume_server_pb.VolumeServer/VolumeIncrementalCopy", opts...) + stream, err := grpc.NewClientStream(ctx, &_VolumeServer_serviceDesc.Streams[1], c.cc, "/volume_server_pb.VolumeServer/VolumeIncrementalCopy", opts...) if err != nil { return nil, err } @@ -2374,7 +2515,7 @@ func (c *volumeServerClient) ReadVolumeFileStatus(ctx context.Context, in *ReadV } func (c *volumeServerClient) CopyFile(ctx context.Context, in *CopyFileRequest, opts ...grpc.CallOption) (VolumeServer_CopyFileClient, error) { - stream, err := grpc.NewClientStream(ctx, &_VolumeServer_serviceDesc.Streams[1], c.cc, "/volume_server_pb.VolumeServer/CopyFile", opts...) 
+ stream, err := grpc.NewClientStream(ctx, &_VolumeServer_serviceDesc.Streams[2], c.cc, "/volume_server_pb.VolumeServer/CopyFile", opts...) if err != nil { return nil, err } @@ -2406,7 +2547,7 @@ func (x *volumeServerCopyFileClient) Recv() (*CopyFileResponse, error) { } func (c *volumeServerClient) VolumeTailSender(ctx context.Context, in *VolumeTailSenderRequest, opts ...grpc.CallOption) (VolumeServer_VolumeTailSenderClient, error) { - stream, err := grpc.NewClientStream(ctx, &_VolumeServer_serviceDesc.Streams[2], c.cc, "/volume_server_pb.VolumeServer/VolumeTailSender", opts...) + stream, err := grpc.NewClientStream(ctx, &_VolumeServer_serviceDesc.Streams[3], c.cc, "/volume_server_pb.VolumeServer/VolumeTailSender", opts...) if err != nil { return nil, err } @@ -2501,7 +2642,7 @@ func (c *volumeServerClient) VolumeEcShardsUnmount(ctx context.Context, in *Volu } func (c *volumeServerClient) VolumeEcShardRead(ctx context.Context, in *VolumeEcShardReadRequest, opts ...grpc.CallOption) (VolumeServer_VolumeEcShardReadClient, error) { - stream, err := grpc.NewClientStream(ctx, &_VolumeServer_serviceDesc.Streams[3], c.cc, "/volume_server_pb.VolumeServer/VolumeEcShardRead", opts...) + stream, err := grpc.NewClientStream(ctx, &_VolumeServer_serviceDesc.Streams[4], c.cc, "/volume_server_pb.VolumeServer/VolumeEcShardRead", opts...) if err != nil { return nil, err } @@ -2551,7 +2692,7 @@ func (c *volumeServerClient) VolumeEcShardsToVolume(ctx context.Context, in *Vol } func (c *volumeServerClient) VolumeTierMoveDatToRemote(ctx context.Context, in *VolumeTierMoveDatToRemoteRequest, opts ...grpc.CallOption) (VolumeServer_VolumeTierMoveDatToRemoteClient, error) { - stream, err := grpc.NewClientStream(ctx, &_VolumeServer_serviceDesc.Streams[4], c.cc, "/volume_server_pb.VolumeServer/VolumeTierMoveDatToRemote", opts...) + stream, err := grpc.NewClientStream(ctx, &_VolumeServer_serviceDesc.Streams[5], c.cc, "/volume_server_pb.VolumeServer/VolumeTierMoveDatToRemote", opts...) if err != nil { return nil, err } @@ -2583,7 +2724,7 @@ func (x *volumeServerVolumeTierMoveDatToRemoteClient) Recv() (*VolumeTierMoveDat } func (c *volumeServerClient) VolumeTierMoveDatFromRemote(ctx context.Context, in *VolumeTierMoveDatFromRemoteRequest, opts ...grpc.CallOption) (VolumeServer_VolumeTierMoveDatFromRemoteClient, error) { - stream, err := grpc.NewClientStream(ctx, &_VolumeServer_serviceDesc.Streams[5], c.cc, "/volume_server_pb.VolumeServer/VolumeTierMoveDatFromRemote", opts...) + stream, err := grpc.NewClientStream(ctx, &_VolumeServer_serviceDesc.Streams[6], c.cc, "/volume_server_pb.VolumeServer/VolumeTierMoveDatFromRemote", opts...) if err != nil { return nil, err } @@ -2615,7 +2756,7 @@ func (x *volumeServerVolumeTierMoveDatFromRemoteClient) Recv() (*VolumeTierMoveD } func (c *volumeServerClient) Query(ctx context.Context, in *QueryRequest, opts ...grpc.CallOption) (VolumeServer_QueryClient, error) { - stream, err := grpc.NewClientStream(ctx, &_VolumeServer_serviceDesc.Streams[6], c.cc, "/volume_server_pb.VolumeServer/Query", opts...) + stream, err := grpc.NewClientStream(ctx, &_VolumeServer_serviceDesc.Streams[7], c.cc, "/volume_server_pb.VolumeServer/Query", opts...) if err != nil { return nil, err } @@ -2651,6 +2792,7 @@ func (x *volumeServerQueryClient) Recv() (*QueriedStripe, error) { type VolumeServerServer interface { // Experts only: takes multiple fid parameters. This function does not propagate deletes to replicas. 
BatchDelete(context.Context, *BatchDeleteRequest) (*BatchDeleteResponse, error) + FileGet(*FileGetRequest, VolumeServer_FileGetServer) error VacuumVolumeCheck(context.Context, *VacuumVolumeCheckRequest) (*VacuumVolumeCheckResponse, error) VacuumVolumeCompact(context.Context, *VacuumVolumeCompactRequest) (*VacuumVolumeCompactResponse, error) VacuumVolumeCommit(context.Context, *VacuumVolumeCommitRequest) (*VacuumVolumeCommitResponse, error) @@ -2709,6 +2851,27 @@ func _VolumeServer_BatchDelete_Handler(srv interface{}, ctx context.Context, dec return interceptor(ctx, in, info, handler) } +func _VolumeServer_FileGet_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(FileGetRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(VolumeServerServer).FileGet(m, &volumeServerFileGetServer{stream}) +} + +type VolumeServer_FileGetServer interface { + Send(*FileGetResponse) error + grpc.ServerStream +} + +type volumeServerFileGetServer struct { + grpc.ServerStream +} + +func (x *volumeServerFileGetServer) Send(m *FileGetResponse) error { + return x.ServerStream.SendMsg(m) +} + func _VolumeServer_VacuumVolumeCheck_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(VacuumVolumeCheckRequest) if err := dec(in); err != nil { @@ -3372,6 +3535,11 @@ var _VolumeServer_serviceDesc = grpc.ServiceDesc{ }, }, Streams: []grpc.StreamDesc{ + { + StreamName: "FileGet", + Handler: _VolumeServer_FileGet_Handler, + ServerStreams: true, + }, { StreamName: "VolumeIncrementalCopy", Handler: _VolumeServer_VolumeIncrementalCopy_Handler, @@ -3414,190 +3582,204 @@ var _VolumeServer_serviceDesc = grpc.ServiceDesc{ func init() { proto.RegisterFile("volume_server.proto", fileDescriptor0) } var fileDescriptor0 = []byte{ - // 2959 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xd4, 0x5a, 0x4b, 0x73, 0xdc, 0xc6, - 0x11, 0xe6, 0x72, 0xf9, 0xd8, 0xed, 0x5d, 0x8a, 0xd4, 0x90, 0xa6, 0xd6, 0x20, 0x25, 0xd1, 0x90, - 0x1f, 0xa4, 0x6c, 0x91, 0x32, 0x6d, 0xc7, 0x8e, 0x1d, 0x3b, 0x91, 0x28, 0x29, 0x51, 0x6c, 0x51, - 0x36, 0x28, 0xcb, 0x4e, 0xec, 0x0a, 0x0a, 0x04, 0x66, 0x45, 0x98, 0x00, 0x06, 0x02, 0x66, 0x69, - 0xae, 0xca, 0x39, 0x39, 0x87, 0x54, 0xa5, 0x92, 0x43, 0x2a, 0x97, 0x9c, 0x73, 0xf7, 0x35, 0x7f, - 0xc1, 0x7f, 0x20, 0x55, 0x39, 0xe5, 0x92, 0x73, 0x0e, 0xb9, 0xa5, 0x2a, 0x97, 0xd4, 0xbc, 0xb0, - 0x78, 0x72, 0x41, 0x8b, 0xa9, 0x54, 0x6e, 0x83, 0x9e, 0x9e, 0xee, 0x99, 0x9e, 0xee, 0x9e, 0xe9, - 0xf9, 0x00, 0x8b, 0x47, 0xc4, 0x1b, 0xf8, 0xd8, 0x8c, 0x71, 0x74, 0x84, 0xa3, 0xcd, 0x30, 0x22, - 0x94, 0xa0, 0x85, 0x0c, 0xd1, 0x0c, 0xf7, 0xf5, 0x2d, 0x40, 0x37, 0x2d, 0x6a, 0x1f, 0xdc, 0xc2, - 0x1e, 0xa6, 0xd8, 0xc0, 0x8f, 0x07, 0x38, 0xa6, 0xe8, 0x59, 0x68, 0xf5, 0x5d, 0x0f, 0x9b, 0xae, - 0x13, 0xf7, 0x1a, 0x6b, 0xcd, 0xf5, 0xb6, 0x31, 0xcb, 0xbe, 0xef, 0x3a, 0xb1, 0x7e, 0x1f, 0x16, - 0x33, 0x03, 0xe2, 0x90, 0x04, 0x31, 0x46, 0x6f, 0xc1, 0x6c, 0x84, 0xe3, 0x81, 0x47, 0xc5, 0x80, - 0xce, 0xf6, 0xa5, 0xcd, 0xbc, 0xae, 0xcd, 0x64, 0xc8, 0xc0, 0xa3, 0x86, 0x62, 0xd7, 0xbf, 0x6e, - 0x40, 0x37, 0xdd, 0x83, 0x2e, 0xc0, 0xac, 0x54, 0xde, 0x6b, 0xac, 0x35, 0xd6, 0xdb, 0xc6, 0x8c, - 0xd0, 0x8d, 0x96, 0x61, 0x26, 0xa6, 0x16, 0x1d, 0xc4, 0xbd, 0xc9, 0xb5, 0xc6, 0xfa, 0xb4, 0x21, - 0xbf, 0xd0, 0x12, 0x4c, 0xe3, 0x28, 0x22, 0x51, 0xaf, 0xc9, 0xd9, 0xc5, 0x07, 0x42, 0x30, 0x15, - 0xbb, 0x4f, 0x70, 0x6f, 0x6a, 0xad, 0xb1, 0x3e, 0x67, 0xf0, 0x36, 0xea, 0xc1, 
0xec, 0x11, 0x8e, - 0x62, 0x97, 0x04, 0xbd, 0x69, 0x4e, 0x56, 0x9f, 0xfa, 0x2c, 0x4c, 0xdf, 0xf6, 0x43, 0x3a, 0xd4, - 0xdf, 0x84, 0xde, 0x43, 0xcb, 0x1e, 0x0c, 0xfc, 0x87, 0x7c, 0xfa, 0x3b, 0x07, 0xd8, 0x3e, 0x54, - 0x66, 0x59, 0x81, 0xb6, 0x5c, 0x94, 0x9c, 0xdb, 0x9c, 0xd1, 0x12, 0x84, 0xbb, 0x8e, 0xfe, 0x23, - 0x78, 0xb6, 0x64, 0xa0, 0x34, 0xcf, 0x15, 0x98, 0x7b, 0x64, 0x45, 0xfb, 0xd6, 0x23, 0x6c, 0x46, - 0x16, 0x75, 0x09, 0x1f, 0xdd, 0x30, 0xba, 0x92, 0x68, 0x30, 0x9a, 0xfe, 0x19, 0x68, 0x19, 0x09, - 0xc4, 0x0f, 0x2d, 0x9b, 0xd6, 0x51, 0x8e, 0xd6, 0xa0, 0x13, 0x46, 0xd8, 0xf2, 0x3c, 0x62, 0x5b, - 0x14, 0x73, 0xfb, 0x34, 0x8d, 0x34, 0x49, 0xbf, 0x08, 0x2b, 0xa5, 0xc2, 0xc5, 0x04, 0xf5, 0xb7, - 0x72, 0xb3, 0x27, 0xbe, 0xef, 0xd6, 0x52, 0xad, 0xaf, 0x16, 0x66, 0xcd, 0x47, 0x4a, 0xb9, 0xdf, - 0xcf, 0xf5, 0x7a, 0xd8, 0x0a, 0x06, 0x61, 0x2d, 0xc1, 0xf9, 0x19, 0xab, 0xa1, 0x89, 0xe4, 0x0b, - 0xc2, 0x6d, 0x76, 0x88, 0xe7, 0x61, 0x9b, 0xba, 0x24, 0x50, 0x62, 0x2f, 0x01, 0xd8, 0x09, 0x51, - 0x3a, 0x51, 0x8a, 0xa2, 0x6b, 0xd0, 0x2b, 0x0e, 0x95, 0x62, 0xff, 0xd6, 0x80, 0x67, 0x6e, 0x48, - 0xa3, 0x09, 0xc5, 0xb5, 0x36, 0x20, 0xab, 0x72, 0x32, 0xaf, 0x32, 0xbf, 0x41, 0xcd, 0xc2, 0x06, - 0x31, 0x8e, 0x08, 0x87, 0x9e, 0x6b, 0x5b, 0x5c, 0xc4, 0x14, 0x17, 0x91, 0x26, 0xa1, 0x05, 0x68, - 0x52, 0xea, 0x71, 0xcf, 0x6d, 0x1b, 0xac, 0x89, 0xb6, 0x61, 0xd9, 0xc7, 0x3e, 0x89, 0x86, 0xa6, - 0x6f, 0x85, 0xa6, 0x6f, 0x1d, 0x9b, 0xcc, 0xcd, 0x4d, 0x7f, 0xbf, 0x37, 0xc3, 0xe7, 0x87, 0x44, - 0xef, 0x3d, 0x2b, 0xbc, 0x67, 0x1d, 0xef, 0xb9, 0x4f, 0xf0, 0xbd, 0x7d, 0xbd, 0x07, 0xcb, 0xf9, - 0xf5, 0xc9, 0xa5, 0x7f, 0x0f, 0x2e, 0x08, 0xca, 0xde, 0x30, 0xb0, 0xf7, 0x78, 0x6c, 0xd5, 0xda, - 0xa8, 0x7f, 0x37, 0xa0, 0x57, 0x1c, 0x28, 0x3d, 0xff, 0x69, 0xad, 0x76, 0x6a, 0x9b, 0x5c, 0x86, - 0x0e, 0xb5, 0x5c, 0xcf, 0x24, 0xfd, 0x7e, 0x8c, 0x29, 0x37, 0xc4, 0x94, 0x01, 0x8c, 0x74, 0x9f, - 0x53, 0xd0, 0x06, 0x2c, 0xd8, 0xc2, 0xfb, 0xcd, 0x08, 0x1f, 0xb9, 0x3c, 0x1b, 0xcc, 0xf2, 0x89, - 0xcd, 0xdb, 0x2a, 0x2a, 0x04, 0x19, 0xe9, 0x30, 0xe7, 0x3a, 0xc7, 0x26, 0x4f, 0x47, 0x3c, 0x99, - 0xb4, 0xb8, 0xb4, 0x8e, 0xeb, 0x1c, 0xdf, 0x71, 0x3d, 0xcc, 0x2c, 0xaa, 0x3f, 0x84, 0x55, 0xb1, - 0xf8, 0xbb, 0x81, 0x1d, 0x61, 0x1f, 0x07, 0xd4, 0xf2, 0x76, 0x48, 0x38, 0xac, 0xe5, 0x36, 0xcf, - 0x42, 0x2b, 0x76, 0x03, 0x1b, 0x9b, 0x81, 0x48, 0x6a, 0x53, 0xc6, 0x2c, 0xff, 0xde, 0x8d, 0xf5, - 0x9b, 0x70, 0xb1, 0x42, 0xae, 0xb4, 0xec, 0x73, 0xd0, 0xe5, 0x13, 0xb3, 0x49, 0x40, 0x71, 0x40, - 0xb9, 0xec, 0xae, 0xd1, 0x61, 0xb4, 0x1d, 0x41, 0xd2, 0x5f, 0x05, 0x24, 0x64, 0xdc, 0x23, 0x83, - 0xa0, 0x5e, 0x38, 0x3f, 0x03, 0x8b, 0x99, 0x21, 0xd2, 0x37, 0x5e, 0x83, 0x25, 0x41, 0xfe, 0x38, - 0xf0, 0x6b, 0xcb, 0xba, 0x00, 0xcf, 0xe4, 0x06, 0x49, 0x69, 0xdb, 0x4a, 0x49, 0xf6, 0xd8, 0x39, - 0x51, 0xd8, 0xb2, 0x9a, 0x41, 0xf6, 0xe4, 0xe1, 0x99, 0x4b, 0x4c, 0xd8, 0x8a, 0x0e, 0x0d, 0x6c, - 0x39, 0x24, 0xf0, 0x86, 0xb5, 0x33, 0x57, 0xc9, 0x48, 0x29, 0xf7, 0x13, 0x58, 0x56, 0x19, 0x2d, - 0xe8, 0xbb, 0x8f, 0x06, 0x11, 0xae, 0x9b, 0x89, 0xd3, 0x2e, 0x3b, 0x59, 0x70, 0x59, 0x7d, 0x4b, - 0x85, 0x59, 0x4a, 0xb0, 0xdc, 0xd2, 0xe4, 0x24, 0x6b, 0xa4, 0x4e, 0x32, 0xfd, 0x9b, 0x06, 0x9c, - 0x57, 0x23, 0x6a, 0xfa, 0xd5, 0x29, 0x03, 0xab, 0x59, 0x19, 0x58, 0x53, 0xa3, 0xc0, 0x5a, 0x87, - 0x85, 0x98, 0x0c, 0x22, 0x1b, 0x9b, 0x8e, 0x45, 0x2d, 0x33, 0x20, 0x0e, 0x96, 0x71, 0x77, 0x4e, - 0xd0, 0x6f, 0x59, 0xd4, 0xda, 0x25, 0x0e, 0xd6, 0x7f, 0xa8, 0xdc, 0x2e, 0xe3, 0xaf, 0x1b, 0x70, - 0xde, 0xb3, 0x62, 0x6a, 0x5a, 0x61, 0x88, 0x03, 0xc7, 0xb4, 0x28, 0x73, 0xfa, 0x06, 0x77, 0xfa, - 0x73, 
0xac, 0xe3, 0x06, 0xa7, 0xdf, 0xa0, 0xbb, 0xb1, 0xfe, 0x87, 0x49, 0x98, 0x67, 0x63, 0x59, - 0x90, 0xd5, 0x5a, 0xef, 0x02, 0x34, 0xf1, 0x31, 0x95, 0x0b, 0x65, 0x4d, 0xb4, 0x05, 0x8b, 0x32, - 0x9a, 0x5d, 0x12, 0x8c, 0x02, 0xbd, 0x29, 0xf2, 0xe2, 0xa8, 0x2b, 0x89, 0xf5, 0xcb, 0xd0, 0x89, - 0x29, 0x09, 0x55, 0xde, 0x98, 0x12, 0x79, 0x83, 0x91, 0x64, 0xde, 0xc8, 0xda, 0x74, 0xba, 0xc4, - 0xa6, 0x5d, 0x37, 0x36, 0xb1, 0x6d, 0x8a, 0x59, 0xf1, 0xcc, 0xd3, 0x32, 0xc0, 0x8d, 0x6f, 0xdb, - 0xc2, 0x1a, 0xe8, 0x3d, 0x58, 0x75, 0x1f, 0x05, 0x24, 0xc2, 0xa6, 0x34, 0x24, 0x8f, 0xdf, 0x80, - 0x50, 0xb3, 0x4f, 0x06, 0x81, 0xc3, 0xb3, 0x50, 0xcb, 0xe8, 0x09, 0x9e, 0x3d, 0xce, 0xc2, 0x2c, - 0xb0, 0x4b, 0xe8, 0x1d, 0xd6, 0xaf, 0xbf, 0x01, 0x0b, 0x23, 0xab, 0xd4, 0xcf, 0x02, 0x5f, 0x37, - 0x94, 0xc7, 0x3d, 0xb0, 0x5c, 0x6f, 0x0f, 0x07, 0x0e, 0x8e, 0x9e, 0x32, 0x3b, 0xa1, 0xeb, 0xb0, - 0xe4, 0x3a, 0x1e, 0x36, 0xa9, 0xeb, 0x63, 0x32, 0xa0, 0x66, 0x8c, 0x6d, 0x12, 0x38, 0xb1, 0xb2, - 0x2f, 0xeb, 0x7b, 0x20, 0xba, 0xf6, 0x44, 0x8f, 0xfe, 0xab, 0xe4, 0x94, 0x48, 0xcf, 0x62, 0x74, - 0x3f, 0x0a, 0x30, 0x66, 0x02, 0x0f, 0xb0, 0xe5, 0xe0, 0x48, 0x2e, 0xa3, 0x2b, 0x88, 0x3f, 0xe1, - 0x34, 0xb6, 0x43, 0x92, 0x69, 0x9f, 0x38, 0x43, 0x3e, 0xa3, 0xae, 0x01, 0x82, 0x74, 0x93, 0x38, - 0x43, 0x9e, 0xae, 0x63, 0x93, 0x3b, 0x99, 0x7d, 0x30, 0x08, 0x0e, 0xf9, 0x6c, 0x5a, 0x46, 0xc7, - 0x8d, 0x3f, 0xb0, 0x62, 0xba, 0xc3, 0x48, 0xfa, 0x9f, 0x1b, 0x2a, 0x5f, 0xb0, 0x69, 0x18, 0xd8, - 0xc6, 0xee, 0xd1, 0xff, 0xc0, 0x1c, 0x6c, 0x84, 0x74, 0x82, 0xcc, 0x3d, 0x59, 0x06, 0x1c, 0x12, - 0x7d, 0xf2, 0x54, 0xe5, 0x3d, 0xa3, 0x74, 0x95, 0x9d, 0xb8, 0x4c, 0x57, 0x9f, 0xab, 0xe3, 0xe2, - 0xb6, 0xbd, 0x77, 0x60, 0x45, 0x4e, 0xfc, 0x63, 0x1c, 0xe0, 0xc8, 0xa2, 0x67, 0x72, 0x7d, 0xd1, - 0xd7, 0xe0, 0x52, 0x95, 0x74, 0xa9, 0xff, 0x33, 0x75, 0x0c, 0x2a, 0x0e, 0x03, 0xef, 0x0f, 0x5c, - 0xcf, 0x39, 0x13, 0xf5, 0xef, 0xe7, 0x17, 0x97, 0x08, 0x97, 0xfe, 0x73, 0x15, 0xce, 0x47, 0x9c, - 0x44, 0xcd, 0x98, 0x31, 0x24, 0x95, 0xcb, 0x9c, 0x31, 0x2f, 0x3b, 0xf8, 0x40, 0x56, 0xc1, 0xfc, - 0x66, 0x52, 0x79, 0x80, 0x92, 0x76, 0x66, 0x69, 0x75, 0x05, 0xda, 0x23, 0xf5, 0x4d, 0xae, 0xbe, - 0x15, 0x4b, 0xbd, 0xcc, 0x3b, 0x6d, 0x12, 0x0e, 0x4d, 0x6c, 0x8b, 0x1b, 0x05, 0xdf, 0xea, 0x96, - 0xd1, 0x61, 0xc4, 0xdb, 0x36, 0xbf, 0x50, 0xd4, 0xcf, 0xb1, 0x29, 0x69, 0x5f, 0x08, 0x69, 0x33, - 0x69, 0x69, 0x5f, 0x70, 0x69, 0x8a, 0xe7, 0xc8, 0xed, 0x0b, 0x9e, 0xd9, 0x11, 0xcf, 0x43, 0xb7, - 0xcf, 0x78, 0x46, 0x5e, 0x95, 0x35, 0x86, 0xdc, 0xd5, 0x2f, 0x61, 0x25, 0xdb, 0x5b, 0xff, 0xc0, - 0x7e, 0x2a, 0x63, 0xe9, 0x97, 0xf2, 0xee, 0x94, 0x3b, 0xf5, 0x8f, 0xf2, 0xd3, 0xae, 0x7d, 0xc3, - 0x79, 0xba, 0x79, 0x5d, 0xcc, 0x1b, 0x24, 0x7b, 0x4d, 0xfa, 0x34, 0x3f, 0xed, 0x53, 0x5c, 0x97, - 0x4e, 0x56, 0x7c, 0x39, 0x1f, 0x02, 0xf9, 0x3b, 0xd5, 0x1f, 0x93, 0xfc, 0x2a, 0x39, 0xd8, 0x8d, - 0xa6, 0x76, 0x5e, 0x93, 0x7a, 0xb9, 0x39, 0xe6, 0x8c, 0x59, 0xa9, 0x96, 0x95, 0xdc, 0xf2, 0x3c, - 0x14, 0x15, 0x8b, 0xfc, 0xca, 0x14, 0xd7, 0x4d, 0x59, 0x5c, 0xab, 0x47, 0x83, 0x43, 0x3c, 0xe4, - 0x3e, 0x3b, 0x25, 0x1e, 0x0d, 0xde, 0xc7, 0x43, 0x7d, 0x37, 0x17, 0x71, 0x62, 0x6a, 0x32, 0x76, - 0x11, 0x4c, 0x31, 0x67, 0x97, 0x29, 0x9f, 0xb7, 0xd1, 0x45, 0x00, 0x37, 0x36, 0x1d, 0xbe, 0xe7, - 0x62, 0x52, 0x2d, 0xa3, 0xed, 0x4a, 0x27, 0x70, 0xf4, 0xdf, 0x36, 0x46, 0x02, 0x6f, 0x7a, 0x64, - 0xff, 0x0c, 0xbd, 0x32, 0xbd, 0x8a, 0x66, 0x66, 0x15, 0xe9, 0xd7, 0x83, 0xa9, 0xec, 0xeb, 0x41, - 0x2a, 0x88, 0xd2, 0xd3, 0xa9, 0x4a, 0xcd, 0x0f, 0xc8, 0xd9, 0x55, 0x96, 0xc5, 0xd4, 0x3c, 0x92, - 0x2e, 0xf5, 0xbf, 0x0d, 0x2b, 
0xcc, 0xe0, 0x82, 0xca, 0xeb, 0x96, 0xfa, 0xb5, 0xdd, 0x3f, 0x26, - 0x61, 0xb5, 0x7c, 0x70, 0x9d, 0xfa, 0xee, 0x1d, 0xd0, 0x92, 0xfa, 0x89, 0x1d, 0x8d, 0x31, 0xb5, - 0xfc, 0x30, 0x39, 0x1c, 0xc5, 0x19, 0x7a, 0x41, 0x16, 0x53, 0x0f, 0x54, 0xbf, 0x3a, 0x21, 0x0b, - 0xc5, 0x57, 0xb3, 0x50, 0x7c, 0x31, 0x05, 0x8e, 0x45, 0xab, 0x14, 0x88, 0x3b, 0xdc, 0x05, 0xc7, - 0xa2, 0x55, 0x0a, 0x92, 0xc1, 0x5c, 0x81, 0xf0, 0xda, 0x8e, 0xe4, 0xe7, 0x0a, 0x2e, 0x02, 0xc8, - 0xeb, 0xd5, 0x20, 0x50, 0xc5, 0x64, 0x5b, 0x5c, 0xae, 0x06, 0x41, 0xe5, 0x2d, 0x73, 0xb6, 0xf2, - 0x96, 0x99, 0xdd, 0xcd, 0x56, 0x61, 0x37, 0x3f, 0x05, 0xb8, 0xe5, 0xc6, 0x87, 0xc2, 0xc8, 0xec, - 0x5a, 0xeb, 0xb8, 0xaa, 0x1a, 0x60, 0x4d, 0x46, 0xb1, 0x3c, 0x4f, 0x9a, 0x8e, 0x35, 0x59, 0xf8, - 0x0c, 0x62, 0xec, 0x48, 0xeb, 0xf0, 0x36, 0xa3, 0xf5, 0x23, 0x8c, 0xa5, 0x01, 0x78, 0x5b, 0xff, - 0x53, 0x03, 0xda, 0xf7, 0xb0, 0x2f, 0x25, 0x5f, 0x02, 0x78, 0x44, 0x22, 0x32, 0xa0, 0x6e, 0x80, - 0xc5, 0x2d, 0x7c, 0xda, 0x48, 0x51, 0xbe, 0xbb, 0x1e, 0x9e, 0x1a, 0xb0, 0xd7, 0x97, 0xc6, 0xe4, - 0x6d, 0x46, 0x3b, 0xc0, 0x56, 0x28, 0xed, 0xc7, 0xdb, 0xac, 0xd6, 0x89, 0xa9, 0x65, 0x1f, 0x72, - 0x63, 0x4d, 0x19, 0xe2, 0x43, 0xff, 0x6b, 0x03, 0xc0, 0xc0, 0x3e, 0xa1, 0xdc, 0xd7, 0xd8, 0xed, - 0x76, 0xdf, 0xb2, 0x0f, 0x59, 0xbd, 0x40, 0x87, 0x21, 0x96, 0x96, 0xe8, 0x48, 0xda, 0x83, 0x61, - 0xc8, 0x77, 0x48, 0xb1, 0xc8, 0xfc, 0xd5, 0x36, 0xda, 0x92, 0x22, 0x2a, 0x03, 0x15, 0xca, 0x6d, - 0x83, 0x35, 0x53, 0x39, 0x4d, 0x4c, 0x5b, 0xe5, 0xb4, 0x15, 0x68, 0xe7, 0x5d, 0x81, 0xa7, 0x02, - 0xee, 0x07, 0x57, 0x60, 0xce, 0x27, 0x8e, 0xdb, 0x77, 0xb1, 0xc3, 0x1d, 0x4d, 0x2e, 0xa5, 0xab, - 0x88, 0xcc, 0xb9, 0xd0, 0x2a, 0xb4, 0xf1, 0x31, 0xc5, 0x41, 0xe2, 0x03, 0x6d, 0x63, 0x44, 0xd0, - 0xbf, 0x02, 0x50, 0x05, 0x7d, 0x9f, 0xa0, 0x6d, 0x98, 0x66, 0xc2, 0xd5, 0x73, 0xe9, 0x6a, 0xf1, - 0xb9, 0x74, 0x64, 0x06, 0x43, 0xb0, 0xa6, 0x13, 0xd0, 0x64, 0x26, 0x01, 0x8d, 0xaf, 0xe7, 0xf4, - 0x6f, 0x1b, 0xb0, 0x26, 0xaf, 0x8f, 0x2e, 0x8e, 0xee, 0x91, 0x23, 0x76, 0x95, 0x78, 0x40, 0x84, - 0x92, 0x33, 0xc9, 0x9c, 0x6f, 0x41, 0xcf, 0xc1, 0x31, 0x75, 0x03, 0xae, 0xd0, 0x54, 0x9b, 0x12, - 0x58, 0x3e, 0x96, 0x13, 0x5a, 0x4e, 0xf5, 0xdf, 0x14, 0xdd, 0xbb, 0x96, 0x8f, 0xd1, 0x35, 0x58, - 0x3c, 0xc4, 0x38, 0x34, 0x3d, 0x62, 0x5b, 0x9e, 0xa9, 0x62, 0x52, 0xde, 0x8f, 0x16, 0x58, 0xd7, - 0x07, 0xac, 0xe7, 0x96, 0x88, 0x4b, 0x3d, 0x86, 0xe7, 0x4e, 0x58, 0x89, 0xcc, 0x4b, 0xab, 0xd0, - 0x0e, 0x23, 0x62, 0xe3, 0x98, 0xf9, 0x6c, 0x83, 0x1f, 0x53, 0x23, 0x02, 0xba, 0x0e, 0x8b, 0xc9, - 0xc7, 0x87, 0x38, 0xb2, 0x71, 0x40, 0xad, 0x47, 0xe2, 0xdd, 0x74, 0xd2, 0x28, 0xeb, 0xd2, 0x7f, - 0xdf, 0x00, 0xbd, 0xa0, 0xf5, 0x4e, 0x44, 0xfc, 0x33, 0xb4, 0xe0, 0x16, 0x2c, 0x71, 0x3b, 0x44, - 0x5c, 0xe4, 0xc8, 0x10, 0xa2, 0x8c, 0x39, 0xcf, 0xfa, 0x84, 0x36, 0x65, 0x89, 0x01, 0x5c, 0x39, - 0x71, 0x4e, 0xff, 0x25, 0x5b, 0xfc, 0xab, 0x0b, 0xdd, 0x8f, 0x06, 0x38, 0x1a, 0xa6, 0x1e, 0x5c, - 0x63, 0x2c, 0x57, 0xa1, 0x10, 0x83, 0x14, 0x85, 0x65, 0xda, 0x7e, 0x44, 0x7c, 0x33, 0x01, 0x15, - 0x26, 0x39, 0x4b, 0x87, 0x11, 0xef, 0x08, 0x60, 0x01, 0xbd, 0x0b, 0x33, 0x7d, 0xd7, 0xa3, 0x58, - 0x3c, 0xe3, 0x77, 0xb6, 0x5f, 0x28, 0x46, 0x44, 0x5a, 0xe7, 0xe6, 0x1d, 0xce, 0x6c, 0xc8, 0x41, - 0x68, 0x1f, 0x16, 0xdd, 0x20, 0xe4, 0xa5, 0x57, 0xe4, 0x5a, 0x9e, 0xfb, 0x64, 0xf4, 0x64, 0xd8, - 0xd9, 0x7e, 0x75, 0x8c, 0xac, 0xbb, 0x6c, 0xe4, 0x5e, 0x7a, 0xa0, 0x81, 0xdc, 0x02, 0x0d, 0x61, - 0x58, 0x22, 0x03, 0x5a, 0x54, 0x32, 0xcd, 0x95, 0x6c, 0x8f, 0x51, 0x72, 0x9f, 0x0f, 0xcd, 0x6a, - 0x59, 0x24, 0x45, 0xa2, 0xb6, 0x0b, 0x33, 0x62, 0x71, 
0x2c, 0x47, 0xf6, 0x5d, 0xec, 0x29, 0x20, - 0x44, 0x7c, 0xb0, 0x34, 0x40, 0x42, 0x1c, 0x59, 0x81, 0x4a, 0x77, 0xea, 0x93, 0xf1, 0x1f, 0x59, - 0xde, 0x40, 0xc5, 0x9b, 0xf8, 0xd0, 0xfe, 0x32, 0x0d, 0xa8, 0xb8, 0x42, 0xf5, 0x0e, 0x1a, 0xe1, - 0x98, 0xa5, 0x90, 0x74, 0x7e, 0x9d, 0x4f, 0xd1, 0x79, 0x8e, 0xfd, 0x04, 0xda, 0x76, 0x7c, 0x64, - 0x72, 0x93, 0x70, 0x9d, 0x9d, 0xed, 0xb7, 0x4f, 0x6d, 0xd2, 0xcd, 0x9d, 0xbd, 0x87, 0x9c, 0x6a, - 0xb4, 0xec, 0xf8, 0x88, 0xb7, 0xd0, 0xcf, 0x01, 0xbe, 0x88, 0x49, 0x20, 0x25, 0x8b, 0x8d, 0x7f, - 0xe7, 0xf4, 0x92, 0x7f, 0xba, 0x77, 0x7f, 0x57, 0x88, 0x6e, 0x33, 0x71, 0x42, 0xb6, 0x0d, 0x73, - 0xa1, 0x15, 0x3d, 0x1e, 0x60, 0x2a, 0xc5, 0x0b, 0x5f, 0x78, 0xef, 0xf4, 0xe2, 0x3f, 0x14, 0x62, - 0x84, 0x86, 0x6e, 0x98, 0xfa, 0xd2, 0xbe, 0x9d, 0x84, 0x96, 0x5a, 0x17, 0xab, 0xde, 0xb8, 0x87, - 0x8b, 0x37, 0x0c, 0xd3, 0x0d, 0xfa, 0x44, 0x5a, 0xf4, 0x1c, 0xa3, 0x8b, 0x67, 0x0c, 0x9e, 0xfd, - 0x37, 0x60, 0x21, 0xc2, 0x36, 0x89, 0x1c, 0x76, 0xc7, 0x75, 0x7d, 0x97, 0xb9, 0xbd, 0xd8, 0xcb, - 0x79, 0x41, 0xbf, 0xa5, 0xc8, 0xe8, 0x25, 0x98, 0xe7, 0xdb, 0x9e, 0xe2, 0x6c, 0x2a, 0x99, 0xd8, - 0x4b, 0x31, 0x6e, 0xc0, 0xc2, 0xe3, 0x01, 0xcb, 0x1b, 0xf6, 0x81, 0x15, 0x59, 0x36, 0x25, 0xc9, - 0x6b, 0xc2, 0x3c, 0xa7, 0xef, 0x24, 0x64, 0xf4, 0x3a, 0x2c, 0x0b, 0x56, 0x1c, 0xdb, 0x56, 0x98, - 0x8c, 0xc0, 0x91, 0x2c, 0x36, 0x97, 0x78, 0xef, 0x6d, 0xde, 0xb9, 0xa3, 0xfa, 0x90, 0x06, 0x2d, - 0x9b, 0xf8, 0x3e, 0x0e, 0x68, 0xcc, 0x8f, 0xbf, 0xb6, 0x91, 0x7c, 0xa3, 0x1b, 0x70, 0xd1, 0xf2, - 0x3c, 0xf2, 0xa5, 0xc9, 0x47, 0x3a, 0x66, 0x61, 0x75, 0xa2, 0xf4, 0xd4, 0x38, 0xd3, 0x47, 0x9c, - 0xc7, 0xc8, 0x2e, 0x54, 0xbb, 0x0c, 0xed, 0x64, 0x1f, 0xd9, 0x8d, 0x21, 0xe5, 0x90, 0xbc, 0xad, - 0x9d, 0x83, 0x6e, 0x7a, 0x27, 0xb4, 0x7f, 0x36, 0x61, 0xb1, 0x24, 0xa8, 0xd0, 0x67, 0x00, 0xcc, - 0x5b, 0x45, 0x68, 0x49, 0x77, 0xfd, 0xc1, 0xe9, 0x83, 0x93, 0xf9, 0xab, 0x20, 0x1b, 0xcc, 0xfb, - 0x45, 0x13, 0xfd, 0x02, 0x3a, 0xdc, 0x63, 0xa5, 0x74, 0xe1, 0xb2, 0xef, 0x7e, 0x07, 0xe9, 0x6c, - 0xad, 0x52, 0x3c, 0x8f, 0x01, 0xd1, 0xd6, 0xfe, 0xde, 0x80, 0x76, 0xa2, 0x98, 0xdd, 0x7f, 0xc4, - 0x46, 0xf1, 0xbd, 0x8e, 0xd5, 0xfd, 0x87, 0xd3, 0xee, 0x70, 0xd2, 0xff, 0xa5, 0x2b, 0x69, 0x6f, - 0x02, 0x8c, 0xd6, 0x5f, 0xba, 0x84, 0x46, 0xe9, 0x12, 0xf4, 0x0d, 0x98, 0x63, 0x96, 0x75, 0xb1, - 0xb3, 0x47, 0x23, 0x37, 0xe4, 0x90, 0xae, 0xe0, 0x89, 0x65, 0x01, 0xa9, 0x3e, 0xb7, 0xbf, 0x59, - 0x81, 0x6e, 0xfa, 0x01, 0x0d, 0x7d, 0x0e, 0x9d, 0x14, 0x74, 0x8d, 0x9e, 0x2f, 0x6e, 0x5a, 0x11, - 0x0a, 0xd7, 0x5e, 0x18, 0xc3, 0x25, 0x6b, 0xac, 0x09, 0x14, 0xc0, 0xf9, 0x02, 0xfe, 0x8b, 0xae, - 0x16, 0x47, 0x57, 0xa1, 0xcb, 0xda, 0xcb, 0xb5, 0x78, 0x13, 0x7d, 0x14, 0x16, 0x4b, 0x00, 0x5d, - 0xf4, 0xca, 0x18, 0x29, 0x19, 0x50, 0x59, 0xbb, 0x56, 0x93, 0x3b, 0xd1, 0xfa, 0x18, 0x50, 0x11, - 0xed, 0x45, 0x2f, 0x8f, 0x15, 0x33, 0x42, 0x93, 0xb5, 0x57, 0xea, 0x31, 0x57, 0x2e, 0x54, 0xe0, - 0xc0, 0x63, 0x17, 0x9a, 0x41, 0x9a, 0xc7, 0x2e, 0x34, 0x07, 0x2e, 0x4f, 0xa0, 0x43, 0x58, 0xc8, - 0x63, 0xc4, 0x68, 0xa3, 0xea, 0x9f, 0x86, 0x02, 0x04, 0xad, 0x5d, 0xad, 0xc3, 0x9a, 0x28, 0xc3, - 0x70, 0x2e, 0x8b, 0xc9, 0xa2, 0x97, 0x8a, 0xe3, 0x4b, 0x51, 0x69, 0x6d, 0x7d, 0x3c, 0x63, 0x7a, - 0x4d, 0x79, 0x9c, 0xb6, 0x6c, 0x4d, 0x15, 0x20, 0x70, 0xd9, 0x9a, 0xaa, 0x60, 0x5f, 0x7d, 0x02, - 0x7d, 0xa5, 0xc0, 0xbf, 0x1c, 0x7e, 0x89, 0x36, 0xab, 0xc4, 0x94, 0x03, 0xa8, 0xda, 0x56, 0x6d, - 0x7e, 0xa5, 0xfb, 0x7a, 0x83, 0xc5, 0x7a, 0x0a, 0xc6, 0x2c, 0x8b, 0xf5, 0x22, 0x30, 0x5a, 0x16, - 0xeb, 0x65, 0x58, 0xe8, 0x04, 0xda, 0x87, 0xb9, 0x0c, 0xb0, 0x89, 0x5e, 0xac, 
0x1a, 0x99, 0x7d, - 0xff, 0xd3, 0x5e, 0x1a, 0xcb, 0x97, 0xe8, 0x30, 0x55, 0xf6, 0x92, 0xe9, 0xaa, 0x72, 0x72, 0xd9, - 0x7c, 0xf5, 0xe2, 0x38, 0xb6, 0x4c, 0x28, 0x17, 0xe0, 0xcf, 0xd2, 0x50, 0xae, 0x82, 0x57, 0x4b, - 0x43, 0xb9, 0x1a, 0x51, 0x9d, 0x40, 0x07, 0x30, 0x9f, 0x83, 0x3e, 0xd1, 0x7a, 0x95, 0x88, 0x3c, - 0xec, 0xaa, 0x6d, 0xd4, 0xe0, 0x4c, 0x34, 0xfd, 0x4c, 0x15, 0xdb, 0xdc, 0xe5, 0xae, 0x54, 0x0f, - 0x1d, 0xf9, 0xd9, 0xf3, 0x27, 0x33, 0x25, 0xa2, 0xbf, 0x84, 0xa5, 0xb2, 0x17, 0x31, 0x74, 0xad, - 0xac, 0x84, 0xaf, 0x7c, 0x76, 0xd3, 0x36, 0xeb, 0xb2, 0x27, 0x8a, 0x3f, 0x86, 0x96, 0x82, 0xff, - 0xd0, 0x73, 0xc5, 0xd1, 0x39, 0xc0, 0x54, 0xd3, 0x4f, 0x62, 0x49, 0x85, 0x8a, 0xaf, 0xb2, 0xc2, - 0x08, 0x97, 0xab, 0xce, 0x0a, 0x05, 0x04, 0xb1, 0x3a, 0x2b, 0x14, 0x61, 0x3e, 0xae, 0x2e, 0x71, - 0xbb, 0x34, 0x8c, 0x55, 0xed, 0x76, 0x25, 0x28, 0x5d, 0xb5, 0xdb, 0x95, 0x22, 0x63, 0x13, 0xe8, - 0x97, 0x0a, 0xca, 0xcf, 0xa3, 0x57, 0xa8, 0x32, 0xb7, 0x54, 0xa0, 0x68, 0xda, 0xf5, 0xfa, 0x03, - 0x12, 0xf5, 0x4f, 0x54, 0x26, 0xcc, 0xa1, 0x57, 0xd5, 0x99, 0xb0, 0x1c, 0x43, 0xd3, 0xb6, 0x6a, - 0xf3, 0x17, 0x83, 0x3c, 0x0d, 0xef, 0x54, 0x5b, 0xbb, 0x04, 0x11, 0xab, 0xb6, 0x76, 0x29, 0x62, - 0xc4, 0xe3, 0xa3, 0x0c, 0xba, 0x29, 0x8b, 0x8f, 0x13, 0xb0, 0x25, 0x6d, 0xb3, 0x2e, 0x7b, 0xe6, - 0xa2, 0x50, 0xc4, 0x66, 0xd0, 0xd8, 0xf9, 0x67, 0xce, 0x80, 0x6b, 0x35, 0xb9, 0xab, 0x77, 0x57, - 0x9d, 0x09, 0x63, 0x17, 0x90, 0x3b, 0x1b, 0xb6, 0x6a, 0xf3, 0x27, 0xba, 0x43, 0xf5, 0x63, 0x48, - 0x0a, 0x57, 0x41, 0x57, 0xc7, 0xc8, 0x49, 0xe1, 0x42, 0xda, 0xcb, 0xb5, 0x78, 0xcb, 0xa2, 0x37, - 0x8d, 0x74, 0x9c, 0xe4, 0x4f, 0x05, 0x78, 0xe6, 0x24, 0x7f, 0x2a, 0x01, 0x4f, 0x4a, 0xa2, 0x57, - 0x01, 0x1c, 0xe3, 0xa3, 0x37, 0x07, 0xb4, 0x8c, 0x8f, 0xde, 0x02, 0x76, 0x32, 0x81, 0x7e, 0x3d, - 0xfa, 0x61, 0xa0, 0xf8, 0xdc, 0x88, 0xb6, 0x2b, 0x53, 0x51, 0xe5, 0x2b, 0xab, 0xf6, 0xda, 0xa9, - 0xc6, 0xa4, 0x8c, 0xff, 0xbb, 0x86, 0x42, 0x1f, 0x4b, 0xdf, 0xfb, 0xd0, 0xeb, 0x35, 0x04, 0x17, - 0x9e, 0x2c, 0xb5, 0x37, 0x4e, 0x39, 0x2a, 0x35, 0xa1, 0x0f, 0x60, 0x9a, 0xd7, 0xb9, 0xe8, 0xd2, - 0xc9, 0x05, 0xb0, 0x76, 0xb9, 0xbc, 0x3f, 0x29, 0xe3, 0x98, 0xb4, 0xfd, 0x19, 0xfe, 0x93, 0xf2, - 0x6b, 0xff, 0x09, 0x00, 0x00, 0xff, 0xff, 0x66, 0x23, 0x9f, 0xad, 0xbb, 0x2c, 0x00, 0x00, + // 3173 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xd4, 0x3a, 0x4b, 0x73, 0xdc, 0xc6, + 0xd1, 0x5c, 0x2e, 0x1f, 0xbb, 0xbd, 0x4b, 0x91, 0x1a, 0xd2, 0xd4, 0x1a, 0x22, 0x25, 0x0a, 0xf2, + 0x83, 0x94, 0x2d, 0x52, 0xa6, 0xed, 0xcf, 0xfa, 0xe4, 0xcf, 0xfe, 0x22, 0x51, 0x0f, 0xcb, 0x16, + 0x29, 0x1b, 0x94, 0x65, 0x27, 0x76, 0x05, 0x05, 0x02, 0xb3, 0x24, 0x4c, 0x2c, 0x06, 0x02, 0x66, + 0x69, 0xad, 0xca, 0xb9, 0xc4, 0xa9, 0x4a, 0xaa, 0x52, 0xc9, 0x21, 0x95, 0x4b, 0xce, 0xb9, 0xe7, + 0x9a, 0xbf, 0xe0, 0x3f, 0x90, 0xaa, 0x9c, 0x72, 0xc9, 0x39, 0x87, 0xdc, 0x52, 0x95, 0x4b, 0x6a, + 0x5e, 0x58, 0x3c, 0xb9, 0xa0, 0xc5, 0x54, 0x2a, 0x37, 0x4c, 0x4f, 0x3f, 0xa6, 0x7b, 0xba, 0x7b, + 0x7a, 0xa6, 0x01, 0xf3, 0x47, 0xc4, 0xeb, 0xf7, 0xb0, 0x19, 0xe1, 0xf0, 0x08, 0x87, 0xeb, 0x41, + 0x48, 0x28, 0x41, 0x73, 0x29, 0xa0, 0x19, 0xec, 0xe9, 0x1b, 0x80, 0x6e, 0x59, 0xd4, 0x3e, 0xb8, + 0x8d, 0x3d, 0x4c, 0xb1, 0x81, 0x9f, 0xf4, 0x71, 0x44, 0xd1, 0x8b, 0xd0, 0xe8, 0xba, 0x1e, 0x36, + 0x5d, 0x27, 0xea, 0xd4, 0x56, 0xea, 0xab, 0x4d, 0x63, 0x9a, 0x8d, 0xef, 0x3b, 0x91, 0xfe, 0x10, + 0xe6, 0x53, 0x04, 0x51, 0x40, 0xfc, 0x08, 0xa3, 0xeb, 0x30, 0x1d, 0xe2, 0xa8, 0xef, 0x51, 0x41, + 0xd0, 0xda, 0xbc, 0xb0, 0x9e, 0x95, 0xb5, 0x1e, 0x93, 0xf4, 
0x3d, 0x6a, 0x28, 0x74, 0xfd, 0xdb, + 0x1a, 0xb4, 0x93, 0x33, 0xe8, 0x1c, 0x4c, 0x4b, 0xe1, 0x9d, 0xda, 0x4a, 0x6d, 0xb5, 0x69, 0x4c, + 0x09, 0xd9, 0x68, 0x11, 0xa6, 0x22, 0x6a, 0xd1, 0x7e, 0xd4, 0x19, 0x5f, 0xa9, 0xad, 0x4e, 0x1a, + 0x72, 0x84, 0x16, 0x60, 0x12, 0x87, 0x21, 0x09, 0x3b, 0x75, 0x8e, 0x2e, 0x06, 0x08, 0xc1, 0x44, + 0xe4, 0x3e, 0xc3, 0x9d, 0x89, 0x95, 0xda, 0xea, 0x8c, 0xc1, 0xbf, 0x51, 0x07, 0xa6, 0x8f, 0x70, + 0x18, 0xb9, 0xc4, 0xef, 0x4c, 0x72, 0xb0, 0x1a, 0xea, 0x1f, 0xc2, 0x99, 0xbb, 0xae, 0x87, 0xef, + 0x61, 0xaa, 0x6c, 0x50, 0xba, 0x8c, 0x8b, 0xd0, 0xb2, 0x6c, 0x1b, 0x07, 0xd4, 0xdc, 0x7f, 0xe6, + 0x06, 0x7c, 0x2d, 0x0d, 0x03, 0x04, 0xe8, 0xde, 0x33, 0x37, 0xd0, 0x7f, 0x5e, 0x87, 0xd9, 0x98, + 0x99, 0xb4, 0x0f, 0x82, 0x09, 0xc7, 0xa2, 0x16, 0x67, 0xd5, 0x36, 0xf8, 0x37, 0x7a, 0x19, 0xce, + 0xd8, 0xc4, 0xa7, 0xd8, 0xa7, 0xa6, 0x87, 0xfd, 0x7d, 0x7a, 0xc0, 0x79, 0xcd, 0x18, 0x33, 0x12, + 0xfa, 0x80, 0x03, 0xd1, 0x25, 0x68, 0x2b, 0x34, 0x3a, 0x08, 0xb0, 0xd4, 0xb2, 0x25, 0x61, 0x8f, + 0x06, 0x01, 0x46, 0x97, 0x61, 0xc6, 0xb3, 0x22, 0x6a, 0xf6, 0x88, 0xe3, 0x76, 0x5d, 0xec, 0x70, + 0xa5, 0x27, 0x8c, 0x36, 0x03, 0x6e, 0x4b, 0x18, 0xd2, 0xc4, 0xa6, 0xfa, 0x56, 0x0f, 0x73, 0xed, + 0x9b, 0x46, 0x3c, 0x66, 0xcb, 0xc3, 0xd4, 0xda, 0xef, 0x4c, 0x71, 0x38, 0xff, 0x46, 0xcb, 0x00, + 0x6e, 0xc4, 0x75, 0x0c, 0xb0, 0xd3, 0x99, 0xe6, 0x6a, 0x36, 0xdd, 0xe8, 0x9e, 0x00, 0xa0, 0x0f, + 0x60, 0xfa, 0x00, 0x5b, 0x0e, 0x0e, 0xa3, 0x4e, 0x83, 0xef, 0xf8, 0x7a, 0x7e, 0xc7, 0x33, 0x56, + 0x58, 0xff, 0x40, 0x10, 0xdc, 0xf1, 0x69, 0x38, 0x30, 0x14, 0x39, 0x5a, 0x82, 0x26, 0xdf, 0xb2, + 0x2d, 0xe2, 0xe0, 0x4e, 0x93, 0x6f, 0xed, 0x10, 0xa0, 0xdd, 0x80, 0x76, 0x92, 0x0c, 0xcd, 0x41, + 0xfd, 0x10, 0x0f, 0xe4, 0x9e, 0xb0, 0x4f, 0xb6, 0xff, 0x47, 0x96, 0xd7, 0xc7, 0xdc, 0x7c, 0x4d, + 0x43, 0x0c, 0x6e, 0x8c, 0x5f, 0xaf, 0xe9, 0xd3, 0x30, 0x79, 0xa7, 0x17, 0xd0, 0x81, 0xfe, 0x0e, + 0x74, 0x1e, 0x5b, 0x76, 0xbf, 0xdf, 0x7b, 0xcc, 0x97, 0xb8, 0x75, 0x80, 0xed, 0x43, 0xb5, 0xd1, + 0xe7, 0xa1, 0x29, 0x17, 0x2e, 0xb7, 0x7a, 0xc6, 0x68, 0x08, 0xc0, 0x7d, 0x47, 0xff, 0x01, 0xbc, + 0x58, 0x40, 0x28, 0x37, 0xf5, 0x32, 0xcc, 0xec, 0x5b, 0xe1, 0x9e, 0xb5, 0x8f, 0xcd, 0xd0, 0xa2, + 0x2e, 0xe1, 0xd4, 0x35, 0xa3, 0x2d, 0x81, 0x06, 0x83, 0xe9, 0x5f, 0x80, 0x96, 0xe2, 0x40, 0x7a, + 0x81, 0x65, 0xd3, 0x2a, 0xc2, 0xd1, 0x0a, 0xb4, 0x82, 0x10, 0x5b, 0x9e, 0x47, 0x6c, 0x8b, 0x0a, + 0xf5, 0xea, 0x46, 0x12, 0xa4, 0x2f, 0xc3, 0xf9, 0x42, 0xe6, 0x62, 0x81, 0xfa, 0xf5, 0xcc, 0xea, + 0x49, 0xaf, 0xe7, 0x56, 0x12, 0xad, 0x2f, 0xe5, 0x56, 0xcd, 0x29, 0x25, 0xdf, 0xff, 0xcd, 0xcc, + 0x7a, 0xd8, 0xf2, 0xfb, 0x41, 0x25, 0xc6, 0xd9, 0x15, 0x2b, 0xd2, 0x98, 0xf3, 0x39, 0x91, 0x0c, + 0xb6, 0x88, 0xe7, 0x61, 0x9b, 0xba, 0xc4, 0x57, 0x6c, 0x2f, 0x00, 0xd8, 0x31, 0x50, 0xee, 0x7f, + 0x02, 0xa2, 0x6b, 0xd0, 0xc9, 0x93, 0x4a, 0xb6, 0x7f, 0xa9, 0xc1, 0x0b, 0x37, 0xa5, 0xd1, 0x84, + 0xe0, 0x4a, 0x1b, 0x90, 0x16, 0x39, 0x9e, 0x15, 0x99, 0xdd, 0xa0, 0x7a, 0x6e, 0x83, 0x18, 0x46, + 0x88, 0x03, 0xcf, 0xb5, 0x2d, 0xce, 0x62, 0x42, 0xc4, 0x6e, 0x02, 0xc4, 0xfc, 0x99, 0x52, 0x4f, + 0x46, 0x24, 0xfb, 0x44, 0x9b, 0xb0, 0xd8, 0xc3, 0x3d, 0x12, 0x0e, 0xcc, 0x9e, 0x15, 0x98, 0x3d, + 0xeb, 0xa9, 0xc9, 0x92, 0x97, 0xd9, 0xdb, 0xe3, 0xe1, 0x39, 0x63, 0x20, 0x31, 0xbb, 0x6d, 0x05, + 0xdb, 0xd6, 0xd3, 0x5d, 0xf7, 0x19, 0xde, 0xde, 0xd3, 0x3b, 0xb0, 0x98, 0xd5, 0x4f, 0xaa, 0xfe, + 0x3f, 0x70, 0x4e, 0x40, 0x76, 0x07, 0xbe, 0xbd, 0xcb, 0x33, 0x66, 0xa5, 0x8d, 0xfa, 0x67, 0x0d, + 0x3a, 0x79, 0x42, 0xe9, 0xf9, 0xcf, 0x6b, 0xb5, 0x13, 0xdb, 0xe4, 0x22, 0xb4, 0xa8, 
0xe5, 0x7a, + 0x26, 0xe9, 0x76, 0x23, 0x4c, 0xb9, 0x21, 0x26, 0x0c, 0x60, 0xa0, 0x87, 0x1c, 0x82, 0xd6, 0x60, + 0xce, 0x16, 0xde, 0x6f, 0x86, 0xf8, 0xc8, 0xe5, 0x39, 0x7e, 0x9a, 0x2f, 0x6c, 0xd6, 0x56, 0x51, + 0x21, 0xc0, 0x48, 0x87, 0x19, 0xd7, 0x79, 0x6a, 0xf2, 0xec, 0xce, 0x8f, 0x88, 0x06, 0xe7, 0xd6, + 0x72, 0x9d, 0xa7, 0x2c, 0x61, 0x31, 0x8b, 0xea, 0x8f, 0x61, 0x49, 0x28, 0x7f, 0xdf, 0xb7, 0x43, + 0xdc, 0xc3, 0x3e, 0xb5, 0xbc, 0x2d, 0x12, 0x0c, 0x2a, 0xb9, 0xcd, 0x8b, 0xd0, 0x88, 0x5c, 0xdf, + 0xc6, 0xa6, 0x2f, 0x8e, 0xaa, 0x09, 0x63, 0x9a, 0x8f, 0x77, 0x22, 0xfd, 0x16, 0x2c, 0x97, 0xf0, + 0x95, 0x96, 0xbd, 0x04, 0x6d, 0xbe, 0x30, 0x99, 0xde, 0xe5, 0x81, 0xd1, 0x62, 0xb0, 0x2d, 0x01, + 0xd2, 0xdf, 0x00, 0x24, 0x78, 0x6c, 0x93, 0xbe, 0x5f, 0x2d, 0x9c, 0x5f, 0x80, 0xf9, 0x14, 0x89, + 0xf4, 0x8d, 0x37, 0x61, 0x41, 0x80, 0x3f, 0xf5, 0x7b, 0x95, 0x79, 0x9d, 0x83, 0x17, 0x32, 0x44, + 0x92, 0xdb, 0xa6, 0x12, 0x92, 0x2e, 0x26, 0x8e, 0x65, 0xb6, 0xa8, 0x56, 0x90, 0xae, 0x27, 0x78, + 0xe6, 0x12, 0x0b, 0xb6, 0xc2, 0x43, 0x03, 0x5b, 0x0e, 0xf1, 0xbd, 0x41, 0xe5, 0xcc, 0x55, 0x40, + 0x29, 0xf9, 0x7e, 0x06, 0x8b, 0x2a, 0xa3, 0xf9, 0x5d, 0x77, 0xbf, 0x1f, 0xe2, 0xaa, 0x99, 0x38, + 0xe9, 0xb2, 0xe3, 0x39, 0x97, 0xd5, 0x37, 0x54, 0x98, 0x25, 0x18, 0xcb, 0x2d, 0x8d, 0xeb, 0x93, + 0x5a, 0xa2, 0x3e, 0xd1, 0xff, 0x50, 0x83, 0xb3, 0x8a, 0xa2, 0xa2, 0x5f, 0x9d, 0x30, 0xb0, 0xea, + 0xa5, 0x81, 0x35, 0x31, 0x0c, 0xac, 0x55, 0x98, 0x8b, 0x48, 0x3f, 0xb4, 0xb1, 0xc9, 0x6a, 0x12, + 0xd3, 0x67, 0x67, 0xb0, 0x88, 0xbb, 0x33, 0x02, 0x7e, 0xdb, 0xa2, 0xd6, 0x0e, 0x71, 0xb0, 0xfe, + 0xff, 0xca, 0xed, 0x52, 0xfe, 0xba, 0x06, 0x67, 0x79, 0xe9, 0x61, 0x05, 0x01, 0xf6, 0x1d, 0xd3, + 0xa2, 0xcc, 0xe9, 0x6b, 0xdc, 0xe9, 0xcf, 0xb0, 0x89, 0x9b, 0x1c, 0x7e, 0x93, 0xee, 0x44, 0xfa, + 0x6f, 0xc7, 0x61, 0x96, 0xd1, 0xb2, 0x20, 0xab, 0xa4, 0xef, 0x1c, 0xd4, 0xf1, 0x53, 0x2a, 0x15, + 0x65, 0x9f, 0x68, 0x03, 0xe6, 0x65, 0x34, 0xbb, 0xc4, 0x1f, 0x06, 0x7a, 0x5d, 0xe4, 0xc5, 0xe1, + 0x54, 0x1c, 0xeb, 0x17, 0xa1, 0x15, 0x51, 0x12, 0xa8, 0xbc, 0x21, 0xea, 0x22, 0x60, 0x20, 0x99, + 0x37, 0xd2, 0x36, 0x9d, 0x2c, 0xb0, 0x69, 0xdb, 0x8d, 0x4c, 0x6c, 0x9b, 0x62, 0x55, 0x3c, 0xf3, + 0x34, 0x0c, 0x70, 0xa3, 0x3b, 0xb6, 0xb0, 0x06, 0x7a, 0x1f, 0x96, 0xdc, 0x7d, 0x9f, 0x84, 0xd8, + 0x94, 0x86, 0xe4, 0xf1, 0xeb, 0x13, 0x6a, 0x76, 0x49, 0xdf, 0x57, 0x95, 0x53, 0x47, 0xe0, 0xec, + 0x72, 0x14, 0x66, 0x81, 0x1d, 0x42, 0xef, 0xb2, 0x79, 0xfd, 0x6d, 0x98, 0x1b, 0x5a, 0xa5, 0x7a, + 0x16, 0xf8, 0xb6, 0xa6, 0x3c, 0xee, 0x91, 0xe5, 0x7a, 0xbb, 0xd8, 0x77, 0x70, 0xf8, 0x9c, 0xd9, + 0x09, 0x5d, 0x83, 0x05, 0xd7, 0xf1, 0xb0, 0x49, 0xdd, 0x1e, 0x26, 0x7d, 0x6a, 0x46, 0xd8, 0x26, + 0xbe, 0x13, 0x29, 0xfb, 0xb2, 0xb9, 0x47, 0x62, 0x6a, 0x57, 0xcc, 0xe8, 0x3f, 0x8b, 0x4f, 0x89, + 0xe4, 0x2a, 0x86, 0xf5, 0x91, 0x8f, 0x31, 0x63, 0x28, 0x4a, 0x3d, 0xa9, 0x46, 0x5b, 0x00, 0x45, + 0x55, 0xc7, 0x76, 0x48, 0x22, 0xed, 0x11, 0x67, 0xc0, 0x57, 0xd4, 0x36, 0x40, 0x80, 0x6e, 0x11, + 0x67, 0xc0, 0xd3, 0x75, 0x64, 0x72, 0x27, 0xb3, 0x0f, 0xfa, 0xfe, 0x21, 0x5f, 0x4d, 0xc3, 0x68, + 0xb9, 0xd1, 0x03, 0x2b, 0xa2, 0x5b, 0x0c, 0xa4, 0xff, 0xb1, 0xa6, 0xf2, 0x05, 0x5b, 0x86, 0x81, + 0x6d, 0xec, 0x1e, 0xfd, 0x07, 0xcc, 0xc1, 0x28, 0xa4, 0x13, 0xa4, 0x6a, 0x61, 0x19, 0x70, 0x48, + 0xcc, 0xc9, 0x53, 0x95, 0xcf, 0x0c, 0xd3, 0x55, 0x7a, 0xe1, 0x32, 0x5d, 0x7d, 0xa9, 0x8e, 0x8b, + 0x3b, 0xf6, 0xee, 0x81, 0x15, 0x3a, 0xd1, 0x3d, 0xec, 0xe3, 0xd0, 0xa2, 0xa7, 0x52, 0xbe, 0xe8, + 0x2b, 0x70, 0xa1, 0x8c, 0xbb, 0x94, 0xff, 0x85, 0x3a, 0x06, 0x15, 0x86, 0x81, 0xf7, 0xfa, 0xae, + 0xe7, 0x9c, 
0x8a, 0xf8, 0x8f, 0xb2, 0xca, 0xc5, 0xcc, 0xa5, 0xff, 0x5c, 0x81, 0xb3, 0x21, 0x07, + 0x51, 0x33, 0x62, 0x08, 0xf1, 0x7d, 0x74, 0xc6, 0x98, 0x95, 0x13, 0x9c, 0x90, 0xdd, 0x4b, 0x7f, + 0x39, 0xae, 0x3c, 0x40, 0x71, 0x3b, 0xb5, 0xb4, 0x7a, 0x1e, 0x9a, 0x43, 0xf1, 0x75, 0x2e, 0xbe, + 0x11, 0x49, 0xb9, 0xcc, 0x3b, 0x6d, 0x12, 0x0c, 0x4c, 0x6c, 0x8b, 0x8a, 0x82, 0x6f, 0x75, 0x83, + 0x5d, 0xcf, 0x82, 0xc1, 0x1d, 0x9b, 0x17, 0x14, 0xd5, 0x73, 0x6c, 0x82, 0xdb, 0x57, 0x82, 0xdb, + 0x54, 0x92, 0xdb, 0x57, 0x9c, 0x9b, 0xc2, 0x39, 0x72, 0xbb, 0x02, 0x67, 0x7a, 0x88, 0xf3, 0xd8, + 0xed, 0x32, 0x9c, 0xa1, 0x57, 0xa5, 0x8d, 0x21, 0x77, 0xf5, 0x6b, 0x38, 0x9f, 0x9e, 0xad, 0x7e, + 0x60, 0x3f, 0x97, 0xb1, 0xf4, 0x0b, 0x59, 0x77, 0xca, 0x9c, 0xfa, 0x47, 0xd9, 0x65, 0x57, 0xae, + 0x70, 0x9e, 0x6f, 0x5d, 0xcb, 0x59, 0x83, 0xa4, 0xcb, 0xa4, 0xcf, 0xb3, 0xcb, 0x3e, 0x41, 0xb9, + 0x74, 0xbc, 0xe0, 0x8b, 0xd9, 0x10, 0xc8, 0xd6, 0x54, 0xbf, 0x8b, 0xf3, 0xab, 0xc4, 0x60, 0x15, + 0x4d, 0xe5, 0xbc, 0x26, 0xe5, 0xca, 0x77, 0x85, 0x69, 0x29, 0x16, 0x2d, 0xc2, 0x94, 0x3c, 0x0f, + 0xc5, 0x8d, 0x45, 0x8e, 0x52, 0x4f, 0x26, 0x75, 0xf9, 0x64, 0xa2, 0x9e, 0x82, 0xd8, 0x9d, 0x7b, + 0x52, 0xa4, 0x47, 0x36, 0xfe, 0x08, 0x0f, 0xf4, 0x9d, 0x4c, 0xc4, 0x89, 0xa5, 0x1d, 0xf3, 0xe0, + 0x21, 0x5e, 0x14, 0x1c, 0xbe, 0xe7, 0x8e, 0x7c, 0x38, 0x69, 0xba, 0xd2, 0x09, 0x1c, 0xfd, 0x57, + 0xb5, 0x21, 0xc3, 0x5b, 0x1e, 0xd9, 0x3b, 0x45, 0xaf, 0x4c, 0x6a, 0x51, 0x4f, 0x69, 0x91, 0x7c, + 0x13, 0x9a, 0x48, 0xbf, 0x09, 0x25, 0x82, 0x28, 0xb9, 0x9c, 0xb2, 0xd4, 0xfc, 0x88, 0x9c, 0xde, + 0xcd, 0x32, 0x9f, 0x9a, 0x87, 0xdc, 0xa5, 0xfc, 0x1b, 0x70, 0x9e, 0x19, 0x5c, 0x40, 0xf9, 0xbd, + 0xa5, 0xfa, 0xdd, 0xee, 0x6f, 0xe3, 0xb0, 0x54, 0x4c, 0x5c, 0xe5, 0x7e, 0xf7, 0x2e, 0x68, 0xf1, + 0xfd, 0x89, 0x1d, 0x8d, 0x11, 0xb5, 0x7a, 0x41, 0x7c, 0x38, 0x8a, 0x33, 0xf4, 0x9c, 0xbc, 0x4c, + 0x3d, 0x52, 0xf3, 0xea, 0x84, 0xcc, 0x5d, 0xbe, 0xea, 0xb9, 0xcb, 0x17, 0x13, 0xe0, 0x58, 0xb4, + 0x4c, 0x80, 0xa8, 0xe1, 0xce, 0x39, 0x16, 0x2d, 0x13, 0x10, 0x13, 0x73, 0x01, 0xc2, 0x6b, 0x5b, + 0x12, 0x9f, 0x0b, 0x58, 0x06, 0x90, 0xe5, 0x55, 0xdf, 0x57, 0x97, 0xc9, 0xa6, 0x28, 0xae, 0xfa, + 0x7e, 0x69, 0x95, 0x39, 0x5d, 0x5a, 0x65, 0xa6, 0x77, 0xb3, 0x91, 0xdb, 0xcd, 0xcf, 0x01, 0x6e, + 0xbb, 0xd1, 0xa1, 0x30, 0x32, 0x2b, 0x6b, 0x1d, 0x57, 0xdd, 0x06, 0xd8, 0x27, 0x83, 0x58, 0x9e, + 0x27, 0x4d, 0xc7, 0x3e, 0x59, 0xf8, 0xf4, 0x23, 0xec, 0x48, 0xeb, 0xf0, 0x6f, 0x06, 0xeb, 0x86, + 0x18, 0x4b, 0x03, 0xf0, 0x6f, 0xfd, 0xf7, 0x35, 0x68, 0x6e, 0xe3, 0x9e, 0xe4, 0x7c, 0x01, 0x60, + 0x9f, 0x84, 0xa4, 0x4f, 0x5d, 0x1f, 0x8b, 0x2a, 0x7c, 0xd2, 0x48, 0x40, 0xbe, 0xbf, 0x1c, 0x9e, + 0x1a, 0xb0, 0xd7, 0x95, 0xc6, 0xe4, 0xdf, 0x0c, 0x76, 0x80, 0xad, 0x40, 0xda, 0x8f, 0x7f, 0xb3, + 0xbb, 0x4e, 0x44, 0x2d, 0xfb, 0x90, 0x1b, 0x6b, 0xc2, 0x10, 0x03, 0xfd, 0xcf, 0x35, 0x00, 0x03, + 0xf7, 0x08, 0xe5, 0xbe, 0xc6, 0xaa, 0xdb, 0x3d, 0xcb, 0x3e, 0x64, 0xf7, 0x05, 0xfe, 0xa2, 0x29, + 0x2c, 0xd1, 0x92, 0x30, 0xfe, 0xa2, 0xb9, 0x0c, 0xa0, 0x50, 0x64, 0xfe, 0x6a, 0x1a, 0x4d, 0x09, + 0x11, 0x37, 0x03, 0x15, 0xca, 0xf2, 0x11, 0x70, 0x98, 0xd3, 0xc4, 0xb2, 0x55, 0x4e, 0x3b, 0x0f, + 0xcd, 0xac, 0x2b, 0xf0, 0x54, 0xc0, 0xfd, 0xe0, 0x32, 0xcc, 0xa8, 0x27, 0x53, 0xee, 0x68, 0x52, + 0x95, 0xb6, 0x02, 0x32, 0xe7, 0xe2, 0xcf, 0x93, 0x4f, 0x29, 0xf6, 0x63, 0x1f, 0x68, 0x1a, 0x43, + 0x80, 0xfe, 0x0d, 0x80, 0xba, 0xd0, 0x77, 0x09, 0xda, 0x84, 0x49, 0xc6, 0x5c, 0x3d, 0x82, 0x2f, + 0xe5, 0x9f, 0x44, 0x87, 0x66, 0x30, 0x04, 0x6a, 0x32, 0x01, 0x8d, 0xa7, 0x12, 0xd0, 0xe8, 0xfb, + 0x9c, 0xfe, 0x5d, 0x0d, 0x56, 0x64, 
0xf9, 0xe8, 0xe2, 0x70, 0x9b, 0x1c, 0xb1, 0x52, 0xe2, 0x11, + 0x11, 0x42, 0x4e, 0x25, 0x73, 0x5e, 0x87, 0x8e, 0x83, 0x23, 0xea, 0xfa, 0x5c, 0xa0, 0xa9, 0x36, + 0x85, 0xbf, 0x22, 0x8b, 0x05, 0x2d, 0x26, 0xe6, 0x6f, 0x89, 0xe9, 0x1d, 0xab, 0x87, 0xd1, 0x55, + 0x98, 0x3f, 0xc4, 0x38, 0x30, 0x3d, 0x62, 0x5b, 0x9e, 0xa9, 0x62, 0x52, 0xd6, 0x47, 0x73, 0x6c, + 0xea, 0x01, 0x9b, 0xb9, 0x2d, 0xe2, 0x52, 0x8f, 0xe0, 0xd2, 0x31, 0x9a, 0xc8, 0xbc, 0xb4, 0x04, + 0xcd, 0x20, 0x24, 0x36, 0x8e, 0x98, 0xcf, 0xd6, 0xf8, 0x31, 0x35, 0x04, 0xa0, 0x6b, 0x30, 0x1f, + 0x0f, 0x3e, 0xc6, 0xa1, 0x8d, 0x7d, 0x6a, 0xed, 0x8b, 0x77, 0xd3, 0x71, 0xa3, 0x68, 0x4a, 0xff, + 0x4d, 0x0d, 0xf4, 0x9c, 0xd4, 0xbb, 0x21, 0xe9, 0x9d, 0xa2, 0x05, 0x37, 0x60, 0x81, 0xdb, 0x21, + 0xe4, 0x2c, 0x87, 0x86, 0x10, 0xd7, 0x98, 0xb3, 0x6c, 0x4e, 0x48, 0x53, 0x96, 0xe8, 0xc3, 0xe5, + 0x63, 0xd7, 0xf4, 0x6f, 0xb2, 0xc5, 0x3f, 0xda, 0xd0, 0xfe, 0xa4, 0x8f, 0xc3, 0x41, 0xe2, 0xc1, + 0x35, 0xc2, 0x52, 0x0b, 0xd5, 0x07, 0x4a, 0x40, 0x58, 0xa6, 0xed, 0x86, 0xa4, 0x67, 0xc6, 0xad, + 0xa2, 0x71, 0x8e, 0xd2, 0x62, 0xc0, 0xbb, 0xa2, 0x5d, 0x84, 0xde, 0x83, 0xa9, 0xae, 0xeb, 0x51, + 0x2c, 0x9a, 0x33, 0xad, 0xcd, 0x97, 0xf3, 0x11, 0x91, 0x94, 0xb9, 0x7e, 0x97, 0x23, 0x1b, 0x92, + 0x08, 0xed, 0xc1, 0xbc, 0xeb, 0x07, 0xfc, 0xea, 0x15, 0xba, 0x96, 0xe7, 0x3e, 0x1b, 0x3e, 0x19, + 0xb6, 0x36, 0xdf, 0x18, 0xc1, 0xeb, 0x3e, 0xa3, 0xdc, 0x4d, 0x12, 0x1a, 0xc8, 0xcd, 0xc1, 0x10, + 0x86, 0x05, 0xd2, 0xa7, 0x79, 0x21, 0x93, 0x5c, 0xc8, 0xe6, 0x08, 0x21, 0x0f, 0x39, 0x69, 0x5a, + 0xca, 0x3c, 0xc9, 0x03, 0xb5, 0x1d, 0x98, 0x12, 0xca, 0xb1, 0x1c, 0xd9, 0x75, 0xb1, 0xa7, 0xfa, + 0x4a, 0x62, 0xc0, 0xd2, 0x00, 0x09, 0x70, 0x68, 0xf9, 0x2a, 0xdd, 0xa9, 0xe1, 0xb0, 0xbf, 0x51, + 0x4f, 0xf4, 0x37, 0xb4, 0x3f, 0x4d, 0x02, 0xca, 0x6b, 0xa8, 0xde, 0x41, 0x43, 0x1c, 0xb1, 0x14, + 0x92, 0xcc, 0xaf, 0xb3, 0x09, 0x38, 0xcf, 0xb1, 0x9f, 0x41, 0xd3, 0x8e, 0x8e, 0x4c, 0x6e, 0x12, + 0x2e, 0xb3, 0xb5, 0x79, 0xe3, 0xc4, 0x26, 0x5d, 0xdf, 0xda, 0x7d, 0xcc, 0xa1, 0x46, 0xc3, 0x8e, + 0x8e, 0xf8, 0x17, 0xfa, 0x11, 0xc0, 0x57, 0x11, 0xf1, 0x25, 0x67, 0xb1, 0xf1, 0xef, 0x9e, 0x9c, + 0xf3, 0x87, 0xbb, 0x0f, 0x77, 0x04, 0xeb, 0x26, 0x63, 0x27, 0x78, 0xdb, 0x30, 0x13, 0x58, 0xe1, + 0x93, 0x3e, 0xa6, 0x92, 0xbd, 0xf0, 0x85, 0xf7, 0x4f, 0xce, 0xfe, 0x63, 0xc1, 0x46, 0x48, 0x68, + 0x07, 0x89, 0x91, 0xf6, 0xdd, 0x38, 0x34, 0x94, 0x5e, 0xec, 0xf6, 0xc6, 0x3d, 0x5c, 0xbc, 0x61, + 0x98, 0xae, 0xdf, 0x25, 0xd2, 0xa2, 0x67, 0x18, 0x5c, 0x3c, 0x63, 0xf0, 0xec, 0xbf, 0x06, 0x73, + 0x21, 0xb6, 0x49, 0xe8, 0xb0, 0x1a, 0xd7, 0xed, 0xb9, 0xcc, 0xed, 0xc5, 0x5e, 0xce, 0x0a, 0xf8, + 0x6d, 0x05, 0x46, 0xaf, 0xc2, 0x2c, 0xdf, 0xf6, 0x04, 0x66, 0x5d, 0xf1, 0xc4, 0x5e, 0x02, 0x71, + 0x0d, 0xe6, 0x9e, 0xf4, 0x59, 0xde, 0xb0, 0x0f, 0xac, 0xd0, 0xb2, 0x29, 0x89, 0x5f, 0x13, 0x66, + 0x39, 0x7c, 0x2b, 0x06, 0xa3, 0xb7, 0x60, 0x51, 0xa0, 0xe2, 0xc8, 0xb6, 0x82, 0x98, 0x02, 0x87, + 0xf2, 0xb2, 0xb9, 0xc0, 0x67, 0xef, 0xf0, 0xc9, 0x2d, 0x35, 0x87, 0x34, 0x68, 0xd8, 0xa4, 0xd7, + 0xc3, 0x3e, 0x8d, 0x64, 0xfb, 0x2f, 0x1e, 0xa3, 0x9b, 0xb0, 0x6c, 0x79, 0x1e, 0xf9, 0xda, 0xe4, + 0x94, 0x8e, 0x99, 0xd3, 0x4e, 0x5c, 0x3d, 0x35, 0x8e, 0xf4, 0x09, 0xc7, 0x31, 0xd2, 0x8a, 0x6a, + 0x17, 0xa1, 0x19, 0xef, 0x23, 0xab, 0x18, 0x12, 0x0e, 0xc9, 0xbf, 0xb5, 0x33, 0xd0, 0x4e, 0xee, + 0x84, 0xf6, 0xf7, 0x3a, 0xcc, 0x17, 0x04, 0x15, 0xfa, 0x02, 0x80, 0x79, 0xab, 0x08, 0x2d, 0xe9, + 0xae, 0xff, 0x77, 0xf2, 0xe0, 0x64, 0xfe, 0x2a, 0xc0, 0x06, 0xf3, 0x7e, 0xf1, 0x89, 0x7e, 0x0c, + 0x2d, 0xee, 0xb1, 0x92, 0xbb, 0x70, 0xd9, 0xf7, 0xbe, 0x07, 
0x77, 0xa6, 0xab, 0x64, 0xcf, 0x63, + 0x40, 0x7c, 0x6b, 0x7f, 0xad, 0x41, 0x33, 0x16, 0xcc, 0xea, 0x1f, 0xb1, 0x51, 0x7c, 0xaf, 0x23, + 0x55, 0xff, 0x70, 0xd8, 0x5d, 0x0e, 0xfa, 0xaf, 0x74, 0x25, 0xed, 0x1d, 0x80, 0xa1, 0xfe, 0x85, + 0x2a, 0xd4, 0x0a, 0x55, 0xd0, 0xd7, 0x60, 0x86, 0x59, 0xd6, 0xc5, 0xce, 0x2e, 0x0d, 0xdd, 0x80, + 0x37, 0xea, 0x05, 0x4e, 0x24, 0x2f, 0x90, 0x6a, 0xb8, 0xf9, 0xd3, 0x25, 0x68, 0x27, 0x1f, 0xd0, + 0xd0, 0x97, 0xd0, 0x4a, 0xfc, 0x90, 0x80, 0x5e, 0xca, 0x6f, 0x5a, 0xfe, 0x07, 0x07, 0xed, 0xe5, + 0x11, 0x58, 0xf2, 0x8e, 0x35, 0x86, 0x0c, 0x98, 0x96, 0x4d, 0x6c, 0xb4, 0x72, 0x4c, 0x7f, 0x5b, + 0x70, 0xbd, 0x34, 0xb2, 0x03, 0xae, 0x8f, 0x5d, 0xab, 0x21, 0x1f, 0xce, 0xe6, 0x7a, 0xca, 0xe8, + 0x4a, 0x9e, 0xb6, 0xac, 0x63, 0xad, 0xbd, 0x56, 0x09, 0x37, 0xd6, 0x81, 0xc2, 0x7c, 0x41, 0x93, + 0x18, 0xbd, 0x3e, 0x82, 0x4b, 0xaa, 0x51, 0xad, 0x5d, 0xad, 0x88, 0x1d, 0x4b, 0x7d, 0x02, 0x28, + 0xdf, 0x41, 0x46, 0xaf, 0x8d, 0x64, 0x33, 0xec, 0x50, 0x6b, 0xaf, 0x57, 0x43, 0x2e, 0x55, 0x54, + 0xf4, 0x96, 0x47, 0x2a, 0x9a, 0xea, 0x5e, 0x8f, 0x54, 0x34, 0xd3, 0xb0, 0x1e, 0x43, 0x87, 0x30, + 0x97, 0xed, 0x3b, 0xa3, 0xb5, 0xb2, 0xbf, 0x5f, 0x72, 0x6d, 0x6d, 0xed, 0x4a, 0x15, 0xd4, 0x58, + 0x18, 0x86, 0x33, 0xe9, 0x3e, 0x2f, 0x7a, 0x35, 0x4f, 0x5f, 0xd8, 0xe9, 0xd6, 0x56, 0x47, 0x23, + 0x26, 0x75, 0xca, 0xf6, 0x7e, 0x8b, 0x74, 0x2a, 0x69, 0x2c, 0x17, 0xe9, 0x54, 0xd6, 0x4a, 0xd6, + 0xc7, 0xd0, 0x37, 0xaa, 0xa1, 0x98, 0xe9, 0x89, 0xa2, 0xf5, 0x32, 0x36, 0xc5, 0x4d, 0x59, 0x6d, + 0xa3, 0x32, 0x7e, 0x22, 0x1a, 0xbf, 0x84, 0x56, 0xa2, 0x35, 0x5a, 0x94, 0x3f, 0xf2, 0xcd, 0xd6, + 0xa2, 0xfc, 0x51, 0xd4, 0x5f, 0x1d, 0x43, 0x7b, 0x30, 0x93, 0x6a, 0x96, 0xa2, 0x57, 0xca, 0x28, + 0xd3, 0x6f, 0x8a, 0xda, 0xab, 0x23, 0xf1, 0x62, 0x19, 0xa6, 0xca, 0x88, 0x32, 0x05, 0x96, 0x2e, + 0x2e, 0x9d, 0x03, 0x5f, 0x19, 0x85, 0x96, 0x0a, 0xe5, 0x5c, 0x4b, 0xb5, 0x30, 0x94, 0xcb, 0x5a, + 0xb6, 0x85, 0xa1, 0x5c, 0xde, 0xa5, 0x1d, 0x43, 0x07, 0x30, 0x9b, 0x69, 0xa7, 0xa2, 0xd5, 0x32, + 0x16, 0xd9, 0x56, 0xae, 0xb6, 0x56, 0x01, 0x33, 0x96, 0xf4, 0x43, 0x75, 0x81, 0xe7, 0x2e, 0x77, + 0xb9, 0x9c, 0x74, 0xe8, 0x67, 0x2f, 0x1d, 0x8f, 0x14, 0xb3, 0xfe, 0x1a, 0x16, 0x8a, 0x5e, 0xd9, + 0xd0, 0xd5, 0xa2, 0x67, 0x81, 0xd2, 0xa7, 0x3c, 0x6d, 0xbd, 0x2a, 0x7a, 0x2c, 0xf8, 0x53, 0x68, + 0xa8, 0x96, 0x22, 0x2a, 0x38, 0x94, 0x32, 0x4d, 0x58, 0x4d, 0x3f, 0x0e, 0x25, 0x11, 0x2a, 0x3d, + 0x95, 0x15, 0x86, 0xbd, 0xbe, 0xf2, 0xac, 0x90, 0xeb, 0x4a, 0x96, 0x67, 0x85, 0x7c, 0xeb, 0x90, + 0x8b, 0x8b, 0xdd, 0x2e, 0xd9, 0x1a, 0x2b, 0x77, 0xbb, 0x82, 0xce, 0x5f, 0xb9, 0xdb, 0x15, 0x76, + 0xdb, 0xc6, 0xd0, 0x4f, 0xd4, 0xef, 0x01, 0xd9, 0x8e, 0x18, 0x2a, 0xcd, 0x2d, 0x25, 0x9d, 0x39, + 0xed, 0x5a, 0x75, 0x82, 0x58, 0xfc, 0x33, 0x95, 0x09, 0x33, 0x1d, 0xb1, 0xf2, 0x4c, 0x58, 0xdc, + 0x97, 0xd3, 0x36, 0x2a, 0xe3, 0xe7, 0x83, 0x3c, 0xd9, 0x32, 0x2a, 0xb7, 0x76, 0x41, 0x97, 0xad, + 0xdc, 0xda, 0x85, 0x5d, 0x28, 0x1e, 0x1f, 0x45, 0xed, 0xa0, 0xa2, 0xf8, 0x38, 0xa6, 0x5f, 0xa5, + 0xad, 0x57, 0x45, 0x4f, 0x15, 0x0a, 0xf9, 0x7e, 0x0f, 0x1a, 0xb9, 0xfe, 0xd4, 0x19, 0x70, 0xb5, + 0x22, 0x76, 0xf9, 0xee, 0xaa, 0x33, 0x61, 0xa4, 0x02, 0x99, 0xb3, 0x61, 0xa3, 0x32, 0x7e, 0x2c, + 0x3b, 0x50, 0x3f, 0x9b, 0x24, 0x7a, 0x35, 0xe8, 0xca, 0x08, 0x3e, 0x89, 0x5e, 0x93, 0xf6, 0x5a, + 0x25, 0xdc, 0xa2, 0xe8, 0x4d, 0x76, 0x4f, 0x8e, 0xf3, 0xa7, 0x5c, 0xcb, 0xe7, 0x38, 0x7f, 0x2a, + 0x68, 0xc8, 0x14, 0x44, 0xaf, 0x6a, 0x9a, 0x8c, 0x8e, 0xde, 0x4c, 0xf3, 0x66, 0x74, 0xf4, 0xe6, + 0xfa, 0x31, 0x63, 0xe8, 0x17, 0xc3, 0x9f, 0x10, 0xf2, 0x4f, 0x98, 0x68, 0xb3, 0x34, 
0x15, 0x95, + 0xbe, 0xdc, 0x6a, 0x6f, 0x9e, 0x88, 0x26, 0x61, 0xfc, 0x5f, 0xd7, 0x54, 0x47, 0xb3, 0xf0, 0x0d, + 0x11, 0xbd, 0x55, 0x81, 0x71, 0xee, 0x19, 0x54, 0x7b, 0xfb, 0x84, 0x54, 0x89, 0x05, 0x3d, 0x80, + 0x49, 0x7e, 0x77, 0x46, 0x17, 0x8e, 0xbf, 0x54, 0x6b, 0x17, 0x8b, 0xe7, 0xe3, 0xab, 0x21, 0xe3, + 0xb6, 0x37, 0xc5, 0x7f, 0x67, 0x7f, 0xf3, 0x5f, 0x01, 0x00, 0x00, 0xff, 0xff, 0xdd, 0x00, 0x24, + 0xb7, 0xe5, 0x2e, 0x00, 0x00, } diff --git a/weed/server/volume_grpc_file.go b/weed/server/volume_grpc_file.go new file mode 100644 index 000000000..c20aeb60f --- /dev/null +++ b/weed/server/volume_grpc_file.go @@ -0,0 +1,130 @@ +package weed_server + +import ( + "context" + "encoding/json" + "net/http" + "strings" + + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb" + "github.com/chrislusf/seaweedfs/weed/storage/needle" + "github.com/chrislusf/seaweedfs/weed/util" +) + +func (vs *VolumeServer) FileGet(req *volume_server_pb.FileGetRequest, stream volume_server_pb.VolumeServer_FileGetServer) error { + + headResponse := &volume_server_pb.FileGetResponse{} + n := new(needle.Needle) + + commaIndex := strings.LastIndex(req.FileId, ",") + vid := req.FileId[:commaIndex] + fid := req.FileId[commaIndex+1:] + + volumeId, err := needle.NewVolumeId(vid) + if err != nil { + headResponse.ErrorCode = http.StatusBadRequest + return stream.Send(headResponse) + } + err = n.ParsePath(fid) + if err != nil { + headResponse.ErrorCode = http.StatusBadRequest + return stream.Send(headResponse) + } + + hasVolume := vs.store.HasVolume(volumeId) + _, hasEcVolume := vs.store.FindEcVolume(volumeId) + + if !hasVolume && !hasEcVolume { + headResponse.ErrorCode = http.StatusMovedPermanently + return stream.Send(headResponse) + } + + cookie := n.Cookie + var count int + if hasVolume { + count, err = vs.store.ReadVolumeNeedle(volumeId, n) + } else if hasEcVolume { + count, err = vs.store.ReadEcShardNeedle(context.Background(), volumeId, n) + } + + if err != nil || count < 0 { + headResponse.ErrorCode = http.StatusNotFound + return stream.Send(headResponse) + } + if n.Cookie != cookie { + headResponse.ErrorCode = http.StatusNotFound + return stream.Send(headResponse) + } + + if n.LastModified != 0 { + headResponse.LastModified = n.LastModified + } + + headResponse.Etag = n.Etag() + + if n.HasPairs() { + pairMap := make(map[string]string) + err = json.Unmarshal(n.Pairs, &pairMap) + if err != nil { + glog.V(0).Infoln("Unmarshal pairs error:", err) + } + headResponse.Headers = pairMap + } + + /* + // skip this, no redirection + if vs.tryHandleChunkedFile(n, filename, w, r) { + return + } + */ + + if n.NameSize > 0 { + headResponse.Filename = string(n.Name) + } + mtype := "" + if n.MimeSize > 0 { + mt := string(n.Mime) + if !strings.HasPrefix(mt, "application/octet-stream") { + mtype = mt + } + } + headResponse.ContentType = mtype + + headResponse.IsGzipped = n.IsGzipped() + + if n.IsGzipped() && req.AcceptGzip { + if n.Data, err = util.UnGzipData(n.Data); err != nil { + glog.V(0).Infof("ungzip %s error: %v", req.FileId, err) + } + } + + headResponse.ContentLength = uint32(len(n.Data)) + bytesToRead := len(n.Data) + bytesRead := 0 + + t := headResponse + + for bytesRead < bytesToRead { + + stopIndex := bytesRead + BufferSizeLimit + if stopIndex > bytesToRead { + stopIndex = bytesToRead + } + + if t == nil { + t = &volume_server_pb.FileGetResponse{} + } + t.Data = n.Data[bytesRead:stopIndex] + + err = stream.Send(t) + t = nil + if err != nil { + return err + } + + bytesRead = 
stopIndex + } + + return nil +} From cf5064d7025c6b74ac721b8914a76d5f1d155836 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Fri, 14 Feb 2020 09:09:15 -0800 Subject: [PATCH 0120/2432] properly close http response --- weed/command/download.go | 1 + weed/s3api/s3api_object_copy_handlers.go | 2 ++ weed/s3api/s3api_object_handlers.go | 5 ++--- weed/s3api/s3api_object_multipart_handlers.go | 1 + weed/util/http_util.go | 19 ++++++++++++------- 5 files changed, 18 insertions(+), 10 deletions(-) diff --git a/weed/command/download.go b/weed/command/download.go index b3e33defd..be0eb47e5 100644 --- a/weed/command/download.go +++ b/weed/command/download.go @@ -71,6 +71,7 @@ func downloadToFile(server, fileId, saveDir string) error { } f, err := os.OpenFile(path.Join(saveDir, filename), os.O_WRONLY|os.O_CREATE|os.O_TRUNC, os.ModePerm) if err != nil { + io.Copy(ioutil.Discard, rc) return err } defer f.Close() diff --git a/weed/s3api/s3api_object_copy_handlers.go b/weed/s3api/s3api_object_copy_handlers.go index 5e0fa5de1..b18ab329c 100644 --- a/weed/s3api/s3api_object_copy_handlers.go +++ b/weed/s3api/s3api_object_copy_handlers.go @@ -48,6 +48,7 @@ func (s3a *S3ApiServer) CopyObjectHandler(w http.ResponseWriter, r *http.Request writeErrorResponse(w, ErrInvalidCopySource, r.URL) return } + defer dataReader.Close() etag, errCode := s3a.putToFiler(r, dstUrl, dataReader) @@ -129,6 +130,7 @@ func (s3a *S3ApiServer) CopyObjectPartHandler(w http.ResponseWriter, r *http.Req writeErrorResponse(w, ErrInvalidCopySource, r.URL) return } + defer dataReader.Close() etag, errCode := s3a.putToFiler(r, dstUrl, dataReader) diff --git a/weed/s3api/s3api_object_handlers.go b/weed/s3api/s3api_object_handlers.go index 8dc733eb9..b8df86aee 100644 --- a/weed/s3api/s3api_object_handlers.go +++ b/weed/s3api/s3api_object_handlers.go @@ -44,6 +44,7 @@ func (s3a *S3ApiServer) PutObjectHandler(w http.ResponseWriter, r *http.Request) if rAuthType == authTypeStreamingSigned { dataReader = newSignV4ChunkedReader(r) } + defer dataReader.Close() uploadUrl := fmt.Sprintf("http://%s%s/%s%s?collection=%s", s3a.option.Filer, s3a.option.BucketsPath, bucket, object, bucket) @@ -156,7 +157,7 @@ func passThroughResponse(proxyResonse *http.Response, w http.ResponseWriter) { io.Copy(w, proxyResonse.Body) } -func (s3a *S3ApiServer) putToFiler(r *http.Request, uploadUrl string, dataReader io.ReadCloser) (etag string, code ErrorCode) { +func (s3a *S3ApiServer) putToFiler(r *http.Request, uploadUrl string, dataReader io.Reader) (etag string, code ErrorCode) { hash := md5.New() var body io.Reader = io.TeeReader(dataReader, hash) @@ -179,8 +180,6 @@ func (s3a *S3ApiServer) putToFiler(r *http.Request, uploadUrl string, dataReader resp, postErr := client.Do(proxyReq) - dataReader.Close() - if postErr != nil { glog.Errorf("post to filer: %v", postErr) return "", ErrInternalError diff --git a/weed/s3api/s3api_object_multipart_handlers.go b/weed/s3api/s3api_object_multipart_handlers.go index 72a25e4a5..3588e14d7 100644 --- a/weed/s3api/s3api_object_multipart_handlers.go +++ b/weed/s3api/s3api_object_multipart_handlers.go @@ -199,6 +199,7 @@ func (s3a *S3ApiServer) PutObjectPartHandler(w http.ResponseWriter, r *http.Requ if rAuthType == authTypeStreamingSigned { dataReader = newSignV4ChunkedReader(r) } + defer dataReader.Close() uploadUrl := fmt.Sprintf("http://%s%s/%s/%04d.part?collection=%s", s3a.option.Filer, s3a.genUploadsFolder(bucket), uploadID, partID-1, bucket) diff --git a/weed/util/http_util.go b/weed/util/http_util.go index 08007a038..38202f4de 
100644 --- a/weed/util/http_util.go +++ b/weed/util/http_util.go @@ -35,13 +35,13 @@ func PostBytes(url string, body []byte) ([]byte, error) { return nil, fmt.Errorf("Post to %s: %v", url, err) } defer r.Body.Close() - if r.StatusCode >= 400 { - return nil, fmt.Errorf("%s: %s", url, r.Status) - } b, err := ioutil.ReadAll(r.Body) if err != nil { return nil, fmt.Errorf("Read response body: %v", err) } + if r.StatusCode >= 400 { + return nil, fmt.Errorf("%s: %s", url, r.Status) + } return b, nil } @@ -88,7 +88,7 @@ func Head(url string) (http.Header, error) { if err != nil { return nil, err } - defer r.Body.Close() + defer closeResp(r) if r.StatusCode >= 400 { return nil, fmt.Errorf("%s: %s", url, r.Status) } @@ -130,7 +130,7 @@ func GetBufferStream(url string, values url.Values, allocatedBytes []byte, eachB if err != nil { return err } - defer r.Body.Close() + defer closeResp(r) if r.StatusCode != 200 { return fmt.Errorf("%s: %s", url, r.Status) } @@ -153,7 +153,7 @@ func GetUrlStream(url string, values url.Values, readFn func(io.Reader) error) e if err != nil { return err } - defer r.Body.Close() + defer closeResp(r) if r.StatusCode != 200 { return fmt.Errorf("%s: %s", url, r.Status) } @@ -262,7 +262,7 @@ func ReadUrlAsStream(fileUrl string, offset int64, size int, fn func(data []byte if err != nil { return 0, err } - defer r.Body.Close() + defer closeResp(r) if r.StatusCode >= 400 { return 0, fmt.Errorf("%s: %s", fileUrl, r.Status) } @@ -307,3 +307,8 @@ func ReadUrlAsReaderCloser(fileUrl string, rangeHeader string) (io.ReadCloser, e return r.Body, nil } + +func closeResp(resp *http.Response) { + io.Copy(ioutil.Discard, resp.Body) + resp.Body.Close() +} From 96c1ae84719e934231e0a81c89917d1190be5e39 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Fri, 14 Feb 2020 09:46:36 -0800 Subject: [PATCH 0121/2432] refactoring the close http response --- weed/s3api/s3api_object_handlers.go | 4 +++- weed/util/http_util.go | 10 +++++----- 2 files changed, 8 insertions(+), 6 deletions(-) diff --git a/weed/s3api/s3api_object_handlers.go b/weed/s3api/s3api_object_handlers.go index b8df86aee..1f1c181ec 100644 --- a/weed/s3api/s3api_object_handlers.go +++ b/weed/s3api/s3api_object_handlers.go @@ -13,6 +13,7 @@ import ( "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/server" + "github.com/chrislusf/seaweedfs/weed/util" ) var ( @@ -145,9 +146,10 @@ func (s3a *S3ApiServer) proxyToFiler(w http.ResponseWriter, r *http.Request, des writeErrorResponse(w, ErrInternalError, r.URL) return } - defer resp.Body.Close() + defer util.CloseResponse(resp) responseFn(resp, w) + } func passThroughResponse(proxyResonse *http.Response, w http.ResponseWriter) { for k, v := range proxyResonse.Header { diff --git a/weed/util/http_util.go b/weed/util/http_util.go index 38202f4de..f819d8497 100644 --- a/weed/util/http_util.go +++ b/weed/util/http_util.go @@ -88,7 +88,7 @@ func Head(url string) (http.Header, error) { if err != nil { return nil, err } - defer closeResp(r) + defer CloseResponse(r) if r.StatusCode >= 400 { return nil, fmt.Errorf("%s: %s", url, r.Status) } @@ -130,7 +130,7 @@ func GetBufferStream(url string, values url.Values, allocatedBytes []byte, eachB if err != nil { return err } - defer closeResp(r) + defer CloseResponse(r) if r.StatusCode != 200 { return fmt.Errorf("%s: %s", url, r.Status) } @@ -153,7 +153,7 @@ func GetUrlStream(url string, values url.Values, readFn func(io.Reader) error) e if err != nil { return err } - defer closeResp(r) + defer CloseResponse(r) if r.StatusCode != 200 
{ return fmt.Errorf("%s: %s", url, r.Status) } @@ -262,7 +262,7 @@ func ReadUrlAsStream(fileUrl string, offset int64, size int, fn func(data []byte if err != nil { return 0, err } - defer closeResp(r) + defer CloseResponse(r) if r.StatusCode >= 400 { return 0, fmt.Errorf("%s: %s", fileUrl, r.Status) } @@ -308,7 +308,7 @@ func ReadUrlAsReaderCloser(fileUrl string, rangeHeader string) (io.ReadCloser, e return r.Body, nil } -func closeResp(resp *http.Response) { +func CloseResponse(resp *http.Response) { io.Copy(ioutil.Discard, resp.Body) resp.Body.Close() } From 9232d3ac68a8d23a7f67ec66d8e227c362431507 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Fri, 14 Feb 2020 10:28:02 -0800 Subject: [PATCH 0122/2432] ignore error when counting closed connections --- weed/util/net_timeout.go | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/weed/util/net_timeout.go b/weed/util/net_timeout.go index b8068e67f..8acd50d42 100644 --- a/weed/util/net_timeout.go +++ b/weed/util/net_timeout.go @@ -66,11 +66,8 @@ func (c *Conn) Write(b []byte) (count int, e error) { } func (c *Conn) Close() error { - err := c.Conn.Close() - if err == nil { - stats.ConnectionClose() - } - return err + stats.ConnectionClose() + return c.Conn.Close() } func NewListener(addr string, timeout time.Duration) (net.Listener, error) { From 4cdde5f569419535b4ded8b0e54d18ead13a6458 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Mon, 17 Feb 2020 12:31:59 -0800 Subject: [PATCH 0123/2432] configuration stores the identity list --- weed/pb/iam.proto | 2 +- weed/pb/iam_pb/iam.pb.go | 47 +++++++++++++++-------------- weed/s3api/auth_credentials.go | 10 +++--- weed/s3api/auth_credentials_test.go | 10 +++--- 4 files changed, 35 insertions(+), 34 deletions(-) diff --git a/weed/pb/iam.proto b/weed/pb/iam.proto index ddb4e5e5d..2eef22dd9 100644 --- a/weed/pb/iam.proto +++ b/weed/pb/iam.proto @@ -13,7 +13,7 @@ service SeaweedIdentityAccessManagement { ////////////////////////////////////////////////// -message Identities { +message S3ApiConfiguration { repeated Identity identities = 1; } diff --git a/weed/pb/iam_pb/iam.pb.go b/weed/pb/iam_pb/iam.pb.go index 53cef9cc1..7f796677c 100644 --- a/weed/pb/iam_pb/iam.pb.go +++ b/weed/pb/iam_pb/iam.pb.go @@ -35,16 +35,16 @@ var _ = math.Inf // proto package needs to be updated. 
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package -type Identities struct { +type S3ApiConfiguration struct { Identities []*Identity `protobuf:"bytes,1,rep,name=identities" json:"identities,omitempty"` } -func (m *Identities) Reset() { *m = Identities{} } -func (m *Identities) String() string { return proto.CompactTextString(m) } -func (*Identities) ProtoMessage() {} -func (*Identities) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } +func (m *S3ApiConfiguration) Reset() { *m = S3ApiConfiguration{} } +func (m *S3ApiConfiguration) String() string { return proto.CompactTextString(m) } +func (*S3ApiConfiguration) ProtoMessage() {} +func (*S3ApiConfiguration) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } -func (m *Identities) GetIdentities() []*Identity { +func (m *S3ApiConfiguration) GetIdentities() []*Identity { if m != nil { return m.Identities } @@ -108,7 +108,7 @@ func (m *Credential) GetSecretKey() string { } func init() { - proto.RegisterType((*Identities)(nil), "iam_pb.Identities") + proto.RegisterType((*S3ApiConfiguration)(nil), "iam_pb.Identities") proto.RegisterType((*Identity)(nil), "iam_pb.Identity") proto.RegisterType((*Credential)(nil), "iam_pb.Credential") } @@ -154,20 +154,21 @@ var _SeaweedIdentityAccessManagement_serviceDesc = grpc.ServiceDesc{ func init() { proto.RegisterFile("iam.proto", fileDescriptor0) } var fileDescriptor0 = []byte{ - // 238 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x4c, 0x90, 0xc1, 0x4a, 0xc3, 0x40, - 0x10, 0x86, 0x49, 0x23, 0xb5, 0x99, 0x5e, 0xca, 0x9c, 0xf6, 0xa0, 0x18, 0x73, 0xca, 0x29, 0x48, - 0xf5, 0x2c, 0xa8, 0xa7, 0x5a, 0x04, 0x89, 0x0f, 0x50, 0xa6, 0xdb, 0x51, 0x16, 0xbb, 0x9b, 0x92, - 0x59, 0x90, 0xbc, 0xbd, 0x64, 0xb7, 0xdb, 0xf6, 0xb6, 0xf3, 0x7f, 0xdf, 0xcc, 0x32, 0x03, 0x85, - 0x21, 0xdb, 0x1c, 0xfa, 0xce, 0x77, 0x38, 0x35, 0x64, 0x37, 0x87, 0x6d, 0xf5, 0x0c, 0xb0, 0xda, - 0xb1, 0xf3, 0xc6, 0x1b, 0x16, 0x7c, 0x00, 0x30, 0xa7, 0x4a, 0x65, 0x65, 0x5e, 0xcf, 0x97, 0x8b, - 0x26, 0xaa, 0xcd, 0xd1, 0x1b, 0xda, 0x0b, 0xa7, 0x72, 0x30, 0x4b, 0x39, 0x22, 0x5c, 0x39, 0xb2, - 0xac, 0xb2, 0x32, 0xab, 0x8b, 0x36, 0xbc, 0xf1, 0x09, 0xe6, 0xba, 0xe7, 0x60, 0xd0, 0x5e, 0xd4, - 0x24, 0x8c, 0xc4, 0x34, 0xf2, 0xed, 0x84, 0xda, 0x4b, 0x0d, 0x15, 0x5c, 0x93, 0xf6, 0xa6, 0x73, - 0xa2, 0xf2, 0x32, 0xaf, 0x8b, 0x36, 0x95, 0xd5, 0x3b, 0xc0, 0xb9, 0x09, 0x6f, 0x01, 0x48, 0x6b, - 0x16, 0xd9, 0xfc, 0xf2, 0x70, 0xfc, 0xb7, 0x88, 0xc9, 0x9a, 0x87, 0x11, 0x0b, 0xeb, 0x9e, 0x7d, - 0xc0, 0x93, 0x88, 0x63, 0xb2, 0xe6, 0x61, 0x79, 0x0f, 0x77, 0x5f, 0x4c, 0x7f, 0xcc, 0xbb, 0xb4, - 0xc2, 0x4b, 0x68, 0xfd, 0x20, 0x47, 0x3f, 0x6c, 0xd9, 0xf9, 0xd7, 0x1b, 0x58, 0x48, 0x54, 0xbe, - 0xa5, 0xd1, 0x7b, 0x33, 0x66, 0xb3, 0x15, 0xd9, 0xcf, 0xf1, 0x88, 0xdb, 0x69, 0xb8, 0xe5, 0xe3, - 0x7f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x65, 0xb9, 0x71, 0x4e, 0x58, 0x01, 0x00, 0x00, + // 250 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x4c, 0x90, 0x41, 0x4b, 0xc3, 0x40, + 0x10, 0x85, 0x69, 0x23, 0xb5, 0x99, 0x5e, 0xca, 0x9c, 0xf6, 0xa0, 0x18, 0x73, 0xca, 0x29, 0x48, + 0xeb, 0x1f, 0xa8, 0x05, 0xa1, 0x16, 0x41, 0xd2, 0x1f, 0x50, 0xa6, 0xdb, 0x69, 0x19, 0xec, 0x6e, + 0x42, 0x76, 0x45, 0xf2, 0xef, 0x25, 0xbb, 0x46, 0x7b, 0xdb, 0x7d, 0xdf, 0x7b, 0xb3, 0x3b, 0x0f, + 0x52, 0x21, 0x53, 0x36, 0x6d, 0xed, 0x6b, 0x9c, 0x08, 0x99, 0x7d, 0x73, 0xc8, 0x5f, 0x01, 0x77, + 0xcb, 0x55, 0x23, 0xeb, 0xda, 0x9e, 0xe4, 0xfc, 0xd5, 0x92, 0x97, 
0xda, 0xe2, 0x13, 0x80, 0x1c, + 0xd9, 0x7a, 0xf1, 0xc2, 0x4e, 0x8d, 0xb2, 0xa4, 0x98, 0x2d, 0xe6, 0x65, 0x8c, 0x94, 0x9b, 0x48, + 0xba, 0xea, 0xca, 0x93, 0x5b, 0x98, 0x0e, 0x3a, 0x22, 0xdc, 0x58, 0x32, 0xac, 0x46, 0xd9, 0xa8, + 0x48, 0xab, 0x70, 0xc6, 0x67, 0x98, 0xe9, 0x96, 0x83, 0x83, 0x2e, 0x4e, 0x8d, 0xc3, 0x48, 0x1c, + 0x46, 0xae, 0xff, 0x50, 0x75, 0x6d, 0x43, 0x05, 0xb7, 0xa4, 0xfb, 0x1f, 0x39, 0x95, 0x64, 0x49, + 0x91, 0x56, 0xc3, 0x35, 0x7f, 0x03, 0xf8, 0x0f, 0xe1, 0x3d, 0x00, 0x69, 0xcd, 0xce, 0xed, 0x3f, + 0xb9, 0xfb, 0x7d, 0x37, 0x8d, 0xca, 0x96, 0xbb, 0x1e, 0x3b, 0xd6, 0x2d, 0xfb, 0x80, 0xc7, 0x11, + 0x47, 0x65, 0xcb, 0xdd, 0xe2, 0x11, 0x1e, 0x76, 0x4c, 0xdf, 0xcc, 0xc7, 0x61, 0x85, 0x55, 0x88, + 0xbe, 0x93, 0xa5, 0x33, 0x1b, 0xb6, 0xfe, 0xe5, 0x0e, 0xe6, 0x2e, 0x5a, 0x4e, 0xae, 0xd4, 0x17, + 0xe9, 0xb5, 0xe9, 0x86, 0xcc, 0x47, 0x5f, 0xe6, 0x61, 0x12, 0x3a, 0x5d, 0xfe, 0x04, 0x00, 0x00, + 0xff, 0xff, 0x83, 0x4f, 0x61, 0x03, 0x60, 0x01, 0x00, 0x00, } diff --git a/weed/s3api/auth_credentials.go b/weed/s3api/auth_credentials.go index e5d693951..aa5199117 100644 --- a/weed/s3api/auth_credentials.go +++ b/weed/s3api/auth_credentials.go @@ -47,15 +47,15 @@ func NewIdentityAccessManagement(fileName string, domain string) *IdentityAccess if fileName == "" { return iam } - if err := iam.loadIdentities(fileName); err != nil { + if err := iam.loadS3ApiConfiguration(fileName); err != nil { glog.Fatalf("fail to load config file %s: %v", fileName, err) } return iam } -func (iam *IdentityAccessManagement) loadIdentities(fileName string) error { +func (iam *IdentityAccessManagement) loadS3ApiConfiguration(fileName string) error { - identities := &iam_pb.Identities{} + s3ApiConfiguration := &iam_pb.S3ApiConfiguration{} rawData, readErr := ioutil.ReadFile(fileName) if readErr != nil { @@ -64,12 +64,12 @@ func (iam *IdentityAccessManagement) loadIdentities(fileName string) error { } glog.V(1).Infof("maybeLoadVolumeInfo Unmarshal volume info %v", fileName) - if err := jsonpb.Unmarshal(bytes.NewReader(rawData), identities); err != nil { + if err := jsonpb.Unmarshal(bytes.NewReader(rawData), s3ApiConfiguration); err != nil { glog.Warningf("unmarshal error: %v", err) return fmt.Errorf("unmarshal %s error: %v", fileName, err) } - for _, ident := range identities.Identities { + for _, ident := range s3ApiConfiguration.Identities { t := &Identity{ Name: ident.Name, Credentials: nil, diff --git a/weed/s3api/auth_credentials_test.go b/weed/s3api/auth_credentials_test.go index 5e605bdd5..c6f76560c 100644 --- a/weed/s3api/auth_credentials_test.go +++ b/weed/s3api/auth_credentials_test.go @@ -10,7 +10,7 @@ import ( func TestIdentityListFileFormat(t *testing.T) { - identities := &iam_pb.Identities{} + s3ApiConfiguration := &iam_pb.S3ApiConfiguration{} identity1 := &iam_pb.Identity{ Name: "some_name", @@ -52,16 +52,16 @@ func TestIdentityListFileFormat(t *testing.T) { }, } - identities.Identities = append(identities.Identities, identity1) - identities.Identities = append(identities.Identities, identity2) - identities.Identities = append(identities.Identities, identity3) + s3ApiConfiguration.Identities = append(s3ApiConfiguration.Identities, identity1) + s3ApiConfiguration.Identities = append(s3ApiConfiguration.Identities, identity2) + s3ApiConfiguration.Identities = append(s3ApiConfiguration.Identities, identity3) m := jsonpb.Marshaler{ EmitDefaults: true, Indent: " ", } - text, _ := m.MarshalToString(identities) + text, _ := m.MarshalToString(s3ApiConfiguration) println(text) From 
443a68484a0a93fba6911529650b42a379615b8a Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Mon, 17 Feb 2020 12:54:45 -0800 Subject: [PATCH 0124/2432] 1.56 --- weed/util/constants.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/weed/util/constants.go b/weed/util/constants.go index 646a7d96d..4fb318f08 100644 --- a/weed/util/constants.go +++ b/weed/util/constants.go @@ -5,5 +5,5 @@ import ( ) var ( - VERSION = fmt.Sprintf("%s %d.%d", sizeLimit, 1, 55) + VERSION = fmt.Sprintf("%s %d.%d", sizeLimit, 1, 56) ) From dbabdd418ebd6cf6502112379bef328a0762cf40 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Tue, 18 Feb 2020 09:16:04 -0800 Subject: [PATCH 0125/2432] add unhandled request auth type fix 2020-02-18 11:43:57.396699 I | http: panic serving 172.28.0.43:50658: runtime error: invalid memory address or nil pointer dereference goroutine 595 [running]: net/http.(*conn).serve.func1(0xc0001fe3c0) /usr/lib/go/src/net/http/server.go:1767 +0x13b panic(0x55c4e35f3820, 0x55c4e48b3c40) /usr/lib/go/src/runtime/panic.go:679 +0x1b6 github.com/chrislusf/seaweedfs/weed/s3api.(*IdentityAccessManagement).authRequest(0xc0004b84e0, 0xc000115900, 0xc0000bb650, 0x1, 0x1, 0x55c4e399d740) /go/src/github.com/chrislusf/seaweedfs/weed/s3api/auth_credentials.go:143 +0x11c github.com/chrislusf/seaweedfs/weed/s3api.(*IdentityAccessManagement).Auth.func1(0x55c4e3994c40, 0xc0007808c0, 0xc000115900) /go/src/github.com/chrislusf/seaweedfs/weed/s3api/auth_credentials.go:111 +0x5e net/http.HandlerFunc.ServeHTTP(0xc0004b87e0, 0x55c4e3994c40, 0xc0007808c0, 0xc000115900) /usr/lib/go/src/net/http/server.go:2007 +0x46 github.com/gorilla/mux.(*Router).ServeHTTP(0xc0004ba000, 0x55c4e3994c40, 0xc0007808c0, 0xc000115700) /root/go/pkg/mod/github.com/gorilla/mux@v1.7.3/mux.go:212 +0xe4 net/http.serverHandler.ServeHTTP(0xc00011e0e0, 0x55c4e3994c40, 0xc0007808c0, 0xc000115700) /usr/lib/go/src/net/http/server.go:2802 +0xa6 net/http.(*conn).serve(0xc0001fe3c0, 0x55c4e399d680, 0xc000894180) /usr/lib/go/src/net/http/server.go:1890 +0x877 created by net/http.(*Server).Serve /usr/lib/go/src/net/http/server.go:2927 +0x390 --- weed/s3api/auth_credentials.go | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/weed/s3api/auth_credentials.go b/weed/s3api/auth_credentials.go index aa5199117..b7a8dbf95 100644 --- a/weed/s3api/auth_credentials.go +++ b/weed/s3api/auth_credentials.go @@ -133,6 +133,12 @@ func (iam *IdentityAccessManagement) authRequest(r *http.Request, actions []Acti case authTypeSigned, authTypePresigned: glog.V(3).Infof("v4 auth type") identity, s3Err = iam.reqSignatureV4Verify(r) + case authTypePostPolicy: + return ErrNotImplemented; + case authTypeJWT: + return ErrNotImplemented; + case authTypeAnonymous: + return ErrNotImplemented } glog.V(3).Infof("auth error: %v", s3Err) From b07089cff233254fb3310eb748b4f5b7fc495adb Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Tue, 18 Feb 2020 19:38:59 -0800 Subject: [PATCH 0126/2432] webdav out of "unstable" --- weed/command/webdav.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/weed/command/webdav.go b/weed/command/webdav.go index 0e6f89040..4d5752247 100644 --- a/weed/command/webdav.go +++ b/weed/command/webdav.go @@ -36,7 +36,7 @@ func init() { var cmdWebDav = &Command{ UsageLine: "webdav -port=7333 -filer=", - Short: " start a webdav server that is backed by a filer", + Short: "start a webdav server that is backed by a filer", Long: `start a webdav server that is backed by a filer. 
`, From 45156cc2feef983068dbd9c7c513fbb1772527b0 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Thu, 20 Feb 2020 01:21:11 -0800 Subject: [PATCH 0127/2432] add test cases for replication=011 --- weed/topology/volume_growth_test.go | 122 ++++++++++++++++++++++++++++ 1 file changed, 122 insertions(+) diff --git a/weed/topology/volume_growth_test.go b/weed/topology/volume_growth_test.go index e3c5cc580..a004dc210 100644 --- a/weed/topology/volume_growth_test.go +++ b/weed/topology/volume_growth_test.go @@ -131,3 +131,125 @@ func TestFindEmptySlotsForOneVolume(t *testing.T) { fmt.Println("assigned node :", server.Id()) } } + +var topologyLayout2 = ` +{ + "dc1":{ + "rack1":{ + "server111":{ + "volumes":[ + {"id":1, "size":12312}, + {"id":2, "size":12312}, + {"id":3, "size":12312} + ], + "limit":300 + }, + "server112":{ + "volumes":[ + {"id":4, "size":12312}, + {"id":5, "size":12312}, + {"id":6, "size":12312} + ], + "limit":300 + }, + "server113":{ + "volumes":[], + "limit":300 + }, + "server114":{ + "volumes":[], + "limit":300 + }, + "server115":{ + "volumes":[], + "limit":300 + }, + "server116":{ + "volumes":[], + "limit":300 + } + }, + "rack2":{ + "server121":{ + "volumes":[ + {"id":4, "size":12312}, + {"id":5, "size":12312}, + {"id":6, "size":12312} + ], + "limit":300 + }, + "server122":{ + "volumes":[], + "limit":300 + }, + "server123":{ + "volumes":[ + {"id":2, "size":12312}, + {"id":3, "size":12312}, + {"id":4, "size":12312} + ], + "limit":300 + }, + "server124":{ + "volumes":[], + "limit":300 + }, + "server125":{ + "volumes":[], + "limit":300 + }, + "server126":{ + "volumes":[], + "limit":300 + } + }, + "rack3":{ + "server131":{ + "volumes":[], + "limit":300 + }, + "server132":{ + "volumes":[], + "limit":300 + }, + "server133":{ + "volumes":[], + "limit":300 + }, + "server134":{ + "volumes":[], + "limit":300 + }, + "server135":{ + "volumes":[], + "limit":300 + }, + "server136":{ + "volumes":[], + "limit":300 + } + } + } +} +` + +func TestReplication011(t *testing.T) { + topo := setup(topologyLayout2) + vg := NewDefaultVolumeGrowth() + rp, _ := super_block.NewReplicaPlacementFromString("011") + volumeGrowOption := &VolumeGrowOption{ + Collection: "MAIL", + ReplicaPlacement: rp, + DataCenter: "dc1", + Rack: "", + DataNode: "", + } + servers, err := vg.findEmptySlotsForOneVolume(topo, volumeGrowOption) + if err != nil { + fmt.Println("finding empty slots error :", err) + t.Fail() + } + for _, server := range servers { + fmt.Println("assigned node :", server.Id()) + } +} From 621cdbdf587dfd2218229055e74613ccae17d7d7 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Thu, 20 Feb 2020 15:44:17 -0800 Subject: [PATCH 0128/2432] filer: avoid possible timeouts for updates and deletions --- weed/filer2/filer.go | 19 ++++++------- weed/filer2/filer_deletion.go | 33 +++++++++++------------ weed/util/queue_unbounded.go | 45 +++++++++++++++++++++++++++++++ weed/util/queue_unbounded_test.go | 25 +++++++++++++++++ 4 files changed, 95 insertions(+), 27 deletions(-) create mode 100644 weed/util/queue_unbounded.go create mode 100644 weed/util/queue_unbounded_test.go diff --git a/weed/filer2/filer.go b/weed/filer2/filer.go index a0af942e0..4db48e386 100644 --- a/weed/filer2/filer.go +++ b/weed/filer2/filer.go @@ -13,6 +13,7 @@ import ( "github.com/karlseguin/ccache" "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/util" "github.com/chrislusf/seaweedfs/weed/wdclient" ) @@ -24,19 +25,19 @@ var ( ) type Filer struct { - store *FilerStoreWrapper - directoryCache *ccache.Cache - 
MasterClient *wdclient.MasterClient - fileIdDeletionChan chan string - GrpcDialOption grpc.DialOption + store *FilerStoreWrapper + directoryCache *ccache.Cache + MasterClient *wdclient.MasterClient + fileIdDeletionQueue *util.UnboundedQueue + GrpcDialOption grpc.DialOption } func NewFiler(masters []string, grpcDialOption grpc.DialOption) *Filer { f := &Filer{ - directoryCache: ccache.New(ccache.Configure().MaxSize(1000).ItemsToPrune(100)), - MasterClient: wdclient.NewMasterClient(context.Background(), grpcDialOption, "filer", masters), - fileIdDeletionChan: make(chan string, PaginationSize), - GrpcDialOption: grpcDialOption, + directoryCache: ccache.New(ccache.Configure().MaxSize(1000).ItemsToPrune(100)), + MasterClient: wdclient.NewMasterClient(context.Background(), grpcDialOption, "filer", masters), + fileIdDeletionQueue: util.NewUnboundedQueue(), + GrpcDialOption: grpcDialOption, } go f.loopProcessingDeletion() diff --git a/weed/filer2/filer_deletion.go b/weed/filer2/filer_deletion.go index 9937685f7..3a64f636e 100644 --- a/weed/filer2/filer_deletion.go +++ b/weed/filer2/filer_deletion.go @@ -10,8 +10,6 @@ import ( func (f *Filer) loopProcessingDeletion() { - ticker := time.NewTicker(5 * time.Second) - lookupFunc := func(vids []string) (map[string]operation.LookupResult, error) { m := make(map[string]operation.LookupResult) for _, vid := range vids { @@ -31,36 +29,35 @@ func (f *Filer) loopProcessingDeletion() { return m, nil } - var fileIds []string + var deletionCount int for { - select { - case fid := <-f.fileIdDeletionChan: - fileIds = append(fileIds, fid) - if len(fileIds) >= 4096 { - glog.V(1).Infof("deleting fileIds len=%d", len(fileIds)) - operation.DeleteFilesWithLookupVolumeId(f.GrpcDialOption, fileIds, lookupFunc) - fileIds = fileIds[:0] - } - case <-ticker.C: - if len(fileIds) > 0 { - glog.V(1).Infof("timed deletion fileIds len=%d", len(fileIds)) - operation.DeleteFilesWithLookupVolumeId(f.GrpcDialOption, fileIds, lookupFunc) - fileIds = fileIds[:0] + deletionCount = 0 + f.fileIdDeletionQueue.Consume(func(fileIds []string) { + deletionCount = len(fileIds) + _, err := operation.DeleteFilesWithLookupVolumeId(f.GrpcDialOption, fileIds, lookupFunc) + if err != nil { + glog.V(0).Infof("deleting fileIds len=%d error: %v", deletionCount, err) + } else { + glog.V(1).Infof("deleting fileIds len=%d", deletionCount) } + }) + + if deletionCount == 0 { + time.Sleep(1123 * time.Millisecond) } } } func (f *Filer) DeleteChunks(chunks []*filer_pb.FileChunk) { for _, chunk := range chunks { - f.fileIdDeletionChan <- chunk.GetFileIdString() + f.fileIdDeletionQueue.EnQueue(chunk.GetFileIdString()) } } // DeleteFileByFileId direct delete by file id. // Only used when the fileId is not being managed by snapshots. func (f *Filer) DeleteFileByFileId(fileId string) { - f.fileIdDeletionChan <- fileId + f.fileIdDeletionQueue.EnQueue(fileId) } func (f *Filer) deleteChunksIfNotNew(oldEntry, newEntry *Entry) { diff --git a/weed/util/queue_unbounded.go b/weed/util/queue_unbounded.go new file mode 100644 index 000000000..664cd965e --- /dev/null +++ b/weed/util/queue_unbounded.go @@ -0,0 +1,45 @@ +package util + +import "sync" + +type UnboundedQueue struct { + outbound []string + outboundLock sync.RWMutex + inbound []string + inboundLock sync.RWMutex +} + +func NewUnboundedQueue() *UnboundedQueue { + q := &UnboundedQueue{} + return q +} + +func (q *UnboundedQueue) EnQueue(items ...string) { + q.inboundLock.Lock() + defer q.inboundLock.Unlock() + + q.outbound = append(q.outbound, items...) 
+ +} + +func (q *UnboundedQueue) Consume(fn func([]string)) { + q.outboundLock.Lock() + defer q.outboundLock.Unlock() + + if len(q.outbound) == 0 { + q.inboundLock.Lock() + inbountLen := len(q.inbound) + if inbountLen > 0 { + t := q.outbound + q.outbound = q.inbound + q.inbound = t + } + q.inboundLock.Unlock() + } + + if len(q.outbound) > 0 { + fn(q.outbound) + q.outbound = q.outbound[:0] + } + +} diff --git a/weed/util/queue_unbounded_test.go b/weed/util/queue_unbounded_test.go new file mode 100644 index 000000000..2d02032cb --- /dev/null +++ b/weed/util/queue_unbounded_test.go @@ -0,0 +1,25 @@ +package util + +import "testing" + +func TestEnqueueAndConsume(t *testing.T) { + + q := NewUnboundedQueue() + + q.EnQueue("1", "2", "3") + + f := func(items []string) { + for _, t := range items { + println(t) + } + println("-----------------------") + } + q.Consume(f) + + q.Consume(f) + + q.EnQueue("4", "5") + q.EnQueue("6", "7") + q.Consume(f) + +} From 3a1d017de238af668cf346ac69de34fe97cb9e7d Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Fri, 21 Feb 2020 21:23:25 -0800 Subject: [PATCH 0129/2432] shell: ensure dc and rack does not change for replicated volumes fix https://github.com/chrislusf/seaweedfs/issues/1203 --- weed/shell/command_volume_balance.go | 28 +++++++++++++++++----------- 1 file changed, 17 insertions(+), 11 deletions(-) diff --git a/weed/shell/command_volume_balance.go b/weed/shell/command_volume_balance.go index bed4f4306..488beb998 100644 --- a/weed/shell/command_volume_balance.go +++ b/weed/shell/command_volume_balance.go @@ -109,14 +109,7 @@ func (c *commandVolumeBalance) Do(args []string, commandEnv *CommandEnv, writer return nil } -func balanceVolumeServers(commandEnv *CommandEnv, dataNodeInfos []*master_pb.DataNodeInfo, volumeSizeLimit uint64, collection string, applyBalancing bool) error { - - var nodes []*Node - for _, dn := range dataNodeInfos { - nodes = append(nodes, &Node{ - info: dn, - }) - } +func balanceVolumeServers(commandEnv *CommandEnv, nodes []*Node, volumeSizeLimit uint64, collection string, applyBalancing bool) error { // balance writable volumes for _, n := range nodes { @@ -151,15 +144,19 @@ func balanceVolumeServers(commandEnv *CommandEnv, dataNodeInfos []*master_pb.Dat return nil } -func collectVolumeServersByType(t *master_pb.TopologyInfo, selectedDataCenter string) (typeToNodes map[uint64][]*master_pb.DataNodeInfo) { - typeToNodes = make(map[uint64][]*master_pb.DataNodeInfo) +func collectVolumeServersByType(t *master_pb.TopologyInfo, selectedDataCenter string) (typeToNodes map[uint64][]*Node) { + typeToNodes = make(map[uint64][]*Node) for _, dc := range t.DataCenterInfos { if selectedDataCenter != "" && dc.Id != selectedDataCenter { continue } for _, r := range dc.RackInfos { for _, dn := range r.DataNodeInfos { - typeToNodes[dn.MaxVolumeCount] = append(typeToNodes[dn.MaxVolumeCount], dn) + typeToNodes[dn.MaxVolumeCount] = append(typeToNodes[dn.MaxVolumeCount], &Node{ + info: dn, + dc: dc.Id, + rack: r.Id, + }) } } } @@ -169,6 +166,8 @@ func collectVolumeServersByType(t *master_pb.TopologyInfo, selectedDataCenter st type Node struct { info *master_pb.DataNodeInfo selectedVolumes map[uint32]*master_pb.VolumeInformationMessage + dc string + rack string } func sortWritableVolumes(volumes []*master_pb.VolumeInformationMessage) { @@ -210,6 +209,13 @@ func balanceSelectedVolume(commandEnv *CommandEnv, nodes []*Node, sortCandidates sortCandidatesFn(candidateVolumes) for _, v := range candidateVolumes { + if v.ReplicaPlacement > 0 { + if fullNode.dc != 
emptyNode.dc && fullNode.rack != emptyNode.rack { + // TODO this logic is too simple, but should work most of the time + // Need a correct algorithm to handle all different cases + continue + } + } if _, found := emptyNode.selectedVolumes[v.Id]; !found { if err := moveVolume(commandEnv, v, fullNode, emptyNode, applyBalancing); err == nil { delete(fullNode.selectedVolumes, v.Id) From f55f49970fb09897202b0c9ed5c3edba815857fc Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Fri, 21 Feb 2020 21:45:03 -0800 Subject: [PATCH 0130/2432] move volume server stats to grpc --- weed/pb/volume_server.proto | 13 +- weed/pb/volume_server_pb/volume_server.pb.go | 498 +++++++++++-------- weed/server/master_server.go | 10 +- weed/server/volume_grpc_admin.go | 18 + weed/server/volume_server.go | 8 +- 5 files changed, 327 insertions(+), 220 deletions(-) diff --git a/weed/pb/volume_server.proto b/weed/pb/volume_server.proto index 284e00633..54b0da19d 100644 --- a/weed/pb/volume_server.proto +++ b/weed/pb/volume_server.proto @@ -81,7 +81,10 @@ service VolumeServer { rpc VolumeTierMoveDatFromRemote (VolumeTierMoveDatFromRemoteRequest) returns (stream VolumeTierMoveDatFromRemoteResponse) { } - // query + rpc VolumeServerStatus (VolumeServerStatusRequest) returns (VolumeServerStatusResponse) { + } + + // query rpc Query (QueryRequest) returns (stream QueriedStripe) { } @@ -409,6 +412,14 @@ message VolumeTierMoveDatFromRemoteResponse { float processedPercentage = 2; } +message VolumeServerStatusRequest { + +} +message VolumeServerStatusResponse { + repeated DiskStatus disk_statuses = 1; + MemStatus memory_status = 2; +} + // select on volume servers message QueryRequest { repeated string selections = 1; diff --git a/weed/pb/volume_server_pb/volume_server.pb.go b/weed/pb/volume_server_pb/volume_server.pb.go index ec196a1d9..d14e98693 100644 --- a/weed/pb/volume_server_pb/volume_server.pb.go +++ b/weed/pb/volume_server_pb/volume_server.pb.go @@ -77,6 +77,8 @@ It has these top-level messages: VolumeTierMoveDatToRemoteResponse VolumeTierMoveDatFromRemoteRequest VolumeTierMoveDatFromRemoteResponse + VolumeServerStatusRequest + VolumeServerStatusResponse QueryRequest QueriedStripe */ @@ -1831,6 +1833,38 @@ func (m *VolumeTierMoveDatFromRemoteResponse) GetProcessedPercentage() float32 { return 0 } +type VolumeServerStatusRequest struct { +} + +func (m *VolumeServerStatusRequest) Reset() { *m = VolumeServerStatusRequest{} } +func (m *VolumeServerStatusRequest) String() string { return proto.CompactTextString(m) } +func (*VolumeServerStatusRequest) ProtoMessage() {} +func (*VolumeServerStatusRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{68} } + +type VolumeServerStatusResponse struct { + DiskStatuses []*DiskStatus `protobuf:"bytes,1,rep,name=disk_statuses,json=diskStatuses" json:"disk_statuses,omitempty"` + MemoryStatus *MemStatus `protobuf:"bytes,2,opt,name=memory_status,json=memoryStatus" json:"memory_status,omitempty"` +} + +func (m *VolumeServerStatusResponse) Reset() { *m = VolumeServerStatusResponse{} } +func (m *VolumeServerStatusResponse) String() string { return proto.CompactTextString(m) } +func (*VolumeServerStatusResponse) ProtoMessage() {} +func (*VolumeServerStatusResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{69} } + +func (m *VolumeServerStatusResponse) GetDiskStatuses() []*DiskStatus { + if m != nil { + return m.DiskStatuses + } + return nil +} + +func (m *VolumeServerStatusResponse) GetMemoryStatus() *MemStatus { + if m != nil { + return m.MemoryStatus + } + 
return nil +} + // select on volume servers type QueryRequest struct { Selections []string `protobuf:"bytes,1,rep,name=selections" json:"selections,omitempty"` @@ -1843,7 +1877,7 @@ type QueryRequest struct { func (m *QueryRequest) Reset() { *m = QueryRequest{} } func (m *QueryRequest) String() string { return proto.CompactTextString(m) } func (*QueryRequest) ProtoMessage() {} -func (*QueryRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{68} } +func (*QueryRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{70} } func (m *QueryRequest) GetSelections() []string { if m != nil { @@ -1889,7 +1923,7 @@ type QueryRequest_Filter struct { func (m *QueryRequest_Filter) Reset() { *m = QueryRequest_Filter{} } func (m *QueryRequest_Filter) String() string { return proto.CompactTextString(m) } func (*QueryRequest_Filter) ProtoMessage() {} -func (*QueryRequest_Filter) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{68, 0} } +func (*QueryRequest_Filter) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{70, 0} } func (m *QueryRequest_Filter) GetField() string { if m != nil { @@ -1924,7 +1958,7 @@ func (m *QueryRequest_InputSerialization) Reset() { *m = QueryRequest_In func (m *QueryRequest_InputSerialization) String() string { return proto.CompactTextString(m) } func (*QueryRequest_InputSerialization) ProtoMessage() {} func (*QueryRequest_InputSerialization) Descriptor() ([]byte, []int) { - return fileDescriptor0, []int{68, 1} + return fileDescriptor0, []int{70, 1} } func (m *QueryRequest_InputSerialization) GetCompressionType() string { @@ -1972,7 +2006,7 @@ func (m *QueryRequest_InputSerialization_CSVInput) Reset() { func (m *QueryRequest_InputSerialization_CSVInput) String() string { return proto.CompactTextString(m) } func (*QueryRequest_InputSerialization_CSVInput) ProtoMessage() {} func (*QueryRequest_InputSerialization_CSVInput) Descriptor() ([]byte, []int) { - return fileDescriptor0, []int{68, 1, 0} + return fileDescriptor0, []int{70, 1, 0} } func (m *QueryRequest_InputSerialization_CSVInput) GetFileHeaderInfo() string { @@ -2034,7 +2068,7 @@ func (m *QueryRequest_InputSerialization_JSONInput) Reset() { func (m *QueryRequest_InputSerialization_JSONInput) String() string { return proto.CompactTextString(m) } func (*QueryRequest_InputSerialization_JSONInput) ProtoMessage() {} func (*QueryRequest_InputSerialization_JSONInput) Descriptor() ([]byte, []int) { - return fileDescriptor0, []int{68, 1, 1} + return fileDescriptor0, []int{70, 1, 1} } func (m *QueryRequest_InputSerialization_JSONInput) GetType() string { @@ -2055,7 +2089,7 @@ func (m *QueryRequest_InputSerialization_ParquetInput) String() string { } func (*QueryRequest_InputSerialization_ParquetInput) ProtoMessage() {} func (*QueryRequest_InputSerialization_ParquetInput) Descriptor() ([]byte, []int) { - return fileDescriptor0, []int{68, 1, 2} + return fileDescriptor0, []int{70, 1, 2} } type QueryRequest_OutputSerialization struct { @@ -2067,7 +2101,7 @@ func (m *QueryRequest_OutputSerialization) Reset() { *m = QueryRequest_O func (m *QueryRequest_OutputSerialization) String() string { return proto.CompactTextString(m) } func (*QueryRequest_OutputSerialization) ProtoMessage() {} func (*QueryRequest_OutputSerialization) Descriptor() ([]byte, []int) { - return fileDescriptor0, []int{68, 2} + return fileDescriptor0, []int{70, 2} } func (m *QueryRequest_OutputSerialization) GetCsvOutput() *QueryRequest_OutputSerialization_CSVOutput { @@ -2100,7 +2134,7 @@ func (m 
*QueryRequest_OutputSerialization_CSVOutput) String() string { } func (*QueryRequest_OutputSerialization_CSVOutput) ProtoMessage() {} func (*QueryRequest_OutputSerialization_CSVOutput) Descriptor() ([]byte, []int) { - return fileDescriptor0, []int{68, 2, 0} + return fileDescriptor0, []int{70, 2, 0} } func (m *QueryRequest_OutputSerialization_CSVOutput) GetQuoteFields() string { @@ -2150,7 +2184,7 @@ func (m *QueryRequest_OutputSerialization_JSONOutput) String() string { } func (*QueryRequest_OutputSerialization_JSONOutput) ProtoMessage() {} func (*QueryRequest_OutputSerialization_JSONOutput) Descriptor() ([]byte, []int) { - return fileDescriptor0, []int{68, 2, 1} + return fileDescriptor0, []int{70, 2, 1} } func (m *QueryRequest_OutputSerialization_JSONOutput) GetRecordDelimiter() string { @@ -2167,7 +2201,7 @@ type QueriedStripe struct { func (m *QueriedStripe) Reset() { *m = QueriedStripe{} } func (m *QueriedStripe) String() string { return proto.CompactTextString(m) } func (*QueriedStripe) ProtoMessage() {} -func (*QueriedStripe) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{69} } +func (*QueriedStripe) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{71} } func (m *QueriedStripe) GetRecords() []byte { if m != nil { @@ -2245,6 +2279,8 @@ func init() { proto.RegisterType((*VolumeTierMoveDatToRemoteResponse)(nil), "volume_server_pb.VolumeTierMoveDatToRemoteResponse") proto.RegisterType((*VolumeTierMoveDatFromRemoteRequest)(nil), "volume_server_pb.VolumeTierMoveDatFromRemoteRequest") proto.RegisterType((*VolumeTierMoveDatFromRemoteResponse)(nil), "volume_server_pb.VolumeTierMoveDatFromRemoteResponse") + proto.RegisterType((*VolumeServerStatusRequest)(nil), "volume_server_pb.VolumeServerStatusRequest") + proto.RegisterType((*VolumeServerStatusResponse)(nil), "volume_server_pb.VolumeServerStatusResponse") proto.RegisterType((*QueryRequest)(nil), "volume_server_pb.QueryRequest") proto.RegisterType((*QueryRequest_Filter)(nil), "volume_server_pb.QueryRequest.Filter") proto.RegisterType((*QueryRequest_InputSerialization)(nil), "volume_server_pb.QueryRequest.InputSerialization") @@ -2303,7 +2339,8 @@ type VolumeServerClient interface { // tiered storage VolumeTierMoveDatToRemote(ctx context.Context, in *VolumeTierMoveDatToRemoteRequest, opts ...grpc.CallOption) (VolumeServer_VolumeTierMoveDatToRemoteClient, error) VolumeTierMoveDatFromRemote(ctx context.Context, in *VolumeTierMoveDatFromRemoteRequest, opts ...grpc.CallOption) (VolumeServer_VolumeTierMoveDatFromRemoteClient, error) - // query + VolumeServerStatus(ctx context.Context, in *VolumeServerStatusRequest, opts ...grpc.CallOption) (*VolumeServerStatusResponse, error) + // query Query(ctx context.Context, in *QueryRequest, opts ...grpc.CallOption) (VolumeServer_QueryClient, error) } @@ -2755,6 +2792,15 @@ func (x *volumeServerVolumeTierMoveDatFromRemoteClient) Recv() (*VolumeTierMoveD return m, nil } +func (c *volumeServerClient) VolumeServerStatus(ctx context.Context, in *VolumeServerStatusRequest, opts ...grpc.CallOption) (*VolumeServerStatusResponse, error) { + out := new(VolumeServerStatusResponse) + err := grpc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeServerStatus", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + func (c *volumeServerClient) Query(ctx context.Context, in *QueryRequest, opts ...grpc.CallOption) (VolumeServer_QueryClient, error) { stream, err := grpc.NewClientStream(ctx, &_VolumeServer_serviceDesc.Streams[7], c.cc, "/volume_server_pb.VolumeServer/Query", opts...) if err != nil { @@ -2825,7 +2871,8 @@ type VolumeServerServer interface { // tiered storage VolumeTierMoveDatToRemote(*VolumeTierMoveDatToRemoteRequest, VolumeServer_VolumeTierMoveDatToRemoteServer) error VolumeTierMoveDatFromRemote(*VolumeTierMoveDatFromRemoteRequest, VolumeServer_VolumeTierMoveDatFromRemoteServer) error - // query + VolumeServerStatus(context.Context, *VolumeServerStatusRequest) (*VolumeServerStatusResponse, error) + // query Query(*QueryRequest, VolumeServer_QueryServer) error } @@ -3412,6 +3459,24 @@ func (x *volumeServerVolumeTierMoveDatFromRemoteServer) Send(m *VolumeTierMoveDa return x.ServerStream.SendMsg(m) } +func _VolumeServer_VolumeServerStatus_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(VolumeServerStatusRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(VolumeServerServer).VolumeServerStatus(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/volume_server_pb.VolumeServer/VolumeServerStatus", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(VolumeServerServer).VolumeServerStatus(ctx, req.(*VolumeServerStatusRequest)) + } + return interceptor(ctx, in, info, handler) +} + func _VolumeServer_Query_Handler(srv interface{}, stream grpc.ServerStream) error { m := new(QueryRequest) if err := stream.RecvMsg(m); err != nil { @@ -3533,6 +3598,10 @@ var _VolumeServer_serviceDesc = grpc.ServiceDesc{ MethodName: "VolumeEcShardsToVolume", Handler: _VolumeServer_VolumeEcShardsToVolume_Handler, }, + { + MethodName: "VolumeServerStatus", + Handler: _VolumeServer_VolumeServerStatus_Handler, + }, }, Streams: []grpc.StreamDesc{ { @@ -3582,204 +3651,209 @@ var _VolumeServer_serviceDesc = grpc.ServiceDesc{ func init() { proto.RegisterFile("volume_server.proto", fileDescriptor0) } var fileDescriptor0 = []byte{ - // 3173 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xd4, 0x3a, 0x4b, 0x73, 0xdc, 0xc6, - 0xd1, 0x5c, 0x2e, 0x1f, 0xbb, 0xbd, 0x4b, 0x91, 0x1a, 0xd2, 0xd4, 0x1a, 0x22, 0x25, 0x0a, 0xf2, - 0x83, 0x94, 0x2d, 0x52, 0xa6, 0xed, 0xcf, 0xfa, 0xe4, 0xcf, 0xfe, 0x22, 0x51, 0x0f, 0xcb, 0x16, - 0x29, 0x1b, 0x94, 0x65, 0x27, 0x76, 0x05, 0x05, 0x02, 0xb3, 0x24, 0x4c, 0x2c, 0x06, 0x02, 0x66, - 0x69, 0xad, 0xca, 0xb9, 0xc4, 0xa9, 0x4a, 0xaa, 0x52, 0xc9, 0x21, 0x95, 0x4b, 0xce, 0xb9, 0xe7, - 0x9a, 0xbf, 0xe0, 0x3f, 0x90, 0xaa, 0x9c, 0x72, 0xc9, 0x39, 0x87, 0xdc, 0x52, 0x95, 0x4b, 0x6a, - 0x5e, 0x58, 0x3c, 0xb9, 0xa0, 0xc5, 0x54, 0x2a, 0x37, 0x4c, 0x4f, 0x3f, 0xa6, 0x7b, 0xba, 0x7b, - 0x7a, 0xa6, 0x01, 0xf3, 0x47, 0xc4, 0xeb, 0xf7, 0xb0, 0x19, 0xe1, 0xf0, 0x08, 0x87, 0xeb, 0x41, - 0x48, 0x28, 0x41, 0x73, 0x29, 0xa0, 0x19, 0xec, 0xe9, 0x1b, 0x80, 0x6e, 0x59, 0xd4, 0x3e, 0xb8, - 0x8d, 0x3d, 0x4c, 0xb1, 0x81, 0x9f, 0xf4, 0x71, 0x44, 0xd1, 0x8b, 0xd0, 0xe8, 0xba, 0x1e, 0x36, - 0x5d, 0x27, 0xea, 0xd4, 0x56, 0xea, 0xab, 0x4d, 0x63, 0x9a, 0x8d, 0xef, 0x3b, 0x91, 0xfe, 0x10, - 0xe6, 0x53, 0x04, 0x51, 0x40, 0xfc, 0x08, 0xa3, 0xeb, 0x30, 0x1d, 0xe2, 0xa8, 0xef, 0x51, 0x41, - 0xd0, 0xda, 
0xbc, 0xb0, 0x9e, 0x95, 0xb5, 0x1e, 0x93, 0xf4, 0x3d, 0x6a, 0x28, 0x74, 0xfd, 0xdb, - 0x1a, 0xb4, 0x93, 0x33, 0xe8, 0x1c, 0x4c, 0x4b, 0xe1, 0x9d, 0xda, 0x4a, 0x6d, 0xb5, 0x69, 0x4c, - 0x09, 0xd9, 0x68, 0x11, 0xa6, 0x22, 0x6a, 0xd1, 0x7e, 0xd4, 0x19, 0x5f, 0xa9, 0xad, 0x4e, 0x1a, - 0x72, 0x84, 0x16, 0x60, 0x12, 0x87, 0x21, 0x09, 0x3b, 0x75, 0x8e, 0x2e, 0x06, 0x08, 0xc1, 0x44, - 0xe4, 0x3e, 0xc3, 0x9d, 0x89, 0x95, 0xda, 0xea, 0x8c, 0xc1, 0xbf, 0x51, 0x07, 0xa6, 0x8f, 0x70, - 0x18, 0xb9, 0xc4, 0xef, 0x4c, 0x72, 0xb0, 0x1a, 0xea, 0x1f, 0xc2, 0x99, 0xbb, 0xae, 0x87, 0xef, - 0x61, 0xaa, 0x6c, 0x50, 0xba, 0x8c, 0x8b, 0xd0, 0xb2, 0x6c, 0x1b, 0x07, 0xd4, 0xdc, 0x7f, 0xe6, - 0x06, 0x7c, 0x2d, 0x0d, 0x03, 0x04, 0xe8, 0xde, 0x33, 0x37, 0xd0, 0x7f, 0x5e, 0x87, 0xd9, 0x98, - 0x99, 0xb4, 0x0f, 0x82, 0x09, 0xc7, 0xa2, 0x16, 0x67, 0xd5, 0x36, 0xf8, 0x37, 0x7a, 0x19, 0xce, - 0xd8, 0xc4, 0xa7, 0xd8, 0xa7, 0xa6, 0x87, 0xfd, 0x7d, 0x7a, 0xc0, 0x79, 0xcd, 0x18, 0x33, 0x12, - 0xfa, 0x80, 0x03, 0xd1, 0x25, 0x68, 0x2b, 0x34, 0x3a, 0x08, 0xb0, 0xd4, 0xb2, 0x25, 0x61, 0x8f, - 0x06, 0x01, 0x46, 0x97, 0x61, 0xc6, 0xb3, 0x22, 0x6a, 0xf6, 0x88, 0xe3, 0x76, 0x5d, 0xec, 0x70, - 0xa5, 0x27, 0x8c, 0x36, 0x03, 0x6e, 0x4b, 0x18, 0xd2, 0xc4, 0xa6, 0xfa, 0x56, 0x0f, 0x73, 0xed, - 0x9b, 0x46, 0x3c, 0x66, 0xcb, 0xc3, 0xd4, 0xda, 0xef, 0x4c, 0x71, 0x38, 0xff, 0x46, 0xcb, 0x00, - 0x6e, 0xc4, 0x75, 0x0c, 0xb0, 0xd3, 0x99, 0xe6, 0x6a, 0x36, 0xdd, 0xe8, 0x9e, 0x00, 0xa0, 0x0f, - 0x60, 0xfa, 0x00, 0x5b, 0x0e, 0x0e, 0xa3, 0x4e, 0x83, 0xef, 0xf8, 0x7a, 0x7e, 0xc7, 0x33, 0x56, - 0x58, 0xff, 0x40, 0x10, 0xdc, 0xf1, 0x69, 0x38, 0x30, 0x14, 0x39, 0x5a, 0x82, 0x26, 0xdf, 0xb2, - 0x2d, 0xe2, 0xe0, 0x4e, 0x93, 0x6f, 0xed, 0x10, 0xa0, 0xdd, 0x80, 0x76, 0x92, 0x0c, 0xcd, 0x41, - 0xfd, 0x10, 0x0f, 0xe4, 0x9e, 0xb0, 0x4f, 0xb6, 0xff, 0x47, 0x96, 0xd7, 0xc7, 0xdc, 0x7c, 0x4d, - 0x43, 0x0c, 0x6e, 0x8c, 0x5f, 0xaf, 0xe9, 0xd3, 0x30, 0x79, 0xa7, 0x17, 0xd0, 0x81, 0xfe, 0x0e, - 0x74, 0x1e, 0x5b, 0x76, 0xbf, 0xdf, 0x7b, 0xcc, 0x97, 0xb8, 0x75, 0x80, 0xed, 0x43, 0xb5, 0xd1, - 0xe7, 0xa1, 0x29, 0x17, 0x2e, 0xb7, 0x7a, 0xc6, 0x68, 0x08, 0xc0, 0x7d, 0x47, 0xff, 0x01, 0xbc, - 0x58, 0x40, 0x28, 0x37, 0xf5, 0x32, 0xcc, 0xec, 0x5b, 0xe1, 0x9e, 0xb5, 0x8f, 0xcd, 0xd0, 0xa2, - 0x2e, 0xe1, 0xd4, 0x35, 0xa3, 0x2d, 0x81, 0x06, 0x83, 0xe9, 0x5f, 0x80, 0x96, 0xe2, 0x40, 0x7a, - 0x81, 0x65, 0xd3, 0x2a, 0xc2, 0xd1, 0x0a, 0xb4, 0x82, 0x10, 0x5b, 0x9e, 0x47, 0x6c, 0x8b, 0x0a, - 0xf5, 0xea, 0x46, 0x12, 0xa4, 0x2f, 0xc3, 0xf9, 0x42, 0xe6, 0x62, 0x81, 0xfa, 0xf5, 0xcc, 0xea, - 0x49, 0xaf, 0xe7, 0x56, 0x12, 0xad, 0x2f, 0xe5, 0x56, 0xcd, 0x29, 0x25, 0xdf, 0xff, 0xcd, 0xcc, - 0x7a, 0xd8, 0xf2, 0xfb, 0x41, 0x25, 0xc6, 0xd9, 0x15, 0x2b, 0xd2, 0x98, 0xf3, 0x39, 0x91, 0x0c, - 0xb6, 0x88, 0xe7, 0x61, 0x9b, 0xba, 0xc4, 0x57, 0x6c, 0x2f, 0x00, 0xd8, 0x31, 0x50, 0xee, 0x7f, - 0x02, 0xa2, 0x6b, 0xd0, 0xc9, 0x93, 0x4a, 0xb6, 0x7f, 0xa9, 0xc1, 0x0b, 0x37, 0xa5, 0xd1, 0x84, - 0xe0, 0x4a, 0x1b, 0x90, 0x16, 0x39, 0x9e, 0x15, 0x99, 0xdd, 0xa0, 0x7a, 0x6e, 0x83, 0x18, 0x46, - 0x88, 0x03, 0xcf, 0xb5, 0x2d, 0xce, 0x62, 0x42, 0xc4, 0x6e, 0x02, 0xc4, 0xfc, 0x99, 0x52, 0x4f, - 0x46, 0x24, 0xfb, 0x44, 0x9b, 0xb0, 0xd8, 0xc3, 0x3d, 0x12, 0x0e, 0xcc, 0x9e, 0x15, 0x98, 0x3d, - 0xeb, 0xa9, 0xc9, 0x92, 0x97, 0xd9, 0xdb, 0xe3, 0xe1, 0x39, 0x63, 0x20, 0x31, 0xbb, 0x6d, 0x05, - 0xdb, 0xd6, 0xd3, 0x5d, 0xf7, 0x19, 0xde, 0xde, 0xd3, 0x3b, 0xb0, 0x98, 0xd5, 0x4f, 0xaa, 0xfe, - 0x3f, 0x70, 0x4e, 0x40, 0x76, 0x07, 0xbe, 0xbd, 0xcb, 0x33, 0x66, 0xa5, 0x8d, 0xfa, 0x67, 0x0d, - 0x3a, 0x79, 0x42, 0xe9, 0xf9, 0xcf, 
0x6b, 0xb5, 0x13, 0xdb, 0xe4, 0x22, 0xb4, 0xa8, 0xe5, 0x7a, - 0x26, 0xe9, 0x76, 0x23, 0x4c, 0xb9, 0x21, 0x26, 0x0c, 0x60, 0xa0, 0x87, 0x1c, 0x82, 0xd6, 0x60, - 0xce, 0x16, 0xde, 0x6f, 0x86, 0xf8, 0xc8, 0xe5, 0x39, 0x7e, 0x9a, 0x2f, 0x6c, 0xd6, 0x56, 0x51, - 0x21, 0xc0, 0x48, 0x87, 0x19, 0xd7, 0x79, 0x6a, 0xf2, 0xec, 0xce, 0x8f, 0x88, 0x06, 0xe7, 0xd6, - 0x72, 0x9d, 0xa7, 0x2c, 0x61, 0x31, 0x8b, 0xea, 0x8f, 0x61, 0x49, 0x28, 0x7f, 0xdf, 0xb7, 0x43, - 0xdc, 0xc3, 0x3e, 0xb5, 0xbc, 0x2d, 0x12, 0x0c, 0x2a, 0xb9, 0xcd, 0x8b, 0xd0, 0x88, 0x5c, 0xdf, - 0xc6, 0xa6, 0x2f, 0x8e, 0xaa, 0x09, 0x63, 0x9a, 0x8f, 0x77, 0x22, 0xfd, 0x16, 0x2c, 0x97, 0xf0, - 0x95, 0x96, 0xbd, 0x04, 0x6d, 0xbe, 0x30, 0x99, 0xde, 0xe5, 0x81, 0xd1, 0x62, 0xb0, 0x2d, 0x01, - 0xd2, 0xdf, 0x00, 0x24, 0x78, 0x6c, 0x93, 0xbe, 0x5f, 0x2d, 0x9c, 0x5f, 0x80, 0xf9, 0x14, 0x89, - 0xf4, 0x8d, 0x37, 0x61, 0x41, 0x80, 0x3f, 0xf5, 0x7b, 0x95, 0x79, 0x9d, 0x83, 0x17, 0x32, 0x44, - 0x92, 0xdb, 0xa6, 0x12, 0x92, 0x2e, 0x26, 0x8e, 0x65, 0xb6, 0xa8, 0x56, 0x90, 0xae, 0x27, 0x78, - 0xe6, 0x12, 0x0b, 0xb6, 0xc2, 0x43, 0x03, 0x5b, 0x0e, 0xf1, 0xbd, 0x41, 0xe5, 0xcc, 0x55, 0x40, - 0x29, 0xf9, 0x7e, 0x06, 0x8b, 0x2a, 0xa3, 0xf9, 0x5d, 0x77, 0xbf, 0x1f, 0xe2, 0xaa, 0x99, 0x38, - 0xe9, 0xb2, 0xe3, 0x39, 0x97, 0xd5, 0x37, 0x54, 0x98, 0x25, 0x18, 0xcb, 0x2d, 0x8d, 0xeb, 0x93, - 0x5a, 0xa2, 0x3e, 0xd1, 0xff, 0x50, 0x83, 0xb3, 0x8a, 0xa2, 0xa2, 0x5f, 0x9d, 0x30, 0xb0, 0xea, - 0xa5, 0x81, 0x35, 0x31, 0x0c, 0xac, 0x55, 0x98, 0x8b, 0x48, 0x3f, 0xb4, 0xb1, 0xc9, 0x6a, 0x12, - 0xd3, 0x67, 0x67, 0xb0, 0x88, 0xbb, 0x33, 0x02, 0x7e, 0xdb, 0xa2, 0xd6, 0x0e, 0x71, 0xb0, 0xfe, - 0xff, 0xca, 0xed, 0x52, 0xfe, 0xba, 0x06, 0x67, 0x79, 0xe9, 0x61, 0x05, 0x01, 0xf6, 0x1d, 0xd3, - 0xa2, 0xcc, 0xe9, 0x6b, 0xdc, 0xe9, 0xcf, 0xb0, 0x89, 0x9b, 0x1c, 0x7e, 0x93, 0xee, 0x44, 0xfa, - 0x6f, 0xc7, 0x61, 0x96, 0xd1, 0xb2, 0x20, 0xab, 0xa4, 0xef, 0x1c, 0xd4, 0xf1, 0x53, 0x2a, 0x15, - 0x65, 0x9f, 0x68, 0x03, 0xe6, 0x65, 0x34, 0xbb, 0xc4, 0x1f, 0x06, 0x7a, 0x5d, 0xe4, 0xc5, 0xe1, - 0x54, 0x1c, 0xeb, 0x17, 0xa1, 0x15, 0x51, 0x12, 0xa8, 0xbc, 0x21, 0xea, 0x22, 0x60, 0x20, 0x99, - 0x37, 0xd2, 0x36, 0x9d, 0x2c, 0xb0, 0x69, 0xdb, 0x8d, 0x4c, 0x6c, 0x9b, 0x62, 0x55, 0x3c, 0xf3, - 0x34, 0x0c, 0x70, 0xa3, 0x3b, 0xb6, 0xb0, 0x06, 0x7a, 0x1f, 0x96, 0xdc, 0x7d, 0x9f, 0x84, 0xd8, - 0x94, 0x86, 0xe4, 0xf1, 0xeb, 0x13, 0x6a, 0x76, 0x49, 0xdf, 0x57, 0x95, 0x53, 0x47, 0xe0, 0xec, - 0x72, 0x14, 0x66, 0x81, 0x1d, 0x42, 0xef, 0xb2, 0x79, 0xfd, 0x6d, 0x98, 0x1b, 0x5a, 0xa5, 0x7a, - 0x16, 0xf8, 0xb6, 0xa6, 0x3c, 0xee, 0x91, 0xe5, 0x7a, 0xbb, 0xd8, 0x77, 0x70, 0xf8, 0x9c, 0xd9, - 0x09, 0x5d, 0x83, 0x05, 0xd7, 0xf1, 0xb0, 0x49, 0xdd, 0x1e, 0x26, 0x7d, 0x6a, 0x46, 0xd8, 0x26, - 0xbe, 0x13, 0x29, 0xfb, 0xb2, 0xb9, 0x47, 0x62, 0x6a, 0x57, 0xcc, 0xe8, 0x3f, 0x8b, 0x4f, 0x89, - 0xe4, 0x2a, 0x86, 0xf5, 0x91, 0x8f, 0x31, 0x63, 0x28, 0x4a, 0x3d, 0xa9, 0x46, 0x5b, 0x00, 0x45, - 0x55, 0xc7, 0x76, 0x48, 0x22, 0xed, 0x11, 0x67, 0xc0, 0x57, 0xd4, 0x36, 0x40, 0x80, 0x6e, 0x11, - 0x67, 0xc0, 0xd3, 0x75, 0x64, 0x72, 0x27, 0xb3, 0x0f, 0xfa, 0xfe, 0x21, 0x5f, 0x4d, 0xc3, 0x68, - 0xb9, 0xd1, 0x03, 0x2b, 0xa2, 0x5b, 0x0c, 0xa4, 0xff, 0xb1, 0xa6, 0xf2, 0x05, 0x5b, 0x86, 0x81, - 0x6d, 0xec, 0x1e, 0xfd, 0x07, 0xcc, 0xc1, 0x28, 0xa4, 0x13, 0xa4, 0x6a, 0x61, 0x19, 0x70, 0x48, - 0xcc, 0xc9, 0x53, 0x95, 0xcf, 0x0c, 0xd3, 0x55, 0x7a, 0xe1, 0x32, 0x5d, 0x7d, 0xa9, 0x8e, 0x8b, - 0x3b, 0xf6, 0xee, 0x81, 0x15, 0x3a, 0xd1, 0x3d, 0xec, 0xe3, 0xd0, 0xa2, 0xa7, 0x52, 0xbe, 0xe8, - 0x2b, 0x70, 0xa1, 0x8c, 0xbb, 0x94, 0xff, 0x85, 0x3a, 0x06, 
0x15, 0x86, 0x81, 0xf7, 0xfa, 0xae, - 0xe7, 0x9c, 0x8a, 0xf8, 0x8f, 0xb2, 0xca, 0xc5, 0xcc, 0xa5, 0xff, 0x5c, 0x81, 0xb3, 0x21, 0x07, - 0x51, 0x33, 0x62, 0x08, 0xf1, 0x7d, 0x74, 0xc6, 0x98, 0x95, 0x13, 0x9c, 0x90, 0xdd, 0x4b, 0x7f, - 0x39, 0xae, 0x3c, 0x40, 0x71, 0x3b, 0xb5, 0xb4, 0x7a, 0x1e, 0x9a, 0x43, 0xf1, 0x75, 0x2e, 0xbe, - 0x11, 0x49, 0xb9, 0xcc, 0x3b, 0x6d, 0x12, 0x0c, 0x4c, 0x6c, 0x8b, 0x8a, 0x82, 0x6f, 0x75, 0x83, - 0x5d, 0xcf, 0x82, 0xc1, 0x1d, 0x9b, 0x17, 0x14, 0xd5, 0x73, 0x6c, 0x82, 0xdb, 0x57, 0x82, 0xdb, - 0x54, 0x92, 0xdb, 0x57, 0x9c, 0x9b, 0xc2, 0x39, 0x72, 0xbb, 0x02, 0x67, 0x7a, 0x88, 0xf3, 0xd8, - 0xed, 0x32, 0x9c, 0xa1, 0x57, 0xa5, 0x8d, 0x21, 0x77, 0xf5, 0x6b, 0x38, 0x9f, 0x9e, 0xad, 0x7e, - 0x60, 0x3f, 0x97, 0xb1, 0xf4, 0x0b, 0x59, 0x77, 0xca, 0x9c, 0xfa, 0x47, 0xd9, 0x65, 0x57, 0xae, - 0x70, 0x9e, 0x6f, 0x5d, 0xcb, 0x59, 0x83, 0xa4, 0xcb, 0xa4, 0xcf, 0xb3, 0xcb, 0x3e, 0x41, 0xb9, - 0x74, 0xbc, 0xe0, 0x8b, 0xd9, 0x10, 0xc8, 0xd6, 0x54, 0xbf, 0x8b, 0xf3, 0xab, 0xc4, 0x60, 0x15, - 0x4d, 0xe5, 0xbc, 0x26, 0xe5, 0xca, 0x77, 0x85, 0x69, 0x29, 0x16, 0x2d, 0xc2, 0x94, 0x3c, 0x0f, - 0xc5, 0x8d, 0x45, 0x8e, 0x52, 0x4f, 0x26, 0x75, 0xf9, 0x64, 0xa2, 0x9e, 0x82, 0xd8, 0x9d, 0x7b, - 0x52, 0xa4, 0x47, 0x36, 0xfe, 0x08, 0x0f, 0xf4, 0x9d, 0x4c, 0xc4, 0x89, 0xa5, 0x1d, 0xf3, 0xe0, - 0x21, 0x5e, 0x14, 0x1c, 0xbe, 0xe7, 0x8e, 0x7c, 0x38, 0x69, 0xba, 0xd2, 0x09, 0x1c, 0xfd, 0x57, - 0xb5, 0x21, 0xc3, 0x5b, 0x1e, 0xd9, 0x3b, 0x45, 0xaf, 0x4c, 0x6a, 0x51, 0x4f, 0x69, 0x91, 0x7c, - 0x13, 0x9a, 0x48, 0xbf, 0x09, 0x25, 0x82, 0x28, 0xb9, 0x9c, 0xb2, 0xd4, 0xfc, 0x88, 0x9c, 0xde, - 0xcd, 0x32, 0x9f, 0x9a, 0x87, 0xdc, 0xa5, 0xfc, 0x1b, 0x70, 0x9e, 0x19, 0x5c, 0x40, 0xf9, 0xbd, - 0xa5, 0xfa, 0xdd, 0xee, 0x6f, 0xe3, 0xb0, 0x54, 0x4c, 0x5c, 0xe5, 0x7e, 0xf7, 0x2e, 0x68, 0xf1, - 0xfd, 0x89, 0x1d, 0x8d, 0x11, 0xb5, 0x7a, 0x41, 0x7c, 0x38, 0x8a, 0x33, 0xf4, 0x9c, 0xbc, 0x4c, - 0x3d, 0x52, 0xf3, 0xea, 0x84, 0xcc, 0x5d, 0xbe, 0xea, 0xb9, 0xcb, 0x17, 0x13, 0xe0, 0x58, 0xb4, - 0x4c, 0x80, 0xa8, 0xe1, 0xce, 0x39, 0x16, 0x2d, 0x13, 0x10, 0x13, 0x73, 0x01, 0xc2, 0x6b, 0x5b, - 0x12, 0x9f, 0x0b, 0x58, 0x06, 0x90, 0xe5, 0x55, 0xdf, 0x57, 0x97, 0xc9, 0xa6, 0x28, 0xae, 0xfa, - 0x7e, 0x69, 0x95, 0x39, 0x5d, 0x5a, 0x65, 0xa6, 0x77, 0xb3, 0x91, 0xdb, 0xcd, 0xcf, 0x01, 0x6e, - 0xbb, 0xd1, 0xa1, 0x30, 0x32, 0x2b, 0x6b, 0x1d, 0x57, 0xdd, 0x06, 0xd8, 0x27, 0x83, 0x58, 0x9e, - 0x27, 0x4d, 0xc7, 0x3e, 0x59, 0xf8, 0xf4, 0x23, 0xec, 0x48, 0xeb, 0xf0, 0x6f, 0x06, 0xeb, 0x86, - 0x18, 0x4b, 0x03, 0xf0, 0x6f, 0xfd, 0xf7, 0x35, 0x68, 0x6e, 0xe3, 0x9e, 0xe4, 0x7c, 0x01, 0x60, - 0x9f, 0x84, 0xa4, 0x4f, 0x5d, 0x1f, 0x8b, 0x2a, 0x7c, 0xd2, 0x48, 0x40, 0xbe, 0xbf, 0x1c, 0x9e, - 0x1a, 0xb0, 0xd7, 0x95, 0xc6, 0xe4, 0xdf, 0x0c, 0x76, 0x80, 0xad, 0x40, 0xda, 0x8f, 0x7f, 0xb3, - 0xbb, 0x4e, 0x44, 0x2d, 0xfb, 0x90, 0x1b, 0x6b, 0xc2, 0x10, 0x03, 0xfd, 0xcf, 0x35, 0x00, 0x03, - 0xf7, 0x08, 0xe5, 0xbe, 0xc6, 0xaa, 0xdb, 0x3d, 0xcb, 0x3e, 0x64, 0xf7, 0x05, 0xfe, 0xa2, 0x29, - 0x2c, 0xd1, 0x92, 0x30, 0xfe, 0xa2, 0xb9, 0x0c, 0xa0, 0x50, 0x64, 0xfe, 0x6a, 0x1a, 0x4d, 0x09, - 0x11, 0x37, 0x03, 0x15, 0xca, 0xf2, 0x11, 0x70, 0x98, 0xd3, 0xc4, 0xb2, 0x55, 0x4e, 0x3b, 0x0f, - 0xcd, 0xac, 0x2b, 0xf0, 0x54, 0xc0, 0xfd, 0xe0, 0x32, 0xcc, 0xa8, 0x27, 0x53, 0xee, 0x68, 0x52, - 0x95, 0xb6, 0x02, 0x32, 0xe7, 0xe2, 0xcf, 0x93, 0x4f, 0x29, 0xf6, 0x63, 0x1f, 0x68, 0x1a, 0x43, - 0x80, 0xfe, 0x0d, 0x80, 0xba, 0xd0, 0x77, 0x09, 0xda, 0x84, 0x49, 0xc6, 0x5c, 0x3d, 0x82, 0x2f, - 0xe5, 0x9f, 0x44, 0x87, 0x66, 0x30, 0x04, 0x6a, 0x32, 0x01, 0x8d, 0xa7, 0x12, 0xd0, 
0xe8, 0xfb, - 0x9c, 0xfe, 0x5d, 0x0d, 0x56, 0x64, 0xf9, 0xe8, 0xe2, 0x70, 0x9b, 0x1c, 0xb1, 0x52, 0xe2, 0x11, - 0x11, 0x42, 0x4e, 0x25, 0x73, 0x5e, 0x87, 0x8e, 0x83, 0x23, 0xea, 0xfa, 0x5c, 0xa0, 0xa9, 0x36, - 0x85, 0xbf, 0x22, 0x8b, 0x05, 0x2d, 0x26, 0xe6, 0x6f, 0x89, 0xe9, 0x1d, 0xab, 0x87, 0xd1, 0x55, - 0x98, 0x3f, 0xc4, 0x38, 0x30, 0x3d, 0x62, 0x5b, 0x9e, 0xa9, 0x62, 0x52, 0xd6, 0x47, 0x73, 0x6c, - 0xea, 0x01, 0x9b, 0xb9, 0x2d, 0xe2, 0x52, 0x8f, 0xe0, 0xd2, 0x31, 0x9a, 0xc8, 0xbc, 0xb4, 0x04, - 0xcd, 0x20, 0x24, 0x36, 0x8e, 0x98, 0xcf, 0xd6, 0xf8, 0x31, 0x35, 0x04, 0xa0, 0x6b, 0x30, 0x1f, - 0x0f, 0x3e, 0xc6, 0xa1, 0x8d, 0x7d, 0x6a, 0xed, 0x8b, 0x77, 0xd3, 0x71, 0xa3, 0x68, 0x4a, 0xff, - 0x4d, 0x0d, 0xf4, 0x9c, 0xd4, 0xbb, 0x21, 0xe9, 0x9d, 0xa2, 0x05, 0x37, 0x60, 0x81, 0xdb, 0x21, - 0xe4, 0x2c, 0x87, 0x86, 0x10, 0xd7, 0x98, 0xb3, 0x6c, 0x4e, 0x48, 0x53, 0x96, 0xe8, 0xc3, 0xe5, - 0x63, 0xd7, 0xf4, 0x6f, 0xb2, 0xc5, 0x3f, 0xda, 0xd0, 0xfe, 0xa4, 0x8f, 0xc3, 0x41, 0xe2, 0xc1, - 0x35, 0xc2, 0x52, 0x0b, 0xd5, 0x07, 0x4a, 0x40, 0x58, 0xa6, 0xed, 0x86, 0xa4, 0x67, 0xc6, 0xad, - 0xa2, 0x71, 0x8e, 0xd2, 0x62, 0xc0, 0xbb, 0xa2, 0x5d, 0x84, 0xde, 0x83, 0xa9, 0xae, 0xeb, 0x51, - 0x2c, 0x9a, 0x33, 0xad, 0xcd, 0x97, 0xf3, 0x11, 0x91, 0x94, 0xb9, 0x7e, 0x97, 0x23, 0x1b, 0x92, - 0x08, 0xed, 0xc1, 0xbc, 0xeb, 0x07, 0xfc, 0xea, 0x15, 0xba, 0x96, 0xe7, 0x3e, 0x1b, 0x3e, 0x19, - 0xb6, 0x36, 0xdf, 0x18, 0xc1, 0xeb, 0x3e, 0xa3, 0xdc, 0x4d, 0x12, 0x1a, 0xc8, 0xcd, 0xc1, 0x10, - 0x86, 0x05, 0xd2, 0xa7, 0x79, 0x21, 0x93, 0x5c, 0xc8, 0xe6, 0x08, 0x21, 0x0f, 0x39, 0x69, 0x5a, - 0xca, 0x3c, 0xc9, 0x03, 0xb5, 0x1d, 0x98, 0x12, 0xca, 0xb1, 0x1c, 0xd9, 0x75, 0xb1, 0xa7, 0xfa, - 0x4a, 0x62, 0xc0, 0xd2, 0x00, 0x09, 0x70, 0x68, 0xf9, 0x2a, 0xdd, 0xa9, 0xe1, 0xb0, 0xbf, 0x51, - 0x4f, 0xf4, 0x37, 0xb4, 0x3f, 0x4d, 0x02, 0xca, 0x6b, 0xa8, 0xde, 0x41, 0x43, 0x1c, 0xb1, 0x14, - 0x92, 0xcc, 0xaf, 0xb3, 0x09, 0x38, 0xcf, 0xb1, 0x9f, 0x41, 0xd3, 0x8e, 0x8e, 0x4c, 0x6e, 0x12, - 0x2e, 0xb3, 0xb5, 0x79, 0xe3, 0xc4, 0x26, 0x5d, 0xdf, 0xda, 0x7d, 0xcc, 0xa1, 0x46, 0xc3, 0x8e, - 0x8e, 0xf8, 0x17, 0xfa, 0x11, 0xc0, 0x57, 0x11, 0xf1, 0x25, 0x67, 0xb1, 0xf1, 0xef, 0x9e, 0x9c, - 0xf3, 0x87, 0xbb, 0x0f, 0x77, 0x04, 0xeb, 0x26, 0x63, 0x27, 0x78, 0xdb, 0x30, 0x13, 0x58, 0xe1, - 0x93, 0x3e, 0xa6, 0x92, 0xbd, 0xf0, 0x85, 0xf7, 0x4f, 0xce, 0xfe, 0x63, 0xc1, 0x46, 0x48, 0x68, - 0x07, 0x89, 0x91, 0xf6, 0xdd, 0x38, 0x34, 0x94, 0x5e, 0xec, 0xf6, 0xc6, 0x3d, 0x5c, 0xbc, 0x61, - 0x98, 0xae, 0xdf, 0x25, 0xd2, 0xa2, 0x67, 0x18, 0x5c, 0x3c, 0x63, 0xf0, 0xec, 0xbf, 0x06, 0x73, - 0x21, 0xb6, 0x49, 0xe8, 0xb0, 0x1a, 0xd7, 0xed, 0xb9, 0xcc, 0xed, 0xc5, 0x5e, 0xce, 0x0a, 0xf8, - 0x6d, 0x05, 0x46, 0xaf, 0xc2, 0x2c, 0xdf, 0xf6, 0x04, 0x66, 0x5d, 0xf1, 0xc4, 0x5e, 0x02, 0x71, - 0x0d, 0xe6, 0x9e, 0xf4, 0x59, 0xde, 0xb0, 0x0f, 0xac, 0xd0, 0xb2, 0x29, 0x89, 0x5f, 0x13, 0x66, - 0x39, 0x7c, 0x2b, 0x06, 0xa3, 0xb7, 0x60, 0x51, 0xa0, 0xe2, 0xc8, 0xb6, 0x82, 0x98, 0x02, 0x87, - 0xf2, 0xb2, 0xb9, 0xc0, 0x67, 0xef, 0xf0, 0xc9, 0x2d, 0x35, 0x87, 0x34, 0x68, 0xd8, 0xa4, 0xd7, - 0xc3, 0x3e, 0x8d, 0x64, 0xfb, 0x2f, 0x1e, 0xa3, 0x9b, 0xb0, 0x6c, 0x79, 0x1e, 0xf9, 0xda, 0xe4, - 0x94, 0x8e, 0x99, 0xd3, 0x4e, 0x5c, 0x3d, 0x35, 0x8e, 0xf4, 0x09, 0xc7, 0x31, 0xd2, 0x8a, 0x6a, - 0x17, 0xa1, 0x19, 0xef, 0x23, 0xab, 0x18, 0x12, 0x0e, 0xc9, 0xbf, 0xb5, 0x33, 0xd0, 0x4e, 0xee, - 0x84, 0xf6, 0xf7, 0x3a, 0xcc, 0x17, 0x04, 0x15, 0xfa, 0x02, 0x80, 0x79, 0xab, 0x08, 0x2d, 0xe9, - 0xae, 0xff, 0x77, 0xf2, 0xe0, 0x64, 0xfe, 0x2a, 0xc0, 0x06, 0xf3, 0x7e, 0xf1, 0x89, 0x7e, 0x0c, - 0x2d, 0xee, 
0xb1, 0x92, 0xbb, 0x70, 0xd9, 0xf7, 0xbe, 0x07, 0x77, 0xa6, 0xab, 0x64, 0xcf, 0x63, - 0x40, 0x7c, 0x6b, 0x7f, 0xad, 0x41, 0x33, 0x16, 0xcc, 0xea, 0x1f, 0xb1, 0x51, 0x7c, 0xaf, 0x23, - 0x55, 0xff, 0x70, 0xd8, 0x5d, 0x0e, 0xfa, 0xaf, 0x74, 0x25, 0xed, 0x1d, 0x80, 0xa1, 0xfe, 0x85, - 0x2a, 0xd4, 0x0a, 0x55, 0xd0, 0xd7, 0x60, 0x86, 0x59, 0xd6, 0xc5, 0xce, 0x2e, 0x0d, 0xdd, 0x80, - 0x37, 0xea, 0x05, 0x4e, 0x24, 0x2f, 0x90, 0x6a, 0xb8, 0xf9, 0xd3, 0x25, 0x68, 0x27, 0x1f, 0xd0, - 0xd0, 0x97, 0xd0, 0x4a, 0xfc, 0x90, 0x80, 0x5e, 0xca, 0x6f, 0x5a, 0xfe, 0x07, 0x07, 0xed, 0xe5, - 0x11, 0x58, 0xf2, 0x8e, 0x35, 0x86, 0x0c, 0x98, 0x96, 0x4d, 0x6c, 0xb4, 0x72, 0x4c, 0x7f, 0x5b, - 0x70, 0xbd, 0x34, 0xb2, 0x03, 0xae, 0x8f, 0x5d, 0xab, 0x21, 0x1f, 0xce, 0xe6, 0x7a, 0xca, 0xe8, - 0x4a, 0x9e, 0xb6, 0xac, 0x63, 0xad, 0xbd, 0x56, 0x09, 0x37, 0xd6, 0x81, 0xc2, 0x7c, 0x41, 0x93, - 0x18, 0xbd, 0x3e, 0x82, 0x4b, 0xaa, 0x51, 0xad, 0x5d, 0xad, 0x88, 0x1d, 0x4b, 0x7d, 0x02, 0x28, - 0xdf, 0x41, 0x46, 0xaf, 0x8d, 0x64, 0x33, 0xec, 0x50, 0x6b, 0xaf, 0x57, 0x43, 0x2e, 0x55, 0x54, - 0xf4, 0x96, 0x47, 0x2a, 0x9a, 0xea, 0x5e, 0x8f, 0x54, 0x34, 0xd3, 0xb0, 0x1e, 0x43, 0x87, 0x30, - 0x97, 0xed, 0x3b, 0xa3, 0xb5, 0xb2, 0xbf, 0x5f, 0x72, 0x6d, 0x6d, 0xed, 0x4a, 0x15, 0xd4, 0x58, - 0x18, 0x86, 0x33, 0xe9, 0x3e, 0x2f, 0x7a, 0x35, 0x4f, 0x5f, 0xd8, 0xe9, 0xd6, 0x56, 0x47, 0x23, - 0x26, 0x75, 0xca, 0xf6, 0x7e, 0x8b, 0x74, 0x2a, 0x69, 0x2c, 0x17, 0xe9, 0x54, 0xd6, 0x4a, 0xd6, - 0xc7, 0xd0, 0x37, 0xaa, 0xa1, 0x98, 0xe9, 0x89, 0xa2, 0xf5, 0x32, 0x36, 0xc5, 0x4d, 0x59, 0x6d, - 0xa3, 0x32, 0x7e, 0x22, 0x1a, 0xbf, 0x84, 0x56, 0xa2, 0x35, 0x5a, 0x94, 0x3f, 0xf2, 0xcd, 0xd6, - 0xa2, 0xfc, 0x51, 0xd4, 0x5f, 0x1d, 0x43, 0x7b, 0x30, 0x93, 0x6a, 0x96, 0xa2, 0x57, 0xca, 0x28, - 0xd3, 0x6f, 0x8a, 0xda, 0xab, 0x23, 0xf1, 0x62, 0x19, 0xa6, 0xca, 0x88, 0x32, 0x05, 0x96, 0x2e, - 0x2e, 0x9d, 0x03, 0x5f, 0x19, 0x85, 0x96, 0x0a, 0xe5, 0x5c, 0x4b, 0xb5, 0x30, 0x94, 0xcb, 0x5a, - 0xb6, 0x85, 0xa1, 0x5c, 0xde, 0xa5, 0x1d, 0x43, 0x07, 0x30, 0x9b, 0x69, 0xa7, 0xa2, 0xd5, 0x32, - 0x16, 0xd9, 0x56, 0xae, 0xb6, 0x56, 0x01, 0x33, 0x96, 0xf4, 0x43, 0x75, 0x81, 0xe7, 0x2e, 0x77, - 0xb9, 0x9c, 0x74, 0xe8, 0x67, 0x2f, 0x1d, 0x8f, 0x14, 0xb3, 0xfe, 0x1a, 0x16, 0x8a, 0x5e, 0xd9, - 0xd0, 0xd5, 0xa2, 0x67, 0x81, 0xd2, 0xa7, 0x3c, 0x6d, 0xbd, 0x2a, 0x7a, 0x2c, 0xf8, 0x53, 0x68, - 0xa8, 0x96, 0x22, 0x2a, 0x38, 0x94, 0x32, 0x4d, 0x58, 0x4d, 0x3f, 0x0e, 0x25, 0x11, 0x2a, 0x3d, - 0x95, 0x15, 0x86, 0xbd, 0xbe, 0xf2, 0xac, 0x90, 0xeb, 0x4a, 0x96, 0x67, 0x85, 0x7c, 0xeb, 0x90, - 0x8b, 0x8b, 0xdd, 0x2e, 0xd9, 0x1a, 0x2b, 0x77, 0xbb, 0x82, 0xce, 0x5f, 0xb9, 0xdb, 0x15, 0x76, - 0xdb, 0xc6, 0xd0, 0x4f, 0xd4, 0xef, 0x01, 0xd9, 0x8e, 0x18, 0x2a, 0xcd, 0x2d, 0x25, 0x9d, 0x39, - 0xed, 0x5a, 0x75, 0x82, 0x58, 0xfc, 0x33, 0x95, 0x09, 0x33, 0x1d, 0xb1, 0xf2, 0x4c, 0x58, 0xdc, - 0x97, 0xd3, 0x36, 0x2a, 0xe3, 0xe7, 0x83, 0x3c, 0xd9, 0x32, 0x2a, 0xb7, 0x76, 0x41, 0x97, 0xad, - 0xdc, 0xda, 0x85, 0x5d, 0x28, 0x1e, 0x1f, 0x45, 0xed, 0xa0, 0xa2, 0xf8, 0x38, 0xa6, 0x5f, 0xa5, - 0xad, 0x57, 0x45, 0x4f, 0x15, 0x0a, 0xf9, 0x7e, 0x0f, 0x1a, 0xb9, 0xfe, 0xd4, 0x19, 0x70, 0xb5, - 0x22, 0x76, 0xf9, 0xee, 0xaa, 0x33, 0x61, 0xa4, 0x02, 0x99, 0xb3, 0x61, 0xa3, 0x32, 0x7e, 0x2c, - 0x3b, 0x50, 0x3f, 0x9b, 0x24, 0x7a, 0x35, 0xe8, 0xca, 0x08, 0x3e, 0x89, 0x5e, 0x93, 0xf6, 0x5a, - 0x25, 0xdc, 0xa2, 0xe8, 0x4d, 0x76, 0x4f, 0x8e, 0xf3, 0xa7, 0x5c, 0xcb, 0xe7, 0x38, 0x7f, 0x2a, - 0x68, 0xc8, 0x14, 0x44, 0xaf, 0x6a, 0x9a, 0x8c, 0x8e, 0xde, 0x4c, 0xf3, 0x66, 0x74, 0xf4, 0xe6, - 0xfa, 0x31, 0x63, 0xe8, 0x17, 0xc3, 
0x9f, 0x10, 0xf2, 0x4f, 0x98, 0x68, 0xb3, 0x34, 0x15, 0x95, - 0xbe, 0xdc, 0x6a, 0x6f, 0x9e, 0x88, 0x26, 0x61, 0xfc, 0x5f, 0xd7, 0x54, 0x47, 0xb3, 0xf0, 0x0d, - 0x11, 0xbd, 0x55, 0x81, 0x71, 0xee, 0x19, 0x54, 0x7b, 0xfb, 0x84, 0x54, 0x89, 0x05, 0x3d, 0x80, - 0x49, 0x7e, 0x77, 0x46, 0x17, 0x8e, 0xbf, 0x54, 0x6b, 0x17, 0x8b, 0xe7, 0xe3, 0xab, 0x21, 0xe3, - 0xb6, 0x37, 0xc5, 0x7f, 0x67, 0x7f, 0xf3, 0x5f, 0x01, 0x00, 0x00, 0xff, 0xff, 0xdd, 0x00, 0x24, - 0xb7, 0xe5, 0x2e, 0x00, 0x00, + // 3250 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xd4, 0x1b, 0xcb, 0x6e, 0x1c, 0xc7, + 0x91, 0xcb, 0xe5, 0x63, 0xb7, 0x76, 0x57, 0xa4, 0x9a, 0x32, 0xb5, 0x1e, 0x52, 0x12, 0x35, 0xf2, + 0x43, 0x92, 0x2d, 0x52, 0xa6, 0xed, 0x58, 0x91, 0x63, 0xc7, 0x12, 0xf5, 0xb0, 0x6c, 0x91, 0xb2, + 0x87, 0xb2, 0xec, 0xc4, 0x46, 0x06, 0xc3, 0x99, 0x5e, 0x72, 0xcc, 0xd9, 0xe9, 0xd1, 0x4c, 0x2f, + 0xad, 0x15, 0x9c, 0x93, 0x03, 0x24, 0x40, 0x90, 0x1c, 0x82, 0x5c, 0x72, 0x0c, 0x72, 0xcf, 0x35, + 0xbf, 0xe0, 0x1f, 0x08, 0x90, 0x53, 0x2e, 0x39, 0xe7, 0x90, 0x43, 0x80, 0x00, 0xb9, 0x04, 0xfd, + 0x9a, 0x9d, 0x27, 0x77, 0x68, 0x31, 0x08, 0x72, 0x9b, 0xae, 0xae, 0xae, 0xea, 0xaa, 0xae, 0xaa, + 0xae, 0xae, 0xda, 0x85, 0x85, 0x03, 0xe2, 0x0d, 0xfa, 0xd8, 0x8c, 0x70, 0x78, 0x80, 0xc3, 0xd5, + 0x20, 0x24, 0x94, 0xa0, 0xf9, 0x14, 0xd0, 0x0c, 0x76, 0xf4, 0x35, 0x40, 0x37, 0x2d, 0x6a, 0xef, + 0xdd, 0xc2, 0x1e, 0xa6, 0xd8, 0xc0, 0x8f, 0x07, 0x38, 0xa2, 0xe8, 0x79, 0x68, 0xf4, 0x5c, 0x0f, + 0x9b, 0xae, 0x13, 0x75, 0x6b, 0x2b, 0xf5, 0x8b, 0x4d, 0x63, 0x96, 0x8d, 0xef, 0x39, 0x91, 0xfe, + 0x00, 0x16, 0x52, 0x0b, 0xa2, 0x80, 0xf8, 0x11, 0x46, 0xd7, 0x60, 0x36, 0xc4, 0xd1, 0xc0, 0xa3, + 0x62, 0x41, 0x6b, 0xfd, 0xec, 0x6a, 0x96, 0xd7, 0x6a, 0xbc, 0x64, 0xe0, 0x51, 0x43, 0xa1, 0xeb, + 0xdf, 0xd4, 0xa0, 0x9d, 0x9c, 0x41, 0xa7, 0x61, 0x56, 0x32, 0xef, 0xd6, 0x56, 0x6a, 0x17, 0x9b, + 0xc6, 0x8c, 0xe0, 0x8d, 0x16, 0x61, 0x26, 0xa2, 0x16, 0x1d, 0x44, 0xdd, 0xc9, 0x95, 0xda, 0xc5, + 0x69, 0x43, 0x8e, 0xd0, 0x29, 0x98, 0xc6, 0x61, 0x48, 0xc2, 0x6e, 0x9d, 0xa3, 0x8b, 0x01, 0x42, + 0x30, 0x15, 0xb9, 0x4f, 0x71, 0x77, 0x6a, 0xa5, 0x76, 0xb1, 0x63, 0xf0, 0x6f, 0xd4, 0x85, 0xd9, + 0x03, 0x1c, 0x46, 0x2e, 0xf1, 0xbb, 0xd3, 0x1c, 0xac, 0x86, 0xfa, 0x07, 0x70, 0xe2, 0x8e, 0xeb, + 0xe1, 0xbb, 0x98, 0x2a, 0x1d, 0x94, 0x6e, 0xe3, 0x1c, 0xb4, 0x2c, 0xdb, 0xc6, 0x01, 0x35, 0x77, + 0x9f, 0xba, 0x01, 0xdf, 0x4b, 0xc3, 0x00, 0x01, 0xba, 0xfb, 0xd4, 0x0d, 0xf4, 0x9f, 0xd7, 0x61, + 0x2e, 0x26, 0x26, 0xf5, 0x83, 0x60, 0xca, 0xb1, 0xa8, 0xc5, 0x49, 0xb5, 0x0d, 0xfe, 0x8d, 0x5e, + 0x84, 0x13, 0x36, 0xf1, 0x29, 0xf6, 0xa9, 0xe9, 0x61, 0x7f, 0x97, 0xee, 0x71, 0x5a, 0x1d, 0xa3, + 0x23, 0xa1, 0xf7, 0x39, 0x10, 0x9d, 0x87, 0xb6, 0x42, 0xa3, 0xc3, 0x00, 0x4b, 0x29, 0x5b, 0x12, + 0xf6, 0x70, 0x18, 0x60, 0x74, 0x01, 0x3a, 0x9e, 0x15, 0x51, 0xb3, 0x4f, 0x1c, 0xb7, 0xe7, 0x62, + 0x87, 0x0b, 0x3d, 0x65, 0xb4, 0x19, 0x70, 0x53, 0xc2, 0x90, 0x26, 0x0e, 0xd5, 0xb7, 0xfa, 0x98, + 0x4b, 0xdf, 0x34, 0xe2, 0x31, 0xdb, 0x1e, 0xa6, 0xd6, 0x6e, 0x77, 0x86, 0xc3, 0xf9, 0x37, 0x3a, + 0x03, 0xe0, 0x46, 0x5c, 0xc6, 0x00, 0x3b, 0xdd, 0x59, 0x2e, 0x66, 0xd3, 0x8d, 0xee, 0x0a, 0x00, + 0x7a, 0x1f, 0x66, 0xf7, 0xb0, 0xe5, 0xe0, 0x30, 0xea, 0x36, 0xf8, 0x89, 0xaf, 0xe6, 0x4f, 0x3c, + 0xa3, 0x85, 0xd5, 0xf7, 0xc5, 0x82, 0xdb, 0x3e, 0x0d, 0x87, 0x86, 0x5a, 0x8e, 0x96, 0xa1, 0xc9, + 0x8f, 0x6c, 0x83, 0x38, 0xb8, 0xdb, 0xe4, 0x47, 0x3b, 0x02, 0x68, 0xd7, 0xa1, 0x9d, 0x5c, 0x86, + 0xe6, 0xa1, 0xbe, 0x8f, 0x87, 0xf2, 0x4c, 0xd8, 0x27, 0x3b, 0xff, 0x03, 0xcb, 
0x1b, 0x60, 0xae, + 0xbe, 0xa6, 0x21, 0x06, 0xd7, 0x27, 0xaf, 0xd5, 0xf4, 0x59, 0x98, 0xbe, 0xdd, 0x0f, 0xe8, 0x50, + 0x7f, 0x0b, 0xba, 0x8f, 0x2c, 0x7b, 0x30, 0xe8, 0x3f, 0xe2, 0x5b, 0xdc, 0xd8, 0xc3, 0xf6, 0xbe, + 0x3a, 0xe8, 0x25, 0x68, 0xca, 0x8d, 0xcb, 0xa3, 0xee, 0x18, 0x0d, 0x01, 0xb8, 0xe7, 0xe8, 0xef, + 0xc1, 0xf3, 0x05, 0x0b, 0xe5, 0xa1, 0x5e, 0x80, 0xce, 0xae, 0x15, 0xee, 0x58, 0xbb, 0xd8, 0x0c, + 0x2d, 0xea, 0x12, 0xbe, 0xba, 0x66, 0xb4, 0x25, 0xd0, 0x60, 0x30, 0xfd, 0x73, 0xd0, 0x52, 0x14, + 0x48, 0x3f, 0xb0, 0x6c, 0x5a, 0x85, 0x39, 0x5a, 0x81, 0x56, 0x10, 0x62, 0xcb, 0xf3, 0x88, 0x6d, + 0x51, 0x21, 0x5e, 0xdd, 0x48, 0x82, 0xf4, 0x33, 0xb0, 0x54, 0x48, 0x5c, 0x6c, 0x50, 0xbf, 0x96, + 0xd9, 0x3d, 0xe9, 0xf7, 0xdd, 0x4a, 0xac, 0xf5, 0xe5, 0xdc, 0xae, 0xf9, 0x4a, 0x49, 0xf7, 0xfb, + 0x99, 0x59, 0x0f, 0x5b, 0xfe, 0x20, 0xa8, 0x44, 0x38, 0xbb, 0x63, 0xb5, 0x34, 0xa6, 0x7c, 0x5a, + 0x04, 0x83, 0x0d, 0xe2, 0x79, 0xd8, 0xa6, 0x2e, 0xf1, 0x15, 0xd9, 0xb3, 0x00, 0x76, 0x0c, 0x94, + 0xe7, 0x9f, 0x80, 0xe8, 0x1a, 0x74, 0xf3, 0x4b, 0x25, 0xd9, 0xbf, 0xd6, 0xe0, 0xb9, 0x1b, 0x52, + 0x69, 0x82, 0x71, 0xa5, 0x03, 0x48, 0xb3, 0x9c, 0xcc, 0xb2, 0xcc, 0x1e, 0x50, 0x3d, 0x77, 0x40, + 0x0c, 0x23, 0xc4, 0x81, 0xe7, 0xda, 0x16, 0x27, 0x31, 0x25, 0x7c, 0x37, 0x01, 0x62, 0xf6, 0x4c, + 0xa9, 0x27, 0x3d, 0x92, 0x7d, 0xa2, 0x75, 0x58, 0xec, 0xe3, 0x3e, 0x09, 0x87, 0x66, 0xdf, 0x0a, + 0xcc, 0xbe, 0xf5, 0xc4, 0x64, 0xc1, 0xcb, 0xec, 0xef, 0x70, 0xf7, 0xec, 0x18, 0x48, 0xcc, 0x6e, + 0x5a, 0xc1, 0xa6, 0xf5, 0x64, 0xdb, 0x7d, 0x8a, 0x37, 0x77, 0xf4, 0x2e, 0x2c, 0x66, 0xe5, 0x93, + 0xa2, 0x7f, 0x0f, 0x4e, 0x0b, 0xc8, 0xf6, 0xd0, 0xb7, 0xb7, 0x79, 0xc4, 0xac, 0x74, 0x50, 0xff, + 0xae, 0x41, 0x37, 0xbf, 0x50, 0x5a, 0xfe, 0xb3, 0x6a, 0xed, 0xc8, 0x3a, 0x39, 0x07, 0x2d, 0x6a, + 0xb9, 0x9e, 0x49, 0x7a, 0xbd, 0x08, 0x53, 0xae, 0x88, 0x29, 0x03, 0x18, 0xe8, 0x01, 0x87, 0xa0, + 0x4b, 0x30, 0x6f, 0x0b, 0xeb, 0x37, 0x43, 0x7c, 0xe0, 0xf2, 0x18, 0x3f, 0xcb, 0x37, 0x36, 0x67, + 0x2b, 0xaf, 0x10, 0x60, 0xa4, 0x43, 0xc7, 0x75, 0x9e, 0x98, 0x3c, 0xba, 0xf3, 0x2b, 0xa2, 0xc1, + 0xa9, 0xb5, 0x5c, 0xe7, 0x09, 0x0b, 0x58, 0x4c, 0xa3, 0xfa, 0x23, 0x58, 0x16, 0xc2, 0xdf, 0xf3, + 0xed, 0x10, 0xf7, 0xb1, 0x4f, 0x2d, 0x6f, 0x83, 0x04, 0xc3, 0x4a, 0x66, 0xf3, 0x3c, 0x34, 0x22, + 0xd7, 0xb7, 0xb1, 0xe9, 0x8b, 0xab, 0x6a, 0xca, 0x98, 0xe5, 0xe3, 0xad, 0x48, 0xbf, 0x09, 0x67, + 0x4a, 0xe8, 0x4a, 0xcd, 0x9e, 0x87, 0x36, 0xdf, 0x98, 0x0c, 0xef, 0xf2, 0xc2, 0x68, 0x31, 0xd8, + 0x86, 0x00, 0xe9, 0xaf, 0x01, 0x12, 0x34, 0x36, 0xc9, 0xc0, 0xaf, 0xe6, 0xce, 0xcf, 0xc1, 0x42, + 0x6a, 0x89, 0xb4, 0x8d, 0xd7, 0xe1, 0x94, 0x00, 0x7f, 0xe2, 0xf7, 0x2b, 0xd3, 0x3a, 0x0d, 0xcf, + 0x65, 0x16, 0x49, 0x6a, 0xeb, 0x8a, 0x49, 0x3a, 0x99, 0x38, 0x94, 0xd8, 0xa2, 0xda, 0x41, 0x3a, + 0x9f, 0xe0, 0x91, 0x4b, 0x6c, 0xd8, 0x0a, 0xf7, 0x0d, 0x6c, 0x39, 0xc4, 0xf7, 0x86, 0x95, 0x23, + 0x57, 0xc1, 0x4a, 0x49, 0xf7, 0x53, 0x58, 0x54, 0x11, 0xcd, 0xef, 0xb9, 0xbb, 0x83, 0x10, 0x57, + 0x8d, 0xc4, 0x49, 0x93, 0x9d, 0xcc, 0x99, 0xac, 0xbe, 0xa6, 0xdc, 0x2c, 0x41, 0x58, 0x1e, 0x69, + 0x9c, 0x9f, 0xd4, 0x12, 0xf9, 0x89, 0xfe, 0xc7, 0x1a, 0x9c, 0x54, 0x2b, 0x2a, 0xda, 0xd5, 0x11, + 0x1d, 0xab, 0x5e, 0xea, 0x58, 0x53, 0x23, 0xc7, 0xba, 0x08, 0xf3, 0x11, 0x19, 0x84, 0x36, 0x36, + 0x59, 0x4e, 0x62, 0xfa, 0xec, 0x0e, 0x16, 0x7e, 0x77, 0x42, 0xc0, 0x6f, 0x59, 0xd4, 0xda, 0x22, + 0x0e, 0xd6, 0x7f, 0xa8, 0xcc, 0x2e, 0x65, 0xaf, 0x97, 0xe0, 0x24, 0x4f, 0x3d, 0xac, 0x20, 0xc0, + 0xbe, 0x63, 0x5a, 0x94, 0x19, 0x7d, 0x8d, 0x1b, 0xfd, 0x09, 0x36, 0x71, 0x83, 0xc3, 0x6f, 0xd0, + 0xad, 
0x48, 0xff, 0xed, 0x24, 0xcc, 0xb1, 0xb5, 0xcc, 0xc9, 0x2a, 0xc9, 0x3b, 0x0f, 0x75, 0xfc, + 0x84, 0x4a, 0x41, 0xd9, 0x27, 0x5a, 0x83, 0x05, 0xe9, 0xcd, 0x2e, 0xf1, 0x47, 0x8e, 0x5e, 0x17, + 0x71, 0x71, 0x34, 0x15, 0xfb, 0xfa, 0x39, 0x68, 0x45, 0x94, 0x04, 0x2a, 0x6e, 0x88, 0xbc, 0x08, + 0x18, 0x48, 0xc6, 0x8d, 0xb4, 0x4e, 0xa7, 0x0b, 0x74, 0xda, 0x76, 0x23, 0x13, 0xdb, 0xa6, 0xd8, + 0x15, 0x8f, 0x3c, 0x0d, 0x03, 0xdc, 0xe8, 0xb6, 0x2d, 0xb4, 0x81, 0xde, 0x85, 0x65, 0x77, 0xd7, + 0x27, 0x21, 0x36, 0xa5, 0x22, 0xb9, 0xff, 0xfa, 0x84, 0x9a, 0x3d, 0x32, 0xf0, 0x55, 0xe6, 0xd4, + 0x15, 0x38, 0xdb, 0x1c, 0x85, 0x69, 0x60, 0x8b, 0xd0, 0x3b, 0x6c, 0x5e, 0x7f, 0x13, 0xe6, 0x47, + 0x5a, 0xa9, 0x1e, 0x05, 0xbe, 0xa9, 0x29, 0x8b, 0x7b, 0x68, 0xb9, 0xde, 0x36, 0xf6, 0x1d, 0x1c, + 0x3e, 0x63, 0x74, 0x42, 0x57, 0xe1, 0x94, 0xeb, 0x78, 0xd8, 0xa4, 0x6e, 0x1f, 0x93, 0x01, 0x35, + 0x23, 0x6c, 0x13, 0xdf, 0x89, 0x94, 0x7e, 0xd9, 0xdc, 0x43, 0x31, 0xb5, 0x2d, 0x66, 0xf4, 0x9f, + 0xc5, 0xb7, 0x44, 0x72, 0x17, 0xa3, 0xfc, 0xc8, 0xc7, 0x98, 0x11, 0x14, 0xa9, 0x9e, 0x14, 0xa3, + 0x2d, 0x80, 0x22, 0xab, 0x63, 0x27, 0x24, 0x91, 0x76, 0x88, 0x33, 0xe4, 0x3b, 0x6a, 0x1b, 0x20, + 0x40, 0x37, 0x89, 0x33, 0xe4, 0xe1, 0x3a, 0x32, 0xb9, 0x91, 0xd9, 0x7b, 0x03, 0x7f, 0x9f, 0xef, + 0xa6, 0x61, 0xb4, 0xdc, 0xe8, 0xbe, 0x15, 0xd1, 0x0d, 0x06, 0xd2, 0xff, 0x54, 0x53, 0xf1, 0x82, + 0x6d, 0xc3, 0xc0, 0x36, 0x76, 0x0f, 0xfe, 0x07, 0xea, 0x60, 0x2b, 0xa4, 0x11, 0xa4, 0x72, 0x61, + 0xe9, 0x70, 0x48, 0xcc, 0xc9, 0x5b, 0x95, 0xcf, 0x8c, 0xc2, 0x55, 0x7a, 0xe3, 0x32, 0x5c, 0x7d, + 0xa1, 0xae, 0x8b, 0xdb, 0xf6, 0xf6, 0x9e, 0x15, 0x3a, 0xd1, 0x5d, 0xec, 0xe3, 0xd0, 0xa2, 0xc7, + 0x92, 0xbe, 0xe8, 0x2b, 0x70, 0xb6, 0x8c, 0xba, 0xe4, 0xff, 0xb9, 0xba, 0x06, 0x15, 0x86, 0x81, + 0x77, 0x06, 0xae, 0xe7, 0x1c, 0x0b, 0xfb, 0x0f, 0xb3, 0xc2, 0xc5, 0xc4, 0xa5, 0xfd, 0x5c, 0x86, + 0x93, 0x21, 0x07, 0x51, 0x33, 0x62, 0x08, 0xf1, 0x7b, 0xb4, 0x63, 0xcc, 0xc9, 0x09, 0xbe, 0x90, + 0xbd, 0x4b, 0x7f, 0x39, 0xa9, 0x2c, 0x40, 0x51, 0x3b, 0xb6, 0xb0, 0xba, 0x04, 0xcd, 0x11, 0xfb, + 0x3a, 0x67, 0xdf, 0x88, 0x24, 0x5f, 0x66, 0x9d, 0x36, 0x09, 0x86, 0x26, 0xb6, 0x45, 0x46, 0xc1, + 0x8f, 0xba, 0xc1, 0x9e, 0x67, 0xc1, 0xf0, 0xb6, 0xcd, 0x13, 0x8a, 0xea, 0x31, 0x36, 0x41, 0xed, + 0x4b, 0x41, 0x6d, 0x26, 0x49, 0xed, 0x4b, 0x4e, 0x4d, 0xe1, 0x1c, 0xb8, 0x3d, 0x81, 0x33, 0x3b, + 0xc2, 0x79, 0xe4, 0xf6, 0x18, 0xce, 0xc8, 0xaa, 0xd2, 0xca, 0x90, 0xa7, 0xfa, 0x15, 0x2c, 0xa5, + 0x67, 0xab, 0x5f, 0xd8, 0xcf, 0xa4, 0x2c, 0xfd, 0x6c, 0xd6, 0x9c, 0x32, 0xb7, 0xfe, 0x41, 0x76, + 0xdb, 0x95, 0x33, 0x9c, 0x67, 0xdb, 0xd7, 0x99, 0xac, 0x42, 0xd2, 0x69, 0xd2, 0x67, 0xd9, 0x6d, + 0x1f, 0x21, 0x5d, 0x3a, 0x9c, 0xf1, 0xb9, 0xac, 0x0b, 0x64, 0x73, 0xaa, 0xdf, 0xc5, 0xf1, 0x55, + 0x62, 0xb0, 0x8c, 0xa6, 0x72, 0x5c, 0x93, 0x7c, 0x65, 0x5d, 0x61, 0x56, 0xb2, 0x45, 0x8b, 0x30, + 0x23, 0xef, 0x43, 0xf1, 0x62, 0x91, 0xa3, 0x54, 0xc9, 0xa4, 0x2e, 0x4b, 0x26, 0xaa, 0x14, 0xc4, + 0xde, 0xdc, 0xd3, 0x22, 0x3c, 0xb2, 0xf1, 0x87, 0x78, 0xa8, 0x6f, 0x65, 0x3c, 0x4e, 0x6c, 0xed, + 0x90, 0x82, 0x87, 0xa8, 0x28, 0x38, 0xfc, 0xcc, 0x1d, 0x59, 0x38, 0x69, 0xba, 0xd2, 0x08, 0x1c, + 0xfd, 0x57, 0xb5, 0x11, 0xc1, 0x9b, 0x1e, 0xd9, 0x39, 0x46, 0xab, 0x4c, 0x4a, 0x51, 0x4f, 0x49, + 0x91, 0xac, 0x09, 0x4d, 0xa5, 0x6b, 0x42, 0x09, 0x27, 0x4a, 0x6e, 0xa7, 0x2c, 0x34, 0x3f, 0x24, + 0xc7, 0xf7, 0xb2, 0xcc, 0x87, 0xe6, 0x11, 0x75, 0xc9, 0xff, 0x3a, 0x2c, 0x31, 0x85, 0x0b, 0x28, + 0x7f, 0xb7, 0x54, 0x7f, 0xdb, 0xfd, 0x7d, 0x12, 0x96, 0x8b, 0x17, 0x57, 0x79, 0xdf, 0xbd, 0x0d, + 0x5a, 0xfc, 0x7e, 0x62, 0x57, 
0x63, 0x44, 0xad, 0x7e, 0x10, 0x5f, 0x8e, 0xe2, 0x0e, 0x3d, 0x2d, + 0x1f, 0x53, 0x0f, 0xd5, 0xbc, 0xba, 0x21, 0x73, 0x8f, 0xaf, 0x7a, 0xee, 0xf1, 0xc5, 0x18, 0x38, + 0x16, 0x2d, 0x63, 0x20, 0x72, 0xb8, 0xd3, 0x8e, 0x45, 0xcb, 0x18, 0xc4, 0x8b, 0x39, 0x03, 0x61, + 0xb5, 0x2d, 0x89, 0xcf, 0x19, 0x9c, 0x01, 0x90, 0xe9, 0xd5, 0xc0, 0x57, 0x8f, 0xc9, 0xa6, 0x48, + 0xae, 0x06, 0x7e, 0x69, 0x96, 0x39, 0x5b, 0x9a, 0x65, 0xa6, 0x4f, 0xb3, 0x91, 0x3b, 0xcd, 0xcf, + 0x00, 0x6e, 0xb9, 0xd1, 0xbe, 0x50, 0x32, 0x4b, 0x6b, 0x1d, 0x57, 0xbd, 0x06, 0xd8, 0x27, 0x83, + 0x58, 0x9e, 0x27, 0x55, 0xc7, 0x3e, 0x99, 0xfb, 0x0c, 0x22, 0xec, 0x48, 0xed, 0xf0, 0x6f, 0x06, + 0xeb, 0x85, 0x18, 0x4b, 0x05, 0xf0, 0x6f, 0xfd, 0x0f, 0x35, 0x68, 0x6e, 0xe2, 0xbe, 0xa4, 0x7c, + 0x16, 0x60, 0x97, 0x84, 0x64, 0x40, 0x5d, 0x1f, 0x8b, 0x2c, 0x7c, 0xda, 0x48, 0x40, 0xbe, 0x3b, + 0x1f, 0x1e, 0x1a, 0xb0, 0xd7, 0x93, 0xca, 0xe4, 0xdf, 0x0c, 0xb6, 0x87, 0xad, 0x40, 0xea, 0x8f, + 0x7f, 0xb3, 0xb7, 0x4e, 0x44, 0x2d, 0x7b, 0x9f, 0x2b, 0x6b, 0xca, 0x10, 0x03, 0xfd, 0x2f, 0x35, + 0x00, 0x03, 0xf7, 0x09, 0xe5, 0xb6, 0xc6, 0xb2, 0xdb, 0x1d, 0xcb, 0xde, 0x67, 0xef, 0x05, 0x5e, + 0xd1, 0x14, 0x9a, 0x68, 0x49, 0x18, 0xaf, 0x68, 0x9e, 0x01, 0x50, 0x28, 0x32, 0x7e, 0x35, 0x8d, + 0xa6, 0x84, 0x88, 0x97, 0x81, 0x72, 0x65, 0x59, 0x04, 0x1c, 0xc5, 0x34, 0xb1, 0x6d, 0x15, 0xd3, + 0x96, 0xa0, 0x99, 0x35, 0x05, 0x1e, 0x0a, 0xb8, 0x1d, 0x5c, 0x80, 0x8e, 0x2a, 0x99, 0x72, 0x43, + 0x93, 0xa2, 0xb4, 0x15, 0x90, 0x19, 0x17, 0x2f, 0x4f, 0x3e, 0xa1, 0xd8, 0x8f, 0x6d, 0xa0, 0x69, + 0x8c, 0x00, 0xfa, 0xd7, 0x00, 0xea, 0x41, 0xdf, 0x23, 0x68, 0x1d, 0xa6, 0x19, 0x71, 0x55, 0x04, + 0x5f, 0xce, 0x97, 0x44, 0x47, 0x6a, 0x30, 0x04, 0x6a, 0x32, 0x00, 0x4d, 0xa6, 0x02, 0xd0, 0xf8, + 0xf7, 0x9c, 0xfe, 0x6d, 0x0d, 0x56, 0x64, 0xfa, 0xe8, 0xe2, 0x70, 0x93, 0x1c, 0xb0, 0x54, 0xe2, + 0x21, 0x11, 0x4c, 0x8e, 0x25, 0x72, 0x5e, 0x83, 0xae, 0x83, 0x23, 0xea, 0xfa, 0x9c, 0xa1, 0xa9, + 0x0e, 0x85, 0x57, 0x91, 0xc5, 0x86, 0x16, 0x13, 0xf3, 0x37, 0xc5, 0xf4, 0x96, 0xd5, 0xc7, 0xe8, + 0x0a, 0x2c, 0xec, 0x63, 0x1c, 0x98, 0x1e, 0xb1, 0x2d, 0xcf, 0x54, 0x3e, 0x29, 0xf3, 0xa3, 0x79, + 0x36, 0x75, 0x9f, 0xcd, 0xdc, 0x12, 0x7e, 0xa9, 0x47, 0x70, 0xfe, 0x10, 0x49, 0x64, 0x5c, 0x5a, + 0x86, 0x66, 0x10, 0x12, 0x1b, 0x47, 0xcc, 0x66, 0x6b, 0xfc, 0x9a, 0x1a, 0x01, 0xd0, 0x55, 0x58, + 0x88, 0x07, 0x1f, 0xe1, 0xd0, 0xc6, 0x3e, 0xb5, 0x76, 0x45, 0xdd, 0x74, 0xd2, 0x28, 0x9a, 0xd2, + 0x7f, 0x53, 0x03, 0x3d, 0xc7, 0xf5, 0x4e, 0x48, 0xfa, 0xc7, 0xa8, 0xc1, 0x35, 0x38, 0xc5, 0xf5, + 0x10, 0x72, 0x92, 0x23, 0x45, 0x88, 0x67, 0xcc, 0x49, 0x36, 0x27, 0xb8, 0x29, 0x4d, 0x0c, 0xe0, + 0xc2, 0xa1, 0x7b, 0xfa, 0x2f, 0xe9, 0x62, 0x49, 0xdd, 0xbe, 0xe2, 0x65, 0x92, 0xba, 0x4e, 0xf4, + 0xdf, 0xd7, 0xd4, 0x65, 0x98, 0x9e, 0x95, 0x7b, 0xb9, 0x01, 0x1d, 0xc7, 0x8d, 0xf6, 0x4d, 0xd1, + 0x91, 0x39, 0xcc, 0xfe, 0x47, 0x71, 0xd0, 0x68, 0x3b, 0xf1, 0x37, 0x8e, 0xd0, 0x7b, 0xd0, 0x91, + 0x55, 0xcf, 0x44, 0x93, 0xa7, 0xb5, 0xbe, 0x94, 0x27, 0x11, 0xc7, 0x3b, 0xa3, 0x2d, 0x56, 0x88, + 0x91, 0xfe, 0xaf, 0x36, 0xb4, 0x3f, 0x1e, 0xe0, 0x70, 0x98, 0xa8, 0x18, 0x47, 0x58, 0x1e, 0x83, + 0x6a, 0x64, 0x25, 0x20, 0xec, 0xaa, 0xe8, 0x85, 0xa4, 0x6f, 0xc6, 0xbd, 0xae, 0x49, 0x8e, 0xd2, + 0x62, 0xc0, 0x3b, 0xa2, 0xdf, 0x85, 0xde, 0x81, 0x99, 0x9e, 0xeb, 0x51, 0x2c, 0xba, 0x4b, 0xad, + 0xf5, 0x17, 0xf3, 0xfb, 0x49, 0xf2, 0x5c, 0xbd, 0xc3, 0x91, 0x0d, 0xb9, 0x08, 0xed, 0xc0, 0x82, + 0xeb, 0x07, 0xfc, 0xed, 0x18, 0xba, 0x96, 0xe7, 0x3e, 0x1d, 0xd5, 0x3c, 0x5b, 0xeb, 0xaf, 0x8d, + 0xa1, 0x75, 0x8f, 0xad, 0xdc, 0x4e, 0x2e, 0x34, 0x90, 
0x9b, 0x83, 0x21, 0x0c, 0xa7, 0xc8, 0x80, + 0xe6, 0x99, 0x4c, 0x73, 0x26, 0xeb, 0x63, 0x98, 0x3c, 0xe0, 0x4b, 0xd3, 0x5c, 0x16, 0x48, 0x1e, + 0xa8, 0x6d, 0xc1, 0x8c, 0x10, 0x8e, 0x05, 0xf9, 0x9e, 0x8b, 0x3d, 0xd5, 0x18, 0x13, 0x03, 0x16, + 0xc7, 0x48, 0x80, 0x43, 0xcb, 0x57, 0xf1, 0x5a, 0x0d, 0x47, 0x0d, 0x9a, 0x7a, 0xa2, 0x41, 0xa3, + 0xfd, 0x79, 0x1a, 0x50, 0x5e, 0x42, 0x55, 0xc8, 0x0d, 0x71, 0xc4, 0x62, 0x60, 0xf2, 0x82, 0x98, + 0x4b, 0xc0, 0xf9, 0x25, 0xf1, 0x29, 0x34, 0xed, 0xe8, 0xc0, 0xe4, 0x2a, 0x91, 0xe6, 0x72, 0xfd, + 0xc8, 0x2a, 0x5d, 0xdd, 0xd8, 0x7e, 0xc4, 0xa1, 0x46, 0xc3, 0x8e, 0x0e, 0xf8, 0x17, 0xfa, 0x31, + 0xc0, 0x97, 0x11, 0xf1, 0x25, 0x65, 0x71, 0xf0, 0x6f, 0x1f, 0x9d, 0xf2, 0x07, 0xdb, 0x0f, 0xb6, + 0x04, 0xe9, 0x26, 0x23, 0x27, 0x68, 0xdb, 0xd0, 0x09, 0xac, 0xf0, 0xf1, 0x00, 0x53, 0x49, 0x5e, + 0xd8, 0xc2, 0xbb, 0x47, 0x27, 0xff, 0x91, 0x20, 0x23, 0x38, 0xb4, 0x83, 0xc4, 0x48, 0xfb, 0x76, + 0x12, 0x1a, 0x4a, 0x2e, 0xf6, 0xfc, 0xe4, 0x16, 0x2e, 0x8a, 0x30, 0xa6, 0xeb, 0xf7, 0x88, 0xd4, + 0xe8, 0x09, 0x06, 0x17, 0x75, 0x18, 0x7e, 0x7d, 0x5d, 0x82, 0xf9, 0x10, 0xdb, 0x24, 0x74, 0x58, + 0x92, 0xee, 0xf6, 0x5d, 0x66, 0xf6, 0xe2, 0x2c, 0xe7, 0x04, 0xfc, 0x96, 0x02, 0xa3, 0x97, 0x61, + 0x8e, 0x1f, 0x7b, 0x02, 0xb3, 0xae, 0x68, 0x62, 0x2f, 0x81, 0x78, 0x09, 0xe6, 0x1f, 0x0f, 0x58, + 0xe0, 0xb3, 0xf7, 0xac, 0xd0, 0xb2, 0x29, 0x89, 0xcb, 0x21, 0x73, 0x1c, 0xbe, 0x11, 0x83, 0xd1, + 0x1b, 0xb0, 0x28, 0x50, 0x71, 0x64, 0x5b, 0x41, 0xbc, 0x02, 0x87, 0xf2, 0xb5, 0x7c, 0x8a, 0xcf, + 0xde, 0xe6, 0x93, 0x1b, 0x6a, 0x0e, 0x69, 0xd0, 0xb0, 0x49, 0xbf, 0x8f, 0x7d, 0x1a, 0xc9, 0xfe, + 0x65, 0x3c, 0x46, 0x37, 0xe0, 0x8c, 0xe5, 0x79, 0xe4, 0x2b, 0x93, 0xaf, 0x74, 0xcc, 0x9c, 0x74, + 0xe2, 0xed, 0xac, 0x71, 0xa4, 0x8f, 0x39, 0x8e, 0x91, 0x16, 0x54, 0x3b, 0x07, 0xcd, 0xf8, 0x1c, + 0x59, 0xca, 0x93, 0x30, 0x48, 0xfe, 0xad, 0x9d, 0x80, 0x76, 0xf2, 0x24, 0xb4, 0x7f, 0xd4, 0x61, + 0xa1, 0xc0, 0xa9, 0xd0, 0xe7, 0x00, 0xcc, 0x5a, 0x85, 0x6b, 0x49, 0x73, 0xfd, 0xc1, 0xd1, 0x9d, + 0x93, 0xd9, 0xab, 0x00, 0x1b, 0xcc, 0xfa, 0xc5, 0x27, 0xfa, 0x09, 0xb4, 0xb8, 0xc5, 0x4a, 0xea, + 0xc2, 0x64, 0xdf, 0xf9, 0x0e, 0xd4, 0x99, 0xac, 0x92, 0x3c, 0xf7, 0x01, 0xf1, 0xad, 0xfd, 0xad, + 0x06, 0xcd, 0x98, 0x31, 0x4b, 0xe0, 0xc4, 0x41, 0xf1, 0xb3, 0x8e, 0x54, 0x02, 0xc7, 0x61, 0x77, + 0x38, 0xe8, 0xff, 0xd2, 0x94, 0xb4, 0xb7, 0x00, 0x46, 0xf2, 0x17, 0x8a, 0x50, 0x2b, 0x14, 0x41, + 0xbf, 0x04, 0x1d, 0xa6, 0x59, 0x17, 0x3b, 0xdb, 0x34, 0x74, 0x03, 0xfe, 0x4b, 0x03, 0x81, 0x13, + 0xc9, 0x17, 0xb0, 0x1a, 0xae, 0xff, 0x73, 0x19, 0xda, 0xc9, 0x9b, 0x14, 0x7d, 0x01, 0xad, 0xc4, + 0x2f, 0x2a, 0xd0, 0x0b, 0xf9, 0x43, 0xcb, 0xff, 0x42, 0x43, 0x7b, 0x71, 0x0c, 0x96, 0x7c, 0x24, + 0x4e, 0x20, 0x03, 0x66, 0x65, 0x17, 0x1e, 0xad, 0x1c, 0xd2, 0xa0, 0x17, 0x54, 0xcf, 0x8f, 0x6d, + 0xe1, 0xeb, 0x13, 0x57, 0x6b, 0xc8, 0x87, 0x93, 0xb9, 0xa6, 0x38, 0xba, 0x9c, 0x5f, 0x5b, 0xd6, + 0x72, 0xd7, 0x5e, 0xa9, 0x84, 0x1b, 0xcb, 0x40, 0x61, 0xa1, 0xa0, 0xcb, 0x8d, 0x5e, 0x1d, 0x43, + 0x25, 0xd5, 0x69, 0xd7, 0xae, 0x54, 0xc4, 0x8e, 0xb9, 0x3e, 0x06, 0x94, 0x6f, 0x81, 0xa3, 0x57, + 0xc6, 0x92, 0x19, 0xb5, 0xd8, 0xb5, 0x57, 0xab, 0x21, 0x97, 0x0a, 0x2a, 0x9a, 0xe3, 0x63, 0x05, + 0x4d, 0xb5, 0xdf, 0xc7, 0x0a, 0x9a, 0xe9, 0xb8, 0x4f, 0xa0, 0x7d, 0x98, 0xcf, 0x36, 0xce, 0xd1, + 0xa5, 0xb2, 0x9f, 0xef, 0xe4, 0xfa, 0xf2, 0xda, 0xe5, 0x2a, 0xa8, 0x31, 0x33, 0x0c, 0x27, 0xd2, + 0x8d, 0x6a, 0xf4, 0x72, 0x7e, 0x7d, 0x61, 0xab, 0x5e, 0xbb, 0x38, 0x1e, 0x31, 0x29, 0x53, 0xb6, + 0x79, 0x5d, 0x24, 0x53, 0x49, 0x67, 0xbc, 0x48, 0xa6, 0xb2, 0x5e, 0xb8, 0x3e, 
0x81, 0xbe, 0x56, + 0x1d, 0xd1, 0x4c, 0x53, 0x17, 0xad, 0x96, 0x91, 0x29, 0xee, 0x2a, 0x6b, 0x6b, 0x95, 0xf1, 0x13, + 0xde, 0xf8, 0x05, 0xb4, 0x12, 0xbd, 0xdd, 0xa2, 0xf8, 0x91, 0xef, 0x16, 0x17, 0xc5, 0x8f, 0xa2, + 0x06, 0xf1, 0x04, 0xda, 0x81, 0x4e, 0xaa, 0xdb, 0x8b, 0x5e, 0x2a, 0x5b, 0x99, 0x2e, 0x8a, 0x6a, + 0x2f, 0x8f, 0xc5, 0x8b, 0x79, 0x98, 0x2a, 0x22, 0xca, 0x10, 0x58, 0xba, 0xb9, 0x74, 0x0c, 0x7c, + 0x69, 0x1c, 0x5a, 0xca, 0x95, 0x73, 0x3d, 0xe1, 0x42, 0x57, 0x2e, 0xeb, 0x39, 0x17, 0xba, 0x72, + 0x79, 0x9b, 0x79, 0x02, 0xed, 0xc1, 0x5c, 0xa6, 0x1f, 0x8c, 0x2e, 0x96, 0x91, 0xc8, 0xf6, 0xa2, + 0xb5, 0x4b, 0x15, 0x30, 0x63, 0x4e, 0x3f, 0x52, 0x15, 0x08, 0x6e, 0x72, 0x17, 0xca, 0x97, 0x8e, + 0xec, 0xec, 0x85, 0xc3, 0x91, 0x62, 0xd2, 0x5f, 0xc1, 0xa9, 0xa2, 0x32, 0x21, 0xba, 0x52, 0x54, + 0xd7, 0x28, 0xad, 0x45, 0x6a, 0xab, 0x55, 0xd1, 0x63, 0xc6, 0x9f, 0x40, 0x43, 0xf5, 0x44, 0x51, + 0xc1, 0xa5, 0x94, 0xe9, 0x22, 0x6b, 0xfa, 0x61, 0x28, 0x09, 0x57, 0xe9, 0xab, 0xa8, 0x30, 0x6a, + 0x56, 0x96, 0x47, 0x85, 0x5c, 0x5b, 0xb5, 0x3c, 0x2a, 0xe4, 0x7b, 0x9f, 0x9c, 0x5d, 0x6c, 0x76, + 0xc9, 0xde, 0x5e, 0xb9, 0xd9, 0x15, 0xb4, 0x2e, 0xcb, 0xcd, 0xae, 0xb0, 0x5d, 0x38, 0x81, 0x7e, + 0xaa, 0x7e, 0xdf, 0x90, 0x6d, 0xe9, 0xa1, 0xd2, 0xd8, 0x52, 0xd2, 0x5a, 0xd4, 0xae, 0x56, 0x5f, + 0x10, 0xb3, 0x7f, 0xaa, 0x22, 0x61, 0xa6, 0xa5, 0x57, 0x1e, 0x09, 0x8b, 0x1b, 0x8b, 0xda, 0x5a, + 0x65, 0xfc, 0xbc, 0x93, 0x27, 0x7b, 0x5e, 0xe5, 0xda, 0x2e, 0x68, 0x13, 0x96, 0x6b, 0xbb, 0xb0, + 0x8d, 0xc6, 0xfd, 0xa3, 0xa8, 0x9f, 0x55, 0xe4, 0x1f, 0x87, 0x34, 0xdc, 0xb4, 0xd5, 0xaa, 0xe8, + 0xa9, 0x44, 0x21, 0xdf, 0xb0, 0x42, 0x63, 0xf7, 0x9f, 0xba, 0x03, 0xae, 0x54, 0xc4, 0x2e, 0x3f, + 0x5d, 0x75, 0x27, 0x8c, 0x15, 0x20, 0x73, 0x37, 0xac, 0x55, 0xc6, 0x8f, 0x79, 0x07, 0xea, 0xd7, + 0x32, 0x89, 0x66, 0x13, 0xba, 0x3c, 0x86, 0x4e, 0xa2, 0x59, 0xa6, 0xbd, 0x52, 0x09, 0xb7, 0xc8, + 0x7b, 0x93, 0xed, 0x9f, 0xc3, 0xec, 0x29, 0xd7, 0xb3, 0x3a, 0xcc, 0x9e, 0x0a, 0x3a, 0x4a, 0x05, + 0xde, 0xab, 0xba, 0x3e, 0xe3, 0xbd, 0x37, 0xd3, 0x7d, 0x1a, 0xef, 0xbd, 0xb9, 0x86, 0xd2, 0x04, + 0xfa, 0xc5, 0xe8, 0x57, 0x14, 0xf9, 0x1a, 0x2c, 0x5a, 0x2f, 0x0d, 0x45, 0xa5, 0xa5, 0x67, 0xed, + 0xf5, 0x23, 0xad, 0x49, 0x28, 0xff, 0xd7, 0x35, 0xd5, 0x92, 0x2d, 0x2c, 0x82, 0xa2, 0x37, 0x2a, + 0x10, 0xce, 0xd5, 0x71, 0xb5, 0x37, 0x8f, 0xb8, 0xaa, 0xc8, 0x1a, 0x92, 0xf5, 0xcf, 0x72, 0x6b, + 0x28, 0xa8, 0xa1, 0x96, 0x5b, 0x43, 0x51, 0x49, 0x55, 0x9f, 0x40, 0xf7, 0x61, 0x9a, 0x3f, 0xd7, + 0xd1, 0xd9, 0xc3, 0xdf, 0xf1, 0xda, 0xb9, 0xe2, 0xf9, 0xf8, 0x35, 0xca, 0x04, 0xd8, 0x99, 0xe1, + 0x7f, 0x01, 0x78, 0xfd, 0x3f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x74, 0xac, 0x01, 0xab, 0x19, 0x30, + 0x00, 0x00, } diff --git a/weed/server/master_server.go b/weed/server/master_server.go index b3cc310e6..f5f547a32 100644 --- a/weed/server/master_server.go +++ b/weed/server/master_server.go @@ -115,9 +115,11 @@ func NewMasterServer(r *mux.Router, option *MasterOption, peers []string) *Maste r.HandleFunc("/vol/status", ms.proxyToLeader(ms.guard.WhiteList(ms.volumeStatusHandler))) r.HandleFunc("/vol/vacuum", ms.proxyToLeader(ms.guard.WhiteList(ms.volumeVacuumHandler))) r.HandleFunc("/submit", ms.guard.WhiteList(ms.submitFromMasterServerHandler)) - r.HandleFunc("/stats/health", ms.guard.WhiteList(statsHealthHandler)) - r.HandleFunc("/stats/counter", ms.guard.WhiteList(statsCounterHandler)) - r.HandleFunc("/stats/memory", ms.guard.WhiteList(statsMemoryHandler)) + /* + r.HandleFunc("/stats/health", ms.guard.WhiteList(statsHealthHandler)) + r.HandleFunc("/stats/counter", 
ms.guard.WhiteList(statsCounterHandler)) + r.HandleFunc("/stats/memory", ms.guard.WhiteList(statsMemoryHandler)) + */ r.HandleFunc("/{fileId}", ms.redirectHandler) } @@ -220,7 +222,7 @@ func (ms *MasterServer) startAdminScripts() { commandEnv.MasterClient.WaitUntilConnected() c := time.Tick(time.Duration(sleepMinutes) * time.Minute) - for _ = range c { + for range c { if ms.Topo.IsLeader() { for _, line := range scriptLines { diff --git a/weed/server/volume_grpc_admin.go b/weed/server/volume_grpc_admin.go index 43987b748..27b21ac09 100644 --- a/weed/server/volume_grpc_admin.go +++ b/weed/server/volume_grpc_admin.go @@ -3,9 +3,11 @@ package weed_server import ( "context" "fmt" + "path/filepath" "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb" + "github.com/chrislusf/seaweedfs/weed/stats" "github.com/chrislusf/seaweedfs/weed/storage/needle" "github.com/chrislusf/seaweedfs/weed/storage/super_block" ) @@ -148,3 +150,19 @@ func (vs *VolumeServer) VolumeMarkReadonly(ctx context.Context, req *volume_serv return resp, err } + +func (vs *VolumeServer) VolumeServerStatus(ctx context.Context, req *volume_server_pb.VolumeServerStatusRequest) (*volume_server_pb.VolumeServerStatusResponse, error) { + + resp := &volume_server_pb.VolumeServerStatusResponse{} + + for _, loc := range vs.store.Locations { + if dir, e := filepath.Abs(loc.Directory); e == nil { + resp.DiskStatuses = append(resp.DiskStatuses, stats.NewDiskStatus(dir)) + } + } + + resp.MemoryStatus = stats.MemStat() + + return resp, nil + +} diff --git a/weed/server/volume_server.go b/weed/server/volume_server.go index 0fdcf662a..2d716edc1 100644 --- a/weed/server/volume_server.go +++ b/weed/server/volume_server.go @@ -77,9 +77,11 @@ func NewVolumeServer(adminMux, publicMux *http.ServeMux, ip string, // only expose the volume server details for safe environments adminMux.HandleFunc("/ui/index.html", vs.uiStatusHandler) adminMux.HandleFunc("/status", vs.guard.WhiteList(vs.statusHandler)) - adminMux.HandleFunc("/stats/counter", vs.guard.WhiteList(statsCounterHandler)) - adminMux.HandleFunc("/stats/memory", vs.guard.WhiteList(statsMemoryHandler)) - adminMux.HandleFunc("/stats/disk", vs.guard.WhiteList(vs.statsDiskHandler)) + /* + adminMux.HandleFunc("/stats/counter", vs.guard.WhiteList(statsCounterHandler)) + adminMux.HandleFunc("/stats/memory", vs.guard.WhiteList(statsMemoryHandler)) + adminMux.HandleFunc("/stats/disk", vs.guard.WhiteList(vs.statsDiskHandler)) + */ } adminMux.HandleFunc("/", vs.privateStoreHandler) if publicMux != adminMux { From 48b7ad5fa84be4b57ab09a1309245a21f4efaed2 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Sat, 22 Feb 2020 14:01:04 -0800 Subject: [PATCH 0131/2432] s3: deny anonymous type --- weed/s3api/auth_credentials.go | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/weed/s3api/auth_credentials.go b/weed/s3api/auth_credentials.go index b7a8dbf95..6d3363232 100644 --- a/weed/s3api/auth_credentials.go +++ b/weed/s3api/auth_credentials.go @@ -134,10 +134,14 @@ func (iam *IdentityAccessManagement) authRequest(r *http.Request, actions []Acti glog.V(3).Infof("v4 auth type") identity, s3Err = iam.reqSignatureV4Verify(r) case authTypePostPolicy: - return ErrNotImplemented; + glog.V(3).Infof("post policy auth type") + return ErrNotImplemented case authTypeJWT: - return ErrNotImplemented; + glog.V(3).Infof("jwt auth type") + return ErrNotImplemented case authTypeAnonymous: + return ErrAccessDenied + default: return ErrNotImplemented } From 
e83bfd0a35a01ff873b7669c8b3b74062ff6b69e Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Sat, 22 Feb 2020 21:23:30 -0800 Subject: [PATCH 0132/2432] adjust log level --- weed/security/tls.go | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/weed/security/tls.go b/weed/security/tls.go index f4f525ede..1832e6e07 100644 --- a/weed/security/tls.go +++ b/weed/security/tls.go @@ -3,12 +3,14 @@ package security import ( "crypto/tls" "crypto/x509" - "github.com/spf13/viper" "io/ioutil" - "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/spf13/viper" + "google.golang.org/grpc" "google.golang.org/grpc/credentials" + + "github.com/chrislusf/seaweedfs/weed/glog" ) func LoadServerTLS(config *viper.Viper, component string) grpc.ServerOption { @@ -19,12 +21,12 @@ func LoadServerTLS(config *viper.Viper, component string) grpc.ServerOption { // load cert/key, ca cert cert, err := tls.LoadX509KeyPair(config.GetString(component+".cert"), config.GetString(component+".key")) if err != nil { - glog.Errorf("load cert/key error: %v", err) + glog.V(1).Infof("load cert/key error: %v", err) return nil } caCert, err := ioutil.ReadFile(config.GetString(component + ".ca")) if err != nil { - glog.Errorf("read ca cert file error: %v", err) + glog.V(1).Infof("read ca cert file error: %v", err) return nil } caCertPool := x509.NewCertPool() @@ -46,12 +48,12 @@ func LoadClientTLS(config *viper.Viper, component string) grpc.DialOption { // load cert/key, cacert cert, err := tls.LoadX509KeyPair(config.GetString(component+".cert"), config.GetString(component+".key")) if err != nil { - glog.Errorf("load cert/key error: %v", err) + glog.V(1).Infof("load cert/key error: %v", err) return grpc.WithInsecure() } caCert, err := ioutil.ReadFile(config.GetString(component + ".ca")) if err != nil { - glog.Errorf("read ca cert file error: %v", err) + glog.V(1).Infof("read ca cert file error: %v", err) return grpc.WithInsecure() } caCertPool := x509.NewCertPool() From 4ed6b584e22eb332bfdc61112a625d49c772bafb Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Sat, 22 Feb 2020 21:34:18 -0800 Subject: [PATCH 0133/2432] s3: access control limited by bucket --- weed/command/s3.go | 13 +++++++++++++ weed/s3api/auth_credentials.go | 34 +++++++++++++++++++++++++--------- 2 files changed, 38 insertions(+), 9 deletions(-) diff --git a/weed/command/s3.go b/weed/command/s3.go index 4dc4b82f6..5fb59fcca 100644 --- a/weed/command/s3.go +++ b/weed/command/s3.go @@ -87,6 +87,19 @@ var cmdS3 = &Command{ "Read", "Write" ] + }, + { + "name": "user_limited_to_bucket1", + "credentials": [ + { + "accessKey": "some_access_key4", + "secretKey": "some_secret_key4" + } + ], + "actions": [ + "Read:bucket1", + "Write:bucket1" + ] } ] } diff --git a/weed/s3api/auth_credentials.go b/weed/s3api/auth_credentials.go index 6d3363232..c1e8dff1e 100644 --- a/weed/s3api/auth_credentials.go +++ b/weed/s3api/auth_credentials.go @@ -7,6 +7,7 @@ import ( "net/http" "github.com/golang/protobuf/jsonpb" + "github.com/gorilla/mux" "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/pb/iam_pb" @@ -101,14 +102,14 @@ func (iam *IdentityAccessManagement) lookupByAccessKey(accessKey string) (identi return nil, nil, false } -func (iam *IdentityAccessManagement) Auth(f http.HandlerFunc, actions ...Action) http.HandlerFunc { +func (iam *IdentityAccessManagement) Auth(f http.HandlerFunc, action Action) http.HandlerFunc { if len(iam.identities) == 0 { return f } return func(w http.ResponseWriter, r *http.Request) { - errCode := 
iam.authRequest(r, actions) + errCode := iam.authRequest(r, action) if errCode == ErrNone { f(w, r) return @@ -118,7 +119,7 @@ func (iam *IdentityAccessManagement) Auth(f http.HandlerFunc, actions ...Action) } // check whether the request has valid access keys -func (iam *IdentityAccessManagement) authRequest(r *http.Request, actions []Action) ErrorCode { +func (iam *IdentityAccessManagement) authRequest(r *http.Request, action Action) ErrorCode { var identity *Identity var s3Err ErrorCode switch getRequestAuthType(r) { @@ -152,7 +153,10 @@ func (iam *IdentityAccessManagement) authRequest(r *http.Request, actions []Acti glog.V(3).Infof("user name: %v actions: %v", identity.Name, identity.Actions) - if !identity.canDo(actions) { + vars := mux.Vars(r) + bucket := vars["bucket"] + + if !identity.canDo(action, bucket) { return ErrAccessDenied } @@ -160,12 +164,24 @@ func (iam *IdentityAccessManagement) authRequest(r *http.Request, actions []Acti } -func (identity *Identity) canDo(actions []Action) bool { +func (identity *Identity) canDo(action Action, bucket string) bool { for _, a := range identity.Actions { - for _, b := range actions { - if a == b { - return true - } + if a == "Admin" { + return true + } + } + for _, a := range identity.Actions { + if a == action { + return true + } + } + if bucket == "" { + return false + } + limitedByBucket := string(action) + ":" + bucket + for _, a := range identity.Actions { + if string(a) == limitedByBucket { + return true } } return false From 6ea394b802f1bb52a07a91614e51185aea2281dd Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Sat, 22 Feb 2020 21:34:49 -0800 Subject: [PATCH 0134/2432] 1.57 --- weed/util/constants.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/weed/util/constants.go b/weed/util/constants.go index 4fb318f08..01227194f 100644 --- a/weed/util/constants.go +++ b/weed/util/constants.go @@ -5,5 +5,5 @@ import ( ) var ( - VERSION = fmt.Sprintf("%s %d.%d", sizeLimit, 1, 56) + VERSION = fmt.Sprintf("%s %d.%d", sizeLimit, 1, 57) ) From cd4c9a365bb415573b52624a29d09083b956b484 Mon Sep 17 00:00:00 2001 From: Yoni Nakache <45972051+LazyDBA247-Anyvision@users.noreply.github.com> Date: Sun, 23 Feb 2020 22:33:47 +0200 Subject: [PATCH 0135/2432] DiskStats: adding Total & Percent Usage making relevant data visible and readable. 
--- weed/server/volume_server_ui/templates.go | 40 ++++++++++++++++++++--- 1 file changed, 36 insertions(+), 4 deletions(-) diff --git a/weed/server/volume_server_ui/templates.go b/weed/server/volume_server_ui/templates.go index 81496b1de..a2d1dd5bf 100644 --- a/weed/server/volume_server_ui/templates.go +++ b/weed/server/volume_server_ui/templates.go @@ -1,11 +1,29 @@ package master_ui import ( + "fmt" "html/template" "strconv" "strings" ) +func bytesToHumanReadble(b uint64) string { + const unit = 1024 + if b < unit { + return fmt.Sprintf("%d B", b) + } + div, exp := uint64(unit), 0 + for n := b / unit; n >= unit; n /= unit { + div *= unit + exp++ + } + return fmt.Sprintf("%.2f %ciB", float64(b)/float64(div), "KMGTPE"[exp]) +} + +func percentFrom(total uint64, part_of uint64) string { + return fmt.Sprintf("%.2f", (float64(part_of)/float64(total))*100) +} + func join(data []int64) string { var ret []string for _, d := range data { @@ -15,7 +33,9 @@ func join(data []int64) string { } var funcMap = template.FuncMap{ - "join": join, + "join": join, + "bytesToHumanReadble": bytesToHumanReadble, + "percentFrom": percentFrom, } var StatusTpl = template.Must(template.New("status").Funcs(funcMap).Parse(` @@ -57,13 +77,25 @@ var StatusTpl = template.Must(template.New("status").Funcs(funcMap).Parse(`

      <h2>Disk Stats</h2>
-      <table class="table table-condensed table-striped">
+      <table class="table table-striped">
+        <thead>
+        <tr>
+          <th>Path</th>
+          <th>Total</th>
+          <th>Free</th>
+          <th>% Usage</th>
+        </tr>
+        </thead>
+        <tbody>
         {{ range .DiskStatuses }}
         <tr>
-          <th>{{ .Dir }}</th>
-          <td>{{ .Free }} Bytes Free</td>
+          <td>{{ .Dir }}</td>
+          <td>{{ bytesToHumanReadble .All }}</td>
+          <td>{{ bytesToHumanReadble .Used }}</td>
+          <td>{{ percentFrom .All .Used}}</td>
         </tr>
         {{ end }}
+        </tbody>
       </table>
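For reference, a minimal standalone sketch (not part of the patch) showing what the two template helpers added above produce. Only the helper bodies mirror the diff; the package main wrapper and the sample disk sizes are made-up values used purely for illustration.

package main

import "fmt"

// Mirrors the helper added in templates.go: converts a raw byte count
// into a human-readable string using 1024-based (KiB/MiB/GiB/...) units.
func bytesToHumanReadble(b uint64) string {
	const unit = 1024
	if b < unit {
		return fmt.Sprintf("%d B", b)
	}
	div, exp := uint64(unit), 0
	for n := b / unit; n >= unit; n /= unit {
		div *= unit
		exp++
	}
	return fmt.Sprintf("%.2f %ciB", float64(b)/float64(div), "KMGTPE"[exp])
}

// Mirrors the helper added in templates.go: percentage of total taken by partOf.
func percentFrom(total uint64, partOf uint64) string {
	return fmt.Sprintf("%.2f", (float64(partOf)/float64(total))*100)
}

func main() {
	// Hypothetical numbers for a ~500 GB disk; not taken from any real volume server.
	var all, used uint64 = 500107862016, 123456789000
	fmt.Println(bytesToHumanReadble(all))  // 465.76 GiB
	fmt.Println(bytesToHumanReadble(used)) // 114.98 GiB
	fmt.Println(percentFrom(all, used))    // 24.69
}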
From ec8619176a4a3e23a2e273d397785d424859d51b Mon Sep 17 00:00:00 2001 From: LazyDBA247-Anyvision Date: Sun, 23 Feb 2020 23:24:24 +0200 Subject: [PATCH 0136/2432] volume server protobuf: add precentFree & percentUsage to the DiskStatus --- weed/pb/volume_server_pb/volume_server.pb.go | 24 ++++++++++++++++---- 1 file changed, 20 insertions(+), 4 deletions(-) diff --git a/weed/pb/volume_server_pb/volume_server.pb.go b/weed/pb/volume_server_pb/volume_server.pb.go index d14e98693..3206be5f1 100644 --- a/weed/pb/volume_server_pb/volume_server.pb.go +++ b/weed/pb/volume_server_pb/volume_server.pb.go @@ -1505,10 +1505,12 @@ func (m *ReadVolumeFileStatusResponse) GetCollection() string { } type DiskStatus struct { - Dir string `protobuf:"bytes,1,opt,name=dir" json:"dir,omitempty"` - All uint64 `protobuf:"varint,2,opt,name=all" json:"all,omitempty"` - Used uint64 `protobuf:"varint,3,opt,name=used" json:"used,omitempty"` - Free uint64 `protobuf:"varint,4,opt,name=free" json:"free,omitempty"` + Dir string `protobuf:"bytes,1,opt,name=dir" json:"dir,omitempty"` + All uint64 `protobuf:"varint,2,opt,name=all" json:"all,omitempty"` + Used uint64 `protobuf:"varint,3,opt,name=used" json:"used,omitempty"` + Free uint64 `protobuf:"varint,4,opt,name=free" json:"free,omitempty"` + PercentFree float32 `protobuf:"fixed32,5,opt,name=percentFree" json:"percentFree,omitempty"` + PercentUsed float32 `protobuf:"fixed32,6,opt,name=percentUsed" json:"percentUsed,omitempty"` } func (m *DiskStatus) Reset() { *m = DiskStatus{} } @@ -1544,6 +1546,20 @@ func (m *DiskStatus) GetFree() uint64 { return 0 } +func (m *DiskStatus) GetPercentFree() float32 { + if m != nil { + return m.PercentFree + } + return float32(0.0) +} + +func (m *DiskStatus) GetPercentUsed() float32 { + if m != nil { + return m.PercentUsed + } + return float32(0.0) +} + type MemStatus struct { Goroutines int32 `protobuf:"varint,1,opt,name=goroutines" json:"goroutines,omitempty"` All uint64 `protobuf:"varint,2,opt,name=all" json:"all,omitempty"` From ab91dbe1d740e1cb2953aa51bb2f1f51941e53c8 Mon Sep 17 00:00:00 2001 From: LazyDBA247-Anyvision Date: Sun, 23 Feb 2020 23:25:56 +0200 Subject: [PATCH 0137/2432] fillInDiskStatus: update function to calculate new stats --- weed/stats/disk_supported.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/weed/stats/disk_supported.go b/weed/stats/disk_supported.go index 0537828b0..dff580b5b 100644 --- a/weed/stats/disk_supported.go +++ b/weed/stats/disk_supported.go @@ -17,5 +17,7 @@ func fillInDiskStatus(disk *volume_server_pb.DiskStatus) { disk.All = fs.Blocks * uint64(fs.Bsize) disk.Free = fs.Bfree * uint64(fs.Bsize) disk.Used = disk.All - disk.Free + disk.PercentFree = float32((float64(disk.Free) / float64(disk.All)) * 100) + disk.PercentUsed = float32((float64(disk.Used) / float64(disk.All)) * 100) return } From 4ff513d64d1c9b94d7cb7cf342477f6bef19dd36 Mon Sep 17 00:00:00 2001 From: LazyDBA247-Anyvision Date: Sun, 23 Feb 2020 23:27:09 +0200 Subject: [PATCH 0138/2432] staus route: add DiskStatuses for disk in the volume server status whem monitoring server, better to know the status of the disks & volumes in a single route. 
--- weed/server/volume_server_handlers_admin.go | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/weed/server/volume_server_handlers_admin.go b/weed/server/volume_server_handlers_admin.go index 1938a34c4..89bc051c5 100644 --- a/weed/server/volume_server_handlers_admin.go +++ b/weed/server/volume_server_handlers_admin.go @@ -12,6 +12,13 @@ import ( func (vs *VolumeServer) statusHandler(w http.ResponseWriter, r *http.Request) { m := make(map[string]interface{}) m["Version"] = util.VERSION + var ds []*volume_server_pb.DiskStatus + for _, loc := range vs.store.Locations { + if dir, e := filepath.Abs(loc.Directory); e == nil { + ds = append(ds, stats.NewDiskStatus(dir)) + } + } + m["DiskStatuses"] = ds m["Volumes"] = vs.store.VolumeInfos() writeJsonQuiet(w, r, http.StatusOK, m) } From bbb163c5ad155e4fdd2ce2ea95a9c3788946df37 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Sun, 23 Feb 2020 18:04:22 -0800 Subject: [PATCH 0139/2432] add for https://github.com/chrislusf/seaweedfs/pull/1207 --- weed/pb/volume_server.proto | 2 ++ 1 file changed, 2 insertions(+) diff --git a/weed/pb/volume_server.proto b/weed/pb/volume_server.proto index 54b0da19d..ce53fdc96 100644 --- a/weed/pb/volume_server.proto +++ b/weed/pb/volume_server.proto @@ -363,6 +363,8 @@ message DiskStatus { uint64 all = 2; uint64 used = 3; uint64 free = 4; + float percent_free = 5; + float percent_used = 6; } message MemStatus { From e96f85ed8ab3f3e1de2993b32e90a1cf26d2e697 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Sun, 23 Feb 2020 18:06:34 -0800 Subject: [PATCH 0140/2432] make pb files --- weed/pb/iam_pb/iam.pb.go | 4 +- weed/pb/volume_server_pb/volume_server.pb.go | 419 ++++++++++--------- 2 files changed, 212 insertions(+), 211 deletions(-) diff --git a/weed/pb/iam_pb/iam.pb.go b/weed/pb/iam_pb/iam.pb.go index 7f796677c..b7d7b038b 100644 --- a/weed/pb/iam_pb/iam.pb.go +++ b/weed/pb/iam_pb/iam.pb.go @@ -9,7 +9,7 @@ It is generated from these files: iam.proto It has these top-level messages: - Identities + S3ApiConfiguration Identity Credential */ @@ -108,7 +108,7 @@ func (m *Credential) GetSecretKey() string { } func init() { - proto.RegisterType((*S3ApiConfiguration)(nil), "iam_pb.Identities") + proto.RegisterType((*S3ApiConfiguration)(nil), "iam_pb.S3ApiConfiguration") proto.RegisterType((*Identity)(nil), "iam_pb.Identity") proto.RegisterType((*Credential)(nil), "iam_pb.Credential") } diff --git a/weed/pb/volume_server_pb/volume_server.pb.go b/weed/pb/volume_server_pb/volume_server.pb.go index 3206be5f1..56baa0cf7 100644 --- a/weed/pb/volume_server_pb/volume_server.pb.go +++ b/weed/pb/volume_server_pb/volume_server.pb.go @@ -1509,8 +1509,8 @@ type DiskStatus struct { All uint64 `protobuf:"varint,2,opt,name=all" json:"all,omitempty"` Used uint64 `protobuf:"varint,3,opt,name=used" json:"used,omitempty"` Free uint64 `protobuf:"varint,4,opt,name=free" json:"free,omitempty"` - PercentFree float32 `protobuf:"fixed32,5,opt,name=percentFree" json:"percentFree,omitempty"` - PercentUsed float32 `protobuf:"fixed32,6,opt,name=percentUsed" json:"percentUsed,omitempty"` + PercentFree float32 `protobuf:"fixed32,5,opt,name=percent_free,json=percentFree" json:"percent_free,omitempty"` + PercentUsed float32 `protobuf:"fixed32,6,opt,name=percent_used,json=percentUsed" json:"percent_used,omitempty"` } func (m *DiskStatus) Reset() { *m = DiskStatus{} } @@ -1550,14 +1550,14 @@ func (m *DiskStatus) GetPercentFree() float32 { if m != nil { return m.PercentFree } - return float32(0.0) + return 0 } func (m *DiskStatus) GetPercentUsed() float32 { if 
m != nil { return m.PercentUsed } - return float32(0.0) + return 0 } type MemStatus struct { @@ -3667,209 +3667,210 @@ var _VolumeServer_serviceDesc = grpc.ServiceDesc{ func init() { proto.RegisterFile("volume_server.proto", fileDescriptor0) } var fileDescriptor0 = []byte{ - // 3250 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xd4, 0x1b, 0xcb, 0x6e, 0x1c, 0xc7, - 0x91, 0xcb, 0xe5, 0x63, 0xb7, 0x76, 0x57, 0xa4, 0x9a, 0x32, 0xb5, 0x1e, 0x52, 0x12, 0x35, 0xf2, - 0x43, 0x92, 0x2d, 0x52, 0xa6, 0xed, 0x58, 0x91, 0x63, 0xc7, 0x12, 0xf5, 0xb0, 0x6c, 0x91, 0xb2, - 0x87, 0xb2, 0xec, 0xc4, 0x46, 0x06, 0xc3, 0x99, 0x5e, 0x72, 0xcc, 0xd9, 0xe9, 0xd1, 0x4c, 0x2f, - 0xad, 0x15, 0x9c, 0x93, 0x03, 0x24, 0x40, 0x90, 0x1c, 0x82, 0x5c, 0x72, 0x0c, 0x72, 0xcf, 0x35, - 0xbf, 0xe0, 0x1f, 0x08, 0x90, 0x53, 0x2e, 0x39, 0xe7, 0x90, 0x43, 0x80, 0x00, 0xb9, 0x04, 0xfd, - 0x9a, 0x9d, 0x27, 0x77, 0x68, 0x31, 0x08, 0x72, 0x9b, 0xae, 0xae, 0xae, 0xea, 0xaa, 0xae, 0xaa, - 0xae, 0xae, 0xda, 0x85, 0x85, 0x03, 0xe2, 0x0d, 0xfa, 0xd8, 0x8c, 0x70, 0x78, 0x80, 0xc3, 0xd5, - 0x20, 0x24, 0x94, 0xa0, 0xf9, 0x14, 0xd0, 0x0c, 0x76, 0xf4, 0x35, 0x40, 0x37, 0x2d, 0x6a, 0xef, - 0xdd, 0xc2, 0x1e, 0xa6, 0xd8, 0xc0, 0x8f, 0x07, 0x38, 0xa2, 0xe8, 0x79, 0x68, 0xf4, 0x5c, 0x0f, - 0x9b, 0xae, 0x13, 0x75, 0x6b, 0x2b, 0xf5, 0x8b, 0x4d, 0x63, 0x96, 0x8d, 0xef, 0x39, 0x91, 0xfe, - 0x00, 0x16, 0x52, 0x0b, 0xa2, 0x80, 0xf8, 0x11, 0x46, 0xd7, 0x60, 0x36, 0xc4, 0xd1, 0xc0, 0xa3, - 0x62, 0x41, 0x6b, 0xfd, 0xec, 0x6a, 0x96, 0xd7, 0x6a, 0xbc, 0x64, 0xe0, 0x51, 0x43, 0xa1, 0xeb, - 0xdf, 0xd4, 0xa0, 0x9d, 0x9c, 0x41, 0xa7, 0x61, 0x56, 0x32, 0xef, 0xd6, 0x56, 0x6a, 0x17, 0x9b, - 0xc6, 0x8c, 0xe0, 0x8d, 0x16, 0x61, 0x26, 0xa2, 0x16, 0x1d, 0x44, 0xdd, 0xc9, 0x95, 0xda, 0xc5, - 0x69, 0x43, 0x8e, 0xd0, 0x29, 0x98, 0xc6, 0x61, 0x48, 0xc2, 0x6e, 0x9d, 0xa3, 0x8b, 0x01, 0x42, - 0x30, 0x15, 0xb9, 0x4f, 0x71, 0x77, 0x6a, 0xa5, 0x76, 0xb1, 0x63, 0xf0, 0x6f, 0xd4, 0x85, 0xd9, - 0x03, 0x1c, 0x46, 0x2e, 0xf1, 0xbb, 0xd3, 0x1c, 0xac, 0x86, 0xfa, 0x07, 0x70, 0xe2, 0x8e, 0xeb, - 0xe1, 0xbb, 0x98, 0x2a, 0x1d, 0x94, 0x6e, 0xe3, 0x1c, 0xb4, 0x2c, 0xdb, 0xc6, 0x01, 0x35, 0x77, - 0x9f, 0xba, 0x01, 0xdf, 0x4b, 0xc3, 0x00, 0x01, 0xba, 0xfb, 0xd4, 0x0d, 0xf4, 0x9f, 0xd7, 0x61, - 0x2e, 0x26, 0x26, 0xf5, 0x83, 0x60, 0xca, 0xb1, 0xa8, 0xc5, 0x49, 0xb5, 0x0d, 0xfe, 0x8d, 0x5e, - 0x84, 0x13, 0x36, 0xf1, 0x29, 0xf6, 0xa9, 0xe9, 0x61, 0x7f, 0x97, 0xee, 0x71, 0x5a, 0x1d, 0xa3, - 0x23, 0xa1, 0xf7, 0x39, 0x10, 0x9d, 0x87, 0xb6, 0x42, 0xa3, 0xc3, 0x00, 0x4b, 0x29, 0x5b, 0x12, - 0xf6, 0x70, 0x18, 0x60, 0x74, 0x01, 0x3a, 0x9e, 0x15, 0x51, 0xb3, 0x4f, 0x1c, 0xb7, 0xe7, 0x62, - 0x87, 0x0b, 0x3d, 0x65, 0xb4, 0x19, 0x70, 0x53, 0xc2, 0x90, 0x26, 0x0e, 0xd5, 0xb7, 0xfa, 0x98, - 0x4b, 0xdf, 0x34, 0xe2, 0x31, 0xdb, 0x1e, 0xa6, 0xd6, 0x6e, 0x77, 0x86, 0xc3, 0xf9, 0x37, 0x3a, - 0x03, 0xe0, 0x46, 0x5c, 0xc6, 0x00, 0x3b, 0xdd, 0x59, 0x2e, 0x66, 0xd3, 0x8d, 0xee, 0x0a, 0x00, - 0x7a, 0x1f, 0x66, 0xf7, 0xb0, 0xe5, 0xe0, 0x30, 0xea, 0x36, 0xf8, 0x89, 0xaf, 0xe6, 0x4f, 0x3c, - 0xa3, 0x85, 0xd5, 0xf7, 0xc5, 0x82, 0xdb, 0x3e, 0x0d, 0x87, 0x86, 0x5a, 0x8e, 0x96, 0xa1, 0xc9, - 0x8f, 0x6c, 0x83, 0x38, 0xb8, 0xdb, 0xe4, 0x47, 0x3b, 0x02, 0x68, 0xd7, 0xa1, 0x9d, 0x5c, 0x86, - 0xe6, 0xa1, 0xbe, 0x8f, 0x87, 0xf2, 0x4c, 0xd8, 0x27, 0x3b, 0xff, 0x03, 0xcb, 0x1b, 0x60, 0xae, - 0xbe, 0xa6, 0x21, 0x06, 0xd7, 0x27, 0xaf, 0xd5, 0xf4, 0x59, 0x98, 0xbe, 0xdd, 0x0f, 0xe8, 0x50, - 0x7f, 0x0b, 0xba, 0x8f, 0x2c, 0x7b, 0x30, 0xe8, 0x3f, 0xe2, 0x5b, 0xdc, 0xd8, 0xc3, 0xf6, 
0xbe, - 0x3a, 0xe8, 0x25, 0x68, 0xca, 0x8d, 0xcb, 0xa3, 0xee, 0x18, 0x0d, 0x01, 0xb8, 0xe7, 0xe8, 0xef, - 0xc1, 0xf3, 0x05, 0x0b, 0xe5, 0xa1, 0x5e, 0x80, 0xce, 0xae, 0x15, 0xee, 0x58, 0xbb, 0xd8, 0x0c, - 0x2d, 0xea, 0x12, 0xbe, 0xba, 0x66, 0xb4, 0x25, 0xd0, 0x60, 0x30, 0xfd, 0x73, 0xd0, 0x52, 0x14, - 0x48, 0x3f, 0xb0, 0x6c, 0x5a, 0x85, 0x39, 0x5a, 0x81, 0x56, 0x10, 0x62, 0xcb, 0xf3, 0x88, 0x6d, - 0x51, 0x21, 0x5e, 0xdd, 0x48, 0x82, 0xf4, 0x33, 0xb0, 0x54, 0x48, 0x5c, 0x6c, 0x50, 0xbf, 0x96, - 0xd9, 0x3d, 0xe9, 0xf7, 0xdd, 0x4a, 0xac, 0xf5, 0xe5, 0xdc, 0xae, 0xf9, 0x4a, 0x49, 0xf7, 0xfb, - 0x99, 0x59, 0x0f, 0x5b, 0xfe, 0x20, 0xa8, 0x44, 0x38, 0xbb, 0x63, 0xb5, 0x34, 0xa6, 0x7c, 0x5a, - 0x04, 0x83, 0x0d, 0xe2, 0x79, 0xd8, 0xa6, 0x2e, 0xf1, 0x15, 0xd9, 0xb3, 0x00, 0x76, 0x0c, 0x94, - 0xe7, 0x9f, 0x80, 0xe8, 0x1a, 0x74, 0xf3, 0x4b, 0x25, 0xd9, 0xbf, 0xd6, 0xe0, 0xb9, 0x1b, 0x52, - 0x69, 0x82, 0x71, 0xa5, 0x03, 0x48, 0xb3, 0x9c, 0xcc, 0xb2, 0xcc, 0x1e, 0x50, 0x3d, 0x77, 0x40, - 0x0c, 0x23, 0xc4, 0x81, 0xe7, 0xda, 0x16, 0x27, 0x31, 0x25, 0x7c, 0x37, 0x01, 0x62, 0xf6, 0x4c, - 0xa9, 0x27, 0x3d, 0x92, 0x7d, 0xa2, 0x75, 0x58, 0xec, 0xe3, 0x3e, 0x09, 0x87, 0x66, 0xdf, 0x0a, - 0xcc, 0xbe, 0xf5, 0xc4, 0x64, 0xc1, 0xcb, 0xec, 0xef, 0x70, 0xf7, 0xec, 0x18, 0x48, 0xcc, 0x6e, - 0x5a, 0xc1, 0xa6, 0xf5, 0x64, 0xdb, 0x7d, 0x8a, 0x37, 0x77, 0xf4, 0x2e, 0x2c, 0x66, 0xe5, 0x93, - 0xa2, 0x7f, 0x0f, 0x4e, 0x0b, 0xc8, 0xf6, 0xd0, 0xb7, 0xb7, 0x79, 0xc4, 0xac, 0x74, 0x50, 0xff, - 0xae, 0x41, 0x37, 0xbf, 0x50, 0x5a, 0xfe, 0xb3, 0x6a, 0xed, 0xc8, 0x3a, 0x39, 0x07, 0x2d, 0x6a, - 0xb9, 0x9e, 0x49, 0x7a, 0xbd, 0x08, 0x53, 0xae, 0x88, 0x29, 0x03, 0x18, 0xe8, 0x01, 0x87, 0xa0, - 0x4b, 0x30, 0x6f, 0x0b, 0xeb, 0x37, 0x43, 0x7c, 0xe0, 0xf2, 0x18, 0x3f, 0xcb, 0x37, 0x36, 0x67, - 0x2b, 0xaf, 0x10, 0x60, 0xa4, 0x43, 0xc7, 0x75, 0x9e, 0x98, 0x3c, 0xba, 0xf3, 0x2b, 0xa2, 0xc1, - 0xa9, 0xb5, 0x5c, 0xe7, 0x09, 0x0b, 0x58, 0x4c, 0xa3, 0xfa, 0x23, 0x58, 0x16, 0xc2, 0xdf, 0xf3, - 0xed, 0x10, 0xf7, 0xb1, 0x4f, 0x2d, 0x6f, 0x83, 0x04, 0xc3, 0x4a, 0x66, 0xf3, 0x3c, 0x34, 0x22, - 0xd7, 0xb7, 0xb1, 0xe9, 0x8b, 0xab, 0x6a, 0xca, 0x98, 0xe5, 0xe3, 0xad, 0x48, 0xbf, 0x09, 0x67, - 0x4a, 0xe8, 0x4a, 0xcd, 0x9e, 0x87, 0x36, 0xdf, 0x98, 0x0c, 0xef, 0xf2, 0xc2, 0x68, 0x31, 0xd8, - 0x86, 0x00, 0xe9, 0xaf, 0x01, 0x12, 0x34, 0x36, 0xc9, 0xc0, 0xaf, 0xe6, 0xce, 0xcf, 0xc1, 0x42, - 0x6a, 0x89, 0xb4, 0x8d, 0xd7, 0xe1, 0x94, 0x00, 0x7f, 0xe2, 0xf7, 0x2b, 0xd3, 0x3a, 0x0d, 0xcf, - 0x65, 0x16, 0x49, 0x6a, 0xeb, 0x8a, 0x49, 0x3a, 0x99, 0x38, 0x94, 0xd8, 0xa2, 0xda, 0x41, 0x3a, - 0x9f, 0xe0, 0x91, 0x4b, 0x6c, 0xd8, 0x0a, 0xf7, 0x0d, 0x6c, 0x39, 0xc4, 0xf7, 0x86, 0x95, 0x23, - 0x57, 0xc1, 0x4a, 0x49, 0xf7, 0x53, 0x58, 0x54, 0x11, 0xcd, 0xef, 0xb9, 0xbb, 0x83, 0x10, 0x57, - 0x8d, 0xc4, 0x49, 0x93, 0x9d, 0xcc, 0x99, 0xac, 0xbe, 0xa6, 0xdc, 0x2c, 0x41, 0x58, 0x1e, 0x69, - 0x9c, 0x9f, 0xd4, 0x12, 0xf9, 0x89, 0xfe, 0xc7, 0x1a, 0x9c, 0x54, 0x2b, 0x2a, 0xda, 0xd5, 0x11, - 0x1d, 0xab, 0x5e, 0xea, 0x58, 0x53, 0x23, 0xc7, 0xba, 0x08, 0xf3, 0x11, 0x19, 0x84, 0x36, 0x36, - 0x59, 0x4e, 0x62, 0xfa, 0xec, 0x0e, 0x16, 0x7e, 0x77, 0x42, 0xc0, 0x6f, 0x59, 0xd4, 0xda, 0x22, - 0x0e, 0xd6, 0x7f, 0xa8, 0xcc, 0x2e, 0x65, 0xaf, 0x97, 0xe0, 0x24, 0x4f, 0x3d, 0xac, 0x20, 0xc0, - 0xbe, 0x63, 0x5a, 0x94, 0x19, 0x7d, 0x8d, 0x1b, 0xfd, 0x09, 0x36, 0x71, 0x83, 0xc3, 0x6f, 0xd0, - 0xad, 0x48, 0xff, 0xed, 0x24, 0xcc, 0xb1, 0xb5, 0xcc, 0xc9, 0x2a, 0xc9, 0x3b, 0x0f, 0x75, 0xfc, - 0x84, 0x4a, 0x41, 0xd9, 0x27, 0x5a, 0x83, 0x05, 0xe9, 0xcd, 0x2e, 0xf1, 0x47, 0x8e, 0x5e, 0x17, - 0x71, 0x71, 0x34, 
0x15, 0xfb, 0xfa, 0x39, 0x68, 0x45, 0x94, 0x04, 0x2a, 0x6e, 0x88, 0xbc, 0x08, - 0x18, 0x48, 0xc6, 0x8d, 0xb4, 0x4e, 0xa7, 0x0b, 0x74, 0xda, 0x76, 0x23, 0x13, 0xdb, 0xa6, 0xd8, - 0x15, 0x8f, 0x3c, 0x0d, 0x03, 0xdc, 0xe8, 0xb6, 0x2d, 0xb4, 0x81, 0xde, 0x85, 0x65, 0x77, 0xd7, - 0x27, 0x21, 0x36, 0xa5, 0x22, 0xb9, 0xff, 0xfa, 0x84, 0x9a, 0x3d, 0x32, 0xf0, 0x55, 0xe6, 0xd4, - 0x15, 0x38, 0xdb, 0x1c, 0x85, 0x69, 0x60, 0x8b, 0xd0, 0x3b, 0x6c, 0x5e, 0x7f, 0x13, 0xe6, 0x47, - 0x5a, 0xa9, 0x1e, 0x05, 0xbe, 0xa9, 0x29, 0x8b, 0x7b, 0x68, 0xb9, 0xde, 0x36, 0xf6, 0x1d, 0x1c, - 0x3e, 0x63, 0x74, 0x42, 0x57, 0xe1, 0x94, 0xeb, 0x78, 0xd8, 0xa4, 0x6e, 0x1f, 0x93, 0x01, 0x35, - 0x23, 0x6c, 0x13, 0xdf, 0x89, 0x94, 0x7e, 0xd9, 0xdc, 0x43, 0x31, 0xb5, 0x2d, 0x66, 0xf4, 0x9f, - 0xc5, 0xb7, 0x44, 0x72, 0x17, 0xa3, 0xfc, 0xc8, 0xc7, 0x98, 0x11, 0x14, 0xa9, 0x9e, 0x14, 0xa3, - 0x2d, 0x80, 0x22, 0xab, 0x63, 0x27, 0x24, 0x91, 0x76, 0x88, 0x33, 0xe4, 0x3b, 0x6a, 0x1b, 0x20, - 0x40, 0x37, 0x89, 0x33, 0xe4, 0xe1, 0x3a, 0x32, 0xb9, 0x91, 0xd9, 0x7b, 0x03, 0x7f, 0x9f, 0xef, - 0xa6, 0x61, 0xb4, 0xdc, 0xe8, 0xbe, 0x15, 0xd1, 0x0d, 0x06, 0xd2, 0xff, 0x54, 0x53, 0xf1, 0x82, - 0x6d, 0xc3, 0xc0, 0x36, 0x76, 0x0f, 0xfe, 0x07, 0xea, 0x60, 0x2b, 0xa4, 0x11, 0xa4, 0x72, 0x61, - 0xe9, 0x70, 0x48, 0xcc, 0xc9, 0x5b, 0x95, 0xcf, 0x8c, 0xc2, 0x55, 0x7a, 0xe3, 0x32, 0x5c, 0x7d, - 0xa1, 0xae, 0x8b, 0xdb, 0xf6, 0xf6, 0x9e, 0x15, 0x3a, 0xd1, 0x5d, 0xec, 0xe3, 0xd0, 0xa2, 0xc7, - 0x92, 0xbe, 0xe8, 0x2b, 0x70, 0xb6, 0x8c, 0xba, 0xe4, 0xff, 0xb9, 0xba, 0x06, 0x15, 0x86, 0x81, - 0x77, 0x06, 0xae, 0xe7, 0x1c, 0x0b, 0xfb, 0x0f, 0xb3, 0xc2, 0xc5, 0xc4, 0xa5, 0xfd, 0x5c, 0x86, - 0x93, 0x21, 0x07, 0x51, 0x33, 0x62, 0x08, 0xf1, 0x7b, 0xb4, 0x63, 0xcc, 0xc9, 0x09, 0xbe, 0x90, - 0xbd, 0x4b, 0x7f, 0x39, 0xa9, 0x2c, 0x40, 0x51, 0x3b, 0xb6, 0xb0, 0xba, 0x04, 0xcd, 0x11, 0xfb, - 0x3a, 0x67, 0xdf, 0x88, 0x24, 0x5f, 0x66, 0x9d, 0x36, 0x09, 0x86, 0x26, 0xb6, 0x45, 0x46, 0xc1, - 0x8f, 0xba, 0xc1, 0x9e, 0x67, 0xc1, 0xf0, 0xb6, 0xcd, 0x13, 0x8a, 0xea, 0x31, 0x36, 0x41, 0xed, - 0x4b, 0x41, 0x6d, 0x26, 0x49, 0xed, 0x4b, 0x4e, 0x4d, 0xe1, 0x1c, 0xb8, 0x3d, 0x81, 0x33, 0x3b, - 0xc2, 0x79, 0xe4, 0xf6, 0x18, 0xce, 0xc8, 0xaa, 0xd2, 0xca, 0x90, 0xa7, 0xfa, 0x15, 0x2c, 0xa5, - 0x67, 0xab, 0x5f, 0xd8, 0xcf, 0xa4, 0x2c, 0xfd, 0x6c, 0xd6, 0x9c, 0x32, 0xb7, 0xfe, 0x41, 0x76, - 0xdb, 0x95, 0x33, 0x9c, 0x67, 0xdb, 0xd7, 0x99, 0xac, 0x42, 0xd2, 0x69, 0xd2, 0x67, 0xd9, 0x6d, - 0x1f, 0x21, 0x5d, 0x3a, 0x9c, 0xf1, 0xb9, 0xac, 0x0b, 0x64, 0x73, 0xaa, 0xdf, 0xc5, 0xf1, 0x55, - 0x62, 0xb0, 0x8c, 0xa6, 0x72, 0x5c, 0x93, 0x7c, 0x65, 0x5d, 0x61, 0x56, 0xb2, 0x45, 0x8b, 0x30, - 0x23, 0xef, 0x43, 0xf1, 0x62, 0x91, 0xa3, 0x54, 0xc9, 0xa4, 0x2e, 0x4b, 0x26, 0xaa, 0x14, 0xc4, - 0xde, 0xdc, 0xd3, 0x22, 0x3c, 0xb2, 0xf1, 0x87, 0x78, 0xa8, 0x6f, 0x65, 0x3c, 0x4e, 0x6c, 0xed, - 0x90, 0x82, 0x87, 0xa8, 0x28, 0x38, 0xfc, 0xcc, 0x1d, 0x59, 0x38, 0x69, 0xba, 0xd2, 0x08, 0x1c, - 0xfd, 0x57, 0xb5, 0x11, 0xc1, 0x9b, 0x1e, 0xd9, 0x39, 0x46, 0xab, 0x4c, 0x4a, 0x51, 0x4f, 0x49, - 0x91, 0xac, 0x09, 0x4d, 0xa5, 0x6b, 0x42, 0x09, 0x27, 0x4a, 0x6e, 0xa7, 0x2c, 0x34, 0x3f, 0x24, - 0xc7, 0xf7, 0xb2, 0xcc, 0x87, 0xe6, 0x11, 0x75, 0xc9, 0xff, 0x3a, 0x2c, 0x31, 0x85, 0x0b, 0x28, - 0x7f, 0xb7, 0x54, 0x7f, 0xdb, 0xfd, 0x7d, 0x12, 0x96, 0x8b, 0x17, 0x57, 0x79, 0xdf, 0xbd, 0x0d, - 0x5a, 0xfc, 0x7e, 0x62, 0x57, 0x63, 0x44, 0xad, 0x7e, 0x10, 0x5f, 0x8e, 0xe2, 0x0e, 0x3d, 0x2d, - 0x1f, 0x53, 0x0f, 0xd5, 0xbc, 0xba, 0x21, 0x73, 0x8f, 0xaf, 0x7a, 0xee, 0xf1, 0xc5, 0x18, 0x38, - 0x16, 0x2d, 0x63, 0x20, 0x72, 0xb8, 0xd3, 
0x8e, 0x45, 0xcb, 0x18, 0xc4, 0x8b, 0x39, 0x03, 0x61, - 0xb5, 0x2d, 0x89, 0xcf, 0x19, 0x9c, 0x01, 0x90, 0xe9, 0xd5, 0xc0, 0x57, 0x8f, 0xc9, 0xa6, 0x48, - 0xae, 0x06, 0x7e, 0x69, 0x96, 0x39, 0x5b, 0x9a, 0x65, 0xa6, 0x4f, 0xb3, 0x91, 0x3b, 0xcd, 0xcf, - 0x00, 0x6e, 0xb9, 0xd1, 0xbe, 0x50, 0x32, 0x4b, 0x6b, 0x1d, 0x57, 0xbd, 0x06, 0xd8, 0x27, 0x83, - 0x58, 0x9e, 0x27, 0x55, 0xc7, 0x3e, 0x99, 0xfb, 0x0c, 0x22, 0xec, 0x48, 0xed, 0xf0, 0x6f, 0x06, - 0xeb, 0x85, 0x18, 0x4b, 0x05, 0xf0, 0x6f, 0xfd, 0x0f, 0x35, 0x68, 0x6e, 0xe2, 0xbe, 0xa4, 0x7c, - 0x16, 0x60, 0x97, 0x84, 0x64, 0x40, 0x5d, 0x1f, 0x8b, 0x2c, 0x7c, 0xda, 0x48, 0x40, 0xbe, 0x3b, - 0x1f, 0x1e, 0x1a, 0xb0, 0xd7, 0x93, 0xca, 0xe4, 0xdf, 0x0c, 0xb6, 0x87, 0xad, 0x40, 0xea, 0x8f, - 0x7f, 0xb3, 0xb7, 0x4e, 0x44, 0x2d, 0x7b, 0x9f, 0x2b, 0x6b, 0xca, 0x10, 0x03, 0xfd, 0x2f, 0x35, - 0x00, 0x03, 0xf7, 0x09, 0xe5, 0xb6, 0xc6, 0xb2, 0xdb, 0x1d, 0xcb, 0xde, 0x67, 0xef, 0x05, 0x5e, - 0xd1, 0x14, 0x9a, 0x68, 0x49, 0x18, 0xaf, 0x68, 0x9e, 0x01, 0x50, 0x28, 0x32, 0x7e, 0x35, 0x8d, - 0xa6, 0x84, 0x88, 0x97, 0x81, 0x72, 0x65, 0x59, 0x04, 0x1c, 0xc5, 0x34, 0xb1, 0x6d, 0x15, 0xd3, - 0x96, 0xa0, 0x99, 0x35, 0x05, 0x1e, 0x0a, 0xb8, 0x1d, 0x5c, 0x80, 0x8e, 0x2a, 0x99, 0x72, 0x43, - 0x93, 0xa2, 0xb4, 0x15, 0x90, 0x19, 0x17, 0x2f, 0x4f, 0x3e, 0xa1, 0xd8, 0x8f, 0x6d, 0xa0, 0x69, - 0x8c, 0x00, 0xfa, 0xd7, 0x00, 0xea, 0x41, 0xdf, 0x23, 0x68, 0x1d, 0xa6, 0x19, 0x71, 0x55, 0x04, - 0x5f, 0xce, 0x97, 0x44, 0x47, 0x6a, 0x30, 0x04, 0x6a, 0x32, 0x00, 0x4d, 0xa6, 0x02, 0xd0, 0xf8, - 0xf7, 0x9c, 0xfe, 0x6d, 0x0d, 0x56, 0x64, 0xfa, 0xe8, 0xe2, 0x70, 0x93, 0x1c, 0xb0, 0x54, 0xe2, - 0x21, 0x11, 0x4c, 0x8e, 0x25, 0x72, 0x5e, 0x83, 0xae, 0x83, 0x23, 0xea, 0xfa, 0x9c, 0xa1, 0xa9, - 0x0e, 0x85, 0x57, 0x91, 0xc5, 0x86, 0x16, 0x13, 0xf3, 0x37, 0xc5, 0xf4, 0x96, 0xd5, 0xc7, 0xe8, - 0x0a, 0x2c, 0xec, 0x63, 0x1c, 0x98, 0x1e, 0xb1, 0x2d, 0xcf, 0x54, 0x3e, 0x29, 0xf3, 0xa3, 0x79, - 0x36, 0x75, 0x9f, 0xcd, 0xdc, 0x12, 0x7e, 0xa9, 0x47, 0x70, 0xfe, 0x10, 0x49, 0x64, 0x5c, 0x5a, - 0x86, 0x66, 0x10, 0x12, 0x1b, 0x47, 0xcc, 0x66, 0x6b, 0xfc, 0x9a, 0x1a, 0x01, 0xd0, 0x55, 0x58, - 0x88, 0x07, 0x1f, 0xe1, 0xd0, 0xc6, 0x3e, 0xb5, 0x76, 0x45, 0xdd, 0x74, 0xd2, 0x28, 0x9a, 0xd2, - 0x7f, 0x53, 0x03, 0x3d, 0xc7, 0xf5, 0x4e, 0x48, 0xfa, 0xc7, 0xa8, 0xc1, 0x35, 0x38, 0xc5, 0xf5, - 0x10, 0x72, 0x92, 0x23, 0x45, 0x88, 0x67, 0xcc, 0x49, 0x36, 0x27, 0xb8, 0x29, 0x4d, 0x0c, 0xe0, - 0xc2, 0xa1, 0x7b, 0xfa, 0x2f, 0xe9, 0x62, 0x49, 0xdd, 0xbe, 0xe2, 0x65, 0x92, 0xba, 0x4e, 0xf4, - 0xdf, 0xd7, 0xd4, 0x65, 0x98, 0x9e, 0x95, 0x7b, 0xb9, 0x01, 0x1d, 0xc7, 0x8d, 0xf6, 0x4d, 0xd1, - 0x91, 0x39, 0xcc, 0xfe, 0x47, 0x71, 0xd0, 0x68, 0x3b, 0xf1, 0x37, 0x8e, 0xd0, 0x7b, 0xd0, 0x91, - 0x55, 0xcf, 0x44, 0x93, 0xa7, 0xb5, 0xbe, 0x94, 0x27, 0x11, 0xc7, 0x3b, 0xa3, 0x2d, 0x56, 0x88, - 0x91, 0xfe, 0xaf, 0x36, 0xb4, 0x3f, 0x1e, 0xe0, 0x70, 0x98, 0xa8, 0x18, 0x47, 0x58, 0x1e, 0x83, - 0x6a, 0x64, 0x25, 0x20, 0xec, 0xaa, 0xe8, 0x85, 0xa4, 0x6f, 0xc6, 0xbd, 0xae, 0x49, 0x8e, 0xd2, - 0x62, 0xc0, 0x3b, 0xa2, 0xdf, 0x85, 0xde, 0x81, 0x99, 0x9e, 0xeb, 0x51, 0x2c, 0xba, 0x4b, 0xad, - 0xf5, 0x17, 0xf3, 0xfb, 0x49, 0xf2, 0x5c, 0xbd, 0xc3, 0x91, 0x0d, 0xb9, 0x08, 0xed, 0xc0, 0x82, - 0xeb, 0x07, 0xfc, 0xed, 0x18, 0xba, 0x96, 0xe7, 0x3e, 0x1d, 0xd5, 0x3c, 0x5b, 0xeb, 0xaf, 0x8d, - 0xa1, 0x75, 0x8f, 0xad, 0xdc, 0x4e, 0x2e, 0x34, 0x90, 0x9b, 0x83, 0x21, 0x0c, 0xa7, 0xc8, 0x80, - 0xe6, 0x99, 0x4c, 0x73, 0x26, 0xeb, 0x63, 0x98, 0x3c, 0xe0, 0x4b, 0xd3, 0x5c, 0x16, 0x48, 0x1e, - 0xa8, 0x6d, 0xc1, 0x8c, 0x10, 0x8e, 0x05, 0xf9, 0x9e, 0x8b, 0x3d, 
0xd5, 0x18, 0x13, 0x03, 0x16, - 0xc7, 0x48, 0x80, 0x43, 0xcb, 0x57, 0xf1, 0x5a, 0x0d, 0x47, 0x0d, 0x9a, 0x7a, 0xa2, 0x41, 0xa3, - 0xfd, 0x79, 0x1a, 0x50, 0x5e, 0x42, 0x55, 0xc8, 0x0d, 0x71, 0xc4, 0x62, 0x60, 0xf2, 0x82, 0x98, - 0x4b, 0xc0, 0xf9, 0x25, 0xf1, 0x29, 0x34, 0xed, 0xe8, 0xc0, 0xe4, 0x2a, 0x91, 0xe6, 0x72, 0xfd, - 0xc8, 0x2a, 0x5d, 0xdd, 0xd8, 0x7e, 0xc4, 0xa1, 0x46, 0xc3, 0x8e, 0x0e, 0xf8, 0x17, 0xfa, 0x31, - 0xc0, 0x97, 0x11, 0xf1, 0x25, 0x65, 0x71, 0xf0, 0x6f, 0x1f, 0x9d, 0xf2, 0x07, 0xdb, 0x0f, 0xb6, - 0x04, 0xe9, 0x26, 0x23, 0x27, 0x68, 0xdb, 0xd0, 0x09, 0xac, 0xf0, 0xf1, 0x00, 0x53, 0x49, 0x5e, - 0xd8, 0xc2, 0xbb, 0x47, 0x27, 0xff, 0x91, 0x20, 0x23, 0x38, 0xb4, 0x83, 0xc4, 0x48, 0xfb, 0x76, - 0x12, 0x1a, 0x4a, 0x2e, 0xf6, 0xfc, 0xe4, 0x16, 0x2e, 0x8a, 0x30, 0xa6, 0xeb, 0xf7, 0x88, 0xd4, - 0xe8, 0x09, 0x06, 0x17, 0x75, 0x18, 0x7e, 0x7d, 0x5d, 0x82, 0xf9, 0x10, 0xdb, 0x24, 0x74, 0x58, - 0x92, 0xee, 0xf6, 0x5d, 0x66, 0xf6, 0xe2, 0x2c, 0xe7, 0x04, 0xfc, 0x96, 0x02, 0xa3, 0x97, 0x61, - 0x8e, 0x1f, 0x7b, 0x02, 0xb3, 0xae, 0x68, 0x62, 0x2f, 0x81, 0x78, 0x09, 0xe6, 0x1f, 0x0f, 0x58, - 0xe0, 0xb3, 0xf7, 0xac, 0xd0, 0xb2, 0x29, 0x89, 0xcb, 0x21, 0x73, 0x1c, 0xbe, 0x11, 0x83, 0xd1, - 0x1b, 0xb0, 0x28, 0x50, 0x71, 0x64, 0x5b, 0x41, 0xbc, 0x02, 0x87, 0xf2, 0xb5, 0x7c, 0x8a, 0xcf, - 0xde, 0xe6, 0x93, 0x1b, 0x6a, 0x0e, 0x69, 0xd0, 0xb0, 0x49, 0xbf, 0x8f, 0x7d, 0x1a, 0xc9, 0xfe, - 0x65, 0x3c, 0x46, 0x37, 0xe0, 0x8c, 0xe5, 0x79, 0xe4, 0x2b, 0x93, 0xaf, 0x74, 0xcc, 0x9c, 0x74, - 0xe2, 0xed, 0xac, 0x71, 0xa4, 0x8f, 0x39, 0x8e, 0x91, 0x16, 0x54, 0x3b, 0x07, 0xcd, 0xf8, 0x1c, - 0x59, 0xca, 0x93, 0x30, 0x48, 0xfe, 0xad, 0x9d, 0x80, 0x76, 0xf2, 0x24, 0xb4, 0x7f, 0xd4, 0x61, - 0xa1, 0xc0, 0xa9, 0xd0, 0xe7, 0x00, 0xcc, 0x5a, 0x85, 0x6b, 0x49, 0x73, 0xfd, 0xc1, 0xd1, 0x9d, - 0x93, 0xd9, 0xab, 0x00, 0x1b, 0xcc, 0xfa, 0xc5, 0x27, 0xfa, 0x09, 0xb4, 0xb8, 0xc5, 0x4a, 0xea, - 0xc2, 0x64, 0xdf, 0xf9, 0x0e, 0xd4, 0x99, 0xac, 0x92, 0x3c, 0xf7, 0x01, 0xf1, 0xad, 0xfd, 0xad, - 0x06, 0xcd, 0x98, 0x31, 0x4b, 0xe0, 0xc4, 0x41, 0xf1, 0xb3, 0x8e, 0x54, 0x02, 0xc7, 0x61, 0x77, - 0x38, 0xe8, 0xff, 0xd2, 0x94, 0xb4, 0xb7, 0x00, 0x46, 0xf2, 0x17, 0x8a, 0x50, 0x2b, 0x14, 0x41, - 0xbf, 0x04, 0x1d, 0xa6, 0x59, 0x17, 0x3b, 0xdb, 0x34, 0x74, 0x03, 0xfe, 0x4b, 0x03, 0x81, 0x13, - 0xc9, 0x17, 0xb0, 0x1a, 0xae, 0xff, 0x73, 0x19, 0xda, 0xc9, 0x9b, 0x14, 0x7d, 0x01, 0xad, 0xc4, - 0x2f, 0x2a, 0xd0, 0x0b, 0xf9, 0x43, 0xcb, 0xff, 0x42, 0x43, 0x7b, 0x71, 0x0c, 0x96, 0x7c, 0x24, - 0x4e, 0x20, 0x03, 0x66, 0x65, 0x17, 0x1e, 0xad, 0x1c, 0xd2, 0xa0, 0x17, 0x54, 0xcf, 0x8f, 0x6d, - 0xe1, 0xeb, 0x13, 0x57, 0x6b, 0xc8, 0x87, 0x93, 0xb9, 0xa6, 0x38, 0xba, 0x9c, 0x5f, 0x5b, 0xd6, - 0x72, 0xd7, 0x5e, 0xa9, 0x84, 0x1b, 0xcb, 0x40, 0x61, 0xa1, 0xa0, 0xcb, 0x8d, 0x5e, 0x1d, 0x43, - 0x25, 0xd5, 0x69, 0xd7, 0xae, 0x54, 0xc4, 0x8e, 0xb9, 0x3e, 0x06, 0x94, 0x6f, 0x81, 0xa3, 0x57, - 0xc6, 0x92, 0x19, 0xb5, 0xd8, 0xb5, 0x57, 0xab, 0x21, 0x97, 0x0a, 0x2a, 0x9a, 0xe3, 0x63, 0x05, - 0x4d, 0xb5, 0xdf, 0xc7, 0x0a, 0x9a, 0xe9, 0xb8, 0x4f, 0xa0, 0x7d, 0x98, 0xcf, 0x36, 0xce, 0xd1, - 0xa5, 0xb2, 0x9f, 0xef, 0xe4, 0xfa, 0xf2, 0xda, 0xe5, 0x2a, 0xa8, 0x31, 0x33, 0x0c, 0x27, 0xd2, - 0x8d, 0x6a, 0xf4, 0x72, 0x7e, 0x7d, 0x61, 0xab, 0x5e, 0xbb, 0x38, 0x1e, 0x31, 0x29, 0x53, 0xb6, - 0x79, 0x5d, 0x24, 0x53, 0x49, 0x67, 0xbc, 0x48, 0xa6, 0xb2, 0x5e, 0xb8, 0x3e, 0x81, 0xbe, 0x56, - 0x1d, 0xd1, 0x4c, 0x53, 0x17, 0xad, 0x96, 0x91, 0x29, 0xee, 0x2a, 0x6b, 0x6b, 0x95, 0xf1, 0x13, - 0xde, 0xf8, 0x05, 0xb4, 0x12, 0xbd, 0xdd, 0xa2, 0xf8, 0x91, 0xef, 0x16, 0x17, 0xc5, 0x8f, 
0xa2, - 0x06, 0xf1, 0x04, 0xda, 0x81, 0x4e, 0xaa, 0xdb, 0x8b, 0x5e, 0x2a, 0x5b, 0x99, 0x2e, 0x8a, 0x6a, - 0x2f, 0x8f, 0xc5, 0x8b, 0x79, 0x98, 0x2a, 0x22, 0xca, 0x10, 0x58, 0xba, 0xb9, 0x74, 0x0c, 0x7c, - 0x69, 0x1c, 0x5a, 0xca, 0x95, 0x73, 0x3d, 0xe1, 0x42, 0x57, 0x2e, 0xeb, 0x39, 0x17, 0xba, 0x72, - 0x79, 0x9b, 0x79, 0x02, 0xed, 0xc1, 0x5c, 0xa6, 0x1f, 0x8c, 0x2e, 0x96, 0x91, 0xc8, 0xf6, 0xa2, - 0xb5, 0x4b, 0x15, 0x30, 0x63, 0x4e, 0x3f, 0x52, 0x15, 0x08, 0x6e, 0x72, 0x17, 0xca, 0x97, 0x8e, - 0xec, 0xec, 0x85, 0xc3, 0x91, 0x62, 0xd2, 0x5f, 0xc1, 0xa9, 0xa2, 0x32, 0x21, 0xba, 0x52, 0x54, - 0xd7, 0x28, 0xad, 0x45, 0x6a, 0xab, 0x55, 0xd1, 0x63, 0xc6, 0x9f, 0x40, 0x43, 0xf5, 0x44, 0x51, - 0xc1, 0xa5, 0x94, 0xe9, 0x22, 0x6b, 0xfa, 0x61, 0x28, 0x09, 0x57, 0xe9, 0xab, 0xa8, 0x30, 0x6a, - 0x56, 0x96, 0x47, 0x85, 0x5c, 0x5b, 0xb5, 0x3c, 0x2a, 0xe4, 0x7b, 0x9f, 0x9c, 0x5d, 0x6c, 0x76, - 0xc9, 0xde, 0x5e, 0xb9, 0xd9, 0x15, 0xb4, 0x2e, 0xcb, 0xcd, 0xae, 0xb0, 0x5d, 0x38, 0x81, 0x7e, - 0xaa, 0x7e, 0xdf, 0x90, 0x6d, 0xe9, 0xa1, 0xd2, 0xd8, 0x52, 0xd2, 0x5a, 0xd4, 0xae, 0x56, 0x5f, - 0x10, 0xb3, 0x7f, 0xaa, 0x22, 0x61, 0xa6, 0xa5, 0x57, 0x1e, 0x09, 0x8b, 0x1b, 0x8b, 0xda, 0x5a, - 0x65, 0xfc, 0xbc, 0x93, 0x27, 0x7b, 0x5e, 0xe5, 0xda, 0x2e, 0x68, 0x13, 0x96, 0x6b, 0xbb, 0xb0, - 0x8d, 0xc6, 0xfd, 0xa3, 0xa8, 0x9f, 0x55, 0xe4, 0x1f, 0x87, 0x34, 0xdc, 0xb4, 0xd5, 0xaa, 0xe8, - 0xa9, 0x44, 0x21, 0xdf, 0xb0, 0x42, 0x63, 0xf7, 0x9f, 0xba, 0x03, 0xae, 0x54, 0xc4, 0x2e, 0x3f, - 0x5d, 0x75, 0x27, 0x8c, 0x15, 0x20, 0x73, 0x37, 0xac, 0x55, 0xc6, 0x8f, 0x79, 0x07, 0xea, 0xd7, - 0x32, 0x89, 0x66, 0x13, 0xba, 0x3c, 0x86, 0x4e, 0xa2, 0x59, 0xa6, 0xbd, 0x52, 0x09, 0xb7, 0xc8, - 0x7b, 0x93, 0xed, 0x9f, 0xc3, 0xec, 0x29, 0xd7, 0xb3, 0x3a, 0xcc, 0x9e, 0x0a, 0x3a, 0x4a, 0x05, - 0xde, 0xab, 0xba, 0x3e, 0xe3, 0xbd, 0x37, 0xd3, 0x7d, 0x1a, 0xef, 0xbd, 0xb9, 0x86, 0xd2, 0x04, - 0xfa, 0xc5, 0xe8, 0x57, 0x14, 0xf9, 0x1a, 0x2c, 0x5a, 0x2f, 0x0d, 0x45, 0xa5, 0xa5, 0x67, 0xed, - 0xf5, 0x23, 0xad, 0x49, 0x28, 0xff, 0xd7, 0x35, 0xd5, 0x92, 0x2d, 0x2c, 0x82, 0xa2, 0x37, 0x2a, - 0x10, 0xce, 0xd5, 0x71, 0xb5, 0x37, 0x8f, 0xb8, 0xaa, 0xc8, 0x1a, 0x92, 0xf5, 0xcf, 0x72, 0x6b, - 0x28, 0xa8, 0xa1, 0x96, 0x5b, 0x43, 0x51, 0x49, 0x55, 0x9f, 0x40, 0xf7, 0x61, 0x9a, 0x3f, 0xd7, - 0xd1, 0xd9, 0xc3, 0xdf, 0xf1, 0xda, 0xb9, 0xe2, 0xf9, 0xf8, 0x35, 0xca, 0x04, 0xd8, 0x99, 0xe1, - 0x7f, 0x01, 0x78, 0xfd, 0x3f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x74, 0xac, 0x01, 0xab, 0x19, 0x30, - 0x00, 0x00, + // 3280 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xd4, 0x3b, 0x4d, 0x6f, 0x1c, 0xc7, + 0xb1, 0x1c, 0x2e, 0x3f, 0x76, 0x6b, 0x77, 0x45, 0xaa, 0x49, 0x53, 0xeb, 0x21, 0x29, 0x51, 0x23, + 0x7f, 0x90, 0xb2, 0x45, 0xca, 0xb4, 0xfd, 0xac, 0x27, 0x3f, 0xfb, 0x59, 0xa2, 0x44, 0x59, 0xb6, + 0x48, 0xd9, 0x43, 0x59, 0x7e, 0x2f, 0x36, 0x32, 0x18, 0xce, 0xf4, 0x92, 0x63, 0xce, 0xce, 0x8c, + 0x66, 0x7a, 0x69, 0xad, 0xe0, 0x9c, 0x1c, 0x20, 0x01, 0x82, 0xe4, 0x10, 0xe4, 0x92, 0x4b, 0x80, + 0x20, 0xf7, 0x5c, 0xf3, 0x17, 0xfc, 0x07, 0x02, 0xe4, 0x94, 0x4b, 0xce, 0x39, 0xe4, 0x10, 0x20, + 0x40, 0x2e, 0x41, 0x7f, 0xcd, 0xce, 0x27, 0x77, 0x18, 0x31, 0x08, 0x72, 0x9b, 0xae, 0xae, 0xae, + 0xea, 0xaa, 0xae, 0xaa, 0xae, 0xae, 0xda, 0x85, 0xb9, 0x63, 0xdf, 0xed, 0xf7, 0xb0, 0x11, 0xe1, + 0xf0, 0x18, 0x87, 0xeb, 0x41, 0xe8, 0x13, 0x1f, 0xcd, 0xa6, 0x80, 0x46, 0xb0, 0xaf, 0x6d, 0x00, + 0xba, 0x6d, 0x12, 0xeb, 0xf0, 0x0e, 0x76, 0x31, 0xc1, 0x3a, 0x7e, 0xd2, 0xc7, 0x11, 0x41, 0x2f, + 0x42, 0xbd, 0xeb, 0xb8, 0xd8, 0x70, 0xec, 0xa8, 0xa3, 
0xac, 0xd4, 0x56, 0x1b, 0xfa, 0x34, 0x1d, + 0xdf, 0xb7, 0x23, 0xed, 0x21, 0xcc, 0xa5, 0x16, 0x44, 0x81, 0xef, 0x45, 0x18, 0xdd, 0x80, 0xe9, + 0x10, 0x47, 0x7d, 0x97, 0xf0, 0x05, 0xcd, 0xcd, 0x8b, 0xeb, 0x59, 0x5e, 0xeb, 0xf1, 0x92, 0xbe, + 0x4b, 0x74, 0x89, 0xae, 0x7d, 0xab, 0x40, 0x2b, 0x39, 0x83, 0x2e, 0xc0, 0xb4, 0x60, 0xde, 0x51, + 0x56, 0x94, 0xd5, 0x86, 0x3e, 0xc5, 0x79, 0xa3, 0x05, 0x98, 0x8a, 0x88, 0x49, 0xfa, 0x51, 0x67, + 0x7c, 0x45, 0x59, 0x9d, 0xd4, 0xc5, 0x08, 0xcd, 0xc3, 0x24, 0x0e, 0x43, 0x3f, 0xec, 0xd4, 0x18, + 0x3a, 0x1f, 0x20, 0x04, 0x13, 0x91, 0xf3, 0x0c, 0x77, 0x26, 0x56, 0x94, 0xd5, 0xb6, 0xce, 0xbe, + 0x51, 0x07, 0xa6, 0x8f, 0x71, 0x18, 0x39, 0xbe, 0xd7, 0x99, 0x64, 0x60, 0x39, 0xd4, 0x3e, 0x82, + 0x73, 0xdb, 0x8e, 0x8b, 0xef, 0x61, 0x22, 0x75, 0x50, 0xba, 0x8d, 0x4b, 0xd0, 0x34, 0x2d, 0x0b, + 0x07, 0xc4, 0x38, 0x78, 0xe6, 0x04, 0x6c, 0x2f, 0x75, 0x1d, 0x38, 0xe8, 0xde, 0x33, 0x27, 0xd0, + 0x7e, 0x54, 0x83, 0x99, 0x98, 0x98, 0xd0, 0x0f, 0x82, 0x09, 0xdb, 0x24, 0x26, 0x23, 0xd5, 0xd2, + 0xd9, 0x37, 0x7a, 0x19, 0xce, 0x59, 0xbe, 0x47, 0xb0, 0x47, 0x0c, 0x17, 0x7b, 0x07, 0xe4, 0x90, + 0xd1, 0x6a, 0xeb, 0x6d, 0x01, 0x7d, 0xc0, 0x80, 0xe8, 0x32, 0xb4, 0x24, 0x1a, 0x19, 0x04, 0x58, + 0x48, 0xd9, 0x14, 0xb0, 0x47, 0x83, 0x00, 0xa3, 0x2b, 0xd0, 0x76, 0xcd, 0x88, 0x18, 0x3d, 0xdf, + 0x76, 0xba, 0x0e, 0xb6, 0x99, 0xd0, 0x13, 0x7a, 0x8b, 0x02, 0x77, 0x04, 0x0c, 0xa9, 0xfc, 0x50, + 0x3d, 0xb3, 0x87, 0x99, 0xf4, 0x0d, 0x3d, 0x1e, 0xd3, 0xed, 0x61, 0x62, 0x1e, 0x74, 0xa6, 0x18, + 0x9c, 0x7d, 0xa3, 0x65, 0x00, 0x27, 0x62, 0x32, 0x06, 0xd8, 0xee, 0x4c, 0x33, 0x31, 0x1b, 0x4e, + 0x74, 0x8f, 0x03, 0xd0, 0x87, 0x30, 0x7d, 0x88, 0x4d, 0x1b, 0x87, 0x51, 0xa7, 0xce, 0x4e, 0x7c, + 0x3d, 0x7f, 0xe2, 0x19, 0x2d, 0xac, 0x7f, 0xc8, 0x17, 0xdc, 0xf5, 0x48, 0x38, 0xd0, 0xe5, 0x72, + 0xb4, 0x04, 0x0d, 0x76, 0x64, 0x5b, 0xbe, 0x8d, 0x3b, 0x0d, 0x76, 0xb4, 0x43, 0x80, 0x7a, 0x13, + 0x5a, 0xc9, 0x65, 0x68, 0x16, 0x6a, 0x47, 0x78, 0x20, 0xce, 0x84, 0x7e, 0xd2, 0xf3, 0x3f, 0x36, + 0xdd, 0x3e, 0x66, 0xea, 0x6b, 0xe8, 0x7c, 0x70, 0x73, 0xfc, 0x86, 0xa2, 0x4d, 0xc3, 0xe4, 0xdd, + 0x5e, 0x40, 0x06, 0xda, 0x3b, 0xd0, 0x79, 0x6c, 0x5a, 0xfd, 0x7e, 0xef, 0x31, 0xdb, 0xe2, 0xd6, + 0x21, 0xb6, 0x8e, 0xe4, 0x41, 0x2f, 0x42, 0x43, 0x6c, 0x5c, 0x1c, 0x75, 0x5b, 0xaf, 0x73, 0xc0, + 0x7d, 0x5b, 0xfb, 0x00, 0x5e, 0x2c, 0x58, 0x28, 0x0e, 0xf5, 0x0a, 0xb4, 0x0f, 0xcc, 0x70, 0xdf, + 0x3c, 0xc0, 0x46, 0x68, 0x12, 0xc7, 0x67, 0xab, 0x15, 0xbd, 0x25, 0x80, 0x3a, 0x85, 0x69, 0x5f, + 0x80, 0x9a, 0xa2, 0xe0, 0xf7, 0x02, 0xd3, 0x22, 0x55, 0x98, 0xa3, 0x15, 0x68, 0x06, 0x21, 0x36, + 0x5d, 0xd7, 0xb7, 0x4c, 0xc2, 0xc5, 0xab, 0xe9, 0x49, 0x90, 0xb6, 0x0c, 0x8b, 0x85, 0xc4, 0xf9, + 0x06, 0xb5, 0x1b, 0x99, 0xdd, 0xfb, 0xbd, 0x9e, 0x53, 0x89, 0xb5, 0xb6, 0x94, 0xdb, 0x35, 0x5b, + 0x29, 0xe8, 0xfe, 0x77, 0x66, 0xd6, 0xc5, 0xa6, 0xd7, 0x0f, 0x2a, 0x11, 0xce, 0xee, 0x58, 0x2e, + 0x8d, 0x29, 0x5f, 0xe0, 0xc1, 0x60, 0xcb, 0x77, 0x5d, 0x6c, 0x11, 0xc7, 0xf7, 0x24, 0xd9, 0x8b, + 0x00, 0x56, 0x0c, 0x14, 0xe7, 0x9f, 0x80, 0x68, 0x2a, 0x74, 0xf2, 0x4b, 0x05, 0xd9, 0x3f, 0x2a, + 0xf0, 0xc2, 0x2d, 0xa1, 0x34, 0xce, 0xb8, 0xd2, 0x01, 0xa4, 0x59, 0x8e, 0x67, 0x59, 0x66, 0x0f, + 0xa8, 0x96, 0x3b, 0x20, 0x8a, 0x11, 0xe2, 0xc0, 0x75, 0x2c, 0x93, 0x91, 0x98, 0xe0, 0xbe, 0x9b, + 0x00, 0x51, 0x7b, 0x26, 0xc4, 0x15, 0x1e, 0x49, 0x3f, 0xd1, 0x26, 0x2c, 0xf4, 0x70, 0xcf, 0x0f, + 0x07, 0x46, 0xcf, 0x0c, 0x8c, 0x9e, 0xf9, 0xd4, 0xa0, 0xc1, 0xcb, 0xe8, 0xed, 0x33, 0xf7, 0x6c, + 0xeb, 0x88, 0xcf, 0xee, 0x98, 0xc1, 0x8e, 0xf9, 0x74, 0xcf, 0x79, 0x86, 0x77, 
0xf6, 0xb5, 0x0e, + 0x2c, 0x64, 0xe5, 0x13, 0xa2, 0xff, 0x17, 0x5c, 0xe0, 0x90, 0xbd, 0x81, 0x67, 0xed, 0xb1, 0x88, + 0x59, 0xe9, 0xa0, 0xfe, 0xae, 0x40, 0x27, 0xbf, 0x50, 0x58, 0xfe, 0xf3, 0x6a, 0xed, 0xd4, 0x3a, + 0xb9, 0x04, 0x4d, 0x62, 0x3a, 0xae, 0xe1, 0x77, 0xbb, 0x11, 0x26, 0x4c, 0x11, 0x13, 0x3a, 0x50, + 0xd0, 0x43, 0x06, 0x41, 0x6b, 0x30, 0x6b, 0x71, 0xeb, 0x37, 0x42, 0x7c, 0xec, 0xb0, 0x18, 0x3f, + 0xcd, 0x36, 0x36, 0x63, 0x49, 0xaf, 0xe0, 0x60, 0xa4, 0x41, 0xdb, 0xb1, 0x9f, 0x1a, 0x2c, 0xba, + 0xb3, 0x2b, 0xa2, 0xce, 0xa8, 0x35, 0x1d, 0xfb, 0x29, 0x0d, 0x58, 0x54, 0xa3, 0xda, 0x63, 0x58, + 0xe2, 0xc2, 0xdf, 0xf7, 0xac, 0x10, 0xf7, 0xb0, 0x47, 0x4c, 0x77, 0xcb, 0x0f, 0x06, 0x95, 0xcc, + 0xe6, 0x45, 0xa8, 0x47, 0x8e, 0x67, 0x61, 0xc3, 0xe3, 0x57, 0xd5, 0x84, 0x3e, 0xcd, 0xc6, 0xbb, + 0x91, 0x76, 0x1b, 0x96, 0x4b, 0xe8, 0x0a, 0xcd, 0x5e, 0x86, 0x16, 0xdb, 0x98, 0x08, 0xef, 0xe2, + 0xc2, 0x68, 0x52, 0xd8, 0x16, 0x07, 0x69, 0x6f, 0x00, 0xe2, 0x34, 0x76, 0xfc, 0xbe, 0x57, 0xcd, + 0x9d, 0x5f, 0x80, 0xb9, 0xd4, 0x12, 0x61, 0x1b, 0x6f, 0xc2, 0x3c, 0x07, 0x7f, 0xe6, 0xf5, 0x2a, + 0xd3, 0xba, 0x00, 0x2f, 0x64, 0x16, 0x09, 0x6a, 0x9b, 0x92, 0x49, 0x3a, 0x99, 0x38, 0x91, 0xd8, + 0x82, 0xdc, 0x41, 0x3a, 0x9f, 0x60, 0x91, 0x8b, 0x6f, 0xd8, 0x0c, 0x8f, 0x74, 0x6c, 0xda, 0xbe, + 0xe7, 0x0e, 0x2a, 0x47, 0xae, 0x82, 0x95, 0x82, 0xee, 0xe7, 0xb0, 0x20, 0x23, 0x9a, 0xd7, 0x75, + 0x0e, 0xfa, 0x21, 0xae, 0x1a, 0x89, 0x93, 0x26, 0x3b, 0x9e, 0x33, 0x59, 0x6d, 0x43, 0xba, 0x59, + 0x82, 0xb0, 0x38, 0xd2, 0x38, 0x3f, 0x51, 0x12, 0xf9, 0x89, 0xf6, 0x5b, 0x05, 0xce, 0xcb, 0x15, + 0x15, 0xed, 0xea, 0x94, 0x8e, 0x55, 0x2b, 0x75, 0xac, 0x89, 0xa1, 0x63, 0xad, 0xc2, 0x6c, 0xe4, + 0xf7, 0x43, 0x0b, 0x1b, 0x34, 0x27, 0x31, 0x3c, 0x7a, 0x07, 0x73, 0xbf, 0x3b, 0xc7, 0xe1, 0x77, + 0x4c, 0x62, 0xee, 0xfa, 0x36, 0xd6, 0xfe, 0x57, 0x9a, 0x5d, 0xca, 0x5e, 0xd7, 0xe0, 0x3c, 0x4b, + 0x3d, 0xcc, 0x20, 0xc0, 0x9e, 0x6d, 0x98, 0x84, 0x1a, 0xbd, 0xc2, 0x8c, 0xfe, 0x1c, 0x9d, 0xb8, + 0xc5, 0xe0, 0xb7, 0xc8, 0x6e, 0xa4, 0xfd, 0x62, 0x1c, 0x66, 0xe8, 0x5a, 0xea, 0x64, 0x95, 0xe4, + 0x9d, 0x85, 0x1a, 0x7e, 0x4a, 0x84, 0xa0, 0xf4, 0x13, 0x6d, 0xc0, 0x9c, 0xf0, 0x66, 0xc7, 0xf7, + 0x86, 0x8e, 0x5e, 0xe3, 0x71, 0x71, 0x38, 0x15, 0xfb, 0xfa, 0x25, 0x68, 0x46, 0xc4, 0x0f, 0x64, + 0xdc, 0xe0, 0x79, 0x11, 0x50, 0x90, 0x88, 0x1b, 0x69, 0x9d, 0x4e, 0x16, 0xe8, 0xb4, 0xe5, 0x44, + 0x06, 0xb6, 0x0c, 0xbe, 0x2b, 0x16, 0x79, 0xea, 0x3a, 0x38, 0xd1, 0x5d, 0x8b, 0x6b, 0x03, 0xbd, + 0x0f, 0x4b, 0xce, 0x81, 0xe7, 0x87, 0xd8, 0x10, 0x8a, 0x64, 0xfe, 0xeb, 0xf9, 0xc4, 0xe8, 0xfa, + 0x7d, 0x4f, 0x66, 0x4e, 0x1d, 0x8e, 0xb3, 0xc7, 0x50, 0xa8, 0x06, 0x76, 0x7d, 0xb2, 0x4d, 0xe7, + 0xb5, 0xb7, 0x61, 0x76, 0xa8, 0x95, 0xea, 0x51, 0xe0, 0x5b, 0x45, 0x5a, 0xdc, 0x23, 0xd3, 0x71, + 0xf7, 0xb0, 0x67, 0xe3, 0xf0, 0x39, 0xa3, 0x13, 0xba, 0x0e, 0xf3, 0x8e, 0xed, 0x62, 0x83, 0x38, + 0x3d, 0xec, 0xf7, 0x89, 0x11, 0x61, 0xcb, 0xf7, 0xec, 0x48, 0xea, 0x97, 0xce, 0x3d, 0xe2, 0x53, + 0x7b, 0x7c, 0x46, 0xfb, 0x61, 0x7c, 0x4b, 0x24, 0x77, 0x31, 0xcc, 0x8f, 0x3c, 0x8c, 0x29, 0x41, + 0x9e, 0xea, 0x09, 0x31, 0x5a, 0x1c, 0xc8, 0xb3, 0x3a, 0x7a, 0x42, 0x02, 0x69, 0xdf, 0xb7, 0x07, + 0x6c, 0x47, 0x2d, 0x1d, 0x38, 0xe8, 0xb6, 0x6f, 0x0f, 0x58, 0xb8, 0x8e, 0x0c, 0x66, 0x64, 0xd6, + 0x61, 0xdf, 0x3b, 0x62, 0xbb, 0xa9, 0xeb, 0x4d, 0x27, 0x7a, 0x60, 0x46, 0x64, 0x8b, 0x82, 0xb4, + 0xdf, 0x29, 0x32, 0x5e, 0xd0, 0x6d, 0xe8, 0xd8, 0xc2, 0xce, 0xf1, 0xbf, 0x41, 0x1d, 0x74, 0x85, + 0x30, 0x82, 0x54, 0x2e, 0x2c, 0x1c, 0x0e, 0xf1, 0x39, 0x71, 0xab, 0xb2, 0x99, 0x61, 0xb8, 0x4a, + 0x6f, 
0x5c, 0x84, 0xab, 0x2f, 0xe5, 0x75, 0x71, 0xd7, 0xda, 0x3b, 0x34, 0x43, 0x3b, 0xba, 0x87, + 0x3d, 0x1c, 0x9a, 0xe4, 0x4c, 0xd2, 0x17, 0x6d, 0x05, 0x2e, 0x96, 0x51, 0x17, 0xfc, 0xbf, 0x90, + 0xd7, 0xa0, 0xc4, 0xd0, 0xf1, 0x7e, 0xdf, 0x71, 0xed, 0x33, 0x61, 0xff, 0x71, 0x56, 0xb8, 0x98, + 0xb8, 0xb0, 0x9f, 0xab, 0x70, 0x3e, 0x64, 0x20, 0x62, 0x44, 0x14, 0x21, 0x7e, 0x8f, 0xb6, 0xf5, + 0x19, 0x31, 0xc1, 0x16, 0xd2, 0x77, 0xe9, 0x4f, 0xc6, 0xa5, 0x05, 0x48, 0x6a, 0x67, 0x16, 0x56, + 0x17, 0xa1, 0x31, 0x64, 0x5f, 0x63, 0xec, 0xeb, 0x91, 0xe0, 0x4b, 0xad, 0xd3, 0xf2, 0x83, 0x81, + 0x81, 0x2d, 0x9e, 0x51, 0xb0, 0xa3, 0xae, 0xd3, 0xe7, 0x59, 0x30, 0xb8, 0x6b, 0xb1, 0x84, 0xa2, + 0x7a, 0x8c, 0x4d, 0x50, 0xfb, 0x8a, 0x53, 0x9b, 0x4a, 0x52, 0xfb, 0x8a, 0x51, 0x93, 0x38, 0xc7, + 0x4e, 0x97, 0xe3, 0x4c, 0x0f, 0x71, 0x1e, 0x3b, 0x5d, 0x8a, 0x33, 0xb4, 0xaa, 0xb4, 0x32, 0xc4, + 0xa9, 0x7e, 0x0d, 0x8b, 0xe9, 0xd9, 0xea, 0x17, 0xf6, 0x73, 0x29, 0x4b, 0xbb, 0x98, 0x35, 0xa7, + 0xcc, 0xad, 0x7f, 0x9c, 0xdd, 0x76, 0xe5, 0x0c, 0xe7, 0xf9, 0xf6, 0xb5, 0x9c, 0x55, 0x48, 0x3a, + 0x4d, 0xfa, 0xbf, 0xec, 0xb6, 0x4f, 0x91, 0x2e, 0x9d, 0xcc, 0xf8, 0x52, 0xd6, 0x05, 0xb2, 0x39, + 0xd5, 0x2f, 0xe3, 0xf8, 0x2a, 0x30, 0x68, 0x46, 0x53, 0x39, 0xae, 0x09, 0xbe, 0xa2, 0xae, 0x30, + 0x2d, 0xd8, 0xa2, 0x05, 0x98, 0x12, 0xf7, 0x21, 0x7f, 0xb1, 0x88, 0x51, 0xaa, 0x64, 0x52, 0x13, + 0x25, 0x13, 0x59, 0x0a, 0xa2, 0x6f, 0xee, 0x49, 0x1e, 0x1e, 0xe9, 0xf8, 0x63, 0x3c, 0xd0, 0x76, + 0x33, 0x1e, 0xc7, 0xb7, 0x76, 0x42, 0xc1, 0x83, 0x57, 0x14, 0x6c, 0x76, 0xe6, 0xb6, 0x28, 0x9c, + 0x34, 0x1c, 0x61, 0x04, 0xb6, 0xf6, 0x53, 0x65, 0x48, 0xf0, 0xb6, 0xeb, 0xef, 0x9f, 0xa1, 0x55, + 0x26, 0xa5, 0xa8, 0xa5, 0xa4, 0x48, 0xd6, 0x84, 0x26, 0xd2, 0x35, 0xa1, 0x84, 0x13, 0x25, 0xb7, + 0x53, 0x16, 0x9a, 0x1f, 0xf9, 0x67, 0xf7, 0xb2, 0xcc, 0x87, 0xe6, 0x21, 0x75, 0xc1, 0xff, 0x26, + 0x2c, 0x52, 0x85, 0x73, 0x28, 0x7b, 0xb7, 0x54, 0x7f, 0xdb, 0xfd, 0x79, 0x1c, 0x96, 0x8a, 0x17, + 0x57, 0x79, 0xdf, 0xbd, 0x0b, 0x6a, 0xfc, 0x7e, 0xa2, 0x57, 0x63, 0x44, 0xcc, 0x5e, 0x10, 0x5f, + 0x8e, 0xfc, 0x0e, 0xbd, 0x20, 0x1e, 0x53, 0x8f, 0xe4, 0xbc, 0xbc, 0x21, 0x73, 0x8f, 0xaf, 0x5a, + 0xee, 0xf1, 0x45, 0x19, 0xd8, 0x26, 0x29, 0x63, 0xc0, 0x73, 0xb8, 0x0b, 0xb6, 0x49, 0xca, 0x18, + 0xc4, 0x8b, 0x19, 0x03, 0x6e, 0xb5, 0x4d, 0x81, 0xcf, 0x18, 0x2c, 0x03, 0x88, 0xf4, 0xaa, 0xef, + 0xc9, 0xc7, 0x64, 0x83, 0x27, 0x57, 0x7d, 0xaf, 0x34, 0xcb, 0x9c, 0x2e, 0xcd, 0x32, 0xd3, 0xa7, + 0x59, 0xcf, 0x9d, 0xe6, 0xaf, 0x14, 0x80, 0x3b, 0x4e, 0x74, 0xc4, 0xb5, 0x4c, 0xf3, 0x5a, 0xdb, + 0x91, 0xcf, 0x01, 0xfa, 0x49, 0x21, 0xa6, 0xeb, 0x0a, 0xdd, 0xd1, 0x4f, 0xea, 0x3f, 0xfd, 0x08, + 0xdb, 0x42, 0x3d, 0xec, 0x9b, 0xc2, 0xba, 0x21, 0xc6, 0x42, 0x03, 0xec, 0x9b, 0x66, 0x8a, 0x01, + 0x0e, 0x2d, 0xec, 0x11, 0x83, 0xcd, 0x51, 0x69, 0xc7, 0xf5, 0xa6, 0x80, 0x6d, 0x67, 0x50, 0x18, + 0xc9, 0xa9, 0x14, 0xca, 0x67, 0x11, 0xb6, 0xb5, 0xdf, 0x28, 0xd0, 0xd8, 0xc1, 0x3d, 0xb1, 0xbf, + 0x8b, 0x00, 0x07, 0x7e, 0xe8, 0xf7, 0x89, 0xe3, 0x61, 0x9e, 0xcc, 0x4f, 0xea, 0x09, 0xc8, 0x73, + 0xec, 0x96, 0x46, 0x18, 0xec, 0x76, 0xc5, 0x99, 0xb0, 0x6f, 0x0a, 0x3b, 0xc4, 0x66, 0x20, 0x8e, + 0x81, 0x7d, 0xd3, 0x27, 0x53, 0x44, 0x4c, 0xeb, 0x88, 0xe9, 0x7c, 0x42, 0xe7, 0x03, 0xed, 0x0f, + 0x0a, 0x80, 0x8e, 0x7b, 0x3e, 0x61, 0x26, 0x4b, 0xe5, 0xda, 0x37, 0xad, 0x23, 0xfa, 0xec, 0x60, + 0x85, 0x51, 0xae, 0xcf, 0xa6, 0x80, 0xb1, 0xc2, 0xe8, 0x32, 0x80, 0x44, 0x11, 0x61, 0xb0, 0xa1, + 0x37, 0x04, 0x84, 0x3f, 0x30, 0x64, 0x44, 0x10, 0xb5, 0xc4, 0x61, 0x68, 0xe4, 0xdb, 0x96, 0xa1, + 0x71, 0x11, 0x1a, 0x59, 0x8b, 
0x62, 0x11, 0x85, 0x99, 0xd3, 0x15, 0x68, 0xcb, 0xca, 0x2b, 0xb3, + 0x57, 0x21, 0x4a, 0x4b, 0x02, 0xa9, 0x8d, 0xb2, 0x2a, 0xe7, 0x53, 0x82, 0xbd, 0xd8, 0x94, 0x1a, + 0xfa, 0x10, 0xa0, 0x7d, 0x03, 0x20, 0xeb, 0x02, 0x5d, 0x1f, 0x6d, 0xc2, 0x24, 0x25, 0x2e, 0x6b, + 0xe9, 0x4b, 0xf9, 0xca, 0xea, 0x50, 0x0d, 0x3a, 0x47, 0x4d, 0xc6, 0xb1, 0xf1, 0x54, 0x1c, 0x1b, + 0xfd, 0x2c, 0xd4, 0xbe, 0x53, 0x60, 0x45, 0x64, 0xa1, 0x0e, 0x0e, 0x77, 0xfc, 0x63, 0x9a, 0x91, + 0x3c, 0xf2, 0x39, 0x93, 0x33, 0x09, 0xc0, 0x37, 0xa0, 0x63, 0xe3, 0x88, 0x38, 0x1e, 0x63, 0x68, + 0xc8, 0x43, 0x61, 0xc5, 0x68, 0xbe, 0xa1, 0x85, 0xc4, 0xfc, 0x6d, 0x3e, 0xbd, 0x6b, 0xf6, 0x30, + 0xba, 0x06, 0x73, 0x47, 0x18, 0x07, 0x86, 0xeb, 0x5b, 0xa6, 0x6b, 0x48, 0xd7, 0x16, 0x69, 0xd6, + 0x2c, 0x9d, 0x7a, 0x40, 0x67, 0xee, 0x70, 0xf7, 0xd6, 0x22, 0xb8, 0x7c, 0x82, 0x24, 0x22, 0xbc, + 0x2d, 0x41, 0x23, 0x08, 0x7d, 0x0b, 0x47, 0xd4, 0x66, 0x15, 0x76, 0xdb, 0x0d, 0x01, 0xe8, 0x3a, + 0xcc, 0xc5, 0x83, 0x4f, 0xb8, 0x93, 0x98, 0x07, 0xbc, 0xfc, 0x3a, 0xae, 0x17, 0x4d, 0x69, 0x3f, + 0x57, 0x40, 0xcb, 0x71, 0xdd, 0x0e, 0xfd, 0xde, 0x19, 0x6a, 0x70, 0x03, 0xe6, 0x99, 0x1e, 0x42, + 0x46, 0x72, 0xa8, 0x08, 0xfe, 0x1a, 0x3a, 0x4f, 0xe7, 0x38, 0x37, 0xa9, 0x89, 0x3e, 0x5c, 0x39, + 0x71, 0x4f, 0xff, 0x22, 0x5d, 0x2c, 0xca, 0x4b, 0x9c, 0x3f, 0x70, 0x52, 0xb7, 0x92, 0xf6, 0x6b, + 0x45, 0xde, 0xa9, 0xe9, 0x59, 0xb1, 0x97, 0x5b, 0xd0, 0xb6, 0x9d, 0xe8, 0xc8, 0xe0, 0x8d, 0x9d, + 0x93, 0xec, 0x7f, 0x18, 0x4d, 0xf5, 0x96, 0x1d, 0x7f, 0xe3, 0x08, 0x7d, 0x00, 0x6d, 0x51, 0x3c, + 0x4d, 0xf4, 0x8a, 0x9a, 0x9b, 0x8b, 0x79, 0x12, 0x71, 0xbc, 0xd3, 0x5b, 0x7c, 0x05, 0x1f, 0x69, + 0x7f, 0x6b, 0x41, 0xeb, 0xd3, 0x3e, 0x0e, 0x07, 0x89, 0xc2, 0x73, 0x84, 0xc5, 0x31, 0xc8, 0x7e, + 0x58, 0x02, 0x42, 0x6f, 0x9c, 0x6e, 0xe8, 0xf7, 0x8c, 0xb8, 0x65, 0x36, 0xce, 0x50, 0x9a, 0x14, + 0xb8, 0xcd, 0xdb, 0x66, 0xe8, 0x3d, 0x98, 0xea, 0x3a, 0x2e, 0xc1, 0xbc, 0x49, 0xd5, 0xdc, 0x7c, + 0x39, 0xbf, 0x9f, 0x24, 0xcf, 0xf5, 0x6d, 0x86, 0xac, 0x8b, 0x45, 0x68, 0x1f, 0xe6, 0x1c, 0x2f, + 0x60, 0x4f, 0xd0, 0xd0, 0x31, 0x5d, 0xe7, 0xd9, 0xb0, 0x74, 0xda, 0xdc, 0x7c, 0x63, 0x04, 0xad, + 0xfb, 0x74, 0xe5, 0x5e, 0x72, 0xa1, 0x8e, 0x9c, 0x1c, 0x0c, 0x61, 0x98, 0xf7, 0xfb, 0x24, 0xcf, + 0x64, 0x92, 0x31, 0xd9, 0x1c, 0xc1, 0xe4, 0x21, 0x5b, 0x9a, 0xe6, 0x32, 0xe7, 0xe7, 0x81, 0xea, + 0x2e, 0x4c, 0x71, 0xe1, 0x68, 0x90, 0xef, 0x3a, 0xd8, 0x95, 0xfd, 0x35, 0x3e, 0xa0, 0x71, 0xcc, + 0x0f, 0x70, 0x68, 0x7a, 0x32, 0x5e, 0xcb, 0xe1, 0xb0, 0xcf, 0x53, 0x4b, 0xf4, 0x79, 0xd4, 0xdf, + 0x4f, 0x02, 0xca, 0x4b, 0x28, 0xeb, 0xc1, 0x21, 0x8e, 0x68, 0x0c, 0x4c, 0x5e, 0x10, 0x33, 0x09, + 0x38, 0xbb, 0x24, 0x3e, 0x87, 0x86, 0x15, 0x1d, 0x1b, 0x4c, 0x25, 0xc2, 0x5c, 0x6e, 0x9e, 0x5a, + 0xa5, 0xeb, 0x5b, 0x7b, 0x8f, 0x19, 0x54, 0xaf, 0x5b, 0xd1, 0x31, 0xfb, 0x42, 0xdf, 0x03, 0xf8, + 0x2a, 0xf2, 0x3d, 0x41, 0x99, 0x1f, 0xfc, 0xbb, 0xa7, 0xa7, 0xfc, 0xd1, 0xde, 0xc3, 0x5d, 0x4e, + 0xba, 0x41, 0xc9, 0x71, 0xda, 0x16, 0xb4, 0x03, 0x33, 0x7c, 0xd2, 0xc7, 0x44, 0x90, 0xe7, 0xb6, + 0xf0, 0xfe, 0xe9, 0xc9, 0x7f, 0xc2, 0xc9, 0x70, 0x0e, 0xad, 0x20, 0x31, 0x52, 0xbf, 0x1b, 0x87, + 0xba, 0x94, 0x8b, 0xbe, 0x62, 0x99, 0x85, 0xf3, 0x5a, 0x8e, 0xe1, 0x78, 0x5d, 0x5f, 0x68, 0xf4, + 0x1c, 0x85, 0xf3, 0x72, 0x0e, 0xbb, 0xbe, 0xd6, 0x60, 0x36, 0xc4, 0x96, 0x1f, 0xda, 0x34, 0xd7, + 0x77, 0x7a, 0x0e, 0x35, 0x7b, 0x7e, 0x96, 0x33, 0x1c, 0x7e, 0x47, 0x82, 0xd1, 0xab, 0x30, 0xc3, + 0x8e, 0x3d, 0x81, 0x59, 0x93, 0x34, 0xb1, 0x9b, 0x40, 0x5c, 0x83, 0xd9, 0x27, 0x7d, 0x1a, 0xf8, + 0xac, 0x43, 0x33, 0x34, 0x2d, 0xe2, 0xc7, 0x55, 0x95, 
0x19, 0x06, 0xdf, 0x8a, 0xc1, 0xe8, 0x2d, + 0x58, 0xe0, 0xa8, 0x38, 0xb2, 0xcc, 0x20, 0x5e, 0x81, 0x43, 0xf1, 0xe8, 0x9e, 0x67, 0xb3, 0x77, + 0xd9, 0xe4, 0x96, 0x9c, 0x43, 0x2a, 0xd4, 0x2d, 0xbf, 0xd7, 0xc3, 0x1e, 0x89, 0x44, 0x1b, 0x34, + 0x1e, 0xa3, 0x5b, 0xb0, 0x6c, 0xba, 0xae, 0xff, 0xb5, 0xc1, 0x56, 0xda, 0x46, 0x4e, 0x3a, 0xfe, + 0x04, 0x57, 0x19, 0xd2, 0xa7, 0x0c, 0x47, 0x4f, 0x0b, 0xaa, 0x5e, 0x82, 0x46, 0x7c, 0x8e, 0x34, + 0xe5, 0x49, 0x18, 0x24, 0xfb, 0x56, 0xcf, 0x41, 0x2b, 0x79, 0x12, 0xea, 0x5f, 0x6a, 0x30, 0x57, + 0xe0, 0x54, 0xe8, 0x0b, 0x00, 0x6a, 0xad, 0xdc, 0xb5, 0x84, 0xb9, 0xfe, 0xcf, 0xe9, 0x9d, 0x93, + 0xda, 0x2b, 0x07, 0xeb, 0xd4, 0xfa, 0xf9, 0x27, 0xfa, 0x3e, 0x34, 0x99, 0xc5, 0x0a, 0xea, 0xdc, + 0x64, 0xdf, 0xfb, 0x27, 0xa8, 0x53, 0x59, 0x05, 0x79, 0xe6, 0x03, 0xfc, 0x5b, 0xfd, 0x93, 0x02, + 0x8d, 0x98, 0x31, 0x4d, 0xe0, 0xf8, 0x41, 0xb1, 0xb3, 0x8e, 0x64, 0x02, 0xc7, 0x60, 0xdb, 0x0c, + 0xf4, 0x1f, 0x69, 0x4a, 0xea, 0x3b, 0x00, 0x43, 0xf9, 0x0b, 0x45, 0x50, 0x0a, 0x45, 0xd0, 0xd6, + 0xa0, 0x4d, 0x35, 0xeb, 0x60, 0x7b, 0x8f, 0x84, 0x4e, 0xc0, 0x7e, 0xb0, 0xc0, 0x71, 0x22, 0xf1, + 0x90, 0x96, 0xc3, 0xcd, 0xbf, 0x2e, 0x41, 0x2b, 0x79, 0x93, 0xa2, 0x2f, 0xa1, 0x99, 0xf8, 0x61, + 0x06, 0x7a, 0x29, 0x7f, 0x68, 0xf9, 0x1f, 0x7a, 0xa8, 0x2f, 0x8f, 0xc0, 0x12, 0x6f, 0xcd, 0x31, + 0xa4, 0xc3, 0xb4, 0x68, 0xe6, 0xa3, 0x95, 0x13, 0xfa, 0xfc, 0x9c, 0xea, 0xe5, 0x91, 0xbf, 0x04, + 0xd0, 0xc6, 0xae, 0x2b, 0xc8, 0x83, 0xf3, 0xb9, 0xde, 0x3a, 0xba, 0x9a, 0x5f, 0x5b, 0xd6, 0xb9, + 0x57, 0x5f, 0xab, 0x84, 0x1b, 0xcb, 0x40, 0x60, 0xae, 0xa0, 0x59, 0x8e, 0x5e, 0x1f, 0x41, 0x25, + 0xd5, 0xb0, 0x57, 0xaf, 0x55, 0xc4, 0x8e, 0xb9, 0x3e, 0x01, 0x94, 0xef, 0xa4, 0xa3, 0xd7, 0x46, + 0x92, 0x19, 0x76, 0xea, 0xd5, 0xd7, 0xab, 0x21, 0x97, 0x0a, 0xca, 0x7b, 0xec, 0x23, 0x05, 0x4d, + 0x75, 0xf1, 0x47, 0x0a, 0x9a, 0x69, 0xdc, 0x8f, 0xa1, 0x23, 0x98, 0xcd, 0xf6, 0xdf, 0xd1, 0x5a, + 0xd9, 0xaf, 0x80, 0x72, 0xed, 0x7d, 0xf5, 0x6a, 0x15, 0xd4, 0x98, 0x19, 0x86, 0x73, 0xe9, 0x7e, + 0x37, 0x7a, 0x35, 0xbf, 0xbe, 0xb0, 0xe3, 0xaf, 0xae, 0x8e, 0x46, 0x4c, 0xca, 0x94, 0xed, 0x81, + 0x17, 0xc9, 0x54, 0xd2, 0x60, 0x2f, 0x92, 0xa9, 0xac, 0xa5, 0xae, 0x8d, 0xa1, 0x6f, 0x64, 0x63, + 0x35, 0xd3, 0x1b, 0x46, 0xeb, 0x65, 0x64, 0x8a, 0x9b, 0xd3, 0xea, 0x46, 0x65, 0xfc, 0x84, 0x37, + 0x7e, 0x09, 0xcd, 0x44, 0x8b, 0xb8, 0x28, 0x7e, 0xe4, 0x9b, 0xce, 0x45, 0xf1, 0xa3, 0xa8, 0xcf, + 0x3c, 0x86, 0xf6, 0xa1, 0x9d, 0x6a, 0x1a, 0xa3, 0x57, 0xca, 0x56, 0xa6, 0x6b, 0xab, 0xea, 0xab, + 0x23, 0xf1, 0x62, 0x1e, 0x86, 0x8c, 0x88, 0x22, 0x04, 0x96, 0x6e, 0x2e, 0x1d, 0x03, 0x5f, 0x19, + 0x85, 0x96, 0x72, 0xe5, 0x5c, 0x6b, 0xb9, 0xd0, 0x95, 0xcb, 0x5a, 0xd7, 0x85, 0xae, 0x5c, 0xde, + 0xad, 0x1e, 0x43, 0x87, 0x30, 0x93, 0x69, 0x2b, 0xa3, 0xd5, 0x32, 0x12, 0xd9, 0x96, 0xb6, 0xba, + 0x56, 0x01, 0x33, 0xe6, 0xf4, 0xff, 0xb2, 0x02, 0xc1, 0x4c, 0xee, 0x4a, 0xf9, 0xd2, 0xa1, 0x9d, + 0xbd, 0x74, 0x32, 0x52, 0x4c, 0xfa, 0x6b, 0x98, 0x2f, 0xaa, 0x36, 0xa2, 0x6b, 0x45, 0x75, 0x8d, + 0xd2, 0x92, 0xa6, 0xba, 0x5e, 0x15, 0x3d, 0x66, 0xfc, 0x19, 0xd4, 0x65, 0x6b, 0x15, 0x15, 0x5c, + 0x4a, 0x99, 0x66, 0xb4, 0xaa, 0x9d, 0x84, 0x92, 0x70, 0x95, 0x9e, 0x8c, 0x0a, 0xc3, 0x9e, 0x67, + 0x79, 0x54, 0xc8, 0x75, 0x67, 0xcb, 0xa3, 0x42, 0xbe, 0x85, 0xca, 0xd8, 0xc5, 0x66, 0x97, 0x6c, + 0x11, 0x96, 0x9b, 0x5d, 0x41, 0x07, 0xb4, 0xdc, 0xec, 0x0a, 0xbb, 0x8e, 0x63, 0xe8, 0x07, 0xf2, + 0x67, 0x12, 0xd9, 0xce, 0x20, 0x2a, 0x8d, 0x2d, 0x25, 0x1d, 0x4a, 0xf5, 0x7a, 0xf5, 0x05, 0x31, + 0xfb, 0x67, 0x32, 0x12, 0x66, 0x3a, 0x83, 0xe5, 0x91, 0xb0, 0xb8, 0x3f, 0xa9, 
0x6e, 0x54, 0xc6, + 0xcf, 0x3b, 0x79, 0xb2, 0x75, 0x56, 0xae, 0xed, 0x82, 0x6e, 0x63, 0xb9, 0xb6, 0x0b, 0xbb, 0x71, + 0xcc, 0x3f, 0x8a, 0xda, 0x62, 0x45, 0xfe, 0x71, 0x42, 0xdf, 0x4e, 0x5d, 0xaf, 0x8a, 0x9e, 0x4a, + 0x14, 0xf2, 0x7d, 0x2f, 0x34, 0x72, 0xff, 0xa9, 0x3b, 0xe0, 0x5a, 0x45, 0xec, 0xf2, 0xd3, 0x95, + 0x77, 0xc2, 0x48, 0x01, 0x32, 0x77, 0xc3, 0x46, 0x65, 0xfc, 0x98, 0x77, 0x20, 0x7f, 0x74, 0x93, + 0xe8, 0x59, 0xa1, 0xab, 0x23, 0xe8, 0x24, 0x7a, 0x6e, 0xea, 0x6b, 0x95, 0x70, 0x8b, 0xbc, 0x37, + 0xd9, 0x45, 0x3a, 0xc9, 0x9e, 0x72, 0xad, 0xaf, 0x93, 0xec, 0xa9, 0xa0, 0x31, 0x55, 0xe0, 0xbd, + 0xb2, 0x79, 0x34, 0xda, 0x7b, 0x33, 0x4d, 0xac, 0xd1, 0xde, 0x9b, 0xeb, 0x4b, 0x8d, 0xa1, 0x1f, + 0x0f, 0x7f, 0x8c, 0x91, 0xaf, 0xc1, 0xa2, 0xcd, 0xd2, 0x50, 0x54, 0x5a, 0x7a, 0x56, 0xdf, 0x3c, + 0xd5, 0x9a, 0x84, 0xf2, 0x7f, 0xa6, 0xc8, 0xce, 0x6e, 0x61, 0x11, 0x14, 0xbd, 0x55, 0x81, 0x70, + 0xae, 0x8e, 0xab, 0xbe, 0x7d, 0xca, 0x55, 0x45, 0xd6, 0x90, 0xac, 0x7f, 0x96, 0x5b, 0x43, 0x41, + 0x0d, 0xb5, 0xdc, 0x1a, 0x8a, 0x4a, 0xaa, 0xda, 0x18, 0x7a, 0x00, 0x93, 0xec, 0xb9, 0x8e, 0x2e, + 0x9e, 0xfc, 0x8e, 0x57, 0x2f, 0x15, 0xcf, 0xc7, 0xaf, 0x51, 0x2a, 0xc0, 0xfe, 0x14, 0xfb, 0x27, + 0xc1, 0x9b, 0xff, 0x08, 0x00, 0x00, 0xff, 0xff, 0xc6, 0x4b, 0x03, 0xaa, 0x60, 0x30, 0x00, 0x00, } From 0d60bb4427413cc96bbafdddf9d123d1ffcf19c4 Mon Sep 17 00:00:00 2001 From: Yoni Nakache <45972051+LazyDBA247-Anyvision@users.noreply.github.com> Date: Mon, 24 Feb 2020 07:47:27 +0200 Subject: [PATCH 0141/2432] display bug, header is Free but data was Used --- weed/server/volume_server_ui/templates.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/weed/server/volume_server_ui/templates.go b/weed/server/volume_server_ui/templates.go index a2d1dd5bf..ac2a2bf69 100644 --- a/weed/server/volume_server_ui/templates.go +++ b/weed/server/volume_server_ui/templates.go @@ -91,7 +91,7 @@ var StatusTpl = template.Must(template.New("status").Funcs(funcMap).Parse(` {{ .Dir }} {{ bytesToHumanReadble .All }} - {{ bytesToHumanReadble .Used }} + {{ bytesToHumanReadble .Free }} {{ percentFrom .All .Used}} {{ end }} From b06b7ca6e6257c16e006adeb8910864068b432b3 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Sun, 23 Feb 2020 22:00:19 -0800 Subject: [PATCH 0142/2432] adjust UI --- weed/server/volume_server_ui/templates.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/weed/server/volume_server_ui/templates.go b/weed/server/volume_server_ui/templates.go index ac2a2bf69..1c1394369 100644 --- a/weed/server/volume_server_ui/templates.go +++ b/weed/server/volume_server_ui/templates.go @@ -83,7 +83,7 @@ var StatusTpl = template.Must(template.New("status").Funcs(funcMap).Parse(`Path Total Free - % Usage + Usage @@ -92,7 +92,7 @@ var StatusTpl = template.Must(template.New("status").Funcs(funcMap).Parse(`{{ .Dir }} {{ bytesToHumanReadble .All }} {{ bytesToHumanReadble .Free }} - {{ percentFrom .All .Used}} + {{ percentFrom .All .Used}}% {{ end }} From d8dec2323bde1a5ab787b719a240969852004456 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Mon, 24 Feb 2020 14:34:14 -0800 Subject: [PATCH 0143/2432] s3: move buckets folder configuration to filer --- other/java/client/src/main/proto/filer.proto | 1 + weed/command/filer.go | 3 + weed/command/s3.go | 38 +++- weed/command/server.go | 2 +- weed/pb/filer.proto | 1 + weed/pb/filer_pb/filer.pb.go | 217 ++++++++++--------- weed/server/filer_grpc_server.go | 1 + weed/server/filer_server.go | 1 + 8 files changed, 149 insertions(+), 115 deletions(-) diff --git 
a/other/java/client/src/main/proto/filer.proto b/other/java/client/src/main/proto/filer.proto index 6357d971f..909458daf 100644 --- a/other/java/client/src/main/proto/filer.proto +++ b/other/java/client/src/main/proto/filer.proto @@ -219,4 +219,5 @@ message GetFilerConfigurationResponse { string replication = 2; string collection = 3; uint32 max_mb = 4; + string dir_buckets = 5; } diff --git a/weed/command/filer.go b/weed/command/filer.go index ea8392fac..0156fe1b9 100644 --- a/weed/command/filer.go +++ b/weed/command/filer.go @@ -33,6 +33,7 @@ type FilerOptions struct { dataCenter *string enableNotification *bool disableHttp *bool + dirBucketsPath *string // default leveldb directory, used in "weed server" mode defaultLevelDbDirectory *string @@ -52,6 +53,7 @@ func init() { f.dirListingLimit = cmdFiler.Flag.Int("dirListLimit", 100000, "limit sub dir listing size") f.dataCenter = cmdFiler.Flag.String("dataCenter", "", "prefer to write to volumes in this data center") f.disableHttp = cmdFiler.Flag.Bool("disableHttp", false, "disable http request, only gRpc operations are allowed") + f.dirBucketsPath = cmdFiler.Flag.String("dir.buckets", "/buckets", "folder to store all buckets") } var cmdFiler = &Command{ @@ -109,6 +111,7 @@ func (fo *FilerOptions) startFiler() { DataCenter: *fo.dataCenter, DefaultLevelDbDir: defaultLevelDbDirectory, DisableHttp: *fo.disableHttp, + DirBucketsPath: *fo.dirBucketsPath, Port: *fo.port, }) if nfs_err != nil { diff --git a/weed/command/s3.go b/weed/command/s3.go index 5fb59fcca..c1ccca820 100644 --- a/weed/command/s3.go +++ b/weed/command/s3.go @@ -1,10 +1,12 @@ package command import ( + "context" "fmt" "net/http" "time" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" "github.com/chrislusf/seaweedfs/weed/security" "github.com/gorilla/mux" @@ -19,19 +21,17 @@ var ( ) type S3Options struct { - filer *string - filerBucketsPath *string - port *int - config *string - domainName *string - tlsPrivateKey *string - tlsCertificate *string + filer *string + port *int + config *string + domainName *string + tlsPrivateKey *string + tlsCertificate *string } func init() { cmdS3.Run = runS3 // break init cycle s3StandaloneOptions.filer = cmdS3.Flag.String("filer", "localhost:8888", "filer server address") - s3StandaloneOptions.filerBucketsPath = cmdS3.Flag.String("filer.dir.buckets", "/buckets", "folder on filer to store all buckets") s3StandaloneOptions.port = cmdS3.Flag.Int("port", 8333, "s3 server http listen port") s3StandaloneOptions.domainName = cmdS3.Flag.String("domainName", "", "suffix of the host name, {bucket}.{domainName}") s3StandaloneOptions.config = cmdS3.Flag.String("config", "", "path to the config file") @@ -123,6 +123,24 @@ func (s3opt *S3Options) startS3Server() bool { return false } + filerBucketsPath := "/buckets" + + grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.client") + ctx := context.Background() + + err = withFilerClient(ctx, filerGrpcAddress, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error { + resp, err := client.GetFilerConfiguration(ctx, &filer_pb.GetFilerConfigurationRequest{}) + if err != nil { + return fmt.Errorf("get filer %s configuration: %v", filerGrpcAddress, err) + } + filerBucketsPath = resp.DirBuckets + return nil + }) + if err != nil { + glog.Fatal(err) + return false + } + router := mux.NewRouter().SkipClean(true) _, s3ApiServer_err := s3api.NewS3ApiServer(router, &s3api.S3ApiServerOption{ @@ -130,8 +148,8 @@ func (s3opt *S3Options) startS3Server() bool { FilerGrpcAddress: filerGrpcAddress, 
Config: *s3opt.config, DomainName: *s3opt.domainName, - BucketsPath: *s3opt.filerBucketsPath, - GrpcDialOption: security.LoadClientTLS(util.GetViper(), "grpc.client"), + BucketsPath: filerBucketsPath, + GrpcDialOption: grpcDialOption, }) if s3ApiServer_err != nil { glog.Fatalf("S3 API Server startup error: %v", s3ApiServer_err) diff --git a/weed/command/server.go b/weed/command/server.go index d7d768df1..aa693618c 100644 --- a/weed/command/server.go +++ b/weed/command/server.go @@ -82,6 +82,7 @@ func init() { filerOptions.disableDirListing = cmdServer.Flag.Bool("filer.disableDirListing", false, "turn off directory listing") filerOptions.maxMB = cmdServer.Flag.Int("filer.maxMB", 32, "split files larger than the limit") filerOptions.dirListingLimit = cmdServer.Flag.Int("filer.dirListLimit", 1000, "limit sub dir listing size") + filerOptions.dirBucketsPath = cmdServer.Flag.String("filer.dir.buckets", "/buckets", "folder to store all buckets") serverOptions.v.port = cmdServer.Flag.Int("volume.port", 8080, "volume server http listen port") serverOptions.v.publicPort = cmdServer.Flag.Int("volume.port.public", 0, "volume server public port") @@ -92,7 +93,6 @@ func init() { serverOptions.v.fileSizeLimitMB = cmdServer.Flag.Int("volume.fileSizeLimitMB", 256, "limit file size to avoid out of memory") serverOptions.v.publicUrl = cmdServer.Flag.String("volume.publicUrl", "", "publicly accessible address") - s3Options.filerBucketsPath = cmdServer.Flag.String("s3.filer.dir.buckets", "/buckets", "folder on filer to store all buckets") s3Options.port = cmdServer.Flag.Int("s3.port", 8333, "s3 server http listen port") s3Options.domainName = cmdServer.Flag.String("s3.domainName", "", "suffix of the host name, {bucket}.{domainName}") s3Options.tlsPrivateKey = cmdServer.Flag.String("s3.key.file", "", "path to the TLS private key file") diff --git a/weed/pb/filer.proto b/weed/pb/filer.proto index 6357d971f..909458daf 100644 --- a/weed/pb/filer.proto +++ b/weed/pb/filer.proto @@ -219,4 +219,5 @@ message GetFilerConfigurationResponse { string replication = 2; string collection = 3; uint32 max_mb = 4; + string dir_buckets = 5; } diff --git a/weed/pb/filer_pb/filer.pb.go b/weed/pb/filer_pb/filer.pb.go index 01b3e8d90..84869de8c 100644 --- a/weed/pb/filer_pb/filer.pb.go +++ b/weed/pb/filer_pb/filer.pb.go @@ -972,6 +972,7 @@ type GetFilerConfigurationResponse struct { Replication string `protobuf:"bytes,2,opt,name=replication" json:"replication,omitempty"` Collection string `protobuf:"bytes,3,opt,name=collection" json:"collection,omitempty"` MaxMb uint32 `protobuf:"varint,4,opt,name=max_mb,json=maxMb" json:"max_mb,omitempty"` + DirBuckets string `protobuf:"bytes,5,opt,name=dir_buckets,json=dirBuckets" json:"dir_buckets,omitempty"` } func (m *GetFilerConfigurationResponse) Reset() { *m = GetFilerConfigurationResponse{} } @@ -1007,6 +1008,13 @@ func (m *GetFilerConfigurationResponse) GetMaxMb() uint32 { return 0 } +func (m *GetFilerConfigurationResponse) GetDirBuckets() string { + if m != nil { + return m.DirBuckets + } + return "" +} + func init() { proto.RegisterType((*LookupDirectoryEntryRequest)(nil), "filer_pb.LookupDirectoryEntryRequest") proto.RegisterType((*LookupDirectoryEntryResponse)(nil), "filer_pb.LookupDirectoryEntryResponse") @@ -1473,108 +1481,109 @@ var _SeaweedFiler_serviceDesc = grpc.ServiceDesc{ func init() { proto.RegisterFile("filer.proto", fileDescriptor0) } var fileDescriptor0 = []byte{ - // 1633 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 
0xb4, 0x18, 0x4b, 0x6f, 0xdc, 0xc6, - 0x59, 0xdc, 0x37, 0xbf, 0xdd, 0xb5, 0xa5, 0x59, 0xc9, 0x5e, 0xaf, 0x1e, 0x95, 0xa9, 0xda, 0x55, - 0x61, 0x43, 0x35, 0x54, 0x1f, 0xec, 0xba, 0x3d, 0xd8, 0x7a, 0x14, 0x42, 0xe5, 0x07, 0x28, 0xbb, - 0x68, 0x11, 0x20, 0x04, 0x45, 0xce, 0xae, 0x26, 0x22, 0x39, 0x9b, 0xe1, 0x50, 0x92, 0xf3, 0x13, - 0x72, 0xcc, 0x31, 0x40, 0xce, 0xf9, 0x13, 0x41, 0x2e, 0x41, 0x90, 0x7f, 0x93, 0x63, 0xce, 0xc1, - 0xcc, 0x90, 0xdc, 0xe1, 0x72, 0x25, 0xd9, 0x08, 0x7c, 0x9b, 0xf9, 0xde, 0xef, 0x6f, 0x48, 0x68, - 0x0f, 0x49, 0x80, 0xd9, 0xd6, 0x98, 0x51, 0x4e, 0x51, 0x4b, 0x5e, 0x9c, 0xf1, 0xb1, 0xf5, 0x1a, - 0x96, 0x0f, 0x29, 0x3d, 0x4d, 0xc6, 0xbb, 0x84, 0x61, 0x8f, 0x53, 0xf6, 0x7e, 0x2f, 0xe2, 0xec, - 0xbd, 0x8d, 0xbf, 0x4c, 0x70, 0xcc, 0xd1, 0x0a, 0x98, 0x7e, 0x86, 0xe8, 0x1b, 0xeb, 0xc6, 0xa6, - 0x69, 0x4f, 0x00, 0x08, 0x41, 0x2d, 0x72, 0x43, 0xdc, 0xaf, 0x48, 0x84, 0x3c, 0x5b, 0x7b, 0xb0, - 0x32, 0x5b, 0x60, 0x3c, 0xa6, 0x51, 0x8c, 0xd1, 0x3d, 0xa8, 0x63, 0x01, 0x90, 0xd2, 0xda, 0xdb, - 0x37, 0xb7, 0x32, 0x53, 0xb6, 0x14, 0x9d, 0xc2, 0x5a, 0x3f, 0x1a, 0x80, 0x0e, 0x49, 0xcc, 0x05, - 0x90, 0xe0, 0xf8, 0xc3, 0xec, 0xb9, 0x05, 0x8d, 0x31, 0xc3, 0x43, 0x72, 0x91, 0x5a, 0x94, 0xde, - 0xd0, 0x43, 0x58, 0x88, 0xb9, 0xcb, 0xf8, 0x3e, 0xa3, 0xe1, 0x3e, 0x09, 0xf0, 0x2b, 0x61, 0x74, - 0x55, 0x92, 0x94, 0x11, 0x68, 0x0b, 0x10, 0x89, 0xbc, 0x20, 0x89, 0xc9, 0x19, 0x3e, 0xca, 0xb0, - 0xfd, 0xda, 0xba, 0xb1, 0xd9, 0xb2, 0x67, 0x60, 0xd0, 0x22, 0xd4, 0x03, 0x12, 0x12, 0xde, 0xaf, - 0xaf, 0x1b, 0x9b, 0x5d, 0x5b, 0x5d, 0xac, 0x7f, 0x42, 0xaf, 0x60, 0xff, 0xc7, 0xb9, 0xff, 0x5d, - 0x05, 0xea, 0x12, 0x90, 0xc7, 0xd8, 0x98, 0xc4, 0x18, 0xdd, 0x85, 0x0e, 0x89, 0x9d, 0x49, 0x20, - 0x2a, 0xd2, 0xb6, 0x36, 0x89, 0xf3, 0x98, 0xa3, 0x07, 0xd0, 0xf0, 0x4e, 0x92, 0xe8, 0x34, 0xee, - 0x57, 0xd7, 0xab, 0x9b, 0xed, 0xed, 0xde, 0x44, 0x91, 0x70, 0x74, 0x47, 0xe0, 0xec, 0x94, 0x04, - 0x3d, 0x01, 0x70, 0x39, 0x67, 0xe4, 0x38, 0xe1, 0x38, 0x96, 0x9e, 0xb6, 0xb7, 0xfb, 0x1a, 0x43, - 0x12, 0xe3, 0xe7, 0x39, 0xde, 0xd6, 0x68, 0xd1, 0x53, 0x68, 0xe1, 0x0b, 0x8e, 0x23, 0x1f, 0xfb, - 0xfd, 0xba, 0x54, 0xb4, 0x3a, 0xe5, 0xd1, 0xd6, 0x5e, 0x8a, 0x57, 0xfe, 0xe5, 0xe4, 0x83, 0x67, - 0xd0, 0x2d, 0xa0, 0xd0, 0x3c, 0x54, 0x4f, 0x71, 0x96, 0x55, 0x71, 0x14, 0x91, 0x3d, 0x73, 0x83, - 0x44, 0x15, 0x58, 0xc7, 0x56, 0x97, 0x7f, 0x54, 0x9e, 0x18, 0xd6, 0x2e, 0x98, 0xfb, 0x49, 0x10, - 0xe4, 0x8c, 0x3e, 0x61, 0x19, 0xa3, 0x4f, 0xd8, 0x24, 0xca, 0x95, 0x2b, 0xa3, 0xfc, 0x83, 0x01, - 0x0b, 0x7b, 0x67, 0x38, 0xe2, 0xaf, 0x28, 0x27, 0x43, 0xe2, 0xb9, 0x9c, 0xd0, 0x08, 0x3d, 0x04, - 0x93, 0x06, 0xbe, 0x73, 0x65, 0x9a, 0x5a, 0x34, 0x48, 0xad, 0x7e, 0x08, 0x66, 0x84, 0xcf, 0x9d, - 0x2b, 0xd5, 0xb5, 0x22, 0x7c, 0xae, 0xa8, 0x37, 0xa0, 0xeb, 0xe3, 0x00, 0x73, 0xec, 0xe4, 0xd9, - 0x11, 0xa9, 0xeb, 0x28, 0xe0, 0x8e, 0x4a, 0xc7, 0x7d, 0xb8, 0x29, 0x44, 0x8e, 0x5d, 0x86, 0x23, - 0xee, 0x8c, 0x5d, 0x7e, 0x22, 0x73, 0x62, 0xda, 0xdd, 0x08, 0x9f, 0xbf, 0x91, 0xd0, 0x37, 0x2e, - 0x3f, 0xb1, 0x7e, 0x33, 0xc0, 0xcc, 0x93, 0x89, 0x6e, 0x43, 0x53, 0xa8, 0x75, 0x88, 0x9f, 0x46, - 0xa2, 0x21, 0xae, 0x07, 0xbe, 0xe8, 0x0a, 0x3a, 0x1c, 0xc6, 0x98, 0x4b, 0xf3, 0xaa, 0x76, 0x7a, - 0x13, 0x95, 0x15, 0x93, 0xaf, 0x54, 0x23, 0xd4, 0x6c, 0x79, 0x16, 0x11, 0x0f, 0x39, 0x09, 0xb1, - 0x54, 0x58, 0xb5, 0xd5, 0x05, 0xf5, 0xa0, 0x8e, 0x1d, 0xee, 0x8e, 0x64, 0x85, 0x9b, 0x76, 0x0d, - 0xbf, 0x75, 0x47, 0xe8, 0xcf, 0x70, 0x23, 0xa6, 0x09, 0xf3, 0xb0, 0x93, 0xa9, 0x6d, 0x48, 0x6c, - 0x47, 0x41, 0xf7, 0x95, 0x72, 0x0b, 0xaa, 0x43, 0xe2, 0xf7, 0x9b, 0x32, 0x30, 0xf3, 
0xc5, 0x22, - 0x3c, 0xf0, 0x6d, 0x81, 0x44, 0x7f, 0x03, 0xc8, 0x25, 0xf9, 0xfd, 0xd6, 0x25, 0xa4, 0x66, 0x26, - 0xd7, 0xb7, 0xfe, 0x07, 0x8d, 0x54, 0xfc, 0x32, 0x98, 0x67, 0x34, 0x48, 0xc2, 0xdc, 0xed, 0xae, - 0xdd, 0x52, 0x80, 0x03, 0x1f, 0xdd, 0x01, 0x39, 0xe7, 0x1c, 0x51, 0x55, 0x15, 0xe9, 0xa4, 0x8c, - 0xd0, 0x7f, 0xb0, 0x9c, 0x14, 0x1e, 0xa5, 0xa7, 0x44, 0x79, 0xdf, 0xb4, 0xd3, 0x9b, 0xf5, 0x6b, - 0x05, 0x6e, 0x14, 0xcb, 0x5d, 0xa8, 0x90, 0x52, 0x64, 0xac, 0x0c, 0x29, 0x46, 0x8a, 0x3d, 0x2a, - 0xc4, 0xab, 0xa2, 0xc7, 0x2b, 0x63, 0x09, 0xa9, 0xaf, 0x14, 0x74, 0x15, 0xcb, 0x4b, 0xea, 0x63, - 0x51, 0xad, 0x09, 0xf1, 0x65, 0x80, 0xbb, 0xb6, 0x38, 0x0a, 0xc8, 0x88, 0xf8, 0xe9, 0xf8, 0x10, - 0x47, 0x69, 0x1e, 0x93, 0x72, 0x1b, 0x2a, 0x65, 0xea, 0x26, 0x52, 0x16, 0x0a, 0x68, 0x53, 0xe5, - 0x41, 0x9c, 0xd1, 0x3a, 0xb4, 0x19, 0x1e, 0x07, 0x69, 0xf5, 0xca, 0xf0, 0x99, 0xb6, 0x0e, 0x42, - 0x6b, 0x00, 0x1e, 0x0d, 0x02, 0xec, 0x49, 0x02, 0x53, 0x12, 0x68, 0x10, 0x51, 0x39, 0x9c, 0x07, - 0x4e, 0x8c, 0xbd, 0x3e, 0xac, 0x1b, 0x9b, 0x75, 0xbb, 0xc1, 0x79, 0x70, 0x84, 0x3d, 0xe1, 0x47, - 0x12, 0x63, 0xe6, 0xc8, 0x01, 0xd4, 0x96, 0x7c, 0x2d, 0x01, 0x90, 0x63, 0x72, 0x15, 0x60, 0xc4, - 0x68, 0x32, 0x56, 0xd8, 0xce, 0x7a, 0x55, 0xcc, 0x62, 0x09, 0x91, 0xe8, 0x7b, 0x70, 0x23, 0x7e, - 0x1f, 0x06, 0x24, 0x3a, 0x75, 0xb8, 0xcb, 0x46, 0x98, 0xf7, 0xbb, 0xaa, 0x86, 0x53, 0xe8, 0x5b, - 0x09, 0xb4, 0xc6, 0x80, 0x76, 0x18, 0x76, 0x39, 0xfe, 0x88, 0xb5, 0xf3, 0x61, 0xdd, 0x8d, 0x96, - 0xa0, 0x41, 0x1d, 0x7c, 0xe1, 0x05, 0x69, 0x93, 0xd5, 0xe9, 0xde, 0x85, 0x17, 0x58, 0x0f, 0xa0, - 0x57, 0xd0, 0x98, 0x0e, 0xe6, 0x45, 0xa8, 0x63, 0xc6, 0x68, 0x36, 0x46, 0xd4, 0xc5, 0xfa, 0x3f, - 0xa0, 0x77, 0x63, 0xff, 0x53, 0x98, 0x67, 0x2d, 0x41, 0xaf, 0x20, 0x5a, 0xd9, 0x61, 0xfd, 0x6c, - 0x00, 0xda, 0x95, 0xd3, 0xe0, 0x8f, 0x2d, 0x62, 0xd1, 0x9f, 0x62, 0x49, 0xa8, 0x69, 0xe3, 0xbb, - 0xdc, 0x4d, 0x57, 0x58, 0x87, 0xc4, 0x4a, 0xfe, 0xae, 0xcb, 0xdd, 0x74, 0x95, 0x30, 0xec, 0x25, - 0x4c, 0x6c, 0x35, 0x59, 0x84, 0x72, 0x95, 0xd8, 0x19, 0x08, 0x3d, 0x86, 0x5b, 0x64, 0x14, 0x51, - 0x86, 0x27, 0x64, 0x8e, 0x0a, 0x55, 0x43, 0x12, 0x2f, 0x2a, 0x6c, 0xce, 0xb0, 0x27, 0x23, 0xb7, - 0x04, 0xbd, 0x82, 0x1b, 0xa9, 0x7b, 0xdf, 0x1a, 0xd0, 0x7f, 0xce, 0x69, 0x48, 0x3c, 0x1b, 0x0b, - 0x33, 0x0b, 0x4e, 0x6e, 0x40, 0x57, 0x4c, 0xde, 0x69, 0x47, 0x3b, 0x34, 0xf0, 0x27, 0x9b, 0xed, - 0x0e, 0x88, 0xe1, 0xeb, 0x68, 0xfe, 0x36, 0x69, 0xe0, 0xcb, 0x9a, 0xdb, 0x00, 0x31, 0x21, 0x35, - 0x7e, 0xb5, 0xe3, 0x3b, 0x11, 0x3e, 0x2f, 0xf0, 0x0b, 0x22, 0xc9, 0xaf, 0xc6, 0x6a, 0x33, 0xc2, - 0xe7, 0x82, 0xdf, 0x5a, 0x86, 0x3b, 0x33, 0x6c, 0x4b, 0x2d, 0xff, 0xde, 0x80, 0xde, 0xf3, 0x38, - 0x26, 0xa3, 0xe8, 0xbf, 0x72, 0xc0, 0x64, 0x46, 0x2f, 0x42, 0xdd, 0xa3, 0x49, 0xc4, 0xa5, 0xb1, - 0x75, 0x5b, 0x5d, 0xa6, 0x7a, 0xae, 0x52, 0xea, 0xb9, 0xa9, 0xae, 0xad, 0x96, 0xbb, 0x56, 0xeb, - 0xca, 0x5a, 0xa1, 0x2b, 0xff, 0x04, 0x6d, 0x91, 0x4e, 0xc7, 0xc3, 0x11, 0xc7, 0x2c, 0x9d, 0xc9, - 0x20, 0x40, 0x3b, 0x12, 0x62, 0x7d, 0x6d, 0xc0, 0x62, 0xd1, 0xd2, 0xb4, 0xc6, 0x2f, 0x5d, 0x11, - 0x62, 0x26, 0xb1, 0x20, 0x35, 0x53, 0x1c, 0x45, 0x77, 0x8f, 0x93, 0xe3, 0x80, 0x78, 0x8e, 0x40, - 0x28, 0xf3, 0x4c, 0x05, 0x79, 0xc7, 0x82, 0x89, 0xd3, 0x35, 0xdd, 0x69, 0x04, 0x35, 0x37, 0xe1, - 0x27, 0xd9, 0x9a, 0x10, 0x67, 0xeb, 0x31, 0xf4, 0xd4, 0x7b, 0xb0, 0x18, 0xb5, 0x55, 0x80, 0x7c, - 0x70, 0xc7, 0x7d, 0x43, 0x4d, 0x8f, 0x6c, 0x72, 0xc7, 0xd6, 0xbf, 0xc0, 0x3c, 0xa4, 0x2a, 0x10, - 0x31, 0x7a, 0x04, 0x66, 0x90, 0x5d, 0x24, 0x69, 0x7b, 0x1b, 0x4d, 0x9a, 0x2a, 0xa3, 0xb3, 0x27, - 0x44, 0xd6, 
0x33, 0x68, 0x65, 0xe0, 0xcc, 0x37, 0xe3, 0x32, 0xdf, 0x2a, 0x53, 0xbe, 0x59, 0x3f, - 0x19, 0xb0, 0x58, 0x34, 0x39, 0x0d, 0xdf, 0x3b, 0xe8, 0xe6, 0x2a, 0x9c, 0xd0, 0x1d, 0xa7, 0xb6, - 0x3c, 0xd2, 0x6d, 0x29, 0xb3, 0xe5, 0x06, 0xc6, 0x2f, 0xdd, 0xb1, 0x2a, 0xa9, 0x4e, 0xa0, 0x81, - 0x06, 0x6f, 0x61, 0xa1, 0x44, 0x32, 0xe3, 0x31, 0xf4, 0x57, 0xfd, 0x31, 0x54, 0x78, 0xd0, 0xe5, - 0xdc, 0xfa, 0x0b, 0xe9, 0x29, 0xdc, 0x56, 0xfd, 0xb7, 0x93, 0x17, 0x5d, 0x16, 0xfb, 0x62, 0x6d, - 0x1a, 0xd3, 0xb5, 0x69, 0x0d, 0xa0, 0x5f, 0x66, 0x4d, 0xbb, 0x60, 0x04, 0x0b, 0x47, 0xdc, 0xe5, - 0x24, 0xe6, 0xc4, 0xcb, 0x5f, 0xe5, 0x53, 0xc5, 0x6c, 0x5c, 0xb7, 0x82, 0xca, 0xed, 0x30, 0x0f, - 0x55, 0xce, 0xb3, 0x3a, 0x13, 0x47, 0x91, 0x05, 0xa4, 0x6b, 0x4a, 0x73, 0xf0, 0x09, 0x54, 0x89, - 0x7a, 0xe0, 0x94, 0xbb, 0x81, 0x5a, 0xf1, 0x35, 0xb9, 0xe2, 0x4d, 0x09, 0x91, 0x3b, 0x5e, 0x6d, - 0x41, 0x5f, 0x61, 0xeb, 0xea, 0x01, 0x20, 0x00, 0x12, 0xb9, 0x0a, 0x20, 0x5b, 0x4a, 0x75, 0x43, - 0x43, 0xf1, 0x0a, 0xc8, 0x8e, 0x00, 0x58, 0x6b, 0xb0, 0xf2, 0x6f, 0xcc, 0xc5, 0x63, 0x85, 0xed, - 0xd0, 0x68, 0x48, 0x46, 0x09, 0x73, 0xb5, 0x54, 0x58, 0xdf, 0x18, 0xb0, 0x7a, 0x09, 0x41, 0xea, - 0x70, 0x1f, 0x9a, 0xa1, 0x1b, 0x73, 0xcc, 0xb2, 0x2e, 0xc9, 0xae, 0xd3, 0xa1, 0xa8, 0x5c, 0x17, - 0x8a, 0x6a, 0x29, 0x14, 0x4b, 0xd0, 0x08, 0xdd, 0x0b, 0x27, 0x3c, 0x4e, 0x5f, 0x23, 0xf5, 0xd0, - 0xbd, 0x78, 0x79, 0xbc, 0xfd, 0x4b, 0x13, 0x3a, 0x47, 0xd8, 0x3d, 0xc7, 0xd8, 0x97, 0x86, 0xa1, - 0x51, 0xd6, 0x10, 0xc5, 0x6f, 0x3a, 0x74, 0x6f, 0xba, 0xf2, 0x67, 0x7e, 0x44, 0x0e, 0xee, 0x5f, - 0x47, 0x96, 0xd6, 0xd6, 0x1c, 0x7a, 0x05, 0x6d, 0xed, 0xa3, 0x09, 0xad, 0x68, 0x8c, 0xa5, 0x6f, - 0xc1, 0xc1, 0xea, 0x25, 0xd8, 0x4c, 0xda, 0x23, 0x03, 0x1d, 0x42, 0x5b, 0xdb, 0xf5, 0xba, 0xbc, - 0xf2, 0xa3, 0x43, 0x97, 0x37, 0xe3, 0x81, 0x60, 0xcd, 0x09, 0x69, 0xda, 0xc6, 0xd6, 0xa5, 0x95, - 0xdf, 0x08, 0xba, 0xb4, 0x59, 0x6b, 0x5e, 0x4a, 0xd3, 0x16, 0xa4, 0x2e, 0xad, 0xbc, 0xfe, 0x75, - 0x69, 0xb3, 0xb6, 0xea, 0x1c, 0xfa, 0x1c, 0x16, 0x4a, 0xab, 0x0b, 0x59, 0x13, 0xae, 0xcb, 0x76, - 0xee, 0x60, 0xe3, 0x4a, 0x9a, 0x5c, 0xfe, 0x6b, 0xe8, 0xe8, 0x2b, 0x05, 0x69, 0x06, 0xcd, 0x58, - 0x8a, 0x83, 0xb5, 0xcb, 0xd0, 0xba, 0x40, 0x7d, 0x5a, 0xea, 0x02, 0x67, 0xec, 0x0b, 0x5d, 0xe0, - 0xac, 0x21, 0x6b, 0xcd, 0xa1, 0xcf, 0x60, 0x7e, 0x7a, 0x6a, 0xa1, 0xbb, 0xd3, 0x61, 0x2b, 0x0d, - 0xc3, 0x81, 0x75, 0x15, 0x49, 0x2e, 0xfc, 0x00, 0x60, 0x32, 0x8c, 0xd0, 0xf2, 0x84, 0xa7, 0x34, - 0x0c, 0x07, 0x2b, 0xb3, 0x91, 0xb9, 0xa8, 0x2f, 0x60, 0x69, 0x66, 0xc7, 0x23, 0xad, 0x4d, 0xae, - 0x9a, 0x19, 0x83, 0xbf, 0x5c, 0x4b, 0x97, 0xe9, 0x7a, 0xb1, 0x06, 0xf3, 0xb1, 0x6a, 0xe4, 0x61, - 0xbc, 0xe5, 0x05, 0x04, 0x47, 0xfc, 0x05, 0x48, 0x8e, 0x37, 0x8c, 0x72, 0x7a, 0xdc, 0x90, 0xbf, - 0x83, 0xfe, 0xfe, 0x7b, 0x00, 0x00, 0x00, 0xff, 0xff, 0xc5, 0xce, 0x15, 0x02, 0x1d, 0x12, 0x00, - 0x00, + // 1655 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xb4, 0x58, 0x4b, 0x6f, 0x1b, 0x47, + 0x12, 0xd6, 0xf0, 0x3d, 0x45, 0xd2, 0x96, 0x9a, 0x92, 0x4d, 0x53, 0x8f, 0x95, 0x47, 0x6b, 0xaf, + 0x16, 0x36, 0xb4, 0x86, 0xd6, 0x07, 0x7b, 0xbd, 0x7b, 0xb0, 0xf5, 0x58, 0x08, 0x2b, 0x3f, 0x30, + 0xb2, 0x17, 0xbb, 0x08, 0x90, 0xc1, 0x68, 0xa6, 0x49, 0x75, 0x34, 0x9c, 0x66, 0xba, 0x7b, 0x24, + 0x39, 0x3f, 0x21, 0x3f, 0x21, 0x40, 0xce, 0xf9, 0x01, 0xb9, 0x06, 0xb9, 0x04, 0x41, 0xfe, 0x4d, + 0x8e, 0x39, 0x07, 0xdd, 0x3d, 0x33, 0xec, 0x21, 0x29, 0xc9, 0x46, 0xe0, 0xdb, 0x74, 0xbd, 0xba, + 0xaa, 0xba, 0xea, 0xab, 0x22, 0xa1, 0xd9, 0x27, 0x11, 0x66, 0x5b, 0x23, 0x46, 
0x05, 0x45, 0x0d, + 0x75, 0xf0, 0x46, 0xc7, 0xce, 0x6b, 0x58, 0x3e, 0xa4, 0xf4, 0x34, 0x19, 0xed, 0x12, 0x86, 0x03, + 0x41, 0xd9, 0xfb, 0xbd, 0x58, 0xb0, 0xf7, 0x2e, 0xfe, 0x32, 0xc1, 0x5c, 0xa0, 0x15, 0xb0, 0xc3, + 0x8c, 0xd1, 0xb5, 0xd6, 0xad, 0x4d, 0xdb, 0x1d, 0x13, 0x10, 0x82, 0x4a, 0xec, 0x0f, 0x71, 0xb7, + 0xa4, 0x18, 0xea, 0xdb, 0xd9, 0x83, 0x95, 0xd9, 0x06, 0xf9, 0x88, 0xc6, 0x1c, 0xa3, 0x7b, 0x50, + 0xc5, 0x92, 0xa0, 0xac, 0x35, 0xb7, 0x6f, 0x6e, 0x65, 0xae, 0x6c, 0x69, 0x39, 0xcd, 0x75, 0x7e, + 0xb4, 0x00, 0x1d, 0x12, 0x2e, 0x24, 0x91, 0x60, 0xfe, 0x61, 0xfe, 0xdc, 0x82, 0xda, 0x88, 0xe1, + 0x3e, 0xb9, 0x48, 0x3d, 0x4a, 0x4f, 0xe8, 0x21, 0x2c, 0x70, 0xe1, 0x33, 0xb1, 0xcf, 0xe8, 0x70, + 0x9f, 0x44, 0xf8, 0x95, 0x74, 0xba, 0xac, 0x44, 0xa6, 0x19, 0x68, 0x0b, 0x10, 0x89, 0x83, 0x28, + 0xe1, 0xe4, 0x0c, 0x1f, 0x65, 0xdc, 0x6e, 0x65, 0xdd, 0xda, 0x6c, 0xb8, 0x33, 0x38, 0x68, 0x11, + 0xaa, 0x11, 0x19, 0x12, 0xd1, 0xad, 0xae, 0x5b, 0x9b, 0x6d, 0x57, 0x1f, 0x9c, 0x7f, 0x42, 0xa7, + 0xe0, 0xff, 0xc7, 0x85, 0xff, 0x6d, 0x09, 0xaa, 0x8a, 0x90, 0xe7, 0xd8, 0x1a, 0xe7, 0x18, 0xdd, + 0x85, 0x16, 0xe1, 0xde, 0x38, 0x11, 0x25, 0xe5, 0x5b, 0x93, 0xf0, 0x3c, 0xe7, 0xe8, 0x01, 0xd4, + 0x82, 0x93, 0x24, 0x3e, 0xe5, 0xdd, 0xf2, 0x7a, 0x79, 0xb3, 0xb9, 0xdd, 0x19, 0x5f, 0x24, 0x03, + 0xdd, 0x91, 0x3c, 0x37, 0x15, 0x41, 0x4f, 0x00, 0x7c, 0x21, 0x18, 0x39, 0x4e, 0x04, 0xe6, 0x2a, + 0xd2, 0xe6, 0x76, 0xd7, 0x50, 0x48, 0x38, 0x7e, 0x9e, 0xf3, 0x5d, 0x43, 0x16, 0x3d, 0x85, 0x06, + 0xbe, 0x10, 0x38, 0x0e, 0x71, 0xd8, 0xad, 0xaa, 0x8b, 0x56, 0x27, 0x22, 0xda, 0xda, 0x4b, 0xf9, + 0x3a, 0xbe, 0x5c, 0xbc, 0xf7, 0x0c, 0xda, 0x05, 0x16, 0x9a, 0x87, 0xf2, 0x29, 0xce, 0x5e, 0x55, + 0x7e, 0xca, 0xcc, 0x9e, 0xf9, 0x51, 0xa2, 0x0b, 0xac, 0xe5, 0xea, 0xc3, 0x3f, 0x4a, 0x4f, 0x2c, + 0x67, 0x17, 0xec, 0xfd, 0x24, 0x8a, 0x72, 0xc5, 0x90, 0xb0, 0x4c, 0x31, 0x24, 0x6c, 0x9c, 0xe5, + 0xd2, 0x95, 0x59, 0xfe, 0xc1, 0x82, 0x85, 0xbd, 0x33, 0x1c, 0x8b, 0x57, 0x54, 0x90, 0x3e, 0x09, + 0x7c, 0x41, 0x68, 0x8c, 0x1e, 0x82, 0x4d, 0xa3, 0xd0, 0xbb, 0xf2, 0x99, 0x1a, 0x34, 0x4a, 0xbd, + 0x7e, 0x08, 0x76, 0x8c, 0xcf, 0xbd, 0x2b, 0xaf, 0x6b, 0xc4, 0xf8, 0x5c, 0x4b, 0x6f, 0x40, 0x3b, + 0xc4, 0x11, 0x16, 0xd8, 0xcb, 0x5f, 0x47, 0x3e, 0x5d, 0x4b, 0x13, 0x77, 0xf4, 0x73, 0xdc, 0x87, + 0x9b, 0xd2, 0xe4, 0xc8, 0x67, 0x38, 0x16, 0xde, 0xc8, 0x17, 0x27, 0xea, 0x4d, 0x6c, 0xb7, 0x1d, + 0xe3, 0xf3, 0x37, 0x8a, 0xfa, 0xc6, 0x17, 0x27, 0xce, 0x6f, 0x16, 0xd8, 0xf9, 0x63, 0xa2, 0xdb, + 0x50, 0x97, 0xd7, 0x7a, 0x24, 0x4c, 0x33, 0x51, 0x93, 0xc7, 0x83, 0x50, 0x76, 0x05, 0xed, 0xf7, + 0x39, 0x16, 0xca, 0xbd, 0xb2, 0x9b, 0x9e, 0x64, 0x65, 0x71, 0xf2, 0x95, 0x6e, 0x84, 0x8a, 0xab, + 0xbe, 0x65, 0xc6, 0x87, 0x82, 0x0c, 0xb1, 0xba, 0xb0, 0xec, 0xea, 0x03, 0xea, 0x40, 0x15, 0x7b, + 0xc2, 0x1f, 0xa8, 0x0a, 0xb7, 0xdd, 0x0a, 0x7e, 0xeb, 0x0f, 0xd0, 0x9f, 0xe1, 0x06, 0xa7, 0x09, + 0x0b, 0xb0, 0x97, 0x5d, 0x5b, 0x53, 0xdc, 0x96, 0xa6, 0xee, 0xeb, 0xcb, 0x1d, 0x28, 0xf7, 0x49, + 0xd8, 0xad, 0xab, 0xc4, 0xcc, 0x17, 0x8b, 0xf0, 0x20, 0x74, 0x25, 0x13, 0xfd, 0x0d, 0x20, 0xb7, + 0x14, 0x76, 0x1b, 0x97, 0x88, 0xda, 0x99, 0xdd, 0xd0, 0xf9, 0x1f, 0xd4, 0x52, 0xf3, 0xcb, 0x60, + 0x9f, 0xd1, 0x28, 0x19, 0xe6, 0x61, 0xb7, 0xdd, 0x86, 0x26, 0x1c, 0x84, 0xe8, 0x0e, 0x28, 0x9c, + 0xf3, 0x64, 0x55, 0x95, 0x54, 0x90, 0x2a, 0x43, 0xff, 0xc1, 0x0a, 0x29, 0x02, 0x4a, 0x4f, 0x89, + 0x8e, 0xbe, 0xee, 0xa6, 0x27, 0xe7, 0xd7, 0x12, 0xdc, 0x28, 0x96, 0xbb, 0xbc, 0x42, 0x59, 0x51, + 0xb9, 0xb2, 0x94, 0x19, 0x65, 0xf6, 0xa8, 0x90, 0xaf, 0x92, 0x99, 0xaf, 0x4c, 0x65, 0x48, 0x43, + 0x7d, 
0x41, 0x5b, 0xab, 0xbc, 0xa4, 0x21, 0x96, 0xd5, 0x9a, 0x90, 0x50, 0x25, 0xb8, 0xed, 0xca, + 0x4f, 0x49, 0x19, 0x90, 0x30, 0x85, 0x0f, 0xf9, 0xa9, 0xdc, 0x63, 0xca, 0x6e, 0x4d, 0x3f, 0x99, + 0x3e, 0xc9, 0x27, 0x1b, 0x4a, 0x6a, 0x5d, 0xbf, 0x83, 0xfc, 0x46, 0xeb, 0xd0, 0x64, 0x78, 0x14, + 0xa5, 0xd5, 0xab, 0xd2, 0x67, 0xbb, 0x26, 0x09, 0xad, 0x01, 0x04, 0x34, 0x8a, 0x70, 0xa0, 0x04, + 0x6c, 0x25, 0x60, 0x50, 0x64, 0xe5, 0x08, 0x11, 0x79, 0x1c, 0x07, 0x5d, 0x58, 0xb7, 0x36, 0xab, + 0x6e, 0x4d, 0x88, 0xe8, 0x08, 0x07, 0x32, 0x8e, 0x84, 0x63, 0xe6, 0x29, 0x00, 0x6a, 0x2a, 0xbd, + 0x86, 0x24, 0x28, 0x98, 0x5c, 0x05, 0x18, 0x30, 0x9a, 0x8c, 0x34, 0xb7, 0xb5, 0x5e, 0x96, 0x58, + 0xac, 0x28, 0x8a, 0x7d, 0x0f, 0x6e, 0xf0, 0xf7, 0xc3, 0x88, 0xc4, 0xa7, 0x9e, 0xf0, 0xd9, 0x00, + 0x8b, 0x6e, 0x5b, 0xd7, 0x70, 0x4a, 0x7d, 0xab, 0x88, 0xce, 0x08, 0xd0, 0x0e, 0xc3, 0xbe, 0xc0, + 0x1f, 0x31, 0x76, 0x3e, 0xac, 0xbb, 0xd1, 0x12, 0xd4, 0xa8, 0x87, 0x2f, 0x82, 0x28, 0x6d, 0xb2, + 0x2a, 0xdd, 0xbb, 0x08, 0x22, 0xe7, 0x01, 0x74, 0x0a, 0x37, 0xa6, 0xc0, 0xbc, 0x08, 0x55, 0xcc, + 0x18, 0xcd, 0x60, 0x44, 0x1f, 0x9c, 0xff, 0x03, 0x7a, 0x37, 0x0a, 0x3f, 0x85, 0x7b, 0xce, 0x12, + 0x74, 0x0a, 0xa6, 0xb5, 0x1f, 0xce, 0xcf, 0x16, 0xa0, 0x5d, 0x85, 0x06, 0x7f, 0x6c, 0x10, 0xcb, + 0xfe, 0x94, 0x43, 0x42, 0xa3, 0x4d, 0xe8, 0x0b, 0x3f, 0x1d, 0x61, 0x2d, 0xc2, 0xb5, 0xfd, 0x5d, + 0x5f, 0xf8, 0xe9, 0x28, 0x61, 0x38, 0x48, 0x98, 0x9c, 0x6a, 0xaa, 0x08, 0xd5, 0x28, 0x71, 0x33, + 0x12, 0x7a, 0x0c, 0xb7, 0xc8, 0x20, 0xa6, 0x0c, 0x8f, 0xc5, 0x3c, 0x9d, 0xaa, 0x9a, 0x12, 0x5e, + 0xd4, 0xdc, 0x5c, 0x61, 0x4f, 0x65, 0x6e, 0x09, 0x3a, 0x85, 0x30, 0xd2, 0xf0, 0xbe, 0xb1, 0xa0, + 0xfb, 0x5c, 0xd0, 0x21, 0x09, 0x5c, 0x2c, 0xdd, 0x2c, 0x04, 0xb9, 0x01, 0x6d, 0x89, 0xbc, 0x93, + 0x81, 0xb6, 0x68, 0x14, 0x8e, 0x27, 0xdb, 0x1d, 0x90, 0xe0, 0xeb, 0x19, 0xf1, 0xd6, 0x69, 0x14, + 0xaa, 0x9a, 0xdb, 0x00, 0x89, 0x90, 0x86, 0xbe, 0x9e, 0xf1, 0xad, 0x18, 0x9f, 0x17, 0xf4, 0xa5, + 0x90, 0xd2, 0xd7, 0xb0, 0x5a, 0x8f, 0xf1, 0xb9, 0xd4, 0x77, 0x96, 0xe1, 0xce, 0x0c, 0xdf, 0x52, + 0xcf, 0xbf, 0xb3, 0xa0, 0xf3, 0x9c, 0x73, 0x32, 0x88, 0xff, 0xab, 0x00, 0x26, 0x73, 0x7a, 0x11, + 0xaa, 0x01, 0x4d, 0x62, 0xa1, 0x9c, 0xad, 0xba, 0xfa, 0x30, 0xd1, 0x73, 0xa5, 0xa9, 0x9e, 0x9b, + 0xe8, 0xda, 0xf2, 0x74, 0xd7, 0x1a, 0x5d, 0x59, 0x29, 0x74, 0xe5, 0x9f, 0xa0, 0x29, 0x9f, 0xd3, + 0x0b, 0x70, 0x2c, 0x30, 0x4b, 0x31, 0x19, 0x24, 0x69, 0x47, 0x51, 0x9c, 0xaf, 0x2d, 0x58, 0x2c, + 0x7a, 0x9a, 0xd6, 0xf8, 0xa5, 0x23, 0x42, 0x62, 0x12, 0x8b, 0x52, 0x37, 0xe5, 0xa7, 0xec, 0xee, + 0x51, 0x72, 0x1c, 0x91, 0xc0, 0x93, 0x0c, 0xed, 0x9e, 0xad, 0x29, 0xef, 0x58, 0x34, 0x0e, 0xba, + 0x62, 0x06, 0x8d, 0xa0, 0xe2, 0x27, 0xe2, 0x24, 0x1b, 0x13, 0xf2, 0xdb, 0x79, 0x0c, 0x1d, 0xbd, + 0x0f, 0x16, 0xb3, 0xb6, 0x0a, 0x90, 0x03, 0x37, 0xef, 0x5a, 0x1a, 0x3d, 0x32, 0xe4, 0xe6, 0xce, + 0xbf, 0xc0, 0x3e, 0xa4, 0x3a, 0x11, 0x1c, 0x3d, 0x02, 0x3b, 0xca, 0x0e, 0x4a, 0xb4, 0xb9, 0x8d, + 0xc6, 0x4d, 0x95, 0xc9, 0xb9, 0x63, 0x21, 0xe7, 0x19, 0x34, 0x32, 0x72, 0x16, 0x9b, 0x75, 0x59, + 0x6c, 0xa5, 0x89, 0xd8, 0x9c, 0x9f, 0x2c, 0x58, 0x2c, 0xba, 0x9c, 0xa6, 0xef, 0x1d, 0xb4, 0xf3, + 0x2b, 0xbc, 0xa1, 0x3f, 0x4a, 0x7d, 0x79, 0x64, 0xfa, 0x32, 0xad, 0x96, 0x3b, 0xc8, 0x5f, 0xfa, + 0x23, 0x5d, 0x52, 0xad, 0xc8, 0x20, 0xf5, 0xde, 0xc2, 0xc2, 0x94, 0xc8, 0x8c, 0x65, 0xe8, 0xaf, + 0xe6, 0x32, 0x54, 0x58, 0xe8, 0x72, 0x6d, 0x73, 0x43, 0x7a, 0x0a, 0xb7, 0x75, 0xff, 0xed, 0xe4, + 0x45, 0x97, 0xe5, 0xbe, 0x58, 0x9b, 0xd6, 0x64, 0x6d, 0x3a, 0x3d, 0xe8, 0x4e, 0xab, 0xa6, 0x5d, + 0x30, 0x80, 0x85, 0x23, 0xe1, 
0x0b, 0xc2, 0x05, 0x09, 0xf2, 0xad, 0x7c, 0xa2, 0x98, 0xad, 0xeb, + 0x46, 0xd0, 0x74, 0x3b, 0xcc, 0x43, 0x59, 0x88, 0xac, 0xce, 0xe4, 0xa7, 0x7c, 0x05, 0x64, 0xde, + 0x94, 0xbe, 0xc1, 0x27, 0xb8, 0x4a, 0xd6, 0x83, 0xa0, 0xc2, 0x8f, 0xf4, 0x88, 0xaf, 0xa8, 0x11, + 0x6f, 0x2b, 0x8a, 0x9a, 0xf1, 0x7a, 0x0a, 0x86, 0x9a, 0x5b, 0xd5, 0x0b, 0x80, 0x24, 0x28, 0xe6, + 0x2a, 0x80, 0x6a, 0x29, 0xdd, 0x0d, 0x35, 0xad, 0x2b, 0x29, 0x3b, 0x92, 0xe0, 0xac, 0xc1, 0xca, + 0xbf, 0xb1, 0x90, 0xcb, 0x0a, 0xdb, 0xa1, 0x71, 0x9f, 0x0c, 0x12, 0xe6, 0x1b, 0x4f, 0xe1, 0x7c, + 0x6f, 0xc1, 0xea, 0x25, 0x02, 0x69, 0xc0, 0x5d, 0xa8, 0x0f, 0x7d, 0x2e, 0x30, 0xcb, 0xba, 0x24, + 0x3b, 0x4e, 0xa6, 0xa2, 0x74, 0x5d, 0x2a, 0xca, 0x53, 0xa9, 0x58, 0x82, 0xda, 0xd0, 0xbf, 0xf0, + 0x86, 0xc7, 0xe9, 0x36, 0x52, 0x1d, 0xfa, 0x17, 0x2f, 0x8f, 0x15, 0xc0, 0x10, 0xe6, 0x1d, 0x27, + 0xc1, 0x29, 0x16, 0x3c, 0x07, 0x18, 0xc2, 0x5e, 0x68, 0xca, 0xf6, 0x2f, 0x75, 0x68, 0x1d, 0x61, + 0xff, 0x1c, 0xe3, 0x50, 0x79, 0x8e, 0x06, 0x59, 0xc7, 0x14, 0x7f, 0xf4, 0xa1, 0x7b, 0x93, 0xad, + 0x31, 0xf3, 0x57, 0x66, 0xef, 0xfe, 0x75, 0x62, 0x69, 0xf1, 0xcd, 0xa1, 0x57, 0xd0, 0x34, 0x7e, + 0x55, 0xa1, 0x15, 0x43, 0x71, 0xea, 0xc7, 0x62, 0x6f, 0xf5, 0x12, 0x6e, 0x66, 0xed, 0x91, 0x85, + 0x0e, 0xa1, 0x69, 0x2c, 0x03, 0xa6, 0xbd, 0xe9, 0xad, 0xc4, 0xb4, 0x37, 0x63, 0x83, 0x70, 0xe6, + 0xa4, 0x35, 0x63, 0xa4, 0x9b, 0xd6, 0xa6, 0x97, 0x08, 0xd3, 0xda, 0xac, 0x3d, 0x40, 0x59, 0x33, + 0x26, 0xa8, 0x69, 0x6d, 0x7a, 0x3f, 0x30, 0xad, 0xcd, 0x1a, 0xbb, 0x73, 0xe8, 0x73, 0x58, 0x98, + 0x9a, 0x6d, 0xc8, 0x19, 0x6b, 0x5d, 0x36, 0x94, 0x7b, 0x1b, 0x57, 0xca, 0xe4, 0xf6, 0x5f, 0x43, + 0xcb, 0x9c, 0x39, 0xc8, 0x70, 0x68, 0xc6, 0xd4, 0xec, 0xad, 0x5d, 0xc6, 0x36, 0x0d, 0x9a, 0x70, + 0x6a, 0x1a, 0x9c, 0x31, 0x50, 0x4c, 0x83, 0xb3, 0x50, 0xd8, 0x99, 0x43, 0x9f, 0xc1, 0xfc, 0x24, + 0xac, 0xa1, 0xbb, 0x93, 0x69, 0x9b, 0x42, 0xcb, 0x9e, 0x73, 0x95, 0x48, 0x6e, 0xfc, 0x00, 0x60, + 0x8c, 0x56, 0x68, 0x79, 0xac, 0x33, 0x85, 0x96, 0xbd, 0x95, 0xd9, 0xcc, 0xdc, 0xd4, 0x17, 0xb0, + 0x34, 0x13, 0x12, 0x90, 0xd1, 0x26, 0x57, 0x81, 0x4a, 0xef, 0x2f, 0xd7, 0xca, 0x65, 0x77, 0xbd, + 0x58, 0x83, 0x79, 0xae, 0x1b, 0xb9, 0xcf, 0xb7, 0x82, 0x88, 0xe0, 0x58, 0xbc, 0x00, 0xa5, 0xf1, + 0x86, 0x51, 0x41, 0x8f, 0x6b, 0xea, 0xff, 0xa2, 0xbf, 0xff, 0x1e, 0x00, 0x00, 0xff, 0xff, 0x7f, + 0xd8, 0x6f, 0xb2, 0x3e, 0x12, 0x00, 0x00, } diff --git a/weed/server/filer_grpc_server.go b/weed/server/filer_grpc_server.go index 03954a58c..98951c347 100644 --- a/weed/server/filer_grpc_server.go +++ b/weed/server/filer_grpc_server.go @@ -308,5 +308,6 @@ func (fs *FilerServer) GetFilerConfiguration(ctx context.Context, req *filer_pb. 
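// A hedged sketch of the client side of this change: with dir_buckets added to
// GetFilerConfigurationResponse, a consumer such as the S3 gateway can discover
// the buckets folder from the filer over gRPC instead of needing its own
// -s3.filer.dir.buckets flag. The plain grpc.Dial/WithInsecure setup and the
// readBucketsDir name are illustrative assumptions, not part of the patch; the
// real code path goes through withFilerClient and security.LoadClientTLS.
// Assumes the usual imports: context, google.golang.org/grpc, weed/pb/filer_pb.
func readBucketsDir(ctx context.Context, filerGrpcAddress string) (string, error) {
	conn, err := grpc.Dial(filerGrpcAddress, grpc.WithInsecure())
	if err != nil {
		return "", err
	}
	defer conn.Close()

	client := filer_pb.NewSeaweedFilerClient(conn)
	resp, err := client.GetFilerConfiguration(ctx, &filer_pb.GetFilerConfigurationRequest{})
	if err != nil {
		return "", err
	}
	// e.g. "/buckets" unless the filer was configured otherwise
	return resp.DirBuckets, nil
}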
Collection: fs.option.Collection, Replication: fs.option.DefaultReplication, MaxMb: uint32(fs.option.MaxMB), + DirBuckets: fs.option.DirBucketsPath, }, nil } diff --git a/weed/server/filer_server.go b/weed/server/filer_server.go index 72cca1f6f..51444a5b0 100644 --- a/weed/server/filer_server.go +++ b/weed/server/filer_server.go @@ -46,6 +46,7 @@ type FilerOption struct { DisableHttp bool Port int recursiveDelete bool + DirBucketsPath string } type FilerServer struct { From 5bcb44eda9b1dba57abf8cd9ce3b2d18518bd100 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Mon, 24 Feb 2020 14:42:57 -0800 Subject: [PATCH 0144/2432] filer: move buckets folder to filer.toml since it should not be changed often --- weed/command/filer.go | 3 --- weed/command/scaffold.go | 3 ++- weed/command/server.go | 1 - weed/server/filer_server.go | 1 + 4 files changed, 3 insertions(+), 5 deletions(-) diff --git a/weed/command/filer.go b/weed/command/filer.go index 0156fe1b9..ea8392fac 100644 --- a/weed/command/filer.go +++ b/weed/command/filer.go @@ -33,7 +33,6 @@ type FilerOptions struct { dataCenter *string enableNotification *bool disableHttp *bool - dirBucketsPath *string // default leveldb directory, used in "weed server" mode defaultLevelDbDirectory *string @@ -53,7 +52,6 @@ func init() { f.dirListingLimit = cmdFiler.Flag.Int("dirListLimit", 100000, "limit sub dir listing size") f.dataCenter = cmdFiler.Flag.String("dataCenter", "", "prefer to write to volumes in this data center") f.disableHttp = cmdFiler.Flag.Bool("disableHttp", false, "disable http request, only gRpc operations are allowed") - f.dirBucketsPath = cmdFiler.Flag.String("dir.buckets", "/buckets", "folder to store all buckets") } var cmdFiler = &Command{ @@ -111,7 +109,6 @@ func (fo *FilerOptions) startFiler() { DataCenter: *fo.dataCenter, DefaultLevelDbDir: defaultLevelDbDirectory, DisableHttp: *fo.disableHttp, - DirBucketsPath: *fo.dirBucketsPath, Port: *fo.port, }) if nfs_err != nil { diff --git a/weed/command/scaffold.go b/weed/command/scaffold.go index ab658735f..09f1cac6c 100644 --- a/weed/command/scaffold.go +++ b/weed/command/scaffold.go @@ -74,7 +74,8 @@ const ( # with http DELETE, by default the filer would check whether a folder is empty. 
# recursive_delete will delete all sub folders and files, similar to "rm -Rf" recursive_delete = false - +# directories under this folder will be automatically creating a separate bucket +buckets_folder = /buckets #################################################### # The following are filer store options diff --git a/weed/command/server.go b/weed/command/server.go index aa693618c..c9d27555c 100644 --- a/weed/command/server.go +++ b/weed/command/server.go @@ -82,7 +82,6 @@ func init() { filerOptions.disableDirListing = cmdServer.Flag.Bool("filer.disableDirListing", false, "turn off directory listing") filerOptions.maxMB = cmdServer.Flag.Int("filer.maxMB", 32, "split files larger than the limit") filerOptions.dirListingLimit = cmdServer.Flag.Int("filer.dirListLimit", 1000, "limit sub dir listing size") - filerOptions.dirBucketsPath = cmdServer.Flag.String("filer.dir.buckets", "/buckets", "folder to store all buckets") serverOptions.v.port = cmdServer.Flag.Int("volume.port", 8080, "volume server http listen port") serverOptions.v.publicPort = cmdServer.Flag.Int("volume.port.public", 0, "volume server public port") diff --git a/weed/server/filer_server.go b/weed/server/filer_server.go index 51444a5b0..55888a4a4 100644 --- a/weed/server/filer_server.go +++ b/weed/server/filer_server.go @@ -83,6 +83,7 @@ func NewFilerServer(defaultMux, readonlyMux *http.ServeMux, option *FilerOption) util.LoadConfiguration("notification", false) fs.option.recursiveDelete = v.GetBool("filer.options.recursive_delete") + fs.option.DirBucketsPath = v.GetString("filer.option.buckets_folder") fs.filer.LoadConfiguration(v) notification.LoadConfiguration(v, "notification.") From 6ab7368ef2556ef086d13c6d0d4454f1e98a5cd8 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Mon, 24 Feb 2020 22:28:45 -0800 Subject: [PATCH 0145/2432] filer: dynamically create bucket under /buckets folder --- other/java/client/src/main/proto/filer.proto | 3 + weed/command/filer_copy.go | 75 ++++-- weed/command/mount_std.go | 2 +- weed/command/s3.go | 1 + weed/filer2/filer.go | 18 +- weed/filer2/filer_buckets.go | 113 ++++++++ weed/filer2/filer_delete_entry.go | 32 ++- weed/filer2/leveldb/leveldb_store_test.go | 4 +- weed/filer2/leveldb2/leveldb2_store_test.go | 4 +- weed/filesys/dirty_page.go | 12 +- weed/filesys/filehandle.go | 2 + weed/operation/assign_file_id.go | 6 +- weed/pb/filer.proto | 3 + weed/pb/filer_pb/filer.pb.go | 245 ++++++++++-------- .../replication/sink/filersink/fetch_write.go | 14 +- weed/replication/sink/filersink/filer_sink.go | 4 +- weed/s3api/s3api_object_handlers.go | 5 +- weed/server/filer_grpc_server.go | 23 +- weed/server/filer_server.go | 5 +- weed/server/filer_server_handlers_write.go | 38 ++- weed/server/webdav_server.go | 10 +- 21 files changed, 436 insertions(+), 183 deletions(-) create mode 100644 weed/filer2/filer_buckets.go diff --git a/other/java/client/src/main/proto/filer.proto b/other/java/client/src/main/proto/filer.proto index 909458daf..6892effe8 100644 --- a/other/java/client/src/main/proto/filer.proto +++ b/other/java/client/src/main/proto/filer.proto @@ -165,6 +165,7 @@ message AssignVolumeRequest { string replication = 3; int32 ttl_sec = 4; string data_center = 5; + string parent_path = 6; } message AssignVolumeResponse { @@ -173,6 +174,8 @@ message AssignVolumeResponse { string public_url = 3; int32 count = 4; string auth = 5; + string collection = 6; + string replication = 7; } message LookupVolumeRequest { diff --git a/weed/command/filer_copy.go b/weed/command/filer_copy.go index 
e5979d786..a359bf32b 100644 --- a/weed/command/filer_copy.go +++ b/weed/command/filer_copy.go @@ -125,10 +125,6 @@ func runCopy(cmd *Command, args []string) bool { } copy.masters = masters - copy.masterClient = wdclient.NewMasterClient(ctx, copy.grpcDialOption, "client", copy.masters) - go copy.masterClient.KeepConnectedToMaster() - copy.masterClient.WaitUntilConnected() - if *cmdCopy.IsDebug { util.SetupProfiling("filer.copy.cpu.pprof", "filer.copy.mem.pprof") } @@ -274,23 +270,35 @@ func (worker *FileCopyWorker) uploadFileAsOne(ctx context.Context, task FileCopy mimeType := detectMimeType(f) var chunks []*filer_pb.FileChunk + var assignResult *filer_pb.AssignVolumeResponse + var assignError error if task.fileSize > 0 { // assign a volume - assignResult, err := operation.Assign(worker.options.masterClient.GetMaster(), worker.options.grpcDialOption, &operation.VolumeAssignRequest{ - Count: 1, - Replication: *worker.options.replication, - Collection: *worker.options.collection, - Ttl: *worker.options.ttl, + err := withFilerClient(ctx, worker.filerGrpcAddress, worker.options.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error { + + request := &filer_pb.AssignVolumeRequest{ + Count: 1, + Replication: *worker.options.replication, + Collection: *worker.options.collection, + TtlSec: int32(util.ParseInt(*worker.options.ttl, 0)), + ParentPath: task.destinationUrlPath, + } + + assignResult, assignError = client.AssignVolume(ctx, request) + if assignError != nil { + return fmt.Errorf("assign volume failure %v: %v", request, assignError) + } + return nil }) if err != nil { fmt.Printf("Failed to assign from %v: %v\n", worker.options.masters, err) } - targetUrl := "http://" + assignResult.Url + "/" + assignResult.Fid + targetUrl := "http://" + assignResult.Url + "/" + assignResult.FileId - uploadResult, err := operation.UploadWithLocalCompressionLevel(targetUrl, fileName, f, false, mimeType, nil, assignResult.Auth, *worker.options.compressionLevel) + uploadResult, err := operation.UploadWithLocalCompressionLevel(targetUrl, fileName, f, false, mimeType, nil, security.EncodedJwt(assignResult.Auth), *worker.options.compressionLevel) if err != nil { return fmt.Errorf("upload data %v to %s: %v\n", fileName, targetUrl, err) } @@ -300,7 +308,7 @@ func (worker *FileCopyWorker) uploadFileAsOne(ctx context.Context, task FileCopy fmt.Printf("uploaded %s to %s\n", fileName, targetUrl) chunks = append(chunks, &filer_pb.FileChunk{ - FileId: assignResult.Fid, + FileId: assignResult.FileId, Offset: 0, Size: uint64(uploadResult.Size), Mtime: time.Now().UnixNano(), @@ -352,6 +360,7 @@ func (worker *FileCopyWorker) uploadFileInChunks(ctx context.Context, task FileC concurrentChunks := make(chan struct{}, *worker.options.concurrenctChunks) var wg sync.WaitGroup var uploadError error + var collection, replication string fmt.Printf("uploading %s in %d chunks ...\n", fileName, chunkCount) for i := int64(0); i < int64(chunkCount) && uploadError == nil; i++ { @@ -363,22 +372,42 @@ func (worker *FileCopyWorker) uploadFileInChunks(ctx context.Context, task FileC <-concurrentChunks }() // assign a volume - assignResult, err := operation.Assign(worker.options.masterClient.GetMaster(), worker.options.grpcDialOption, &operation.VolumeAssignRequest{ - Count: 1, - Replication: *worker.options.replication, - Collection: *worker.options.collection, - Ttl: *worker.options.ttl, + var assignResult *filer_pb.AssignVolumeResponse + var assignError error + err := withFilerClient(ctx, worker.filerGrpcAddress, 
worker.options.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error { + request := &filer_pb.AssignVolumeRequest{ + Count: 1, + Replication: *worker.options.replication, + Collection: *worker.options.collection, + TtlSec: int32(util.ParseInt(*worker.options.ttl, 0)), + ParentPath: task.destinationUrlPath, + } + + assignResult, assignError = client.AssignVolume(ctx, request) + if assignError != nil { + return fmt.Errorf("assign volume failure %v: %v", request, assignError) + } + return nil }) if err != nil { fmt.Printf("Failed to assign from %v: %v\n", worker.options.masters, err) } + if err != nil { + fmt.Printf("Failed to assign from %v: %v\n", worker.options.masters, err) + } - targetUrl := "http://" + assignResult.Url + "/" + assignResult.Fid + targetUrl := "http://" + assignResult.Url + "/" + assignResult.FileId + if collection == "" { + collection = assignResult.Collection + } + if replication == "" { + replication = assignResult.Replication + } uploadResult, err := operation.Upload(targetUrl, fileName+"-"+strconv.FormatInt(i+1, 10), io.NewSectionReader(f, i*chunkSize, chunkSize), - false, "", nil, assignResult.Auth) + false, "", nil, security.EncodedJwt(assignResult.Auth)) if err != nil { uploadError = fmt.Errorf("upload data %v to %s: %v\n", fileName, targetUrl, err) return @@ -388,7 +417,7 @@ func (worker *FileCopyWorker) uploadFileInChunks(ctx context.Context, task FileC return } chunksChan <- &filer_pb.FileChunk{ - FileId: assignResult.Fid, + FileId: assignResult.FileId, Offset: i * chunkSize, Size: uint64(uploadResult.Size), Mtime: time.Now().UnixNano(), @@ -410,7 +439,7 @@ func (worker *FileCopyWorker) uploadFileInChunks(ctx context.Context, task FileC for _, chunk := range chunks { fileIds = append(fileIds, chunk.FileId) } - operation.DeleteFiles(worker.options.masterClient.GetMaster(), worker.options.grpcDialOption, fileIds) + operation.DeleteFiles(copy.masters[0], worker.options.grpcDialOption, fileIds) return uploadError } @@ -427,8 +456,8 @@ func (worker *FileCopyWorker) uploadFileInChunks(ctx context.Context, task FileC FileSize: uint64(task.fileSize), FileMode: uint32(task.fileMode), Mime: mimeType, - Replication: *worker.options.replication, - Collection: *worker.options.collection, + Replication: replication, + Collection: collection, TtlSec: int32(util.ParseInt(*worker.options.ttl, 0)), }, Chunks: chunks, diff --git a/weed/command/mount_std.go b/weed/command/mount_std.go index 891810e61..ba7fb852b 100644 --- a/weed/command/mount_std.go +++ b/weed/command/mount_std.go @@ -99,7 +99,7 @@ func RunMount(filer, filerMountRootPath, dir, collection, replication, dataCente fuse.VolumeName(mountName), fuse.FSName(filer + ":" + filerMountRootPath), fuse.Subtype("seaweedfs"), - fuse.NoAppleDouble(), + // fuse.NoAppleDouble(), // include .DS_Store, otherwise can not delete non-empty folders fuse.NoAppleXattr(), fuse.NoBrowse(), fuse.AutoXattr(), diff --git a/weed/command/s3.go b/weed/command/s3.go index c1ccca820..0eff76d32 100644 --- a/weed/command/s3.go +++ b/weed/command/s3.go @@ -134,6 +134,7 @@ func (s3opt *S3Options) startS3Server() bool { return fmt.Errorf("get filer %s configuration: %v", filerGrpcAddress, err) } filerBucketsPath = resp.DirBuckets + glog.V(0).Infof("S3 read filer buckets dir: %s", filerBucketsPath) return nil }) if err != nil { diff --git a/weed/filer2/filer.go b/weed/filer2/filer.go index 4db48e386..b84a67b22 100644 --- a/weed/filer2/filer.go +++ b/weed/filer2/filer.go @@ -30,9 +30,11 @@ type Filer struct { MasterClient *wdclient.MasterClient 
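// Hedged illustration of the configuration side (commit 0144): the buckets
// folder now lives in filer.toml rather than in per-command flags. A typical
// section would look like
//
//	[filer.options]
//	recursive_delete = false
//	# quoting the path is the usual TOML form for a string value
//	buckets_folder = "/buckets"
//
// and at startup the filer server can read it roughly as below. The viper key
// has to spell out the same section name as in filer.toml for the value to be
// found, and the "/buckets" fallback here is an assumption, not code from the
// patch.
v := util.GetViper()
dirBucketsPath := v.GetString("filer.options.buckets_folder")
if dirBucketsPath == "" {
	dirBucketsPath = "/buckets"
}
fs.filer.LoadBuckets(dirBucketsPath) // records DirBucketsPath and preloads buckets already present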
fileIdDeletionQueue *util.UnboundedQueue GrpcDialOption grpc.DialOption + DirBucketsPath string + buckets *FilerBuckets } -func NewFiler(masters []string, grpcDialOption grpc.DialOption) *Filer { +func NewFiler(masters []string, grpcDialOption grpc.DialOption, bucketFolder string) *Filer { f := &Filer{ directoryCache: ccache.New(ccache.Configure().MaxSize(1000).ItemsToPrune(100)), MasterClient: wdclient.NewMasterClient(context.Background(), grpcDialOption, "filer", masters), @@ -109,11 +111,13 @@ func (f *Filer) CreateEntry(ctx context.Context, entry *Entry, o_excl bool) erro dirEntry = &Entry{ FullPath: FullPath(dirPath), Attr: Attr{ - Mtime: now, - Crtime: now, - Mode: os.ModeDir | 0770, - Uid: entry.Uid, - Gid: entry.Gid, + Mtime: now, + Crtime: now, + Mode: os.ModeDir | 0770, + Uid: entry.Uid, + Gid: entry.Gid, + Collection: entry.Collection, + Replication: entry.Replication, }, } @@ -125,6 +129,7 @@ func (f *Filer) CreateEntry(ctx context.Context, entry *Entry, o_excl bool) erro return fmt.Errorf("mkdir %s: %v", dirPath, mkdirErr) } } else { + f.maybeAddBucket(dirEntry) f.NotifyUpdateEvent(nil, dirEntry, false) } @@ -175,6 +180,7 @@ func (f *Filer) CreateEntry(ctx context.Context, entry *Entry, o_excl bool) erro } } + f.maybeAddBucket(entry) f.NotifyUpdateEvent(oldEntry, entry, true) f.deleteChunksIfNotNew(oldEntry, entry) diff --git a/weed/filer2/filer_buckets.go b/weed/filer2/filer_buckets.go new file mode 100644 index 000000000..601b7dbf3 --- /dev/null +++ b/weed/filer2/filer_buckets.go @@ -0,0 +1,113 @@ +package filer2 + +import ( + "context" + "math" + "sync" + + "github.com/chrislusf/seaweedfs/weed/glog" +) + +type BucketName string +type BucketOption struct { + Name BucketName + Replication string +} +type FilerBuckets struct { + dirBucketsPath string + buckets map[BucketName]*BucketOption + sync.RWMutex +} + +func (f *Filer) LoadBuckets(dirBucketsPath string) { + + f.buckets = &FilerBuckets{ + buckets: make(map[BucketName]*BucketOption), + } + f.DirBucketsPath = dirBucketsPath + + limit := math.MaxInt32 + + entries, err := f.ListDirectoryEntries(context.Background(), FullPath(dirBucketsPath), "", false, limit) + + if err != nil { + glog.V(1).Infof("no buckets found: %v", err) + return + } + + glog.V(1).Infof("buckets found: %d", len(entries)) + + f.buckets.Lock() + for _, entry := range entries { + f.buckets.buckets[BucketName(entry.Name())] = &BucketOption{ + Name: BucketName(entry.Name()), + Replication: entry.Replication, + } + } + f.buckets.Unlock() + +} + +func (f *Filer) ReadBucketOption(buketName string) (replication string) { + + f.buckets.RLock() + defer f.buckets.RUnlock() + + option, found := f.buckets.buckets[BucketName(buketName)] + + if !found { + return "" + } + return option.Replication + +} + +func (f *Filer) isBucket(entry *Entry) bool { + if !entry.IsDirectory() { + return false + } + parent, dirName := entry.FullPath.DirAndName() + if parent != f.DirBucketsPath { + return false + } + + f.buckets.RLock() + defer f.buckets.RUnlock() + + _, found := f.buckets.buckets[BucketName(dirName)] + + return found + +} + +func (f *Filer) maybeAddBucket(entry *Entry) { + if !entry.IsDirectory() { + return + } + parent, dirName := entry.FullPath.DirAndName() + if parent != f.DirBucketsPath { + return + } + f.addBucket(dirName, &BucketOption{ + Name: BucketName(dirName), + Replication: entry.Replication, + }) +} + +func (f *Filer) addBucket(buketName string, bucketOption *BucketOption) { + + f.buckets.Lock() + defer f.buckets.Unlock() + + 
f.buckets.buckets[BucketName(buketName)] = bucketOption + +} + +func (f *Filer) deleteBucket(buketName string) { + + f.buckets.Lock() + defer f.buckets.Unlock() + + delete(f.buckets.buckets, BucketName(buketName)) + +} diff --git a/weed/filer2/filer_delete_entry.go b/weed/filer2/filer_delete_entry.go index ab7119042..2d3654df6 100644 --- a/weed/filer2/filer_delete_entry.go +++ b/weed/filer2/filer_delete_entry.go @@ -6,6 +6,7 @@ import ( "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/pb/master_pb" ) func (f *Filer) DeleteEntryMetaAndData(ctx context.Context, p FullPath, isRecursive bool, ignoreRecursiveError, shouldDeleteChunks bool) (err error) { @@ -18,27 +19,35 @@ func (f *Filer) DeleteEntryMetaAndData(ctx context.Context, p FullPath, isRecurs return findErr } + isCollection := f.isBucket(entry) + var chunks []*filer_pb.FileChunk chunks = append(chunks, entry.Chunks...) if entry.IsDirectory() { // delete the folder children, not including the folder itself var dirChunks []*filer_pb.FileChunk - dirChunks, err = f.doBatchDeleteFolderMetaAndData(ctx, entry, isRecursive, ignoreRecursiveError, shouldDeleteChunks) + dirChunks, err = f.doBatchDeleteFolderMetaAndData(ctx, entry, isRecursive, ignoreRecursiveError, shouldDeleteChunks && !isCollection) if err != nil { + glog.V(0).Infof("delete directory %s: %v", p, err) return fmt.Errorf("delete directory %s: %v", p, err) } chunks = append(chunks, dirChunks...) - f.cacheDelDirectory(string(p)) } + // delete the file or folder err = f.doDeleteEntryMetaAndData(ctx, entry, shouldDeleteChunks) if err != nil { return fmt.Errorf("delete file %s: %v", p, err) } - if shouldDeleteChunks { + if shouldDeleteChunks && !isCollection { go f.DeleteChunks(chunks) } + if isCollection { + collectionName := entry.Name() + f.doDeleteCollection(ctx, collectionName) + f.deleteBucket(collectionName) + } return nil } @@ -55,6 +64,9 @@ func (f *Filer) doBatchDeleteFolderMetaAndData(ctx context.Context, entry *Entry } if lastFileName == "" && !isRecursive && len(entries) > 0 { // only for first iteration in the loop + for _, child := range entries { + println("existing children", child.Name()) + } return nil, fmt.Errorf("fail to delete non-empty folder: %s", entry.FullPath) } @@ -100,3 +112,17 @@ func (f *Filer) doDeleteEntryMetaAndData(ctx context.Context, entry *Entry, shou return nil } + +func (f *Filer) doDeleteCollection(ctx context.Context, collectionName string) (err error) { + + return f.MasterClient.WithClient(ctx, func(client master_pb.SeaweedClient) error { + _, err := client.CollectionDelete(ctx, &master_pb.CollectionDeleteRequest{ + Name: collectionName, + }) + if err != nil { + glog.Infof("delete collection %s: %v", collectionName, err) + } + return err + }) + +} diff --git a/weed/filer2/leveldb/leveldb_store_test.go b/weed/filer2/leveldb/leveldb_store_test.go index 983e1cbe9..aaed5a8ae 100644 --- a/weed/filer2/leveldb/leveldb_store_test.go +++ b/weed/filer2/leveldb/leveldb_store_test.go @@ -9,7 +9,7 @@ import ( ) func TestCreateAndFind(t *testing.T) { - filer := filer2.NewFiler(nil, nil) + filer := filer2.NewFiler(nil, nil, "") dir, _ := ioutil.TempDir("", "seaweedfs_filer_test") defer os.RemoveAll(dir) store := &LevelDBStore{} @@ -64,7 +64,7 @@ func TestCreateAndFind(t *testing.T) { } func TestEmptyRoot(t *testing.T) { - filer := filer2.NewFiler(nil, nil) + filer := filer2.NewFiler(nil, nil, "") dir, _ := ioutil.TempDir("", "seaweedfs_filer_test2") defer os.RemoveAll(dir) 
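// Hedged, client-facing summary of what commit 0145 changes for directories
// under the buckets folder; the curl example and the localhost:8888 address
// are assumptions about a default single-filer setup, not part of the patch.
//
//   - Writing any file below /buckets/<name>/ makes the filer create the
//     <name> directory, and maybeAddBucket records it as a bucket whose name
//     doubles as the volume collection
//     (e.g. curl -F file=@a.jpg http://localhost:8888/buckets/photos/a.jpg).
//   - Deleting that directory recursively goes through isBucket, so besides
//     removing the entries it deletes the whole collection on the master and
//     drops the bucket from the in-memory registry.
req, _ := http.NewRequest(http.MethodDelete,
	"http://localhost:8888/buckets/photos/?recursive=true", nil)
if resp, err := http.DefaultClient.Do(req); err == nil {
	resp.Body.Close() // the "photos" collection is deleted as a side effect
}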
store := &LevelDBStore{} diff --git a/weed/filer2/leveldb2/leveldb2_store_test.go b/weed/filer2/leveldb2/leveldb2_store_test.go index 58637b7b6..e5146e8bd 100644 --- a/weed/filer2/leveldb2/leveldb2_store_test.go +++ b/weed/filer2/leveldb2/leveldb2_store_test.go @@ -9,7 +9,7 @@ import ( ) func TestCreateAndFind(t *testing.T) { - filer := filer2.NewFiler(nil, nil) + filer := filer2.NewFiler(nil, nil, "") dir, _ := ioutil.TempDir("", "seaweedfs_filer_test") defer os.RemoveAll(dir) store := &LevelDB2Store{} @@ -64,7 +64,7 @@ func TestCreateAndFind(t *testing.T) { } func TestEmptyRoot(t *testing.T) { - filer := filer2.NewFiler(nil, nil) + filer := filer2.NewFiler(nil, nil, "") dir, _ := ioutil.TempDir("", "seaweedfs_filer_test2") defer os.RemoveAll(dir) store := &LevelDB2Store{} diff --git a/weed/filesys/dirty_page.go b/weed/filesys/dirty_page.go index 5ff128323..a4d9d1df9 100644 --- a/weed/filesys/dirty_page.go +++ b/weed/filesys/dirty_page.go @@ -15,9 +15,11 @@ import ( ) type ContinuousDirtyPages struct { - intervals *ContinuousIntervals - f *File - lock sync.Mutex + intervals *ContinuousIntervals + f *File + lock sync.Mutex + collection string + replication string } func newDirtyPages(file *File) *ContinuousDirtyPages { @@ -140,6 +142,8 @@ func (pages *ContinuousDirtyPages) saveToStorage(ctx context.Context, reader io. var fileId, host string var auth security.EncodedJwt + dir, _ := pages.f.fullpath().DirAndName() + if err := pages.f.wfs.WithFilerClient(ctx, func(ctx context.Context, client filer_pb.SeaweedFilerClient) error { request := &filer_pb.AssignVolumeRequest{ @@ -148,6 +152,7 @@ func (pages *ContinuousDirtyPages) saveToStorage(ctx context.Context, reader io. Collection: pages.f.wfs.option.Collection, TtlSec: pages.f.wfs.option.TtlSec, DataCenter: pages.f.wfs.option.DataCenter, + ParentPath: dir, } resp, err := client.AssignVolume(ctx, request) @@ -157,6 +162,7 @@ func (pages *ContinuousDirtyPages) saveToStorage(ctx context.Context, reader io. 
} fileId, host, auth = resp.FileId, resp.Url, security.EncodedJwt(resp.Auth) + pages.collection, pages.replication = resp.Collection, resp.Replication return nil }); err != nil { diff --git a/weed/filesys/filehandle.go b/weed/filesys/filehandle.go index cf253a7ed..71954d75d 100644 --- a/weed/filesys/filehandle.go +++ b/weed/filesys/filehandle.go @@ -178,6 +178,8 @@ func (fh *FileHandle) Flush(ctx context.Context, req *fuse.FlushRequest) error { fh.f.entry.Attributes.Mtime = time.Now().Unix() fh.f.entry.Attributes.Crtime = time.Now().Unix() fh.f.entry.Attributes.FileMode = uint32(0777 &^ fh.f.wfs.option.Umask) + fh.f.entry.Attributes.Collection = fh.dirtyPages.collection + fh.f.entry.Attributes.Replication = fh.dirtyPages.replication } request := &filer_pb.CreateEntryRequest{ diff --git a/weed/operation/assign_file_id.go b/weed/operation/assign_file_id.go index b67d8b708..6da30605b 100644 --- a/weed/operation/assign_file_id.go +++ b/weed/operation/assign_file_id.go @@ -3,11 +3,13 @@ package operation import ( "context" "fmt" + "strings" + + "google.golang.org/grpc" + "github.com/chrislusf/seaweedfs/weed/pb/master_pb" "github.com/chrislusf/seaweedfs/weed/security" "github.com/chrislusf/seaweedfs/weed/util" - "google.golang.org/grpc" - "strings" ) type VolumeAssignRequest struct { diff --git a/weed/pb/filer.proto b/weed/pb/filer.proto index 909458daf..6892effe8 100644 --- a/weed/pb/filer.proto +++ b/weed/pb/filer.proto @@ -165,6 +165,7 @@ message AssignVolumeRequest { string replication = 3; int32 ttl_sec = 4; string data_center = 5; + string parent_path = 6; } message AssignVolumeResponse { @@ -173,6 +174,8 @@ message AssignVolumeResponse { string public_url = 3; int32 count = 4; string auth = 5; + string collection = 6; + string replication = 7; } message LookupVolumeRequest { diff --git a/weed/pb/filer_pb/filer.pb.go b/weed/pb/filer_pb/filer.pb.go index 84869de8c..d34d66023 100644 --- a/weed/pb/filer_pb/filer.pb.go +++ b/weed/pb/filer_pb/filer.pb.go @@ -685,6 +685,7 @@ type AssignVolumeRequest struct { Replication string `protobuf:"bytes,3,opt,name=replication" json:"replication,omitempty"` TtlSec int32 `protobuf:"varint,4,opt,name=ttl_sec,json=ttlSec" json:"ttl_sec,omitempty"` DataCenter string `protobuf:"bytes,5,opt,name=data_center,json=dataCenter" json:"data_center,omitempty"` + ParentPath string `protobuf:"bytes,6,opt,name=parent_path,json=parentPath" json:"parent_path,omitempty"` } func (m *AssignVolumeRequest) Reset() { *m = AssignVolumeRequest{} } @@ -727,12 +728,21 @@ func (m *AssignVolumeRequest) GetDataCenter() string { return "" } +func (m *AssignVolumeRequest) GetParentPath() string { + if m != nil { + return m.ParentPath + } + return "" +} + type AssignVolumeResponse struct { - FileId string `protobuf:"bytes,1,opt,name=file_id,json=fileId" json:"file_id,omitempty"` - Url string `protobuf:"bytes,2,opt,name=url" json:"url,omitempty"` - PublicUrl string `protobuf:"bytes,3,opt,name=public_url,json=publicUrl" json:"public_url,omitempty"` - Count int32 `protobuf:"varint,4,opt,name=count" json:"count,omitempty"` - Auth string `protobuf:"bytes,5,opt,name=auth" json:"auth,omitempty"` + FileId string `protobuf:"bytes,1,opt,name=file_id,json=fileId" json:"file_id,omitempty"` + Url string `protobuf:"bytes,2,opt,name=url" json:"url,omitempty"` + PublicUrl string `protobuf:"bytes,3,opt,name=public_url,json=publicUrl" json:"public_url,omitempty"` + Count int32 `protobuf:"varint,4,opt,name=count" json:"count,omitempty"` + Auth string `protobuf:"bytes,5,opt,name=auth" json:"auth,omitempty"` + 
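// Hedged sketch of how a writer is expected to use the two new proto fields
// together: ParentPath on the request lets the filer map the write to a
// bucket's collection, and the response echoes back the collection and
// replication actually chosen so the entry metadata can match the written
// chunks. Client setup and the chunk upload are elided; the variable names
// are illustrative, not taken from the patch.
request := &filer_pb.AssignVolumeRequest{
	Count:      1,
	TtlSec:     0,
	ParentPath: "/buckets/photos", // the directory the new file will live under
}
resp, err := client.AssignVolume(ctx, request)
if err != nil {
	return err
}
targetUrl := "http://" + resp.Url + "/" + resp.FileId // upload the chunk here, signed with resp.Auth
entry.Attributes.Collection = resp.Collection
entry.Attributes.Replication = resp.Replication
_ = targetUrl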
Collection string `protobuf:"bytes,6,opt,name=collection" json:"collection,omitempty"` + Replication string `protobuf:"bytes,7,opt,name=replication" json:"replication,omitempty"` } func (m *AssignVolumeResponse) Reset() { *m = AssignVolumeResponse{} } @@ -775,6 +785,20 @@ func (m *AssignVolumeResponse) GetAuth() string { return "" } +func (m *AssignVolumeResponse) GetCollection() string { + if m != nil { + return m.Collection + } + return "" +} + +func (m *AssignVolumeResponse) GetReplication() string { + if m != nil { + return m.Replication + } + return "" +} + type LookupVolumeRequest struct { VolumeIds []string `protobuf:"bytes,1,rep,name=volume_ids,json=volumeIds" json:"volume_ids,omitempty"` } @@ -1481,109 +1505,110 @@ var _SeaweedFiler_serviceDesc = grpc.ServiceDesc{ func init() { proto.RegisterFile("filer.proto", fileDescriptor0) } var fileDescriptor0 = []byte{ - // 1655 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xb4, 0x58, 0x4b, 0x6f, 0x1b, 0x47, - 0x12, 0xd6, 0xf0, 0x3d, 0x45, 0xd2, 0x96, 0x9a, 0x92, 0x4d, 0x53, 0x8f, 0x95, 0x47, 0x6b, 0xaf, - 0x16, 0x36, 0xb4, 0x86, 0xd6, 0x07, 0x7b, 0xbd, 0x7b, 0xb0, 0xf5, 0x58, 0x08, 0x2b, 0x3f, 0x30, - 0xb2, 0x17, 0xbb, 0x08, 0x90, 0xc1, 0x68, 0xa6, 0x49, 0x75, 0x34, 0x9c, 0x66, 0xba, 0x7b, 0x24, - 0x39, 0x3f, 0x21, 0x3f, 0x21, 0x40, 0xce, 0xf9, 0x01, 0xb9, 0x06, 0xb9, 0x04, 0x41, 0xfe, 0x4d, - 0x8e, 0x39, 0x07, 0xdd, 0x3d, 0x33, 0xec, 0x21, 0x29, 0xc9, 0x46, 0xe0, 0xdb, 0x74, 0xbd, 0xba, - 0xaa, 0xba, 0xea, 0xab, 0x22, 0xa1, 0xd9, 0x27, 0x11, 0x66, 0x5b, 0x23, 0x46, 0x05, 0x45, 0x0d, - 0x75, 0xf0, 0x46, 0xc7, 0xce, 0x6b, 0x58, 0x3e, 0xa4, 0xf4, 0x34, 0x19, 0xed, 0x12, 0x86, 0x03, - 0x41, 0xd9, 0xfb, 0xbd, 0x58, 0xb0, 0xf7, 0x2e, 0xfe, 0x32, 0xc1, 0x5c, 0xa0, 0x15, 0xb0, 0xc3, - 0x8c, 0xd1, 0xb5, 0xd6, 0xad, 0x4d, 0xdb, 0x1d, 0x13, 0x10, 0x82, 0x4a, 0xec, 0x0f, 0x71, 0xb7, - 0xa4, 0x18, 0xea, 0xdb, 0xd9, 0x83, 0x95, 0xd9, 0x06, 0xf9, 0x88, 0xc6, 0x1c, 0xa3, 0x7b, 0x50, - 0xc5, 0x92, 0xa0, 0xac, 0x35, 0xb7, 0x6f, 0x6e, 0x65, 0xae, 0x6c, 0x69, 0x39, 0xcd, 0x75, 0x7e, - 0xb4, 0x00, 0x1d, 0x12, 0x2e, 0x24, 0x91, 0x60, 0xfe, 0x61, 0xfe, 0xdc, 0x82, 0xda, 0x88, 0xe1, - 0x3e, 0xb9, 0x48, 0x3d, 0x4a, 0x4f, 0xe8, 0x21, 0x2c, 0x70, 0xe1, 0x33, 0xb1, 0xcf, 0xe8, 0x70, - 0x9f, 0x44, 0xf8, 0x95, 0x74, 0xba, 0xac, 0x44, 0xa6, 0x19, 0x68, 0x0b, 0x10, 0x89, 0x83, 0x28, - 0xe1, 0xe4, 0x0c, 0x1f, 0x65, 0xdc, 0x6e, 0x65, 0xdd, 0xda, 0x6c, 0xb8, 0x33, 0x38, 0x68, 0x11, - 0xaa, 0x11, 0x19, 0x12, 0xd1, 0xad, 0xae, 0x5b, 0x9b, 0x6d, 0x57, 0x1f, 0x9c, 0x7f, 0x42, 0xa7, - 0xe0, 0xff, 0xc7, 0x85, 0xff, 0x6d, 0x09, 0xaa, 0x8a, 0x90, 0xe7, 0xd8, 0x1a, 0xe7, 0x18, 0xdd, - 0x85, 0x16, 0xe1, 0xde, 0x38, 0x11, 0x25, 0xe5, 0x5b, 0x93, 0xf0, 0x3c, 0xe7, 0xe8, 0x01, 0xd4, - 0x82, 0x93, 0x24, 0x3e, 0xe5, 0xdd, 0xf2, 0x7a, 0x79, 0xb3, 0xb9, 0xdd, 0x19, 0x5f, 0x24, 0x03, - 0xdd, 0x91, 0x3c, 0x37, 0x15, 0x41, 0x4f, 0x00, 0x7c, 0x21, 0x18, 0x39, 0x4e, 0x04, 0xe6, 0x2a, - 0xd2, 0xe6, 0x76, 0xd7, 0x50, 0x48, 0x38, 0x7e, 0x9e, 0xf3, 0x5d, 0x43, 0x16, 0x3d, 0x85, 0x06, - 0xbe, 0x10, 0x38, 0x0e, 0x71, 0xd8, 0xad, 0xaa, 0x8b, 0x56, 0x27, 0x22, 0xda, 0xda, 0x4b, 0xf9, - 0x3a, 0xbe, 0x5c, 0xbc, 0xf7, 0x0c, 0xda, 0x05, 0x16, 0x9a, 0x87, 0xf2, 0x29, 0xce, 0x5e, 0x55, - 0x7e, 0xca, 0xcc, 0x9e, 0xf9, 0x51, 0xa2, 0x0b, 0xac, 0xe5, 0xea, 0xc3, 0x3f, 0x4a, 0x4f, 0x2c, - 0x67, 0x17, 0xec, 0xfd, 0x24, 0x8a, 0x72, 0xc5, 0x90, 0xb0, 0x4c, 0x31, 0x24, 0x6c, 0x9c, 0xe5, - 0xd2, 0x95, 0x59, 0xfe, 0xc1, 0x82, 0x85, 0xbd, 0x33, 0x1c, 0x8b, 0x57, 
0x54, 0x90, 0x3e, 0x09, - 0x7c, 0x41, 0x68, 0x8c, 0x1e, 0x82, 0x4d, 0xa3, 0xd0, 0xbb, 0xf2, 0x99, 0x1a, 0x34, 0x4a, 0xbd, - 0x7e, 0x08, 0x76, 0x8c, 0xcf, 0xbd, 0x2b, 0xaf, 0x6b, 0xc4, 0xf8, 0x5c, 0x4b, 0x6f, 0x40, 0x3b, - 0xc4, 0x11, 0x16, 0xd8, 0xcb, 0x5f, 0x47, 0x3e, 0x5d, 0x4b, 0x13, 0x77, 0xf4, 0x73, 0xdc, 0x87, - 0x9b, 0xd2, 0xe4, 0xc8, 0x67, 0x38, 0x16, 0xde, 0xc8, 0x17, 0x27, 0xea, 0x4d, 0x6c, 0xb7, 0x1d, - 0xe3, 0xf3, 0x37, 0x8a, 0xfa, 0xc6, 0x17, 0x27, 0xce, 0x6f, 0x16, 0xd8, 0xf9, 0x63, 0xa2, 0xdb, - 0x50, 0x97, 0xd7, 0x7a, 0x24, 0x4c, 0x33, 0x51, 0x93, 0xc7, 0x83, 0x50, 0x76, 0x05, 0xed, 0xf7, - 0x39, 0x16, 0xca, 0xbd, 0xb2, 0x9b, 0x9e, 0x64, 0x65, 0x71, 0xf2, 0x95, 0x6e, 0x84, 0x8a, 0xab, - 0xbe, 0x65, 0xc6, 0x87, 0x82, 0x0c, 0xb1, 0xba, 0xb0, 0xec, 0xea, 0x03, 0xea, 0x40, 0x15, 0x7b, - 0xc2, 0x1f, 0xa8, 0x0a, 0xb7, 0xdd, 0x0a, 0x7e, 0xeb, 0x0f, 0xd0, 0x9f, 0xe1, 0x06, 0xa7, 0x09, - 0x0b, 0xb0, 0x97, 0x5d, 0x5b, 0x53, 0xdc, 0x96, 0xa6, 0xee, 0xeb, 0xcb, 0x1d, 0x28, 0xf7, 0x49, - 0xd8, 0xad, 0xab, 0xc4, 0xcc, 0x17, 0x8b, 0xf0, 0x20, 0x74, 0x25, 0x13, 0xfd, 0x0d, 0x20, 0xb7, - 0x14, 0x76, 0x1b, 0x97, 0x88, 0xda, 0x99, 0xdd, 0xd0, 0xf9, 0x1f, 0xd4, 0x52, 0xf3, 0xcb, 0x60, - 0x9f, 0xd1, 0x28, 0x19, 0xe6, 0x61, 0xb7, 0xdd, 0x86, 0x26, 0x1c, 0x84, 0xe8, 0x0e, 0x28, 0x9c, - 0xf3, 0x64, 0x55, 0x95, 0x54, 0x90, 0x2a, 0x43, 0xff, 0xc1, 0x0a, 0x29, 0x02, 0x4a, 0x4f, 0x89, - 0x8e, 0xbe, 0xee, 0xa6, 0x27, 0xe7, 0xd7, 0x12, 0xdc, 0x28, 0x96, 0xbb, 0xbc, 0x42, 0x59, 0x51, - 0xb9, 0xb2, 0x94, 0x19, 0x65, 0xf6, 0xa8, 0x90, 0xaf, 0x92, 0x99, 0xaf, 0x4c, 0x65, 0x48, 0x43, - 0x7d, 0x41, 0x5b, 0xab, 0xbc, 0xa4, 0x21, 0x96, 0xd5, 0x9a, 0x90, 0x50, 0x25, 0xb8, 0xed, 0xca, - 0x4f, 0x49, 0x19, 0x90, 0x30, 0x85, 0x0f, 0xf9, 0xa9, 0xdc, 0x63, 0xca, 0x6e, 0x4d, 0x3f, 0x99, - 0x3e, 0xc9, 0x27, 0x1b, 0x4a, 0x6a, 0x5d, 0xbf, 0x83, 0xfc, 0x46, 0xeb, 0xd0, 0x64, 0x78, 0x14, - 0xa5, 0xd5, 0xab, 0xd2, 0x67, 0xbb, 0x26, 0x09, 0xad, 0x01, 0x04, 0x34, 0x8a, 0x70, 0xa0, 0x04, - 0x6c, 0x25, 0x60, 0x50, 0x64, 0xe5, 0x08, 0x11, 0x79, 0x1c, 0x07, 0x5d, 0x58, 0xb7, 0x36, 0xab, - 0x6e, 0x4d, 0x88, 0xe8, 0x08, 0x07, 0x32, 0x8e, 0x84, 0x63, 0xe6, 0x29, 0x00, 0x6a, 0x2a, 0xbd, - 0x86, 0x24, 0x28, 0x98, 0x5c, 0x05, 0x18, 0x30, 0x9a, 0x8c, 0x34, 0xb7, 0xb5, 0x5e, 0x96, 0x58, - 0xac, 0x28, 0x8a, 0x7d, 0x0f, 0x6e, 0xf0, 0xf7, 0xc3, 0x88, 0xc4, 0xa7, 0x9e, 0xf0, 0xd9, 0x00, - 0x8b, 0x6e, 0x5b, 0xd7, 0x70, 0x4a, 0x7d, 0xab, 0x88, 0xce, 0x08, 0xd0, 0x0e, 0xc3, 0xbe, 0xc0, - 0x1f, 0x31, 0x76, 0x3e, 0xac, 0xbb, 0xd1, 0x12, 0xd4, 0xa8, 0x87, 0x2f, 0x82, 0x28, 0x6d, 0xb2, - 0x2a, 0xdd, 0xbb, 0x08, 0x22, 0xe7, 0x01, 0x74, 0x0a, 0x37, 0xa6, 0xc0, 0xbc, 0x08, 0x55, 0xcc, - 0x18, 0xcd, 0x60, 0x44, 0x1f, 0x9c, 0xff, 0x03, 0x7a, 0x37, 0x0a, 0x3f, 0x85, 0x7b, 0xce, 0x12, - 0x74, 0x0a, 0xa6, 0xb5, 0x1f, 0xce, 0xcf, 0x16, 0xa0, 0x5d, 0x85, 0x06, 0x7f, 0x6c, 0x10, 0xcb, - 0xfe, 0x94, 0x43, 0x42, 0xa3, 0x4d, 0xe8, 0x0b, 0x3f, 0x1d, 0x61, 0x2d, 0xc2, 0xb5, 0xfd, 0x5d, - 0x5f, 0xf8, 0xe9, 0x28, 0x61, 0x38, 0x48, 0x98, 0x9c, 0x6a, 0xaa, 0x08, 0xd5, 0x28, 0x71, 0x33, - 0x12, 0x7a, 0x0c, 0xb7, 0xc8, 0x20, 0xa6, 0x0c, 0x8f, 0xc5, 0x3c, 0x9d, 0xaa, 0x9a, 0x12, 0x5e, - 0xd4, 0xdc, 0x5c, 0x61, 0x4f, 0x65, 0x6e, 0x09, 0x3a, 0x85, 0x30, 0xd2, 0xf0, 0xbe, 0xb1, 0xa0, - 0xfb, 0x5c, 0xd0, 0x21, 0x09, 0x5c, 0x2c, 0xdd, 0x2c, 0x04, 0xb9, 0x01, 0x6d, 0x89, 0xbc, 0x93, - 0x81, 0xb6, 0x68, 0x14, 0x8e, 0x27, 0xdb, 0x1d, 0x90, 0xe0, 0xeb, 0x19, 0xf1, 0xd6, 0x69, 0x14, - 0xaa, 0x9a, 0xdb, 0x00, 0x89, 0x90, 0x86, 0xbe, 0x9e, 0xf1, 0xad, 0x18, 0x9f, 0x17, 0xf4, 0xa5, - 
0x90, 0xd2, 0xd7, 0xb0, 0x5a, 0x8f, 0xf1, 0xb9, 0xd4, 0x77, 0x96, 0xe1, 0xce, 0x0c, 0xdf, 0x52, - 0xcf, 0xbf, 0xb3, 0xa0, 0xf3, 0x9c, 0x73, 0x32, 0x88, 0xff, 0xab, 0x00, 0x26, 0x73, 0x7a, 0x11, - 0xaa, 0x01, 0x4d, 0x62, 0xa1, 0x9c, 0xad, 0xba, 0xfa, 0x30, 0xd1, 0x73, 0xa5, 0xa9, 0x9e, 0x9b, - 0xe8, 0xda, 0xf2, 0x74, 0xd7, 0x1a, 0x5d, 0x59, 0x29, 0x74, 0xe5, 0x9f, 0xa0, 0x29, 0x9f, 0xd3, - 0x0b, 0x70, 0x2c, 0x30, 0x4b, 0x31, 0x19, 0x24, 0x69, 0x47, 0x51, 0x9c, 0xaf, 0x2d, 0x58, 0x2c, - 0x7a, 0x9a, 0xd6, 0xf8, 0xa5, 0x23, 0x42, 0x62, 0x12, 0x8b, 0x52, 0x37, 0xe5, 0xa7, 0xec, 0xee, - 0x51, 0x72, 0x1c, 0x91, 0xc0, 0x93, 0x0c, 0xed, 0x9e, 0xad, 0x29, 0xef, 0x58, 0x34, 0x0e, 0xba, - 0x62, 0x06, 0x8d, 0xa0, 0xe2, 0x27, 0xe2, 0x24, 0x1b, 0x13, 0xf2, 0xdb, 0x79, 0x0c, 0x1d, 0xbd, - 0x0f, 0x16, 0xb3, 0xb6, 0x0a, 0x90, 0x03, 0x37, 0xef, 0x5a, 0x1a, 0x3d, 0x32, 0xe4, 0xe6, 0xce, - 0xbf, 0xc0, 0x3e, 0xa4, 0x3a, 0x11, 0x1c, 0x3d, 0x02, 0x3b, 0xca, 0x0e, 0x4a, 0xb4, 0xb9, 0x8d, - 0xc6, 0x4d, 0x95, 0xc9, 0xb9, 0x63, 0x21, 0xe7, 0x19, 0x34, 0x32, 0x72, 0x16, 0x9b, 0x75, 0x59, - 0x6c, 0xa5, 0x89, 0xd8, 0x9c, 0x9f, 0x2c, 0x58, 0x2c, 0xba, 0x9c, 0xa6, 0xef, 0x1d, 0xb4, 0xf3, - 0x2b, 0xbc, 0xa1, 0x3f, 0x4a, 0x7d, 0x79, 0x64, 0xfa, 0x32, 0xad, 0x96, 0x3b, 0xc8, 0x5f, 0xfa, - 0x23, 0x5d, 0x52, 0xad, 0xc8, 0x20, 0xf5, 0xde, 0xc2, 0xc2, 0x94, 0xc8, 0x8c, 0x65, 0xe8, 0xaf, - 0xe6, 0x32, 0x54, 0x58, 0xe8, 0x72, 0x6d, 0x73, 0x43, 0x7a, 0x0a, 0xb7, 0x75, 0xff, 0xed, 0xe4, - 0x45, 0x97, 0xe5, 0xbe, 0x58, 0x9b, 0xd6, 0x64, 0x6d, 0x3a, 0x3d, 0xe8, 0x4e, 0xab, 0xa6, 0x5d, - 0x30, 0x80, 0x85, 0x23, 0xe1, 0x0b, 0xc2, 0x05, 0x09, 0xf2, 0xad, 0x7c, 0xa2, 0x98, 0xad, 0xeb, - 0x46, 0xd0, 0x74, 0x3b, 0xcc, 0x43, 0x59, 0x88, 0xac, 0xce, 0xe4, 0xa7, 0x7c, 0x05, 0x64, 0xde, - 0x94, 0xbe, 0xc1, 0x27, 0xb8, 0x4a, 0xd6, 0x83, 0xa0, 0xc2, 0x8f, 0xf4, 0x88, 0xaf, 0xa8, 0x11, - 0x6f, 0x2b, 0x8a, 0x9a, 0xf1, 0x7a, 0x0a, 0x86, 0x9a, 0x5b, 0xd5, 0x0b, 0x80, 0x24, 0x28, 0xe6, - 0x2a, 0x80, 0x6a, 0x29, 0xdd, 0x0d, 0x35, 0xad, 0x2b, 0x29, 0x3b, 0x92, 0xe0, 0xac, 0xc1, 0xca, - 0xbf, 0xb1, 0x90, 0xcb, 0x0a, 0xdb, 0xa1, 0x71, 0x9f, 0x0c, 0x12, 0xe6, 0x1b, 0x4f, 0xe1, 0x7c, - 0x6f, 0xc1, 0xea, 0x25, 0x02, 0x69, 0xc0, 0x5d, 0xa8, 0x0f, 0x7d, 0x2e, 0x30, 0xcb, 0xba, 0x24, - 0x3b, 0x4e, 0xa6, 0xa2, 0x74, 0x5d, 0x2a, 0xca, 0x53, 0xa9, 0x58, 0x82, 0xda, 0xd0, 0xbf, 0xf0, - 0x86, 0xc7, 0xe9, 0x36, 0x52, 0x1d, 0xfa, 0x17, 0x2f, 0x8f, 0x15, 0xc0, 0x10, 0xe6, 0x1d, 0x27, - 0xc1, 0x29, 0x16, 0x3c, 0x07, 0x18, 0xc2, 0x5e, 0x68, 0xca, 0xf6, 0x2f, 0x75, 0x68, 0x1d, 0x61, - 0xff, 0x1c, 0xe3, 0x50, 0x79, 0x8e, 0x06, 0x59, 0xc7, 0x14, 0x7f, 0xf4, 0xa1, 0x7b, 0x93, 0xad, - 0x31, 0xf3, 0x57, 0x66, 0xef, 0xfe, 0x75, 0x62, 0x69, 0xf1, 0xcd, 0xa1, 0x57, 0xd0, 0x34, 0x7e, - 0x55, 0xa1, 0x15, 0x43, 0x71, 0xea, 0xc7, 0x62, 0x6f, 0xf5, 0x12, 0x6e, 0x66, 0xed, 0x91, 0x85, - 0x0e, 0xa1, 0x69, 0x2c, 0x03, 0xa6, 0xbd, 0xe9, 0xad, 0xc4, 0xb4, 0x37, 0x63, 0x83, 0x70, 0xe6, - 0xa4, 0x35, 0x63, 0xa4, 0x9b, 0xd6, 0xa6, 0x97, 0x08, 0xd3, 0xda, 0xac, 0x3d, 0x40, 0x59, 0x33, - 0x26, 0xa8, 0x69, 0x6d, 0x7a, 0x3f, 0x30, 0xad, 0xcd, 0x1a, 0xbb, 0x73, 0xe8, 0x73, 0x58, 0x98, - 0x9a, 0x6d, 0xc8, 0x19, 0x6b, 0x5d, 0x36, 0x94, 0x7b, 0x1b, 0x57, 0xca, 0xe4, 0xf6, 0x5f, 0x43, - 0xcb, 0x9c, 0x39, 0xc8, 0x70, 0x68, 0xc6, 0xd4, 0xec, 0xad, 0x5d, 0xc6, 0x36, 0x0d, 0x9a, 0x70, - 0x6a, 0x1a, 0x9c, 0x31, 0x50, 0x4c, 0x83, 0xb3, 0x50, 0xd8, 0x99, 0x43, 0x9f, 0xc1, 0xfc, 0x24, - 0xac, 0xa1, 0xbb, 0x93, 0x69, 0x9b, 0x42, 0xcb, 0x9e, 0x73, 0x95, 0x48, 0x6e, 0xfc, 0x00, 0x60, - 0x8c, 0x56, 0x68, 0x79, 
0xac, 0x33, 0x85, 0x96, 0xbd, 0x95, 0xd9, 0xcc, 0xdc, 0xd4, 0x17, 0xb0, - 0x34, 0x13, 0x12, 0x90, 0xd1, 0x26, 0x57, 0x81, 0x4a, 0xef, 0x2f, 0xd7, 0xca, 0x65, 0x77, 0xbd, - 0x58, 0x83, 0x79, 0xae, 0x1b, 0xb9, 0xcf, 0xb7, 0x82, 0x88, 0xe0, 0x58, 0xbc, 0x00, 0xa5, 0xf1, - 0x86, 0x51, 0x41, 0x8f, 0x6b, 0xea, 0xff, 0xa2, 0xbf, 0xff, 0x1e, 0x00, 0x00, 0xff, 0xff, 0x7f, - 0xd8, 0x6f, 0xb2, 0x3e, 0x12, 0x00, 0x00, + // 1671 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xb4, 0x58, 0xcb, 0x6f, 0xdb, 0x46, + 0x1a, 0x37, 0xf5, 0xe6, 0x27, 0x29, 0xb1, 0x47, 0x76, 0xa2, 0xc8, 0x8f, 0x75, 0xe8, 0x4d, 0xd6, + 0x8b, 0x04, 0xde, 0xc0, 0x9b, 0x43, 0xb2, 0xd9, 0x3d, 0x24, 0x7e, 0x2c, 0x8c, 0x75, 0x1e, 0xa0, + 0x93, 0xc5, 0x2e, 0x0a, 0x94, 0xa0, 0xc9, 0x91, 0x3c, 0x35, 0xc5, 0x61, 0x87, 0x43, 0xdb, 0xe9, + 0x9f, 0x52, 0xa0, 0x7f, 0x45, 0xaf, 0x45, 0x2f, 0x45, 0xd1, 0x1e, 0xfa, 0xb7, 0xf4, 0xd8, 0x73, + 0x31, 0x33, 0x24, 0x35, 0x14, 0x65, 0x3b, 0x41, 0x91, 0x1b, 0xe7, 0x7b, 0xcd, 0x37, 0xbf, 0xef, + 0x29, 0x41, 0x7b, 0x48, 0x02, 0xcc, 0xb6, 0x22, 0x46, 0x39, 0x45, 0x2d, 0x79, 0x70, 0xa2, 0x63, + 0xeb, 0x35, 0x2c, 0x1f, 0x52, 0x7a, 0x9a, 0x44, 0xbb, 0x84, 0x61, 0x8f, 0x53, 0xf6, 0x7e, 0x2f, + 0xe4, 0xec, 0xbd, 0x8d, 0xbf, 0x4c, 0x70, 0xcc, 0xd1, 0x0a, 0x98, 0x7e, 0xc6, 0xe8, 0x1b, 0xeb, + 0xc6, 0xa6, 0x69, 0x4f, 0x08, 0x08, 0x41, 0x2d, 0x74, 0xc7, 0xb8, 0x5f, 0x91, 0x0c, 0xf9, 0x6d, + 0xed, 0xc1, 0xca, 0x6c, 0x83, 0x71, 0x44, 0xc3, 0x18, 0xa3, 0x7b, 0x50, 0xc7, 0x82, 0x20, 0xad, + 0xb5, 0xb7, 0x6f, 0x6e, 0x65, 0xae, 0x6c, 0x29, 0x39, 0xc5, 0xb5, 0xbe, 0x37, 0x00, 0x1d, 0x92, + 0x98, 0x0b, 0x22, 0xc1, 0xf1, 0x87, 0xf9, 0x73, 0x0b, 0x1a, 0x11, 0xc3, 0x43, 0x72, 0x91, 0x7a, + 0x94, 0x9e, 0xd0, 0x43, 0x58, 0x88, 0xb9, 0xcb, 0xf8, 0x3e, 0xa3, 0xe3, 0x7d, 0x12, 0xe0, 0x57, + 0xc2, 0xe9, 0xaa, 0x14, 0x29, 0x33, 0xd0, 0x16, 0x20, 0x12, 0x7a, 0x41, 0x12, 0x93, 0x33, 0x7c, + 0x94, 0x71, 0xfb, 0xb5, 0x75, 0x63, 0xb3, 0x65, 0xcf, 0xe0, 0xa0, 0x45, 0xa8, 0x07, 0x64, 0x4c, + 0x78, 0xbf, 0xbe, 0x6e, 0x6c, 0x76, 0x6d, 0x75, 0xb0, 0xfe, 0x09, 0xbd, 0x82, 0xff, 0x1f, 0xf7, + 0xfc, 0x6f, 0x2a, 0x50, 0x97, 0x84, 0x1c, 0x63, 0x63, 0x82, 0x31, 0xba, 0x0b, 0x1d, 0x12, 0x3b, + 0x13, 0x20, 0x2a, 0xd2, 0xb7, 0x36, 0x89, 0x73, 0xcc, 0xd1, 0x03, 0x68, 0x78, 0x27, 0x49, 0x78, + 0x1a, 0xf7, 0xab, 0xeb, 0xd5, 0xcd, 0xf6, 0x76, 0x6f, 0x72, 0x91, 0x78, 0xe8, 0x8e, 0xe0, 0xd9, + 0xa9, 0x08, 0x7a, 0x02, 0xe0, 0x72, 0xce, 0xc8, 0x71, 0xc2, 0x71, 0x2c, 0x5f, 0xda, 0xde, 0xee, + 0x6b, 0x0a, 0x49, 0x8c, 0x9f, 0xe7, 0x7c, 0x5b, 0x93, 0x45, 0x4f, 0xa1, 0x85, 0x2f, 0x38, 0x0e, + 0x7d, 0xec, 0xf7, 0xeb, 0xf2, 0xa2, 0xd5, 0xa9, 0x17, 0x6d, 0xed, 0xa5, 0x7c, 0xf5, 0xbe, 0x5c, + 0x7c, 0xf0, 0x0c, 0xba, 0x05, 0x16, 0x9a, 0x87, 0xea, 0x29, 0xce, 0xa2, 0x2a, 0x3e, 0x05, 0xb2, + 0x67, 0x6e, 0x90, 0xa8, 0x04, 0xeb, 0xd8, 0xea, 0xf0, 0x8f, 0xca, 0x13, 0xc3, 0xda, 0x05, 0x73, + 0x3f, 0x09, 0x82, 0x5c, 0xd1, 0x27, 0x2c, 0x53, 0xf4, 0x09, 0x9b, 0xa0, 0x5c, 0xb9, 0x12, 0xe5, + 0xef, 0x0c, 0x58, 0xd8, 0x3b, 0xc3, 0x21, 0x7f, 0x45, 0x39, 0x19, 0x12, 0xcf, 0xe5, 0x84, 0x86, + 0xe8, 0x21, 0x98, 0x34, 0xf0, 0x9d, 0x2b, 0xc3, 0xd4, 0xa2, 0x41, 0xea, 0xf5, 0x43, 0x30, 0x43, + 0x7c, 0xee, 0x5c, 0x79, 0x5d, 0x2b, 0xc4, 0xe7, 0x4a, 0x7a, 0x03, 0xba, 0x3e, 0x0e, 0x30, 0xc7, + 0x4e, 0x1e, 0x1d, 0x11, 0xba, 0x8e, 0x22, 0xee, 0xa8, 0x70, 0xdc, 0x87, 0x9b, 0xc2, 0x64, 0xe4, + 0x32, 0x1c, 0x72, 0x27, 0x72, 0xf9, 0x89, 0x8c, 0x89, 0x69, 0x77, 0x43, 0x7c, 0xfe, 0x46, 0x52, + 0xdf, 0xb8, 0xfc, 0xc4, 0xfa, 0xcd, 0x00, 0x33, 0x0f, 
0x26, 0xba, 0x0d, 0x4d, 0x71, 0xad, 0x43, + 0xfc, 0x14, 0x89, 0x86, 0x38, 0x1e, 0xf8, 0xa2, 0x2a, 0xe8, 0x70, 0x18, 0x63, 0x2e, 0xdd, 0xab, + 0xda, 0xe9, 0x49, 0x64, 0x56, 0x4c, 0xbe, 0x52, 0x85, 0x50, 0xb3, 0xe5, 0xb7, 0x40, 0x7c, 0xcc, + 0xc9, 0x18, 0xcb, 0x0b, 0xab, 0xb6, 0x3a, 0xa0, 0x1e, 0xd4, 0xb1, 0xc3, 0xdd, 0x91, 0xcc, 0x70, + 0xd3, 0xae, 0xe1, 0xb7, 0xee, 0x08, 0xfd, 0x19, 0x6e, 0xc4, 0x34, 0x61, 0x1e, 0x76, 0xb2, 0x6b, + 0x1b, 0x92, 0xdb, 0x51, 0xd4, 0x7d, 0x75, 0xb9, 0x05, 0xd5, 0x21, 0xf1, 0xfb, 0x4d, 0x09, 0xcc, + 0x7c, 0x31, 0x09, 0x0f, 0x7c, 0x5b, 0x30, 0xd1, 0xdf, 0x00, 0x72, 0x4b, 0x7e, 0xbf, 0x75, 0x89, + 0xa8, 0x99, 0xd9, 0xf5, 0xad, 0xff, 0x41, 0x23, 0x35, 0xbf, 0x0c, 0xe6, 0x19, 0x0d, 0x92, 0x71, + 0xfe, 0xec, 0xae, 0xdd, 0x52, 0x84, 0x03, 0x1f, 0xdd, 0x01, 0xd9, 0xe7, 0x1c, 0x91, 0x55, 0x15, + 0xf9, 0x48, 0x89, 0xd0, 0x7f, 0xb0, 0xec, 0x14, 0x1e, 0xa5, 0xa7, 0x44, 0xbd, 0xbe, 0x69, 0xa7, + 0x27, 0xeb, 0xd7, 0x0a, 0xdc, 0x28, 0xa6, 0xbb, 0xb8, 0x42, 0x5a, 0x91, 0x58, 0x19, 0xd2, 0x8c, + 0x34, 0x7b, 0x54, 0xc0, 0xab, 0xa2, 0xe3, 0x95, 0xa9, 0x8c, 0xa9, 0xaf, 0x2e, 0xe8, 0x2a, 0x95, + 0x97, 0xd4, 0xc7, 0x22, 0x5b, 0x13, 0xe2, 0x4b, 0x80, 0xbb, 0xb6, 0xf8, 0x14, 0x94, 0x11, 0xf1, + 0xd3, 0xf6, 0x21, 0x3e, 0xa5, 0x7b, 0x4c, 0xda, 0x6d, 0xa8, 0x90, 0xa9, 0x93, 0x08, 0xd9, 0x58, + 0x50, 0x9b, 0x2a, 0x0e, 0xe2, 0x1b, 0xad, 0x43, 0x9b, 0xe1, 0x28, 0x48, 0xb3, 0x57, 0xc2, 0x67, + 0xda, 0x3a, 0x09, 0xad, 0x01, 0x78, 0x34, 0x08, 0xb0, 0x27, 0x05, 0x4c, 0x29, 0xa0, 0x51, 0x44, + 0xe6, 0x70, 0x1e, 0x38, 0x31, 0xf6, 0xfa, 0xb0, 0x6e, 0x6c, 0xd6, 0xed, 0x06, 0xe7, 0xc1, 0x11, + 0xf6, 0xc4, 0x3b, 0x92, 0x18, 0x33, 0x47, 0x36, 0xa0, 0xb6, 0xd4, 0x6b, 0x09, 0x82, 0x6c, 0x93, + 0xab, 0x00, 0x23, 0x46, 0x93, 0x48, 0x71, 0x3b, 0xeb, 0x55, 0xd1, 0x8b, 0x25, 0x45, 0xb2, 0xef, + 0xc1, 0x8d, 0xf8, 0xfd, 0x38, 0x20, 0xe1, 0xa9, 0xc3, 0x5d, 0x36, 0xc2, 0xbc, 0xdf, 0x55, 0x39, + 0x9c, 0x52, 0xdf, 0x4a, 0xa2, 0x15, 0x01, 0xda, 0x61, 0xd8, 0xe5, 0xf8, 0x23, 0xc6, 0xce, 0x87, + 0x55, 0x37, 0x5a, 0x82, 0x06, 0x75, 0xf0, 0x85, 0x17, 0xa4, 0x45, 0x56, 0xa7, 0x7b, 0x17, 0x5e, + 0x60, 0x3d, 0x80, 0x5e, 0xe1, 0xc6, 0xb4, 0x31, 0x2f, 0x42, 0x1d, 0x33, 0x46, 0xb3, 0x36, 0xa2, + 0x0e, 0xd6, 0xff, 0x01, 0xbd, 0x8b, 0xfc, 0x4f, 0xe1, 0x9e, 0xb5, 0x04, 0xbd, 0x82, 0x69, 0xe5, + 0x87, 0xf5, 0xa3, 0x01, 0x68, 0x57, 0x76, 0x83, 0x3f, 0x36, 0x88, 0x45, 0x7d, 0x8a, 0x21, 0xa1, + 0xba, 0x8d, 0xef, 0x72, 0x37, 0x1d, 0x61, 0x1d, 0x12, 0x2b, 0xfb, 0xbb, 0x2e, 0x77, 0xd3, 0x51, + 0xc2, 0xb0, 0x97, 0x30, 0x31, 0xd5, 0x64, 0x12, 0xca, 0x51, 0x62, 0x67, 0x24, 0xf4, 0x18, 0x6e, + 0x91, 0x51, 0x48, 0x19, 0x9e, 0x88, 0x39, 0x0a, 0xaa, 0x86, 0x14, 0x5e, 0x54, 0xdc, 0x5c, 0x61, + 0x4f, 0x22, 0xb7, 0x04, 0xbd, 0xc2, 0x33, 0xd2, 0xe7, 0x7d, 0x6d, 0x40, 0xff, 0x39, 0xa7, 0x63, + 0xe2, 0xd9, 0x58, 0xb8, 0x59, 0x78, 0xe4, 0x06, 0x74, 0x45, 0xe7, 0x9d, 0x7e, 0x68, 0x87, 0x06, + 0xfe, 0x64, 0xb2, 0xdd, 0x01, 0xd1, 0x7c, 0x1d, 0xed, 0xbd, 0x4d, 0x1a, 0xf8, 0x32, 0xe7, 0x36, + 0x40, 0x74, 0x48, 0x4d, 0x5f, 0xcd, 0xf8, 0x4e, 0x88, 0xcf, 0x0b, 0xfa, 0x42, 0x48, 0xea, 0xab, + 0xb6, 0xda, 0x0c, 0xf1, 0xb9, 0xd0, 0xb7, 0x96, 0xe1, 0xce, 0x0c, 0xdf, 0x52, 0xcf, 0x7f, 0x36, + 0xa0, 0xf7, 0x3c, 0x8e, 0xc9, 0x28, 0xfc, 0xaf, 0x6c, 0x30, 0x99, 0xd3, 0x8b, 0x50, 0xf7, 0x68, + 0x12, 0x72, 0xe9, 0x6c, 0xdd, 0x56, 0x87, 0xa9, 0x9a, 0xab, 0x94, 0x6a, 0x6e, 0xaa, 0x6a, 0xab, + 0xe5, 0xaa, 0xd5, 0xaa, 0xb2, 0x56, 0xa8, 0xca, 0x3f, 0x41, 0x5b, 0x84, 0xd3, 0xf1, 0x70, 0xc8, + 0x31, 0x4b, 0x7b, 0x32, 0x08, 0xd2, 0x8e, 0xa4, 0x08, 0x01, 0x7d, 0x76, 0xa8, 
0xb6, 0x0c, 0xd1, + 0x64, 0x70, 0xfc, 0x62, 0xc0, 0x62, 0xf1, 0x29, 0x69, 0x11, 0x5c, 0x3a, 0x43, 0x44, 0xd3, 0x62, + 0x41, 0xfa, 0x0e, 0xf1, 0x29, 0xca, 0x3f, 0x4a, 0x8e, 0x03, 0xe2, 0x39, 0x82, 0xa1, 0xfc, 0x37, + 0x15, 0xe5, 0x1d, 0x0b, 0x26, 0xa8, 0xd4, 0x74, 0x54, 0x10, 0xd4, 0xdc, 0x84, 0x9f, 0x64, 0x73, + 0x44, 0x7c, 0x4f, 0x21, 0xd5, 0xb8, 0x0e, 0xa9, 0x66, 0x09, 0x29, 0xeb, 0x31, 0xf4, 0xd4, 0xca, + 0x59, 0x0c, 0xcc, 0x2a, 0x40, 0x3e, 0x1b, 0xe2, 0xbe, 0xa1, 0x1a, 0x54, 0x36, 0x1c, 0x62, 0xeb, + 0x5f, 0x60, 0x1e, 0x52, 0x65, 0x21, 0x46, 0x8f, 0xc0, 0x0c, 0xb2, 0x83, 0x14, 0x6d, 0x6f, 0xa3, + 0x49, 0xdd, 0x66, 0x72, 0xf6, 0x44, 0xc8, 0x7a, 0x06, 0xad, 0x8c, 0x9c, 0xa1, 0x63, 0x5c, 0x86, + 0x4e, 0x65, 0x0a, 0x1d, 0xeb, 0x07, 0x03, 0x16, 0x8b, 0x2e, 0xa7, 0x01, 0x78, 0x07, 0xdd, 0xfc, + 0x0a, 0x67, 0xec, 0x46, 0xa9, 0x2f, 0x8f, 0x74, 0x5f, 0xca, 0x6a, 0xb9, 0x83, 0xf1, 0x4b, 0x37, + 0x52, 0x59, 0xdb, 0x09, 0x34, 0xd2, 0xe0, 0x2d, 0x2c, 0x94, 0x44, 0x66, 0xec, 0x5b, 0x7f, 0xd5, + 0xf7, 0xad, 0xc2, 0xce, 0x98, 0x6b, 0xeb, 0x4b, 0xd8, 0x53, 0xb8, 0xad, 0x4a, 0x7c, 0x27, 0x8f, + 0x56, 0x86, 0x7d, 0x31, 0xa8, 0xc6, 0x74, 0x50, 0xad, 0x01, 0xf4, 0xcb, 0xaa, 0x69, 0xa1, 0x8d, + 0x60, 0xe1, 0x88, 0xbb, 0x9c, 0xc4, 0x9c, 0x78, 0xf9, 0xe2, 0x3f, 0x95, 0x05, 0xc6, 0x75, 0x53, + 0xae, 0x5c, 0x71, 0xf3, 0x50, 0xe5, 0x3c, 0xcb, 0x54, 0xf1, 0x29, 0xa2, 0x80, 0xf4, 0x9b, 0xd2, + 0x18, 0x7c, 0x82, 0xab, 0x44, 0x3e, 0x70, 0xca, 0xdd, 0x40, 0x6d, 0x11, 0x35, 0xb9, 0x45, 0x98, + 0x92, 0x22, 0xd7, 0x08, 0x35, 0x68, 0x7d, 0xc5, 0xad, 0xab, 0x1d, 0x43, 0x10, 0x24, 0x73, 0x15, + 0x40, 0x16, 0xa5, 0xaa, 0xa7, 0x86, 0xd2, 0x15, 0x94, 0x1d, 0x41, 0xb0, 0xd6, 0x60, 0xe5, 0xdf, + 0x98, 0x8b, 0x7d, 0x88, 0xed, 0xd0, 0x70, 0x48, 0x46, 0x09, 0x73, 0xb5, 0x50, 0x58, 0xdf, 0x1a, + 0xb0, 0x7a, 0x89, 0x40, 0xfa, 0xe0, 0x3e, 0x34, 0xc7, 0x6e, 0xcc, 0x31, 0xcb, 0xaa, 0x24, 0x3b, + 0x4e, 0x43, 0x51, 0xb9, 0x0e, 0x8a, 0x6a, 0x09, 0x8a, 0x25, 0x68, 0x8c, 0xdd, 0x0b, 0x67, 0x7c, + 0x9c, 0x2e, 0x3c, 0xf5, 0xb1, 0x7b, 0xf1, 0xf2, 0x58, 0xf6, 0x30, 0xc2, 0x9c, 0xe3, 0xc4, 0x3b, + 0xc5, 0x3c, 0xce, 0x7b, 0x18, 0x61, 0x2f, 0x14, 0x65, 0xfb, 0xa7, 0x26, 0x74, 0x8e, 0xb0, 0x7b, + 0x8e, 0xb1, 0x2f, 0x3d, 0x47, 0xa3, 0xac, 0x62, 0x8a, 0xbf, 0x2b, 0xd1, 0xbd, 0xe9, 0xd2, 0x98, + 0xf9, 0x43, 0x76, 0x70, 0xff, 0x3a, 0xb1, 0x34, 0xf9, 0xe6, 0xd0, 0x2b, 0x68, 0x6b, 0x3f, 0xdc, + 0xd0, 0x8a, 0xa6, 0x58, 0xfa, 0x3d, 0x3a, 0x58, 0xbd, 0x84, 0x9b, 0x59, 0x7b, 0x64, 0xa0, 0x43, + 0x68, 0x6b, 0xfb, 0x86, 0x6e, 0xaf, 0xbc, 0xf8, 0xe8, 0xf6, 0x66, 0x2c, 0x29, 0xd6, 0x9c, 0xb0, + 0xa6, 0x6d, 0x0d, 0xba, 0xb5, 0xf2, 0x9e, 0xa2, 0x5b, 0x9b, 0xb5, 0x6a, 0x48, 0x6b, 0xda, 0x90, + 0xd6, 0xad, 0x95, 0x57, 0x10, 0xdd, 0xda, 0xac, 0xc9, 0x3e, 0x87, 0x3e, 0x87, 0x85, 0xd2, 0xf8, + 0x44, 0xd6, 0x44, 0xeb, 0xb2, 0xb9, 0x3f, 0xd8, 0xb8, 0x52, 0x26, 0xb7, 0xff, 0x1a, 0x3a, 0xfa, + 0xd4, 0x42, 0x9a, 0x43, 0x33, 0x06, 0xf3, 0x60, 0xed, 0x32, 0xb6, 0x6e, 0x50, 0x6f, 0xa7, 0xba, + 0xc1, 0x19, 0x03, 0x45, 0x37, 0x38, 0xab, 0x0b, 0x5b, 0x73, 0xe8, 0x33, 0x98, 0x9f, 0x6e, 0x6b, + 0xe8, 0xee, 0x34, 0x6c, 0xa5, 0x6e, 0x39, 0xb0, 0xae, 0x12, 0xc9, 0x8d, 0x1f, 0x00, 0x4c, 0xba, + 0x15, 0x5a, 0x9e, 0xe8, 0x94, 0xba, 0xe5, 0x60, 0x65, 0x36, 0x33, 0x37, 0xf5, 0x05, 0x2c, 0xcd, + 0x6c, 0x09, 0x48, 0x2b, 0x93, 0xab, 0x9a, 0xca, 0xe0, 0x2f, 0xd7, 0xca, 0x65, 0x77, 0xbd, 0x58, + 0x83, 0xf9, 0x58, 0x15, 0xf2, 0x30, 0xde, 0xf2, 0x02, 0x82, 0x43, 0xfe, 0x02, 0xa4, 0xc6, 0x1b, + 0x46, 0x39, 0x3d, 0x6e, 0xc8, 0xbf, 0xa4, 0xfe, 0xfe, 0x7b, 0x00, 0x00, 0x00, 0xff, 0xff, 0xaa, + 0x2b, 
0xd6, 0xf6, 0xa1, 0x12, 0x00, 0x00, } diff --git a/weed/replication/sink/filersink/fetch_write.go b/weed/replication/sink/filersink/fetch_write.go index 26c055da5..fe1e87b6b 100644 --- a/weed/replication/sink/filersink/fetch_write.go +++ b/weed/replication/sink/filersink/fetch_write.go @@ -3,10 +3,11 @@ package filersink import ( "context" "fmt" - "google.golang.org/grpc" "strings" "sync" + "google.golang.org/grpc" + "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/operation" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" @@ -14,7 +15,7 @@ import ( "github.com/chrislusf/seaweedfs/weed/util" ) -func (fs *FilerSink) replicateChunks(ctx context.Context, sourceChunks []*filer_pb.FileChunk) (replicatedChunks []*filer_pb.FileChunk, err error) { +func (fs *FilerSink) replicateChunks(ctx context.Context, sourceChunks []*filer_pb.FileChunk, dir string) (replicatedChunks []*filer_pb.FileChunk, err error) { if len(sourceChunks) == 0 { return } @@ -23,7 +24,7 @@ func (fs *FilerSink) replicateChunks(ctx context.Context, sourceChunks []*filer_ wg.Add(1) go func(chunk *filer_pb.FileChunk) { defer wg.Done() - replicatedChunk, e := fs.replicateOneChunk(ctx, chunk) + replicatedChunk, e := fs.replicateOneChunk(ctx, chunk, dir) if e != nil { err = e } @@ -35,9 +36,9 @@ func (fs *FilerSink) replicateChunks(ctx context.Context, sourceChunks []*filer_ return } -func (fs *FilerSink) replicateOneChunk(ctx context.Context, sourceChunk *filer_pb.FileChunk) (*filer_pb.FileChunk, error) { +func (fs *FilerSink) replicateOneChunk(ctx context.Context, sourceChunk *filer_pb.FileChunk, dir string) (*filer_pb.FileChunk, error) { - fileId, err := fs.fetchAndWrite(ctx, sourceChunk) + fileId, err := fs.fetchAndWrite(ctx, sourceChunk, dir) if err != nil { return nil, fmt.Errorf("copy %s: %v", sourceChunk.GetFileIdString(), err) } @@ -52,7 +53,7 @@ func (fs *FilerSink) replicateOneChunk(ctx context.Context, sourceChunk *filer_p }, nil } -func (fs *FilerSink) fetchAndWrite(ctx context.Context, sourceChunk *filer_pb.FileChunk) (fileId string, err error) { +func (fs *FilerSink) fetchAndWrite(ctx context.Context, sourceChunk *filer_pb.FileChunk, dir string) (fileId string, err error) { filename, header, readCloser, err := fs.filerSource.ReadPart(ctx, sourceChunk.GetFileIdString()) if err != nil { @@ -71,6 +72,7 @@ func (fs *FilerSink) fetchAndWrite(ctx context.Context, sourceChunk *filer_pb.Fi Collection: fs.collection, TtlSec: fs.ttlSec, DataCenter: fs.dataCenter, + ParentPath: dir, } resp, err := client.AssignVolume(ctx, request) diff --git a/weed/replication/sink/filersink/filer_sink.go b/weed/replication/sink/filersink/filer_sink.go index de99fbe1c..8c4c39bc4 100644 --- a/weed/replication/sink/filersink/filer_sink.go +++ b/weed/replication/sink/filersink/filer_sink.go @@ -105,7 +105,7 @@ func (fs *FilerSink) CreateEntry(ctx context.Context, key string, entry *filer_p } } - replicatedChunks, err := fs.replicateChunks(ctx, entry.Chunks) + replicatedChunks, err := fs.replicateChunks(ctx, entry.Chunks, dir) if err != nil { glog.V(0).Infof("replicate entry chunks %s: %v", key, err) @@ -184,7 +184,7 @@ func (fs *FilerSink) UpdateEntry(ctx context.Context, key string, oldEntry *file } // replicate the chunks that are new in the source - replicatedChunks, err := fs.replicateChunks(ctx, newChunks) + replicatedChunks, err := fs.replicateChunks(ctx, newChunks, newParentPath) if err != nil { return true, fmt.Errorf("replicte %s chunks error: %v", key, err) } diff --git a/weed/s3api/s3api_object_handlers.go 
b/weed/s3api/s3api_object_handlers.go index 662a313f9..864376d60 100644 --- a/weed/s3api/s3api_object_handlers.go +++ b/weed/s3api/s3api_object_handlers.go @@ -52,8 +52,7 @@ func (s3a *S3ApiServer) PutObjectHandler(w http.ResponseWriter, r *http.Request) } defer dataReader.Close() - uploadUrl := fmt.Sprintf("http://%s%s/%s%s?collection=%s", - s3a.option.Filer, s3a.option.BucketsPath, bucket, object, bucket) + uploadUrl := fmt.Sprintf("http://%s%s/%s%s", s3a.option.Filer, s3a.option.BucketsPath, bucket, object) etag, errCode := s3a.putToFiler(r, uploadUrl, dataReader) @@ -167,7 +166,7 @@ func passThroughResponse(proxyResonse *http.Response, w http.ResponseWriter) { func (s3a *S3ApiServer) putToFiler(r *http.Request, uploadUrl string, dataReader io.Reader) (etag string, code ErrorCode) { hash := md5.New() - var body io.Reader = io.TeeReader(dataReader, hash) + var body = io.TeeReader(dataReader, hash) proxyReq, err := http.NewRequest("PUT", uploadUrl, body) diff --git a/weed/server/filer_grpc_server.go b/weed/server/filer_grpc_server.go index 98951c347..12ea25144 100644 --- a/weed/server/filer_grpc_server.go +++ b/weed/server/filer_grpc_server.go @@ -54,6 +54,7 @@ func (fs *FilerServer) ListEntries(req *filer_pb.ListEntriesRequest, stream file includeLastFile := req.InclusiveStartFrom for limit > 0 { entries, err := fs.filer.ListDirectoryEntries(stream.Context(), filer2.FullPath(req.Directory), lastFileName, includeLastFile, paginationLimit) + if err != nil { return err } @@ -84,6 +85,7 @@ func (fs *FilerServer) ListEntries(req *filer_pb.ListEntriesRequest, stream file }); err != nil { return err } + limit-- if limit == 0 { return nil @@ -226,6 +228,7 @@ func (fs *FilerServer) AssignVolume(ctx context.Context, req *filer_pb.AssignVol if req.TtlSec > 0 { ttlStr = strconv.Itoa(int(req.TtlSec)) } + collection, replication := fs.detectCollection(req.ParentPath, req.Collection, req.Replication) var altRequest *operation.VolumeAssignRequest @@ -236,16 +239,16 @@ func (fs *FilerServer) AssignVolume(ctx context.Context, req *filer_pb.AssignVol assignRequest := &operation.VolumeAssignRequest{ Count: uint64(req.Count), - Replication: req.Replication, - Collection: req.Collection, + Replication: replication, + Collection: collection, Ttl: ttlStr, DataCenter: dataCenter, } if dataCenter != "" { altRequest = &operation.VolumeAssignRequest{ Count: uint64(req.Count), - Replication: req.Replication, - Collection: req.Collection, + Replication: replication, + Collection: collection, Ttl: ttlStr, DataCenter: "", } @@ -261,11 +264,13 @@ func (fs *FilerServer) AssignVolume(ctx context.Context, req *filer_pb.AssignVol } return &filer_pb.AssignVolumeResponse{ - FileId: assignResult.Fid, - Count: int32(assignResult.Count), - Url: assignResult.Url, - PublicUrl: assignResult.PublicUrl, - Auth: string(assignResult.Auth), + FileId: assignResult.Fid, + Count: int32(assignResult.Count), + Url: assignResult.Url, + PublicUrl: assignResult.PublicUrl, + Auth: string(assignResult.Auth), + Collection: collection, + Replication: replication, }, err } diff --git a/weed/server/filer_server.go b/weed/server/filer_server.go index 55888a4a4..b0df851c9 100644 --- a/weed/server/filer_server.go +++ b/weed/server/filer_server.go @@ -67,7 +67,7 @@ func NewFilerServer(defaultMux, readonlyMux *http.ServeMux, option *FilerOption) glog.Fatal("master list is required!") } - fs.filer = filer2.NewFiler(option.Masters, fs.grpcDialOption) + fs.filer = filer2.NewFiler(option.Masters, fs.grpcDialOption, option.DirBucketsPath) go 
fs.filer.KeepConnectedToMaster() @@ -83,6 +83,7 @@ func NewFilerServer(defaultMux, readonlyMux *http.ServeMux, option *FilerOption) util.LoadConfiguration("notification", false) fs.option.recursiveDelete = v.GetBool("filer.options.recursive_delete") + v.Set("filer.option.buckets_folder", "/buckets") fs.option.DirBucketsPath = v.GetString("filer.option.buckets_folder") fs.filer.LoadConfiguration(v) @@ -96,6 +97,8 @@ func NewFilerServer(defaultMux, readonlyMux *http.ServeMux, option *FilerOption) readonlyMux.HandleFunc("/", fs.readonlyFilerHandler) } + fs.filer.LoadBuckets(fs.option.DirBucketsPath) + maybeStartMetrics(fs, option) return fs, nil diff --git a/weed/server/filer_server_handlers_write.go b/weed/server/filer_server_handlers_write.go index 4707f1011..bb5f28663 100644 --- a/weed/server/filer_server_handlers_write.go +++ b/weed/server/filer_server_handlers_write.go @@ -80,14 +80,7 @@ func (fs *FilerServer) PostHandler(w http.ResponseWriter, r *http.Request) { ctx := context.Background() query := r.URL.Query() - replication := query.Get("replication") - if replication == "" { - replication = fs.option.DefaultReplication - } - collection := query.Get("collection") - if collection == "" { - collection = fs.option.Collection - } + collection, replication := fs.detectCollection(r.RequestURI, query.Get("collection"), query.Get("replication")) dataCenter := query.Get("dataCenter") if dataCenter == "" { dataCenter = fs.option.DataCenter @@ -305,3 +298,32 @@ func (fs *FilerServer) DeleteHandler(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusNoContent) } + +func (fs *FilerServer) detectCollection(requestURI, qCollection, qReplication string) (collection, replication string) { + // default + collection = fs.option.Collection + replication = fs.option.DefaultReplication + + // get default collection settings + if qCollection != "" { + collection = qCollection + } + if qReplication != "" { + replication = qReplication + } + + // required by buckets folder + if strings.HasPrefix(requestURI, fs.filer.DirBucketsPath+"/") { + bucketAndObjectKey := requestURI[len(fs.filer.DirBucketsPath)+1:] + t := strings.Index(bucketAndObjectKey, "/") + if t < 0 { + collection = bucketAndObjectKey + } + if t > 0 { + collection = bucketAndObjectKey[:t] + } + replication = fs.filer.ReadBucketOption(collection) + } + + return +} diff --git a/weed/server/webdav_server.go b/weed/server/webdav_server.go index d75869f30..8b0f09edc 100644 --- a/weed/server/webdav_server.go +++ b/weed/server/webdav_server.go @@ -367,6 +367,8 @@ func (f *WebDavFile) Write(buf []byte) (int, error) { glog.V(2).Infof("WebDavFileSystem.Write %v", f.name) + dir, _ := filer2.FullPath(f.name).DirAndName() + var err error ctx := context.Background() if f.entry == nil { @@ -382,13 +384,15 @@ func (f *WebDavFile) Write(buf []byte) (int, error) { var fileId, host string var auth security.EncodedJwt + var collection, replication string if err = f.fs.WithFilerClient(ctx, func(ctx context.Context, client filer_pb.SeaweedFilerClient) error { request := &filer_pb.AssignVolumeRequest{ Count: 1, - Replication: "000", + Replication: "", Collection: f.fs.option.Collection, + ParentPath: dir, } resp, err := client.AssignVolume(ctx, request) @@ -398,6 +402,7 @@ func (f *WebDavFile) Write(buf []byte) (int, error) { } fileId, host, auth = resp.FileId, resp.Url, security.EncodedJwt(resp.Auth) + collection, replication = resp.Collection, resp.Replication return nil }); err != nil { @@ -425,10 +430,11 @@ func (f *WebDavFile) Write(buf []byte) (int, 
error) { } f.entry.Chunks = append(f.entry.Chunks, chunk) - dir, _ := filer2.FullPath(f.name).DirAndName() err = f.fs.WithFilerClient(ctx, func(ctx context.Context, client filer_pb.SeaweedFilerClient) error { f.entry.Attributes.Mtime = time.Now().Unix() + f.entry.Attributes.Collection = collection + f.entry.Attributes.Replication = replication request := &filer_pb.UpdateEntryRequest{ Directory: dir, From 0644d637484fb811ab2b0491becde49fa51894aa Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Mon, 24 Feb 2020 23:30:01 -0800 Subject: [PATCH 0146/2432] shell: add commands for bucket --- weed/shell/command_bucket_create.go | 90 +++++++++++++++++++++++++++++ weed/shell/command_bucket_delete.go | 73 +++++++++++++++++++++++ weed/shell/command_bucket_list.go | 83 ++++++++++++++++++++++++++ 3 files changed, 246 insertions(+) create mode 100644 weed/shell/command_bucket_create.go create mode 100644 weed/shell/command_bucket_delete.go create mode 100644 weed/shell/command_bucket_list.go diff --git a/weed/shell/command_bucket_create.go b/weed/shell/command_bucket_create.go new file mode 100644 index 000000000..603e9c564 --- /dev/null +++ b/weed/shell/command_bucket_create.go @@ -0,0 +1,90 @@ +package shell + +import ( + "context" + "flag" + "fmt" + "io" + "os" + "time" + + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" +) + +func init() { + Commands = append(Commands, &commandBucketCreate{}) +} + +type commandBucketCreate struct { +} + +func (c *commandBucketCreate) Name() string { + return "bucket.create" +} + +func (c *commandBucketCreate) Help() string { + return `create a bucket with a given name + + Example: + bucket.create -name -replication 001 +` +} + +func (c *commandBucketCreate) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) { + + bucketCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError) + bucketName := bucketCommand.String("name", "", "bucket name") + replication := bucketCommand.String("replication", "", "replication setting for the bucket") + if err = bucketCommand.Parse(args); err != nil { + return nil + } + + if *bucketName == "" { + return fmt.Errorf("empty bucket name") + } + + filerServer, filerPort, _, parseErr := commandEnv.parseUrl(findInputDirectory(bucketCommand.Args())) + if parseErr != nil { + return parseErr + } + + ctx := context.Background() + + err = commandEnv.withFilerClient(ctx, filerServer, filerPort, func(ctx context.Context, client filer_pb.SeaweedFilerClient) error { + + resp, err := client.GetFilerConfiguration(ctx, &filer_pb.GetFilerConfigurationRequest{}) + if err != nil { + return fmt.Errorf("get filer %s:%d configuration: %v", filerServer, filerPort, err) + } + filerBucketsPath := resp.DirBuckets + + println("create bucket under", filerBucketsPath) + + entry := &filer_pb.Entry{ + Name: *bucketName, + IsDirectory: true, + Attributes: &filer_pb.FuseAttributes{ + Mtime: time.Now().Unix(), + Crtime: time.Now().Unix(), + FileMode: uint32(0777 | os.ModeDir), + Collection: *bucketName, + Replication: *replication, + }, + } + + if err := filer_pb.CreateEntry(ctx, client, &filer_pb.CreateEntryRequest{ + Directory: filerBucketsPath, + Entry: entry, + }); err != nil { + return err + } + + println("created bucket", *bucketName) + + return nil + + }) + + return err + +} diff --git a/weed/shell/command_bucket_delete.go b/weed/shell/command_bucket_delete.go new file mode 100644 index 000000000..9e814ccf9 --- /dev/null +++ b/weed/shell/command_bucket_delete.go @@ -0,0 +1,73 @@ +package shell + +import ( + "context" + "flag" + "fmt" + "io" + + 
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb" +) + +func init() { + Commands = append(Commands, &commandBucketDelete{}) +} + +type commandBucketDelete struct { +} + +func (c *commandBucketDelete) Name() string { + return "bucket.delete" +} + +func (c *commandBucketDelete) Help() string { + return `delete a bucket by a given name + + bucket.delete -name +` +} + +func (c *commandBucketDelete) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) { + + bucketCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError) + bucketName := bucketCommand.String("name", "", "bucket name") + if err = bucketCommand.Parse(args); err != nil { + return nil + } + + if *bucketName == "" { + return fmt.Errorf("empty bucket name") + } + + filerServer, filerPort, _, parseErr := commandEnv.parseUrl(findInputDirectory(bucketCommand.Args())) + if parseErr != nil { + return parseErr + } + + ctx := context.Background() + + err = commandEnv.withFilerClient(ctx, filerServer, filerPort, func(ctx context.Context, client filer_pb.SeaweedFilerClient) error { + + resp, err := client.GetFilerConfiguration(ctx, &filer_pb.GetFilerConfigurationRequest{}) + if err != nil { + return fmt.Errorf("get filer %s:%d configuration: %v", filerServer, filerPort, err) + } + filerBucketsPath := resp.DirBuckets + + if _, err := client.DeleteEntry(ctx, &filer_pb.DeleteEntryRequest{ + Directory: filerBucketsPath, + Name: *bucketName, + IsDeleteData: false, + IsRecursive: true, + IgnoreRecursiveError: true, + }); err != nil { + return err + } + + return nil + + }) + + return err + +} diff --git a/weed/shell/command_bucket_list.go b/weed/shell/command_bucket_list.go new file mode 100644 index 000000000..051eeda2d --- /dev/null +++ b/weed/shell/command_bucket_list.go @@ -0,0 +1,83 @@ +package shell + +import ( + "context" + "flag" + "fmt" + "io" + "math" + + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" +) + +func init() { + Commands = append(Commands, &commandBucketList{}) +} + +type commandBucketList struct { +} + +func (c *commandBucketList) Name() string { + return "bucket.list" +} + +func (c *commandBucketList) Help() string { + return `list all buckets + +` +} + +func (c *commandBucketList) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) { + + bucketCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError) + if err = bucketCommand.Parse(args); err != nil { + return nil + } + + filerServer, filerPort, _, parseErr := commandEnv.parseUrl(findInputDirectory(bucketCommand.Args())) + if parseErr != nil { + return parseErr + } + + ctx := context.Background() + + err = commandEnv.withFilerClient(ctx, filerServer, filerPort, func(ctx context.Context, client filer_pb.SeaweedFilerClient) error { + + resp, err := client.GetFilerConfiguration(ctx, &filer_pb.GetFilerConfigurationRequest{}) + if err != nil { + return fmt.Errorf("get filer %s:%d configuration: %v", filerServer, filerPort, err) + } + filerBucketsPath := resp.DirBuckets + + stream, err := client.ListEntries(ctx, &filer_pb.ListEntriesRequest{ + Directory: filerBucketsPath, + Limit: math.MaxUint32, + }) + if err != nil { + return fmt.Errorf("list buckets under %v: %v", filerBucketsPath, err) + } + + for { + resp, recvErr := stream.Recv() + if recvErr != nil { + if recvErr == io.EOF { + break + } else { + return recvErr + } + } + + if resp.Entry.Attributes.Replication == "" { + fmt.Fprintf(writer, " %s\n", resp.Entry.Name) + } else { + fmt.Fprintf(writer, " %s\t\t\treplication: %s\n", resp.Entry.Name, resp.Entry.Attributes.Replication) + } + } 
+ + return nil + + }) + + return err + +} From e86da5a4918e0cf663a8c592387c72c850647e4e Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Tue, 25 Feb 2020 00:42:48 -0800 Subject: [PATCH 0147/2432] minor --- weed/filesys/dir.go | 4 ++-- weed/shell/command_bucket_list.go | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/weed/filesys/dir.go b/weed/filesys/dir.go index abe5a21a6..203545b44 100644 --- a/weed/filesys/dir.go +++ b/weed/filesys/dir.go @@ -54,7 +54,7 @@ func (dir *Dir) Attr(ctx context.Context, attr *fuse.Attr) error { attr.Inode = filer2.FullPath(dir.Path).AsInode() attr.Mode = os.FileMode(dir.entry.Attributes.FileMode) | os.ModeDir attr.Mtime = time.Unix(dir.entry.Attributes.Mtime, 0) - attr.Ctime = time.Unix(dir.entry.Attributes.Crtime, 0) + attr.Crtime = time.Unix(dir.entry.Attributes.Crtime, 0) attr.Gid = dir.entry.Attributes.Gid attr.Uid = dir.entry.Attributes.Uid @@ -221,7 +221,7 @@ func (dir *Dir) Lookup(ctx context.Context, req *fuse.LookupRequest, resp *fuse. resp.Attr.Inode = fullFilePath.AsInode() resp.Attr.Valid = time.Second resp.Attr.Mtime = time.Unix(entry.Attributes.Mtime, 0) - resp.Attr.Ctime = time.Unix(entry.Attributes.Crtime, 0) + resp.Attr.Crtime = time.Unix(entry.Attributes.Crtime, 0) resp.Attr.Mode = os.FileMode(entry.Attributes.FileMode) resp.Attr.Gid = entry.Attributes.Gid resp.Attr.Uid = entry.Attributes.Uid diff --git a/weed/shell/command_bucket_list.go b/weed/shell/command_bucket_list.go index 051eeda2d..32198c29d 100644 --- a/weed/shell/command_bucket_list.go +++ b/weed/shell/command_bucket_list.go @@ -67,7 +67,7 @@ func (c *commandBucketList) Do(args []string, commandEnv *CommandEnv, writer io. } } - if resp.Entry.Attributes.Replication == "" { + if resp.Entry.Attributes.Replication == "" || resp.Entry.Attributes.Replication == "000" { fmt.Fprintf(writer, " %s\n", resp.Entry.Name) } else { fmt.Fprintf(writer, " %s\t\t\treplication: %s\n", resp.Entry.Name, resp.Entry.Attributes.Replication) From 7d10fdf73720fb3234cd5cacfaf10fb79590d754 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Tue, 25 Feb 2020 11:13:06 -0800 Subject: [PATCH 0148/2432] fix directory lookup nil --- weed/filer2/filer_delete_entry.go | 3 --- weed/filesys/dir.go | 5 ++++- weed/replication/sink/filersink/filer_sink.go | 5 ++++- weed/s3api/filer_util.go | 4 ++++ weed/s3api/s3api_bucket_handlers.go | 5 +++-- weed/shell/command_fs_cat.go | 3 +++ weed/shell/command_fs_meta_cat.go | 3 +++ weed/shell/command_fs_mv.go | 2 +- 8 files changed, 22 insertions(+), 8 deletions(-) diff --git a/weed/filer2/filer_delete_entry.go b/weed/filer2/filer_delete_entry.go index 2d3654df6..af88d1512 100644 --- a/weed/filer2/filer_delete_entry.go +++ b/weed/filer2/filer_delete_entry.go @@ -64,9 +64,6 @@ func (f *Filer) doBatchDeleteFolderMetaAndData(ctx context.Context, entry *Entry } if lastFileName == "" && !isRecursive && len(entries) > 0 { // only for first iteration in the loop - for _, child := range entries { - println("existing children", child.Name()) - } return nil, fmt.Errorf("fail to delete non-empty folder: %s", entry.FullPath) } diff --git a/weed/filesys/dir.go b/weed/filesys/dir.go index 203545b44..2a4f6fa75 100644 --- a/weed/filesys/dir.go +++ b/weed/filesys/dir.go @@ -229,7 +229,7 @@ func (dir *Dir) Lookup(ctx context.Context, req *fuse.LookupRequest, resp *fuse. 
return node, nil } - glog.V(1).Infof("not found dir GetEntry %s: %v", fullFilePath, err) + glog.V(4).Infof("not found dir GetEntry %s: %v", fullFilePath, err) return nil, fuse.ENOENT } @@ -276,6 +276,9 @@ func (dir *Dir) removeOneFile(ctx context.Context, req *fuse.RemoveRequest) erro if err != nil { return err } + if entry == nil { + return nil + } dir.wfs.deleteFileChunks(ctx, entry.Chunks) diff --git a/weed/replication/sink/filersink/filer_sink.go b/weed/replication/sink/filersink/filer_sink.go index 8c4c39bc4..cdc4f4a45 100644 --- a/weed/replication/sink/filersink/filer_sink.go +++ b/weed/replication/sink/filersink/filer_sink.go @@ -98,7 +98,7 @@ func (fs *FilerSink) CreateEntry(ctx context.Context, key string, entry *filer_p Name: name, } glog.V(1).Infof("lookup: %v", lookupRequest) - if resp, err := client.LookupDirectoryEntry(ctx, lookupRequest); err == nil { + if resp, err := client.LookupDirectoryEntry(ctx, lookupRequest); err == nil && resp.Entry != nil { if filer2.ETag(resp.Entry.Chunks) == filer2.ETag(entry.Chunks) { glog.V(0).Infof("already replicated %s", key) return nil @@ -153,6 +153,9 @@ func (fs *FilerSink) UpdateEntry(ctx context.Context, key string, oldEntry *file glog.V(0).Infof("lookup %s: %v", key, err) return err } + if resp.Entry == nil { + return filer2.ErrNotFound + } existingEntry = resp.Entry diff --git a/weed/s3api/filer_util.go b/weed/s3api/filer_util.go index 2fceacd2a..91c34f0eb 100644 --- a/weed/s3api/filer_util.go +++ b/weed/s3api/filer_util.go @@ -154,6 +154,10 @@ func (s3a *S3ApiServer) exists(ctx context.Context, parentDirectoryPath string, glog.V(0).Infof("exists entry %v: %v", request, err) return fmt.Errorf("exists entry %s/%s: %v", parentDirectoryPath, entryName, err) } + if resp.Entry == nil { + exists = false + return nil + } exists = resp.Entry.IsDirectory == isDirectory diff --git a/weed/s3api/s3api_bucket_handlers.go b/weed/s3api/s3api_bucket_handlers.go index 492d94616..69275e212 100644 --- a/weed/s3api/s3api_bucket_handlers.go +++ b/weed/s3api/s3api_bucket_handlers.go @@ -11,9 +11,10 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/s3" + "github.com/gorilla/mux" + "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" - "github.com/gorilla/mux" ) var ( @@ -119,7 +120,7 @@ func (s3a *S3ApiServer) HeadBucketHandler(w http.ResponseWriter, r *http.Request } glog.V(1).Infof("lookup bucket: %v", request) - if _, err := client.LookupDirectoryEntry(ctx, request); err != nil { + if resp, err := client.LookupDirectoryEntry(ctx, request); err != nil || resp.Entry == nil { return fmt.Errorf("lookup bucket %s/%s: %v", s3a.option.BucketsPath, bucket, err) } diff --git a/weed/shell/command_fs_cat.go b/weed/shell/command_fs_cat.go index 238dee7f9..06c8232c9 100644 --- a/weed/shell/command_fs_cat.go +++ b/weed/shell/command_fs_cat.go @@ -56,6 +56,9 @@ func (c *commandFsCat) Do(args []string, commandEnv *CommandEnv, writer io.Write if err != nil { return err } + if respLookupEntry.Entry == nil { + return fmt.Errorf("file not found: %s", path) + } return filer2.StreamContent(commandEnv.MasterClient, writer, respLookupEntry.Entry.Chunks, 0, math.MaxInt32) diff --git a/weed/shell/command_fs_meta_cat.go b/weed/shell/command_fs_meta_cat.go index 9980f67a2..ec9a495f2 100644 --- a/weed/shell/command_fs_meta_cat.go +++ b/weed/shell/command_fs_meta_cat.go @@ -55,6 +55,9 @@ func (c *commandFsMetaCat) Do(args []string, commandEnv *CommandEnv, writer io.W if err != nil { return err } + if 
respLookupEntry.Entry == nil { + return fmt.Errorf("file not found: %s", path) + } m := jsonpb.Marshaler{ EmitDefaults: true, diff --git a/weed/shell/command_fs_mv.go b/weed/shell/command_fs_mv.go index e77755921..b9301ad3c 100644 --- a/weed/shell/command_fs_mv.go +++ b/weed/shell/command_fs_mv.go @@ -65,7 +65,7 @@ func (c *commandFsMv) Do(args []string, commandEnv *CommandEnv, writer io.Writer var targetDir, targetName string // moving a file or folder - if err == nil && respDestinationLookupEntry.Entry.IsDirectory { + if err == nil && respDestinationLookupEntry.Entry!= nil && respDestinationLookupEntry.Entry.IsDirectory { // to a directory targetDir = filepath.ToSlash(filepath.Join(destinationDir, destinationName)) targetName = sourceName From 986d63cd5e1e9cccfcd1a2382ede0e936ff0747a Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Tue, 25 Feb 2020 11:45:40 -0800 Subject: [PATCH 0149/2432] s3: increase list objects or parts limit from 1000 to 10000 10x of Amazon S3 limits --- weed/s3api/s3api_object_multipart_handlers.go | 8 ++++---- weed/s3api/s3api_objects_list_handlers.go | 7 ++----- 2 files changed, 6 insertions(+), 9 deletions(-) diff --git a/weed/s3api/s3api_object_multipart_handlers.go b/weed/s3api/s3api_object_multipart_handlers.go index 9012bedb4..4b08ce5e1 100644 --- a/weed/s3api/s3api_object_multipart_handlers.go +++ b/weed/s3api/s3api_object_multipart_handlers.go @@ -14,10 +14,10 @@ import ( ) const ( - maxObjectList = 1000 // Limit number of objects in a listObjectsResponse. - maxUploadsList = 1000 // Limit number of uploads in a listUploadsResponse. - maxPartsList = 1000 // Limit number of parts in a listPartsResponse. - globalMaxPartID = 10000 + maxObjectListSizeLimit = 10000 // Limit number of objects in a listObjectsResponse. + maxUploadsList = 10000 // Limit number of uploads in a listUploadsResponse. + maxPartsList = 10000 // Limit number of parts in a listPartsResponse. + globalMaxPartID = 100000 ) // NewMultipartUploadHandler - New multipart upload. diff --git a/weed/s3api/s3api_objects_list_handlers.go b/weed/s3api/s3api_objects_list_handlers.go index aa6849cbd..17ce2b547 100644 --- a/weed/s3api/s3api_objects_list_handlers.go +++ b/weed/s3api/s3api_objects_list_handlers.go @@ -11,14 +11,11 @@ import ( "strings" "time" + "github.com/gorilla/mux" + "github.com/chrislusf/seaweedfs/weed/filer2" "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" - "github.com/gorilla/mux" -) - -const ( - maxObjectListSizeLimit = 1000 // Limit number of objects in a listObjectsResponse. 
) func (s3a *S3ApiServer) ListObjectsV2Handler(w http.ResponseWriter, r *http.Request) { From 35dde5671186b3fcfd6e8c743064257627f47a77 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Tue, 25 Feb 2020 12:58:45 -0800 Subject: [PATCH 0150/2432] refactoring --- weed/s3api/s3api_object_copy_handlers.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/weed/s3api/s3api_object_copy_handlers.go b/weed/s3api/s3api_object_copy_handlers.go index b18ab329c..b8fb3f6a4 100644 --- a/weed/s3api/s3api_object_copy_handlers.go +++ b/weed/s3api/s3api_object_copy_handlers.go @@ -113,7 +113,7 @@ func (s3a *S3ApiServer) CopyObjectPartHandler(w http.ResponseWriter, r *http.Req } // check partID with maximum part ID for multipart objects - if partID > 10000 { + if partID > globalMaxPartID { writeErrorResponse(w, ErrInvalidMaxParts, r.URL) return } From bc38b72a20bd79bf67ee1770e20dcd538285cedf Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Tue, 25 Feb 2020 14:38:36 -0800 Subject: [PATCH 0151/2432] s3: implemented DeleteMultipleObjects --- other/java/client/src/main/proto/filer.proto | 4 + weed/pb/filer.proto | 4 + weed/pb/filer_pb/filer.pb.go | 286 ++++++++++++------- weed/s3api/filer_util.go | 55 ++++ weed/s3api/s3api_errors.go | 7 + weed/s3api/s3api_object_handlers.go | 93 +++++- weed/server/filer_grpc_server.go | 26 +- 7 files changed, 366 insertions(+), 109 deletions(-) diff --git a/other/java/client/src/main/proto/filer.proto b/other/java/client/src/main/proto/filer.proto index 6892effe8..d26c5595f 100644 --- a/other/java/client/src/main/proto/filer.proto +++ b/other/java/client/src/main/proto/filer.proto @@ -24,6 +24,9 @@ service SeaweedFiler { rpc DeleteEntry (DeleteEntryRequest) returns (DeleteEntryResponse) { } + rpc StreamDeleteEntries (stream DeleteEntryRequest) returns (stream DeleteEntryResponse) { + } + rpc AtomicRenameEntry (AtomicRenameEntryRequest) returns (AtomicRenameEntryResponse) { } @@ -147,6 +150,7 @@ message DeleteEntryRequest { } message DeleteEntryResponse { + string error = 1; } message AtomicRenameEntryRequest { diff --git a/weed/pb/filer.proto b/weed/pb/filer.proto index 6892effe8..d26c5595f 100644 --- a/weed/pb/filer.proto +++ b/weed/pb/filer.proto @@ -24,6 +24,9 @@ service SeaweedFiler { rpc DeleteEntry (DeleteEntryRequest) returns (DeleteEntryResponse) { } + rpc StreamDeleteEntries (stream DeleteEntryRequest) returns (stream DeleteEntryResponse) { + } + rpc AtomicRenameEntry (AtomicRenameEntryRequest) returns (AtomicRenameEntryResponse) { } @@ -147,6 +150,7 @@ message DeleteEntryRequest { } message DeleteEntryResponse { + string error = 1; } message AtomicRenameEntryRequest { diff --git a/weed/pb/filer_pb/filer.pb.go b/weed/pb/filer_pb/filer.pb.go index d34d66023..50ac5e435 100644 --- a/weed/pb/filer_pb/filer.pb.go +++ b/weed/pb/filer_pb/filer.pb.go @@ -624,6 +624,7 @@ func (m *DeleteEntryRequest) GetIgnoreRecursiveError() bool { } type DeleteEntryResponse struct { + Error string `protobuf:"bytes,1,opt,name=error" json:"error,omitempty"` } func (m *DeleteEntryResponse) Reset() { *m = DeleteEntryResponse{} } @@ -631,6 +632,13 @@ func (m *DeleteEntryResponse) String() string { return proto.CompactT func (*DeleteEntryResponse) ProtoMessage() {} func (*DeleteEntryResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{15} } +func (m *DeleteEntryResponse) GetError() string { + if m != nil { + return m.Error + } + return "" +} + type AtomicRenameEntryRequest struct { OldDirectory string `protobuf:"bytes,1,opt,name=old_directory,json=oldDirectory" 
json:"old_directory,omitempty"` OldName string `protobuf:"bytes,2,opt,name=old_name,json=oldName" json:"old_name,omitempty"` @@ -1088,6 +1096,7 @@ type SeaweedFilerClient interface { CreateEntry(ctx context.Context, in *CreateEntryRequest, opts ...grpc.CallOption) (*CreateEntryResponse, error) UpdateEntry(ctx context.Context, in *UpdateEntryRequest, opts ...grpc.CallOption) (*UpdateEntryResponse, error) DeleteEntry(ctx context.Context, in *DeleteEntryRequest, opts ...grpc.CallOption) (*DeleteEntryResponse, error) + StreamDeleteEntries(ctx context.Context, opts ...grpc.CallOption) (SeaweedFiler_StreamDeleteEntriesClient, error) AtomicRenameEntry(ctx context.Context, in *AtomicRenameEntryRequest, opts ...grpc.CallOption) (*AtomicRenameEntryResponse, error) AssignVolume(ctx context.Context, in *AssignVolumeRequest, opts ...grpc.CallOption) (*AssignVolumeResponse, error) LookupVolume(ctx context.Context, in *LookupVolumeRequest, opts ...grpc.CallOption) (*LookupVolumeResponse, error) @@ -1172,6 +1181,37 @@ func (c *seaweedFilerClient) DeleteEntry(ctx context.Context, in *DeleteEntryReq return out, nil } +func (c *seaweedFilerClient) StreamDeleteEntries(ctx context.Context, opts ...grpc.CallOption) (SeaweedFiler_StreamDeleteEntriesClient, error) { + stream, err := grpc.NewClientStream(ctx, &_SeaweedFiler_serviceDesc.Streams[1], c.cc, "/filer_pb.SeaweedFiler/StreamDeleteEntries", opts...) + if err != nil { + return nil, err + } + x := &seaweedFilerStreamDeleteEntriesClient{stream} + return x, nil +} + +type SeaweedFiler_StreamDeleteEntriesClient interface { + Send(*DeleteEntryRequest) error + Recv() (*DeleteEntryResponse, error) + grpc.ClientStream +} + +type seaweedFilerStreamDeleteEntriesClient struct { + grpc.ClientStream +} + +func (x *seaweedFilerStreamDeleteEntriesClient) Send(m *DeleteEntryRequest) error { + return x.ClientStream.SendMsg(m) +} + +func (x *seaweedFilerStreamDeleteEntriesClient) Recv() (*DeleteEntryResponse, error) { + m := new(DeleteEntryResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + func (c *seaweedFilerClient) AtomicRenameEntry(ctx context.Context, in *AtomicRenameEntryRequest, opts ...grpc.CallOption) (*AtomicRenameEntryResponse, error) { out := new(AtomicRenameEntryResponse) err := grpc.Invoke(ctx, "/filer_pb.SeaweedFiler/AtomicRenameEntry", in, out, c.cc, opts...) 
@@ -1234,6 +1274,7 @@ type SeaweedFilerServer interface { CreateEntry(context.Context, *CreateEntryRequest) (*CreateEntryResponse, error) UpdateEntry(context.Context, *UpdateEntryRequest) (*UpdateEntryResponse, error) DeleteEntry(context.Context, *DeleteEntryRequest) (*DeleteEntryResponse, error) + StreamDeleteEntries(SeaweedFiler_StreamDeleteEntriesServer) error AtomicRenameEntry(context.Context, *AtomicRenameEntryRequest) (*AtomicRenameEntryResponse, error) AssignVolume(context.Context, *AssignVolumeRequest) (*AssignVolumeResponse, error) LookupVolume(context.Context, *LookupVolumeRequest) (*LookupVolumeResponse, error) @@ -1339,6 +1380,32 @@ func _SeaweedFiler_DeleteEntry_Handler(srv interface{}, ctx context.Context, dec return interceptor(ctx, in, info, handler) } +func _SeaweedFiler_StreamDeleteEntries_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(SeaweedFilerServer).StreamDeleteEntries(&seaweedFilerStreamDeleteEntriesServer{stream}) +} + +type SeaweedFiler_StreamDeleteEntriesServer interface { + Send(*DeleteEntryResponse) error + Recv() (*DeleteEntryRequest, error) + grpc.ServerStream +} + +type seaweedFilerStreamDeleteEntriesServer struct { + grpc.ServerStream +} + +func (x *seaweedFilerStreamDeleteEntriesServer) Send(m *DeleteEntryResponse) error { + return x.ServerStream.SendMsg(m) +} + +func (x *seaweedFilerStreamDeleteEntriesServer) Recv() (*DeleteEntryRequest, error) { + m := new(DeleteEntryRequest) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + func _SeaweedFiler_AtomicRenameEntry_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(AtomicRenameEntryRequest) if err := dec(in); err != nil { @@ -1498,6 +1565,12 @@ var _SeaweedFiler_serviceDesc = grpc.ServiceDesc{ Handler: _SeaweedFiler_ListEntries_Handler, ServerStreams: true, }, + { + StreamName: "StreamDeleteEntries", + Handler: _SeaweedFiler_StreamDeleteEntries_Handler, + ServerStreams: true, + ClientStreams: true, + }, }, Metadata: "filer.proto", } @@ -1505,110 +1578,111 @@ var _SeaweedFiler_serviceDesc = grpc.ServiceDesc{ func init() { proto.RegisterFile("filer.proto", fileDescriptor0) } var fileDescriptor0 = []byte{ - // 1671 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xb4, 0x58, 0xcb, 0x6f, 0xdb, 0x46, - 0x1a, 0x37, 0xf5, 0xe6, 0x27, 0x29, 0xb1, 0x47, 0x76, 0xa2, 0xc8, 0x8f, 0x75, 0xe8, 0x4d, 0xd6, - 0x8b, 0x04, 0xde, 0xc0, 0x9b, 0x43, 0xb2, 0xd9, 0x3d, 0x24, 0x7e, 0x2c, 0x8c, 0x75, 0x1e, 0xa0, - 0x93, 0xc5, 0x2e, 0x0a, 0x94, 0xa0, 0xc9, 0x91, 0x3c, 0x35, 0xc5, 0x61, 0x87, 0x43, 0xdb, 0xe9, - 0x9f, 0x52, 0xa0, 0x7f, 0x45, 0xaf, 0x45, 0x2f, 0x45, 0xd1, 0x1e, 0xfa, 0xb7, 0xf4, 0xd8, 0x73, - 0x31, 0x33, 0x24, 0x35, 0x14, 0x65, 0x3b, 0x41, 0x91, 0x1b, 0xe7, 0x7b, 0xcd, 0x37, 0xbf, 0xef, - 0x29, 0x41, 0x7b, 0x48, 0x02, 0xcc, 0xb6, 0x22, 0x46, 0x39, 0x45, 0x2d, 0x79, 0x70, 0xa2, 0x63, - 0xeb, 0x35, 0x2c, 0x1f, 0x52, 0x7a, 0x9a, 0x44, 0xbb, 0x84, 0x61, 0x8f, 0x53, 0xf6, 0x7e, 0x2f, - 0xe4, 0xec, 0xbd, 0x8d, 0xbf, 0x4c, 0x70, 0xcc, 0xd1, 0x0a, 0x98, 0x7e, 0xc6, 0xe8, 0x1b, 0xeb, - 0xc6, 0xa6, 0x69, 0x4f, 0x08, 0x08, 0x41, 0x2d, 0x74, 0xc7, 0xb8, 0x5f, 0x91, 0x0c, 0xf9, 0x6d, - 0xed, 0xc1, 0xca, 0x6c, 0x83, 0x71, 0x44, 0xc3, 0x18, 0xa3, 0x7b, 0x50, 0xc7, 0x82, 0x20, 0xad, - 0xb5, 0xb7, 0x6f, 0x6e, 0x65, 0xae, 0x6c, 0x29, 0x39, 0xc5, 0xb5, 0xbe, 0x37, 0x00, 0x1d, 0x92, - 0x98, 0x0b, 0x22, 0xc1, 0xf1, 0x87, 
0xf9, 0x73, 0x0b, 0x1a, 0x11, 0xc3, 0x43, 0x72, 0x91, 0x7a, - 0x94, 0x9e, 0xd0, 0x43, 0x58, 0x88, 0xb9, 0xcb, 0xf8, 0x3e, 0xa3, 0xe3, 0x7d, 0x12, 0xe0, 0x57, - 0xc2, 0xe9, 0xaa, 0x14, 0x29, 0x33, 0xd0, 0x16, 0x20, 0x12, 0x7a, 0x41, 0x12, 0x93, 0x33, 0x7c, - 0x94, 0x71, 0xfb, 0xb5, 0x75, 0x63, 0xb3, 0x65, 0xcf, 0xe0, 0xa0, 0x45, 0xa8, 0x07, 0x64, 0x4c, - 0x78, 0xbf, 0xbe, 0x6e, 0x6c, 0x76, 0x6d, 0x75, 0xb0, 0xfe, 0x09, 0xbd, 0x82, 0xff, 0x1f, 0xf7, - 0xfc, 0x6f, 0x2a, 0x50, 0x97, 0x84, 0x1c, 0x63, 0x63, 0x82, 0x31, 0xba, 0x0b, 0x1d, 0x12, 0x3b, - 0x13, 0x20, 0x2a, 0xd2, 0xb7, 0x36, 0x89, 0x73, 0xcc, 0xd1, 0x03, 0x68, 0x78, 0x27, 0x49, 0x78, - 0x1a, 0xf7, 0xab, 0xeb, 0xd5, 0xcd, 0xf6, 0x76, 0x6f, 0x72, 0x91, 0x78, 0xe8, 0x8e, 0xe0, 0xd9, - 0xa9, 0x08, 0x7a, 0x02, 0xe0, 0x72, 0xce, 0xc8, 0x71, 0xc2, 0x71, 0x2c, 0x5f, 0xda, 0xde, 0xee, - 0x6b, 0x0a, 0x49, 0x8c, 0x9f, 0xe7, 0x7c, 0x5b, 0x93, 0x45, 0x4f, 0xa1, 0x85, 0x2f, 0x38, 0x0e, - 0x7d, 0xec, 0xf7, 0xeb, 0xf2, 0xa2, 0xd5, 0xa9, 0x17, 0x6d, 0xed, 0xa5, 0x7c, 0xf5, 0xbe, 0x5c, - 0x7c, 0xf0, 0x0c, 0xba, 0x05, 0x16, 0x9a, 0x87, 0xea, 0x29, 0xce, 0xa2, 0x2a, 0x3e, 0x05, 0xb2, - 0x67, 0x6e, 0x90, 0xa8, 0x04, 0xeb, 0xd8, 0xea, 0xf0, 0x8f, 0xca, 0x13, 0xc3, 0xda, 0x05, 0x73, - 0x3f, 0x09, 0x82, 0x5c, 0xd1, 0x27, 0x2c, 0x53, 0xf4, 0x09, 0x9b, 0xa0, 0x5c, 0xb9, 0x12, 0xe5, - 0xef, 0x0c, 0x58, 0xd8, 0x3b, 0xc3, 0x21, 0x7f, 0x45, 0x39, 0x19, 0x12, 0xcf, 0xe5, 0x84, 0x86, - 0xe8, 0x21, 0x98, 0x34, 0xf0, 0x9d, 0x2b, 0xc3, 0xd4, 0xa2, 0x41, 0xea, 0xf5, 0x43, 0x30, 0x43, - 0x7c, 0xee, 0x5c, 0x79, 0x5d, 0x2b, 0xc4, 0xe7, 0x4a, 0x7a, 0x03, 0xba, 0x3e, 0x0e, 0x30, 0xc7, - 0x4e, 0x1e, 0x1d, 0x11, 0xba, 0x8e, 0x22, 0xee, 0xa8, 0x70, 0xdc, 0x87, 0x9b, 0xc2, 0x64, 0xe4, - 0x32, 0x1c, 0x72, 0x27, 0x72, 0xf9, 0x89, 0x8c, 0x89, 0x69, 0x77, 0x43, 0x7c, 0xfe, 0x46, 0x52, - 0xdf, 0xb8, 0xfc, 0xc4, 0xfa, 0xcd, 0x00, 0x33, 0x0f, 0x26, 0xba, 0x0d, 0x4d, 0x71, 0xad, 0x43, - 0xfc, 0x14, 0x89, 0x86, 0x38, 0x1e, 0xf8, 0xa2, 0x2a, 0xe8, 0x70, 0x18, 0x63, 0x2e, 0xdd, 0xab, - 0xda, 0xe9, 0x49, 0x64, 0x56, 0x4c, 0xbe, 0x52, 0x85, 0x50, 0xb3, 0xe5, 0xb7, 0x40, 0x7c, 0xcc, - 0xc9, 0x18, 0xcb, 0x0b, 0xab, 0xb6, 0x3a, 0xa0, 0x1e, 0xd4, 0xb1, 0xc3, 0xdd, 0x91, 0xcc, 0x70, - 0xd3, 0xae, 0xe1, 0xb7, 0xee, 0x08, 0xfd, 0x19, 0x6e, 0xc4, 0x34, 0x61, 0x1e, 0x76, 0xb2, 0x6b, - 0x1b, 0x92, 0xdb, 0x51, 0xd4, 0x7d, 0x75, 0xb9, 0x05, 0xd5, 0x21, 0xf1, 0xfb, 0x4d, 0x09, 0xcc, - 0x7c, 0x31, 0x09, 0x0f, 0x7c, 0x5b, 0x30, 0xd1, 0xdf, 0x00, 0x72, 0x4b, 0x7e, 0xbf, 0x75, 0x89, - 0xa8, 0x99, 0xd9, 0xf5, 0xad, 0xff, 0x41, 0x23, 0x35, 0xbf, 0x0c, 0xe6, 0x19, 0x0d, 0x92, 0x71, - 0xfe, 0xec, 0xae, 0xdd, 0x52, 0x84, 0x03, 0x1f, 0xdd, 0x01, 0xd9, 0xe7, 0x1c, 0x91, 0x55, 0x15, - 0xf9, 0x48, 0x89, 0xd0, 0x7f, 0xb0, 0xec, 0x14, 0x1e, 0xa5, 0xa7, 0x44, 0xbd, 0xbe, 0x69, 0xa7, - 0x27, 0xeb, 0xd7, 0x0a, 0xdc, 0x28, 0xa6, 0xbb, 0xb8, 0x42, 0x5a, 0x91, 0x58, 0x19, 0xd2, 0x8c, - 0x34, 0x7b, 0x54, 0xc0, 0xab, 0xa2, 0xe3, 0x95, 0xa9, 0x8c, 0xa9, 0xaf, 0x2e, 0xe8, 0x2a, 0x95, - 0x97, 0xd4, 0xc7, 0x22, 0x5b, 0x13, 0xe2, 0x4b, 0x80, 0xbb, 0xb6, 0xf8, 0x14, 0x94, 0x11, 0xf1, - 0xd3, 0xf6, 0x21, 0x3e, 0xa5, 0x7b, 0x4c, 0xda, 0x6d, 0xa8, 0x90, 0xa9, 0x93, 0x08, 0xd9, 0x58, - 0x50, 0x9b, 0x2a, 0x0e, 0xe2, 0x1b, 0xad, 0x43, 0x9b, 0xe1, 0x28, 0x48, 0xb3, 0x57, 0xc2, 0x67, - 0xda, 0x3a, 0x09, 0xad, 0x01, 0x78, 0x34, 0x08, 0xb0, 0x27, 0x05, 0x4c, 0x29, 0xa0, 0x51, 0x44, - 0xe6, 0x70, 0x1e, 0x38, 0x31, 0xf6, 0xfa, 0xb0, 0x6e, 0x6c, 0xd6, 0xed, 0x06, 0xe7, 0xc1, 0x11, - 0xf6, 0xc4, 0x3b, 0x92, 0x18, 0x33, 0x47, 0x36, 0xa0, 0xb6, 
0xd4, 0x6b, 0x09, 0x82, 0x6c, 0x93, - 0xab, 0x00, 0x23, 0x46, 0x93, 0x48, 0x71, 0x3b, 0xeb, 0x55, 0xd1, 0x8b, 0x25, 0x45, 0xb2, 0xef, - 0xc1, 0x8d, 0xf8, 0xfd, 0x38, 0x20, 0xe1, 0xa9, 0xc3, 0x5d, 0x36, 0xc2, 0xbc, 0xdf, 0x55, 0x39, - 0x9c, 0x52, 0xdf, 0x4a, 0xa2, 0x15, 0x01, 0xda, 0x61, 0xd8, 0xe5, 0xf8, 0x23, 0xc6, 0xce, 0x87, - 0x55, 0x37, 0x5a, 0x82, 0x06, 0x75, 0xf0, 0x85, 0x17, 0xa4, 0x45, 0x56, 0xa7, 0x7b, 0x17, 0x5e, - 0x60, 0x3d, 0x80, 0x5e, 0xe1, 0xc6, 0xb4, 0x31, 0x2f, 0x42, 0x1d, 0x33, 0x46, 0xb3, 0x36, 0xa2, - 0x0e, 0xd6, 0xff, 0x01, 0xbd, 0x8b, 0xfc, 0x4f, 0xe1, 0x9e, 0xb5, 0x04, 0xbd, 0x82, 0x69, 0xe5, - 0x87, 0xf5, 0xa3, 0x01, 0x68, 0x57, 0x76, 0x83, 0x3f, 0x36, 0x88, 0x45, 0x7d, 0x8a, 0x21, 0xa1, - 0xba, 0x8d, 0xef, 0x72, 0x37, 0x1d, 0x61, 0x1d, 0x12, 0x2b, 0xfb, 0xbb, 0x2e, 0x77, 0xd3, 0x51, - 0xc2, 0xb0, 0x97, 0x30, 0x31, 0xd5, 0x64, 0x12, 0xca, 0x51, 0x62, 0x67, 0x24, 0xf4, 0x18, 0x6e, - 0x91, 0x51, 0x48, 0x19, 0x9e, 0x88, 0x39, 0x0a, 0xaa, 0x86, 0x14, 0x5e, 0x54, 0xdc, 0x5c, 0x61, - 0x4f, 0x22, 0xb7, 0x04, 0xbd, 0xc2, 0x33, 0xd2, 0xe7, 0x7d, 0x6d, 0x40, 0xff, 0x39, 0xa7, 0x63, - 0xe2, 0xd9, 0x58, 0xb8, 0x59, 0x78, 0xe4, 0x06, 0x74, 0x45, 0xe7, 0x9d, 0x7e, 0x68, 0x87, 0x06, - 0xfe, 0x64, 0xb2, 0xdd, 0x01, 0xd1, 0x7c, 0x1d, 0xed, 0xbd, 0x4d, 0x1a, 0xf8, 0x32, 0xe7, 0x36, - 0x40, 0x74, 0x48, 0x4d, 0x5f, 0xcd, 0xf8, 0x4e, 0x88, 0xcf, 0x0b, 0xfa, 0x42, 0x48, 0xea, 0xab, - 0xb6, 0xda, 0x0c, 0xf1, 0xb9, 0xd0, 0xb7, 0x96, 0xe1, 0xce, 0x0c, 0xdf, 0x52, 0xcf, 0x7f, 0x36, - 0xa0, 0xf7, 0x3c, 0x8e, 0xc9, 0x28, 0xfc, 0xaf, 0x6c, 0x30, 0x99, 0xd3, 0x8b, 0x50, 0xf7, 0x68, - 0x12, 0x72, 0xe9, 0x6c, 0xdd, 0x56, 0x87, 0xa9, 0x9a, 0xab, 0x94, 0x6a, 0x6e, 0xaa, 0x6a, 0xab, - 0xe5, 0xaa, 0xd5, 0xaa, 0xb2, 0x56, 0xa8, 0xca, 0x3f, 0x41, 0x5b, 0x84, 0xd3, 0xf1, 0x70, 0xc8, - 0x31, 0x4b, 0x7b, 0x32, 0x08, 0xd2, 0x8e, 0xa4, 0x08, 0x01, 0x7d, 0x76, 0xa8, 0xb6, 0x0c, 0xd1, - 0x64, 0x70, 0xfc, 0x62, 0xc0, 0x62, 0xf1, 0x29, 0x69, 0x11, 0x5c, 0x3a, 0x43, 0x44, 0xd3, 0x62, - 0x41, 0xfa, 0x0e, 0xf1, 0x29, 0xca, 0x3f, 0x4a, 0x8e, 0x03, 0xe2, 0x39, 0x82, 0xa1, 0xfc, 0x37, - 0x15, 0xe5, 0x1d, 0x0b, 0x26, 0xa8, 0xd4, 0x74, 0x54, 0x10, 0xd4, 0xdc, 0x84, 0x9f, 0x64, 0x73, - 0x44, 0x7c, 0x4f, 0x21, 0xd5, 0xb8, 0x0e, 0xa9, 0x66, 0x09, 0x29, 0xeb, 0x31, 0xf4, 0xd4, 0xca, - 0x59, 0x0c, 0xcc, 0x2a, 0x40, 0x3e, 0x1b, 0xe2, 0xbe, 0xa1, 0x1a, 0x54, 0x36, 0x1c, 0x62, 0xeb, - 0x5f, 0x60, 0x1e, 0x52, 0x65, 0x21, 0x46, 0x8f, 0xc0, 0x0c, 0xb2, 0x83, 0x14, 0x6d, 0x6f, 0xa3, - 0x49, 0xdd, 0x66, 0x72, 0xf6, 0x44, 0xc8, 0x7a, 0x06, 0xad, 0x8c, 0x9c, 0xa1, 0x63, 0x5c, 0x86, - 0x4e, 0x65, 0x0a, 0x1d, 0xeb, 0x07, 0x03, 0x16, 0x8b, 0x2e, 0xa7, 0x01, 0x78, 0x07, 0xdd, 0xfc, - 0x0a, 0x67, 0xec, 0x46, 0xa9, 0x2f, 0x8f, 0x74, 0x5f, 0xca, 0x6a, 0xb9, 0x83, 0xf1, 0x4b, 0x37, - 0x52, 0x59, 0xdb, 0x09, 0x34, 0xd2, 0xe0, 0x2d, 0x2c, 0x94, 0x44, 0x66, 0xec, 0x5b, 0x7f, 0xd5, - 0xf7, 0xad, 0xc2, 0xce, 0x98, 0x6b, 0xeb, 0x4b, 0xd8, 0x53, 0xb8, 0xad, 0x4a, 0x7c, 0x27, 0x8f, - 0x56, 0x86, 0x7d, 0x31, 0xa8, 0xc6, 0x74, 0x50, 0xad, 0x01, 0xf4, 0xcb, 0xaa, 0x69, 0xa1, 0x8d, - 0x60, 0xe1, 0x88, 0xbb, 0x9c, 0xc4, 0x9c, 0x78, 0xf9, 0xe2, 0x3f, 0x95, 0x05, 0xc6, 0x75, 0x53, - 0xae, 0x5c, 0x71, 0xf3, 0x50, 0xe5, 0x3c, 0xcb, 0x54, 0xf1, 0x29, 0xa2, 0x80, 0xf4, 0x9b, 0xd2, - 0x18, 0x7c, 0x82, 0xab, 0x44, 0x3e, 0x70, 0xca, 0xdd, 0x40, 0x6d, 0x11, 0x35, 0xb9, 0x45, 0x98, - 0x92, 0x22, 0xd7, 0x08, 0x35, 0x68, 0x7d, 0xc5, 0xad, 0xab, 0x1d, 0x43, 0x10, 0x24, 0x73, 0x15, - 0x40, 0x16, 0xa5, 0xaa, 0xa7, 0x86, 0xd2, 0x15, 0x94, 0x1d, 0x41, 0xb0, 0xd6, 0x60, 
0xe5, 0xdf, - 0x98, 0x8b, 0x7d, 0x88, 0xed, 0xd0, 0x70, 0x48, 0x46, 0x09, 0x73, 0xb5, 0x50, 0x58, 0xdf, 0x1a, - 0xb0, 0x7a, 0x89, 0x40, 0xfa, 0xe0, 0x3e, 0x34, 0xc7, 0x6e, 0xcc, 0x31, 0xcb, 0xaa, 0x24, 0x3b, - 0x4e, 0x43, 0x51, 0xb9, 0x0e, 0x8a, 0x6a, 0x09, 0x8a, 0x25, 0x68, 0x8c, 0xdd, 0x0b, 0x67, 0x7c, - 0x9c, 0x2e, 0x3c, 0xf5, 0xb1, 0x7b, 0xf1, 0xf2, 0x58, 0xf6, 0x30, 0xc2, 0x9c, 0xe3, 0xc4, 0x3b, - 0xc5, 0x3c, 0xce, 0x7b, 0x18, 0x61, 0x2f, 0x14, 0x65, 0xfb, 0xa7, 0x26, 0x74, 0x8e, 0xb0, 0x7b, - 0x8e, 0xb1, 0x2f, 0x3d, 0x47, 0xa3, 0xac, 0x62, 0x8a, 0xbf, 0x2b, 0xd1, 0xbd, 0xe9, 0xd2, 0x98, - 0xf9, 0x43, 0x76, 0x70, 0xff, 0x3a, 0xb1, 0x34, 0xf9, 0xe6, 0xd0, 0x2b, 0x68, 0x6b, 0x3f, 0xdc, - 0xd0, 0x8a, 0xa6, 0x58, 0xfa, 0x3d, 0x3a, 0x58, 0xbd, 0x84, 0x9b, 0x59, 0x7b, 0x64, 0xa0, 0x43, - 0x68, 0x6b, 0xfb, 0x86, 0x6e, 0xaf, 0xbc, 0xf8, 0xe8, 0xf6, 0x66, 0x2c, 0x29, 0xd6, 0x9c, 0xb0, - 0xa6, 0x6d, 0x0d, 0xba, 0xb5, 0xf2, 0x9e, 0xa2, 0x5b, 0x9b, 0xb5, 0x6a, 0x48, 0x6b, 0xda, 0x90, - 0xd6, 0xad, 0x95, 0x57, 0x10, 0xdd, 0xda, 0xac, 0xc9, 0x3e, 0x87, 0x3e, 0x87, 0x85, 0xd2, 0xf8, - 0x44, 0xd6, 0x44, 0xeb, 0xb2, 0xb9, 0x3f, 0xd8, 0xb8, 0x52, 0x26, 0xb7, 0xff, 0x1a, 0x3a, 0xfa, - 0xd4, 0x42, 0x9a, 0x43, 0x33, 0x06, 0xf3, 0x60, 0xed, 0x32, 0xb6, 0x6e, 0x50, 0x6f, 0xa7, 0xba, - 0xc1, 0x19, 0x03, 0x45, 0x37, 0x38, 0xab, 0x0b, 0x5b, 0x73, 0xe8, 0x33, 0x98, 0x9f, 0x6e, 0x6b, - 0xe8, 0xee, 0x34, 0x6c, 0xa5, 0x6e, 0x39, 0xb0, 0xae, 0x12, 0xc9, 0x8d, 0x1f, 0x00, 0x4c, 0xba, - 0x15, 0x5a, 0x9e, 0xe8, 0x94, 0xba, 0xe5, 0x60, 0x65, 0x36, 0x33, 0x37, 0xf5, 0x05, 0x2c, 0xcd, - 0x6c, 0x09, 0x48, 0x2b, 0x93, 0xab, 0x9a, 0xca, 0xe0, 0x2f, 0xd7, 0xca, 0x65, 0x77, 0xbd, 0x58, - 0x83, 0xf9, 0x58, 0x15, 0xf2, 0x30, 0xde, 0xf2, 0x02, 0x82, 0x43, 0xfe, 0x02, 0xa4, 0xc6, 0x1b, - 0x46, 0x39, 0x3d, 0x6e, 0xc8, 0xbf, 0xa4, 0xfe, 0xfe, 0x7b, 0x00, 0x00, 0x00, 0xff, 0xff, 0xaa, - 0x2b, 0xd6, 0xf6, 0xa1, 0x12, 0x00, 0x00, + // 1692 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xb4, 0x58, 0xcb, 0x6f, 0xdb, 0xc8, + 0x19, 0x37, 0xf5, 0xe6, 0x27, 0x29, 0xb1, 0x47, 0x76, 0xa2, 0xc8, 0x8f, 0x3a, 0x74, 0x93, 0xba, + 0x48, 0xe0, 0x1a, 0x6e, 0x0e, 0x49, 0xd3, 0x1e, 0x12, 0x3f, 0x0a, 0xa3, 0xce, 0x03, 0x74, 0x52, + 0xa4, 0x28, 0x50, 0x82, 0x26, 0x47, 0xf2, 0xd4, 0x24, 0x47, 0x1d, 0x0e, 0x6d, 0xa7, 0x7f, 0x4a, + 0x81, 0x1e, 0xfa, 0x37, 0xf4, 0xba, 0xd8, 0xcb, 0x62, 0x81, 0x3d, 0xec, 0xdf, 0xb2, 0xc7, 0x3d, + 0x2f, 0x66, 0x86, 0xa4, 0x86, 0xa2, 0x6c, 0x27, 0xbb, 0xc8, 0x8d, 0xf3, 0xbd, 0xe6, 0x9b, 0xdf, + 0xf7, 0x94, 0xa0, 0x3d, 0x24, 0x01, 0x66, 0x5b, 0x63, 0x46, 0x39, 0x45, 0x2d, 0x79, 0x70, 0xc6, + 0x27, 0xd6, 0x1b, 0x58, 0x3e, 0xa2, 0xf4, 0x2c, 0x19, 0xef, 0x11, 0x86, 0x3d, 0x4e, 0xd9, 0xc7, + 0xfd, 0x88, 0xb3, 0x8f, 0x36, 0xfe, 0x57, 0x82, 0x63, 0x8e, 0x56, 0xc0, 0xf4, 0x33, 0x46, 0xdf, + 0x58, 0x37, 0x36, 0x4d, 0x7b, 0x42, 0x40, 0x08, 0x6a, 0x91, 0x1b, 0xe2, 0x7e, 0x45, 0x32, 0xe4, + 0xb7, 0xb5, 0x0f, 0x2b, 0xb3, 0x0d, 0xc6, 0x63, 0x1a, 0xc5, 0x18, 0x3d, 0x80, 0x3a, 0x16, 0x04, + 0x69, 0xad, 0xbd, 0x73, 0x7b, 0x2b, 0x73, 0x65, 0x4b, 0xc9, 0x29, 0xae, 0xf5, 0xb5, 0x01, 0xe8, + 0x88, 0xc4, 0x5c, 0x10, 0x09, 0x8e, 0x3f, 0xcd, 0x9f, 0x3b, 0xd0, 0x18, 0x33, 0x3c, 0x24, 0x97, + 0xa9, 0x47, 0xe9, 0x09, 0x3d, 0x86, 0x85, 0x98, 0xbb, 0x8c, 0x1f, 0x30, 0x1a, 0x1e, 0x90, 0x00, + 0xbf, 0x16, 0x4e, 0x57, 0xa5, 0x48, 0x99, 0x81, 0xb6, 0x00, 0x91, 0xc8, 0x0b, 0x92, 0x98, 0x9c, + 0xe3, 0xe3, 0x8c, 0xdb, 0xaf, 0xad, 0x1b, 0x9b, 0x2d, 0x7b, 0x06, 0x07, 0x2d, 0x42, 0x3d, 0x20, + 0x21, 0xe1, 0xfd, 
0xfa, 0xba, 0xb1, 0xd9, 0xb5, 0xd5, 0xc1, 0xfa, 0x23, 0xf4, 0x0a, 0xfe, 0x7f, + 0xde, 0xf3, 0xff, 0x5b, 0x81, 0xba, 0x24, 0xe4, 0x18, 0x1b, 0x13, 0x8c, 0xd1, 0x7d, 0xe8, 0x90, + 0xd8, 0x99, 0x00, 0x51, 0x91, 0xbe, 0xb5, 0x49, 0x9c, 0x63, 0x8e, 0x1e, 0x41, 0xc3, 0x3b, 0x4d, + 0xa2, 0xb3, 0xb8, 0x5f, 0x5d, 0xaf, 0x6e, 0xb6, 0x77, 0x7a, 0x93, 0x8b, 0xc4, 0x43, 0x77, 0x05, + 0xcf, 0x4e, 0x45, 0xd0, 0x53, 0x00, 0x97, 0x73, 0x46, 0x4e, 0x12, 0x8e, 0x63, 0xf9, 0xd2, 0xf6, + 0x4e, 0x5f, 0x53, 0x48, 0x62, 0xfc, 0x22, 0xe7, 0xdb, 0x9a, 0x2c, 0x7a, 0x06, 0x2d, 0x7c, 0xc9, + 0x71, 0xe4, 0x63, 0xbf, 0x5f, 0x97, 0x17, 0xad, 0x4e, 0xbd, 0x68, 0x6b, 0x3f, 0xe5, 0xab, 0xf7, + 0xe5, 0xe2, 0x83, 0xe7, 0xd0, 0x2d, 0xb0, 0xd0, 0x3c, 0x54, 0xcf, 0x70, 0x16, 0x55, 0xf1, 0x29, + 0x90, 0x3d, 0x77, 0x83, 0x44, 0x25, 0x58, 0xc7, 0x56, 0x87, 0x3f, 0x54, 0x9e, 0x1a, 0xd6, 0x1e, + 0x98, 0x07, 0x49, 0x10, 0xe4, 0x8a, 0x3e, 0x61, 0x99, 0xa2, 0x4f, 0xd8, 0x04, 0xe5, 0xca, 0xb5, + 0x28, 0x7f, 0x65, 0xc0, 0xc2, 0xfe, 0x39, 0x8e, 0xf8, 0x6b, 0xca, 0xc9, 0x90, 0x78, 0x2e, 0x27, + 0x34, 0x42, 0x8f, 0xc1, 0xa4, 0x81, 0xef, 0x5c, 0x1b, 0xa6, 0x16, 0x0d, 0x52, 0xaf, 0x1f, 0x83, + 0x19, 0xe1, 0x0b, 0xe7, 0xda, 0xeb, 0x5a, 0x11, 0xbe, 0x50, 0xd2, 0x1b, 0xd0, 0xf5, 0x71, 0x80, + 0x39, 0x76, 0xf2, 0xe8, 0x88, 0xd0, 0x75, 0x14, 0x71, 0x57, 0x85, 0xe3, 0x21, 0xdc, 0x16, 0x26, + 0xc7, 0x2e, 0xc3, 0x11, 0x77, 0xc6, 0x2e, 0x3f, 0x95, 0x31, 0x31, 0xed, 0x6e, 0x84, 0x2f, 0xde, + 0x4a, 0xea, 0x5b, 0x97, 0x9f, 0x5a, 0x3f, 0x1a, 0x60, 0xe6, 0xc1, 0x44, 0x77, 0xa1, 0x29, 0xae, + 0x75, 0x88, 0x9f, 0x22, 0xd1, 0x10, 0xc7, 0x43, 0x5f, 0x54, 0x05, 0x1d, 0x0e, 0x63, 0xcc, 0xa5, + 0x7b, 0x55, 0x3b, 0x3d, 0x89, 0xcc, 0x8a, 0xc9, 0xbf, 0x55, 0x21, 0xd4, 0x6c, 0xf9, 0x2d, 0x10, + 0x0f, 0x39, 0x09, 0xb1, 0xbc, 0xb0, 0x6a, 0xab, 0x03, 0xea, 0x41, 0x1d, 0x3b, 0xdc, 0x1d, 0xc9, + 0x0c, 0x37, 0xed, 0x1a, 0x7e, 0xe7, 0x8e, 0xd0, 0xaf, 0xe1, 0x56, 0x4c, 0x13, 0xe6, 0x61, 0x27, + 0xbb, 0xb6, 0x21, 0xb9, 0x1d, 0x45, 0x3d, 0x50, 0x97, 0x5b, 0x50, 0x1d, 0x12, 0xbf, 0xdf, 0x94, + 0xc0, 0xcc, 0x17, 0x93, 0xf0, 0xd0, 0xb7, 0x05, 0x13, 0xfd, 0x0e, 0x20, 0xb7, 0xe4, 0xf7, 0x5b, + 0x57, 0x88, 0x9a, 0x99, 0x5d, 0xdf, 0xfa, 0x00, 0x8d, 0xd4, 0xfc, 0x32, 0x98, 0xe7, 0x34, 0x48, + 0xc2, 0xfc, 0xd9, 0x5d, 0xbb, 0xa5, 0x08, 0x87, 0x3e, 0xba, 0x07, 0xb2, 0xcf, 0x39, 0x22, 0xab, + 0x2a, 0xf2, 0x91, 0x12, 0xa1, 0xbf, 0x60, 0xd9, 0x29, 0x3c, 0x4a, 0xcf, 0x88, 0x7a, 0x7d, 0xd3, + 0x4e, 0x4f, 0xd6, 0x0f, 0x15, 0xb8, 0x55, 0x4c, 0x77, 0x71, 0x85, 0xb4, 0x22, 0xb1, 0x32, 0xa4, + 0x19, 0x69, 0xf6, 0xb8, 0x80, 0x57, 0x45, 0xc7, 0x2b, 0x53, 0x09, 0xa9, 0xaf, 0x2e, 0xe8, 0x2a, + 0x95, 0x57, 0xd4, 0xc7, 0x22, 0x5b, 0x13, 0xe2, 0x4b, 0x80, 0xbb, 0xb6, 0xf8, 0x14, 0x94, 0x11, + 0xf1, 0xd3, 0xf6, 0x21, 0x3e, 0xa5, 0x7b, 0x4c, 0xda, 0x6d, 0xa8, 0x90, 0xa9, 0x93, 0x08, 0x59, + 0x28, 0xa8, 0x4d, 0x15, 0x07, 0xf1, 0x8d, 0xd6, 0xa1, 0xcd, 0xf0, 0x38, 0x48, 0xb3, 0x57, 0xc2, + 0x67, 0xda, 0x3a, 0x09, 0xad, 0x01, 0x78, 0x34, 0x08, 0xb0, 0x27, 0x05, 0x4c, 0x29, 0xa0, 0x51, + 0x44, 0xe6, 0x70, 0x1e, 0x38, 0x31, 0xf6, 0xfa, 0xb0, 0x6e, 0x6c, 0xd6, 0xed, 0x06, 0xe7, 0xc1, + 0x31, 0xf6, 0xc4, 0x3b, 0x92, 0x18, 0x33, 0x47, 0x36, 0xa0, 0xb6, 0xd4, 0x6b, 0x09, 0x82, 0x6c, + 0x93, 0xab, 0x00, 0x23, 0x46, 0x93, 0xb1, 0xe2, 0x76, 0xd6, 0xab, 0xa2, 0x17, 0x4b, 0x8a, 0x64, + 0x3f, 0x80, 0x5b, 0xf1, 0xc7, 0x30, 0x20, 0xd1, 0x99, 0xc3, 0x5d, 0x36, 0xc2, 0xbc, 0xdf, 0x55, + 0x39, 0x9c, 0x52, 0xdf, 0x49, 0xa2, 0x35, 0x06, 0xb4, 0xcb, 0xb0, 0xcb, 0xf1, 0x67, 0x8c, 0x9d, + 0x4f, 0xab, 0x6e, 0xb4, 0x04, 0x0d, 0xea, 
0xe0, 0x4b, 0x2f, 0x48, 0x8b, 0xac, 0x4e, 0xf7, 0x2f, + 0xbd, 0xc0, 0x7a, 0x04, 0xbd, 0xc2, 0x8d, 0x69, 0x63, 0x5e, 0x84, 0x3a, 0x66, 0x8c, 0x66, 0x6d, + 0x44, 0x1d, 0xac, 0xbf, 0x01, 0x7a, 0x3f, 0xf6, 0xbf, 0x84, 0x7b, 0xd6, 0x12, 0xf4, 0x0a, 0xa6, + 0x95, 0x1f, 0xd6, 0xb7, 0x06, 0xa0, 0x3d, 0xd9, 0x0d, 0x7e, 0xd9, 0x20, 0x16, 0xf5, 0x29, 0x86, + 0x84, 0xea, 0x36, 0xbe, 0xcb, 0xdd, 0x74, 0x84, 0x75, 0x48, 0xac, 0xec, 0xef, 0xb9, 0xdc, 0x4d, + 0x47, 0x09, 0xc3, 0x5e, 0xc2, 0xc4, 0x54, 0x93, 0x49, 0x28, 0x47, 0x89, 0x9d, 0x91, 0xd0, 0x13, + 0xb8, 0x43, 0x46, 0x11, 0x65, 0x78, 0x22, 0xe6, 0x28, 0xa8, 0x1a, 0x52, 0x78, 0x51, 0x71, 0x73, + 0x85, 0x7d, 0x89, 0xdc, 0x23, 0xe8, 0x15, 0x9e, 0x71, 0x2d, 0xcc, 0xff, 0x31, 0xa0, 0xff, 0x82, + 0xd3, 0x90, 0x78, 0x36, 0x16, 0xce, 0x17, 0x9e, 0xbe, 0x01, 0x5d, 0xd1, 0x8f, 0xa7, 0x9f, 0xdf, + 0xa1, 0x81, 0x3f, 0x99, 0x77, 0xf7, 0x40, 0xb4, 0x64, 0x47, 0x43, 0xa1, 0x49, 0x03, 0x5f, 0x66, + 0xe2, 0x06, 0x88, 0xbe, 0xa9, 0xe9, 0xab, 0xc9, 0xdf, 0x89, 0xf0, 0x45, 0x41, 0x5f, 0x08, 0x49, + 0x7d, 0xd5, 0x6c, 0x9b, 0x11, 0xbe, 0x10, 0xfa, 0xd6, 0x32, 0xdc, 0x9b, 0xe1, 0x5b, 0x1a, 0xae, + 0xef, 0x0c, 0xe8, 0xbd, 0x88, 0x63, 0x32, 0x8a, 0xfe, 0x2a, 0xdb, 0x4e, 0xe6, 0xf4, 0x22, 0xd4, + 0x3d, 0x9a, 0x44, 0x5c, 0x3a, 0x5b, 0xb7, 0xd5, 0x61, 0xaa, 0x12, 0x2b, 0xa5, 0x4a, 0x9c, 0xaa, + 0xe5, 0x6a, 0xb9, 0x96, 0xb5, 0x5a, 0xad, 0x15, 0x6a, 0xf5, 0x57, 0xd0, 0x16, 0x41, 0x76, 0x3c, + 0x1c, 0x71, 0xcc, 0xd2, 0x4e, 0x0d, 0x82, 0xb4, 0x2b, 0x29, 0x42, 0x40, 0x9f, 0x28, 0xaa, 0x59, + 0xc3, 0x78, 0x32, 0x4e, 0xbe, 0x37, 0x60, 0xb1, 0xf8, 0x94, 0x34, 0x66, 0x57, 0x4e, 0x16, 0xd1, + 0xca, 0x58, 0x90, 0xbe, 0x43, 0x7c, 0x8a, 0xa6, 0x30, 0x4e, 0x4e, 0x02, 0xe2, 0x39, 0x82, 0xa1, + 0xfc, 0x37, 0x15, 0xe5, 0x3d, 0x0b, 0x26, 0xa8, 0xd4, 0x74, 0x54, 0x10, 0xd4, 0xdc, 0x84, 0x9f, + 0x66, 0xd3, 0x45, 0x7c, 0x4f, 0x21, 0xd5, 0xb8, 0x09, 0xa9, 0x66, 0x09, 0x29, 0xeb, 0x09, 0xf4, + 0xd4, 0x22, 0x5a, 0x0c, 0xcc, 0x2a, 0x40, 0x3e, 0x31, 0xe2, 0xbe, 0xa1, 0xda, 0x56, 0x36, 0x32, + 0x62, 0xeb, 0x4f, 0x60, 0x1e, 0x51, 0x65, 0x21, 0x46, 0xdb, 0x60, 0x06, 0xd9, 0x41, 0x8a, 0xb6, + 0x77, 0xd0, 0xa4, 0x9a, 0x33, 0x39, 0x7b, 0x22, 0x64, 0x3d, 0x87, 0x56, 0x46, 0xce, 0xd0, 0x31, + 0xae, 0x42, 0xa7, 0x32, 0x85, 0x8e, 0xf5, 0x8d, 0x01, 0x8b, 0x45, 0x97, 0xd3, 0x00, 0xbc, 0x87, + 0x6e, 0x7e, 0x85, 0x13, 0xba, 0xe3, 0xd4, 0x97, 0x6d, 0xdd, 0x97, 0xb2, 0x5a, 0xee, 0x60, 0xfc, + 0xca, 0x1d, 0xab, 0xac, 0xed, 0x04, 0x1a, 0x69, 0xf0, 0x0e, 0x16, 0x4a, 0x22, 0x33, 0xb6, 0xb0, + 0xdf, 0xea, 0x5b, 0x58, 0x61, 0x93, 0xcc, 0xb5, 0xf5, 0xd5, 0xec, 0x19, 0xdc, 0x55, 0x85, 0xbf, + 0x9b, 0x47, 0x2b, 0xc3, 0xbe, 0x18, 0x54, 0x63, 0x3a, 0xa8, 0xd6, 0x00, 0xfa, 0x65, 0xd5, 0xb4, + 0xd0, 0x46, 0xb0, 0x70, 0xcc, 0x5d, 0x4e, 0x62, 0x4e, 0xbc, 0xfc, 0xe7, 0xc0, 0x54, 0x16, 0x18, + 0x37, 0xcd, 0xbe, 0x72, 0xc5, 0xcd, 0x43, 0x95, 0xf3, 0x2c, 0x53, 0xc5, 0xa7, 0x88, 0x02, 0xd2, + 0x6f, 0x4a, 0x63, 0xf0, 0x05, 0xae, 0x12, 0xf9, 0xc0, 0x29, 0x77, 0x03, 0xb5, 0x5b, 0xd4, 0xe4, + 0x6e, 0x61, 0x4a, 0x8a, 0x5c, 0x2e, 0xd4, 0xf8, 0xf5, 0x15, 0xb7, 0xae, 0x36, 0x0f, 0x41, 0x90, + 0xcc, 0x55, 0x00, 0x59, 0x94, 0xaa, 0x9e, 0x1a, 0x4a, 0x57, 0x50, 0x76, 0x05, 0xc1, 0x5a, 0x83, + 0x95, 0x3f, 0x63, 0x2e, 0xb6, 0x24, 0xb6, 0x4b, 0xa3, 0x21, 0x19, 0x25, 0xcc, 0xd5, 0x42, 0x61, + 0xfd, 0xdf, 0x80, 0xd5, 0x2b, 0x04, 0xd2, 0x07, 0xf7, 0xa1, 0x19, 0xba, 0x31, 0xc7, 0x2c, 0xab, + 0x92, 0xec, 0x38, 0x0d, 0x45, 0xe5, 0x26, 0x28, 0xaa, 0x25, 0x28, 0x96, 0xa0, 0x11, 0xba, 0x97, + 0x4e, 0x78, 0x92, 0xae, 0x41, 0xf5, 0xd0, 0xbd, 0x7c, 0x75, 0x22, 
0x7b, 0x18, 0x61, 0xce, 0x49, + 0xe2, 0x9d, 0x61, 0x1e, 0xe7, 0x3d, 0x8c, 0xb0, 0x97, 0x8a, 0xb2, 0xf3, 0xbf, 0x16, 0x74, 0x8e, + 0xb1, 0x7b, 0x81, 0xb1, 0x2f, 0x3d, 0x47, 0xa3, 0xac, 0x62, 0x8a, 0xbf, 0x36, 0xd1, 0x83, 0xe9, + 0xd2, 0x98, 0xf9, 0xf3, 0x76, 0xf0, 0xf0, 0x26, 0xb1, 0x34, 0xf9, 0xe6, 0xd0, 0x6b, 0x68, 0x6b, + 0x3f, 0xe7, 0xd0, 0x8a, 0xa6, 0x58, 0xfa, 0x95, 0x3a, 0x58, 0xbd, 0x82, 0x9b, 0x59, 0xdb, 0x36, + 0xd0, 0x11, 0xb4, 0xb5, 0x2d, 0x44, 0xb7, 0x57, 0x5e, 0x87, 0x74, 0x7b, 0x33, 0x56, 0x17, 0x6b, + 0x4e, 0x58, 0xd3, 0x76, 0x09, 0xdd, 0x5a, 0x79, 0x7b, 0xd1, 0xad, 0xcd, 0x5a, 0x40, 0xa4, 0x35, + 0x6d, 0x74, 0xeb, 0xd6, 0xca, 0x8b, 0x89, 0x6e, 0x6d, 0xc6, 0xbc, 0xb7, 0xe6, 0xd0, 0x07, 0xe8, + 0x1d, 0x73, 0x86, 0xdd, 0x70, 0xc2, 0x9e, 0x42, 0xf0, 0x67, 0x58, 0xdd, 0x34, 0xb6, 0x0d, 0xf4, + 0x0f, 0x58, 0x28, 0x0d, 0x66, 0x64, 0x4d, 0x34, 0xaf, 0xda, 0x28, 0x06, 0x1b, 0xd7, 0xca, 0xe4, + 0x9e, 0xbf, 0x81, 0x8e, 0x3e, 0x0f, 0x91, 0xe6, 0xd4, 0x8c, 0x91, 0x3f, 0x58, 0xbb, 0x8a, 0xad, + 0x1b, 0xd4, 0x1b, 0xb5, 0x6e, 0x70, 0xc6, 0xa8, 0xd2, 0x0d, 0xce, 0xea, 0xef, 0xd6, 0x1c, 0xfa, + 0x3b, 0xcc, 0x4f, 0x37, 0x4c, 0x74, 0x7f, 0x1a, 0xba, 0x52, 0x1f, 0x1e, 0x58, 0xd7, 0x89, 0xe4, + 0xc6, 0x0f, 0x01, 0x26, 0x7d, 0x10, 0x2d, 0x4f, 0x74, 0x4a, 0x7d, 0x78, 0xb0, 0x32, 0x9b, 0x99, + 0x9b, 0xfa, 0x27, 0x2c, 0xcd, 0x6c, 0x36, 0x48, 0x2b, 0xc0, 0xeb, 0xda, 0xd5, 0xe0, 0x37, 0x37, + 0xca, 0x65, 0x77, 0xbd, 0x5c, 0x83, 0xf9, 0x58, 0xb5, 0x88, 0x61, 0xbc, 0xe5, 0x05, 0x04, 0x47, + 0xfc, 0x25, 0x48, 0x8d, 0xb7, 0x8c, 0x72, 0x7a, 0xd2, 0x90, 0x7f, 0x81, 0xfd, 0xfe, 0xa7, 0x00, + 0x00, 0x00, 0xff, 0xff, 0x84, 0x9c, 0x05, 0x4d, 0x11, 0x13, 0x00, 0x00, } diff --git a/weed/s3api/filer_util.go b/weed/s3api/filer_util.go index 91c34f0eb..3c11b032c 100644 --- a/weed/s3api/filer_util.go +++ b/weed/s3api/filer_util.go @@ -139,6 +139,61 @@ func (s3a *S3ApiServer) rm(ctx context.Context, parentDirectoryPath string, entr } +func (s3a *S3ApiServer) streamRemove(ctx context.Context, quiet bool, + fn func() (finished bool, parentDirectoryPath string, entryName string, isDeleteData, isRecursive bool), + respFn func(err string)) error { + + return s3a.withFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { + + stream, err := client.StreamDeleteEntries(ctx) + if err != nil { + glog.V(0).Infof("stream delete entry: %v", err) + return fmt.Errorf("stream delete entry: %v", err) + } + + waitc := make(chan struct{}) + go func() { + for { + resp, err := stream.Recv() + if err == io.EOF { + // read done. 
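// io.EOF from Recv() on this bidirectional stream normally arrives after the
// send loop below has called stream.CloseSend() and the filer has answered
// every request; closing waitc here releases the final <-waitc so that
// streamRemove only returns once each DeleteEntryRequest has been reported
// back through respFn.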
+ close(waitc) + return + } + if err != nil { + glog.V(0).Infof("streamRemove: %v", err) + return + } + respFn(resp.Error) + } + }() + + for { + finished, parentDirectoryPath, entryName, isDeleteData, isRecursive := fn() + if finished { + break + } + err = stream.Send(&filer_pb.DeleteEntryRequest{ + Directory: parentDirectoryPath, + Name: entryName, + IsDeleteData: isDeleteData, + IsRecursive: isRecursive, + IgnoreRecursiveError: quiet, + }) + if err != nil { + glog.V(0).Infof("streamRemove: %v", err) + break + } + + } + stream.CloseSend() + <-waitc + return err + + }) + +} + func (s3a *S3ApiServer) exists(ctx context.Context, parentDirectoryPath string, entryName string, isDirectory bool) (exists bool, err error) { err = s3a.withFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { diff --git a/weed/s3api/s3api_errors.go b/weed/s3api/s3api_errors.go index e0f89c2b0..3f97c73cb 100644 --- a/weed/s3api/s3api_errors.go +++ b/weed/s3api/s3api_errors.go @@ -49,6 +49,7 @@ const ( ErrMissingFields ErrMissingCredTag ErrCredMalformed + ErrMalformedXML ErrMalformedDate ErrMalformedPresignedDate ErrMalformedCredentialDate @@ -161,6 +162,12 @@ var errorCodeResponse = map[ErrorCode]APIError{ HTTPStatusCode: http.StatusBadRequest, }, + ErrMalformedXML: { + Code: "MalformedXML", + Description: "The XML you provided was not well-formed or did not validate against our published schema.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrAuthHeaderEmpty: { Code: "InvalidArgument", Description: "Authorization header is invalid -- one and only one ' ' (space) required.", diff --git a/weed/s3api/s3api_object_handlers.go b/weed/s3api/s3api_object_handlers.go index 864376d60..b7bdf334a 100644 --- a/weed/s3api/s3api_object_handlers.go +++ b/weed/s3api/s3api_object_handlers.go @@ -1,8 +1,10 @@ package s3api import ( + "context" "crypto/md5" "encoding/json" + "encoding/xml" "fmt" "io" "io/ioutil" @@ -115,10 +117,97 @@ func (s3a *S3ApiServer) DeleteObjectHandler(w http.ResponseWriter, r *http.Reque } +/// ObjectIdentifier carries key name for the object to delete. +type ObjectIdentifier struct { + ObjectName string `xml:"Key"` +} + +// DeleteObjectsRequest - xml carrying the object key names which needs to be deleted. +type DeleteObjectsRequest struct { + // Element to enable quiet mode for the request + Quiet bool + // List of objects to be deleted + Objects []ObjectIdentifier `xml:"Object"` +} + +// DeleteError structure. +type DeleteError struct { + Code string + Message string + Key string +} + +// DeleteObjectsResponse container for multiple object deletes. +type DeleteObjectsResponse struct { + XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ DeleteResult" json:"-"` + + // Collection of all deleted objects + DeletedObjects []ObjectIdentifier `xml:"Deleted,omitempty"` + + // Collection of errors deleting certain objects. 
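// Each DeleteError marshals as an <Error> element under the <DeleteResult>
// root, e.g. <Error><Code></Code><Message>...</Message><Key>a.txt</Key></Error>,
// paired with the request's <Object><Key>a.txt</Key></Object> entries
// ("a.txt" is only an illustrative key).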
+ Errors []DeleteError `xml:"Error,omitempty"` +} + // DeleteMultipleObjectsHandler - Delete multiple objects func (s3a *S3ApiServer) DeleteMultipleObjectsHandler(w http.ResponseWriter, r *http.Request) { - // TODO - writeErrorResponse(w, ErrNotImplemented, r.URL) + + vars := mux.Vars(r) + bucket := vars["bucket"] + + deleteXMLBytes, err := ioutil.ReadAll(r.Body) + if err != nil { + writeErrorResponse(w, ErrInternalError, r.URL) + return + } + + deleteObjects := &DeleteObjectsRequest{} + if err := xml.Unmarshal(deleteXMLBytes, deleteObjects); err != nil { + writeErrorResponse(w, ErrMalformedXML, r.URL) + return + } + + var index int + + var deletedObjects []ObjectIdentifier + var deleteErrors []DeleteError + s3a.streamRemove(context.Background(), deleteObjects.Quiet, func() (finished bool, parentDirectoryPath string, entryName string, isDeleteData, isRecursive bool) { + if index >= len(deleteObjects.Objects) { + finished = true + return + } + + object := deleteObjects.Objects[index] + + lastSeparator := strings.LastIndex(object.ObjectName, "/") + parentDirectoryPath, entryName, isDeleteData, isRecursive = "/", object.ObjectName, true, false + if lastSeparator > 0 && lastSeparator+1 < len(object.ObjectName) { + entryName = object.ObjectName[lastSeparator+1:] + parentDirectoryPath = "/" + object.ObjectName[:lastSeparator] + } + parentDirectoryPath = fmt.Sprintf("%s/%s%s", s3a.option.BucketsPath, bucket, parentDirectoryPath) + return + }, func(err string) { + object := deleteObjects.Objects[index] + if err == "" { + deletedObjects = append(deletedObjects, object) + } else { + deleteErrors = append(deleteErrors, DeleteError{ + Code: "", + Message: err, + Key: object.ObjectName, + }) + } + index++ + }) + + deleteResp := DeleteObjectsResponse{} + if !deleteObjects.Quiet { + deleteResp.DeletedObjects = deletedObjects + } + deleteResp.Errors = deleteErrors + + writeSuccessResponseXML(w, encodeResponse(deleteResp)) + } func (s3a *S3ApiServer) proxyToFiler(w http.ResponseWriter, r *http.Request, destUrl string, responseFn func(proxyResonse *http.Response, w http.ResponseWriter)) { diff --git a/weed/server/filer_grpc_server.go b/weed/server/filer_grpc_server.go index 12ea25144..a1360e92f 100644 --- a/weed/server/filer_grpc_server.go +++ b/weed/server/filer_grpc_server.go @@ -219,7 +219,31 @@ func (fs *FilerServer) UpdateEntry(ctx context.Context, req *filer_pb.UpdateEntr func (fs *FilerServer) DeleteEntry(ctx context.Context, req *filer_pb.DeleteEntryRequest) (resp *filer_pb.DeleteEntryResponse, err error) { err = fs.filer.DeleteEntryMetaAndData(ctx, filer2.FullPath(filepath.ToSlash(filepath.Join(req.Directory, req.Name))), req.IsRecursive, req.IgnoreRecursiveError, req.IsDeleteData) - return &filer_pb.DeleteEntryResponse{}, err + resp = &filer_pb.DeleteEntryResponse{} + if err != nil { + resp.Error = err.Error() + } + return resp, nil +} + +func (fs *FilerServer) StreamDeleteEntries(stream filer_pb.SeaweedFiler_StreamDeleteEntriesServer) error { + for { + req, err := stream.Recv() + if err != nil { + return fmt.Errorf("receive delete entry request: %v", err) + } + ctx := context.Background() + fullpath := filer2.FullPath(filepath.ToSlash(filepath.Join(req.Directory, req.Name))) + err = fs.filer.DeleteEntryMetaAndData(ctx, fullpath, req.IsRecursive, req.IgnoreRecursiveError, req.IsDeleteData) + resp := &filer_pb.DeleteEntryResponse{} + if err != nil { + resp.Error = err.Error() + } + if err := stream.Send(resp); err != nil { + return err + } + } + return nil } func (fs *FilerServer) AssignVolume(ctx 
context.Context, req *filer_pb.AssignVolumeRequest) (resp *filer_pb.AssignVolumeResponse, err error) { From 0841bedb150fb7d4a96c237961474310942c2454 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Tue, 25 Feb 2020 17:15:09 -0800 Subject: [PATCH 0152/2432] move filer assign volume grpc errror to response --- other/java/client/src/main/proto/filer.proto | 1 + weed/command/filer_copy.go | 6 + weed/filesys/dirty_page.go | 3 + weed/pb/filer.proto | 1 + weed/pb/filer_pb/filer.pb.go | 223 +++++++++--------- weed/pb/filer_pb/filer_pb_helper.go | 6 +- .../replication/sink/filersink/fetch_write.go | 3 + weed/server/filer_grpc_server.go | 6 +- weed/server/webdav_server.go | 3 + 9 files changed, 139 insertions(+), 113 deletions(-) diff --git a/other/java/client/src/main/proto/filer.proto b/other/java/client/src/main/proto/filer.proto index d26c5595f..04901770a 100644 --- a/other/java/client/src/main/proto/filer.proto +++ b/other/java/client/src/main/proto/filer.proto @@ -180,6 +180,7 @@ message AssignVolumeResponse { string auth = 5; string collection = 6; string replication = 7; + string error = 8; } message LookupVolumeRequest { diff --git a/weed/command/filer_copy.go b/weed/command/filer_copy.go index a359bf32b..6470cbbca 100644 --- a/weed/command/filer_copy.go +++ b/weed/command/filer_copy.go @@ -290,6 +290,9 @@ func (worker *FileCopyWorker) uploadFileAsOne(ctx context.Context, task FileCopy if assignError != nil { return fmt.Errorf("assign volume failure %v: %v", request, assignError) } + if assignResult.Error != "" { + return fmt.Errorf("assign volume failure %v: %v", request, assignResult.Error) + } return nil }) if err != nil { @@ -387,6 +390,9 @@ func (worker *FileCopyWorker) uploadFileInChunks(ctx context.Context, task FileC if assignError != nil { return fmt.Errorf("assign volume failure %v: %v", request, assignError) } + if assignResult.Error != "" { + return fmt.Errorf("assign volume failure %v: %v", request, assignResult.Error) + } return nil }) if err != nil { diff --git a/weed/filesys/dirty_page.go b/weed/filesys/dirty_page.go index a4d9d1df9..659f33736 100644 --- a/weed/filesys/dirty_page.go +++ b/weed/filesys/dirty_page.go @@ -160,6 +160,9 @@ func (pages *ContinuousDirtyPages) saveToStorage(ctx context.Context, reader io. 
glog.V(0).Infof("assign volume failure %v: %v", request, err) return err } + if resp.Error != "" { + return fmt.Errorf("assign volume failure %v: %v", request, resp.Error) + } fileId, host, auth = resp.FileId, resp.Url, security.EncodedJwt(resp.Auth) pages.collection, pages.replication = resp.Collection, resp.Replication diff --git a/weed/pb/filer.proto b/weed/pb/filer.proto index d26c5595f..04901770a 100644 --- a/weed/pb/filer.proto +++ b/weed/pb/filer.proto @@ -180,6 +180,7 @@ message AssignVolumeResponse { string auth = 5; string collection = 6; string replication = 7; + string error = 8; } message LookupVolumeRequest { diff --git a/weed/pb/filer_pb/filer.pb.go b/weed/pb/filer_pb/filer.pb.go index 50ac5e435..d77e5b125 100644 --- a/weed/pb/filer_pb/filer.pb.go +++ b/weed/pb/filer_pb/filer.pb.go @@ -751,6 +751,7 @@ type AssignVolumeResponse struct { Auth string `protobuf:"bytes,5,opt,name=auth" json:"auth,omitempty"` Collection string `protobuf:"bytes,6,opt,name=collection" json:"collection,omitempty"` Replication string `protobuf:"bytes,7,opt,name=replication" json:"replication,omitempty"` + Error string `protobuf:"bytes,8,opt,name=error" json:"error,omitempty"` } func (m *AssignVolumeResponse) Reset() { *m = AssignVolumeResponse{} } @@ -807,6 +808,13 @@ func (m *AssignVolumeResponse) GetReplication() string { return "" } +func (m *AssignVolumeResponse) GetError() string { + if m != nil { + return m.Error + } + return "" +} + type LookupVolumeRequest struct { VolumeIds []string `protobuf:"bytes,1,rep,name=volume_ids,json=volumeIds" json:"volume_ids,omitempty"` } @@ -1578,111 +1586,112 @@ var _SeaweedFiler_serviceDesc = grpc.ServiceDesc{ func init() { proto.RegisterFile("filer.proto", fileDescriptor0) } var fileDescriptor0 = []byte{ - // 1692 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xb4, 0x58, 0xcb, 0x6f, 0xdb, 0xc8, - 0x19, 0x37, 0xf5, 0xe6, 0x27, 0x29, 0xb1, 0x47, 0x76, 0xa2, 0xc8, 0x8f, 0x3a, 0x74, 0x93, 0xba, - 0x48, 0xe0, 0x1a, 0x6e, 0x0e, 0x49, 0xd3, 0x1e, 0x12, 0x3f, 0x0a, 0xa3, 0xce, 0x03, 0x74, 0x52, - 0xa4, 0x28, 0x50, 0x82, 0x26, 0x47, 0xf2, 0xd4, 0x24, 0x47, 0x1d, 0x0e, 0x6d, 0xa7, 0x7f, 0x4a, - 0x81, 0x1e, 0xfa, 0x37, 0xf4, 0xba, 0xd8, 0xcb, 0x62, 0x81, 0x3d, 0xec, 0xdf, 0xb2, 0xc7, 0x3d, - 0x2f, 0x66, 0x86, 0xa4, 0x86, 0xa2, 0x6c, 0x27, 0xbb, 0xc8, 0x8d, 0xf3, 0xbd, 0xe6, 0x9b, 0xdf, - 0xf7, 0x94, 0xa0, 0x3d, 0x24, 0x01, 0x66, 0x5b, 0x63, 0x46, 0x39, 0x45, 0x2d, 0x79, 0x70, 0xc6, - 0x27, 0xd6, 0x1b, 0x58, 0x3e, 0xa2, 0xf4, 0x2c, 0x19, 0xef, 0x11, 0x86, 0x3d, 0x4e, 0xd9, 0xc7, - 0xfd, 0x88, 0xb3, 0x8f, 0x36, 0xfe, 0x57, 0x82, 0x63, 0x8e, 0x56, 0xc0, 0xf4, 0x33, 0x46, 0xdf, - 0x58, 0x37, 0x36, 0x4d, 0x7b, 0x42, 0x40, 0x08, 0x6a, 0x91, 0x1b, 0xe2, 0x7e, 0x45, 0x32, 0xe4, - 0xb7, 0xb5, 0x0f, 0x2b, 0xb3, 0x0d, 0xc6, 0x63, 0x1a, 0xc5, 0x18, 0x3d, 0x80, 0x3a, 0x16, 0x04, - 0x69, 0xad, 0xbd, 0x73, 0x7b, 0x2b, 0x73, 0x65, 0x4b, 0xc9, 0x29, 0xae, 0xf5, 0xb5, 0x01, 0xe8, - 0x88, 0xc4, 0x5c, 0x10, 0x09, 0x8e, 0x3f, 0xcd, 0x9f, 0x3b, 0xd0, 0x18, 0x33, 0x3c, 0x24, 0x97, - 0xa9, 0x47, 0xe9, 0x09, 0x3d, 0x86, 0x85, 0x98, 0xbb, 0x8c, 0x1f, 0x30, 0x1a, 0x1e, 0x90, 0x00, - 0xbf, 0x16, 0x4e, 0x57, 0xa5, 0x48, 0x99, 0x81, 0xb6, 0x00, 0x91, 0xc8, 0x0b, 0x92, 0x98, 0x9c, - 0xe3, 0xe3, 0x8c, 0xdb, 0xaf, 0xad, 0x1b, 0x9b, 0x2d, 0x7b, 0x06, 0x07, 0x2d, 0x42, 0x3d, 0x20, - 0x21, 0xe1, 0xfd, 0xfa, 0xba, 0xb1, 0xd9, 0xb5, 0xd5, 0xc1, 0xfa, 0x23, 0xf4, 0x0a, 0xfe, 0x7f, - 0xde, 0xf3, 0xff, 0x5b, 0x81, 0xba, 0x24, 0xe4, 0x18, 0x1b, 0x13, 0x8c, 
0xd1, 0x7d, 0xe8, 0x90, - 0xd8, 0x99, 0x00, 0x51, 0x91, 0xbe, 0xb5, 0x49, 0x9c, 0x63, 0x8e, 0x1e, 0x41, 0xc3, 0x3b, 0x4d, - 0xa2, 0xb3, 0xb8, 0x5f, 0x5d, 0xaf, 0x6e, 0xb6, 0x77, 0x7a, 0x93, 0x8b, 0xc4, 0x43, 0x77, 0x05, - 0xcf, 0x4e, 0x45, 0xd0, 0x53, 0x00, 0x97, 0x73, 0x46, 0x4e, 0x12, 0x8e, 0x63, 0xf9, 0xd2, 0xf6, - 0x4e, 0x5f, 0x53, 0x48, 0x62, 0xfc, 0x22, 0xe7, 0xdb, 0x9a, 0x2c, 0x7a, 0x06, 0x2d, 0x7c, 0xc9, - 0x71, 0xe4, 0x63, 0xbf, 0x5f, 0x97, 0x17, 0xad, 0x4e, 0xbd, 0x68, 0x6b, 0x3f, 0xe5, 0xab, 0xf7, - 0xe5, 0xe2, 0x83, 0xe7, 0xd0, 0x2d, 0xb0, 0xd0, 0x3c, 0x54, 0xcf, 0x70, 0x16, 0x55, 0xf1, 0x29, - 0x90, 0x3d, 0x77, 0x83, 0x44, 0x25, 0x58, 0xc7, 0x56, 0x87, 0x3f, 0x54, 0x9e, 0x1a, 0xd6, 0x1e, - 0x98, 0x07, 0x49, 0x10, 0xe4, 0x8a, 0x3e, 0x61, 0x99, 0xa2, 0x4f, 0xd8, 0x04, 0xe5, 0xca, 0xb5, - 0x28, 0x7f, 0x65, 0xc0, 0xc2, 0xfe, 0x39, 0x8e, 0xf8, 0x6b, 0xca, 0xc9, 0x90, 0x78, 0x2e, 0x27, - 0x34, 0x42, 0x8f, 0xc1, 0xa4, 0x81, 0xef, 0x5c, 0x1b, 0xa6, 0x16, 0x0d, 0x52, 0xaf, 0x1f, 0x83, - 0x19, 0xe1, 0x0b, 0xe7, 0xda, 0xeb, 0x5a, 0x11, 0xbe, 0x50, 0xd2, 0x1b, 0xd0, 0xf5, 0x71, 0x80, - 0x39, 0x76, 0xf2, 0xe8, 0x88, 0xd0, 0x75, 0x14, 0x71, 0x57, 0x85, 0xe3, 0x21, 0xdc, 0x16, 0x26, - 0xc7, 0x2e, 0xc3, 0x11, 0x77, 0xc6, 0x2e, 0x3f, 0x95, 0x31, 0x31, 0xed, 0x6e, 0x84, 0x2f, 0xde, - 0x4a, 0xea, 0x5b, 0x97, 0x9f, 0x5a, 0x3f, 0x1a, 0x60, 0xe6, 0xc1, 0x44, 0x77, 0xa1, 0x29, 0xae, - 0x75, 0x88, 0x9f, 0x22, 0xd1, 0x10, 0xc7, 0x43, 0x5f, 0x54, 0x05, 0x1d, 0x0e, 0x63, 0xcc, 0xa5, - 0x7b, 0x55, 0x3b, 0x3d, 0x89, 0xcc, 0x8a, 0xc9, 0xbf, 0x55, 0x21, 0xd4, 0x6c, 0xf9, 0x2d, 0x10, - 0x0f, 0x39, 0x09, 0xb1, 0xbc, 0xb0, 0x6a, 0xab, 0x03, 0xea, 0x41, 0x1d, 0x3b, 0xdc, 0x1d, 0xc9, - 0x0c, 0x37, 0xed, 0x1a, 0x7e, 0xe7, 0x8e, 0xd0, 0xaf, 0xe1, 0x56, 0x4c, 0x13, 0xe6, 0x61, 0x27, - 0xbb, 0xb6, 0x21, 0xb9, 0x1d, 0x45, 0x3d, 0x50, 0x97, 0x5b, 0x50, 0x1d, 0x12, 0xbf, 0xdf, 0x94, - 0xc0, 0xcc, 0x17, 0x93, 0xf0, 0xd0, 0xb7, 0x05, 0x13, 0xfd, 0x0e, 0x20, 0xb7, 0xe4, 0xf7, 0x5b, - 0x57, 0x88, 0x9a, 0x99, 0x5d, 0xdf, 0xfa, 0x00, 0x8d, 0xd4, 0xfc, 0x32, 0x98, 0xe7, 0x34, 0x48, - 0xc2, 0xfc, 0xd9, 0x5d, 0xbb, 0xa5, 0x08, 0x87, 0x3e, 0xba, 0x07, 0xb2, 0xcf, 0x39, 0x22, 0xab, - 0x2a, 0xf2, 0x91, 0x12, 0xa1, 0xbf, 0x60, 0xd9, 0x29, 0x3c, 0x4a, 0xcf, 0x88, 0x7a, 0x7d, 0xd3, - 0x4e, 0x4f, 0xd6, 0x0f, 0x15, 0xb8, 0x55, 0x4c, 0x77, 0x71, 0x85, 0xb4, 0x22, 0xb1, 0x32, 0xa4, - 0x19, 0x69, 0xf6, 0xb8, 0x80, 0x57, 0x45, 0xc7, 0x2b, 0x53, 0x09, 0xa9, 0xaf, 0x2e, 0xe8, 0x2a, - 0x95, 0x57, 0xd4, 0xc7, 0x22, 0x5b, 0x13, 0xe2, 0x4b, 0x80, 0xbb, 0xb6, 0xf8, 0x14, 0x94, 0x11, - 0xf1, 0xd3, 0xf6, 0x21, 0x3e, 0xa5, 0x7b, 0x4c, 0xda, 0x6d, 0xa8, 0x90, 0xa9, 0x93, 0x08, 0x59, - 0x28, 0xa8, 0x4d, 0x15, 0x07, 0xf1, 0x8d, 0xd6, 0xa1, 0xcd, 0xf0, 0x38, 0x48, 0xb3, 0x57, 0xc2, - 0x67, 0xda, 0x3a, 0x09, 0xad, 0x01, 0x78, 0x34, 0x08, 0xb0, 0x27, 0x05, 0x4c, 0x29, 0xa0, 0x51, - 0x44, 0xe6, 0x70, 0x1e, 0x38, 0x31, 0xf6, 0xfa, 0xb0, 0x6e, 0x6c, 0xd6, 0xed, 0x06, 0xe7, 0xc1, - 0x31, 0xf6, 0xc4, 0x3b, 0x92, 0x18, 0x33, 0x47, 0x36, 0xa0, 0xb6, 0xd4, 0x6b, 0x09, 0x82, 0x6c, - 0x93, 0xab, 0x00, 0x23, 0x46, 0x93, 0xb1, 0xe2, 0x76, 0xd6, 0xab, 0xa2, 0x17, 0x4b, 0x8a, 0x64, - 0x3f, 0x80, 0x5b, 0xf1, 0xc7, 0x30, 0x20, 0xd1, 0x99, 0xc3, 0x5d, 0x36, 0xc2, 0xbc, 0xdf, 0x55, - 0x39, 0x9c, 0x52, 0xdf, 0x49, 0xa2, 0x35, 0x06, 0xb4, 0xcb, 0xb0, 0xcb, 0xf1, 0x67, 0x8c, 0x9d, - 0x4f, 0xab, 0x6e, 0xb4, 0x04, 0x0d, 0xea, 0xe0, 0x4b, 0x2f, 0x48, 0x8b, 0xac, 0x4e, 0xf7, 0x2f, - 0xbd, 0xc0, 0x7a, 0x04, 0xbd, 0xc2, 0x8d, 0x69, 0x63, 0x5e, 0x84, 0x3a, 0x66, 0x8c, 0x66, 0x6d, - 
0x44, 0x1d, 0xac, 0xbf, 0x01, 0x7a, 0x3f, 0xf6, 0xbf, 0x84, 0x7b, 0xd6, 0x12, 0xf4, 0x0a, 0xa6, - 0x95, 0x1f, 0xd6, 0xb7, 0x06, 0xa0, 0x3d, 0xd9, 0x0d, 0x7e, 0xd9, 0x20, 0x16, 0xf5, 0x29, 0x86, - 0x84, 0xea, 0x36, 0xbe, 0xcb, 0xdd, 0x74, 0x84, 0x75, 0x48, 0xac, 0xec, 0xef, 0xb9, 0xdc, 0x4d, - 0x47, 0x09, 0xc3, 0x5e, 0xc2, 0xc4, 0x54, 0x93, 0x49, 0x28, 0x47, 0x89, 0x9d, 0x91, 0xd0, 0x13, - 0xb8, 0x43, 0x46, 0x11, 0x65, 0x78, 0x22, 0xe6, 0x28, 0xa8, 0x1a, 0x52, 0x78, 0x51, 0x71, 0x73, - 0x85, 0x7d, 0x89, 0xdc, 0x23, 0xe8, 0x15, 0x9e, 0x71, 0x2d, 0xcc, 0xff, 0x31, 0xa0, 0xff, 0x82, - 0xd3, 0x90, 0x78, 0x36, 0x16, 0xce, 0x17, 0x9e, 0xbe, 0x01, 0x5d, 0xd1, 0x8f, 0xa7, 0x9f, 0xdf, - 0xa1, 0x81, 0x3f, 0x99, 0x77, 0xf7, 0x40, 0xb4, 0x64, 0x47, 0x43, 0xa1, 0x49, 0x03, 0x5f, 0x66, - 0xe2, 0x06, 0x88, 0xbe, 0xa9, 0xe9, 0xab, 0xc9, 0xdf, 0x89, 0xf0, 0x45, 0x41, 0x5f, 0x08, 0x49, - 0x7d, 0xd5, 0x6c, 0x9b, 0x11, 0xbe, 0x10, 0xfa, 0xd6, 0x32, 0xdc, 0x9b, 0xe1, 0x5b, 0x1a, 0xae, - 0xef, 0x0c, 0xe8, 0xbd, 0x88, 0x63, 0x32, 0x8a, 0xfe, 0x2a, 0xdb, 0x4e, 0xe6, 0xf4, 0x22, 0xd4, - 0x3d, 0x9a, 0x44, 0x5c, 0x3a, 0x5b, 0xb7, 0xd5, 0x61, 0xaa, 0x12, 0x2b, 0xa5, 0x4a, 0x9c, 0xaa, - 0xe5, 0x6a, 0xb9, 0x96, 0xb5, 0x5a, 0xad, 0x15, 0x6a, 0xf5, 0x57, 0xd0, 0x16, 0x41, 0x76, 0x3c, - 0x1c, 0x71, 0xcc, 0xd2, 0x4e, 0x0d, 0x82, 0xb4, 0x2b, 0x29, 0x42, 0x40, 0x9f, 0x28, 0xaa, 0x59, - 0xc3, 0x78, 0x32, 0x4e, 0xbe, 0x37, 0x60, 0xb1, 0xf8, 0x94, 0x34, 0x66, 0x57, 0x4e, 0x16, 0xd1, - 0xca, 0x58, 0x90, 0xbe, 0x43, 0x7c, 0x8a, 0xa6, 0x30, 0x4e, 0x4e, 0x02, 0xe2, 0x39, 0x82, 0xa1, - 0xfc, 0x37, 0x15, 0xe5, 0x3d, 0x0b, 0x26, 0xa8, 0xd4, 0x74, 0x54, 0x10, 0xd4, 0xdc, 0x84, 0x9f, - 0x66, 0xd3, 0x45, 0x7c, 0x4f, 0x21, 0xd5, 0xb8, 0x09, 0xa9, 0x66, 0x09, 0x29, 0xeb, 0x09, 0xf4, - 0xd4, 0x22, 0x5a, 0x0c, 0xcc, 0x2a, 0x40, 0x3e, 0x31, 0xe2, 0xbe, 0xa1, 0xda, 0x56, 0x36, 0x32, - 0x62, 0xeb, 0x4f, 0x60, 0x1e, 0x51, 0x65, 0x21, 0x46, 0xdb, 0x60, 0x06, 0xd9, 0x41, 0x8a, 0xb6, - 0x77, 0xd0, 0xa4, 0x9a, 0x33, 0x39, 0x7b, 0x22, 0x64, 0x3d, 0x87, 0x56, 0x46, 0xce, 0xd0, 0x31, - 0xae, 0x42, 0xa7, 0x32, 0x85, 0x8e, 0xf5, 0x8d, 0x01, 0x8b, 0x45, 0x97, 0xd3, 0x00, 0xbc, 0x87, - 0x6e, 0x7e, 0x85, 0x13, 0xba, 0xe3, 0xd4, 0x97, 0x6d, 0xdd, 0x97, 0xb2, 0x5a, 0xee, 0x60, 0xfc, - 0xca, 0x1d, 0xab, 0xac, 0xed, 0x04, 0x1a, 0x69, 0xf0, 0x0e, 0x16, 0x4a, 0x22, 0x33, 0xb6, 0xb0, - 0xdf, 0xea, 0x5b, 0x58, 0x61, 0x93, 0xcc, 0xb5, 0xf5, 0xd5, 0xec, 0x19, 0xdc, 0x55, 0x85, 0xbf, - 0x9b, 0x47, 0x2b, 0xc3, 0xbe, 0x18, 0x54, 0x63, 0x3a, 0xa8, 0xd6, 0x00, 0xfa, 0x65, 0xd5, 0xb4, - 0xd0, 0x46, 0xb0, 0x70, 0xcc, 0x5d, 0x4e, 0x62, 0x4e, 0xbc, 0xfc, 0xe7, 0xc0, 0x54, 0x16, 0x18, - 0x37, 0xcd, 0xbe, 0x72, 0xc5, 0xcd, 0x43, 0x95, 0xf3, 0x2c, 0x53, 0xc5, 0xa7, 0x88, 0x02, 0xd2, - 0x6f, 0x4a, 0x63, 0xf0, 0x05, 0xae, 0x12, 0xf9, 0xc0, 0x29, 0x77, 0x03, 0xb5, 0x5b, 0xd4, 0xe4, - 0x6e, 0x61, 0x4a, 0x8a, 0x5c, 0x2e, 0xd4, 0xf8, 0xf5, 0x15, 0xb7, 0xae, 0x36, 0x0f, 0x41, 0x90, - 0xcc, 0x55, 0x00, 0x59, 0x94, 0xaa, 0x9e, 0x1a, 0x4a, 0x57, 0x50, 0x76, 0x05, 0xc1, 0x5a, 0x83, - 0x95, 0x3f, 0x63, 0x2e, 0xb6, 0x24, 0xb6, 0x4b, 0xa3, 0x21, 0x19, 0x25, 0xcc, 0xd5, 0x42, 0x61, - 0xfd, 0xdf, 0x80, 0xd5, 0x2b, 0x04, 0xd2, 0x07, 0xf7, 0xa1, 0x19, 0xba, 0x31, 0xc7, 0x2c, 0xab, - 0x92, 0xec, 0x38, 0x0d, 0x45, 0xe5, 0x26, 0x28, 0xaa, 0x25, 0x28, 0x96, 0xa0, 0x11, 0xba, 0x97, - 0x4e, 0x78, 0x92, 0xae, 0x41, 0xf5, 0xd0, 0xbd, 0x7c, 0x75, 0x22, 0x7b, 0x18, 0x61, 0xce, 0x49, - 0xe2, 0x9d, 0x61, 0x1e, 0xe7, 0x3d, 0x8c, 0xb0, 0x97, 0x8a, 0xb2, 0xf3, 0xbf, 0x16, 0x74, 0x8e, - 0xb1, 0x7b, 0x81, 0xb1, 
0x2f, 0x3d, 0x47, 0xa3, 0xac, 0x62, 0x8a, 0xbf, 0x36, 0xd1, 0x83, 0xe9, - 0xd2, 0x98, 0xf9, 0xf3, 0x76, 0xf0, 0xf0, 0x26, 0xb1, 0x34, 0xf9, 0xe6, 0xd0, 0x6b, 0x68, 0x6b, - 0x3f, 0xe7, 0xd0, 0x8a, 0xa6, 0x58, 0xfa, 0x95, 0x3a, 0x58, 0xbd, 0x82, 0x9b, 0x59, 0xdb, 0x36, - 0xd0, 0x11, 0xb4, 0xb5, 0x2d, 0x44, 0xb7, 0x57, 0x5e, 0x87, 0x74, 0x7b, 0x33, 0x56, 0x17, 0x6b, - 0x4e, 0x58, 0xd3, 0x76, 0x09, 0xdd, 0x5a, 0x79, 0x7b, 0xd1, 0xad, 0xcd, 0x5a, 0x40, 0xa4, 0x35, - 0x6d, 0x74, 0xeb, 0xd6, 0xca, 0x8b, 0x89, 0x6e, 0x6d, 0xc6, 0xbc, 0xb7, 0xe6, 0xd0, 0x07, 0xe8, - 0x1d, 0x73, 0x86, 0xdd, 0x70, 0xc2, 0x9e, 0x42, 0xf0, 0x67, 0x58, 0xdd, 0x34, 0xb6, 0x0d, 0xf4, - 0x0f, 0x58, 0x28, 0x0d, 0x66, 0x64, 0x4d, 0x34, 0xaf, 0xda, 0x28, 0x06, 0x1b, 0xd7, 0xca, 0xe4, - 0x9e, 0xbf, 0x81, 0x8e, 0x3e, 0x0f, 0x91, 0xe6, 0xd4, 0x8c, 0x91, 0x3f, 0x58, 0xbb, 0x8a, 0xad, - 0x1b, 0xd4, 0x1b, 0xb5, 0x6e, 0x70, 0xc6, 0xa8, 0xd2, 0x0d, 0xce, 0xea, 0xef, 0xd6, 0x1c, 0xfa, - 0x3b, 0xcc, 0x4f, 0x37, 0x4c, 0x74, 0x7f, 0x1a, 0xba, 0x52, 0x1f, 0x1e, 0x58, 0xd7, 0x89, 0xe4, - 0xc6, 0x0f, 0x01, 0x26, 0x7d, 0x10, 0x2d, 0x4f, 0x74, 0x4a, 0x7d, 0x78, 0xb0, 0x32, 0x9b, 0x99, - 0x9b, 0xfa, 0x27, 0x2c, 0xcd, 0x6c, 0x36, 0x48, 0x2b, 0xc0, 0xeb, 0xda, 0xd5, 0xe0, 0x37, 0x37, - 0xca, 0x65, 0x77, 0xbd, 0x5c, 0x83, 0xf9, 0x58, 0xb5, 0x88, 0x61, 0xbc, 0xe5, 0x05, 0x04, 0x47, - 0xfc, 0x25, 0x48, 0x8d, 0xb7, 0x8c, 0x72, 0x7a, 0xd2, 0x90, 0x7f, 0x81, 0xfd, 0xfe, 0xa7, 0x00, - 0x00, 0x00, 0xff, 0xff, 0x84, 0x9c, 0x05, 0x4d, 0x11, 0x13, 0x00, 0x00, + // 1697 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xb4, 0x58, 0xdb, 0x6e, 0xdb, 0xc8, + 0x19, 0x36, 0x75, 0xe6, 0x2f, 0x29, 0xb1, 0x47, 0x76, 0xa2, 0xc8, 0x87, 0x3a, 0x74, 0x93, 0xba, + 0x48, 0xe0, 0x1a, 0x6e, 0x2e, 0x92, 0xa6, 0xbd, 0x48, 0x7c, 0x28, 0x8c, 0x3a, 0x07, 0xd0, 0x49, + 0x91, 0xa2, 0x40, 0x09, 0x9a, 0x1c, 0xc9, 0x53, 0x93, 0x1c, 0x75, 0x38, 0xb4, 0x9d, 0x3e, 0x4a, + 0x81, 0x5e, 0xf4, 0x19, 0x7a, 0xbb, 0xd8, 0x9b, 0xc5, 0x02, 0xfb, 0x1c, 0xfb, 0x00, 0x7b, 0xb9, + 0xd7, 0x8b, 0x99, 0x21, 0xa9, 0xa1, 0x28, 0xdb, 0xc9, 0x2e, 0x72, 0xc7, 0xf9, 0x4f, 0xf3, 0xcf, + 0xf7, 0x1f, 0x25, 0x68, 0x0f, 0x49, 0x80, 0xd9, 0xd6, 0x98, 0x51, 0x4e, 0x51, 0x4b, 0x1e, 0x9c, + 0xf1, 0x89, 0xf5, 0x06, 0x96, 0x8f, 0x28, 0x3d, 0x4b, 0xc6, 0x7b, 0x84, 0x61, 0x8f, 0x53, 0xf6, + 0x71, 0x3f, 0xe2, 0xec, 0xa3, 0x8d, 0xff, 0x95, 0xe0, 0x98, 0xa3, 0x15, 0x30, 0xfd, 0x8c, 0xd1, + 0x37, 0xd6, 0x8d, 0x4d, 0xd3, 0x9e, 0x10, 0x10, 0x82, 0x5a, 0xe4, 0x86, 0xb8, 0x5f, 0x91, 0x0c, + 0xf9, 0x6d, 0xed, 0xc3, 0xca, 0x6c, 0x83, 0xf1, 0x98, 0x46, 0x31, 0x46, 0x0f, 0xa0, 0x8e, 0x05, + 0x41, 0x5a, 0x6b, 0xef, 0xdc, 0xde, 0xca, 0x5c, 0xd9, 0x52, 0x72, 0x8a, 0x6b, 0x7d, 0x6d, 0x00, + 0x3a, 0x22, 0x31, 0x17, 0x44, 0x82, 0xe3, 0x4f, 0xf3, 0xe7, 0x0e, 0x34, 0xc6, 0x0c, 0x0f, 0xc9, + 0x65, 0xea, 0x51, 0x7a, 0x42, 0x8f, 0x61, 0x21, 0xe6, 0x2e, 0xe3, 0x07, 0x8c, 0x86, 0x07, 0x24, + 0xc0, 0xaf, 0x85, 0xd3, 0x55, 0x29, 0x52, 0x66, 0xa0, 0x2d, 0x40, 0x24, 0xf2, 0x82, 0x24, 0x26, + 0xe7, 0xf8, 0x38, 0xe3, 0xf6, 0x6b, 0xeb, 0xc6, 0x66, 0xcb, 0x9e, 0xc1, 0x41, 0x8b, 0x50, 0x0f, + 0x48, 0x48, 0x78, 0xbf, 0xbe, 0x6e, 0x6c, 0x76, 0x6d, 0x75, 0xb0, 0xfe, 0x08, 0xbd, 0x82, 0xff, + 0x9f, 0xf7, 0xfc, 0xff, 0x56, 0xa0, 0x2e, 0x09, 0x39, 0xc6, 0xc6, 0x04, 0x63, 0x74, 0x1f, 0x3a, + 0x24, 0x76, 0x26, 0x40, 0x54, 0xa4, 0x6f, 0x6d, 0x12, 0xe7, 0x98, 0xa3, 0x47, 0xd0, 0xf0, 0x4e, + 0x93, 0xe8, 0x2c, 0xee, 0x57, 0xd7, 0xab, 0x9b, 0xed, 0x9d, 0xde, 0xe4, 0x22, 0xf1, 0xd0, 0x5d, + 0xc1, 0xb3, 0x53, 0x11, 
0xf4, 0x14, 0xc0, 0xe5, 0x9c, 0x91, 0x93, 0x84, 0xe3, 0x58, 0xbe, 0xb4, + 0xbd, 0xd3, 0xd7, 0x14, 0x92, 0x18, 0xbf, 0xc8, 0xf9, 0xb6, 0x26, 0x8b, 0x9e, 0x41, 0x0b, 0x5f, + 0x72, 0x1c, 0xf9, 0xd8, 0xef, 0xd7, 0xe5, 0x45, 0xab, 0x53, 0x2f, 0xda, 0xda, 0x4f, 0xf9, 0xea, + 0x7d, 0xb9, 0xf8, 0xe0, 0x39, 0x74, 0x0b, 0x2c, 0x34, 0x0f, 0xd5, 0x33, 0x9c, 0x45, 0x55, 0x7c, + 0x0a, 0x64, 0xcf, 0xdd, 0x20, 0x51, 0x09, 0xd6, 0xb1, 0xd5, 0xe1, 0x0f, 0x95, 0xa7, 0x86, 0xb5, + 0x07, 0xe6, 0x41, 0x12, 0x04, 0xb9, 0xa2, 0x4f, 0x58, 0xa6, 0xe8, 0x13, 0x36, 0x41, 0xb9, 0x72, + 0x2d, 0xca, 0x5f, 0x19, 0xb0, 0xb0, 0x7f, 0x8e, 0x23, 0xfe, 0x9a, 0x72, 0x32, 0x24, 0x9e, 0xcb, + 0x09, 0x8d, 0xd0, 0x63, 0x30, 0x69, 0xe0, 0x3b, 0xd7, 0x86, 0xa9, 0x45, 0x83, 0xd4, 0xeb, 0xc7, + 0x60, 0x46, 0xf8, 0xc2, 0xb9, 0xf6, 0xba, 0x56, 0x84, 0x2f, 0x94, 0xf4, 0x06, 0x74, 0x7d, 0x1c, + 0x60, 0x8e, 0x9d, 0x3c, 0x3a, 0x22, 0x74, 0x1d, 0x45, 0xdc, 0x55, 0xe1, 0x78, 0x08, 0xb7, 0x85, + 0xc9, 0xb1, 0xcb, 0x70, 0xc4, 0x9d, 0xb1, 0xcb, 0x4f, 0x65, 0x4c, 0x4c, 0xbb, 0x1b, 0xe1, 0x8b, + 0xb7, 0x92, 0xfa, 0xd6, 0xe5, 0xa7, 0xd6, 0x8f, 0x06, 0x98, 0x79, 0x30, 0xd1, 0x5d, 0x68, 0x8a, + 0x6b, 0x1d, 0xe2, 0xa7, 0x48, 0x34, 0xc4, 0xf1, 0xd0, 0x17, 0x55, 0x41, 0x87, 0xc3, 0x18, 0x73, + 0xe9, 0x5e, 0xd5, 0x4e, 0x4f, 0x22, 0xb3, 0x62, 0xf2, 0x6f, 0x55, 0x08, 0x35, 0x5b, 0x7e, 0x0b, + 0xc4, 0x43, 0x4e, 0x42, 0x2c, 0x2f, 0xac, 0xda, 0xea, 0x80, 0x7a, 0x50, 0xc7, 0x0e, 0x77, 0x47, + 0x32, 0xc3, 0x4d, 0xbb, 0x86, 0xdf, 0xb9, 0x23, 0xf4, 0x6b, 0xb8, 0x15, 0xd3, 0x84, 0x79, 0xd8, + 0xc9, 0xae, 0x6d, 0x48, 0x6e, 0x47, 0x51, 0x0f, 0xd4, 0xe5, 0x16, 0x54, 0x87, 0xc4, 0xef, 0x37, + 0x25, 0x30, 0xf3, 0xc5, 0x24, 0x3c, 0xf4, 0x6d, 0xc1, 0x44, 0xbf, 0x03, 0xc8, 0x2d, 0xf9, 0xfd, + 0xd6, 0x15, 0xa2, 0x66, 0x66, 0xd7, 0xb7, 0x3e, 0x40, 0x23, 0x35, 0xbf, 0x0c, 0xe6, 0x39, 0x0d, + 0x92, 0x30, 0x7f, 0x76, 0xd7, 0x6e, 0x29, 0xc2, 0xa1, 0x8f, 0xee, 0x81, 0xec, 0x73, 0x8e, 0xc8, + 0xaa, 0x8a, 0x7c, 0xa4, 0x44, 0xe8, 0x2f, 0x58, 0x76, 0x0a, 0x8f, 0xd2, 0x33, 0xa2, 0x5e, 0xdf, + 0xb4, 0xd3, 0x93, 0xf5, 0x43, 0x05, 0x6e, 0x15, 0xd3, 0x5d, 0x5c, 0x21, 0xad, 0x48, 0xac, 0x0c, + 0x69, 0x46, 0x9a, 0x3d, 0x2e, 0xe0, 0x55, 0xd1, 0xf1, 0xca, 0x54, 0x42, 0xea, 0xab, 0x0b, 0xba, + 0x4a, 0xe5, 0x15, 0xf5, 0xb1, 0xc8, 0xd6, 0x84, 0xf8, 0x12, 0xe0, 0xae, 0x2d, 0x3e, 0x05, 0x65, + 0x44, 0xfc, 0xb4, 0x7d, 0x88, 0x4f, 0xe9, 0x1e, 0x93, 0x76, 0x1b, 0x2a, 0x64, 0xea, 0x24, 0x42, + 0x16, 0x0a, 0x6a, 0x53, 0xc5, 0x41, 0x7c, 0xa3, 0x75, 0x68, 0x33, 0x3c, 0x0e, 0xd2, 0xec, 0x95, + 0xf0, 0x99, 0xb6, 0x4e, 0x42, 0x6b, 0x00, 0x1e, 0x0d, 0x02, 0xec, 0x49, 0x01, 0x53, 0x0a, 0x68, + 0x14, 0x91, 0x39, 0x9c, 0x07, 0x4e, 0x8c, 0xbd, 0x3e, 0xac, 0x1b, 0x9b, 0x75, 0xbb, 0xc1, 0x79, + 0x70, 0x8c, 0x3d, 0xf1, 0x8e, 0x24, 0xc6, 0xcc, 0x91, 0x0d, 0xa8, 0x2d, 0xf5, 0x5a, 0x82, 0x20, + 0xdb, 0xe4, 0x2a, 0xc0, 0x88, 0xd1, 0x64, 0xac, 0xb8, 0x9d, 0xf5, 0xaa, 0xe8, 0xc5, 0x92, 0x22, + 0xd9, 0x0f, 0xe0, 0x56, 0xfc, 0x31, 0x0c, 0x48, 0x74, 0xe6, 0x70, 0x97, 0x8d, 0x30, 0xef, 0x77, + 0x55, 0x0e, 0xa7, 0xd4, 0x77, 0x92, 0x68, 0x8d, 0x01, 0xed, 0x32, 0xec, 0x72, 0xfc, 0x19, 0x63, + 0xe7, 0xd3, 0xaa, 0x1b, 0x2d, 0x41, 0x83, 0x3a, 0xf8, 0xd2, 0x0b, 0xd2, 0x22, 0xab, 0xd3, 0xfd, + 0x4b, 0x2f, 0xb0, 0x1e, 0x41, 0xaf, 0x70, 0x63, 0xda, 0x98, 0x17, 0xa1, 0x8e, 0x19, 0xa3, 0x59, + 0x1b, 0x51, 0x07, 0xeb, 0x6f, 0x80, 0xde, 0x8f, 0xfd, 0x2f, 0xe1, 0x9e, 0xb5, 0x04, 0xbd, 0x82, + 0x69, 0xe5, 0x87, 0xf5, 0xad, 0x01, 0x68, 0x4f, 0x76, 0x83, 0x5f, 0x36, 0x88, 0x45, 0x7d, 0x8a, + 0x21, 0xa1, 0xba, 0x8d, 0xef, 0x72, 0x37, 0x1d, 
0x61, 0x1d, 0x12, 0x2b, 0xfb, 0x7b, 0x2e, 0x77, + 0xd3, 0x51, 0xc2, 0xb0, 0x97, 0x30, 0x31, 0xd5, 0x64, 0x12, 0xca, 0x51, 0x62, 0x67, 0x24, 0xf4, + 0x04, 0xee, 0x90, 0x51, 0x44, 0x19, 0x9e, 0x88, 0x39, 0x0a, 0xaa, 0x86, 0x14, 0x5e, 0x54, 0xdc, + 0x5c, 0x61, 0x5f, 0x22, 0xf7, 0x08, 0x7a, 0x85, 0x67, 0x5c, 0x0b, 0xf3, 0x7f, 0x0c, 0xe8, 0xbf, + 0xe0, 0x34, 0x24, 0x9e, 0x8d, 0x85, 0xf3, 0x85, 0xa7, 0x6f, 0x40, 0x57, 0xf4, 0xe3, 0xe9, 0xe7, + 0x77, 0x68, 0xe0, 0x4f, 0xe6, 0xdd, 0x3d, 0x10, 0x2d, 0xd9, 0xd1, 0x50, 0x68, 0xd2, 0xc0, 0x97, + 0x99, 0xb8, 0x01, 0xa2, 0x6f, 0x6a, 0xfa, 0x6a, 0xf2, 0x77, 0x22, 0x7c, 0x51, 0xd0, 0x17, 0x42, + 0x52, 0x5f, 0x35, 0xdb, 0x66, 0x84, 0x2f, 0x84, 0xbe, 0xb5, 0x0c, 0xf7, 0x66, 0xf8, 0x96, 0x86, + 0xeb, 0x3b, 0x03, 0x7a, 0x2f, 0xe2, 0x98, 0x8c, 0xa2, 0xbf, 0xca, 0xb6, 0x93, 0x39, 0xbd, 0x08, + 0x75, 0x8f, 0x26, 0x11, 0x97, 0xce, 0xd6, 0x6d, 0x75, 0x98, 0xaa, 0xc4, 0x4a, 0xa9, 0x12, 0xa7, + 0x6a, 0xb9, 0x5a, 0xae, 0x65, 0xad, 0x56, 0x6b, 0x85, 0x5a, 0xfd, 0x15, 0xb4, 0x45, 0x90, 0x1d, + 0x0f, 0x47, 0x1c, 0xb3, 0xb4, 0x53, 0x83, 0x20, 0xed, 0x4a, 0x8a, 0x10, 0xd0, 0x27, 0x8a, 0x6a, + 0xd6, 0x30, 0x9e, 0x8c, 0x93, 0xef, 0x0d, 0x58, 0x2c, 0x3e, 0x25, 0x8d, 0xd9, 0x95, 0x93, 0x45, + 0xb4, 0x32, 0x16, 0xa4, 0xef, 0x10, 0x9f, 0xa2, 0x29, 0x8c, 0x93, 0x93, 0x80, 0x78, 0x8e, 0x60, + 0x28, 0xff, 0x4d, 0x45, 0x79, 0xcf, 0x82, 0x09, 0x2a, 0x35, 0x1d, 0x15, 0x04, 0x35, 0x37, 0xe1, + 0xa7, 0xd9, 0x74, 0x11, 0xdf, 0x53, 0x48, 0x35, 0x6e, 0x42, 0xaa, 0x59, 0x46, 0x2a, 0xcf, 0xb4, + 0x96, 0x9e, 0x69, 0x4f, 0xa0, 0xa7, 0xd6, 0xd3, 0x62, 0xb8, 0x56, 0x01, 0xf2, 0x39, 0x12, 0xf7, + 0x0d, 0xd5, 0xcc, 0xb2, 0x41, 0x12, 0x5b, 0x7f, 0x02, 0xf3, 0x88, 0x2a, 0xbb, 0x31, 0xda, 0x06, + 0x33, 0xc8, 0x0e, 0x52, 0xb4, 0xbd, 0x83, 0x26, 0x35, 0x9e, 0xc9, 0xd9, 0x13, 0x21, 0xeb, 0x39, + 0xb4, 0x32, 0x72, 0x86, 0x99, 0x71, 0x15, 0x66, 0x95, 0x29, 0xcc, 0xac, 0x6f, 0x0c, 0x58, 0x2c, + 0xba, 0x9c, 0x86, 0xe5, 0x3d, 0x74, 0xf3, 0x2b, 0x9c, 0xd0, 0x1d, 0xa7, 0xbe, 0x6c, 0xeb, 0xbe, + 0x94, 0xd5, 0x72, 0x07, 0xe3, 0x57, 0xee, 0x58, 0xe5, 0x72, 0x27, 0xd0, 0x48, 0x83, 0x77, 0xb0, + 0x50, 0x12, 0x99, 0xb1, 0x9b, 0xfd, 0x56, 0xdf, 0xcd, 0x0a, 0xfb, 0x65, 0xae, 0xad, 0x2f, 0x6c, + 0xcf, 0xe0, 0xae, 0x6a, 0x07, 0xbb, 0x79, 0x0c, 0x33, 0xec, 0x8b, 0xa1, 0x36, 0xa6, 0x43, 0x6d, + 0x0d, 0xa0, 0x5f, 0x56, 0x4d, 0xcb, 0x6f, 0x04, 0x0b, 0xc7, 0xdc, 0xe5, 0x24, 0xe6, 0xc4, 0xcb, + 0x7f, 0x24, 0x4c, 0xe5, 0x86, 0x71, 0xd3, 0x44, 0x2c, 0xd7, 0xe1, 0x3c, 0x54, 0x39, 0xcf, 0xf2, + 0x57, 0x7c, 0x8a, 0x28, 0x20, 0xfd, 0xa6, 0x34, 0x06, 0x5f, 0xe0, 0x2a, 0x91, 0x0f, 0x9c, 0x72, + 0x37, 0x50, 0x1b, 0x47, 0x4d, 0x6e, 0x1c, 0xa6, 0xa4, 0xc8, 0x95, 0x43, 0x0d, 0x65, 0x5f, 0x71, + 0xeb, 0x6a, 0x1f, 0x11, 0x04, 0xc9, 0x5c, 0x05, 0x90, 0xa5, 0xaa, 0xaa, 0xac, 0xa1, 0x74, 0x05, + 0x65, 0x57, 0x10, 0xac, 0x35, 0x58, 0xf9, 0x33, 0xe6, 0x62, 0x77, 0x62, 0xbb, 0x34, 0x1a, 0x92, + 0x51, 0xc2, 0x5c, 0x2d, 0x14, 0xd6, 0xff, 0x0d, 0x58, 0xbd, 0x42, 0x20, 0x7d, 0x70, 0x1f, 0x9a, + 0xa1, 0x1b, 0x73, 0xcc, 0xb2, 0x2a, 0xc9, 0x8e, 0xd3, 0x50, 0x54, 0x6e, 0x82, 0xa2, 0x5a, 0x82, + 0x62, 0x09, 0x1a, 0xa1, 0x7b, 0xe9, 0x84, 0x27, 0xe9, 0x72, 0x54, 0x0f, 0xdd, 0xcb, 0x57, 0x27, + 0xb2, 0xb3, 0x11, 0xe6, 0x9c, 0x24, 0xde, 0x19, 0xe6, 0x71, 0xde, 0xd9, 0x08, 0x7b, 0xa9, 0x28, + 0x3b, 0xff, 0x6b, 0x41, 0xe7, 0x18, 0xbb, 0x17, 0x18, 0xfb, 0xd2, 0x73, 0x34, 0xca, 0x2a, 0xa6, + 0xf8, 0x1b, 0x14, 0x3d, 0x98, 0x2e, 0x8d, 0x99, 0x3f, 0x7a, 0x07, 0x0f, 0x6f, 0x12, 0x4b, 0x93, + 0x6f, 0x0e, 0xbd, 0x86, 0xb6, 0xf6, 0x23, 0x0f, 0xad, 0x68, 0x8a, 0xa5, 
0xdf, 0xae, 0x83, 0xd5, + 0x2b, 0xb8, 0x99, 0xb5, 0x6d, 0x03, 0x1d, 0x41, 0x5b, 0xdb, 0x4d, 0x74, 0x7b, 0xe5, 0x25, 0x49, + 0xb7, 0x37, 0x63, 0xa1, 0xb1, 0xe6, 0x84, 0x35, 0x6d, 0xc3, 0xd0, 0xad, 0x95, 0x77, 0x1a, 0xdd, + 0xda, 0xac, 0xb5, 0x44, 0x5a, 0xd3, 0x06, 0xba, 0x6e, 0xad, 0xbc, 0xae, 0xe8, 0xd6, 0x66, 0x6c, + 0x01, 0xd6, 0x1c, 0xfa, 0x00, 0xbd, 0x63, 0xce, 0xb0, 0x1b, 0x4e, 0xd8, 0x53, 0x08, 0xfe, 0x0c, + 0xab, 0x9b, 0xc6, 0xb6, 0x81, 0xfe, 0x01, 0x0b, 0xa5, 0x71, 0x8d, 0xac, 0x89, 0xe6, 0x55, 0x7b, + 0xc6, 0x60, 0xe3, 0x5a, 0x99, 0xdc, 0xf3, 0x37, 0xd0, 0xd1, 0xa7, 0x24, 0xd2, 0x9c, 0x9a, 0xb1, + 0x08, 0x0c, 0xd6, 0xae, 0x62, 0xeb, 0x06, 0xf5, 0x46, 0xad, 0x1b, 0x9c, 0x31, 0xaa, 0x74, 0x83, + 0xb3, 0xfa, 0xbb, 0x35, 0x87, 0xfe, 0x0e, 0xf3, 0xd3, 0x0d, 0x13, 0xdd, 0x9f, 0x86, 0xae, 0xd4, + 0x87, 0x07, 0xd6, 0x75, 0x22, 0xb9, 0xf1, 0x43, 0x80, 0x49, 0x1f, 0x44, 0xcb, 0x13, 0x9d, 0x52, + 0x1f, 0x1e, 0xac, 0xcc, 0x66, 0xe6, 0xa6, 0xfe, 0x09, 0x4b, 0x33, 0x9b, 0x0d, 0xd2, 0x0a, 0xf0, + 0xba, 0x76, 0x35, 0xf8, 0xcd, 0x8d, 0x72, 0xd9, 0x5d, 0x2f, 0xd7, 0x60, 0x3e, 0x56, 0x2d, 0x62, + 0x18, 0x6f, 0x79, 0x01, 0xc1, 0x11, 0x7f, 0x09, 0x52, 0xe3, 0x2d, 0xa3, 0x9c, 0x9e, 0x34, 0xe4, + 0x1f, 0x63, 0xbf, 0xff, 0x29, 0x00, 0x00, 0xff, 0xff, 0x83, 0xa2, 0xc7, 0xb2, 0x27, 0x13, 0x00, + 0x00, } diff --git a/weed/pb/filer_pb/filer_pb_helper.go b/weed/pb/filer_pb/filer_pb_helper.go index b2ffacc01..0f370ffc4 100644 --- a/weed/pb/filer_pb/filer_pb_helper.go +++ b/weed/pb/filer_pb/filer_pb_helper.go @@ -73,11 +73,11 @@ func AfterEntryDeserialization(chunks []*FileChunk) { func CreateEntry(ctx context.Context, client SeaweedFilerClient, request *CreateEntryRequest) error { resp, err := client.CreateEntry(ctx, request) - if err == nil && resp.Error != "" { - return fmt.Errorf("CreateEntry: %v", resp.Error) - } if err != nil { return fmt.Errorf("CreateEntry: %v", err) } + if resp.Error != "" { + return fmt.Errorf("CreateEntry: %v", resp.Error) + } return err } diff --git a/weed/replication/sink/filersink/fetch_write.go b/weed/replication/sink/filersink/fetch_write.go index fe1e87b6b..50f3f64d4 100644 --- a/weed/replication/sink/filersink/fetch_write.go +++ b/weed/replication/sink/filersink/fetch_write.go @@ -80,6 +80,9 @@ func (fs *FilerSink) fetchAndWrite(ctx context.Context, sourceChunk *filer_pb.Fi glog.V(0).Infof("assign volume failure %v: %v", request, err) return err } + if resp.Error != "" { + return fmt.Errorf("assign volume failure %v: %v", request, resp.Error) + } fileId, host, auth = resp.FileId, resp.Url, security.EncodedJwt(resp.Auth) diff --git a/weed/server/filer_grpc_server.go b/weed/server/filer_grpc_server.go index a1360e92f..9bbabec26 100644 --- a/weed/server/filer_grpc_server.go +++ b/weed/server/filer_grpc_server.go @@ -280,11 +280,11 @@ func (fs *FilerServer) AssignVolume(ctx context.Context, req *filer_pb.AssignVol assignResult, err := operation.Assign(fs.filer.GetMaster(), fs.grpcDialOption, assignRequest, altRequest) if err != nil { glog.V(3).Infof("AssignVolume: %v", err) - return nil, fmt.Errorf("assign volume: %v", err) + return &filer_pb.AssignVolumeResponse{Error: fmt.Sprintf("assign volume: %v", err)}, nil } if assignResult.Error != "" { glog.V(3).Infof("AssignVolume error: %v", assignResult.Error) - return nil, fmt.Errorf("assign volume result: %v", assignResult.Error) + return &filer_pb.AssignVolumeResponse{Error: fmt.Sprintf("assign volume result: %v", assignResult.Error)}, nil } return &filer_pb.AssignVolumeResponse{ @@ -295,7 +295,7 @@ func (fs *FilerServer) 
AssignVolume(ctx context.Context, req *filer_pb.AssignVol Auth: string(assignResult.Auth), Collection: collection, Replication: replication, - }, err + }, nil } func (fs *FilerServer) DeleteCollection(ctx context.Context, req *filer_pb.DeleteCollectionRequest) (resp *filer_pb.DeleteCollectionResponse, err error) { diff --git a/weed/server/webdav_server.go b/weed/server/webdav_server.go index 8b0f09edc..7a7ab7a4b 100644 --- a/weed/server/webdav_server.go +++ b/weed/server/webdav_server.go @@ -400,6 +400,9 @@ func (f *WebDavFile) Write(buf []byte) (int, error) { glog.V(0).Infof("assign volume failure %v: %v", request, err) return err } + if resp.Error != "" { + return fmt.Errorf("assign volume failure %v: %v", request, resp.Error) + } fileId, host, auth = resp.FileId, resp.Url, security.EncodedJwt(resp.Auth) collection, replication = resp.Collection, resp.Replication From bd3254b53f78b8f42e31ea50cbf2e0d7e87b2bbc Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Tue, 25 Feb 2020 17:24:08 -0800 Subject: [PATCH 0153/2432] adjust logging --- weed/filesys/wfs.go | 2 +- weed/pb/filer_pb/filer_pb_helper.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/weed/filesys/wfs.go b/weed/filesys/wfs.go index 4807e367b..37bb92acc 100644 --- a/weed/filesys/wfs.go +++ b/weed/filesys/wfs.go @@ -99,7 +99,7 @@ func (wfs *WFS) WithFilerClient(ctx context.Context, fn func(context.Context, fi return nil } if strings.Contains(err.Error(), "context canceled") { - glog.V(2).Infoln("retry context canceled request...") + glog.V(0).Infoln("retry context canceled request...") return util.WithCachedGrpcClient(context.Background(), func(ctx2 context.Context, grpcConnection *grpc.ClientConn) error { client := filer_pb.NewSeaweedFilerClient(grpcConnection) return fn(ctx2, client) diff --git a/weed/pb/filer_pb/filer_pb_helper.go b/weed/pb/filer_pb/filer_pb_helper.go index 0f370ffc4..349f07320 100644 --- a/weed/pb/filer_pb/filer_pb_helper.go +++ b/weed/pb/filer_pb/filer_pb_helper.go @@ -77,7 +77,7 @@ func CreateEntry(ctx context.Context, client SeaweedFilerClient, request *Create return fmt.Errorf("CreateEntry: %v", err) } if resp.Error != "" { - return fmt.Errorf("CreateEntry: %v", resp.Error) + return fmt.Errorf("CreateEntry : %v", resp.Error) } return err } From 892e726eb9c2427634c46f8ae9b7bcf0b6d1b082 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Tue, 25 Feb 2020 21:50:12 -0800 Subject: [PATCH 0154/2432] avoid reusing context object fix https://github.com/chrislusf/seaweedfs/issues/1182 --- weed/command/benchmark.go | 6 +-- weed/command/filer_copy.go | 30 ++++++------ weed/command/s3.go | 2 +- weed/filer2/filer.go | 2 +- weed/filer2/filer_client_util.go | 20 ++++---- weed/filer2/filer_delete_entry.go | 8 +-- weed/filesys/dir.go | 40 +++++++-------- weed/filesys/dir_link.go | 4 +- weed/filesys/dir_rename.go | 4 +- weed/filesys/dirty_page.go | 24 ++++----- weed/filesys/file.go | 14 +++--- weed/filesys/filehandle.go | 14 +++--- weed/filesys/wfs.go | 18 ++----- weed/filesys/wfs_deletion.go | 10 ++-- weed/filesys/xattr.go | 6 +-- weed/operation/assign_file_id.go | 2 +- weed/operation/delete_content.go | 5 +- weed/operation/grpc_client.go | 25 +++++----- weed/operation/lookup.go | 4 +- weed/operation/stats.go | 4 +- weed/operation/sync_volume.go | 4 +- weed/operation/tail_volume.go | 7 +-- weed/pb/filer_pb/filer_pb_helper.go | 9 ++-- weed/replication/replicator.go | 10 ++-- weed/replication/sink/azuresink/azure_sink.go | 14 +++--- weed/replication/sink/b2sink/b2_sink.go | 16 +++--- 
.../replication/sink/filersink/fetch_write.go | 22 ++++----- weed/replication/sink/filersink/filer_sink.go | 28 +++++------ weed/replication/sink/gcssink/gcs_sink.go | 12 ++--- weed/replication/sink/replication_sink.go | 7 ++- weed/replication/sink/s3sink/s3_sink.go | 11 +++-- weed/replication/sink/s3sink/s3_write.go | 2 +- weed/replication/source/filer_source.go | 16 +++--- weed/s3api/filer_util.go | 16 +++--- weed/s3api/s3api_bucket_handlers.go | 4 +- weed/s3api/s3api_handlers.go | 15 +++--- weed/s3api/s3api_objects_list_handlers.go | 2 +- weed/server/filer_grpc_server.go | 4 +- weed/server/filer_server.go | 4 +- weed/server/master_grpc_server_collection.go | 9 ++-- weed/server/master_server.go | 3 +- weed/server/master_server_handlers_admin.go | 4 +- weed/server/volume_grpc_copy.go | 4 +- weed/server/volume_grpc_erasure_coding.go | 2 +- weed/server/volume_grpc_file.go | 3 +- weed/server/volume_server_handlers_read.go | 6 +-- weed/server/webdav_server.go | 34 ++++++------- weed/shell/command_bucket_create.go | 8 ++- weed/shell/command_bucket_delete.go | 8 ++- weed/shell/command_bucket_list.go | 8 ++- weed/shell/command_collection_delete.go | 5 +- weed/shell/command_collection_list.go | 5 +- weed/shell/command_ec_balance.go | 46 +++++++++-------- weed/shell/command_ec_common.go | 43 ++++++++-------- weed/shell/command_ec_decode.go | 43 ++++++++-------- weed/shell/command_ec_encode.go | 49 +++++++++---------- weed/shell/command_ec_rebuild.go | 29 +++++------ weed/shell/command_ec_test.go | 2 +- weed/shell/command_fs_cat.go | 8 ++- weed/shell/command_fs_cd.go | 5 +- weed/shell/command_fs_du.go | 23 ++++----- weed/shell/command_fs_ls.go | 7 +-- weed/shell/command_fs_meta_cat.go | 6 +-- weed/shell/command_fs_meta_load.go | 10 ++-- weed/shell/command_fs_meta_save.go | 2 +- weed/shell/command_fs_mv.go | 8 ++- weed/shell/command_fs_tree.go | 2 +- weed/shell/command_volume_balance.go | 8 ++- .../command_volume_configure_replication.go | 9 ++-- weed/shell/command_volume_copy.go | 4 +- weed/shell/command_volume_delete.go | 4 +- weed/shell/command_volume_fix_replication.go | 9 ++-- weed/shell/command_volume_list.go | 5 +- weed/shell/command_volume_mount.go | 9 ++-- weed/shell/command_volume_move.go | 29 ++++++----- weed/shell/command_volume_tier_download.go | 21 ++++---- weed/shell/command_volume_tier_upload.go | 19 ++++--- weed/shell/command_volume_unmount.go | 9 ++-- weed/shell/commands.go | 12 ++--- weed/storage/store_ec.go | 36 +++++++------- weed/storage/store_ec_delete.go | 20 ++++---- weed/storage/volume_backup.go | 4 +- weed/topology/allocate_volume.go | 2 +- weed/topology/topology_vacuum.go | 16 +++--- weed/util/grpc_client_server.go | 8 +-- weed/wdclient/masterclient.go | 18 +++---- 86 files changed, 501 insertions(+), 568 deletions(-) diff --git a/weed/command/benchmark.go b/weed/command/benchmark.go index 9adcb6f33..c83db02b4 100644 --- a/weed/command/benchmark.go +++ b/weed/command/benchmark.go @@ -127,7 +127,7 @@ func runBenchmark(cmd *Command, args []string) bool { defer pprof.StopCPUProfile() } - b.masterClient = wdclient.NewMasterClient(context.Background(), b.grpcDialOption, "client", strings.Split(*b.masters, ",")) + b.masterClient = wdclient.NewMasterClient(b.grpcDialOption, "client", strings.Split(*b.masters, ",")) go b.masterClient.KeepConnectedToMaster() b.masterClient.WaitUntilConnected() @@ -314,8 +314,8 @@ func readFiles(fileIdLineChan chan string, s *stat) { } func grpcFileGet(volumeServer, fid string, grpcDialOption grpc.DialOption) (bytesRead int, err error) { - err = 
operation.WithVolumeServerClient(volumeServer, grpcDialOption, func(ctx context.Context, client volume_server_pb.VolumeServerClient) error { - fileGetClient, err := client.FileGet(ctx, &volume_server_pb.FileGetRequest{FileId: fid}) + err = operation.WithVolumeServerClient(volumeServer, grpcDialOption, func(client volume_server_pb.VolumeServerClient) error { + fileGetClient, err := client.FileGet(context.Background(), &volume_server_pb.FileGetRequest{FileId: fid}) if err != nil { return err } diff --git a/weed/command/filer_copy.go b/weed/command/filer_copy.go index 6470cbbca..2edceb715 100644 --- a/weed/command/filer_copy.go +++ b/weed/command/filer_copy.go @@ -161,7 +161,7 @@ func runCopy(cmd *Command, args []string) bool { } func readFilerConfiguration(ctx context.Context, grpcDialOption grpc.DialOption, filerGrpcAddress string) (masters []string, collection, replication string, maxMB uint32, err error) { - err = withFilerClient(ctx, filerGrpcAddress, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error { + err = withFilerClient(filerGrpcAddress, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error { resp, err := client.GetFilerConfiguration(ctx, &filer_pb.GetFilerConfigurationRequest{}) if err != nil { return fmt.Errorf("get filer %s configuration: %v", filerGrpcAddress, err) @@ -257,13 +257,13 @@ func (worker *FileCopyWorker) doEachCopy(ctx context.Context, task FileCopyTask) } if chunkCount == 1 { - return worker.uploadFileAsOne(ctx, task, f) + return worker.uploadFileAsOne(task, f) } - return worker.uploadFileInChunks(ctx, task, f, chunkCount, chunkSize) + return worker.uploadFileInChunks(task, f, chunkCount, chunkSize) } -func (worker *FileCopyWorker) uploadFileAsOne(ctx context.Context, task FileCopyTask, f *os.File) error { +func (worker *FileCopyWorker) uploadFileAsOne(task FileCopyTask, f *os.File) error { // upload the file content fileName := filepath.Base(f.Name()) @@ -276,7 +276,7 @@ func (worker *FileCopyWorker) uploadFileAsOne(ctx context.Context, task FileCopy if task.fileSize > 0 { // assign a volume - err := withFilerClient(ctx, worker.filerGrpcAddress, worker.options.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error { + err := withFilerClient(worker.filerGrpcAddress, worker.options.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error { request := &filer_pb.AssignVolumeRequest{ Count: 1, @@ -286,7 +286,7 @@ func (worker *FileCopyWorker) uploadFileAsOne(ctx context.Context, task FileCopy ParentPath: task.destinationUrlPath, } - assignResult, assignError = client.AssignVolume(ctx, request) + assignResult, assignError = client.AssignVolume(context.Background(), request) if assignError != nil { return fmt.Errorf("assign volume failure %v: %v", request, assignError) } @@ -321,7 +321,7 @@ func (worker *FileCopyWorker) uploadFileAsOne(ctx context.Context, task FileCopy fmt.Printf("copied %s => http://%s%s%s\n", fileName, worker.filerHost, task.destinationUrlPath, fileName) } - if err := withFilerClient(ctx, worker.filerGrpcAddress, worker.options.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error { + if err := withFilerClient(worker.filerGrpcAddress, worker.options.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error { request := &filer_pb.CreateEntryRequest{ Directory: task.destinationUrlPath, Entry: &filer_pb.Entry{ @@ -342,7 +342,7 @@ func (worker *FileCopyWorker) uploadFileAsOne(ctx context.Context, task FileCopy }, } - if err := filer_pb.CreateEntry(ctx, client, request); err != nil { + if err := 
filer_pb.CreateEntry(client, request); err != nil { return fmt.Errorf("update fh: %v", err) } return nil @@ -353,7 +353,7 @@ func (worker *FileCopyWorker) uploadFileAsOne(ctx context.Context, task FileCopy return nil } -func (worker *FileCopyWorker) uploadFileInChunks(ctx context.Context, task FileCopyTask, f *os.File, chunkCount int, chunkSize int64) error { +func (worker *FileCopyWorker) uploadFileInChunks(task FileCopyTask, f *os.File, chunkCount int, chunkSize int64) error { fileName := filepath.Base(f.Name()) mimeType := detectMimeType(f) @@ -377,7 +377,7 @@ func (worker *FileCopyWorker) uploadFileInChunks(ctx context.Context, task FileC // assign a volume var assignResult *filer_pb.AssignVolumeResponse var assignError error - err := withFilerClient(ctx, worker.filerGrpcAddress, worker.options.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error { + err := withFilerClient(worker.filerGrpcAddress, worker.options.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error { request := &filer_pb.AssignVolumeRequest{ Count: 1, Replication: *worker.options.replication, @@ -386,7 +386,7 @@ func (worker *FileCopyWorker) uploadFileInChunks(ctx context.Context, task FileC ParentPath: task.destinationUrlPath, } - assignResult, assignError = client.AssignVolume(ctx, request) + assignResult, assignError = client.AssignVolume(context.Background(), request) if assignError != nil { return fmt.Errorf("assign volume failure %v: %v", request, assignError) } @@ -449,7 +449,7 @@ func (worker *FileCopyWorker) uploadFileInChunks(ctx context.Context, task FileC return uploadError } - if err := withFilerClient(ctx, worker.filerGrpcAddress, worker.options.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error { + if err := withFilerClient(worker.filerGrpcAddress, worker.options.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error { request := &filer_pb.CreateEntryRequest{ Directory: task.destinationUrlPath, Entry: &filer_pb.Entry{ @@ -470,7 +470,7 @@ func (worker *FileCopyWorker) uploadFileInChunks(ctx context.Context, task FileC }, } - if err := filer_pb.CreateEntry(ctx, client, request); err != nil { + if err := filer_pb.CreateEntry(client, request); err != nil { return fmt.Errorf("update fh: %v", err) } return nil @@ -499,9 +499,9 @@ func detectMimeType(f *os.File) string { return mimeType } -func withFilerClient(ctx context.Context, filerAddress string, grpcDialOption grpc.DialOption, fn func(filer_pb.SeaweedFilerClient) error) error { +func withFilerClient(filerAddress string, grpcDialOption grpc.DialOption, fn func(filer_pb.SeaweedFilerClient) error) error { - return util.WithCachedGrpcClient(ctx, func(ctx context.Context, clientConn *grpc.ClientConn) error { + return util.WithCachedGrpcClient(func(clientConn *grpc.ClientConn) error { client := filer_pb.NewSeaweedFilerClient(clientConn) return fn(client) }, filerAddress, grpcDialOption) diff --git a/weed/command/s3.go b/weed/command/s3.go index 0eff76d32..abd32e545 100644 --- a/weed/command/s3.go +++ b/weed/command/s3.go @@ -128,7 +128,7 @@ func (s3opt *S3Options) startS3Server() bool { grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.client") ctx := context.Background() - err = withFilerClient(ctx, filerGrpcAddress, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error { + err = withFilerClient(filerGrpcAddress, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error { resp, err := client.GetFilerConfiguration(ctx, &filer_pb.GetFilerConfigurationRequest{}) if err != nil { return 
fmt.Errorf("get filer %s configuration: %v", filerGrpcAddress, err) diff --git a/weed/filer2/filer.go b/weed/filer2/filer.go index b84a67b22..181a250f2 100644 --- a/weed/filer2/filer.go +++ b/weed/filer2/filer.go @@ -37,7 +37,7 @@ type Filer struct { func NewFiler(masters []string, grpcDialOption grpc.DialOption, bucketFolder string) *Filer { f := &Filer{ directoryCache: ccache.New(ccache.Configure().MaxSize(1000).ItemsToPrune(100)), - MasterClient: wdclient.NewMasterClient(context.Background(), grpcDialOption, "filer", masters), + MasterClient: wdclient.NewMasterClient(grpcDialOption, "filer", masters), fileIdDeletionQueue: util.NewUnboundedQueue(), GrpcDialOption: grpcDialOption, } diff --git a/weed/filer2/filer_client_util.go b/weed/filer2/filer_client_util.go index af804b909..9e03d60c4 100644 --- a/weed/filer2/filer_client_util.go +++ b/weed/filer2/filer_client_util.go @@ -22,10 +22,10 @@ func VolumeId(fileId string) string { } type FilerClient interface { - WithFilerClient(ctx context.Context, fn func(context.Context, filer_pb.SeaweedFilerClient) error) error + WithFilerClient(fn func(filer_pb.SeaweedFilerClient) error) error } -func ReadIntoBuffer(ctx context.Context, filerClient FilerClient, fullFilePath FullPath, buff []byte, chunkViews []*ChunkView, baseOffset int64) (totalRead int64, err error) { +func ReadIntoBuffer(filerClient FilerClient, fullFilePath FullPath, buff []byte, chunkViews []*ChunkView, baseOffset int64) (totalRead int64, err error) { var vids []string for _, chunkView := range chunkViews { vids = append(vids, VolumeId(chunkView.FileId)) @@ -33,10 +33,10 @@ func ReadIntoBuffer(ctx context.Context, filerClient FilerClient, fullFilePath F vid2Locations := make(map[string]*filer_pb.Locations) - err = filerClient.WithFilerClient(ctx, func(ctx context.Context, client filer_pb.SeaweedFilerClient) error { + err = filerClient.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { glog.V(4).Infof("read fh lookup volume id locations: %v", vids) - resp, err := client.LookupVolume(ctx, &filer_pb.LookupVolumeRequest{ + resp, err := client.LookupVolume(context.Background(), &filer_pb.LookupVolumeRequest{ VolumeIds: vids, }) if err != nil { @@ -93,11 +93,11 @@ func ReadIntoBuffer(ctx context.Context, filerClient FilerClient, fullFilePath F return } -func GetEntry(ctx context.Context, filerClient FilerClient, fullFilePath FullPath) (entry *filer_pb.Entry, err error) { +func GetEntry(filerClient FilerClient, fullFilePath FullPath) (entry *filer_pb.Entry, err error) { dir, name := fullFilePath.DirAndName() - err = filerClient.WithFilerClient(ctx, func(ctx context.Context, client filer_pb.SeaweedFilerClient) error { + err = filerClient.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { request := &filer_pb.LookupDirectoryEntryRequest{ Directory: dir, @@ -105,7 +105,7 @@ func GetEntry(ctx context.Context, filerClient FilerClient, fullFilePath FullPat } // glog.V(3).Infof("read %s request: %v", fullFilePath, request) - resp, err := client.LookupDirectoryEntry(ctx, request) + resp, err := client.LookupDirectoryEntry(context.Background(), request) if err != nil { if err == ErrNotFound || strings.Contains(err.Error(), ErrNotFound.Error()) { return nil @@ -126,9 +126,9 @@ func GetEntry(ctx context.Context, filerClient FilerClient, fullFilePath FullPat return } -func ReadDirAllEntries(ctx context.Context, filerClient FilerClient, fullDirPath FullPath, prefix string, fn func(entry *filer_pb.Entry, isLast bool)) (err error) { +func ReadDirAllEntries(filerClient 
FilerClient, fullDirPath FullPath, prefix string, fn func(entry *filer_pb.Entry, isLast bool)) (err error) { - err = filerClient.WithFilerClient(ctx, func(ctx context.Context, client filer_pb.SeaweedFilerClient) error { + err = filerClient.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { lastEntryName := "" @@ -140,7 +140,7 @@ func ReadDirAllEntries(ctx context.Context, filerClient FilerClient, fullDirPath } glog.V(3).Infof("read directory: %v", request) - stream, err := client.ListEntries(ctx, request) + stream, err := client.ListEntries(context.Background(), request) if err != nil { return fmt.Errorf("list %s: %v", fullDirPath, err) } diff --git a/weed/filer2/filer_delete_entry.go b/weed/filer2/filer_delete_entry.go index af88d1512..d0792ac66 100644 --- a/weed/filer2/filer_delete_entry.go +++ b/weed/filer2/filer_delete_entry.go @@ -45,7 +45,7 @@ func (f *Filer) DeleteEntryMetaAndData(ctx context.Context, p FullPath, isRecurs } if isCollection { collectionName := entry.Name() - f.doDeleteCollection(ctx, collectionName) + f.doDeleteCollection(collectionName) f.deleteBucket(collectionName) } @@ -110,10 +110,10 @@ func (f *Filer) doDeleteEntryMetaAndData(ctx context.Context, entry *Entry, shou return nil } -func (f *Filer) doDeleteCollection(ctx context.Context, collectionName string) (err error) { +func (f *Filer) doDeleteCollection(collectionName string) (err error) { - return f.MasterClient.WithClient(ctx, func(client master_pb.SeaweedClient) error { - _, err := client.CollectionDelete(ctx, &master_pb.CollectionDeleteRequest{ + return f.MasterClient.WithClient(func(client master_pb.SeaweedClient) error { + _, err := client.CollectionDelete(context.Background(), &master_pb.CollectionDeleteRequest{ Name: collectionName, }) if err != nil { diff --git a/weed/filesys/dir.go b/weed/filesys/dir.go index 2a4f6fa75..c897ed827 100644 --- a/weed/filesys/dir.go +++ b/weed/filesys/dir.go @@ -126,10 +126,10 @@ func (dir *Dir) Create(ctx context.Context, req *fuse.CreateRequest, }, OExcl: req.Flags&fuse.OpenExclusive != 0, } - glog.V(1).Infof("create: %v", req.String()) + glog.V(1).Infof("create %s/%s: %v", dir.Path, req.Name, req.Flags) - if err := dir.wfs.WithFilerClient(ctx, func(ctx context.Context, client filer_pb.SeaweedFilerClient) error { - if err := filer_pb.CreateEntry(ctx, client, request); err != nil { + if err := dir.wfs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { + if err := filer_pb.CreateEntry(client, request); err != nil { if strings.Contains(err.Error(), "EEXIST") { return fuse.EEXIST } @@ -167,7 +167,7 @@ func (dir *Dir) Mkdir(ctx context.Context, req *fuse.MkdirRequest) (fs.Node, err }, } - err := dir.wfs.WithFilerClient(ctx, func(ctx context.Context, client filer_pb.SeaweedFilerClient) error { + err := dir.wfs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { request := &filer_pb.CreateEntryRequest{ Directory: dir.Path, @@ -175,7 +175,7 @@ func (dir *Dir) Mkdir(ctx context.Context, req *fuse.MkdirRequest) (fs.Node, err } glog.V(1).Infof("mkdir: %v", request) - if err := filer_pb.CreateEntry(ctx, client, request); err != nil { + if err := filer_pb.CreateEntry(client, request); err != nil { glog.V(0).Infof("mkdir %s/%s: %v", dir.Path, req.Name, err) return err } @@ -200,7 +200,7 @@ func (dir *Dir) Lookup(ctx context.Context, req *fuse.LookupRequest, resp *fuse. 
if entry == nil { // glog.V(3).Infof("dir Lookup cache miss %s", fullFilePath) - entry, err = filer2.GetEntry(ctx, dir.wfs, fullFilePath) + entry, err = filer2.GetEntry(dir.wfs, fullFilePath) if err != nil { glog.V(1).Infof("dir GetEntry %s: %v", fullFilePath, err) return nil, fuse.ENOENT @@ -239,7 +239,7 @@ func (dir *Dir) ReadDirAll(ctx context.Context) (ret []fuse.Dirent, err error) { cacheTtl := 5 * time.Minute - readErr := filer2.ReadDirAllEntries(ctx, dir.wfs, filer2.FullPath(dir.Path), "", func(entry *filer_pb.Entry, isLast bool) { + readErr := filer2.ReadDirAllEntries(dir.wfs, filer2.FullPath(dir.Path), "", func(entry *filer_pb.Entry, isLast bool) { fullpath := filer2.NewFullPath(dir.Path, entry.Name) inode := fullpath.AsInode() if entry.IsDirectory { @@ -262,17 +262,17 @@ func (dir *Dir) ReadDirAll(ctx context.Context) (ret []fuse.Dirent, err error) { func (dir *Dir) Remove(ctx context.Context, req *fuse.RemoveRequest) error { if !req.Dir { - return dir.removeOneFile(ctx, req) + return dir.removeOneFile(req) } - return dir.removeFolder(ctx, req) + return dir.removeFolder(req) } -func (dir *Dir) removeOneFile(ctx context.Context, req *fuse.RemoveRequest) error { +func (dir *Dir) removeOneFile(req *fuse.RemoveRequest) error { filePath := filer2.NewFullPath(dir.Path, req.Name) - entry, err := filer2.GetEntry(ctx, dir.wfs, filePath) + entry, err := filer2.GetEntry(dir.wfs, filePath) if err != nil { return err } @@ -280,11 +280,11 @@ func (dir *Dir) removeOneFile(ctx context.Context, req *fuse.RemoveRequest) erro return nil } - dir.wfs.deleteFileChunks(ctx, entry.Chunks) + dir.wfs.deleteFileChunks(entry.Chunks) dir.wfs.cacheDelete(filePath) - return dir.wfs.WithFilerClient(ctx, func(ctx context.Context, client filer_pb.SeaweedFilerClient) error { + return dir.wfs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { request := &filer_pb.DeleteEntryRequest{ Directory: dir.Path, @@ -293,7 +293,7 @@ func (dir *Dir) removeOneFile(ctx context.Context, req *fuse.RemoveRequest) erro } glog.V(3).Infof("remove file: %v", request) - _, err := client.DeleteEntry(ctx, request) + _, err := client.DeleteEntry(context.Background(), request) if err != nil { glog.V(3).Infof("not found remove file %s/%s: %v", dir.Path, req.Name, err) return fuse.ENOENT @@ -304,11 +304,11 @@ func (dir *Dir) removeOneFile(ctx context.Context, req *fuse.RemoveRequest) erro } -func (dir *Dir) removeFolder(ctx context.Context, req *fuse.RemoveRequest) error { +func (dir *Dir) removeFolder(req *fuse.RemoveRequest) error { dir.wfs.cacheDelete(filer2.NewFullPath(dir.Path, req.Name)) - return dir.wfs.WithFilerClient(ctx, func(ctx context.Context, client filer_pb.SeaweedFilerClient) error { + return dir.wfs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { request := &filer_pb.DeleteEntryRequest{ Directory: dir.Path, @@ -317,7 +317,7 @@ func (dir *Dir) removeFolder(ctx context.Context, req *fuse.RemoveRequest) error } glog.V(3).Infof("remove directory entry: %v", request) - _, err := client.DeleteEntry(ctx, request) + _, err := client.DeleteEntry(context.Background(), request) if err != nil { glog.V(3).Infof("not found remove %s/%s: %v", dir.Path, req.Name, err) return fuse.ENOENT @@ -419,7 +419,7 @@ func (dir *Dir) Forget() { func (dir *Dir) maybeLoadEntry(ctx context.Context) error { if dir.entry == nil { parentDirPath, name := filer2.FullPath(dir.Path).DirAndName() - entry, err := dir.wfs.maybeLoadEntry(ctx, parentDirPath, name) + entry, err := dir.wfs.maybeLoadEntry(parentDirPath, name) if err != nil 
{ return err } @@ -432,7 +432,7 @@ func (dir *Dir) saveEntry(ctx context.Context) error { parentDir, name := filer2.FullPath(dir.Path).DirAndName() - return dir.wfs.WithFilerClient(ctx, func(ctx context.Context, client filer_pb.SeaweedFilerClient) error { + return dir.wfs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { request := &filer_pb.UpdateEntryRequest{ Directory: parentDir, @@ -440,7 +440,7 @@ func (dir *Dir) saveEntry(ctx context.Context) error { } glog.V(1).Infof("save dir entry: %v", request) - _, err := client.UpdateEntry(ctx, request) + _, err := client.UpdateEntry(context.Background(), request) if err != nil { glog.V(0).Infof("UpdateEntry dir %s/%s: %v", parentDir, name, err) return fuse.EIO diff --git a/weed/filesys/dir_link.go b/weed/filesys/dir_link.go index 8b7ec7e89..61ed04c26 100644 --- a/weed/filesys/dir_link.go +++ b/weed/filesys/dir_link.go @@ -35,8 +35,8 @@ func (dir *Dir) Symlink(ctx context.Context, req *fuse.SymlinkRequest) (fs.Node, }, } - err := dir.wfs.WithFilerClient(ctx, func(ctx context.Context, client filer_pb.SeaweedFilerClient) error { - if err := filer_pb.CreateEntry(ctx, client, request); err != nil { + err := dir.wfs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { + if err := filer_pb.CreateEntry(client, request); err != nil { glog.V(0).Infof("symlink %s/%s: %v", dir.Path, req.NewName, err) return fuse.EIO } diff --git a/weed/filesys/dir_rename.go b/weed/filesys/dir_rename.go index 4eb3c15b5..9b0c0fe6e 100644 --- a/weed/filesys/dir_rename.go +++ b/weed/filesys/dir_rename.go @@ -15,7 +15,7 @@ func (dir *Dir) Rename(ctx context.Context, req *fuse.RenameRequest, newDirector newDir := newDirectory.(*Dir) glog.V(4).Infof("dir Rename %s/%s => %s/%s", dir.Path, req.OldName, newDir.Path, req.NewName) - err := dir.wfs.WithFilerClient(ctx, func(ctx context.Context, client filer_pb.SeaweedFilerClient) error { + err := dir.wfs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { request := &filer_pb.AtomicRenameEntryRequest{ OldDirectory: dir.Path, @@ -24,7 +24,7 @@ func (dir *Dir) Rename(ctx context.Context, req *fuse.RenameRequest, newDirector NewName: req.NewName, } - _, err := client.AtomicRenameEntry(ctx, request) + _, err := client.AtomicRenameEntry(context.Background(), request) if err != nil { glog.V(0).Infof("dir Rename %s/%s => %s/%s : %v", dir.Path, req.OldName, newDir.Path, req.NewName, err) return fuse.EIO diff --git a/weed/filesys/dirty_page.go b/weed/filesys/dirty_page.go index 659f33736..3fbc7dca7 100644 --- a/weed/filesys/dirty_page.go +++ b/weed/filesys/dirty_page.go @@ -52,7 +52,7 @@ func (pages *ContinuousDirtyPages) AddPage(ctx context.Context, offset int64, da var hasSavedData bool if pages.intervals.TotalSize() > pages.f.wfs.option.ChunkSizeLimit { - chunk, hasSavedData, err = pages.saveExistingLargestPageToStorage(ctx) + chunk, hasSavedData, err = pages.saveExistingLargestPageToStorage() if hasSavedData { chunks = append(chunks, chunk) } @@ -67,7 +67,7 @@ func (pages *ContinuousDirtyPages) flushAndSave(ctx context.Context, offset int6 var newChunks []*filer_pb.FileChunk // flush existing - if newChunks, err = pages.saveExistingPagesToStorage(ctx); err == nil { + if newChunks, err = pages.saveExistingPagesToStorage(); err == nil { if newChunks != nil { chunks = append(chunks, newChunks...) 
} @@ -76,7 +76,7 @@ func (pages *ContinuousDirtyPages) flushAndSave(ctx context.Context, offset int6 } // flush the new page - if chunk, err = pages.saveToStorage(ctx, bytes.NewReader(data), offset, int64(len(data))); err == nil { + if chunk, err = pages.saveToStorage(bytes.NewReader(data), offset, int64(len(data))); err == nil { if chunk != nil { glog.V(4).Infof("%s/%s flush big request [%d,%d) to %s", pages.f.dir.Path, pages.f.Name, chunk.Offset, chunk.Offset+int64(chunk.Size), chunk.FileId) chunks = append(chunks, chunk) @@ -89,22 +89,22 @@ func (pages *ContinuousDirtyPages) flushAndSave(ctx context.Context, offset int6 return } -func (pages *ContinuousDirtyPages) FlushToStorage(ctx context.Context) (chunks []*filer_pb.FileChunk, err error) { +func (pages *ContinuousDirtyPages) FlushToStorage() (chunks []*filer_pb.FileChunk, err error) { pages.lock.Lock() defer pages.lock.Unlock() - return pages.saveExistingPagesToStorage(ctx) + return pages.saveExistingPagesToStorage() } -func (pages *ContinuousDirtyPages) saveExistingPagesToStorage(ctx context.Context) (chunks []*filer_pb.FileChunk, err error) { +func (pages *ContinuousDirtyPages) saveExistingPagesToStorage() (chunks []*filer_pb.FileChunk, err error) { var hasSavedData bool var chunk *filer_pb.FileChunk for { - chunk, hasSavedData, err = pages.saveExistingLargestPageToStorage(ctx) + chunk, hasSavedData, err = pages.saveExistingLargestPageToStorage() if !hasSavedData { return chunks, err } @@ -118,14 +118,14 @@ func (pages *ContinuousDirtyPages) saveExistingPagesToStorage(ctx context.Contex } -func (pages *ContinuousDirtyPages) saveExistingLargestPageToStorage(ctx context.Context) (chunk *filer_pb.FileChunk, hasSavedData bool, err error) { +func (pages *ContinuousDirtyPages) saveExistingLargestPageToStorage() (chunk *filer_pb.FileChunk, hasSavedData bool, err error) { maxList := pages.intervals.RemoveLargestIntervalLinkedList() if maxList == nil { return nil, false, nil } - chunk, err = pages.saveToStorage(ctx, maxList.ToReader(), maxList.Offset(), maxList.Size()) + chunk, err = pages.saveToStorage(maxList.ToReader(), maxList.Offset(), maxList.Size()) if err == nil { hasSavedData = true glog.V(3).Infof("%s saveToStorage [%d,%d) %s", pages.f.fullpath(), maxList.Offset(), maxList.Offset()+maxList.Size(), chunk.FileId) @@ -137,14 +137,14 @@ func (pages *ContinuousDirtyPages) saveExistingLargestPageToStorage(ctx context. return } -func (pages *ContinuousDirtyPages) saveToStorage(ctx context.Context, reader io.Reader, offset int64, size int64) (*filer_pb.FileChunk, error) { +func (pages *ContinuousDirtyPages) saveToStorage(reader io.Reader, offset int64, size int64) (*filer_pb.FileChunk, error) { var fileId, host string var auth security.EncodedJwt dir, _ := pages.f.fullpath().DirAndName() - if err := pages.f.wfs.WithFilerClient(ctx, func(ctx context.Context, client filer_pb.SeaweedFilerClient) error { + if err := pages.f.wfs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { request := &filer_pb.AssignVolumeRequest{ Count: 1, @@ -155,7 +155,7 @@ func (pages *ContinuousDirtyPages) saveToStorage(ctx context.Context, reader io. 
ParentPath: dir, } - resp, err := client.AssignVolume(ctx, request) + resp, err := client.AssignVolume(context.Background(), request) if err != nil { glog.V(0).Infof("assign volume failure %v: %v", request, err) return err diff --git a/weed/filesys/file.go b/weed/filesys/file.go index eccef4e58..69d440a73 100644 --- a/weed/filesys/file.go +++ b/weed/filesys/file.go @@ -148,7 +148,7 @@ func (file *File) Setattr(ctx context.Context, req *fuse.SetattrRequest, resp *f file.wfs.cacheDelete(file.fullpath()) - return file.saveEntry(ctx) + return file.saveEntry() } @@ -166,7 +166,7 @@ func (file *File) Setxattr(ctx context.Context, req *fuse.SetxattrRequest) error file.wfs.cacheDelete(file.fullpath()) - return file.saveEntry(ctx) + return file.saveEntry() } @@ -184,7 +184,7 @@ func (file *File) Removexattr(ctx context.Context, req *fuse.RemovexattrRequest) file.wfs.cacheDelete(file.fullpath()) - return file.saveEntry(ctx) + return file.saveEntry() } @@ -221,7 +221,7 @@ func (file *File) Forget() { func (file *File) maybeLoadEntry(ctx context.Context) error { if file.entry == nil || file.isOpen <= 0 { - entry, err := file.wfs.maybeLoadEntry(ctx, file.dir.Path, file.Name) + entry, err := file.wfs.maybeLoadEntry(file.dir.Path, file.Name) if err != nil { return err } @@ -256,8 +256,8 @@ func (file *File) setEntry(entry *filer_pb.Entry) { file.entryViewCache = filer2.NonOverlappingVisibleIntervals(file.entry.Chunks) } -func (file *File) saveEntry(ctx context.Context) error { - return file.wfs.WithFilerClient(ctx, func(ctx context.Context, client filer_pb.SeaweedFilerClient) error { +func (file *File) saveEntry() error { + return file.wfs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { request := &filer_pb.UpdateEntryRequest{ Directory: file.dir.Path, @@ -265,7 +265,7 @@ func (file *File) saveEntry(ctx context.Context) error { } glog.V(1).Infof("save file entry: %v", request) - _, err := client.UpdateEntry(ctx, request) + _, err := client.UpdateEntry(context.Background(), request) if err != nil { glog.V(0).Infof("UpdateEntry file %s/%s: %v", file.dir.Path, file.Name, err) return fuse.EIO diff --git a/weed/filesys/filehandle.go b/weed/filesys/filehandle.go index 71954d75d..7b26c5952 100644 --- a/weed/filesys/filehandle.go +++ b/weed/filesys/filehandle.go @@ -89,7 +89,7 @@ func (fh *FileHandle) readFromChunks(ctx context.Context, buff []byte, offset in chunkViews := filer2.ViewFromVisibleIntervals(fh.f.entryViewCache, offset, len(buff)) - totalRead, err := filer2.ReadIntoBuffer(ctx, fh.f.wfs, fh.f.fullpath(), buff, chunkViews, offset) + totalRead, err := filer2.ReadIntoBuffer(fh.f.wfs, fh.f.fullpath(), buff, chunkViews, offset) if err != nil { glog.Errorf("file handle read %s: %v", fh.f.fullpath(), err) @@ -154,7 +154,7 @@ func (fh *FileHandle) Flush(ctx context.Context, req *fuse.FlushRequest) error { // send the data to the OS glog.V(4).Infof("%s fh %d flush %v", fh.f.fullpath(), fh.handle, req) - chunks, err := fh.dirtyPages.FlushToStorage(ctx) + chunks, err := fh.dirtyPages.FlushToStorage() if err != nil { glog.Errorf("flush %s: %v", fh.f.fullpath(), err) return fuse.EIO @@ -169,7 +169,7 @@ func (fh *FileHandle) Flush(ctx context.Context, req *fuse.FlushRequest) error { return nil } - err = fh.f.wfs.WithFilerClient(ctx, func(ctx context.Context, client filer_pb.SeaweedFilerClient) error { + err = fh.f.wfs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { if fh.f.entry.Attributes != nil { fh.f.entry.Attributes.Mime = fh.contentType @@ -196,12 +196,12 @@ func (fh 
*FileHandle) Flush(ctx context.Context, req *fuse.FlushRequest) error { fh.f.entry.Chunks = chunks // fh.f.entryViewCache = nil - if err := filer_pb.CreateEntry(ctx, client, request); err != nil { - glog.Errorf("update fh: %v", err) - return fmt.Errorf("update fh: %v", err) + if err := filer_pb.CreateEntry(client, request); err != nil { + glog.Errorf("fh flush create %s: %v", fh.f.fullpath(), err) + return fmt.Errorf("fh flush create %s: %v", fh.f.fullpath(), err) } - fh.f.wfs.deleteFileChunks(ctx, garbages) + fh.f.wfs.deleteFileChunks(garbages) for i, chunk := range garbages { glog.V(3).Infof("garbage %s chunks %d: %v [%d,%d)", fh.f.fullpath(), i, chunk.FileId, chunk.Offset, chunk.Offset+int64(chunk.Size)) } diff --git a/weed/filesys/wfs.go b/weed/filesys/wfs.go index 37bb92acc..8f4225fb0 100644 --- a/weed/filesys/wfs.go +++ b/weed/filesys/wfs.go @@ -5,7 +5,6 @@ import ( "fmt" "math" "os" - "strings" "sync" "time" @@ -88,23 +87,16 @@ func (wfs *WFS) Root() (fs.Node, error) { return wfs.root, nil } -func (wfs *WFS) WithFilerClient(ctx context.Context, fn func(context.Context, filer_pb.SeaweedFilerClient) error) error { +func (wfs *WFS) WithFilerClient(fn func(filer_pb.SeaweedFilerClient) error) error { - err := util.WithCachedGrpcClient(ctx, func(ctx2 context.Context, grpcConnection *grpc.ClientConn) error { + err := util.WithCachedGrpcClient(func(grpcConnection *grpc.ClientConn) error { client := filer_pb.NewSeaweedFilerClient(grpcConnection) - return fn(ctx2, client) + return fn(client) }, wfs.option.FilerGrpcAddress, wfs.option.GrpcDialOption) if err == nil { return nil } - if strings.Contains(err.Error(), "context canceled") { - glog.V(0).Infoln("retry context canceled request...") - return util.WithCachedGrpcClient(context.Background(), func(ctx2 context.Context, grpcConnection *grpc.ClientConn) error { - client := filer_pb.NewSeaweedFilerClient(grpcConnection) - return fn(ctx2, client) - }, wfs.option.FilerGrpcAddress, wfs.option.GrpcDialOption) - } return err } @@ -162,7 +154,7 @@ func (wfs *WFS) Statfs(ctx context.Context, req *fuse.StatfsRequest, resp *fuse. if wfs.stats.lastChecked < time.Now().Unix()-20 { - err := wfs.WithFilerClient(ctx, func(ctx context.Context, client filer_pb.SeaweedFilerClient) error { + err := wfs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { request := &filer_pb.StatisticsRequest{ Collection: wfs.option.Collection, @@ -171,7 +163,7 @@ func (wfs *WFS) Statfs(ctx context.Context, req *fuse.StatfsRequest, resp *fuse. 
} glog.V(4).Infof("reading filer stats: %+v", request) - resp, err := client.Statistics(ctx, request) + resp, err := client.Statistics(context.Background(), request) if err != nil { glog.V(0).Infof("reading filer stats %v: %v", request, err) return err diff --git a/weed/filesys/wfs_deletion.go b/weed/filesys/wfs_deletion.go index cce0c792c..f53e95d26 100644 --- a/weed/filesys/wfs_deletion.go +++ b/weed/filesys/wfs_deletion.go @@ -10,7 +10,7 @@ import ( "google.golang.org/grpc" ) -func (wfs *WFS) deleteFileChunks(ctx context.Context, chunks []*filer_pb.FileChunk) { +func (wfs *WFS) deleteFileChunks(chunks []*filer_pb.FileChunk) { if len(chunks) == 0 { return } @@ -20,13 +20,13 @@ func (wfs *WFS) deleteFileChunks(ctx context.Context, chunks []*filer_pb.FileChu fileIds = append(fileIds, chunk.GetFileIdString()) } - wfs.WithFilerClient(ctx, func(ctx context.Context, client filer_pb.SeaweedFilerClient) error { - deleteFileIds(ctx, wfs.option.GrpcDialOption, client, fileIds) + wfs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { + deleteFileIds(wfs.option.GrpcDialOption, client, fileIds) return nil }) } -func deleteFileIds(ctx context.Context, grpcDialOption grpc.DialOption, client filer_pb.SeaweedFilerClient, fileIds []string) error { +func deleteFileIds(grpcDialOption grpc.DialOption, client filer_pb.SeaweedFilerClient, fileIds []string) error { var vids []string for _, fileId := range fileIds { @@ -38,7 +38,7 @@ func deleteFileIds(ctx context.Context, grpcDialOption grpc.DialOption, client f m := make(map[string]operation.LookupResult) glog.V(4).Infof("remove file lookup volume id locations: %v", vids) - resp, err := client.LookupVolume(ctx, &filer_pb.LookupVolumeRequest{ + resp, err := client.LookupVolume(context.Background(), &filer_pb.LookupVolumeRequest{ VolumeIds: vids, }) if err != nil { diff --git a/weed/filesys/xattr.go b/weed/filesys/xattr.go index 9dfb491fd..3ccecdf98 100644 --- a/weed/filesys/xattr.go +++ b/weed/filesys/xattr.go @@ -108,7 +108,7 @@ func listxattr(entry *filer_pb.Entry, req *fuse.ListxattrRequest, resp *fuse.Lis } -func (wfs *WFS) maybeLoadEntry(ctx context.Context, dir, name string) (entry *filer_pb.Entry, err error) { +func (wfs *WFS) maybeLoadEntry(dir, name string) (entry *filer_pb.Entry, err error) { fullpath := filer2.NewFullPath(dir, name) entry = wfs.cacheGet(fullpath) @@ -117,14 +117,14 @@ func (wfs *WFS) maybeLoadEntry(ctx context.Context, dir, name string) (entry *fi } // glog.V(3).Infof("read entry cache miss %s", fullpath) - err = wfs.WithFilerClient(ctx, func(ctx context.Context, client filer_pb.SeaweedFilerClient) error { + err = wfs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { request := &filer_pb.LookupDirectoryEntryRequest{ Name: name, Directory: dir, } - resp, err := client.LookupDirectoryEntry(ctx, request) + resp, err := client.LookupDirectoryEntry(context.Background(), request) if err != nil || resp == nil || resp.Entry == nil { if err == filer2.ErrNotFound || strings.Contains(err.Error(), filer2.ErrNotFound.Error()) { glog.V(3).Infof("file attr read not found file %v: %v", request, err) diff --git a/weed/operation/assign_file_id.go b/weed/operation/assign_file_id.go index 6da30605b..893bf516c 100644 --- a/weed/operation/assign_file_id.go +++ b/weed/operation/assign_file_id.go @@ -46,7 +46,7 @@ func Assign(server string, grpcDialOption grpc.DialOption, primaryRequest *Volum continue } - lastError = WithMasterServerClient(server, grpcDialOption, func(ctx context.Context, masterClient master_pb.SeaweedClient) 
error { + lastError = WithMasterServerClient(server, grpcDialOption, func(masterClient master_pb.SeaweedClient) error { req := &master_pb.AssignRequest{ Count: primaryRequest.Count, diff --git a/weed/operation/delete_content.go b/weed/operation/delete_content.go index 95bbde9f9..361c09e7e 100644 --- a/weed/operation/delete_content.go +++ b/weed/operation/delete_content.go @@ -10,7 +10,6 @@ import ( "google.golang.org/grpc" - "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb" ) @@ -109,15 +108,13 @@ func DeleteFilesWithLookupVolumeId(grpcDialOption grpc.DialOption, fileIds []str ret = append(ret, result...) } - glog.V(1).Infof("deleted %d items", len(ret)) - return ret, err } // DeleteFilesAtOneVolumeServer deletes a list of files that is on one volume server via gRpc func DeleteFilesAtOneVolumeServer(volumeServer string, grpcDialOption grpc.DialOption, fileIds []string) (ret []*volume_server_pb.DeleteResult, err error) { - err = WithVolumeServerClient(volumeServer, grpcDialOption, func(ctx context.Context, volumeServerClient volume_server_pb.VolumeServerClient) error { + err = WithVolumeServerClient(volumeServer, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error { req := &volume_server_pb.BatchDeleteRequest{ FileIds: fileIds, diff --git a/weed/operation/grpc_client.go b/weed/operation/grpc_client.go index e7ee2d2ba..7eed66503 100644 --- a/weed/operation/grpc_client.go +++ b/weed/operation/grpc_client.go @@ -1,29 +1,28 @@ package operation import ( - "context" "fmt" + "strconv" + "strings" + + "google.golang.org/grpc" + "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/pb/master_pb" "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb" "github.com/chrislusf/seaweedfs/weed/util" - "google.golang.org/grpc" - "strconv" - "strings" ) -func WithVolumeServerClient(volumeServer string, grpcDialOption grpc.DialOption, fn func(context.Context, volume_server_pb.VolumeServerClient) error) error { - - ctx := context.Background() +func WithVolumeServerClient(volumeServer string, grpcDialOption grpc.DialOption, fn func(volume_server_pb.VolumeServerClient) error) error { grpcAddress, err := toVolumeServerGrpcAddress(volumeServer) if err != nil { return err } - return util.WithCachedGrpcClient(ctx, func(ctx2 context.Context, grpcConnection *grpc.ClientConn) error { + return util.WithCachedGrpcClient(func(grpcConnection *grpc.ClientConn) error { client := volume_server_pb.NewVolumeServerClient(grpcConnection) - return fn(ctx2, client) + return fn(client) }, grpcAddress, grpcDialOption) } @@ -38,18 +37,16 @@ func toVolumeServerGrpcAddress(volumeServer string) (grpcAddress string, err err return fmt.Sprintf("%s:%d", volumeServer[0:sepIndex], port+10000), nil } -func WithMasterServerClient(masterServer string, grpcDialOption grpc.DialOption, fn func(ctx2 context.Context, masterClient master_pb.SeaweedClient) error) error { - - ctx := context.Background() +func WithMasterServerClient(masterServer string, grpcDialOption grpc.DialOption, fn func(masterClient master_pb.SeaweedClient) error) error { masterGrpcAddress, parseErr := util.ParseServerToGrpcAddress(masterServer) if parseErr != nil { return fmt.Errorf("failed to parse master grpc %v: %v", masterServer, parseErr) } - return util.WithCachedGrpcClient(ctx, func(ctx2 context.Context, grpcConnection *grpc.ClientConn) error { + return util.WithCachedGrpcClient(func(grpcConnection *grpc.ClientConn) error { client := 
master_pb.NewSeaweedClient(grpcConnection) - return fn(ctx2, client) + return fn(client) }, masterGrpcAddress, grpcDialOption) } diff --git a/weed/operation/lookup.go b/weed/operation/lookup.go index 78769ac5a..d0773e7fd 100644 --- a/weed/operation/lookup.go +++ b/weed/operation/lookup.go @@ -99,12 +99,12 @@ func LookupVolumeIds(server string, grpcDialOption grpc.DialOption, vids []strin //only query unknown_vids - err := WithMasterServerClient(server, grpcDialOption, func(ctx context.Context, masterClient master_pb.SeaweedClient) error { + err := WithMasterServerClient(server, grpcDialOption, func(masterClient master_pb.SeaweedClient) error { req := &master_pb.LookupVolumeRequest{ VolumeIds: unknown_vids, } - resp, grpcErr := masterClient.LookupVolume(ctx, req) + resp, grpcErr := masterClient.LookupVolume(context.Background(), req) if grpcErr != nil { return grpcErr } diff --git a/weed/operation/stats.go b/weed/operation/stats.go index 3e6327f19..b69a33750 100644 --- a/weed/operation/stats.go +++ b/weed/operation/stats.go @@ -9,9 +9,9 @@ import ( func Statistics(server string, grpcDialOption grpc.DialOption, req *master_pb.StatisticsRequest) (resp *master_pb.StatisticsResponse, err error) { - err = WithMasterServerClient(server, grpcDialOption, func(ctx context.Context, masterClient master_pb.SeaweedClient) error { + err = WithMasterServerClient(server, grpcDialOption, func(masterClient master_pb.SeaweedClient) error { - grpcResponse, grpcErr := masterClient.Statistics(ctx, req) + grpcResponse, grpcErr := masterClient.Statistics(context.Background(), req) if grpcErr != nil { return grpcErr } diff --git a/weed/operation/sync_volume.go b/weed/operation/sync_volume.go index 4b39ad544..5562f12ab 100644 --- a/weed/operation/sync_volume.go +++ b/weed/operation/sync_volume.go @@ -8,9 +8,9 @@ import ( func GetVolumeSyncStatus(server string, grpcDialOption grpc.DialOption, vid uint32) (resp *volume_server_pb.VolumeSyncStatusResponse, err error) { - WithVolumeServerClient(server, grpcDialOption, func(ctx context.Context, client volume_server_pb.VolumeServerClient) error { + WithVolumeServerClient(server, grpcDialOption, func(client volume_server_pb.VolumeServerClient) error { - resp, err = client.VolumeSyncStatus(ctx, &volume_server_pb.VolumeSyncStatusRequest{ + resp, err = client.VolumeSyncStatus(context.Background(), &volume_server_pb.VolumeSyncStatusRequest{ VolumeId: vid, }) return nil diff --git a/weed/operation/tail_volume.go b/weed/operation/tail_volume.go index 1e8b0a16e..3cd66b5da 100644 --- a/weed/operation/tail_volume.go +++ b/weed/operation/tail_volume.go @@ -5,9 +5,10 @@ import ( "fmt" "io" + "google.golang.org/grpc" + "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb" "github.com/chrislusf/seaweedfs/weed/storage/needle" - "google.golang.org/grpc" ) func TailVolume(master string, grpcDialOption grpc.DialOption, vid needle.VolumeId, sinceNs uint64, timeoutSeconds int, fn func(n *needle.Needle) error) error { @@ -26,9 +27,9 @@ func TailVolume(master string, grpcDialOption grpc.DialOption, vid needle.Volume } func TailVolumeFromSource(volumeServer string, grpcDialOption grpc.DialOption, vid needle.VolumeId, sinceNs uint64, idleTimeoutSeconds int, fn func(n *needle.Needle) error) error { - return WithVolumeServerClient(volumeServer, grpcDialOption, func(ctx context.Context, client volume_server_pb.VolumeServerClient) error { + return WithVolumeServerClient(volumeServer, grpcDialOption, func(client volume_server_pb.VolumeServerClient) error { - stream, err := 
client.VolumeTailSender(ctx, &volume_server_pb.VolumeTailSenderRequest{
+		stream, err := client.VolumeTailSender(context.Background(), &volume_server_pb.VolumeTailSenderRequest{
 			VolumeId:           uint32(vid),
 			SinceNs:            sinceNs,
 			IdleTimeoutSeconds: uint32(idleTimeoutSeconds),
diff --git a/weed/pb/filer_pb/filer_pb_helper.go b/weed/pb/filer_pb/filer_pb_helper.go
index 349f07320..408caadcd 100644
--- a/weed/pb/filer_pb/filer_pb_helper.go
+++ b/weed/pb/filer_pb/filer_pb_helper.go
@@ -4,6 +4,7 @@ import (
 	"context"
 	"fmt"
 
+	"github.com/chrislusf/seaweedfs/weed/glog"
 	"github.com/chrislusf/seaweedfs/weed/storage/needle"
 )
 
@@ -71,13 +72,15 @@ func AfterEntryDeserialization(chunks []*FileChunk) {
 	}
 }
 
-func CreateEntry(ctx context.Context, client SeaweedFilerClient, request *CreateEntryRequest) error {
-	resp, err := client.CreateEntry(ctx, request)
+func CreateEntry(client SeaweedFilerClient, request *CreateEntryRequest) error {
+	resp, err := client.CreateEntry(context.Background(), request)
 	if err != nil {
+		glog.V(1).Infof("create entry %s/%s %v: %v", request.Directory, request.Entry.Name, request.OExcl, err)
 		return fmt.Errorf("CreateEntry: %v", err)
 	}
 	if resp.Error != "" {
+		glog.V(1).Infof("create entry %s/%s %v: %v", request.Directory, request.Entry.Name, request.OExcl, resp.Error)
 		return fmt.Errorf("CreateEntry : %v", resp.Error)
 	}
-	return err
+	return nil
 }
diff --git a/weed/replication/replicator.go b/weed/replication/replicator.go
index a0ef6591c..a91c2ddd3 100644
--- a/weed/replication/replicator.go
+++ b/weed/replication/replicator.go
@@ -41,28 +41,28 @@ func (r *Replicator) Replicate(ctx context.Context, key string, message *filer_p
 	key = newKey
 	if message.OldEntry != nil && message.NewEntry == nil {
 		glog.V(4).Infof("deleting %v", key)
-		return r.sink.DeleteEntry(ctx, key, message.OldEntry.IsDirectory, message.DeleteChunks)
+		return r.sink.DeleteEntry(key, message.OldEntry.IsDirectory, message.DeleteChunks)
 	}
 	if message.OldEntry == nil && message.NewEntry != nil {
 		glog.V(4).Infof("creating %v", key)
-		return r.sink.CreateEntry(ctx, key, message.NewEntry)
+		return r.sink.CreateEntry(key, message.NewEntry)
 	}
 	if message.OldEntry == nil && message.NewEntry == nil {
 		glog.V(0).Infof("weird message %+v", message)
 		return nil
 	}
 
-	foundExisting, err := r.sink.UpdateEntry(ctx, key, message.OldEntry, message.NewParentPath, message.NewEntry, message.DeleteChunks)
+	foundExisting, err := r.sink.UpdateEntry(key, message.OldEntry, message.NewParentPath, message.NewEntry, message.DeleteChunks)
 	if foundExisting {
 		glog.V(4).Infof("updated %v", key)
 		return err
 	}
 
-	err = r.sink.DeleteEntry(ctx, key, message.OldEntry.IsDirectory, false)
+	err = r.sink.DeleteEntry(key, message.OldEntry.IsDirectory, false)
 	if err != nil {
 		return fmt.Errorf("delete old entry %v: %v", key, err)
 	}
 
 	glog.V(4).Infof("creating missing %v", key)
-	return r.sink.CreateEntry(ctx, key, message.NewEntry)
+	return r.sink.CreateEntry(key, message.NewEntry)
 }
diff --git a/weed/replication/sink/azuresink/azure_sink.go b/weed/replication/sink/azuresink/azure_sink.go
index a0b1a41ab..f7c0eef0d 100644
--- a/weed/replication/sink/azuresink/azure_sink.go
+++ b/weed/replication/sink/azuresink/azure_sink.go
@@ -70,7 +70,7 @@ func (g *AzureSink) initialize(accountName, accountKey, container, dir string) e
 	return nil
 }
 
-func (g *AzureSink) DeleteEntry(ctx context.Context, key string, isDirectory, deleteIncludeChunks bool) error {
+func (g *AzureSink) DeleteEntry(key string, isDirectory, deleteIncludeChunks bool) error {
 
 	key = cleanKey(key)
 
@@ -78,7 +78,7 @@ func (g
*AzureSink) DeleteEntry(ctx context.Context, key string, isDirectory, de key = key + "/" } - if _, err := g.containerURL.NewBlobURL(key).Delete(ctx, + if _, err := g.containerURL.NewBlobURL(key).Delete(context.Background(), azblob.DeleteSnapshotsOptionInclude, azblob.BlobAccessConditions{}); err != nil { return fmt.Errorf("azure delete %s/%s: %v", g.container, key, err) } @@ -87,7 +87,7 @@ func (g *AzureSink) DeleteEntry(ctx context.Context, key string, isDirectory, de } -func (g *AzureSink) CreateEntry(ctx context.Context, key string, entry *filer_pb.Entry) error { +func (g *AzureSink) CreateEntry(key string, entry *filer_pb.Entry) error { key = cleanKey(key) @@ -102,21 +102,21 @@ func (g *AzureSink) CreateEntry(ctx context.Context, key string, entry *filer_pb // Azure Storage account's container. appendBlobURL := g.containerURL.NewAppendBlobURL(key) - _, err := appendBlobURL.Create(ctx, azblob.BlobHTTPHeaders{}, azblob.Metadata{}, azblob.BlobAccessConditions{}) + _, err := appendBlobURL.Create(context.Background(), azblob.BlobHTTPHeaders{}, azblob.Metadata{}, azblob.BlobAccessConditions{}) if err != nil { return err } for _, chunk := range chunkViews { - fileUrl, err := g.filerSource.LookupFileId(ctx, chunk.FileId) + fileUrl, err := g.filerSource.LookupFileId(chunk.FileId) if err != nil { return err } var writeErr error _, readErr := util.ReadUrlAsStream(fileUrl, chunk.Offset, int(chunk.Size), func(data []byte) { - _, writeErr = appendBlobURL.AppendBlock(ctx, bytes.NewReader(data), azblob.AppendBlobAccessConditions{}, nil) + _, writeErr = appendBlobURL.AppendBlock(context.Background(), bytes.NewReader(data), azblob.AppendBlobAccessConditions{}, nil) }) if readErr != nil { @@ -132,7 +132,7 @@ func (g *AzureSink) CreateEntry(ctx context.Context, key string, entry *filer_pb } -func (g *AzureSink) UpdateEntry(ctx context.Context, key string, oldEntry *filer_pb.Entry, newParentPath string, newEntry *filer_pb.Entry, deleteIncludeChunks bool) (foundExistingEntry bool, err error) { +func (g *AzureSink) UpdateEntry(key string, oldEntry *filer_pb.Entry, newParentPath string, newEntry *filer_pb.Entry, deleteIncludeChunks bool) (foundExistingEntry bool, err error) { key = cleanKey(key) // TODO improve efficiency return false, nil diff --git a/weed/replication/sink/b2sink/b2_sink.go b/weed/replication/sink/b2sink/b2_sink.go index 8c80a64bd..d230719ce 100644 --- a/weed/replication/sink/b2sink/b2_sink.go +++ b/weed/replication/sink/b2sink/b2_sink.go @@ -58,7 +58,7 @@ func (g *B2Sink) initialize(accountId, accountKey, bucket, dir string) error { return nil } -func (g *B2Sink) DeleteEntry(ctx context.Context, key string, isDirectory, deleteIncludeChunks bool) error { +func (g *B2Sink) DeleteEntry(key string, isDirectory, deleteIncludeChunks bool) error { key = cleanKey(key) @@ -66,18 +66,18 @@ func (g *B2Sink) DeleteEntry(ctx context.Context, key string, isDirectory, delet key = key + "/" } - bucket, err := g.client.Bucket(ctx, g.bucket) + bucket, err := g.client.Bucket(context.Background(), g.bucket) if err != nil { return err } targetObject := bucket.Object(key) - return targetObject.Delete(ctx) + return targetObject.Delete(context.Background()) } -func (g *B2Sink) CreateEntry(ctx context.Context, key string, entry *filer_pb.Entry) error { +func (g *B2Sink) CreateEntry(key string, entry *filer_pb.Entry) error { key = cleanKey(key) @@ -88,17 +88,17 @@ func (g *B2Sink) CreateEntry(ctx context.Context, key string, entry *filer_pb.En totalSize := filer2.TotalSize(entry.Chunks) chunkViews := 
filer2.ViewFromChunks(entry.Chunks, 0, int(totalSize)) - bucket, err := g.client.Bucket(ctx, g.bucket) + bucket, err := g.client.Bucket(context.Background(), g.bucket) if err != nil { return err } targetObject := bucket.Object(key) - writer := targetObject.NewWriter(ctx) + writer := targetObject.NewWriter(context.Background()) for _, chunk := range chunkViews { - fileUrl, err := g.filerSource.LookupFileId(ctx, chunk.FileId) + fileUrl, err := g.filerSource.LookupFileId(chunk.FileId) if err != nil { return err } @@ -124,7 +124,7 @@ func (g *B2Sink) CreateEntry(ctx context.Context, key string, entry *filer_pb.En } -func (g *B2Sink) UpdateEntry(ctx context.Context, key string, oldEntry *filer_pb.Entry, newParentPath string, newEntry *filer_pb.Entry, deleteIncludeChunks bool) (foundExistingEntry bool, err error) { +func (g *B2Sink) UpdateEntry(key string, oldEntry *filer_pb.Entry, newParentPath string, newEntry *filer_pb.Entry, deleteIncludeChunks bool) (foundExistingEntry bool, err error) { key = cleanKey(key) diff --git a/weed/replication/sink/filersink/fetch_write.go b/weed/replication/sink/filersink/fetch_write.go index 50f3f64d4..954e951c9 100644 --- a/weed/replication/sink/filersink/fetch_write.go +++ b/weed/replication/sink/filersink/fetch_write.go @@ -15,7 +15,7 @@ import ( "github.com/chrislusf/seaweedfs/weed/util" ) -func (fs *FilerSink) replicateChunks(ctx context.Context, sourceChunks []*filer_pb.FileChunk, dir string) (replicatedChunks []*filer_pb.FileChunk, err error) { +func (fs *FilerSink) replicateChunks(sourceChunks []*filer_pb.FileChunk, dir string) (replicatedChunks []*filer_pb.FileChunk, err error) { if len(sourceChunks) == 0 { return } @@ -24,7 +24,7 @@ func (fs *FilerSink) replicateChunks(ctx context.Context, sourceChunks []*filer_ wg.Add(1) go func(chunk *filer_pb.FileChunk) { defer wg.Done() - replicatedChunk, e := fs.replicateOneChunk(ctx, chunk, dir) + replicatedChunk, e := fs.replicateOneChunk(chunk, dir) if e != nil { err = e } @@ -36,9 +36,9 @@ func (fs *FilerSink) replicateChunks(ctx context.Context, sourceChunks []*filer_ return } -func (fs *FilerSink) replicateOneChunk(ctx context.Context, sourceChunk *filer_pb.FileChunk, dir string) (*filer_pb.FileChunk, error) { +func (fs *FilerSink) replicateOneChunk(sourceChunk *filer_pb.FileChunk, dir string) (*filer_pb.FileChunk, error) { - fileId, err := fs.fetchAndWrite(ctx, sourceChunk, dir) + fileId, err := fs.fetchAndWrite(sourceChunk, dir) if err != nil { return nil, fmt.Errorf("copy %s: %v", sourceChunk.GetFileIdString(), err) } @@ -53,9 +53,9 @@ func (fs *FilerSink) replicateOneChunk(ctx context.Context, sourceChunk *filer_p }, nil } -func (fs *FilerSink) fetchAndWrite(ctx context.Context, sourceChunk *filer_pb.FileChunk, dir string) (fileId string, err error) { +func (fs *FilerSink) fetchAndWrite(sourceChunk *filer_pb.FileChunk, dir string) (fileId string, err error) { - filename, header, readCloser, err := fs.filerSource.ReadPart(ctx, sourceChunk.GetFileIdString()) + filename, header, readCloser, err := fs.filerSource.ReadPart(sourceChunk.GetFileIdString()) if err != nil { return "", fmt.Errorf("read part %s: %v", sourceChunk.GetFileIdString(), err) } @@ -64,7 +64,7 @@ func (fs *FilerSink) fetchAndWrite(ctx context.Context, sourceChunk *filer_pb.Fi var host string var auth security.EncodedJwt - if err := fs.withFilerClient(ctx, func(ctx context.Context, client filer_pb.SeaweedFilerClient) error { + if err := fs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error { request := 
&filer_pb.AssignVolumeRequest{ Count: 1, @@ -75,7 +75,7 @@ func (fs *FilerSink) fetchAndWrite(ctx context.Context, sourceChunk *filer_pb.Fi ParentPath: dir, } - resp, err := client.AssignVolume(ctx, request) + resp, err := client.AssignVolume(context.Background(), request) if err != nil { glog.V(0).Infof("assign volume failure %v: %v", request, err) return err @@ -109,11 +109,11 @@ func (fs *FilerSink) fetchAndWrite(ctx context.Context, sourceChunk *filer_pb.Fi return } -func (fs *FilerSink) withFilerClient(ctx context.Context, fn func(context.Context, filer_pb.SeaweedFilerClient) error) error { +func (fs *FilerSink) withFilerClient(fn func(filer_pb.SeaweedFilerClient) error) error { - return util.WithCachedGrpcClient(ctx, func(ctx context.Context, grpcConnection *grpc.ClientConn) error { + return util.WithCachedGrpcClient(func(grpcConnection *grpc.ClientConn) error { client := filer_pb.NewSeaweedFilerClient(grpcConnection) - return fn(ctx, client) + return fn(client) }, fs.grpcAddress, fs.grpcDialOption) } diff --git a/weed/replication/sink/filersink/filer_sink.go b/weed/replication/sink/filersink/filer_sink.go index cdc4f4a45..6b82b90df 100644 --- a/weed/replication/sink/filersink/filer_sink.go +++ b/weed/replication/sink/filersink/filer_sink.go @@ -64,8 +64,8 @@ func (fs *FilerSink) initialize(grpcAddress string, dir string, return nil } -func (fs *FilerSink) DeleteEntry(ctx context.Context, key string, isDirectory, deleteIncludeChunks bool) error { - return fs.withFilerClient(ctx, func(ctx context.Context, client filer_pb.SeaweedFilerClient) error { +func (fs *FilerSink) DeleteEntry(key string, isDirectory, deleteIncludeChunks bool) error { + return fs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error { dir, name := filer2.FullPath(key).DirAndName() @@ -76,7 +76,7 @@ func (fs *FilerSink) DeleteEntry(ctx context.Context, key string, isDirectory, d } glog.V(1).Infof("delete entry: %v", request) - _, err := client.DeleteEntry(ctx, request) + _, err := client.DeleteEntry(context.Background(), request) if err != nil { glog.V(0).Infof("delete entry %s: %v", key, err) return fmt.Errorf("delete entry %s: %v", key, err) @@ -86,9 +86,9 @@ func (fs *FilerSink) DeleteEntry(ctx context.Context, key string, isDirectory, d }) } -func (fs *FilerSink) CreateEntry(ctx context.Context, key string, entry *filer_pb.Entry) error { +func (fs *FilerSink) CreateEntry(key string, entry *filer_pb.Entry) error { - return fs.withFilerClient(ctx, func(ctx context.Context, client filer_pb.SeaweedFilerClient) error { + return fs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error { dir, name := filer2.FullPath(key).DirAndName() @@ -98,14 +98,14 @@ func (fs *FilerSink) CreateEntry(ctx context.Context, key string, entry *filer_p Name: name, } glog.V(1).Infof("lookup: %v", lookupRequest) - if resp, err := client.LookupDirectoryEntry(ctx, lookupRequest); err == nil && resp.Entry != nil { + if resp, err := client.LookupDirectoryEntry(context.Background(), lookupRequest); err == nil && resp.Entry != nil { if filer2.ETag(resp.Entry.Chunks) == filer2.ETag(entry.Chunks) { glog.V(0).Infof("already replicated %s", key) return nil } } - replicatedChunks, err := fs.replicateChunks(ctx, entry.Chunks, dir) + replicatedChunks, err := fs.replicateChunks(entry.Chunks, dir) if err != nil { glog.V(0).Infof("replicate entry chunks %s: %v", key, err) @@ -125,7 +125,7 @@ func (fs *FilerSink) CreateEntry(ctx context.Context, key string, entry *filer_p } glog.V(1).Infof("create: %v", request) - if err := 
filer_pb.CreateEntry(ctx, client, request); err != nil { + if err := filer_pb.CreateEntry(client, request); err != nil { glog.V(0).Infof("create entry %s: %v", key, err) return fmt.Errorf("create entry %s: %v", key, err) } @@ -134,13 +134,13 @@ func (fs *FilerSink) CreateEntry(ctx context.Context, key string, entry *filer_p }) } -func (fs *FilerSink) UpdateEntry(ctx context.Context, key string, oldEntry *filer_pb.Entry, newParentPath string, newEntry *filer_pb.Entry, deleteIncludeChunks bool) (foundExistingEntry bool, err error) { +func (fs *FilerSink) UpdateEntry(key string, oldEntry *filer_pb.Entry, newParentPath string, newEntry *filer_pb.Entry, deleteIncludeChunks bool) (foundExistingEntry bool, err error) { dir, name := filer2.FullPath(key).DirAndName() // read existing entry var existingEntry *filer_pb.Entry - err = fs.withFilerClient(ctx, func(ctx context.Context, client filer_pb.SeaweedFilerClient) error { + err = fs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error { request := &filer_pb.LookupDirectoryEntryRequest{ Directory: dir, @@ -148,7 +148,7 @@ func (fs *FilerSink) UpdateEntry(ctx context.Context, key string, oldEntry *file } glog.V(4).Infof("lookup entry: %v", request) - resp, err := client.LookupDirectoryEntry(ctx, request) + resp, err := client.LookupDirectoryEntry(context.Background(), request) if err != nil { glog.V(0).Infof("lookup %s: %v", key, err) return err @@ -187,7 +187,7 @@ func (fs *FilerSink) UpdateEntry(ctx context.Context, key string, oldEntry *file } // replicate the chunks that are new in the source - replicatedChunks, err := fs.replicateChunks(ctx, newChunks, newParentPath) + replicatedChunks, err := fs.replicateChunks(newChunks, newParentPath) if err != nil { return true, fmt.Errorf("replicte %s chunks error: %v", key, err) } @@ -195,14 +195,14 @@ func (fs *FilerSink) UpdateEntry(ctx context.Context, key string, oldEntry *file } // save updated meta data - return true, fs.withFilerClient(ctx, func(ctx context.Context, client filer_pb.SeaweedFilerClient) error { + return true, fs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error { request := &filer_pb.UpdateEntryRequest{ Directory: newParentPath, Entry: existingEntry, } - if _, err := client.UpdateEntry(ctx, request); err != nil { + if _, err := client.UpdateEntry(context.Background(), request); err != nil { return fmt.Errorf("update existingEntry %s: %v", key, err) } diff --git a/weed/replication/sink/gcssink/gcs_sink.go b/weed/replication/sink/gcssink/gcs_sink.go index 5aa978ab8..31dd5a85a 100644 --- a/weed/replication/sink/gcssink/gcs_sink.go +++ b/weed/replication/sink/gcssink/gcs_sink.go @@ -69,13 +69,13 @@ func (g *GcsSink) initialize(google_application_credentials, bucketName, dir str return nil } -func (g *GcsSink) DeleteEntry(ctx context.Context, key string, isDirectory, deleteIncludeChunks bool) error { +func (g *GcsSink) DeleteEntry(key string, isDirectory, deleteIncludeChunks bool) error { if isDirectory { key = key + "/" } - if err := g.client.Bucket(g.bucket).Object(key).Delete(ctx); err != nil { + if err := g.client.Bucket(g.bucket).Object(key).Delete(context.Background()); err != nil { return fmt.Errorf("gcs delete %s%s: %v", g.bucket, key, err) } @@ -83,7 +83,7 @@ func (g *GcsSink) DeleteEntry(ctx context.Context, key string, isDirectory, dele } -func (g *GcsSink) CreateEntry(ctx context.Context, key string, entry *filer_pb.Entry) error { +func (g *GcsSink) CreateEntry(key string, entry *filer_pb.Entry) error { if entry.IsDirectory { return nil @@ -92,11 +92,11 
@@ func (g *GcsSink) CreateEntry(ctx context.Context, key string, entry *filer_pb.E totalSize := filer2.TotalSize(entry.Chunks) chunkViews := filer2.ViewFromChunks(entry.Chunks, 0, int(totalSize)) - wc := g.client.Bucket(g.bucket).Object(key).NewWriter(ctx) + wc := g.client.Bucket(g.bucket).Object(key).NewWriter(context.Background()) for _, chunk := range chunkViews { - fileUrl, err := g.filerSource.LookupFileId(ctx, chunk.FileId) + fileUrl, err := g.filerSource.LookupFileId(chunk.FileId) if err != nil { return err } @@ -119,7 +119,7 @@ func (g *GcsSink) CreateEntry(ctx context.Context, key string, entry *filer_pb.E } -func (g *GcsSink) UpdateEntry(ctx context.Context, key string, oldEntry *filer_pb.Entry, newParentPath string, newEntry *filer_pb.Entry, deleteIncludeChunks bool) (foundExistingEntry bool, err error) { +func (g *GcsSink) UpdateEntry(key string, oldEntry *filer_pb.Entry, newParentPath string, newEntry *filer_pb.Entry, deleteIncludeChunks bool) (foundExistingEntry bool, err error) { // TODO improve efficiency return false, nil } diff --git a/weed/replication/sink/replication_sink.go b/weed/replication/sink/replication_sink.go index 208bbdf87..6d85f660a 100644 --- a/weed/replication/sink/replication_sink.go +++ b/weed/replication/sink/replication_sink.go @@ -1,7 +1,6 @@ package sink import ( - "context" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" "github.com/chrislusf/seaweedfs/weed/replication/source" "github.com/chrislusf/seaweedfs/weed/util" @@ -10,9 +9,9 @@ import ( type ReplicationSink interface { GetName() string Initialize(configuration util.Configuration, prefix string) error - DeleteEntry(ctx context.Context, key string, isDirectory, deleteIncludeChunks bool) error - CreateEntry(ctx context.Context, key string, entry *filer_pb.Entry) error - UpdateEntry(ctx context.Context, key string, oldEntry *filer_pb.Entry, newParentPath string, newEntry *filer_pb.Entry, deleteIncludeChunks bool) (foundExistingEntry bool, err error) + DeleteEntry(key string, isDirectory, deleteIncludeChunks bool) error + CreateEntry(key string, entry *filer_pb.Entry) error + UpdateEntry(key string, oldEntry *filer_pb.Entry, newParentPath string, newEntry *filer_pb.Entry, deleteIncludeChunks bool) (foundExistingEntry bool, err error) GetSinkToDirectory() string SetSourceFiler(s *source.FilerSource) } diff --git a/weed/replication/sink/s3sink/s3_sink.go b/weed/replication/sink/s3sink/s3_sink.go index e4e097c0f..fc3e6cd4d 100644 --- a/weed/replication/sink/s3sink/s3_sink.go +++ b/weed/replication/sink/s3sink/s3_sink.go @@ -11,6 +11,7 @@ import ( "github.com/aws/aws-sdk-go/aws/session" "github.com/aws/aws-sdk-go/service/s3" "github.com/aws/aws-sdk-go/service/s3/s3iface" + "github.com/chrislusf/seaweedfs/weed/filer2" "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" @@ -77,7 +78,7 @@ func (s3sink *S3Sink) initialize(awsAccessKeyId, awsSecretAccessKey, region, buc return nil } -func (s3sink *S3Sink) DeleteEntry(ctx context.Context, key string, isDirectory, deleteIncludeChunks bool) error { +func (s3sink *S3Sink) DeleteEntry(key string, isDirectory, deleteIncludeChunks bool) error { key = cleanKey(key) @@ -89,7 +90,7 @@ func (s3sink *S3Sink) DeleteEntry(ctx context.Context, key string, isDirectory, } -func (s3sink *S3Sink) CreateEntry(ctx context.Context, key string, entry *filer_pb.Entry) error { +func (s3sink *S3Sink) CreateEntry(key string, entry *filer_pb.Entry) error { key = cleanKey(key) @@ -112,7 +113,7 @@ func (s3sink *S3Sink) CreateEntry(ctx 
context.Context, key string, entry *filer_ wg.Add(1) go func(chunk *filer2.ChunkView) { defer wg.Done() - if part, uploadErr := s3sink.uploadPart(ctx, key, uploadId, partId, chunk); uploadErr != nil { + if part, uploadErr := s3sink.uploadPart(context.Background(), key, uploadId, partId, chunk); uploadErr != nil { err = uploadErr } else { parts = append(parts, part) @@ -126,11 +127,11 @@ func (s3sink *S3Sink) CreateEntry(ctx context.Context, key string, entry *filer_ return err } - return s3sink.completeMultipartUpload(ctx, key, uploadId, parts) + return s3sink.completeMultipartUpload(context.Background(), key, uploadId, parts) } -func (s3sink *S3Sink) UpdateEntry(ctx context.Context, key string, oldEntry *filer_pb.Entry, newParentPath string, newEntry *filer_pb.Entry, deleteIncludeChunks bool) (foundExistingEntry bool, err error) { +func (s3sink *S3Sink) UpdateEntry(key string, oldEntry *filer_pb.Entry, newParentPath string, newEntry *filer_pb.Entry, deleteIncludeChunks bool) (foundExistingEntry bool, err error) { key = cleanKey(key) // TODO improve efficiency return false, nil diff --git a/weed/replication/sink/s3sink/s3_write.go b/weed/replication/sink/s3sink/s3_write.go index 0a190b27d..9386169b8 100644 --- a/weed/replication/sink/s3sink/s3_write.go +++ b/weed/replication/sink/s3sink/s3_write.go @@ -157,7 +157,7 @@ func (s3sink *S3Sink) uploadPartCopy(key, uploadId string, partId int64, copySou } func (s3sink *S3Sink) buildReadSeeker(ctx context.Context, chunk *filer2.ChunkView) (io.ReadSeeker, error) { - fileUrl, err := s3sink.filerSource.LookupFileId(ctx, chunk.FileId) + fileUrl, err := s3sink.filerSource.LookupFileId(chunk.FileId) if err != nil { return nil, err } diff --git a/weed/replication/source/filer_source.go b/weed/replication/source/filer_source.go index c3ea44671..11eb3afa1 100644 --- a/weed/replication/source/filer_source.go +++ b/weed/replication/source/filer_source.go @@ -40,16 +40,16 @@ func (fs *FilerSource) initialize(grpcAddress string, dir string) (err error) { return nil } -func (fs *FilerSource) LookupFileId(ctx context.Context, part string) (fileUrl string, err error) { +func (fs *FilerSource) LookupFileId(part string) (fileUrl string, err error) { vid2Locations := make(map[string]*filer_pb.Locations) vid := volumeId(part) - err = fs.withFilerClient(ctx, fs.grpcDialOption, func(ctx context.Context, client filer_pb.SeaweedFilerClient) error { + err = fs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error { glog.V(4).Infof("read lookup volume id locations: %v", vid) - resp, err := client.LookupVolume(ctx, &filer_pb.LookupVolumeRequest{ + resp, err := client.LookupVolume(context.Background(), &filer_pb.LookupVolumeRequest{ VolumeIds: []string{vid}, }) if err != nil { @@ -78,9 +78,9 @@ func (fs *FilerSource) LookupFileId(ctx context.Context, part string) (fileUrl s return } -func (fs *FilerSource) ReadPart(ctx context.Context, part string) (filename string, header http.Header, readCloser io.ReadCloser, err error) { +func (fs *FilerSource) ReadPart(part string) (filename string, header http.Header, readCloser io.ReadCloser, err error) { - fileUrl, err := fs.LookupFileId(ctx, part) + fileUrl, err := fs.LookupFileId(part) if err != nil { return "", nil, nil, err } @@ -90,11 +90,11 @@ func (fs *FilerSource) ReadPart(ctx context.Context, part string) (filename stri return filename, header, readCloser, err } -func (fs *FilerSource) withFilerClient(ctx context.Context, grpcDialOption grpc.DialOption, fn func(context.Context, filer_pb.SeaweedFilerClient) error) 
error { +func (fs *FilerSource) withFilerClient(fn func(filer_pb.SeaweedFilerClient) error) error { - return util.WithCachedGrpcClient(ctx, func(ctx2 context.Context, grpcConnection *grpc.ClientConn) error { + return util.WithCachedGrpcClient(func(grpcConnection *grpc.ClientConn) error { client := filer_pb.NewSeaweedFilerClient(grpcConnection) - return fn(ctx2, client) + return fn(client) }, fs.grpcAddress, fs.grpcDialOption) } diff --git a/weed/s3api/filer_util.go b/weed/s3api/filer_util.go index 3c11b032c..f726de515 100644 --- a/weed/s3api/filer_util.go +++ b/weed/s3api/filer_util.go @@ -13,7 +13,7 @@ import ( ) func (s3a *S3ApiServer) mkdir(ctx context.Context, parentDirectoryPath string, dirName string, fn func(entry *filer_pb.Entry)) error { - return s3a.withFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { + return s3a.withFilerClient(func(client filer_pb.SeaweedFilerClient) error { entry := &filer_pb.Entry{ Name: dirName, @@ -37,7 +37,7 @@ func (s3a *S3ApiServer) mkdir(ctx context.Context, parentDirectoryPath string, d } glog.V(1).Infof("mkdir: %v", request) - if err := filer_pb.CreateEntry(ctx, client, request); err != nil { + if err := filer_pb.CreateEntry(client, request); err != nil { glog.V(0).Infof("mkdir %v: %v", request, err) return fmt.Errorf("mkdir %s/%s: %v", parentDirectoryPath, dirName, err) } @@ -47,7 +47,7 @@ func (s3a *S3ApiServer) mkdir(ctx context.Context, parentDirectoryPath string, d } func (s3a *S3ApiServer) mkFile(ctx context.Context, parentDirectoryPath string, fileName string, chunks []*filer_pb.FileChunk) error { - return s3a.withFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { + return s3a.withFilerClient(func(client filer_pb.SeaweedFilerClient) error { entry := &filer_pb.Entry{ Name: fileName, @@ -68,7 +68,7 @@ func (s3a *S3ApiServer) mkFile(ctx context.Context, parentDirectoryPath string, } glog.V(1).Infof("create file: %s/%s", parentDirectoryPath, fileName) - if err := filer_pb.CreateEntry(ctx, client, request); err != nil { + if err := filer_pb.CreateEntry(client, request); err != nil { glog.V(0).Infof("create file %v:%v", request, err) return fmt.Errorf("create file %s/%s: %v", parentDirectoryPath, fileName, err) } @@ -79,7 +79,7 @@ func (s3a *S3ApiServer) mkFile(ctx context.Context, parentDirectoryPath string, func (s3a *S3ApiServer) list(ctx context.Context, parentDirectoryPath, prefix, startFrom string, inclusive bool, limit int) (entries []*filer_pb.Entry, err error) { - err = s3a.withFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { + err = s3a.withFilerClient(func(client filer_pb.SeaweedFilerClient) error { request := &filer_pb.ListEntriesRequest{ Directory: parentDirectoryPath, @@ -119,7 +119,7 @@ func (s3a *S3ApiServer) list(ctx context.Context, parentDirectoryPath, prefix, s func (s3a *S3ApiServer) rm(ctx context.Context, parentDirectoryPath string, entryName string, isDirectory, isDeleteData, isRecursive bool) error { - return s3a.withFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { + return s3a.withFilerClient(func(client filer_pb.SeaweedFilerClient) error { request := &filer_pb.DeleteEntryRequest{ Directory: parentDirectoryPath, @@ -143,7 +143,7 @@ func (s3a *S3ApiServer) streamRemove(ctx context.Context, quiet bool, fn func() (finished bool, parentDirectoryPath string, entryName string, isDeleteData, isRecursive bool), respFn func(err string)) error { - return s3a.withFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { + return s3a.withFilerClient(func(client 
filer_pb.SeaweedFilerClient) error { stream, err := client.StreamDeleteEntries(ctx) if err != nil { @@ -196,7 +196,7 @@ func (s3a *S3ApiServer) streamRemove(ctx context.Context, quiet bool, func (s3a *S3ApiServer) exists(ctx context.Context, parentDirectoryPath string, entryName string, isDirectory bool) (exists bool, err error) { - err = s3a.withFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { + err = s3a.withFilerClient(func(client filer_pb.SeaweedFilerClient) error { request := &filer_pb.LookupDirectoryEntryRequest{ Directory: parentDirectoryPath, diff --git a/weed/s3api/s3api_bucket_handlers.go b/weed/s3api/s3api_bucket_handlers.go index 69275e212..91267be09 100644 --- a/weed/s3api/s3api_bucket_handlers.go +++ b/weed/s3api/s3api_bucket_handlers.go @@ -80,7 +80,7 @@ func (s3a *S3ApiServer) DeleteBucketHandler(w http.ResponseWriter, r *http.Reque bucket := vars["bucket"] ctx := context.Background() - err := s3a.withFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { + err := s3a.withFilerClient(func(client filer_pb.SeaweedFilerClient) error { // delete collection deleteCollectionRequest := &filer_pb.DeleteCollectionRequest{ @@ -112,7 +112,7 @@ func (s3a *S3ApiServer) HeadBucketHandler(w http.ResponseWriter, r *http.Request ctx := context.Background() - err := s3a.withFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { + err := s3a.withFilerClient(func(client filer_pb.SeaweedFilerClient) error { request := &filer_pb.LookupDirectoryEntryRequest{ Directory: s3a.option.BucketsPath, diff --git a/weed/s3api/s3api_handlers.go b/weed/s3api/s3api_handlers.go index 602f03e5c..81a260a63 100644 --- a/weed/s3api/s3api_handlers.go +++ b/weed/s3api/s3api_handlers.go @@ -2,17 +2,18 @@ package s3api import ( "bytes" - "context" "encoding/base64" "encoding/xml" "fmt" - "github.com/chrislusf/seaweedfs/weed/glog" - "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" - "github.com/chrislusf/seaweedfs/weed/util" - "google.golang.org/grpc" "net/http" "net/url" "time" + + "google.golang.org/grpc" + + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/util" ) type mimeType string @@ -37,9 +38,9 @@ func encodeResponse(response interface{}) []byte { return bytesBuffer.Bytes() } -func (s3a *S3ApiServer) withFilerClient(ctx context.Context, fn func(filer_pb.SeaweedFilerClient) error) error { +func (s3a *S3ApiServer) withFilerClient(fn func(filer_pb.SeaweedFilerClient) error) error { - return util.WithCachedGrpcClient(ctx, func(ctx context.Context, grpcConnection *grpc.ClientConn) error { + return util.WithCachedGrpcClient(func(grpcConnection *grpc.ClientConn) error { client := filer_pb.NewSeaweedFilerClient(grpcConnection) return fn(client) }, s3a.option.FilerGrpcAddress, s3a.option.GrpcDialOption) diff --git a/weed/s3api/s3api_objects_list_handlers.go b/weed/s3api/s3api_objects_list_handlers.go index 17ce2b547..97dc931ee 100644 --- a/weed/s3api/s3api_objects_list_handlers.go +++ b/weed/s3api/s3api_objects_list_handlers.go @@ -95,7 +95,7 @@ func (s3a *S3ApiServer) listFilerEntries(ctx context.Context, bucket, originalPr } // check filer - err = s3a.withFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { + err = s3a.withFilerClient(func(client filer_pb.SeaweedFilerClient) error { request := &filer_pb.ListEntriesRequest{ Directory: fmt.Sprintf("%s/%s/%s", s3a.option.BucketsPath, bucket, dir), diff --git a/weed/server/filer_grpc_server.go b/weed/server/filer_grpc_server.go index 
9bbabec26..c6ef04e85 100644 --- a/weed/server/filer_grpc_server.go +++ b/weed/server/filer_grpc_server.go @@ -300,8 +300,8 @@ func (fs *FilerServer) AssignVolume(ctx context.Context, req *filer_pb.AssignVol func (fs *FilerServer) DeleteCollection(ctx context.Context, req *filer_pb.DeleteCollectionRequest) (resp *filer_pb.DeleteCollectionResponse, err error) { - err = fs.filer.MasterClient.WithClient(ctx, func(client master_pb.SeaweedClient) error { - _, err := client.CollectionDelete(ctx, &master_pb.CollectionDeleteRequest{ + err = fs.filer.MasterClient.WithClient(func(client master_pb.SeaweedClient) error { + _, err := client.CollectionDelete(context.Background(), &master_pb.CollectionDeleteRequest{ Name: req.GetCollection(), }) return err diff --git a/weed/server/filer_server.go b/weed/server/filer_server.go index b0df851c9..5fc038e17 100644 --- a/weed/server/filer_server.go +++ b/weed/server/filer_server.go @@ -127,8 +127,8 @@ func maybeStartMetrics(fs *FilerServer, option *FilerOption) { } func readFilerConfiguration(grpcDialOption grpc.DialOption, masterGrpcAddress string) (metricsAddress string, metricsIntervalSec int, err error) { - err = operation.WithMasterServerClient(masterGrpcAddress, grpcDialOption, func(ctx context.Context, masterClient master_pb.SeaweedClient) error { - resp, err := masterClient.GetMasterConfiguration(ctx, &master_pb.GetMasterConfigurationRequest{}) + err = operation.WithMasterServerClient(masterGrpcAddress, grpcDialOption, func(masterClient master_pb.SeaweedClient) error { + resp, err := masterClient.GetMasterConfiguration(context.Background(), &master_pb.GetMasterConfigurationRequest{}) if err != nil { return fmt.Errorf("get master %s configuration: %v", masterGrpcAddress, err) } diff --git a/weed/server/master_grpc_server_collection.go b/weed/server/master_grpc_server_collection.go index f02b0f242..b92d6bcbe 100644 --- a/weed/server/master_grpc_server_collection.go +++ b/weed/server/master_grpc_server_collection.go @@ -4,6 +4,7 @@ import ( "context" "github.com/chrislusf/raft" + "github.com/chrislusf/seaweedfs/weed/operation" "github.com/chrislusf/seaweedfs/weed/pb/master_pb" "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb" @@ -57,8 +58,8 @@ func (ms *MasterServer) doDeleteNormalCollection(collectionName string) error { } for _, server := range collection.ListVolumeServers() { - err := operation.WithVolumeServerClient(server.Url(), ms.grpcDialOption, func(ctx context.Context, client volume_server_pb.VolumeServerClient) error { - _, deleteErr := client.DeleteCollection(ctx, &volume_server_pb.DeleteCollectionRequest{ + err := operation.WithVolumeServerClient(server.Url(), ms.grpcDialOption, func(client volume_server_pb.VolumeServerClient) error { + _, deleteErr := client.DeleteCollection(context.Background(), &volume_server_pb.DeleteCollectionRequest{ Collection: collectionName, }) return deleteErr @@ -77,8 +78,8 @@ func (ms *MasterServer) doDeleteEcCollection(collectionName string) error { listOfEcServers := ms.Topo.ListEcServersByCollection(collectionName) for _, server := range listOfEcServers { - err := operation.WithVolumeServerClient(server, ms.grpcDialOption, func(ctx context.Context, client volume_server_pb.VolumeServerClient) error { - _, deleteErr := client.DeleteCollection(ctx, &volume_server_pb.DeleteCollectionRequest{ + err := operation.WithVolumeServerClient(server, ms.grpcDialOption, func(client volume_server_pb.VolumeServerClient) error { + _, deleteErr := client.DeleteCollection(context.Background(), 
&volume_server_pb.DeleteCollectionRequest{ Collection: collectionName, }) return deleteErr diff --git a/weed/server/master_server.go b/weed/server/master_server.go index f5f547a32..095008339 100644 --- a/weed/server/master_server.go +++ b/weed/server/master_server.go @@ -1,7 +1,6 @@ package weed_server import ( - "context" "fmt" "net/http" "net/http/httputil" @@ -89,7 +88,7 @@ func NewMasterServer(r *mux.Router, option *MasterOption, peers []string) *Maste preallocateSize: preallocateSize, clientChans: make(map[string]chan *master_pb.VolumeLocation), grpcDialOption: grpcDialOption, - MasterClient: wdclient.NewMasterClient(context.Background(), grpcDialOption, "master", peers), + MasterClient: wdclient.NewMasterClient(grpcDialOption, "master", peers), } ms.bounedLeaderChan = make(chan int, 16) diff --git a/weed/server/master_server_handlers_admin.go b/weed/server/master_server_handlers_admin.go index 44a04cb86..2965a4863 100644 --- a/weed/server/master_server_handlers_admin.go +++ b/weed/server/master_server_handlers_admin.go @@ -25,8 +25,8 @@ func (ms *MasterServer) collectionDeleteHandler(w http.ResponseWriter, r *http.R return } for _, server := range collection.ListVolumeServers() { - err := operation.WithVolumeServerClient(server.Url(), ms.grpcDialOption, func(ctx context.Context, client volume_server_pb.VolumeServerClient) error { - _, deleteErr := client.DeleteCollection(ctx, &volume_server_pb.DeleteCollectionRequest{ + err := operation.WithVolumeServerClient(server.Url(), ms.grpcDialOption, func(client volume_server_pb.VolumeServerClient) error { + _, deleteErr := client.DeleteCollection(context.Background(), &volume_server_pb.DeleteCollectionRequest{ Collection: collection.Name, }) return deleteErr diff --git a/weed/server/volume_grpc_copy.go b/weed/server/volume_grpc_copy.go index 6d74f8171..fcd13d364 100644 --- a/weed/server/volume_grpc_copy.go +++ b/weed/server/volume_grpc_copy.go @@ -41,9 +41,9 @@ func (vs *VolumeServer) VolumeCopy(ctx context.Context, req *volume_server_pb.Vo // confirm size and timestamp var volFileInfoResp *volume_server_pb.ReadVolumeFileStatusResponse var volumeFileName, idxFileName, datFileName string - err := operation.WithVolumeServerClient(req.SourceDataNode, vs.grpcDialOption, func(ctx context.Context, client volume_server_pb.VolumeServerClient) error { + err := operation.WithVolumeServerClient(req.SourceDataNode, vs.grpcDialOption, func(client volume_server_pb.VolumeServerClient) error { var err error - volFileInfoResp, err = client.ReadVolumeFileStatus(ctx, + volFileInfoResp, err = client.ReadVolumeFileStatus(context.Background(), &volume_server_pb.ReadVolumeFileStatusRequest{ VolumeId: req.VolumeId, }) diff --git a/weed/server/volume_grpc_erasure_coding.go b/weed/server/volume_grpc_erasure_coding.go index 256e7c447..67efc0f6d 100644 --- a/weed/server/volume_grpc_erasure_coding.go +++ b/weed/server/volume_grpc_erasure_coding.go @@ -106,7 +106,7 @@ func (vs *VolumeServer) VolumeEcShardsCopy(ctx context.Context, req *volume_serv baseFileName := storage.VolumeFileName(location.Directory, req.Collection, int(req.VolumeId)) - err := operation.WithVolumeServerClient(req.SourceDataNode, vs.grpcDialOption, func(ctx context.Context, client volume_server_pb.VolumeServerClient) error { + err := operation.WithVolumeServerClient(req.SourceDataNode, vs.grpcDialOption, func(client volume_server_pb.VolumeServerClient) error { // copy ec data slices for _, shardId := range req.ShardIds { diff --git a/weed/server/volume_grpc_file.go b/weed/server/volume_grpc_file.go 
index c20aeb60f..4d71ddeb1 100644 --- a/weed/server/volume_grpc_file.go +++ b/weed/server/volume_grpc_file.go @@ -1,7 +1,6 @@ package weed_server import ( - "context" "encoding/json" "net/http" "strings" @@ -45,7 +44,7 @@ func (vs *VolumeServer) FileGet(req *volume_server_pb.FileGetRequest, stream vol if hasVolume { count, err = vs.store.ReadVolumeNeedle(volumeId, n) } else if hasEcVolume { - count, err = vs.store.ReadEcShardNeedle(context.Background(), volumeId, n) + count, err = vs.store.ReadEcShardNeedle(volumeId, n) } if err != nil || count < 0 { diff --git a/weed/server/volume_server_handlers_read.go b/weed/server/volume_server_handlers_read.go index d89d13a0d..40a951a74 100644 --- a/weed/server/volume_server_handlers_read.go +++ b/weed/server/volume_server_handlers_read.go @@ -2,7 +2,7 @@ package weed_server import ( "bytes" - "context" + "encoding/json" "errors" "fmt" "io" @@ -15,8 +15,6 @@ import ( "strings" "time" - "encoding/json" - "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/images" "github.com/chrislusf/seaweedfs/weed/operation" @@ -86,7 +84,7 @@ func (vs *VolumeServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request) if hasVolume { count, err = vs.store.ReadVolumeNeedle(volumeId, n) } else if hasEcVolume { - count, err = vs.store.ReadEcShardNeedle(context.Background(), volumeId, n) + count, err = vs.store.ReadEcShardNeedle(volumeId, n) } // glog.V(4).Infoln("read bytes", count, "error", err) if err != nil || count < 0 { diff --git a/weed/server/webdav_server.go b/weed/server/webdav_server.go index 7a7ab7a4b..959e50128 100644 --- a/weed/server/webdav_server.go +++ b/weed/server/webdav_server.go @@ -96,11 +96,11 @@ func NewWebDavFileSystem(option *WebDavOption) (webdav.FileSystem, error) { }, nil } -func (fs *WebDavFileSystem) WithFilerClient(ctx context.Context, fn func(context.Context, filer_pb.SeaweedFilerClient) error) error { +func (fs *WebDavFileSystem) WithFilerClient(fn func(filer_pb.SeaweedFilerClient) error) error { - return util.WithCachedGrpcClient(ctx, func(ctx2 context.Context, grpcConnection *grpc.ClientConn) error { + return util.WithCachedGrpcClient(func(grpcConnection *grpc.ClientConn) error { client := filer_pb.NewSeaweedFilerClient(grpcConnection) - return fn(ctx2, client) + return fn(client) }, fs.option.FilerGrpcAddress, fs.option.GrpcDialOption) } @@ -135,7 +135,7 @@ func (fs *WebDavFileSystem) Mkdir(ctx context.Context, fullDirPath string, perm return os.ErrExist } - return fs.WithFilerClient(ctx, func(ctx context.Context, client filer_pb.SeaweedFilerClient) error { + return fs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { dir, name := filer2.FullPath(fullDirPath).DirAndName() request := &filer_pb.CreateEntryRequest{ Directory: dir, @@ -153,7 +153,7 @@ func (fs *WebDavFileSystem) Mkdir(ctx context.Context, fullDirPath string, perm } glog.V(1).Infof("mkdir: %v", request) - if err := filer_pb.CreateEntry(ctx, client, request); err != nil { + if err := filer_pb.CreateEntry(client, request); err != nil { return fmt.Errorf("mkdir %s/%s: %v", dir, name, err) } @@ -184,8 +184,8 @@ func (fs *WebDavFileSystem) OpenFile(ctx context.Context, fullFilePath string, f } dir, name := filer2.FullPath(fullFilePath).DirAndName() - err = fs.WithFilerClient(ctx, func(ctx context.Context, client filer_pb.SeaweedFilerClient) error { - if err := filer_pb.CreateEntry(ctx, client, &filer_pb.CreateEntryRequest{ + err = fs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { + if err := 
filer_pb.CreateEntry(client, &filer_pb.CreateEntryRequest{ Directory: dir, Entry: &filer_pb.Entry{ Name: name, @@ -249,7 +249,7 @@ func (fs *WebDavFileSystem) removeAll(ctx context.Context, fullFilePath string) //_, err = fs.db.Exec(`delete from filesystem where fullFilePath = ?`, fullFilePath) } dir, name := filer2.FullPath(fullFilePath).DirAndName() - err = fs.WithFilerClient(ctx, func(ctx context.Context, client filer_pb.SeaweedFilerClient) error { + err = fs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { request := &filer_pb.DeleteEntryRequest{ Directory: dir, @@ -308,7 +308,7 @@ func (fs *WebDavFileSystem) Rename(ctx context.Context, oldName, newName string) oldDir, oldBaseName := filer2.FullPath(oldName).DirAndName() newDir, newBaseName := filer2.FullPath(newName).DirAndName() - return fs.WithFilerClient(ctx, func(ctx context.Context, client filer_pb.SeaweedFilerClient) error { + return fs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { request := &filer_pb.AtomicRenameEntryRequest{ OldDirectory: oldDir, @@ -336,7 +336,7 @@ func (fs *WebDavFileSystem) stat(ctx context.Context, fullFilePath string) (os.F fullpath := filer2.FullPath(fullFilePath) var fi FileInfo - entry, err := filer2.GetEntry(ctx, fs, fullpath) + entry, err := filer2.GetEntry(fs, fullpath) if entry == nil { return nil, os.ErrNotExist } @@ -372,7 +372,7 @@ func (f *WebDavFile) Write(buf []byte) (int, error) { var err error ctx := context.Background() if f.entry == nil { - f.entry, err = filer2.GetEntry(ctx, f.fs, filer2.FullPath(f.name)) + f.entry, err = filer2.GetEntry(f.fs, filer2.FullPath(f.name)) } if f.entry == nil { @@ -386,7 +386,7 @@ func (f *WebDavFile) Write(buf []byte) (int, error) { var auth security.EncodedJwt var collection, replication string - if err = f.fs.WithFilerClient(ctx, func(ctx context.Context, client filer_pb.SeaweedFilerClient) error { + if err = f.fs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { request := &filer_pb.AssignVolumeRequest{ Count: 1, @@ -434,7 +434,7 @@ func (f *WebDavFile) Write(buf []byte) (int, error) { f.entry.Chunks = append(f.entry.Chunks, chunk) - err = f.fs.WithFilerClient(ctx, func(ctx context.Context, client filer_pb.SeaweedFilerClient) error { + err = f.fs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { f.entry.Attributes.Mtime = time.Now().Unix() f.entry.Attributes.Collection = collection f.entry.Attributes.Replication = replication @@ -474,10 +474,9 @@ func (f *WebDavFile) Close() error { func (f *WebDavFile) Read(p []byte) (readSize int, err error) { glog.V(2).Infof("WebDavFileSystem.Read %v", f.name) - ctx := context.Background() if f.entry == nil { - f.entry, err = filer2.GetEntry(ctx, f.fs, filer2.FullPath(f.name)) + f.entry, err = filer2.GetEntry(f.fs, filer2.FullPath(f.name)) } if f.entry == nil { return 0, err @@ -493,7 +492,7 @@ func (f *WebDavFile) Read(p []byte) (readSize int, err error) { } chunkViews := filer2.ViewFromVisibleIntervals(f.entryViewCache, f.off, len(p)) - totalRead, err := filer2.ReadIntoBuffer(ctx, f.fs, filer2.FullPath(f.name), p, chunkViews, f.off) + totalRead, err := filer2.ReadIntoBuffer(f.fs, filer2.FullPath(f.name), p, chunkViews, f.off) if err != nil { return 0, err } @@ -512,11 +511,10 @@ func (f *WebDavFile) Read(p []byte) (readSize int, err error) { func (f *WebDavFile) Readdir(count int) (ret []os.FileInfo, err error) { glog.V(2).Infof("WebDavFileSystem.Readdir %v count %d", f.name, count) - ctx := context.Background() dir, _ := 
filer2.FullPath(f.name).DirAndName() - err = filer2.ReadDirAllEntries(ctx, f.fs, filer2.FullPath(dir), "", func(entry *filer_pb.Entry, isLast bool) { + err = filer2.ReadDirAllEntries(f.fs, filer2.FullPath(dir), "", func(entry *filer_pb.Entry, isLast bool) { fi := FileInfo{ size: int64(filer2.TotalSize(entry.GetChunks())), name: entry.Name, diff --git a/weed/shell/command_bucket_create.go b/weed/shell/command_bucket_create.go index 603e9c564..3546528aa 100644 --- a/weed/shell/command_bucket_create.go +++ b/weed/shell/command_bucket_create.go @@ -48,11 +48,9 @@ func (c *commandBucketCreate) Do(args []string, commandEnv *CommandEnv, writer i return parseErr } - ctx := context.Background() + err = commandEnv.withFilerClient(filerServer, filerPort, func(client filer_pb.SeaweedFilerClient) error { - err = commandEnv.withFilerClient(ctx, filerServer, filerPort, func(ctx context.Context, client filer_pb.SeaweedFilerClient) error { - - resp, err := client.GetFilerConfiguration(ctx, &filer_pb.GetFilerConfigurationRequest{}) + resp, err := client.GetFilerConfiguration(context.Background(), &filer_pb.GetFilerConfigurationRequest{}) if err != nil { return fmt.Errorf("get filer %s:%d configuration: %v", filerServer, filerPort, err) } @@ -72,7 +70,7 @@ func (c *commandBucketCreate) Do(args []string, commandEnv *CommandEnv, writer i }, } - if err := filer_pb.CreateEntry(ctx, client, &filer_pb.CreateEntryRequest{ + if err := filer_pb.CreateEntry(client, &filer_pb.CreateEntryRequest{ Directory: filerBucketsPath, Entry: entry, }); err != nil { diff --git a/weed/shell/command_bucket_delete.go b/weed/shell/command_bucket_delete.go index 9e814ccf9..c57ce7221 100644 --- a/weed/shell/command_bucket_delete.go +++ b/weed/shell/command_bucket_delete.go @@ -44,17 +44,15 @@ func (c *commandBucketDelete) Do(args []string, commandEnv *CommandEnv, writer i return parseErr } - ctx := context.Background() + err = commandEnv.withFilerClient(filerServer, filerPort, func(client filer_pb.SeaweedFilerClient) error { - err = commandEnv.withFilerClient(ctx, filerServer, filerPort, func(ctx context.Context, client filer_pb.SeaweedFilerClient) error { - - resp, err := client.GetFilerConfiguration(ctx, &filer_pb.GetFilerConfigurationRequest{}) + resp, err := client.GetFilerConfiguration(context.Background(), &filer_pb.GetFilerConfigurationRequest{}) if err != nil { return fmt.Errorf("get filer %s:%d configuration: %v", filerServer, filerPort, err) } filerBucketsPath := resp.DirBuckets - if _, err := client.DeleteEntry(ctx, &filer_pb.DeleteEntryRequest{ + if _, err := client.DeleteEntry(context.Background(), &filer_pb.DeleteEntryRequest{ Directory: filerBucketsPath, Name: *bucketName, IsDeleteData: false, diff --git a/weed/shell/command_bucket_list.go b/weed/shell/command_bucket_list.go index 32198c29d..5eb5972ce 100644 --- a/weed/shell/command_bucket_list.go +++ b/weed/shell/command_bucket_list.go @@ -39,17 +39,15 @@ func (c *commandBucketList) Do(args []string, commandEnv *CommandEnv, writer io. 
return parseErr } - ctx := context.Background() + err = commandEnv.withFilerClient(filerServer, filerPort, func(client filer_pb.SeaweedFilerClient) error { - err = commandEnv.withFilerClient(ctx, filerServer, filerPort, func(ctx context.Context, client filer_pb.SeaweedFilerClient) error { - - resp, err := client.GetFilerConfiguration(ctx, &filer_pb.GetFilerConfigurationRequest{}) + resp, err := client.GetFilerConfiguration(context.Background(), &filer_pb.GetFilerConfigurationRequest{}) if err != nil { return fmt.Errorf("get filer %s:%d configuration: %v", filerServer, filerPort, err) } filerBucketsPath := resp.DirBuckets - stream, err := client.ListEntries(ctx, &filer_pb.ListEntriesRequest{ + stream, err := client.ListEntries(context.Background(), &filer_pb.ListEntriesRequest{ Directory: filerBucketsPath, Limit: math.MaxUint32, }) diff --git a/weed/shell/command_collection_delete.go b/weed/shell/command_collection_delete.go index fbaddcd51..4b3d7f0be 100644 --- a/weed/shell/command_collection_delete.go +++ b/weed/shell/command_collection_delete.go @@ -34,9 +34,8 @@ func (c *commandCollectionDelete) Do(args []string, commandEnv *CommandEnv, writ collectionName := args[0] - ctx := context.Background() - err = commandEnv.MasterClient.WithClient(ctx, func(client master_pb.SeaweedClient) error { - _, err = client.CollectionDelete(ctx, &master_pb.CollectionDeleteRequest{ + err = commandEnv.MasterClient.WithClient(func(client master_pb.SeaweedClient) error { + _, err = client.CollectionDelete(context.Background(), &master_pb.CollectionDeleteRequest{ Name: collectionName, }) return err diff --git a/weed/shell/command_collection_list.go b/weed/shell/command_collection_list.go index c4325c66f..2a114e61b 100644 --- a/weed/shell/command_collection_list.go +++ b/weed/shell/command_collection_list.go @@ -41,9 +41,8 @@ func (c *commandCollectionList) Do(args []string, commandEnv *CommandEnv, writer func ListCollectionNames(commandEnv *CommandEnv, includeNormalVolumes, includeEcVolumes bool) (collections []string, err error) { var resp *master_pb.CollectionListResponse - ctx := context.Background() - err = commandEnv.MasterClient.WithClient(ctx, func(client master_pb.SeaweedClient) error { - resp, err = client.CollectionList(ctx, &master_pb.CollectionListRequest{ + err = commandEnv.MasterClient.WithClient(func(client master_pb.SeaweedClient) error { + resp, err = client.CollectionList(context.Background(), &master_pb.CollectionListRequest{ IncludeNormalVolumes: includeNormalVolumes, IncludeEcVolumes: includeEcVolumes, }) diff --git a/weed/shell/command_ec_balance.go b/weed/shell/command_ec_balance.go index 96599372e..7230a869f 100644 --- a/weed/shell/command_ec_balance.go +++ b/weed/shell/command_ec_balance.go @@ -107,10 +107,8 @@ func (c *commandEcBalance) Do(args []string, commandEnv *CommandEnv, writer io.W return nil } - ctx := context.Background() - // collect all ec nodes - allEcNodes, totalFreeEcSlots, err := collectEcNodes(ctx, commandEnv, *dc) + allEcNodes, totalFreeEcSlots, err := collectEcNodes(commandEnv, *dc) if err != nil { return err } @@ -138,7 +136,7 @@ func (c *commandEcBalance) Do(args []string, commandEnv *CommandEnv, writer io.W } } - if err := balanceEcRacks(ctx, commandEnv, racks, *applyBalancing); err != nil { + if err := balanceEcRacks(commandEnv, racks, *applyBalancing); err != nil { return fmt.Errorf("balance ec racks: %v", err) } @@ -170,11 +168,11 @@ func balanceEcVolumes(commandEnv *CommandEnv, collection string, allEcNodes []*E return fmt.Errorf("delete duplicated collection 
%s ec shards: %v", collection, err) } - if err := balanceEcShardsAcrossRacks(ctx, commandEnv, allEcNodes, racks, collection, applyBalancing); err != nil { + if err := balanceEcShardsAcrossRacks(commandEnv, allEcNodes, racks, collection, applyBalancing); err != nil { return fmt.Errorf("balance across racks collection %s ec shards: %v", collection, err) } - if err := balanceEcShardsWithinRacks(ctx, commandEnv, allEcNodes, racks, collection, applyBalancing); err != nil { + if err := balanceEcShardsWithinRacks(commandEnv, allEcNodes, racks, collection, applyBalancing); err != nil { return fmt.Errorf("balance across racks collection %s ec shards: %v", collection, err) } @@ -186,14 +184,14 @@ func deleteDuplicatedEcShards(ctx context.Context, commandEnv *CommandEnv, allEc vidLocations := collectVolumeIdToEcNodes(allEcNodes) // deduplicate ec shards for vid, locations := range vidLocations { - if err := doDeduplicateEcShards(ctx, commandEnv, collection, vid, locations, applyBalancing); err != nil { + if err := doDeduplicateEcShards(commandEnv, collection, vid, locations, applyBalancing); err != nil { return err } } return nil } -func doDeduplicateEcShards(ctx context.Context, commandEnv *CommandEnv, collection string, vid needle.VolumeId, locations []*EcNode, applyBalancing bool) error { +func doDeduplicateEcShards(commandEnv *CommandEnv, collection string, vid needle.VolumeId, locations []*EcNode, applyBalancing bool) error { // check whether this volume has ecNodes that are over average shardToLocations := make([][]*EcNode, erasure_coding.TotalShardsCount) @@ -215,10 +213,10 @@ func doDeduplicateEcShards(ctx context.Context, commandEnv *CommandEnv, collecti duplicatedShardIds := []uint32{uint32(shardId)} for _, ecNode := range ecNodes[1:] { - if err := unmountEcShards(ctx, commandEnv.option.GrpcDialOption, vid, ecNode.info.Id, duplicatedShardIds); err != nil { + if err := unmountEcShards(commandEnv.option.GrpcDialOption, vid, ecNode.info.Id, duplicatedShardIds); err != nil { return err } - if err := sourceServerDeleteEcShards(ctx, commandEnv.option.GrpcDialOption, collection, vid, ecNode.info.Id, duplicatedShardIds); err != nil { + if err := sourceServerDeleteEcShards(commandEnv.option.GrpcDialOption, collection, vid, ecNode.info.Id, duplicatedShardIds); err != nil { return err } ecNode.deleteEcVolumeShards(vid, duplicatedShardIds) @@ -227,19 +225,19 @@ func doDeduplicateEcShards(ctx context.Context, commandEnv *CommandEnv, collecti return nil } -func balanceEcShardsAcrossRacks(ctx context.Context, commandEnv *CommandEnv, allEcNodes []*EcNode, racks map[RackId]*EcRack, collection string, applyBalancing bool) error { +func balanceEcShardsAcrossRacks(commandEnv *CommandEnv, allEcNodes []*EcNode, racks map[RackId]*EcRack, collection string, applyBalancing bool) error { // collect vid => []ecNode, since previous steps can change the locations vidLocations := collectVolumeIdToEcNodes(allEcNodes) // spread the ec shards evenly for vid, locations := range vidLocations { - if err := doBalanceEcShardsAcrossRacks(ctx, commandEnv, collection, vid, locations, racks, applyBalancing); err != nil { + if err := doBalanceEcShardsAcrossRacks(commandEnv, collection, vid, locations, racks, applyBalancing); err != nil { return err } } return nil } -func doBalanceEcShardsAcrossRacks(ctx context.Context, commandEnv *CommandEnv, collection string, vid needle.VolumeId, locations []*EcNode, racks map[RackId]*EcRack, applyBalancing bool) error { +func doBalanceEcShardsAcrossRacks(commandEnv *CommandEnv, collection 
string, vid needle.VolumeId, locations []*EcNode, racks map[RackId]*EcRack, applyBalancing bool) error { // calculate average number of shards an ec rack should have for one volume averageShardsPerEcRack := ceilDivide(erasure_coding.TotalShardsCount, len(racks)) @@ -274,7 +272,7 @@ func doBalanceEcShardsAcrossRacks(ctx context.Context, commandEnv *CommandEnv, c for _, n := range racks[rackId].ecNodes { possibleDestinationEcNodes = append(possibleDestinationEcNodes, n) } - err := pickOneEcNodeAndMoveOneShard(ctx, commandEnv, averageShardsPerEcRack, ecNode, collection, vid, shardId, possibleDestinationEcNodes, applyBalancing) + err := pickOneEcNodeAndMoveOneShard(commandEnv, averageShardsPerEcRack, ecNode, collection, vid, shardId, possibleDestinationEcNodes, applyBalancing) if err != nil { return err } @@ -306,7 +304,7 @@ func pickOneRack(rackToEcNodes map[RackId]*EcRack, rackToShardCount map[string]i return "" } -func balanceEcShardsWithinRacks(ctx context.Context, commandEnv *CommandEnv, allEcNodes []*EcNode, racks map[RackId]*EcRack, collection string, applyBalancing bool) error { +func balanceEcShardsWithinRacks(commandEnv *CommandEnv, allEcNodes []*EcNode, racks map[RackId]*EcRack, collection string, applyBalancing bool) error { // collect vid => []ecNode, since previous steps can change the locations vidLocations := collectVolumeIdToEcNodes(allEcNodes) @@ -330,7 +328,7 @@ func balanceEcShardsWithinRacks(ctx context.Context, commandEnv *CommandEnv, all } sourceEcNodes := rackEcNodesWithVid[rackId] averageShardsPerEcNode := ceilDivide(rackToShardCount[rackId], len(possibleDestinationEcNodes)) - if err := doBalanceEcShardsWithinOneRack(ctx, commandEnv, averageShardsPerEcNode, collection, vid, sourceEcNodes, possibleDestinationEcNodes, applyBalancing); err != nil { + if err := doBalanceEcShardsWithinOneRack(commandEnv, averageShardsPerEcNode, collection, vid, sourceEcNodes, possibleDestinationEcNodes, applyBalancing); err != nil { return err } } @@ -338,7 +336,7 @@ func balanceEcShardsWithinRacks(ctx context.Context, commandEnv *CommandEnv, all return nil } -func doBalanceEcShardsWithinOneRack(ctx context.Context, commandEnv *CommandEnv, averageShardsPerEcNode int, collection string, vid needle.VolumeId, existingLocations, possibleDestinationEcNodes []*EcNode, applyBalancing bool) error { +func doBalanceEcShardsWithinOneRack(commandEnv *CommandEnv, averageShardsPerEcNode int, collection string, vid needle.VolumeId, existingLocations, possibleDestinationEcNodes []*EcNode, applyBalancing bool) error { for _, ecNode := range existingLocations { @@ -353,7 +351,7 @@ func doBalanceEcShardsWithinOneRack(ctx context.Context, commandEnv *CommandEnv, fmt.Printf("%s has %d overlimit, moving ec shard %d.%d\n", ecNode.info.Id, overLimitCount, vid, shardId) - err := pickOneEcNodeAndMoveOneShard(ctx, commandEnv, averageShardsPerEcNode, ecNode, collection, vid, shardId, possibleDestinationEcNodes, applyBalancing) + err := pickOneEcNodeAndMoveOneShard(commandEnv, averageShardsPerEcNode, ecNode, collection, vid, shardId, possibleDestinationEcNodes, applyBalancing) if err != nil { return err } @@ -365,18 +363,18 @@ func doBalanceEcShardsWithinOneRack(ctx context.Context, commandEnv *CommandEnv, return nil } -func balanceEcRacks(ctx context.Context, commandEnv *CommandEnv, racks map[RackId]*EcRack, applyBalancing bool) error { +func balanceEcRacks(commandEnv *CommandEnv, racks map[RackId]*EcRack, applyBalancing bool) error { // balance one rack for all ec shards for _, ecRack := range racks { - if err := 
doBalanceEcRack(ctx, commandEnv, ecRack, applyBalancing); err != nil { + if err := doBalanceEcRack(commandEnv, ecRack, applyBalancing); err != nil { return err } } return nil } -func doBalanceEcRack(ctx context.Context, commandEnv *CommandEnv, ecRack *EcRack, applyBalancing bool) error { +func doBalanceEcRack(commandEnv *CommandEnv, ecRack *EcRack, applyBalancing bool) error { if len(ecRack.ecNodes) <= 1 { return nil @@ -421,7 +419,7 @@ func doBalanceEcRack(ctx context.Context, commandEnv *CommandEnv, ecRack *EcRack fmt.Printf("%s moves ec shards %d.%d to %s\n", fullNode.info.Id, shards.Id, shardId, emptyNode.info.Id) - err := moveMountedShardToEcNode(ctx, commandEnv, fullNode, shards.Collection, needle.VolumeId(shards.Id), shardId, emptyNode, applyBalancing) + err := moveMountedShardToEcNode(commandEnv, fullNode, shards.Collection, needle.VolumeId(shards.Id), shardId, emptyNode, applyBalancing) if err != nil { return err } @@ -440,7 +438,7 @@ func doBalanceEcRack(ctx context.Context, commandEnv *CommandEnv, ecRack *EcRack return nil } -func pickOneEcNodeAndMoveOneShard(ctx context.Context, commandEnv *CommandEnv, averageShardsPerEcNode int, existingLocation *EcNode, collection string, vid needle.VolumeId, shardId erasure_coding.ShardId, possibleDestinationEcNodes []*EcNode, applyBalancing bool) error { +func pickOneEcNodeAndMoveOneShard(commandEnv *CommandEnv, averageShardsPerEcNode int, existingLocation *EcNode, collection string, vid needle.VolumeId, shardId erasure_coding.ShardId, possibleDestinationEcNodes []*EcNode, applyBalancing bool) error { sortEcNodesByFreeslotsDecending(possibleDestinationEcNodes) @@ -458,7 +456,7 @@ func pickOneEcNodeAndMoveOneShard(ctx context.Context, commandEnv *CommandEnv, a fmt.Printf("%s moves ec shard %d.%d to %s\n", existingLocation.info.Id, vid, shardId, destEcNode.info.Id) - err := moveMountedShardToEcNode(ctx, commandEnv, existingLocation, collection, vid, shardId, destEcNode, applyBalancing) + err := moveMountedShardToEcNode(commandEnv, existingLocation, collection, vid, shardId, destEcNode, applyBalancing) if err != nil { return err } diff --git a/weed/shell/command_ec_common.go b/weed/shell/command_ec_common.go index e187d5a3b..0db119d3c 100644 --- a/weed/shell/command_ec_common.go +++ b/weed/shell/command_ec_common.go @@ -15,26 +15,26 @@ import ( "google.golang.org/grpc" ) -func moveMountedShardToEcNode(ctx context.Context, commandEnv *CommandEnv, existingLocation *EcNode, collection string, vid needle.VolumeId, shardId erasure_coding.ShardId, destinationEcNode *EcNode, applyBalancing bool) (err error) { +func moveMountedShardToEcNode(commandEnv *CommandEnv, existingLocation *EcNode, collection string, vid needle.VolumeId, shardId erasure_coding.ShardId, destinationEcNode *EcNode, applyBalancing bool) (err error) { copiedShardIds := []uint32{uint32(shardId)} if applyBalancing { // ask destination node to copy shard and the ecx file from source node, and mount it - copiedShardIds, err = oneServerCopyAndMountEcShardsFromSource(ctx, commandEnv.option.GrpcDialOption, destinationEcNode, []uint32{uint32(shardId)}, vid, collection, existingLocation.info.Id) + copiedShardIds, err = oneServerCopyAndMountEcShardsFromSource(commandEnv.option.GrpcDialOption, destinationEcNode, []uint32{uint32(shardId)}, vid, collection, existingLocation.info.Id) if err != nil { return err } // unmount the to be deleted shards - err = unmountEcShards(ctx, commandEnv.option.GrpcDialOption, vid, existingLocation.info.Id, copiedShardIds) + err = 
unmountEcShards(commandEnv.option.GrpcDialOption, vid, existingLocation.info.Id, copiedShardIds) if err != nil { return err } // ask source node to delete the shard, and maybe the ecx file - err = sourceServerDeleteEcShards(ctx, commandEnv.option.GrpcDialOption, collection, vid, existingLocation.info.Id, copiedShardIds) + err = sourceServerDeleteEcShards(commandEnv.option.GrpcDialOption, collection, vid, existingLocation.info.Id, copiedShardIds) if err != nil { return err } @@ -50,18 +50,18 @@ func moveMountedShardToEcNode(ctx context.Context, commandEnv *CommandEnv, exist } -func oneServerCopyAndMountEcShardsFromSource(ctx context.Context, grpcDialOption grpc.DialOption, +func oneServerCopyAndMountEcShardsFromSource(grpcDialOption grpc.DialOption, targetServer *EcNode, shardIdsToCopy []uint32, volumeId needle.VolumeId, collection string, existingLocation string) (copiedShardIds []uint32, err error) { fmt.Printf("allocate %d.%v %s => %s\n", volumeId, shardIdsToCopy, existingLocation, targetServer.info.Id) - err = operation.WithVolumeServerClient(targetServer.info.Id, grpcDialOption, func(ctx context.Context, volumeServerClient volume_server_pb.VolumeServerClient) error { + err = operation.WithVolumeServerClient(targetServer.info.Id, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error { if targetServer.info.Id != existingLocation { fmt.Printf("copy %d.%v %s => %s\n", volumeId, shardIdsToCopy, existingLocation, targetServer.info.Id) - _, copyErr := volumeServerClient.VolumeEcShardsCopy(ctx, &volume_server_pb.VolumeEcShardsCopyRequest{ + _, copyErr := volumeServerClient.VolumeEcShardsCopy(context.Background(), &volume_server_pb.VolumeEcShardsCopyRequest{ VolumeId: uint32(volumeId), Collection: collection, ShardIds: shardIdsToCopy, @@ -76,7 +76,7 @@ func oneServerCopyAndMountEcShardsFromSource(ctx context.Context, grpcDialOption } fmt.Printf("mount %d.%v on %s\n", volumeId, shardIdsToCopy, targetServer.info.Id) - _, mountErr := volumeServerClient.VolumeEcShardsMount(ctx, &volume_server_pb.VolumeEcShardsMountRequest{ + _, mountErr := volumeServerClient.VolumeEcShardsMount(context.Background(), &volume_server_pb.VolumeEcShardsMountRequest{ VolumeId: uint32(volumeId), Collection: collection, ShardIds: shardIdsToCopy, @@ -178,12 +178,12 @@ type EcRack struct { freeEcSlot int } -func collectEcNodes(ctx context.Context, commandEnv *CommandEnv, selectedDataCenter string) (ecNodes []*EcNode, totalFreeEcSlots int, err error) { +func collectEcNodes(commandEnv *CommandEnv, selectedDataCenter string) (ecNodes []*EcNode, totalFreeEcSlots int, err error) { // list all possible locations var resp *master_pb.VolumeListResponse - err = commandEnv.MasterClient.WithClient(ctx, func(client master_pb.SeaweedClient) error { - resp, err = client.VolumeList(ctx, &master_pb.VolumeListRequest{}) + err = commandEnv.MasterClient.WithClient(func(client master_pb.SeaweedClient) error { + resp, err = client.VolumeList(context.Background(), &master_pb.VolumeListRequest{}) return err }) if err != nil { @@ -211,13 +211,12 @@ func collectEcNodes(ctx context.Context, commandEnv *CommandEnv, selectedDataCen return } -func sourceServerDeleteEcShards(ctx context.Context, grpcDialOption grpc.DialOption, - collection string, volumeId needle.VolumeId, sourceLocation string, toBeDeletedShardIds []uint32) error { +func sourceServerDeleteEcShards(grpcDialOption grpc.DialOption, collection string, volumeId needle.VolumeId, sourceLocation string, toBeDeletedShardIds []uint32) error { fmt.Printf("delete %d.%v 
from %s\n", volumeId, toBeDeletedShardIds, sourceLocation) - return operation.WithVolumeServerClient(sourceLocation, grpcDialOption, func(ctx context.Context, volumeServerClient volume_server_pb.VolumeServerClient) error { - _, deleteErr := volumeServerClient.VolumeEcShardsDelete(ctx, &volume_server_pb.VolumeEcShardsDeleteRequest{ + return operation.WithVolumeServerClient(sourceLocation, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error { + _, deleteErr := volumeServerClient.VolumeEcShardsDelete(context.Background(), &volume_server_pb.VolumeEcShardsDeleteRequest{ VolumeId: uint32(volumeId), Collection: collection, ShardIds: toBeDeletedShardIds, @@ -227,13 +226,12 @@ func sourceServerDeleteEcShards(ctx context.Context, grpcDialOption grpc.DialOpt } -func unmountEcShards(ctx context.Context, grpcDialOption grpc.DialOption, - volumeId needle.VolumeId, sourceLocation string, toBeUnmountedhardIds []uint32) error { +func unmountEcShards(grpcDialOption grpc.DialOption, volumeId needle.VolumeId, sourceLocation string, toBeUnmountedhardIds []uint32) error { fmt.Printf("unmount %d.%v from %s\n", volumeId, toBeUnmountedhardIds, sourceLocation) - return operation.WithVolumeServerClient(sourceLocation, grpcDialOption, func(ctx context.Context, volumeServerClient volume_server_pb.VolumeServerClient) error { - _, deleteErr := volumeServerClient.VolumeEcShardsUnmount(ctx, &volume_server_pb.VolumeEcShardsUnmountRequest{ + return operation.WithVolumeServerClient(sourceLocation, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error { + _, deleteErr := volumeServerClient.VolumeEcShardsUnmount(context.Background(), &volume_server_pb.VolumeEcShardsUnmountRequest{ VolumeId: uint32(volumeId), ShardIds: toBeUnmountedhardIds, }) @@ -241,13 +239,12 @@ func unmountEcShards(ctx context.Context, grpcDialOption grpc.DialOption, }) } -func mountEcShards(ctx context.Context, grpcDialOption grpc.DialOption, - collection string, volumeId needle.VolumeId, sourceLocation string, toBeMountedhardIds []uint32) error { +func mountEcShards(grpcDialOption grpc.DialOption, collection string, volumeId needle.VolumeId, sourceLocation string, toBeMountedhardIds []uint32) error { fmt.Printf("mount %d.%v on %s\n", volumeId, toBeMountedhardIds, sourceLocation) - return operation.WithVolumeServerClient(sourceLocation, grpcDialOption, func(ctx context.Context, volumeServerClient volume_server_pb.VolumeServerClient) error { - _, mountErr := volumeServerClient.VolumeEcShardsMount(ctx, &volume_server_pb.VolumeEcShardsMountRequest{ + return operation.WithVolumeServerClient(sourceLocation, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error { + _, mountErr := volumeServerClient.VolumeEcShardsMount(context.Background(), &volume_server_pb.VolumeEcShardsMountRequest{ VolumeId: uint32(volumeId), Collection: collection, ShardIds: toBeMountedhardIds, diff --git a/weed/shell/command_ec_decode.go b/weed/shell/command_ec_decode.go index 8a705a5ae..b69e403cb 100644 --- a/weed/shell/command_ec_decode.go +++ b/weed/shell/command_ec_decode.go @@ -43,25 +43,24 @@ func (c *commandEcDecode) Do(args []string, commandEnv *CommandEnv, writer io.Wr return nil } - ctx := context.Background() vid := needle.VolumeId(*volumeId) // collect topology information - topologyInfo, err := collectTopologyInfo(ctx, commandEnv) + topologyInfo, err := collectTopologyInfo(commandEnv) if err != nil { return err } // volumeId is provided if vid != 0 { - return doEcDecode(ctx, commandEnv, 
topologyInfo, *collection, vid) + return doEcDecode(commandEnv, topologyInfo, *collection, vid) } // apply to all volumes in the collection volumeIds := collectEcShardIds(topologyInfo, *collection) fmt.Printf("ec encode volumes: %v\n", volumeIds) for _, vid := range volumeIds { - if err = doEcDecode(ctx, commandEnv, topologyInfo, *collection, vid); err != nil { + if err = doEcDecode(commandEnv, topologyInfo, *collection, vid); err != nil { return err } } @@ -69,26 +68,26 @@ func (c *commandEcDecode) Do(args []string, commandEnv *CommandEnv, writer io.Wr return nil } -func doEcDecode(ctx context.Context, commandEnv *CommandEnv, topoInfo *master_pb.TopologyInfo, collection string, vid needle.VolumeId) (err error) { +func doEcDecode(commandEnv *CommandEnv, topoInfo *master_pb.TopologyInfo, collection string, vid needle.VolumeId) (err error) { // find volume location nodeToEcIndexBits := collectEcNodeShardBits(topoInfo, vid) fmt.Printf("ec volume %d shard locations: %+v\n", vid, nodeToEcIndexBits) // collect ec shards to the server with most space - targetNodeLocation, err := collectEcShards(ctx, commandEnv, nodeToEcIndexBits, collection, vid) + targetNodeLocation, err := collectEcShards(commandEnv, nodeToEcIndexBits, collection, vid) if err != nil { return fmt.Errorf("collectEcShards for volume %d: %v", vid, err) } // generate a normal volume - err = generateNormalVolume(ctx, commandEnv.option.GrpcDialOption, needle.VolumeId(vid), collection, targetNodeLocation) + err = generateNormalVolume(commandEnv.option.GrpcDialOption, needle.VolumeId(vid), collection, targetNodeLocation) if err != nil { return fmt.Errorf("generate normal volume %d on %s: %v", vid, targetNodeLocation, err) } // delete the previous ec shards - err = mountVolumeAndDeleteEcShards(ctx, commandEnv.option.GrpcDialOption, collection, targetNodeLocation, nodeToEcIndexBits, vid) + err = mountVolumeAndDeleteEcShards(commandEnv.option.GrpcDialOption, collection, targetNodeLocation, nodeToEcIndexBits, vid) if err != nil { return fmt.Errorf("delete ec shards for volume %d: %v", vid, err) } @@ -96,11 +95,11 @@ func doEcDecode(ctx context.Context, commandEnv *CommandEnv, topoInfo *master_pb return nil } -func mountVolumeAndDeleteEcShards(ctx context.Context, grpcDialOption grpc.DialOption, collection, targetNodeLocation string, nodeToEcIndexBits map[string]erasure_coding.ShardBits, vid needle.VolumeId) error { +func mountVolumeAndDeleteEcShards(grpcDialOption grpc.DialOption, collection, targetNodeLocation string, nodeToEcIndexBits map[string]erasure_coding.ShardBits, vid needle.VolumeId) error { // mount volume - if err := operation.WithVolumeServerClient(targetNodeLocation, grpcDialOption, func(ctx context.Context, volumeServerClient volume_server_pb.VolumeServerClient) error { - _, mountErr := volumeServerClient.VolumeMount(ctx, &volume_server_pb.VolumeMountRequest{ + if err := operation.WithVolumeServerClient(targetNodeLocation, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error { + _, mountErr := volumeServerClient.VolumeMount(context.Background(), &volume_server_pb.VolumeMountRequest{ VolumeId: uint32(vid), }) return mountErr @@ -111,7 +110,7 @@ func mountVolumeAndDeleteEcShards(ctx context.Context, grpcDialOption grpc.DialO // unmount ec shards for location, ecIndexBits := range nodeToEcIndexBits { fmt.Printf("unmount ec volume %d on %s has shards: %+v\n", vid, location, ecIndexBits.ShardIds()) - err := unmountEcShards(ctx, grpcDialOption, vid, location, ecIndexBits.ToUint32Slice()) + err := 
unmountEcShards(grpcDialOption, vid, location, ecIndexBits.ToUint32Slice()) if err != nil { return fmt.Errorf("mountVolumeAndDeleteEcShards unmount ec volume %d on %s: %v", vid, location, err) } @@ -119,7 +118,7 @@ func mountVolumeAndDeleteEcShards(ctx context.Context, grpcDialOption grpc.DialO // delete ec shards for location, ecIndexBits := range nodeToEcIndexBits { fmt.Printf("delete ec volume %d on %s has shards: %+v\n", vid, location, ecIndexBits.ShardIds()) - err := sourceServerDeleteEcShards(ctx, grpcDialOption, collection, vid, location, ecIndexBits.ToUint32Slice()) + err := sourceServerDeleteEcShards(grpcDialOption, collection, vid, location, ecIndexBits.ToUint32Slice()) if err != nil { return fmt.Errorf("mountVolumeAndDeleteEcShards delete ec volume %d on %s: %v", vid, location, err) } @@ -128,12 +127,12 @@ func mountVolumeAndDeleteEcShards(ctx context.Context, grpcDialOption grpc.DialO return nil } -func generateNormalVolume(ctx context.Context, grpcDialOption grpc.DialOption, vid needle.VolumeId, collection string, sourceVolumeServer string) error { +func generateNormalVolume(grpcDialOption grpc.DialOption, vid needle.VolumeId, collection string, sourceVolumeServer string) error { fmt.Printf("generateNormalVolume from ec volume %d on %s\n", vid, sourceVolumeServer) - err := operation.WithVolumeServerClient(sourceVolumeServer, grpcDialOption, func(ctx context.Context, volumeServerClient volume_server_pb.VolumeServerClient) error { - _, genErr := volumeServerClient.VolumeEcShardsToVolume(ctx, &volume_server_pb.VolumeEcShardsToVolumeRequest{ + err := operation.WithVolumeServerClient(sourceVolumeServer, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error { + _, genErr := volumeServerClient.VolumeEcShardsToVolume(context.Background(), &volume_server_pb.VolumeEcShardsToVolumeRequest{ VolumeId: uint32(vid), Collection: collection, }) @@ -144,7 +143,7 @@ func generateNormalVolume(ctx context.Context, grpcDialOption grpc.DialOption, v } -func collectEcShards(ctx context.Context, commandEnv *CommandEnv, nodeToEcIndexBits map[string]erasure_coding.ShardBits, collection string, vid needle.VolumeId) (targetNodeLocation string, err error) { +func collectEcShards(commandEnv *CommandEnv, nodeToEcIndexBits map[string]erasure_coding.ShardBits, collection string, vid needle.VolumeId) (targetNodeLocation string, err error) { maxShardCount := 0 var exisitngEcIndexBits erasure_coding.ShardBits @@ -170,11 +169,11 @@ func collectEcShards(ctx context.Context, commandEnv *CommandEnv, nodeToEcIndexB continue } - err = operation.WithVolumeServerClient(targetNodeLocation, commandEnv.option.GrpcDialOption, func(ctx context.Context, volumeServerClient volume_server_pb.VolumeServerClient) error { + err = operation.WithVolumeServerClient(targetNodeLocation, commandEnv.option.GrpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error { fmt.Printf("copy %d.%v %s => %s\n", vid, needToCopyEcIndexBits.ShardIds(), loc, targetNodeLocation) - _, copyErr := volumeServerClient.VolumeEcShardsCopy(ctx, &volume_server_pb.VolumeEcShardsCopyRequest{ + _, copyErr := volumeServerClient.VolumeEcShardsCopy(context.Background(), &volume_server_pb.VolumeEcShardsCopyRequest{ VolumeId: uint32(vid), Collection: collection, ShardIds: needToCopyEcIndexBits.ToUint32Slice(), @@ -204,11 +203,11 @@ func collectEcShards(ctx context.Context, commandEnv *CommandEnv, nodeToEcIndexB } -func collectTopologyInfo(ctx context.Context, commandEnv *CommandEnv) (topoInfo *master_pb.TopologyInfo, 
err error) { +func collectTopologyInfo(commandEnv *CommandEnv) (topoInfo *master_pb.TopologyInfo, err error) { var resp *master_pb.VolumeListResponse - err = commandEnv.MasterClient.WithClient(ctx, func(client master_pb.SeaweedClient) error { - resp, err = client.VolumeList(ctx, &master_pb.VolumeListRequest{}) + err = commandEnv.MasterClient.WithClient(func(client master_pb.SeaweedClient) error { + resp, err = client.VolumeList(context.Background(), &master_pb.VolumeListRequest{}) return err }) if err != nil { diff --git a/weed/shell/command_ec_encode.go b/weed/shell/command_ec_encode.go index 587b59388..e22691c00 100644 --- a/weed/shell/command_ec_encode.go +++ b/weed/shell/command_ec_encode.go @@ -63,22 +63,21 @@ func (c *commandEcEncode) Do(args []string, commandEnv *CommandEnv, writer io.Wr return nil } - ctx := context.Background() vid := needle.VolumeId(*volumeId) // volumeId is provided if vid != 0 { - return doEcEncode(ctx, commandEnv, *collection, vid) + return doEcEncode(commandEnv, *collection, vid) } // apply to all volumes in the collection - volumeIds, err := collectVolumeIdsForEcEncode(ctx, commandEnv, *collection, *fullPercentage, *quietPeriod) + volumeIds, err := collectVolumeIdsForEcEncode(commandEnv, *collection, *fullPercentage, *quietPeriod) if err != nil { return err } fmt.Printf("ec encode volumes: %v\n", volumeIds) for _, vid := range volumeIds { - if err = doEcEncode(ctx, commandEnv, *collection, vid); err != nil { + if err = doEcEncode(commandEnv, *collection, vid); err != nil { return err } } @@ -86,7 +85,7 @@ func (c *commandEcEncode) Do(args []string, commandEnv *CommandEnv, writer io.Wr return nil } -func doEcEncode(ctx context.Context, commandEnv *CommandEnv, collection string, vid needle.VolumeId) (err error) { +func doEcEncode(commandEnv *CommandEnv, collection string, vid needle.VolumeId) (err error) { // find volume location locations, found := commandEnv.MasterClient.GetLocations(uint32(vid)) if !found { @@ -96,19 +95,19 @@ func doEcEncode(ctx context.Context, commandEnv *CommandEnv, collection string, // fmt.Printf("found ec %d shards on %v\n", vid, locations) // mark the volume as readonly - err = markVolumeReadonly(ctx, commandEnv.option.GrpcDialOption, needle.VolumeId(vid), locations) + err = markVolumeReadonly(commandEnv.option.GrpcDialOption, needle.VolumeId(vid), locations) if err != nil { return fmt.Errorf("mark volume %d as readonly on %s: %v", vid, locations[0].Url, err) } // generate ec shards - err = generateEcShards(ctx, commandEnv.option.GrpcDialOption, needle.VolumeId(vid), collection, locations[0].Url) + err = generateEcShards(commandEnv.option.GrpcDialOption, needle.VolumeId(vid), collection, locations[0].Url) if err != nil { return fmt.Errorf("generate ec shards for volume %d on %s: %v", vid, locations[0].Url, err) } // balance the ec shards to current cluster - err = spreadEcShards(ctx, commandEnv, vid, collection, locations) + err = spreadEcShards(context.Background(), commandEnv, vid, collection, locations) if err != nil { return fmt.Errorf("spread ec shards for volume %d from %s: %v", vid, locations[0].Url, err) } @@ -116,12 +115,12 @@ func doEcEncode(ctx context.Context, commandEnv *CommandEnv, collection string, return nil } -func markVolumeReadonly(ctx context.Context, grpcDialOption grpc.DialOption, volumeId needle.VolumeId, locations []wdclient.Location) error { +func markVolumeReadonly(grpcDialOption grpc.DialOption, volumeId needle.VolumeId, locations []wdclient.Location) error { for _, location := range locations { - err := 
operation.WithVolumeServerClient(location.Url, grpcDialOption, func(ctx context.Context, volumeServerClient volume_server_pb.VolumeServerClient) error { - _, markErr := volumeServerClient.VolumeMarkReadonly(ctx, &volume_server_pb.VolumeMarkReadonlyRequest{ + err := operation.WithVolumeServerClient(location.Url, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error { + _, markErr := volumeServerClient.VolumeMarkReadonly(context.Background(), &volume_server_pb.VolumeMarkReadonlyRequest{ VolumeId: uint32(volumeId), }) return markErr @@ -136,10 +135,10 @@ func markVolumeReadonly(ctx context.Context, grpcDialOption grpc.DialOption, vol return nil } -func generateEcShards(ctx context.Context, grpcDialOption grpc.DialOption, volumeId needle.VolumeId, collection string, sourceVolumeServer string) error { +func generateEcShards(grpcDialOption grpc.DialOption, volumeId needle.VolumeId, collection string, sourceVolumeServer string) error { - err := operation.WithVolumeServerClient(sourceVolumeServer, grpcDialOption, func(ctx context.Context, volumeServerClient volume_server_pb.VolumeServerClient) error { - _, genErr := volumeServerClient.VolumeEcShardsGenerate(ctx, &volume_server_pb.VolumeEcShardsGenerateRequest{ + err := operation.WithVolumeServerClient(sourceVolumeServer, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error { + _, genErr := volumeServerClient.VolumeEcShardsGenerate(context.Background(), &volume_server_pb.VolumeEcShardsGenerateRequest{ VolumeId: uint32(volumeId), Collection: collection, }) @@ -152,7 +151,7 @@ func generateEcShards(ctx context.Context, grpcDialOption grpc.DialOption, volum func spreadEcShards(ctx context.Context, commandEnv *CommandEnv, volumeId needle.VolumeId, collection string, existingLocations []wdclient.Location) (err error) { - allEcNodes, totalFreeEcSlots, err := collectEcNodes(ctx, commandEnv, "") + allEcNodes, totalFreeEcSlots, err := collectEcNodes(commandEnv, "") if err != nil { return err } @@ -169,26 +168,26 @@ func spreadEcShards(ctx context.Context, commandEnv *CommandEnv, volumeId needle allocatedEcIds := balancedEcDistribution(allocatedDataNodes) // ask the data nodes to copy from the source volume server - copiedShardIds, err := parallelCopyEcShardsFromSource(ctx, commandEnv.option.GrpcDialOption, allocatedDataNodes, allocatedEcIds, volumeId, collection, existingLocations[0]) + copiedShardIds, err := parallelCopyEcShardsFromSource(commandEnv.option.GrpcDialOption, allocatedDataNodes, allocatedEcIds, volumeId, collection, existingLocations[0]) if err != nil { return err } // unmount the to be deleted shards - err = unmountEcShards(ctx, commandEnv.option.GrpcDialOption, volumeId, existingLocations[0].Url, copiedShardIds) + err = unmountEcShards(commandEnv.option.GrpcDialOption, volumeId, existingLocations[0].Url, copiedShardIds) if err != nil { return err } // ask the source volume server to clean up copied ec shards - err = sourceServerDeleteEcShards(ctx, commandEnv.option.GrpcDialOption, collection, volumeId, existingLocations[0].Url, copiedShardIds) + err = sourceServerDeleteEcShards(commandEnv.option.GrpcDialOption, collection, volumeId, existingLocations[0].Url, copiedShardIds) if err != nil { return fmt.Errorf("source delete copied ecShards %s %d.%v: %v", existingLocations[0].Url, volumeId, copiedShardIds, err) } // ask the source volume server to delete the original volume for _, location := range existingLocations { - err = deleteVolume(ctx, commandEnv.option.GrpcDialOption, volumeId, 
location.Url) + err = deleteVolume(commandEnv.option.GrpcDialOption, volumeId, location.Url) if err != nil { return fmt.Errorf("deleteVolume %s volume %d: %v", location.Url, volumeId, err) } @@ -198,9 +197,7 @@ func spreadEcShards(ctx context.Context, commandEnv *CommandEnv, volumeId needle } -func parallelCopyEcShardsFromSource(ctx context.Context, grpcDialOption grpc.DialOption, - targetServers []*EcNode, allocatedEcIds [][]uint32, - volumeId needle.VolumeId, collection string, existingLocation wdclient.Location) (actuallyCopied []uint32, err error) { +func parallelCopyEcShardsFromSource(grpcDialOption grpc.DialOption, targetServers []*EcNode, allocatedEcIds [][]uint32, volumeId needle.VolumeId, collection string, existingLocation wdclient.Location) (actuallyCopied []uint32, err error) { // parallelize shardIdChan := make(chan []uint32, len(targetServers)) @@ -213,7 +210,7 @@ func parallelCopyEcShardsFromSource(ctx context.Context, grpcDialOption grpc.Dia wg.Add(1) go func(server *EcNode, allocatedEcShardIds []uint32) { defer wg.Done() - copiedShardIds, copyErr := oneServerCopyAndMountEcShardsFromSource(ctx, grpcDialOption, server, + copiedShardIds, copyErr := oneServerCopyAndMountEcShardsFromSource(grpcDialOption, server, allocatedEcShardIds, volumeId, collection, existingLocation.Url) if copyErr != nil { err = copyErr @@ -255,11 +252,11 @@ func balancedEcDistribution(servers []*EcNode) (allocated [][]uint32) { return allocated } -func collectVolumeIdsForEcEncode(ctx context.Context, commandEnv *CommandEnv, selectedCollection string, fullPercentage float64, quietPeriod time.Duration) (vids []needle.VolumeId, err error) { +func collectVolumeIdsForEcEncode(commandEnv *CommandEnv, selectedCollection string, fullPercentage float64, quietPeriod time.Duration) (vids []needle.VolumeId, err error) { var resp *master_pb.VolumeListResponse - err = commandEnv.MasterClient.WithClient(ctx, func(client master_pb.SeaweedClient) error { - resp, err = client.VolumeList(ctx, &master_pb.VolumeListRequest{}) + err = commandEnv.MasterClient.WithClient(func(client master_pb.SeaweedClient) error { + resp, err = client.VolumeList(context.Background(), &master_pb.VolumeListRequest{}) return err }) if err != nil { diff --git a/weed/shell/command_ec_rebuild.go b/weed/shell/command_ec_rebuild.go index 600a8cb45..d9d943e6d 100644 --- a/weed/shell/command_ec_rebuild.go +++ b/weed/shell/command_ec_rebuild.go @@ -64,7 +64,7 @@ func (c *commandEcRebuild) Do(args []string, commandEnv *CommandEnv, writer io.W } // collect all ec nodes - allEcNodes, _, err := collectEcNodes(context.Background(), commandEnv, "") + allEcNodes, _, err := collectEcNodes(commandEnv, "") if err != nil { return err } @@ -92,8 +92,6 @@ func (c *commandEcRebuild) Do(args []string, commandEnv *CommandEnv, writer io.W func rebuildEcVolumes(commandEnv *CommandEnv, allEcNodes []*EcNode, collection string, writer io.Writer, applyChanges bool) error { - ctx := context.Background() - fmt.Printf("rebuildEcVolumes %s\n", collection) // collect vid => each shard locations, similar to ecShardMap in topology.go @@ -117,7 +115,7 @@ func rebuildEcVolumes(commandEnv *CommandEnv, allEcNodes []*EcNode, collection s return fmt.Errorf("disk space is not enough") } - if err := rebuildOneEcVolume(ctx, commandEnv, allEcNodes[0], collection, vid, locations, writer, applyChanges); err != nil { + if err := rebuildOneEcVolume(commandEnv, allEcNodes[0], collection, vid, locations, writer, applyChanges); err != nil { return err } } @@ -125,13 +123,13 @@ func 
rebuildEcVolumes(commandEnv *CommandEnv, allEcNodes []*EcNode, collection s return nil } -func rebuildOneEcVolume(ctx context.Context, commandEnv *CommandEnv, rebuilder *EcNode, collection string, volumeId needle.VolumeId, locations EcShardLocations, writer io.Writer, applyChanges bool) error { +func rebuildOneEcVolume(commandEnv *CommandEnv, rebuilder *EcNode, collection string, volumeId needle.VolumeId, locations EcShardLocations, writer io.Writer, applyChanges bool) error { fmt.Printf("rebuildOneEcVolume %s %d\n", collection, volumeId) // collect shard files to rebuilder local disk var generatedShardIds []uint32 - copiedShardIds, _, err := prepareDataToRecover(ctx, commandEnv, rebuilder, collection, volumeId, locations, writer, applyChanges) + copiedShardIds, _, err := prepareDataToRecover(commandEnv, rebuilder, collection, volumeId, locations, writer, applyChanges) if err != nil { return err } @@ -139,7 +137,7 @@ func rebuildOneEcVolume(ctx context.Context, commandEnv *CommandEnv, rebuilder * // clean up working files // ask the rebuilder to delete the copied shards - err = sourceServerDeleteEcShards(ctx, commandEnv.option.GrpcDialOption, collection, volumeId, rebuilder.info.Id, copiedShardIds) + err = sourceServerDeleteEcShards(commandEnv.option.GrpcDialOption, collection, volumeId, rebuilder.info.Id, copiedShardIds) if err != nil { fmt.Fprintf(writer, "%s delete copied ec shards %s %d.%v\n", rebuilder.info.Id, collection, volumeId, copiedShardIds) } @@ -151,13 +149,13 @@ func rebuildOneEcVolume(ctx context.Context, commandEnv *CommandEnv, rebuilder * } // generate ec shards, and maybe ecx file - generatedShardIds, err = generateMissingShards(ctx, commandEnv.option.GrpcDialOption, collection, volumeId, rebuilder.info.Id) + generatedShardIds, err = generateMissingShards(commandEnv.option.GrpcDialOption, collection, volumeId, rebuilder.info.Id) if err != nil { return err } // mount the generated shards - err = mountEcShards(ctx, commandEnv.option.GrpcDialOption, collection, volumeId, rebuilder.info.Id, generatedShardIds) + err = mountEcShards(commandEnv.option.GrpcDialOption, collection, volumeId, rebuilder.info.Id, generatedShardIds) if err != nil { return err } @@ -167,11 +165,10 @@ func rebuildOneEcVolume(ctx context.Context, commandEnv *CommandEnv, rebuilder * return nil } -func generateMissingShards(ctx context.Context, grpcDialOption grpc.DialOption, - collection string, volumeId needle.VolumeId, sourceLocation string) (rebuiltShardIds []uint32, err error) { +func generateMissingShards(grpcDialOption grpc.DialOption, collection string, volumeId needle.VolumeId, sourceLocation string) (rebuiltShardIds []uint32, err error) { - err = operation.WithVolumeServerClient(sourceLocation, grpcDialOption, func(ctx context.Context, volumeServerClient volume_server_pb.VolumeServerClient) error { - resp, rebultErr := volumeServerClient.VolumeEcShardsRebuild(ctx, &volume_server_pb.VolumeEcShardsRebuildRequest{ + err = operation.WithVolumeServerClient(sourceLocation, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error { + resp, rebultErr := volumeServerClient.VolumeEcShardsRebuild(context.Background(), &volume_server_pb.VolumeEcShardsRebuildRequest{ VolumeId: uint32(volumeId), Collection: collection, }) @@ -183,7 +180,7 @@ func generateMissingShards(ctx context.Context, grpcDialOption grpc.DialOption, return } -func prepareDataToRecover(ctx context.Context, commandEnv *CommandEnv, rebuilder *EcNode, collection string, volumeId needle.VolumeId, locations 
EcShardLocations, writer io.Writer, applyBalancing bool) (copiedShardIds []uint32, localShardIds []uint32, err error) { +func prepareDataToRecover(commandEnv *CommandEnv, rebuilder *EcNode, collection string, volumeId needle.VolumeId, locations EcShardLocations, writer io.Writer, applyBalancing bool) (copiedShardIds []uint32, localShardIds []uint32, err error) { needEcxFile := true var localShardBits erasure_coding.ShardBits @@ -209,8 +206,8 @@ func prepareDataToRecover(ctx context.Context, commandEnv *CommandEnv, rebuilder var copyErr error if applyBalancing { - copyErr = operation.WithVolumeServerClient(rebuilder.info.Id, commandEnv.option.GrpcDialOption, func(ctx context.Context, volumeServerClient volume_server_pb.VolumeServerClient) error { - _, copyErr := volumeServerClient.VolumeEcShardsCopy(ctx, &volume_server_pb.VolumeEcShardsCopyRequest{ + copyErr = operation.WithVolumeServerClient(rebuilder.info.Id, commandEnv.option.GrpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error { + _, copyErr := volumeServerClient.VolumeEcShardsCopy(context.Background(), &volume_server_pb.VolumeEcShardsCopyRequest{ VolumeId: uint32(volumeId), Collection: collection, ShardIds: []uint32{uint32(shardId)}, diff --git a/weed/shell/command_ec_test.go b/weed/shell/command_ec_test.go index c233d25d0..ddd52303c 100644 --- a/weed/shell/command_ec_test.go +++ b/weed/shell/command_ec_test.go @@ -121,7 +121,7 @@ func TestCommandEcBalanceVolumeEvenButRackUneven(t *testing.T) { racks := collectRacks(allEcNodes) balanceEcVolumes(nil, "c1", allEcNodes, racks, false) - balanceEcRacks(context.Background(), nil, racks, false) + balanceEcRacks(nil, racks, false) } func newEcNode(dc string, rack string, dataNodeId string, freeEcSlot int) *EcNode { diff --git a/weed/shell/command_fs_cat.go b/weed/shell/command_fs_cat.go index 06c8232c9..8364e0de1 100644 --- a/weed/shell/command_fs_cat.go +++ b/weed/shell/command_fs_cat.go @@ -38,21 +38,19 @@ func (c *commandFsCat) Do(args []string, commandEnv *CommandEnv, writer io.Write return err } - ctx := context.Background() - - if commandEnv.isDirectory(ctx, filerServer, filerPort, path) { + if commandEnv.isDirectory(filerServer, filerPort, path) { return fmt.Errorf("%s is a directory", path) } dir, name := filer2.FullPath(path).DirAndName() - return commandEnv.withFilerClient(ctx, filerServer, filerPort, func(ctx context.Context, client filer_pb.SeaweedFilerClient) error { + return commandEnv.withFilerClient(filerServer, filerPort, func(client filer_pb.SeaweedFilerClient) error { request := &filer_pb.LookupDirectoryEntryRequest{ Name: name, Directory: dir, } - respLookupEntry, err := client.LookupDirectoryEntry(ctx, request) + respLookupEntry, err := client.LookupDirectoryEntry(context.Background(), request) if err != nil { return err } diff --git a/weed/shell/command_fs_cd.go b/weed/shell/command_fs_cd.go index 408ec86c8..df42cd516 100644 --- a/weed/shell/command_fs_cd.go +++ b/weed/shell/command_fs_cd.go @@ -1,7 +1,6 @@ package shell import ( - "context" "io" ) @@ -45,9 +44,7 @@ func (c *commandFsCd) Do(args []string, commandEnv *CommandEnv, writer io.Writer return nil } - ctx := context.Background() - - err = commandEnv.checkDirectory(ctx, filerServer, filerPort, path) + err = commandEnv.checkDirectory(filerServer, filerPort, path) if err == nil { commandEnv.option.FilerHost = filerServer diff --git a/weed/shell/command_fs_du.go b/weed/shell/command_fs_du.go index d6ea51d0c..a1e21bfa6 100644 --- a/weed/shell/command_fs_du.go +++ 
b/weed/shell/command_fs_du.go @@ -1,7 +1,6 @@ package shell import ( - "context" "fmt" "io" @@ -39,15 +38,13 @@ func (c *commandFsDu) Do(args []string, commandEnv *CommandEnv, writer io.Writer return err } - ctx := context.Background() - - if commandEnv.isDirectory(ctx, filerServer, filerPort, path) { + if commandEnv.isDirectory(filerServer, filerPort, path) { path = path + "/" } var blockCount, byteCount uint64 dir, name := filer2.FullPath(path).DirAndName() - blockCount, byteCount, err = duTraverseDirectory(ctx, writer, commandEnv.getFilerClient(filerServer, filerPort), dir, name) + blockCount, byteCount, err = duTraverseDirectory(writer, commandEnv.getFilerClient(filerServer, filerPort), dir, name) if name == "" && err == nil { fmt.Fprintf(writer, "block:%4d\tbyte:%10d\t%s\n", blockCount, byteCount, dir) @@ -57,15 +54,15 @@ func (c *commandFsDu) Do(args []string, commandEnv *CommandEnv, writer io.Writer } -func duTraverseDirectory(ctx context.Context, writer io.Writer, filerClient filer2.FilerClient, dir, name string) (blockCount uint64, byteCount uint64, err error) { +func duTraverseDirectory(writer io.Writer, filerClient filer2.FilerClient, dir, name string) (blockCount, byteCount uint64, err error) { - err = filer2.ReadDirAllEntries(ctx, filerClient, filer2.FullPath(dir), name, func(entry *filer_pb.Entry, isLast bool) { + err = filer2.ReadDirAllEntries(filerClient, filer2.FullPath(dir), name, func(entry *filer_pb.Entry, isLast bool) { if entry.IsDirectory { subDir := fmt.Sprintf("%s/%s", dir, entry.Name) if dir == "/" { subDir = "/" + entry.Name } - numBlock, numByte, err := duTraverseDirectory(ctx, writer, filerClient, subDir, "") + numBlock, numByte, err := duTraverseDirectory(writer, filerClient, subDir, "") if err == nil { blockCount += numBlock byteCount += numByte @@ -82,12 +79,12 @@ func duTraverseDirectory(ctx context.Context, writer io.Writer, filerClient file return } -func (env *CommandEnv) withFilerClient(ctx context.Context, filerServer string, filerPort int64, fn func(context.Context, filer_pb.SeaweedFilerClient) error) error { +func (env *CommandEnv) withFilerClient(filerServer string, filerPort int64, fn func(filer_pb.SeaweedFilerClient) error) error { filerGrpcAddress := fmt.Sprintf("%s:%d", filerServer, filerPort+10000) - return util.WithCachedGrpcClient(ctx, func(ctx2 context.Context, grpcConnection *grpc.ClientConn) error { + return util.WithCachedGrpcClient(func(grpcConnection *grpc.ClientConn) error { client := filer_pb.NewSeaweedFilerClient(grpcConnection) - return fn(ctx2, client) + return fn(client) }, filerGrpcAddress, env.option.GrpcDialOption) } @@ -105,6 +102,6 @@ func (env *CommandEnv) getFilerClient(filerServer string, filerPort int64) *comm filerPort: filerPort, } } -func (c *commandFilerClient) WithFilerClient(ctx context.Context, fn func(context.Context, filer_pb.SeaweedFilerClient) error) error { - return c.env.withFilerClient(ctx, c.filerServer, c.filerPort, fn) +func (c *commandFilerClient) WithFilerClient(fn func(filer_pb.SeaweedFilerClient) error) error { + return c.env.withFilerClient(c.filerServer, c.filerPort, fn) } diff --git a/weed/shell/command_fs_ls.go b/weed/shell/command_fs_ls.go index 0c63f71fa..69ebe1b30 100644 --- a/weed/shell/command_fs_ls.go +++ b/weed/shell/command_fs_ls.go @@ -1,7 +1,6 @@ package shell import ( - "context" "fmt" "io" "os" @@ -60,16 +59,14 @@ func (c *commandFsLs) Do(args []string, commandEnv *CommandEnv, writer io.Writer return err } - ctx := context.Background() - - if commandEnv.isDirectory(ctx, filerServer, 
filerPort, path) { + if commandEnv.isDirectory(filerServer, filerPort, path) { path = path + "/" } dir, name := filer2.FullPath(path).DirAndName() entryCount := 0 - err = filer2.ReadDirAllEntries(ctx, commandEnv.getFilerClient(filerServer, filerPort), filer2.FullPath(dir), name, func(entry *filer_pb.Entry, isLast bool) { + err = filer2.ReadDirAllEntries(commandEnv.getFilerClient(filerServer, filerPort), filer2.FullPath(dir), name, func(entry *filer_pb.Entry, isLast bool) { if !showHidden && strings.HasPrefix(entry.Name, ".") { return diff --git a/weed/shell/command_fs_meta_cat.go b/weed/shell/command_fs_meta_cat.go index ec9a495f2..ec5a093df 100644 --- a/weed/shell/command_fs_meta_cat.go +++ b/weed/shell/command_fs_meta_cat.go @@ -41,17 +41,15 @@ func (c *commandFsMetaCat) Do(args []string, commandEnv *CommandEnv, writer io.W return err } - ctx := context.Background() - dir, name := filer2.FullPath(path).DirAndName() - return commandEnv.withFilerClient(ctx, filerServer, filerPort, func(ctx context.Context, client filer_pb.SeaweedFilerClient) error { + return commandEnv.withFilerClient(filerServer, filerPort, func(client filer_pb.SeaweedFilerClient) error { request := &filer_pb.LookupDirectoryEntryRequest{ Name: name, Directory: dir, } - respLookupEntry, err := client.LookupDirectoryEntry(ctx, request) + respLookupEntry, err := client.LookupDirectoryEntry(context.Background(), request) if err != nil { return err } diff --git a/weed/shell/command_fs_meta_load.go b/weed/shell/command_fs_meta_load.go index 8f2ef95e3..ed92d8011 100644 --- a/weed/shell/command_fs_meta_load.go +++ b/weed/shell/command_fs_meta_load.go @@ -1,15 +1,15 @@ package shell import ( - "context" "fmt" "io" "os" + "github.com/golang/protobuf/proto" + "github.com/chrislusf/seaweedfs/weed/filer2" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" "github.com/chrislusf/seaweedfs/weed/util" - "github.com/golang/protobuf/proto" ) func init() { @@ -53,9 +53,7 @@ func (c *commandFsMetaLoad) Do(args []string, commandEnv *CommandEnv, writer io. var dirCount, fileCount uint64 - ctx := context.Background() - - err = commandEnv.withFilerClient(ctx, filerServer, filerPort, func(ctx context.Context, client filer_pb.SeaweedFilerClient) error { + err = commandEnv.withFilerClient(filerServer, filerPort, func(client filer_pb.SeaweedFilerClient) error { sizeBuf := make([]byte, 4) @@ -80,7 +78,7 @@ func (c *commandFsMetaLoad) Do(args []string, commandEnv *CommandEnv, writer io. 
return err } - if err := filer_pb.CreateEntry(ctx, client, &filer_pb.CreateEntryRequest{ + if err := filer_pb.CreateEntry(client, &filer_pb.CreateEntryRequest{ Directory: fullEntry.Dir, Entry: fullEntry.Entry, }); err != nil { diff --git a/weed/shell/command_fs_meta_save.go b/weed/shell/command_fs_meta_save.go index 178c826d5..7112c7526 100644 --- a/weed/shell/command_fs_meta_save.go +++ b/weed/shell/command_fs_meta_save.go @@ -168,7 +168,7 @@ func processOneDirectory(ctx context.Context, writer io.Writer, filerClient file parentPath filer2.FullPath, queue *util.Queue, jobQueueWg *sync.WaitGroup, fn func(parentPath filer2.FullPath, entry *filer_pb.Entry)) (err error) { - return filer2.ReadDirAllEntries(ctx, filerClient, parentPath, "", func(entry *filer_pb.Entry, isLast bool) { + return filer2.ReadDirAllEntries(filerClient, parentPath, "", func(entry *filer_pb.Entry, isLast bool) { fn(parentPath, entry) diff --git a/weed/shell/command_fs_mv.go b/weed/shell/command_fs_mv.go index b9301ad3c..78f797f6c 100644 --- a/weed/shell/command_fs_mv.go +++ b/weed/shell/command_fs_mv.go @@ -47,20 +47,18 @@ func (c *commandFsMv) Do(args []string, commandEnv *CommandEnv, writer io.Writer return err } - ctx := context.Background() - sourceDir, sourceName := filer2.FullPath(sourcePath).DirAndName() destinationDir, destinationName := filer2.FullPath(destinationPath).DirAndName() - return commandEnv.withFilerClient(ctx, filerServer, filerPort, func(ctx context.Context, client filer_pb.SeaweedFilerClient) error { + return commandEnv.withFilerClient(filerServer, filerPort, func(client filer_pb.SeaweedFilerClient) error { // collect destination entry info destinationRequest := &filer_pb.LookupDirectoryEntryRequest{ Name: destinationDir, Directory: destinationName, } - respDestinationLookupEntry, err := client.LookupDirectoryEntry(ctx, destinationRequest) + respDestinationLookupEntry, err := client.LookupDirectoryEntry(context.Background(), destinationRequest) var targetDir, targetName string @@ -82,7 +80,7 @@ func (c *commandFsMv) Do(args []string, commandEnv *CommandEnv, writer io.Writer NewName: targetName, } - _, err = client.AtomicRenameEntry(ctx, request) + _, err = client.AtomicRenameEntry(context.Background(), request) fmt.Fprintf(writer, "move: %s => %s\n", sourcePath, filer2.NewFullPath(targetDir, targetName)) diff --git a/weed/shell/command_fs_tree.go b/weed/shell/command_fs_tree.go index 8660030e3..fb2583240 100644 --- a/weed/shell/command_fs_tree.go +++ b/weed/shell/command_fs_tree.go @@ -53,7 +53,7 @@ func treeTraverseDirectory(ctx context.Context, writer io.Writer, filerClient fi prefix.addMarker(level) - err = filer2.ReadDirAllEntries(ctx, filerClient, dir, name, func(entry *filer_pb.Entry, isLast bool) { + err = filer2.ReadDirAllEntries(filerClient, dir, name, func(entry *filer_pb.Entry, isLast bool) { if level < 0 && name != "" { if entry.Name != name { return diff --git a/weed/shell/command_volume_balance.go b/weed/shell/command_volume_balance.go index 488beb998..349f52f1c 100644 --- a/weed/shell/command_volume_balance.go +++ b/weed/shell/command_volume_balance.go @@ -69,9 +69,8 @@ func (c *commandVolumeBalance) Do(args []string, commandEnv *CommandEnv, writer } var resp *master_pb.VolumeListResponse - ctx := context.Background() - err = commandEnv.MasterClient.WithClient(ctx, func(client master_pb.SeaweedClient) error { - resp, err = client.VolumeList(ctx, &master_pb.VolumeListRequest{}) + err = commandEnv.MasterClient.WithClient(func(client master_pb.SeaweedClient) error { + resp, err = 
client.VolumeList(context.Background(), &master_pb.VolumeListRequest{}) return err }) if err != nil { @@ -239,8 +238,7 @@ func moveVolume(commandEnv *CommandEnv, v *master_pb.VolumeInformationMessage, f } fmt.Fprintf(os.Stdout, "moving volume %s%d %s => %s\n", collectionPrefix, v.Id, fullNode.info.Id, emptyNode.info.Id) if applyBalancing { - ctx := context.Background() - return LiveMoveVolume(ctx, commandEnv.option.GrpcDialOption, needle.VolumeId(v.Id), fullNode.info.Id, emptyNode.info.Id, 5*time.Second) + return LiveMoveVolume(commandEnv.option.GrpcDialOption, needle.VolumeId(v.Id), fullNode.info.Id, emptyNode.info.Id, 5*time.Second) } return nil } diff --git a/weed/shell/command_volume_configure_replication.go b/weed/shell/command_volume_configure_replication.go index 6000d0de0..133ec62c6 100644 --- a/weed/shell/command_volume_configure_replication.go +++ b/weed/shell/command_volume_configure_replication.go @@ -53,9 +53,8 @@ func (c *commandVolumeConfigureReplication) Do(args []string, commandEnv *Comman replicaPlacementInt32 := uint32(replicaPlacement.Byte()) var resp *master_pb.VolumeListResponse - ctx := context.Background() - err = commandEnv.MasterClient.WithClient(ctx, func(client master_pb.SeaweedClient) error { - resp, err = client.VolumeList(ctx, &master_pb.VolumeListRequest{}) + err = commandEnv.MasterClient.WithClient(func(client master_pb.SeaweedClient) error { + resp, err = client.VolumeList(context.Background(), &master_pb.VolumeListRequest{}) return err }) if err != nil { @@ -81,8 +80,8 @@ func (c *commandVolumeConfigureReplication) Do(args []string, commandEnv *Comman } for _, dst := range allLocations { - err := operation.WithVolumeServerClient(dst.dataNode.Id, commandEnv.option.GrpcDialOption, func(ctx context.Context, volumeServerClient volume_server_pb.VolumeServerClient) error { - resp, configureErr := volumeServerClient.VolumeConfigure(ctx, &volume_server_pb.VolumeConfigureRequest{ + err := operation.WithVolumeServerClient(dst.dataNode.Id, commandEnv.option.GrpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error { + resp, configureErr := volumeServerClient.VolumeConfigure(context.Background(), &volume_server_pb.VolumeConfigureRequest{ VolumeId: uint32(vid), Replication: replicaPlacement.String(), }) diff --git a/weed/shell/command_volume_copy.go b/weed/shell/command_volume_copy.go index 1c83ba655..aecc071ad 100644 --- a/weed/shell/command_volume_copy.go +++ b/weed/shell/command_volume_copy.go @@ -1,7 +1,6 @@ package shell import ( - "context" "fmt" "io" @@ -47,7 +46,6 @@ func (c *commandVolumeCopy) Do(args []string, commandEnv *CommandEnv, writer io. 
return fmt.Errorf("source and target volume servers are the same!") } - ctx := context.Background() - _, err = copyVolume(ctx, commandEnv.option.GrpcDialOption, volumeId, sourceVolumeServer, targetVolumeServer) + _, err = copyVolume(commandEnv.option.GrpcDialOption, volumeId, sourceVolumeServer, targetVolumeServer) return } diff --git a/weed/shell/command_volume_delete.go b/weed/shell/command_volume_delete.go index 17d27ea3a..5869b1621 100644 --- a/weed/shell/command_volume_delete.go +++ b/weed/shell/command_volume_delete.go @@ -1,7 +1,6 @@ package shell import ( - "context" "fmt" "io" @@ -42,7 +41,6 @@ func (c *commandVolumeDelete) Do(args []string, commandEnv *CommandEnv, writer i return fmt.Errorf("wrong volume id format %s: %v", volumeId, err) } - ctx := context.Background() - return deleteVolume(ctx, commandEnv.option.GrpcDialOption, volumeId, sourceVolumeServer) + return deleteVolume(commandEnv.option.GrpcDialOption, volumeId, sourceVolumeServer) } diff --git a/weed/shell/command_volume_fix_replication.go b/weed/shell/command_volume_fix_replication.go index 7a1a77cbe..210f4819d 100644 --- a/weed/shell/command_volume_fix_replication.go +++ b/weed/shell/command_volume_fix_replication.go @@ -50,9 +50,8 @@ func (c *commandVolumeFixReplication) Do(args []string, commandEnv *CommandEnv, } var resp *master_pb.VolumeListResponse - ctx := context.Background() - err = commandEnv.MasterClient.WithClient(ctx, func(client master_pb.SeaweedClient) error { - resp, err = client.VolumeList(ctx, &master_pb.VolumeListRequest{}) + err = commandEnv.MasterClient.WithClient(func(client master_pb.SeaweedClient) error { + resp, err = client.VolumeList(context.Background(), &master_pb.VolumeListRequest{}) return err }) if err != nil { @@ -113,8 +112,8 @@ func (c *commandVolumeFixReplication) Do(args []string, commandEnv *CommandEnv, break } - err := operation.WithVolumeServerClient(dst.dataNode.Id, commandEnv.option.GrpcDialOption, func(ctx context.Context, volumeServerClient volume_server_pb.VolumeServerClient) error { - _, replicateErr := volumeServerClient.VolumeCopy(ctx, &volume_server_pb.VolumeCopyRequest{ + err := operation.WithVolumeServerClient(dst.dataNode.Id, commandEnv.option.GrpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error { + _, replicateErr := volumeServerClient.VolumeCopy(context.Background(), &volume_server_pb.VolumeCopyRequest{ VolumeId: volumeInfo.Id, SourceDataNode: sourceNode.dataNode.Id, }) diff --git a/weed/shell/command_volume_list.go b/weed/shell/command_volume_list.go index c6c79d150..c5a9388fa 100644 --- a/weed/shell/command_volume_list.go +++ b/weed/shell/command_volume_list.go @@ -32,9 +32,8 @@ func (c *commandVolumeList) Help() string { func (c *commandVolumeList) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) { var resp *master_pb.VolumeListResponse - ctx := context.Background() - err = commandEnv.MasterClient.WithClient(ctx, func(client master_pb.SeaweedClient) error { - resp, err = client.VolumeList(ctx, &master_pb.VolumeListRequest{}) + err = commandEnv.MasterClient.WithClient(func(client master_pb.SeaweedClient) error { + resp, err = client.VolumeList(context.Background(), &master_pb.VolumeListRequest{}) return err }) if err != nil { diff --git a/weed/shell/command_volume_mount.go b/weed/shell/command_volume_mount.go index 21bc342b4..cffc7136b 100644 --- a/weed/shell/command_volume_mount.go +++ b/weed/shell/command_volume_mount.go @@ -45,14 +45,13 @@ func (c *commandVolumeMount) Do(args []string, commandEnv 
*CommandEnv, writer io return fmt.Errorf("wrong volume id format %s: %v", volumeId, err) } - ctx := context.Background() - return mountVolume(ctx, commandEnv.option.GrpcDialOption, volumeId, sourceVolumeServer) + return mountVolume(commandEnv.option.GrpcDialOption, volumeId, sourceVolumeServer) } -func mountVolume(ctx context.Context, grpcDialOption grpc.DialOption, volumeId needle.VolumeId, sourceVolumeServer string) (err error) { - return operation.WithVolumeServerClient(sourceVolumeServer, grpcDialOption, func(ctx context.Context, volumeServerClient volume_server_pb.VolumeServerClient) error { - _, mountErr := volumeServerClient.VolumeMount(ctx, &volume_server_pb.VolumeMountRequest{ +func mountVolume(grpcDialOption grpc.DialOption, volumeId needle.VolumeId, sourceVolumeServer string) (err error) { + return operation.WithVolumeServerClient(sourceVolumeServer, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error { + _, mountErr := volumeServerClient.VolumeMount(context.Background(), &volume_server_pb.VolumeMountRequest{ VolumeId: uint32(volumeId), }) return mountErr diff --git a/weed/shell/command_volume_move.go b/weed/shell/command_volume_move.go index 2e39c0600..c25b953a5 100644 --- a/weed/shell/command_volume_move.go +++ b/weed/shell/command_volume_move.go @@ -59,26 +59,25 @@ func (c *commandVolumeMove) Do(args []string, commandEnv *CommandEnv, writer io. return fmt.Errorf("source and target volume servers are the same!") } - ctx := context.Background() - return LiveMoveVolume(ctx, commandEnv.option.GrpcDialOption, volumeId, sourceVolumeServer, targetVolumeServer, 5*time.Second) + return LiveMoveVolume(commandEnv.option.GrpcDialOption, volumeId, sourceVolumeServer, targetVolumeServer, 5*time.Second) } // LiveMoveVolume moves one volume from one source volume server to one target volume server, with idleTimeout to drain the incoming requests. 
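// An illustrative usage sketch, not part of the original patch: once the ctx
// parameter is dropped, a caller needs only a grpc.DialOption, the volume id,
// the two volume server addresses, and an idle timeout. The dial option and
// the addresses below are placeholders rather than values defined in this series.
//
//	dialOption := grpc.WithInsecure()
//	if err := LiveMoveVolume(dialOption, needle.VolumeId(42),
//		"127.0.0.1:8080", "127.0.0.2:8080", 5*time.Second); err != nil {
//		log.Printf("live move of volume 42 failed: %v", err)
//	}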
-func LiveMoveVolume(ctx context.Context, grpcDialOption grpc.DialOption, volumeId needle.VolumeId, sourceVolumeServer, targetVolumeServer string, idleTimeout time.Duration) (err error) { +func LiveMoveVolume(grpcDialOption grpc.DialOption, volumeId needle.VolumeId, sourceVolumeServer, targetVolumeServer string, idleTimeout time.Duration) (err error) { log.Printf("copying volume %d from %s to %s", volumeId, sourceVolumeServer, targetVolumeServer) - lastAppendAtNs, err := copyVolume(ctx, grpcDialOption, volumeId, sourceVolumeServer, targetVolumeServer) + lastAppendAtNs, err := copyVolume(grpcDialOption, volumeId, sourceVolumeServer, targetVolumeServer) if err != nil { return fmt.Errorf("copy volume %d from %s to %s: %v", volumeId, sourceVolumeServer, targetVolumeServer, err) } log.Printf("tailing volume %d from %s to %s", volumeId, sourceVolumeServer, targetVolumeServer) - if err = tailVolume(ctx, grpcDialOption, volumeId, sourceVolumeServer, targetVolumeServer, lastAppendAtNs, idleTimeout); err != nil { + if err = tailVolume(grpcDialOption, volumeId, sourceVolumeServer, targetVolumeServer, lastAppendAtNs, idleTimeout); err != nil { return fmt.Errorf("tail volume %d from %s to %s: %v", volumeId, sourceVolumeServer, targetVolumeServer, err) } log.Printf("deleting volume %d from %s", volumeId, sourceVolumeServer) - if err = deleteVolume(ctx, grpcDialOption, volumeId, sourceVolumeServer); err != nil { + if err = deleteVolume(grpcDialOption, volumeId, sourceVolumeServer); err != nil { return fmt.Errorf("delete volume %d from %s: %v", volumeId, sourceVolumeServer, err) } @@ -86,10 +85,10 @@ func LiveMoveVolume(ctx context.Context, grpcDialOption grpc.DialOption, volumeI return nil } -func copyVolume(ctx context.Context, grpcDialOption grpc.DialOption, volumeId needle.VolumeId, sourceVolumeServer, targetVolumeServer string) (lastAppendAtNs uint64, err error) { +func copyVolume(grpcDialOption grpc.DialOption, volumeId needle.VolumeId, sourceVolumeServer, targetVolumeServer string) (lastAppendAtNs uint64, err error) { - err = operation.WithVolumeServerClient(targetVolumeServer, grpcDialOption, func(ctx context.Context, volumeServerClient volume_server_pb.VolumeServerClient) error { - resp, replicateErr := volumeServerClient.VolumeCopy(ctx, &volume_server_pb.VolumeCopyRequest{ + err = operation.WithVolumeServerClient(targetVolumeServer, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error { + resp, replicateErr := volumeServerClient.VolumeCopy(context.Background(), &volume_server_pb.VolumeCopyRequest{ VolumeId: uint32(volumeId), SourceDataNode: sourceVolumeServer, }) @@ -102,10 +101,10 @@ func copyVolume(ctx context.Context, grpcDialOption grpc.DialOption, volumeId ne return } -func tailVolume(ctx context.Context, grpcDialOption grpc.DialOption, volumeId needle.VolumeId, sourceVolumeServer, targetVolumeServer string, lastAppendAtNs uint64, idleTimeout time.Duration) (err error) { +func tailVolume(grpcDialOption grpc.DialOption, volumeId needle.VolumeId, sourceVolumeServer, targetVolumeServer string, lastAppendAtNs uint64, idleTimeout time.Duration) (err error) { - return operation.WithVolumeServerClient(targetVolumeServer, grpcDialOption, func(ctx context.Context, volumeServerClient volume_server_pb.VolumeServerClient) error { - _, replicateErr := volumeServerClient.VolumeTailReceiver(ctx, &volume_server_pb.VolumeTailReceiverRequest{ + return operation.WithVolumeServerClient(targetVolumeServer, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) 
error { + _, replicateErr := volumeServerClient.VolumeTailReceiver(context.Background(), &volume_server_pb.VolumeTailReceiverRequest{ VolumeId: uint32(volumeId), SinceNs: lastAppendAtNs, IdleTimeoutSeconds: uint32(idleTimeout.Seconds()), @@ -116,9 +115,9 @@ func tailVolume(ctx context.Context, grpcDialOption grpc.DialOption, volumeId ne } -func deleteVolume(ctx context.Context, grpcDialOption grpc.DialOption, volumeId needle.VolumeId, sourceVolumeServer string) (err error) { - return operation.WithVolumeServerClient(sourceVolumeServer, grpcDialOption, func(ctx context.Context, volumeServerClient volume_server_pb.VolumeServerClient) error { - _, deleteErr := volumeServerClient.VolumeDelete(ctx, &volume_server_pb.VolumeDeleteRequest{ +func deleteVolume(grpcDialOption grpc.DialOption, volumeId needle.VolumeId, sourceVolumeServer string) (err error) { + return operation.WithVolumeServerClient(sourceVolumeServer, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error { + _, deleteErr := volumeServerClient.VolumeDelete(context.Background(), &volume_server_pb.VolumeDeleteRequest{ VolumeId: uint32(volumeId), }) return deleteErr diff --git a/weed/shell/command_volume_tier_download.go b/weed/shell/command_volume_tier_download.go index 0f1a1bb6e..756dc4686 100644 --- a/weed/shell/command_volume_tier_download.go +++ b/weed/shell/command_volume_tier_download.go @@ -49,18 +49,17 @@ func (c *commandVolumeTierDownload) Do(args []string, commandEnv *CommandEnv, wr return nil } - ctx := context.Background() vid := needle.VolumeId(*volumeId) // collect topology information - topologyInfo, err := collectTopologyInfo(ctx, commandEnv) + topologyInfo, err := collectTopologyInfo(commandEnv) if err != nil { return err } // volumeId is provided if vid != 0 { - return doVolumeTierDownload(ctx, commandEnv, writer, *collection, vid) + return doVolumeTierDownload(commandEnv, writer, *collection, vid) } // apply to all volumes in the collection @@ -71,7 +70,7 @@ func (c *commandVolumeTierDownload) Do(args []string, commandEnv *CommandEnv, wr } fmt.Printf("tier download volumes: %v\n", volumeIds) for _, vid := range volumeIds { - if err = doVolumeTierDownload(ctx, commandEnv, writer, *collection, vid); err != nil { + if err = doVolumeTierDownload(commandEnv, writer, *collection, vid); err != nil { return err } } @@ -97,7 +96,7 @@ func collectRemoteVolumes(topoInfo *master_pb.TopologyInfo, selectedCollection s return } -func doVolumeTierDownload(ctx context.Context, commandEnv *CommandEnv, writer io.Writer, collection string, vid needle.VolumeId) (err error) { +func doVolumeTierDownload(commandEnv *CommandEnv, writer io.Writer, collection string, vid needle.VolumeId) (err error) { // find volume location locations, found := commandEnv.MasterClient.GetLocations(uint32(vid)) if !found { @@ -107,7 +106,7 @@ func doVolumeTierDownload(ctx context.Context, commandEnv *CommandEnv, writer io // TODO parallelize this for _, loc := range locations { // copy the .dat file from remote tier to local - err = downloadDatFromRemoteTier(ctx, commandEnv.option.GrpcDialOption, writer, needle.VolumeId(vid), collection, loc.Url) + err = downloadDatFromRemoteTier(commandEnv.option.GrpcDialOption, writer, needle.VolumeId(vid), collection, loc.Url) if err != nil { return fmt.Errorf("download dat file for volume %d to %s: %v", vid, loc.Url, err) } @@ -116,10 +115,10 @@ func doVolumeTierDownload(ctx context.Context, commandEnv *CommandEnv, writer io return nil } -func downloadDatFromRemoteTier(ctx context.Context, 
grpcDialOption grpc.DialOption, writer io.Writer, volumeId needle.VolumeId, collection string, targetVolumeServer string) error { +func downloadDatFromRemoteTier(grpcDialOption grpc.DialOption, writer io.Writer, volumeId needle.VolumeId, collection string, targetVolumeServer string) error { - err := operation.WithVolumeServerClient(targetVolumeServer, grpcDialOption, func(ctx context.Context, volumeServerClient volume_server_pb.VolumeServerClient) error { - stream, downloadErr := volumeServerClient.VolumeTierMoveDatFromRemote(ctx, &volume_server_pb.VolumeTierMoveDatFromRemoteRequest{ + err := operation.WithVolumeServerClient(targetVolumeServer, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error { + stream, downloadErr := volumeServerClient.VolumeTierMoveDatFromRemote(context.Background(), &volume_server_pb.VolumeTierMoveDatFromRemoteRequest{ VolumeId: uint32(volumeId), Collection: collection, }) @@ -145,14 +144,14 @@ func downloadDatFromRemoteTier(ctx context.Context, grpcDialOption grpc.DialOpti return downloadErr } - _, unmountErr := volumeServerClient.VolumeUnmount(ctx, &volume_server_pb.VolumeUnmountRequest{ + _, unmountErr := volumeServerClient.VolumeUnmount(context.Background(), &volume_server_pb.VolumeUnmountRequest{ VolumeId: uint32(volumeId), }) if unmountErr != nil { return unmountErr } - _, mountErr := volumeServerClient.VolumeMount(ctx, &volume_server_pb.VolumeMountRequest{ + _, mountErr := volumeServerClient.VolumeMount(context.Background(), &volume_server_pb.VolumeMountRequest{ VolumeId: uint32(volumeId), }) if mountErr != nil { diff --git a/weed/shell/command_volume_tier_upload.go b/weed/shell/command_volume_tier_upload.go index 20da1187c..5131e8f85 100644 --- a/weed/shell/command_volume_tier_upload.go +++ b/weed/shell/command_volume_tier_upload.go @@ -67,23 +67,22 @@ func (c *commandVolumeTierUpload) Do(args []string, commandEnv *CommandEnv, writ return nil } - ctx := context.Background() vid := needle.VolumeId(*volumeId) // volumeId is provided if vid != 0 { - return doVolumeTierUpload(ctx, commandEnv, writer, *collection, vid, *dest, *keepLocalDatFile) + return doVolumeTierUpload(commandEnv, writer, *collection, vid, *dest, *keepLocalDatFile) } // apply to all volumes in the collection // reusing collectVolumeIdsForEcEncode for now - volumeIds, err := collectVolumeIdsForEcEncode(ctx, commandEnv, *collection, *fullPercentage, *quietPeriod) + volumeIds, err := collectVolumeIdsForEcEncode(commandEnv, *collection, *fullPercentage, *quietPeriod) if err != nil { return err } fmt.Printf("tier upload volumes: %v\n", volumeIds) for _, vid := range volumeIds { - if err = doVolumeTierUpload(ctx, commandEnv, writer, *collection, vid, *dest, *keepLocalDatFile); err != nil { + if err = doVolumeTierUpload(commandEnv, writer, *collection, vid, *dest, *keepLocalDatFile); err != nil { return err } } @@ -91,20 +90,20 @@ func (c *commandVolumeTierUpload) Do(args []string, commandEnv *CommandEnv, writ return nil } -func doVolumeTierUpload(ctx context.Context, commandEnv *CommandEnv, writer io.Writer, collection string, vid needle.VolumeId, dest string, keepLocalDatFile bool) (err error) { +func doVolumeTierUpload(commandEnv *CommandEnv, writer io.Writer, collection string, vid needle.VolumeId, dest string, keepLocalDatFile bool) (err error) { // find volume location locations, found := commandEnv.MasterClient.GetLocations(uint32(vid)) if !found { return fmt.Errorf("volume %d not found", vid) } - err = markVolumeReadonly(ctx, commandEnv.option.GrpcDialOption, 
needle.VolumeId(vid), locations) + err = markVolumeReadonly(commandEnv.option.GrpcDialOption, needle.VolumeId(vid), locations) if err != nil { return fmt.Errorf("mark volume %d as readonly on %s: %v", vid, locations[0].Url, err) } // copy the .dat file to remote tier - err = uploadDatToRemoteTier(ctx, commandEnv.option.GrpcDialOption, writer, needle.VolumeId(vid), collection, locations[0].Url, dest, keepLocalDatFile) + err = uploadDatToRemoteTier(commandEnv.option.GrpcDialOption, writer, needle.VolumeId(vid), collection, locations[0].Url, dest, keepLocalDatFile) if err != nil { return fmt.Errorf("copy dat file for volume %d on %s to %s: %v", vid, locations[0].Url, dest, err) } @@ -112,10 +111,10 @@ func doVolumeTierUpload(ctx context.Context, commandEnv *CommandEnv, writer io.W return nil } -func uploadDatToRemoteTier(ctx context.Context, grpcDialOption grpc.DialOption, writer io.Writer, volumeId needle.VolumeId, collection string, sourceVolumeServer string, dest string, keepLocalDatFile bool) error { +func uploadDatToRemoteTier(grpcDialOption grpc.DialOption, writer io.Writer, volumeId needle.VolumeId, collection string, sourceVolumeServer string, dest string, keepLocalDatFile bool) error { - err := operation.WithVolumeServerClient(sourceVolumeServer, grpcDialOption, func(ctx context.Context, volumeServerClient volume_server_pb.VolumeServerClient) error { - stream, copyErr := volumeServerClient.VolumeTierMoveDatToRemote(ctx, &volume_server_pb.VolumeTierMoveDatToRemoteRequest{ + err := operation.WithVolumeServerClient(sourceVolumeServer, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error { + stream, copyErr := volumeServerClient.VolumeTierMoveDatToRemote(context.Background(), &volume_server_pb.VolumeTierMoveDatToRemoteRequest{ VolumeId: uint32(volumeId), Collection: collection, DestinationBackendName: dest, diff --git a/weed/shell/command_volume_unmount.go b/weed/shell/command_volume_unmount.go index 826258dfb..6e5bef485 100644 --- a/weed/shell/command_volume_unmount.go +++ b/weed/shell/command_volume_unmount.go @@ -45,14 +45,13 @@ func (c *commandVolumeUnmount) Do(args []string, commandEnv *CommandEnv, writer return fmt.Errorf("wrong volume id format %s: %v", volumeId, err) } - ctx := context.Background() - return unmountVolume(ctx, commandEnv.option.GrpcDialOption, volumeId, sourceVolumeServer) + return unmountVolume(commandEnv.option.GrpcDialOption, volumeId, sourceVolumeServer) } -func unmountVolume(ctx context.Context, grpcDialOption grpc.DialOption, volumeId needle.VolumeId, sourceVolumeServer string) (err error) { - return operation.WithVolumeServerClient(sourceVolumeServer, grpcDialOption, func(ctx context.Context, volumeServerClient volume_server_pb.VolumeServerClient) error { - _, unmountErr := volumeServerClient.VolumeUnmount(ctx, &volume_server_pb.VolumeUnmountRequest{ +func unmountVolume(grpcDialOption grpc.DialOption, volumeId needle.VolumeId, sourceVolumeServer string) (err error) { + return operation.WithVolumeServerClient(sourceVolumeServer, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error { + _, unmountErr := volumeServerClient.VolumeUnmount(context.Background(), &volume_server_pb.VolumeUnmountRequest{ VolumeId: uint32(volumeId), }) return unmountErr diff --git a/weed/shell/commands.go b/weed/shell/commands.go index f1fcb62d4..31ca31bc3 100644 --- a/weed/shell/commands.go +++ b/weed/shell/commands.go @@ -44,7 +44,7 @@ var ( func NewCommandEnv(options ShellOptions) *CommandEnv { return &CommandEnv{ env: 
make(map[string]string), - MasterClient: wdclient.NewMasterClient(context.Background(), + MasterClient: wdclient.NewMasterClient( options.GrpcDialOption, "shell", strings.Split(*options.Masters, ",")), option: options, } @@ -60,19 +60,19 @@ func (ce *CommandEnv) parseUrl(input string) (filerServer string, filerPort int6 return ce.option.FilerHost, ce.option.FilerPort, input, err } -func (ce *CommandEnv) isDirectory(ctx context.Context, filerServer string, filerPort int64, path string) bool { +func (ce *CommandEnv) isDirectory(filerServer string, filerPort int64, path string) bool { - return ce.checkDirectory(ctx, filerServer, filerPort, path) == nil + return ce.checkDirectory(filerServer, filerPort, path) == nil } -func (ce *CommandEnv) checkDirectory(ctx context.Context, filerServer string, filerPort int64, path string) error { +func (ce *CommandEnv) checkDirectory(filerServer string, filerPort int64, path string) error { dir, name := filer2.FullPath(path).DirAndName() - return ce.withFilerClient(ctx, filerServer, filerPort, func(ctx context.Context, client filer_pb.SeaweedFilerClient) error { + return ce.withFilerClient(filerServer, filerPort, func(client filer_pb.SeaweedFilerClient) error { - resp, lookupErr := client.LookupDirectoryEntry(ctx, &filer_pb.LookupDirectoryEntryRequest{ + resp, lookupErr := client.LookupDirectoryEntry(context.Background(), &filer_pb.LookupDirectoryEntryRequest{ Directory: dir, Name: name, }) diff --git a/weed/storage/store_ec.go b/weed/storage/store_ec.go index 47e061d05..e423e7dca 100644 --- a/weed/storage/store_ec.go +++ b/weed/storage/store_ec.go @@ -116,7 +116,7 @@ func (s *Store) DestroyEcVolume(vid needle.VolumeId) { } } -func (s *Store) ReadEcShardNeedle(ctx context.Context, vid needle.VolumeId, n *needle.Needle) (int, error) { +func (s *Store) ReadEcShardNeedle(vid needle.VolumeId, n *needle.Needle) (int, error) { for _, location := range s.Locations { if localEcVolume, found := location.FindEcVolume(vid); found { @@ -133,7 +133,7 @@ func (s *Store) ReadEcShardNeedle(ctx context.Context, vid needle.VolumeId, n *n if len(intervals) > 1 { glog.V(3).Infof("ReadEcShardNeedle needle id %s intervals:%+v", n.String(), intervals) } - bytes, isDeleted, err := s.readEcShardIntervals(ctx, vid, n.Id, localEcVolume, intervals) + bytes, isDeleted, err := s.readEcShardIntervals(vid, n.Id, localEcVolume, intervals) if err != nil { return 0, fmt.Errorf("ReadEcShardIntervals: %v", err) } @@ -152,14 +152,14 @@ func (s *Store) ReadEcShardNeedle(ctx context.Context, vid needle.VolumeId, n *n return 0, fmt.Errorf("ec shard %d not found", vid) } -func (s *Store) readEcShardIntervals(ctx context.Context, vid needle.VolumeId, needleId types.NeedleId, ecVolume *erasure_coding.EcVolume, intervals []erasure_coding.Interval) (data []byte, is_deleted bool, err error) { +func (s *Store) readEcShardIntervals(vid needle.VolumeId, needleId types.NeedleId, ecVolume *erasure_coding.EcVolume, intervals []erasure_coding.Interval) (data []byte, is_deleted bool, err error) { - if err = s.cachedLookupEcShardLocations(ctx, ecVolume); err != nil { + if err = s.cachedLookupEcShardLocations(ecVolume); err != nil { return nil, false, fmt.Errorf("failed to locate shard via master grpc %s: %v", s.MasterAddress, err) } for i, interval := range intervals { - if d, isDeleted, e := s.readOneEcShardInterval(ctx, needleId, ecVolume, interval); e != nil { + if d, isDeleted, e := s.readOneEcShardInterval(needleId, ecVolume, interval); e != nil { return nil, isDeleted, e } else { if isDeleted { @@ -175,7 
+175,7 @@ func (s *Store) readEcShardIntervals(ctx context.Context, vid needle.VolumeId, n return } -func (s *Store) readOneEcShardInterval(ctx context.Context, needleId types.NeedleId, ecVolume *erasure_coding.EcVolume, interval erasure_coding.Interval) (data []byte, is_deleted bool, err error) { +func (s *Store) readOneEcShardInterval(needleId types.NeedleId, ecVolume *erasure_coding.EcVolume, interval erasure_coding.Interval) (data []byte, is_deleted bool, err error) { shardId, actualOffset := interval.ToShardIdAndOffset(erasure_coding.ErasureCodingLargeBlockSize, erasure_coding.ErasureCodingSmallBlockSize) data = make([]byte, interval.Size) if shard, found := ecVolume.FindEcVolumeShard(shardId); found { @@ -190,7 +190,7 @@ func (s *Store) readOneEcShardInterval(ctx context.Context, needleId types.Needl // try reading directly if hasShardIdLocation { - _, is_deleted, err = s.readRemoteEcShardInterval(ctx, sourceDataNodes, needleId, ecVolume.VolumeId, shardId, data, actualOffset) + _, is_deleted, err = s.readRemoteEcShardInterval(sourceDataNodes, needleId, ecVolume.VolumeId, shardId, data, actualOffset) if err == nil { return } @@ -199,7 +199,7 @@ func (s *Store) readOneEcShardInterval(ctx context.Context, needleId types.Needl } // try reading by recovering from other shards - _, is_deleted, err = s.recoverOneRemoteEcShardInterval(ctx, needleId, ecVolume, shardId, data, actualOffset) + _, is_deleted, err = s.recoverOneRemoteEcShardInterval(needleId, ecVolume, shardId, data, actualOffset) if err == nil { return } @@ -215,7 +215,7 @@ func forgetShardId(ecVolume *erasure_coding.EcVolume, shardId erasure_coding.Sha ecVolume.ShardLocationsLock.Unlock() } -func (s *Store) cachedLookupEcShardLocations(ctx context.Context, ecVolume *erasure_coding.EcVolume) (err error) { +func (s *Store) cachedLookupEcShardLocations(ecVolume *erasure_coding.EcVolume) (err error) { shardCount := len(ecVolume.ShardLocations) if shardCount < erasure_coding.DataShardsCount && @@ -230,11 +230,11 @@ func (s *Store) cachedLookupEcShardLocations(ctx context.Context, ecVolume *eras glog.V(3).Infof("lookup and cache ec volume %d locations", ecVolume.VolumeId) - err = operation.WithMasterServerClient(s.MasterAddress, s.grpcDialOption, func(ctx context.Context, masterClient master_pb.SeaweedClient) error { + err = operation.WithMasterServerClient(s.MasterAddress, s.grpcDialOption, func(masterClient master_pb.SeaweedClient) error { req := &master_pb.LookupEcVolumeRequest{ VolumeId: uint32(ecVolume.VolumeId), } - resp, err := masterClient.LookupEcVolume(ctx, req) + resp, err := masterClient.LookupEcVolume(context.Background(), req) if err != nil { return fmt.Errorf("lookup ec volume %d: %v", ecVolume.VolumeId, err) } @@ -258,7 +258,7 @@ func (s *Store) cachedLookupEcShardLocations(ctx context.Context, ecVolume *eras return } -func (s *Store) readRemoteEcShardInterval(ctx context.Context, sourceDataNodes []string, needleId types.NeedleId, vid needle.VolumeId, shardId erasure_coding.ShardId, buf []byte, offset int64) (n int, is_deleted bool, err error) { +func (s *Store) readRemoteEcShardInterval(sourceDataNodes []string, needleId types.NeedleId, vid needle.VolumeId, shardId erasure_coding.ShardId, buf []byte, offset int64) (n int, is_deleted bool, err error) { if len(sourceDataNodes) == 0 { return 0, false, fmt.Errorf("failed to find ec shard %d.%d", vid, shardId) @@ -266,7 +266,7 @@ func (s *Store) readRemoteEcShardInterval(ctx context.Context, sourceDataNodes [ for _, sourceDataNode := range sourceDataNodes { 
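// Each known source data node for this shard is tried in turn: the first
// successful doReadRemoteEcShardInterval call returns immediately, and any
// failure simply falls through to the next candidate location.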
glog.V(3).Infof("read remote ec shard %d.%d from %s", vid, shardId, sourceDataNode) - n, is_deleted, err = s.doReadRemoteEcShardInterval(ctx, sourceDataNode, needleId, vid, shardId, buf, offset) + n, is_deleted, err = s.doReadRemoteEcShardInterval(sourceDataNode, needleId, vid, shardId, buf, offset) if err == nil { return } @@ -276,12 +276,12 @@ func (s *Store) readRemoteEcShardInterval(ctx context.Context, sourceDataNodes [ return } -func (s *Store) doReadRemoteEcShardInterval(ctx context.Context, sourceDataNode string, needleId types.NeedleId, vid needle.VolumeId, shardId erasure_coding.ShardId, buf []byte, offset int64) (n int, is_deleted bool, err error) { +func (s *Store) doReadRemoteEcShardInterval(sourceDataNode string, needleId types.NeedleId, vid needle.VolumeId, shardId erasure_coding.ShardId, buf []byte, offset int64) (n int, is_deleted bool, err error) { - err = operation.WithVolumeServerClient(sourceDataNode, s.grpcDialOption, func(ctx context.Context, client volume_server_pb.VolumeServerClient) error { + err = operation.WithVolumeServerClient(sourceDataNode, s.grpcDialOption, func(client volume_server_pb.VolumeServerClient) error { // copy data slice - shardReadClient, err := client.VolumeEcShardRead(ctx, &volume_server_pb.VolumeEcShardReadRequest{ + shardReadClient, err := client.VolumeEcShardRead(context.Background(), &volume_server_pb.VolumeEcShardReadRequest{ VolumeId: uint32(vid), ShardId: uint32(shardId), Offset: offset, @@ -316,7 +316,7 @@ func (s *Store) doReadRemoteEcShardInterval(ctx context.Context, sourceDataNode return } -func (s *Store) recoverOneRemoteEcShardInterval(ctx context.Context, needleId types.NeedleId, ecVolume *erasure_coding.EcVolume, shardIdToRecover erasure_coding.ShardId, buf []byte, offset int64) (n int, is_deleted bool, err error) { +func (s *Store) recoverOneRemoteEcShardInterval(needleId types.NeedleId, ecVolume *erasure_coding.EcVolume, shardIdToRecover erasure_coding.ShardId, buf []byte, offset int64) (n int, is_deleted bool, err error) { glog.V(3).Infof("recover ec shard %d.%d from other locations", ecVolume.VolumeId, shardIdToRecover) enc, err := reedsolomon.New(erasure_coding.DataShardsCount, erasure_coding.ParityShardsCount) @@ -344,7 +344,7 @@ func (s *Store) recoverOneRemoteEcShardInterval(ctx context.Context, needleId ty go func(shardId erasure_coding.ShardId, locations []string) { defer wg.Done() data := make([]byte, len(buf)) - nRead, isDeleted, readErr := s.readRemoteEcShardInterval(ctx, locations, needleId, ecVolume.VolumeId, shardId, data, offset) + nRead, isDeleted, readErr := s.readRemoteEcShardInterval(locations, needleId, ecVolume.VolumeId, shardId, data, offset) if readErr != nil { glog.V(3).Infof("recover: readRemoteEcShardInterval %d.%d %d bytes from %+v: %v", ecVolume.VolumeId, shardId, nRead, locations, readErr) forgetShardId(ecVolume, shardId) diff --git a/weed/storage/store_ec_delete.go b/weed/storage/store_ec_delete.go index 2ac907f6c..7e2d9e364 100644 --- a/weed/storage/store_ec_delete.go +++ b/weed/storage/store_ec_delete.go @@ -14,7 +14,7 @@ import ( func (s *Store) DeleteEcShardNeedle(ctx context.Context, ecVolume *erasure_coding.EcVolume, n *needle.Needle, cookie types.Cookie) (int64, error) { - count, err := s.ReadEcShardNeedle(ctx, ecVolume.VolumeId, n) + count, err := s.ReadEcShardNeedle(ecVolume.VolumeId, n) if err != nil { return 0, err @@ -24,7 +24,7 @@ func (s *Store) DeleteEcShardNeedle(ctx context.Context, ecVolume *erasure_codin return 0, fmt.Errorf("unexpected cookie %x", cookie) } - if err = 
s.doDeleteNeedleFromAtLeastOneRemoteEcShards(ctx, ecVolume, n.Id); err != nil { + if err = s.doDeleteNeedleFromAtLeastOneRemoteEcShards(ecVolume, n.Id); err != nil { return 0, err } @@ -32,7 +32,7 @@ func (s *Store) DeleteEcShardNeedle(ctx context.Context, ecVolume *erasure_codin } -func (s *Store) doDeleteNeedleFromAtLeastOneRemoteEcShards(ctx context.Context, ecVolume *erasure_coding.EcVolume, needleId types.NeedleId) error { +func (s *Store) doDeleteNeedleFromAtLeastOneRemoteEcShards(ecVolume *erasure_coding.EcVolume, needleId types.NeedleId) error { _, _, intervals, err := ecVolume.LocateEcShardNeedle(needleId, ecVolume.Version) @@ -43,13 +43,13 @@ func (s *Store) doDeleteNeedleFromAtLeastOneRemoteEcShards(ctx context.Context, shardId, _ := intervals[0].ToShardIdAndOffset(erasure_coding.ErasureCodingLargeBlockSize, erasure_coding.ErasureCodingSmallBlockSize) hasDeletionSuccess := false - err = s.doDeleteNeedleFromRemoteEcShardServers(ctx, shardId, ecVolume, needleId) + err = s.doDeleteNeedleFromRemoteEcShardServers(shardId, ecVolume, needleId) if err == nil { hasDeletionSuccess = true } for shardId = erasure_coding.DataShardsCount; shardId < erasure_coding.TotalShardsCount; shardId++ { - if parityDeletionError := s.doDeleteNeedleFromRemoteEcShardServers(ctx, shardId, ecVolume, needleId); parityDeletionError == nil { + if parityDeletionError := s.doDeleteNeedleFromRemoteEcShardServers(shardId, ecVolume, needleId); parityDeletionError == nil { hasDeletionSuccess = true } } @@ -62,7 +62,7 @@ func (s *Store) doDeleteNeedleFromAtLeastOneRemoteEcShards(ctx context.Context, } -func (s *Store) doDeleteNeedleFromRemoteEcShardServers(ctx context.Context, shardId erasure_coding.ShardId, ecVolume *erasure_coding.EcVolume, needleId types.NeedleId) error { +func (s *Store) doDeleteNeedleFromRemoteEcShardServers(shardId erasure_coding.ShardId, ecVolume *erasure_coding.EcVolume, needleId types.NeedleId) error { ecVolume.ShardLocationsLock.RLock() sourceDataNodes, hasShardLocations := ecVolume.ShardLocations[shardId] @@ -74,7 +74,7 @@ func (s *Store) doDeleteNeedleFromRemoteEcShardServers(ctx context.Context, shar for _, sourceDataNode := range sourceDataNodes { glog.V(4).Infof("delete from remote ec shard %d.%d from %s", ecVolume.VolumeId, shardId, sourceDataNode) - err := s.doDeleteNeedleFromRemoteEcShard(ctx, sourceDataNode, ecVolume.VolumeId, ecVolume.Collection, ecVolume.Version, needleId) + err := s.doDeleteNeedleFromRemoteEcShard(sourceDataNode, ecVolume.VolumeId, ecVolume.Collection, ecVolume.Version, needleId) if err != nil { return err } @@ -85,12 +85,12 @@ func (s *Store) doDeleteNeedleFromRemoteEcShardServers(ctx context.Context, shar } -func (s *Store) doDeleteNeedleFromRemoteEcShard(ctx context.Context, sourceDataNode string, vid needle.VolumeId, collection string, version needle.Version, needleId types.NeedleId) error { +func (s *Store) doDeleteNeedleFromRemoteEcShard(sourceDataNode string, vid needle.VolumeId, collection string, version needle.Version, needleId types.NeedleId) error { - return operation.WithVolumeServerClient(sourceDataNode, s.grpcDialOption, func(ctx context.Context, client volume_server_pb.VolumeServerClient) error { + return operation.WithVolumeServerClient(sourceDataNode, s.grpcDialOption, func(client volume_server_pb.VolumeServerClient) error { // copy data slice - _, err := client.VolumeEcBlobDelete(ctx, &volume_server_pb.VolumeEcBlobDeleteRequest{ + _, err := client.VolumeEcBlobDelete(context.Background(), &volume_server_pb.VolumeEcBlobDeleteRequest{ VolumeId: 
uint32(vid), Collection: collection, FileKey: uint64(needleId), diff --git a/weed/storage/volume_backup.go b/weed/storage/volume_backup.go index 3763d5515..f7075fe2b 100644 --- a/weed/storage/volume_backup.go +++ b/weed/storage/volume_backup.go @@ -72,9 +72,9 @@ func (v *Volume) IncrementalBackup(volumeServer string, grpcDialOption grpc.Dial writeOffset := int64(startFromOffset) - err = operation.WithVolumeServerClient(volumeServer, grpcDialOption, func(ctx context.Context, client volume_server_pb.VolumeServerClient) error { + err = operation.WithVolumeServerClient(volumeServer, grpcDialOption, func(client volume_server_pb.VolumeServerClient) error { - stream, err := client.VolumeIncrementalCopy(ctx, &volume_server_pb.VolumeIncrementalCopyRequest{ + stream, err := client.VolumeIncrementalCopy(context.Background(), &volume_server_pb.VolumeIncrementalCopyRequest{ VolumeId: uint32(v.Id), SinceNs: appendAtNs, }) diff --git a/weed/topology/allocate_volume.go b/weed/topology/allocate_volume.go index 6ca987bc5..e5dc48652 100644 --- a/weed/topology/allocate_volume.go +++ b/weed/topology/allocate_volume.go @@ -15,7 +15,7 @@ type AllocateVolumeResult struct { func AllocateVolume(dn *DataNode, grpcDialOption grpc.DialOption, vid needle.VolumeId, option *VolumeGrowOption) error { - return operation.WithVolumeServerClient(dn.Url(), grpcDialOption, func(ctx context.Context, client volume_server_pb.VolumeServerClient) error { + return operation.WithVolumeServerClient(dn.Url(), grpcDialOption, func(client volume_server_pb.VolumeServerClient) error { _, deleteErr := client.AllocateVolume(context.Background(), &volume_server_pb.AllocateVolumeRequest{ VolumeId: uint32(vid), diff --git a/weed/topology/topology_vacuum.go b/weed/topology/topology_vacuum.go index e7dbf9b1e..ca626e973 100644 --- a/weed/topology/topology_vacuum.go +++ b/weed/topology/topology_vacuum.go @@ -19,8 +19,8 @@ func batchVacuumVolumeCheck(grpcDialOption grpc.DialOption, vl *VolumeLayout, vi errCount := int32(0) for index, dn := range locationlist.list { go func(index int, url string, vid needle.VolumeId) { - err := operation.WithVolumeServerClient(url, grpcDialOption, func(ctx context.Context, volumeServerClient volume_server_pb.VolumeServerClient) error { - resp, err := volumeServerClient.VacuumVolumeCheck(ctx, &volume_server_pb.VacuumVolumeCheckRequest{ + err := operation.WithVolumeServerClient(url, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error { + resp, err := volumeServerClient.VacuumVolumeCheck(context.Background(), &volume_server_pb.VacuumVolumeCheckRequest{ VolumeId: uint32(vid), }) if err != nil { @@ -63,8 +63,8 @@ func batchVacuumVolumeCompact(grpcDialOption grpc.DialOption, vl *VolumeLayout, for index, dn := range locationlist.list { go func(index int, url string, vid needle.VolumeId) { glog.V(0).Infoln(index, "Start vacuuming", vid, "on", url) - err := operation.WithVolumeServerClient(url, grpcDialOption, func(ctx context.Context, volumeServerClient volume_server_pb.VolumeServerClient) error { - _, err := volumeServerClient.VacuumVolumeCompact(ctx, &volume_server_pb.VacuumVolumeCompactRequest{ + err := operation.WithVolumeServerClient(url, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error { + _, err := volumeServerClient.VacuumVolumeCompact(context.Background(), &volume_server_pb.VacuumVolumeCompactRequest{ VolumeId: uint32(vid), }) return err @@ -93,8 +93,8 @@ func batchVacuumVolumeCommit(grpcDialOption grpc.DialOption, vl *VolumeLayout, v isCommitSuccess := 
true for _, dn := range locationlist.list { glog.V(0).Infoln("Start Committing vacuum", vid, "on", dn.Url()) - err := operation.WithVolumeServerClient(dn.Url(), grpcDialOption, func(ctx context.Context, volumeServerClient volume_server_pb.VolumeServerClient) error { - _, err := volumeServerClient.VacuumVolumeCommit(ctx, &volume_server_pb.VacuumVolumeCommitRequest{ + err := operation.WithVolumeServerClient(dn.Url(), grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error { + _, err := volumeServerClient.VacuumVolumeCommit(context.Background(), &volume_server_pb.VacuumVolumeCommitRequest{ VolumeId: uint32(vid), }) return err @@ -114,8 +114,8 @@ func batchVacuumVolumeCommit(grpcDialOption grpc.DialOption, vl *VolumeLayout, v func batchVacuumVolumeCleanup(grpcDialOption grpc.DialOption, vl *VolumeLayout, vid needle.VolumeId, locationlist *VolumeLocationList) { for _, dn := range locationlist.list { glog.V(0).Infoln("Start cleaning up", vid, "on", dn.Url()) - err := operation.WithVolumeServerClient(dn.Url(), grpcDialOption, func(ctx context.Context, volumeServerClient volume_server_pb.VolumeServerClient) error { - _, err := volumeServerClient.VacuumVolumeCleanup(ctx, &volume_server_pb.VacuumVolumeCleanupRequest{ + err := operation.WithVolumeServerClient(dn.Url(), grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error { + _, err := volumeServerClient.VacuumVolumeCleanup(context.Background(), &volume_server_pb.VacuumVolumeCleanupRequest{ VolumeId: uint32(vid), }) return err diff --git a/weed/util/grpc_client_server.go b/weed/util/grpc_client_server.go index 7e396342b..4dace5e8b 100644 --- a/weed/util/grpc_client_server.go +++ b/weed/util/grpc_client_server.go @@ -57,14 +57,14 @@ func GrpcDial(ctx context.Context, address string, opts ...grpc.DialOption) (*gr return grpc.DialContext(ctx, address, options...) } -func WithCachedGrpcClient(ctx context.Context, fn func(context.Context, *grpc.ClientConn) error, address string, opts ...grpc.DialOption) error { +func WithCachedGrpcClient(fn func(*grpc.ClientConn) error, address string, opts ...grpc.DialOption) error { grpcClientsLock.Lock() existingConnection, found := grpcClients[address] if found { grpcClientsLock.Unlock() - err := fn(ctx, existingConnection) + err := fn(existingConnection) if err != nil { grpcClientsLock.Lock() delete(grpcClients, address) @@ -74,7 +74,7 @@ func WithCachedGrpcClient(ctx context.Context, fn func(context.Context, *grpc.Cl return err } - grpcConnection, err := GrpcDial(ctx, address, opts...) + grpcConnection, err := GrpcDial(context.Background(), address, opts...) 
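// With the caller-supplied ctx removed, the dial itself now runs under
// context.Background(). A successful connection is cached in grpcClients
// under its address and, as shown a few lines below, evicted from that
// cache again whenever the wrapped fn returns an error, so a broken
// connection is not reused on the next call.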
if err != nil { grpcClientsLock.Unlock() return fmt.Errorf("fail to dial %s: %v", address, err) @@ -83,7 +83,7 @@ func WithCachedGrpcClient(ctx context.Context, fn func(context.Context, *grpc.Cl grpcClients[address] = grpcConnection grpcClientsLock.Unlock() - err = fn(ctx, grpcConnection) + err = fn(grpcConnection) if err != nil { grpcClientsLock.Lock() delete(grpcClients, address) diff --git a/weed/wdclient/masterclient.go b/weed/wdclient/masterclient.go index 30b0cf160..f197fa6f2 100644 --- a/weed/wdclient/masterclient.go +++ b/weed/wdclient/masterclient.go @@ -13,7 +13,6 @@ import ( ) type MasterClient struct { - ctx context.Context name string currentMaster string masters []string @@ -22,9 +21,8 @@ type MasterClient struct { vidMap } -func NewMasterClient(ctx context.Context, grpcDialOption grpc.DialOption, clientName string, masters []string) *MasterClient { +func NewMasterClient(grpcDialOption grpc.DialOption, clientName string, masters []string) *MasterClient { return &MasterClient{ - ctx: ctx, name: clientName, masters: masters, grpcDialOption: grpcDialOption, @@ -66,9 +64,9 @@ func (mc *MasterClient) tryAllMasters() { func (mc *MasterClient) tryConnectToMaster(master string) (nextHintedLeader string) { glog.V(1).Infof("%s Connecting to master %v", mc.name, master) - gprcErr := withMasterClient(context.Background(), master, mc.grpcDialOption, func(ctx context.Context, client master_pb.SeaweedClient) error { + gprcErr := withMasterClient(master, mc.grpcDialOption, func(client master_pb.SeaweedClient) error { - stream, err := client.KeepConnected(ctx) + stream, err := client.KeepConnected(context.Background()) if err != nil { glog.V(0).Infof("%s failed to keep connected to %s: %v", mc.name, master, err) return err @@ -118,22 +116,22 @@ func (mc *MasterClient) tryConnectToMaster(master string) (nextHintedLeader stri return } -func withMasterClient(ctx context.Context, master string, grpcDialOption grpc.DialOption, fn func(ctx context.Context, client master_pb.SeaweedClient) error) error { +func withMasterClient(master string, grpcDialOption grpc.DialOption, fn func(client master_pb.SeaweedClient) error) error { masterGrpcAddress, parseErr := util.ParseServerToGrpcAddress(master) if parseErr != nil { return fmt.Errorf("failed to parse master grpc %v: %v", master, parseErr) } - return util.WithCachedGrpcClient(ctx, func(ctx2 context.Context, grpcConnection *grpc.ClientConn) error { + return util.WithCachedGrpcClient(func(grpcConnection *grpc.ClientConn) error { client := master_pb.NewSeaweedClient(grpcConnection) - return fn(ctx2, client) + return fn(client) }, masterGrpcAddress, grpcDialOption) } -func (mc *MasterClient) WithClient(ctx context.Context, fn func(client master_pb.SeaweedClient) error) error { - return withMasterClient(ctx, mc.currentMaster, mc.grpcDialOption, func(ctx context.Context, client master_pb.SeaweedClient) error { +func (mc *MasterClient) WithClient(fn func(client master_pb.SeaweedClient) error) error { + return withMasterClient(mc.currentMaster, mc.grpcDialOption, func(client master_pb.SeaweedClient) error { return fn(client) }) } From ca4ca1ae6f87c6fcbf9c486722f36b4f07166187 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Tue, 25 Feb 2020 21:56:15 -0800 Subject: [PATCH 0155/2432] update docker local dev env --- docker/Dockerfile.go_build | 12 +++--------- docker/Makefile | 8 ++++++++ docker/README.md | 4 +--- 3 files changed, 12 insertions(+), 12 deletions(-) create mode 100644 docker/Makefile diff --git a/docker/Dockerfile.go_build b/docker/Dockerfile.go_build 
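The callback shape changes the same way throughout these patches: the context parameter disappears from the closure, and each RPC builds its own context.Background() at the point where it is issued. A hedged sketch of the new calling convention, using the standard gRPC health-check client as a stand-in for the SeaweedFS protobuf clients (helper and function names are illustrative only):

package grpcconn

import (
	"context"

	"google.golang.org/grpc"
	"google.golang.org/grpc/health/grpc_health_v1"
)

// withHealthClient mirrors the new convention: the helper owns dialing, and the
// callback only receives the typed client, creating contexts where it needs them.
func withHealthClient(address string, fn func(grpc_health_v1.HealthClient) error, opts ...grpc.DialOption) error {
	conn, err := grpc.DialContext(context.Background(), address, opts...)
	if err != nil {
		return err
	}
	defer conn.Close()
	return fn(grpc_health_v1.NewHealthClient(conn))
}

func checkOnce(address string) error {
	return withHealthClient(address, func(client grpc_health_v1.HealthClient) error {
		// Each RPC now builds its own context instead of receiving one from the helper.
		_, err := client.Check(context.Background(), &grpc_health_v1.HealthCheckRequest{})
		return err
	}, grpc.WithInsecure()) // sketch only: no TLS
}

Callers that need cancellation or deadlines can still create them inside the callback; the helpers simply no longer thread a context through on their behalf.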
index 306ce3aa1..b4a7b6504 100644 --- a/docker/Dockerfile.go_build +++ b/docker/Dockerfile.go_build @@ -1,15 +1,9 @@ -FROM frolvlad/alpine-glibc as builder -RUN apk add git go g++ -RUN mkdir -p /go/src/github.com/chrislusf/ -RUN git clone https://github.com/chrislusf/seaweedfs /go/src/github.com/chrislusf/seaweedfs -RUN cd /go/src/github.com/chrislusf/seaweedfs/weed && go install - FROM alpine AS final LABEL author="Chris Lu" -COPY --from=builder /root/go/bin/weed /usr/bin/ +COPY ./weed /usr/bin/ RUN mkdir -p /etc/seaweedfs -COPY --from=builder /go/src/github.com/chrislusf/seaweedfs/docker/filer.toml /etc/seaweedfs/filer.toml -COPY --from=builder /go/src/github.com/chrislusf/seaweedfs/docker/entrypoint.sh /entrypoint.sh +COPY ./filer.toml /etc/seaweedfs/filer.toml +COPY ./entrypoint.sh /entrypoint.sh # volume server gprc port EXPOSE 18080 diff --git a/docker/Makefile b/docker/Makefile new file mode 100644 index 000000000..4993fb2a5 --- /dev/null +++ b/docker/Makefile @@ -0,0 +1,8 @@ +all: gen + +.PHONY : gen + +gen: + cd ../weed; GOOS=linux go build; mv weed ../docker/ + docker-compose -f local-dev-compose.yml -p seaweedfs up + rm ./weed diff --git a/docker/README.md b/docker/README.md index 1a2833c7e..65241b517 100644 --- a/docker/README.md +++ b/docker/README.md @@ -25,7 +25,5 @@ docker-compose -f seaweedfs-dev-compose.yml -p seaweedfs up ```bash cd $GOPATH/src/github.com/chrislusf/seaweedfs/docker - -docker-compose -f local-dev-compose.yml -p seaweedfs up - +make ``` From 97ab8a1976f3ba056af8d5b630dcb43006425b51 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Tue, 25 Feb 2020 22:23:59 -0800 Subject: [PATCH 0156/2432] remove ctx if possible --- weed/command/filer_copy.go | 16 +++++----- weed/command/s3.go | 3 +- weed/filesys/dirty_page.go | 8 ++--- weed/filesys/filehandle.go | 12 +++---- .../gocdk_pub_sub/gocdk_pub_sub.go | 3 +- weed/replication/sink/b2sink/b2_sink.go | 3 +- weed/replication/sink/gcssink/gcs_sink.go | 3 +- weed/replication/sink/s3sink/s3_sink.go | 2 +- weed/replication/sink/s3sink/s3_write.go | 6 ++-- weed/s3api/filer_multipart.go | 31 +++++++++---------- weed/s3api/filer_util.go | 22 ++++++------- weed/s3api/s3api_bucket_handlers.go | 13 +++----- weed/s3api/s3api_object_handlers.go | 3 +- weed/s3api/s3api_object_multipart_handlers.go | 15 ++++----- weed/s3api/s3api_objects_list_handlers.go | 12 +++---- weed/server/filer_grpc_server.go | 3 +- weed/server/volume_grpc_client_to_master.go | 8 ++--- weed/server/volume_grpc_copy.go | 11 +++---- weed/server/volume_grpc_erasure_coding.go | 8 ++--- weed/server/volume_server_handlers_write.go | 3 +- weed/shell/command_ec_balance.go | 7 ++--- weed/shell/command_ec_encode.go | 4 +-- weed/shell/command_fs_meta_notify.go | 5 +-- weed/shell/command_fs_meta_save.go | 14 +++------ weed/shell/command_fs_tree.go | 9 ++---- weed/storage/store_ec_delete.go | 2 +- 26 files changed, 94 insertions(+), 132 deletions(-) diff --git a/weed/command/filer_copy.go b/weed/command/filer_copy.go index 2edceb715..18f41048b 100644 --- a/weed/command/filer_copy.go +++ b/weed/command/filer_copy.go @@ -107,9 +107,7 @@ func runCopy(cmd *Command, args []string) bool { filerGrpcAddress := fmt.Sprintf("%s:%d", filerUrl.Hostname(), filerGrpcPort) copy.grpcDialOption = security.LoadClientTLS(util.GetViper(), "grpc.client") - ctx := context.Background() - - masters, collection, replication, maxMB, err := readFilerConfiguration(ctx, copy.grpcDialOption, filerGrpcAddress) + masters, collection, replication, maxMB, err := readFilerConfiguration(copy.grpcDialOption, 
filerGrpcAddress) if err != nil { fmt.Printf("read from filer %s: %v\n", filerGrpcAddress, err) return false @@ -149,7 +147,7 @@ func runCopy(cmd *Command, args []string) bool { filerHost: filerUrl.Host, filerGrpcAddress: filerGrpcAddress, } - if err := worker.copyFiles(ctx, fileCopyTaskChan); err != nil { + if err := worker.copyFiles(fileCopyTaskChan); err != nil { fmt.Fprintf(os.Stderr, "copy file error: %v\n", err) return } @@ -160,9 +158,9 @@ func runCopy(cmd *Command, args []string) bool { return true } -func readFilerConfiguration(ctx context.Context, grpcDialOption grpc.DialOption, filerGrpcAddress string) (masters []string, collection, replication string, maxMB uint32, err error) { +func readFilerConfiguration(grpcDialOption grpc.DialOption, filerGrpcAddress string) (masters []string, collection, replication string, maxMB uint32, err error) { err = withFilerClient(filerGrpcAddress, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error { - resp, err := client.GetFilerConfiguration(ctx, &filer_pb.GetFilerConfigurationRequest{}) + resp, err := client.GetFilerConfiguration(context.Background(), &filer_pb.GetFilerConfigurationRequest{}) if err != nil { return fmt.Errorf("get filer %s configuration: %v", filerGrpcAddress, err) } @@ -211,9 +209,9 @@ type FileCopyWorker struct { filerGrpcAddress string } -func (worker *FileCopyWorker) copyFiles(ctx context.Context, fileCopyTaskChan chan FileCopyTask) error { +func (worker *FileCopyWorker) copyFiles(fileCopyTaskChan chan FileCopyTask) error { for task := range fileCopyTaskChan { - if err := worker.doEachCopy(ctx, task); err != nil { + if err := worker.doEachCopy(task); err != nil { return err } } @@ -229,7 +227,7 @@ type FileCopyTask struct { gid uint32 } -func (worker *FileCopyWorker) doEachCopy(ctx context.Context, task FileCopyTask) error { +func (worker *FileCopyWorker) doEachCopy(task FileCopyTask) error { f, err := os.Open(task.sourceLocation) if err != nil { diff --git a/weed/command/s3.go b/weed/command/s3.go index abd32e545..9e8236a83 100644 --- a/weed/command/s3.go +++ b/weed/command/s3.go @@ -126,10 +126,9 @@ func (s3opt *S3Options) startS3Server() bool { filerBucketsPath := "/buckets" grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.client") - ctx := context.Background() err = withFilerClient(filerGrpcAddress, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error { - resp, err := client.GetFilerConfiguration(ctx, &filer_pb.GetFilerConfigurationRequest{}) + resp, err := client.GetFilerConfiguration(context.Background(), &filer_pb.GetFilerConfigurationRequest{}) if err != nil { return fmt.Errorf("get filer %s configuration: %v", filerGrpcAddress, err) } diff --git a/weed/filesys/dirty_page.go b/weed/filesys/dirty_page.go index 3fbc7dca7..9b0f96951 100644 --- a/weed/filesys/dirty_page.go +++ b/weed/filesys/dirty_page.go @@ -34,7 +34,7 @@ func (pages *ContinuousDirtyPages) releaseResource() { var counter = int32(0) -func (pages *ContinuousDirtyPages) AddPage(ctx context.Context, offset int64, data []byte) (chunks []*filer_pb.FileChunk, err error) { +func (pages *ContinuousDirtyPages) AddPage(offset int64, data []byte) (chunks []*filer_pb.FileChunk, err error) { pages.lock.Lock() defer pages.lock.Unlock() @@ -43,7 +43,7 @@ func (pages *ContinuousDirtyPages) AddPage(ctx context.Context, offset int64, da if len(data) > int(pages.f.wfs.option.ChunkSizeLimit) { // this is more than what buffer can hold. 
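The check above is the write-path branching these signatures serve: a write larger than the configured chunk size cannot be held in the dirty-page buffer, so it is flushed and saved straight away, while smaller writes stay dirty in memory. A rough sketch of that decision with simplified, illustrative types (the real code tracks dirty byte ranges in an interval list, not a flat slice):

package mountsketch

import "sync"

// dirtyPages is a simplified stand-in for the FUSE write buffer: small writes are
// buffered in memory, writes larger than the chunk size limit are saved immediately.
type dirtyPages struct {
	mu             sync.Mutex
	chunkSizeLimit int64
	dirty          []byte // stand-in for the interval list of dirty ranges
	saveChunk      func(offset int64, data []byte) error
}

func (p *dirtyPages) addPage(offset int64, data []byte) error {
	p.mu.Lock()
	defer p.mu.Unlock()

	if int64(len(data)) > p.chunkSizeLimit {
		// More than the buffer can hold: write it through to the volume servers now.
		return p.saveChunk(offset, data)
	}

	// Small write: keep it dirty in memory until a later flush.
	p.dirty = append(p.dirty, data...)
	return nil
}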
- return pages.flushAndSave(ctx, offset, data) + return pages.flushAndSave(offset, data) } pages.intervals.AddInterval(data, offset) @@ -61,7 +61,7 @@ func (pages *ContinuousDirtyPages) AddPage(ctx context.Context, offset int64, da return } -func (pages *ContinuousDirtyPages) flushAndSave(ctx context.Context, offset int64, data []byte) (chunks []*filer_pb.FileChunk, err error) { +func (pages *ContinuousDirtyPages) flushAndSave(offset int64, data []byte) (chunks []*filer_pb.FileChunk, err error) { var chunk *filer_pb.FileChunk var newChunks []*filer_pb.FileChunk @@ -206,7 +206,7 @@ func min(x, y int64) int64 { return y } -func (pages *ContinuousDirtyPages) ReadDirtyData(ctx context.Context, data []byte, startOffset int64) (offset int64, size int) { +func (pages *ContinuousDirtyPages) ReadDirtyData(data []byte, startOffset int64) (offset int64, size int) { pages.lock.Lock() defer pages.lock.Unlock() diff --git a/weed/filesys/filehandle.go b/weed/filesys/filehandle.go index 7b26c5952..100c9eba0 100644 --- a/weed/filesys/filehandle.go +++ b/weed/filesys/filehandle.go @@ -53,9 +53,9 @@ func (fh *FileHandle) Read(ctx context.Context, req *fuse.ReadRequest, resp *fus buff := make([]byte, req.Size) - totalRead, err := fh.readFromChunks(ctx, buff, req.Offset) + totalRead, err := fh.readFromChunks(buff, req.Offset) if err == nil { - dirtyOffset, dirtySize := fh.readFromDirtyPages(ctx, buff, req.Offset) + dirtyOffset, dirtySize := fh.readFromDirtyPages(buff, req.Offset) if totalRead+req.Offset < dirtyOffset+int64(dirtySize) { totalRead = dirtyOffset + int64(dirtySize) - req.Offset } @@ -71,11 +71,11 @@ func (fh *FileHandle) Read(ctx context.Context, req *fuse.ReadRequest, resp *fus return err } -func (fh *FileHandle) readFromDirtyPages(ctx context.Context, buff []byte, startOffset int64) (offset int64, size int) { - return fh.dirtyPages.ReadDirtyData(ctx, buff, startOffset) +func (fh *FileHandle) readFromDirtyPages(buff []byte, startOffset int64) (offset int64, size int) { + return fh.dirtyPages.ReadDirtyData(buff, startOffset) } -func (fh *FileHandle) readFromChunks(ctx context.Context, buff []byte, offset int64) (int64, error) { +func (fh *FileHandle) readFromChunks(buff []byte, offset int64) (int64, error) { // this value should come from the filer instead of the old f if len(fh.f.entry.Chunks) == 0 { @@ -106,7 +106,7 @@ func (fh *FileHandle) Write(ctx context.Context, req *fuse.WriteRequest, resp *f fh.f.entry.Attributes.FileSize = uint64(max(req.Offset+int64(len(req.Data)), int64(fh.f.entry.Attributes.FileSize))) // glog.V(0).Infof("%v write [%d,%d)", fh.f.fullpath(), req.Offset, req.Offset+int64(len(req.Data))) - chunks, err := fh.dirtyPages.AddPage(ctx, req.Offset, req.Data) + chunks, err := fh.dirtyPages.AddPage(req.Offset, req.Data) if err != nil { glog.Errorf("%v write fh %d: [%d,%d): %v", fh.f.fullpath(), fh.handle, req.Offset, req.Offset+int64(len(req.Data)), err) return fuse.EIO diff --git a/weed/notification/gocdk_pub_sub/gocdk_pub_sub.go b/weed/notification/gocdk_pub_sub/gocdk_pub_sub.go index 706261b3a..1ae102509 100644 --- a/weed/notification/gocdk_pub_sub/gocdk_pub_sub.go +++ b/weed/notification/gocdk_pub_sub/gocdk_pub_sub.go @@ -60,8 +60,7 @@ func (k *GoCDKPubSub) SendMessage(key string, message proto.Message) error { if err != nil { return err } - ctx := context.Background() - err = k.topic.Send(ctx, &pubsub.Message{ + err = k.topic.Send(context.Background(), &pubsub.Message{ Body: bytes, Metadata: map[string]string{"key": key}, }) diff --git 
a/weed/replication/sink/b2sink/b2_sink.go b/weed/replication/sink/b2sink/b2_sink.go index d230719ce..e687170dd 100644 --- a/weed/replication/sink/b2sink/b2_sink.go +++ b/weed/replication/sink/b2sink/b2_sink.go @@ -45,8 +45,7 @@ func (g *B2Sink) SetSourceFiler(s *source.FilerSource) { } func (g *B2Sink) initialize(accountId, accountKey, bucket, dir string) error { - ctx := context.Background() - client, err := b2.NewClient(ctx, accountId, accountKey) + client, err := b2.NewClient(context.Background(), accountId, accountKey) if err != nil { return err } diff --git a/weed/replication/sink/gcssink/gcs_sink.go b/weed/replication/sink/gcssink/gcs_sink.go index 31dd5a85a..d5b1e137f 100644 --- a/weed/replication/sink/gcssink/gcs_sink.go +++ b/weed/replication/sink/gcssink/gcs_sink.go @@ -50,7 +50,6 @@ func (g *GcsSink) initialize(google_application_credentials, bucketName, dir str g.bucket = bucketName g.dir = dir - ctx := context.Background() // Creates a client. if google_application_credentials == "" { var found bool @@ -59,7 +58,7 @@ func (g *GcsSink) initialize(google_application_credentials, bucketName, dir str glog.Fatalf("need to specific GOOGLE_APPLICATION_CREDENTIALS env variable or google_application_credentials in replication.toml") } } - client, err := storage.NewClient(ctx, option.WithCredentialsFile(google_application_credentials)) + client, err := storage.NewClient(context.Background(), option.WithCredentialsFile(google_application_credentials)) if err != nil { glog.Fatalf("Failed to create client: %v", err) } diff --git a/weed/replication/sink/s3sink/s3_sink.go b/weed/replication/sink/s3sink/s3_sink.go index fc3e6cd4d..5f548559b 100644 --- a/weed/replication/sink/s3sink/s3_sink.go +++ b/weed/replication/sink/s3sink/s3_sink.go @@ -113,7 +113,7 @@ func (s3sink *S3Sink) CreateEntry(key string, entry *filer_pb.Entry) error { wg.Add(1) go func(chunk *filer2.ChunkView) { defer wg.Done() - if part, uploadErr := s3sink.uploadPart(context.Background(), key, uploadId, partId, chunk); uploadErr != nil { + if part, uploadErr := s3sink.uploadPart(key, uploadId, partId, chunk); uploadErr != nil { err = uploadErr } else { parts = append(parts, part) diff --git a/weed/replication/sink/s3sink/s3_write.go b/weed/replication/sink/s3sink/s3_write.go index 9386169b8..60885c30c 100644 --- a/weed/replication/sink/s3sink/s3_write.go +++ b/weed/replication/sink/s3sink/s3_write.go @@ -103,10 +103,10 @@ func (s3sink *S3Sink) completeMultipartUpload(ctx context.Context, key, uploadId } // To upload a part -func (s3sink *S3Sink) uploadPart(ctx context.Context, key, uploadId string, partId int, chunk *filer2.ChunkView) (*s3.CompletedPart, error) { +func (s3sink *S3Sink) uploadPart(key, uploadId string, partId int, chunk *filer2.ChunkView) (*s3.CompletedPart, error) { var readSeeker io.ReadSeeker - readSeeker, err := s3sink.buildReadSeeker(ctx, chunk) + readSeeker, err := s3sink.buildReadSeeker(chunk) if err != nil { glog.Errorf("[%s] uploadPart %s %d read: %v", s3sink.bucket, key, partId, err) return nil, fmt.Errorf("[%s] uploadPart %s %d read: %v", s3sink.bucket, key, partId, err) @@ -156,7 +156,7 @@ func (s3sink *S3Sink) uploadPartCopy(key, uploadId string, partId int64, copySou return err } -func (s3sink *S3Sink) buildReadSeeker(ctx context.Context, chunk *filer2.ChunkView) (io.ReadSeeker, error) { +func (s3sink *S3Sink) buildReadSeeker(chunk *filer2.ChunkView) (io.ReadSeeker, error) { fileUrl, err := s3sink.filerSource.LookupFileId(chunk.FileId) if err != nil { return nil, err diff --git 
a/weed/s3api/filer_multipart.go b/weed/s3api/filer_multipart.go index d3bde66ee..792127771 100644 --- a/weed/s3api/filer_multipart.go +++ b/weed/s3api/filer_multipart.go @@ -1,7 +1,6 @@ package s3api import ( - "context" "encoding/xml" "fmt" "path/filepath" @@ -11,10 +10,11 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/s3" + "github.com/google/uuid" + "github.com/chrislusf/seaweedfs/weed/filer2" "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" - "github.com/google/uuid" ) type InitiateMultipartUploadResult struct { @@ -22,11 +22,11 @@ type InitiateMultipartUploadResult struct { s3.CreateMultipartUploadOutput } -func (s3a *S3ApiServer) createMultipartUpload(ctx context.Context, input *s3.CreateMultipartUploadInput) (output *InitiateMultipartUploadResult, code ErrorCode) { +func (s3a *S3ApiServer) createMultipartUpload(input *s3.CreateMultipartUploadInput) (output *InitiateMultipartUploadResult, code ErrorCode) { uploadId, _ := uuid.NewRandom() uploadIdString := uploadId.String() - if err := s3a.mkdir(ctx, s3a.genUploadsFolder(*input.Bucket), uploadIdString, func(entry *filer_pb.Entry) { + if err := s3a.mkdir(s3a.genUploadsFolder(*input.Bucket), uploadIdString, func(entry *filer_pb.Entry) { if entry.Extended == nil { entry.Extended = make(map[string][]byte) } @@ -52,11 +52,11 @@ type CompleteMultipartUploadResult struct { s3.CompleteMultipartUploadOutput } -func (s3a *S3ApiServer) completeMultipartUpload(ctx context.Context, input *s3.CompleteMultipartUploadInput) (output *CompleteMultipartUploadResult, code ErrorCode) { +func (s3a *S3ApiServer) completeMultipartUpload(input *s3.CompleteMultipartUploadInput) (output *CompleteMultipartUploadResult, code ErrorCode) { uploadDirectory := s3a.genUploadsFolder(*input.Bucket) + "/" + *input.UploadId - entries, err := s3a.list(ctx, uploadDirectory, "", "", false, 0) + entries, err := s3a.list(uploadDirectory, "", "", false, 0) if err != nil { glog.Errorf("completeMultipartUpload %s %s error: %v", *input.Bucket, *input.UploadId, err) return nil, ErrNoSuchUpload @@ -96,7 +96,7 @@ func (s3a *S3ApiServer) completeMultipartUpload(ctx context.Context, input *s3.C dirName = dirName[:len(dirName)-1] } - err = s3a.mkFile(ctx, dirName, entryName, finalParts) + err = s3a.mkFile(dirName, entryName, finalParts) if err != nil { glog.Errorf("completeMultipartUpload %s/%s error: %v", dirName, entryName, err) @@ -112,22 +112,22 @@ func (s3a *S3ApiServer) completeMultipartUpload(ctx context.Context, input *s3.C }, } - if err = s3a.rm(ctx, s3a.genUploadsFolder(*input.Bucket), *input.UploadId, true, false, true); err != nil { + if err = s3a.rm(s3a.genUploadsFolder(*input.Bucket), *input.UploadId, true, false, true); err != nil { glog.V(1).Infof("completeMultipartUpload cleanup %s upload %s: %v", *input.Bucket, *input.UploadId, err) } return } -func (s3a *S3ApiServer) abortMultipartUpload(ctx context.Context, input *s3.AbortMultipartUploadInput) (output *s3.AbortMultipartUploadOutput, code ErrorCode) { +func (s3a *S3ApiServer) abortMultipartUpload(input *s3.AbortMultipartUploadInput) (output *s3.AbortMultipartUploadOutput, code ErrorCode) { - exists, err := s3a.exists(ctx, s3a.genUploadsFolder(*input.Bucket), *input.UploadId, true) + exists, err := s3a.exists(s3a.genUploadsFolder(*input.Bucket), *input.UploadId, true) if err != nil { glog.V(1).Infof("bucket %s abort upload %s: %v", *input.Bucket, *input.UploadId, err) return nil, ErrNoSuchUpload } if exists { - err = s3a.rm(ctx, 
s3a.genUploadsFolder(*input.Bucket), *input.UploadId, true, true, true) + err = s3a.rm(s3a.genUploadsFolder(*input.Bucket), *input.UploadId, true, true, true) } if err != nil { glog.V(1).Infof("bucket %s remove upload %s: %v", *input.Bucket, *input.UploadId, err) @@ -142,7 +142,7 @@ type ListMultipartUploadsResult struct { s3.ListMultipartUploadsOutput } -func (s3a *S3ApiServer) listMultipartUploads(ctx context.Context, input *s3.ListMultipartUploadsInput) (output *ListMultipartUploadsResult, code ErrorCode) { +func (s3a *S3ApiServer) listMultipartUploads(input *s3.ListMultipartUploadsInput) (output *ListMultipartUploadsResult, code ErrorCode) { output = &ListMultipartUploadsResult{ ListMultipartUploadsOutput: s3.ListMultipartUploadsOutput{ @@ -155,7 +155,7 @@ func (s3a *S3ApiServer) listMultipartUploads(ctx context.Context, input *s3.List }, } - entries, err := s3a.list(ctx, s3a.genUploadsFolder(*input.Bucket), *input.Prefix, *input.KeyMarker, true, int(*input.MaxUploads)) + entries, err := s3a.list(s3a.genUploadsFolder(*input.Bucket), *input.Prefix, *input.KeyMarker, true, int(*input.MaxUploads)) if err != nil { glog.Errorf("listMultipartUploads %s error: %v", *input.Bucket, err) return @@ -179,7 +179,7 @@ type ListPartsResult struct { s3.ListPartsOutput } -func (s3a *S3ApiServer) listObjectParts(ctx context.Context, input *s3.ListPartsInput) (output *ListPartsResult, code ErrorCode) { +func (s3a *S3ApiServer) listObjectParts(input *s3.ListPartsInput) (output *ListPartsResult, code ErrorCode) { output = &ListPartsResult{ ListPartsOutput: s3.ListPartsOutput{ Bucket: input.Bucket, @@ -190,8 +190,7 @@ func (s3a *S3ApiServer) listObjectParts(ctx context.Context, input *s3.ListParts }, } - entries, err := s3a.list(ctx, s3a.genUploadsFolder(*input.Bucket)+"/"+*input.UploadId, - "", fmt.Sprintf("%04d.part", *input.PartNumberMarker), false, int(*input.MaxParts)) + entries, err := s3a.list(s3a.genUploadsFolder(*input.Bucket)+"/"+*input.UploadId, "", fmt.Sprintf("%04d.part", *input.PartNumberMarker), false, int(*input.MaxParts)) if err != nil { glog.Errorf("listObjectParts %s %s error: %v", *input.Bucket, *input.UploadId, err) return nil, ErrNoSuchUpload diff --git a/weed/s3api/filer_util.go b/weed/s3api/filer_util.go index f726de515..b94b30a87 100644 --- a/weed/s3api/filer_util.go +++ b/weed/s3api/filer_util.go @@ -12,7 +12,7 @@ import ( "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" ) -func (s3a *S3ApiServer) mkdir(ctx context.Context, parentDirectoryPath string, dirName string, fn func(entry *filer_pb.Entry)) error { +func (s3a *S3ApiServer) mkdir(parentDirectoryPath string, dirName string, fn func(entry *filer_pb.Entry)) error { return s3a.withFilerClient(func(client filer_pb.SeaweedFilerClient) error { entry := &filer_pb.Entry{ @@ -46,7 +46,7 @@ func (s3a *S3ApiServer) mkdir(ctx context.Context, parentDirectoryPath string, d }) } -func (s3a *S3ApiServer) mkFile(ctx context.Context, parentDirectoryPath string, fileName string, chunks []*filer_pb.FileChunk) error { +func (s3a *S3ApiServer) mkFile(parentDirectoryPath string, fileName string, chunks []*filer_pb.FileChunk) error { return s3a.withFilerClient(func(client filer_pb.SeaweedFilerClient) error { entry := &filer_pb.Entry{ @@ -77,7 +77,7 @@ func (s3a *S3ApiServer) mkFile(ctx context.Context, parentDirectoryPath string, }) } -func (s3a *S3ApiServer) list(ctx context.Context, parentDirectoryPath, prefix, startFrom string, inclusive bool, limit int) (entries []*filer_pb.Entry, err error) { +func (s3a *S3ApiServer) list(parentDirectoryPath, 
prefix, startFrom string, inclusive bool, limit int) (entries []*filer_pb.Entry, err error) { err = s3a.withFilerClient(func(client filer_pb.SeaweedFilerClient) error { @@ -90,7 +90,7 @@ func (s3a *S3ApiServer) list(ctx context.Context, parentDirectoryPath, prefix, s } glog.V(4).Infof("read directory: %v", request) - stream, err := client.ListEntries(ctx, request) + stream, err := client.ListEntries(context.Background(), request) if err != nil { glog.V(0).Infof("read directory %v: %v", request, err) return fmt.Errorf("list dir %v: %v", parentDirectoryPath, err) @@ -117,7 +117,7 @@ func (s3a *S3ApiServer) list(ctx context.Context, parentDirectoryPath, prefix, s } -func (s3a *S3ApiServer) rm(ctx context.Context, parentDirectoryPath string, entryName string, isDirectory, isDeleteData, isRecursive bool) error { +func (s3a *S3ApiServer) rm(parentDirectoryPath, entryName string, isDirectory, isDeleteData, isRecursive bool) error { return s3a.withFilerClient(func(client filer_pb.SeaweedFilerClient) error { @@ -129,7 +129,7 @@ func (s3a *S3ApiServer) rm(ctx context.Context, parentDirectoryPath string, entr } glog.V(1).Infof("delete entry %v/%v: %v", parentDirectoryPath, entryName, request) - if _, err := client.DeleteEntry(ctx, request); err != nil { + if _, err := client.DeleteEntry(context.Background(), request); err != nil { glog.V(0).Infof("delete entry %v: %v", request, err) return fmt.Errorf("delete entry %s/%s: %v", parentDirectoryPath, entryName, err) } @@ -139,13 +139,11 @@ func (s3a *S3ApiServer) rm(ctx context.Context, parentDirectoryPath string, entr } -func (s3a *S3ApiServer) streamRemove(ctx context.Context, quiet bool, - fn func() (finished bool, parentDirectoryPath string, entryName string, isDeleteData, isRecursive bool), - respFn func(err string)) error { +func (s3a *S3ApiServer) streamRemove(quiet bool, fn func() (finished bool, parentDirectoryPath string, entryName string, isDeleteData, isRecursive bool), respFn func(err string)) error { return s3a.withFilerClient(func(client filer_pb.SeaweedFilerClient) error { - stream, err := client.StreamDeleteEntries(ctx) + stream, err := client.StreamDeleteEntries(context.Background()) if err != nil { glog.V(0).Infof("stream delete entry: %v", err) return fmt.Errorf("stream delete entry: %v", err) @@ -194,7 +192,7 @@ func (s3a *S3ApiServer) streamRemove(ctx context.Context, quiet bool, } -func (s3a *S3ApiServer) exists(ctx context.Context, parentDirectoryPath string, entryName string, isDirectory bool) (exists bool, err error) { +func (s3a *S3ApiServer) exists(parentDirectoryPath string, entryName string, isDirectory bool) (exists bool, err error) { err = s3a.withFilerClient(func(client filer_pb.SeaweedFilerClient) error { @@ -204,7 +202,7 @@ func (s3a *S3ApiServer) exists(ctx context.Context, parentDirectoryPath string, } glog.V(4).Infof("exists entry %v/%v: %v", parentDirectoryPath, entryName, request) - resp, err := client.LookupDirectoryEntry(ctx, request) + resp, err := client.LookupDirectoryEntry(context.Background(), request) if err != nil { glog.V(0).Infof("exists entry %v: %v", request, err) return fmt.Errorf("exists entry %s/%s: %v", parentDirectoryPath, entryName, err) diff --git a/weed/s3api/s3api_bucket_handlers.go b/weed/s3api/s3api_bucket_handlers.go index 91267be09..a40c6244c 100644 --- a/weed/s3api/s3api_bucket_handlers.go +++ b/weed/s3api/s3api_bucket_handlers.go @@ -32,7 +32,7 @@ func (s3a *S3ApiServer) ListBucketsHandler(w http.ResponseWriter, r *http.Reques var response ListAllMyBucketsResult - entries, err := 
s3a.list(context.Background(), s3a.option.BucketsPath, "", "", false, math.MaxInt32) + entries, err := s3a.list(s3a.option.BucketsPath, "", "", false, math.MaxInt32) if err != nil { writeErrorResponse(w, ErrInternalError, r.URL) @@ -66,7 +66,7 @@ func (s3a *S3ApiServer) PutBucketHandler(w http.ResponseWriter, r *http.Request) bucket := vars["bucket"] // create the folder for bucket, but lazily create actual collection - if err := s3a.mkdir(context.Background(), s3a.option.BucketsPath, bucket, nil); err != nil { + if err := s3a.mkdir(s3a.option.BucketsPath, bucket, nil); err != nil { writeErrorResponse(w, ErrInternalError, r.URL) return } @@ -79,7 +79,6 @@ func (s3a *S3ApiServer) DeleteBucketHandler(w http.ResponseWriter, r *http.Reque vars := mux.Vars(r) bucket := vars["bucket"] - ctx := context.Background() err := s3a.withFilerClient(func(client filer_pb.SeaweedFilerClient) error { // delete collection @@ -88,14 +87,14 @@ func (s3a *S3ApiServer) DeleteBucketHandler(w http.ResponseWriter, r *http.Reque } glog.V(1).Infof("delete collection: %v", deleteCollectionRequest) - if _, err := client.DeleteCollection(ctx, deleteCollectionRequest); err != nil { + if _, err := client.DeleteCollection(context.Background(), deleteCollectionRequest); err != nil { return fmt.Errorf("delete collection %s: %v", bucket, err) } return nil }) - err = s3a.rm(ctx, s3a.option.BucketsPath, bucket, true, false, true) + err = s3a.rm(s3a.option.BucketsPath, bucket, true, false, true) if err != nil { writeErrorResponse(w, ErrInternalError, r.URL) @@ -110,8 +109,6 @@ func (s3a *S3ApiServer) HeadBucketHandler(w http.ResponseWriter, r *http.Request vars := mux.Vars(r) bucket := vars["bucket"] - ctx := context.Background() - err := s3a.withFilerClient(func(client filer_pb.SeaweedFilerClient) error { request := &filer_pb.LookupDirectoryEntryRequest{ @@ -120,7 +117,7 @@ func (s3a *S3ApiServer) HeadBucketHandler(w http.ResponseWriter, r *http.Request } glog.V(1).Infof("lookup bucket: %v", request) - if resp, err := client.LookupDirectoryEntry(ctx, request); err != nil || resp.Entry == nil { + if resp, err := client.LookupDirectoryEntry(context.Background(), request); err != nil || resp.Entry == nil { return fmt.Errorf("lookup bucket %s/%s: %v", s3a.option.BucketsPath, bucket, err) } diff --git a/weed/s3api/s3api_object_handlers.go b/weed/s3api/s3api_object_handlers.go index b7bdf334a..c14bfa2cb 100644 --- a/weed/s3api/s3api_object_handlers.go +++ b/weed/s3api/s3api_object_handlers.go @@ -1,7 +1,6 @@ package s3api import ( - "context" "crypto/md5" "encoding/json" "encoding/xml" @@ -170,7 +169,7 @@ func (s3a *S3ApiServer) DeleteMultipleObjectsHandler(w http.ResponseWriter, r *h var deletedObjects []ObjectIdentifier var deleteErrors []DeleteError - s3a.streamRemove(context.Background(), deleteObjects.Quiet, func() (finished bool, parentDirectoryPath string, entryName string, isDeleteData, isRecursive bool) { + s3a.streamRemove(deleteObjects.Quiet, func() (finished bool, parentDirectoryPath string, entryName string, isDeleteData, isRecursive bool) { if index >= len(deleteObjects.Objects) { finished = true return diff --git a/weed/s3api/s3api_object_multipart_handlers.go b/weed/s3api/s3api_object_multipart_handlers.go index 4b08ce5e1..3282e4176 100644 --- a/weed/s3api/s3api_object_multipart_handlers.go +++ b/weed/s3api/s3api_object_multipart_handlers.go @@ -1,7 +1,6 @@ package s3api import ( - "context" "fmt" "net/http" "net/url" @@ -27,7 +26,7 @@ func (s3a *S3ApiServer) NewMultipartUploadHandler(w http.ResponseWriter, r *http 
bucket = vars["bucket"] object = vars["object"] - response, errCode := s3a.createMultipartUpload(context.Background(), &s3.CreateMultipartUploadInput{ + response, errCode := s3a.createMultipartUpload(&s3.CreateMultipartUploadInput{ Bucket: aws.String(bucket), Key: objectKey(aws.String(object)), }) @@ -52,7 +51,7 @@ func (s3a *S3ApiServer) CompleteMultipartUploadHandler(w http.ResponseWriter, r // Get upload id. uploadID, _, _, _ := getObjectResources(r.URL.Query()) - response, errCode := s3a.completeMultipartUpload(context.Background(), &s3.CompleteMultipartUploadInput{ + response, errCode := s3a.completeMultipartUpload(&s3.CompleteMultipartUploadInput{ Bucket: aws.String(bucket), Key: objectKey(aws.String(object)), UploadId: aws.String(uploadID), @@ -78,7 +77,7 @@ func (s3a *S3ApiServer) AbortMultipartUploadHandler(w http.ResponseWriter, r *ht // Get upload id. uploadID, _, _, _ := getObjectResources(r.URL.Query()) - response, errCode := s3a.abortMultipartUpload(context.Background(), &s3.AbortMultipartUploadInput{ + response, errCode := s3a.abortMultipartUpload(&s3.AbortMultipartUploadInput{ Bucket: aws.String(bucket), Key: objectKey(aws.String(object)), UploadId: aws.String(uploadID), @@ -113,7 +112,7 @@ func (s3a *S3ApiServer) ListMultipartUploadsHandler(w http.ResponseWriter, r *ht } } - response, errCode := s3a.listMultipartUploads(context.Background(), &s3.ListMultipartUploadsInput{ + response, errCode := s3a.listMultipartUploads(&s3.ListMultipartUploadsInput{ Bucket: aws.String(bucket), Delimiter: aws.String(delimiter), EncodingType: aws.String(encodingType), @@ -150,7 +149,7 @@ func (s3a *S3ApiServer) ListObjectPartsHandler(w http.ResponseWriter, r *http.Re return } - response, errCode := s3a.listObjectParts(context.Background(), &s3.ListPartsInput{ + response, errCode := s3a.listObjectParts(&s3.ListPartsInput{ Bucket: aws.String(bucket), Key: objectKey(aws.String(object)), MaxParts: aws.Int64(int64(maxParts)), @@ -176,10 +175,8 @@ func (s3a *S3ApiServer) PutObjectPartHandler(w http.ResponseWriter, r *http.Requ rAuthType := getRequestAuthType(r) - ctx := context.Background() - uploadID := r.URL.Query().Get("uploadId") - exists, err := s3a.exists(ctx, s3a.genUploadsFolder(bucket), uploadID, true) + exists, err := s3a.exists(s3a.genUploadsFolder(bucket), uploadID, true) if !exists { writeErrorResponse(w, ErrNoSuchUpload, r.URL) return diff --git a/weed/s3api/s3api_objects_list_handlers.go b/weed/s3api/s3api_objects_list_handlers.go index 97dc931ee..5006df6a0 100644 --- a/weed/s3api/s3api_objects_list_handlers.go +++ b/weed/s3api/s3api_objects_list_handlers.go @@ -43,9 +43,7 @@ func (s3a *S3ApiServer) ListObjectsV2Handler(w http.ResponseWriter, r *http.Requ marker = startAfter } - ctx := context.Background() - - response, err := s3a.listFilerEntries(ctx, bucket, originalPrefix, maxKeys, marker) + response, err := s3a.listFilerEntries(bucket, originalPrefix, maxKeys, marker) if err != nil { writeErrorResponse(w, ErrInternalError, r.URL) @@ -63,8 +61,6 @@ func (s3a *S3ApiServer) ListObjectsV1Handler(w http.ResponseWriter, r *http.Requ vars := mux.Vars(r) bucket := vars["bucket"] - ctx := context.Background() - originalPrefix, marker, delimiter, maxKeys := getListObjectsV1Args(r.URL.Query()) if maxKeys < 0 { @@ -76,7 +72,7 @@ func (s3a *S3ApiServer) ListObjectsV1Handler(w http.ResponseWriter, r *http.Requ return } - response, err := s3a.listFilerEntries(ctx, bucket, originalPrefix, maxKeys, marker) + response, err := s3a.listFilerEntries(bucket, originalPrefix, maxKeys, marker) if err != 
nil { writeErrorResponse(w, ErrInternalError, r.URL) @@ -86,7 +82,7 @@ func (s3a *S3ApiServer) ListObjectsV1Handler(w http.ResponseWriter, r *http.Requ writeSuccessResponseXML(w, encodeResponse(response)) } -func (s3a *S3ApiServer) listFilerEntries(ctx context.Context, bucket, originalPrefix string, maxKeys int, marker string) (response ListBucketResult, err error) { +func (s3a *S3ApiServer) listFilerEntries(bucket, originalPrefix string, maxKeys int, marker string) (response ListBucketResult, err error) { // convert full path prefix into directory name and prefix for entry name dir, prefix := filepath.Split(originalPrefix) @@ -105,7 +101,7 @@ func (s3a *S3ApiServer) listFilerEntries(ctx context.Context, bucket, originalPr InclusiveStartFrom: false, } - stream, err := client.ListEntries(ctx, request) + stream, err := client.ListEntries(context.Background(), request) if err != nil { return fmt.Errorf("list buckets: %v", err) } diff --git a/weed/server/filer_grpc_server.go b/weed/server/filer_grpc_server.go index c6ef04e85..30a5cc9de 100644 --- a/weed/server/filer_grpc_server.go +++ b/weed/server/filer_grpc_server.go @@ -232,9 +232,8 @@ func (fs *FilerServer) StreamDeleteEntries(stream filer_pb.SeaweedFiler_StreamDe if err != nil { return fmt.Errorf("receive delete entry request: %v", err) } - ctx := context.Background() fullpath := filer2.FullPath(filepath.ToSlash(filepath.Join(req.Directory, req.Name))) - err = fs.filer.DeleteEntryMetaAndData(ctx, fullpath, req.IsRecursive, req.IgnoreRecursiveError, req.IsDeleteData) + err = fs.filer.DeleteEntryMetaAndData(context.Background(), fullpath, req.IsRecursive, req.IgnoreRecursiveError, req.IsDeleteData) resp := &filer_pb.DeleteEntryResponse{} if err != nil { resp.Error = err.Error() diff --git a/weed/server/volume_grpc_client_to_master.go b/weed/server/volume_grpc_client_to_master.go index dc47c2884..2168afee7 100644 --- a/weed/server/volume_grpc_client_to_master.go +++ b/weed/server/volume_grpc_client_to_master.go @@ -42,7 +42,7 @@ func (vs *VolumeServer) heartbeat() { continue } vs.store.MasterAddress = master - newLeader, err = vs.doHeartbeat(context.Background(), master, masterGrpcAddress, grpcDialOption, time.Duration(vs.pulseSeconds)*time.Second) + newLeader, err = vs.doHeartbeat(master, masterGrpcAddress, grpcDialOption, time.Duration(vs.pulseSeconds)*time.Second) if err != nil { glog.V(0).Infof("heartbeat error: %v", err) time.Sleep(time.Duration(vs.pulseSeconds) * time.Second) @@ -53,16 +53,16 @@ func (vs *VolumeServer) heartbeat() { } } -func (vs *VolumeServer) doHeartbeat(ctx context.Context, masterNode, masterGrpcAddress string, grpcDialOption grpc.DialOption, sleepInterval time.Duration) (newLeader string, err error) { +func (vs *VolumeServer) doHeartbeat(masterNode, masterGrpcAddress string, grpcDialOption grpc.DialOption, sleepInterval time.Duration) (newLeader string, err error) { - grpcConection, err := util.GrpcDial(ctx, masterGrpcAddress, grpcDialOption) + grpcConection, err := util.GrpcDial(context.Background(), masterGrpcAddress, grpcDialOption) if err != nil { return "", fmt.Errorf("fail to dial %s : %v", masterNode, err) } defer grpcConection.Close() client := master_pb.NewSeaweedClient(grpcConection) - stream, err := client.SendHeartbeat(ctx) + stream, err := client.SendHeartbeat(context.Background()) if err != nil { glog.V(0).Infof("SendHeartbeat to %s: %v", masterNode, err) return "", err diff --git a/weed/server/volume_grpc_copy.go b/weed/server/volume_grpc_copy.go index fcd13d364..5cc75e74c 100644 --- 
a/weed/server/volume_grpc_copy.go +++ b/weed/server/volume_grpc_copy.go @@ -55,15 +55,15 @@ func (vs *VolumeServer) VolumeCopy(ctx context.Context, req *volume_server_pb.Vo // println("source:", volFileInfoResp.String()) // copy ecx file - if err := vs.doCopyFile(ctx, client, false, req.Collection, req.VolumeId, volFileInfoResp.CompactionRevision, volFileInfoResp.IdxFileSize, volumeFileName, ".idx", false, false); err != nil { + if err := vs.doCopyFile(client, false, req.Collection, req.VolumeId, volFileInfoResp.CompactionRevision, volFileInfoResp.IdxFileSize, volumeFileName, ".idx", false, false); err != nil { return err } - if err := vs.doCopyFile(ctx, client, false, req.Collection, req.VolumeId, volFileInfoResp.CompactionRevision, volFileInfoResp.DatFileSize, volumeFileName, ".dat", false, true); err != nil { + if err := vs.doCopyFile(client, false, req.Collection, req.VolumeId, volFileInfoResp.CompactionRevision, volFileInfoResp.DatFileSize, volumeFileName, ".dat", false, true); err != nil { return err } - if err := vs.doCopyFile(ctx, client, false, req.Collection, req.VolumeId, volFileInfoResp.CompactionRevision, volFileInfoResp.DatFileSize, volumeFileName, ".vif", false, true); err != nil { + if err := vs.doCopyFile(client, false, req.Collection, req.VolumeId, volFileInfoResp.CompactionRevision, volFileInfoResp.DatFileSize, volumeFileName, ".vif", false, true); err != nil { return err } @@ -95,10 +95,9 @@ func (vs *VolumeServer) VolumeCopy(ctx context.Context, req *volume_server_pb.Vo }, err } -func (vs *VolumeServer) doCopyFile(ctx context.Context, client volume_server_pb.VolumeServerClient, isEcVolume bool, collection string, vid uint32, - compactRevision uint32, stopOffset uint64, baseFileName, ext string, isAppend bool, ignoreSourceFileNotFound bool) error { +func (vs *VolumeServer) doCopyFile(client volume_server_pb.VolumeServerClient, isEcVolume bool, collection string, vid, compactRevision uint32, stopOffset uint64, baseFileName, ext string, isAppend, ignoreSourceFileNotFound bool) error { - copyFileClient, err := client.CopyFile(ctx, &volume_server_pb.CopyFileRequest{ + copyFileClient, err := client.CopyFile(context.Background(), &volume_server_pb.CopyFileRequest{ VolumeId: vid, Ext: ext, CompactionRevision: compactRevision, diff --git a/weed/server/volume_grpc_erasure_coding.go b/weed/server/volume_grpc_erasure_coding.go index 67efc0f6d..66dd5bf8d 100644 --- a/weed/server/volume_grpc_erasure_coding.go +++ b/weed/server/volume_grpc_erasure_coding.go @@ -110,7 +110,7 @@ func (vs *VolumeServer) VolumeEcShardsCopy(ctx context.Context, req *volume_serv // copy ec data slices for _, shardId := range req.ShardIds { - if err := vs.doCopyFile(ctx, client, true, req.Collection, req.VolumeId, math.MaxUint32, math.MaxInt64, baseFileName, erasure_coding.ToExt(int(shardId)), false, false); err != nil { + if err := vs.doCopyFile(client, true, req.Collection, req.VolumeId, math.MaxUint32, math.MaxInt64, baseFileName, erasure_coding.ToExt(int(shardId)), false, false); err != nil { return err } } @@ -118,7 +118,7 @@ func (vs *VolumeServer) VolumeEcShardsCopy(ctx context.Context, req *volume_serv if req.CopyEcxFile { // copy ecx file - if err := vs.doCopyFile(ctx, client, true, req.Collection, req.VolumeId, math.MaxUint32, math.MaxInt64, baseFileName, ".ecx", false, false); err != nil { + if err := vs.doCopyFile(client, true, req.Collection, req.VolumeId, math.MaxUint32, math.MaxInt64, baseFileName, ".ecx", false, false); err != nil { return err } return nil @@ -126,14 +126,14 @@ func (vs 
*VolumeServer) VolumeEcShardsCopy(ctx context.Context, req *volume_serv if req.CopyEcjFile { // copy ecj file - if err := vs.doCopyFile(ctx, client, true, req.Collection, req.VolumeId, math.MaxUint32, math.MaxInt64, baseFileName, ".ecj", true, true); err != nil { + if err := vs.doCopyFile(client, true, req.Collection, req.VolumeId, math.MaxUint32, math.MaxInt64, baseFileName, ".ecj", true, true); err != nil { return err } } if req.CopyVifFile { // copy vif file - if err := vs.doCopyFile(ctx, client, true, req.Collection, req.VolumeId, math.MaxUint32, math.MaxInt64, baseFileName, ".vif", false, true); err != nil { + if err := vs.doCopyFile(client, true, req.Collection, req.VolumeId, math.MaxUint32, math.MaxInt64, baseFileName, ".vif", false, true); err != nil { return err } } diff --git a/weed/server/volume_server_handlers_write.go b/weed/server/volume_server_handlers_write.go index ac8fa4f42..b6a242641 100644 --- a/weed/server/volume_server_handlers_write.go +++ b/weed/server/volume_server_handlers_write.go @@ -1,7 +1,6 @@ package weed_server import ( - "context" "errors" "fmt" "net/http" @@ -98,7 +97,7 @@ func (vs *VolumeServer) DeleteHandler(w http.ResponseWriter, r *http.Request) { ecVolume, hasEcVolume := vs.store.FindEcVolume(volumeId) if hasEcVolume { - count, err := vs.store.DeleteEcShardNeedle(context.Background(), ecVolume, n, cookie) + count, err := vs.store.DeleteEcShardNeedle(ecVolume, n, cookie) writeDeleteResult(err, count, w, r) return } diff --git a/weed/shell/command_ec_balance.go b/weed/shell/command_ec_balance.go index 7230a869f..299d44fed 100644 --- a/weed/shell/command_ec_balance.go +++ b/weed/shell/command_ec_balance.go @@ -1,7 +1,6 @@ package shell import ( - "context" "flag" "fmt" "io" @@ -160,11 +159,9 @@ func collectRacks(allEcNodes []*EcNode) map[RackId]*EcRack { func balanceEcVolumes(commandEnv *CommandEnv, collection string, allEcNodes []*EcNode, racks map[RackId]*EcRack, applyBalancing bool) error { - ctx := context.Background() - fmt.Printf("balanceEcVolumes %s\n", collection) - if err := deleteDuplicatedEcShards(ctx, commandEnv, allEcNodes, collection, applyBalancing); err != nil { + if err := deleteDuplicatedEcShards(commandEnv, allEcNodes, collection, applyBalancing); err != nil { return fmt.Errorf("delete duplicated collection %s ec shards: %v", collection, err) } @@ -179,7 +176,7 @@ func balanceEcVolumes(commandEnv *CommandEnv, collection string, allEcNodes []*E return nil } -func deleteDuplicatedEcShards(ctx context.Context, commandEnv *CommandEnv, allEcNodes []*EcNode, collection string, applyBalancing bool) error { +func deleteDuplicatedEcShards(commandEnv *CommandEnv, allEcNodes []*EcNode, collection string, applyBalancing bool) error { // vid => []ecNode vidLocations := collectVolumeIdToEcNodes(allEcNodes) // deduplicate ec shards diff --git a/weed/shell/command_ec_encode.go b/weed/shell/command_ec_encode.go index e22691c00..6efb05488 100644 --- a/weed/shell/command_ec_encode.go +++ b/weed/shell/command_ec_encode.go @@ -107,7 +107,7 @@ func doEcEncode(commandEnv *CommandEnv, collection string, vid needle.VolumeId) } // balance the ec shards to current cluster - err = spreadEcShards(context.Background(), commandEnv, vid, collection, locations) + err = spreadEcShards(commandEnv, vid, collection, locations) if err != nil { return fmt.Errorf("spread ec shards for volume %d from %s: %v", vid, locations[0].Url, err) } @@ -149,7 +149,7 @@ func generateEcShards(grpcDialOption grpc.DialOption, volumeId needle.VolumeId, } -func spreadEcShards(ctx 
context.Context, commandEnv *CommandEnv, volumeId needle.VolumeId, collection string, existingLocations []wdclient.Location) (err error) { +func spreadEcShards(commandEnv *CommandEnv, volumeId needle.VolumeId, collection string, existingLocations []wdclient.Location) (err error) { allEcNodes, totalFreeEcSlots, err := collectEcNodes(commandEnv, "") if err != nil { diff --git a/weed/shell/command_fs_meta_notify.go b/weed/shell/command_fs_meta_notify.go index e2b2d22cc..099e04506 100644 --- a/weed/shell/command_fs_meta_notify.go +++ b/weed/shell/command_fs_meta_notify.go @@ -1,7 +1,6 @@ package shell import ( - "context" "fmt" "io" @@ -43,11 +42,9 @@ func (c *commandFsMetaNotify) Do(args []string, commandEnv *CommandEnv, writer i v := util.GetViper() notification.LoadConfiguration(v, "notification.") - ctx := context.Background() - var dirCount, fileCount uint64 - err = doTraverseBFS(ctx, writer, commandEnv.getFilerClient(filerServer, filerPort), filer2.FullPath(path), func(parentPath filer2.FullPath, entry *filer_pb.Entry) { + err = doTraverseBFS(writer, commandEnv.getFilerClient(filerServer, filerPort), filer2.FullPath(path), func(parentPath filer2.FullPath, entry *filer_pb.Entry) { if entry.IsDirectory { dirCount++ diff --git a/weed/shell/command_fs_meta_save.go b/weed/shell/command_fs_meta_save.go index 7112c7526..b51fdd0f6 100644 --- a/weed/shell/command_fs_meta_save.go +++ b/weed/shell/command_fs_meta_save.go @@ -1,7 +1,6 @@ package shell import ( - "context" "flag" "fmt" "io" @@ -59,8 +58,6 @@ func (c *commandFsMetaSave) Do(args []string, commandEnv *CommandEnv, writer io. return parseErr } - ctx := context.Background() - t := time.Now() fileName := *outputFileName if fileName == "" { @@ -89,7 +86,7 @@ func (c *commandFsMetaSave) Do(args []string, commandEnv *CommandEnv, writer io. var dirCount, fileCount uint64 - err = doTraverseBFS(ctx, writer, commandEnv.getFilerClient(filerServer, filerPort), filer2.FullPath(path), func(parentPath filer2.FullPath, entry *filer_pb.Entry) { + err = doTraverseBFS(writer, commandEnv.getFilerClient(filerServer, filerPort), filer2.FullPath(path), func(parentPath filer2.FullPath, entry *filer_pb.Entry) { protoMessage := &filer_pb.FullEntry{ Dir: string(parentPath), @@ -128,8 +125,7 @@ func (c *commandFsMetaSave) Do(args []string, commandEnv *CommandEnv, writer io. 
return err } -func doTraverseBFS(ctx context.Context, writer io.Writer, filerClient filer2.FilerClient, - parentPath filer2.FullPath, fn func(parentPath filer2.FullPath, entry *filer_pb.Entry)) (err error) { +func doTraverseBFS(writer io.Writer, filerClient filer2.FilerClient, parentPath filer2.FullPath, fn func(parentPath filer2.FullPath, entry *filer_pb.Entry)) (err error) { K := 5 @@ -151,7 +147,7 @@ func doTraverseBFS(ctx context.Context, writer io.Writer, filerClient filer2.Fil continue } dir := t.(filer2.FullPath) - processErr := processOneDirectory(ctx, writer, filerClient, dir, queue, &jobQueueWg, fn) + processErr := processOneDirectory(writer, filerClient, dir, queue, &jobQueueWg, fn) if processErr != nil { err = processErr } @@ -164,9 +160,7 @@ func doTraverseBFS(ctx context.Context, writer io.Writer, filerClient filer2.Fil return } -func processOneDirectory(ctx context.Context, writer io.Writer, filerClient filer2.FilerClient, - parentPath filer2.FullPath, queue *util.Queue, jobQueueWg *sync.WaitGroup, - fn func(parentPath filer2.FullPath, entry *filer_pb.Entry)) (err error) { +func processOneDirectory(writer io.Writer, filerClient filer2.FilerClient, parentPath filer2.FullPath, queue *util.Queue, jobQueueWg *sync.WaitGroup, fn func(parentPath filer2.FullPath, entry *filer_pb.Entry)) (err error) { return filer2.ReadDirAllEntries(filerClient, parentPath, "", func(entry *filer_pb.Entry, isLast bool) { diff --git a/weed/shell/command_fs_tree.go b/weed/shell/command_fs_tree.go index fb2583240..04530571c 100644 --- a/weed/shell/command_fs_tree.go +++ b/weed/shell/command_fs_tree.go @@ -1,7 +1,6 @@ package shell import ( - "context" "fmt" "io" "strings" @@ -37,9 +36,7 @@ func (c *commandFsTree) Do(args []string, commandEnv *CommandEnv, writer io.Writ dir, name := filer2.FullPath(path).DirAndName() - ctx := context.Background() - - dirCount, fCount, terr := treeTraverseDirectory(ctx, writer, commandEnv.getFilerClient(filerServer, filerPort), filer2.FullPath(dir), name, newPrefix(), -1) + dirCount, fCount, terr := treeTraverseDirectory(writer, commandEnv.getFilerClient(filerServer, filerPort), filer2.FullPath(dir), name, newPrefix(), -1) if terr == nil { fmt.Fprintf(writer, "%d directories, %d files\n", dirCount, fCount) @@ -49,7 +46,7 @@ func (c *commandFsTree) Do(args []string, commandEnv *CommandEnv, writer io.Writ } -func treeTraverseDirectory(ctx context.Context, writer io.Writer, filerClient filer2.FilerClient, dir filer2.FullPath, name string, prefix *Prefix, level int) (directoryCount, fileCount int64, err error) { +func treeTraverseDirectory(writer io.Writer, filerClient filer2.FilerClient, dir filer2.FullPath, name string, prefix *Prefix, level int) (directoryCount, fileCount int64, err error) { prefix.addMarker(level) @@ -65,7 +62,7 @@ func treeTraverseDirectory(ctx context.Context, writer io.Writer, filerClient fi if entry.IsDirectory { directoryCount++ subDir := dir.Child(entry.Name) - dirCount, fCount, terr := treeTraverseDirectory(ctx, writer, filerClient, subDir, "", prefix, level+1) + dirCount, fCount, terr := treeTraverseDirectory(writer, filerClient, subDir, "", prefix, level+1) directoryCount += dirCount fileCount += fCount err = terr diff --git a/weed/storage/store_ec_delete.go b/weed/storage/store_ec_delete.go index 7e2d9e364..4a75fb20b 100644 --- a/weed/storage/store_ec_delete.go +++ b/weed/storage/store_ec_delete.go @@ -12,7 +12,7 @@ import ( "github.com/chrislusf/seaweedfs/weed/storage/types" ) -func (s *Store) DeleteEcShardNeedle(ctx context.Context, ecVolume 
*erasure_coding.EcVolume, n *needle.Needle, cookie types.Cookie) (int64, error) { +func (s *Store) DeleteEcShardNeedle(ecVolume *erasure_coding.EcVolume, n *needle.Needle, cookie types.Cookie) (int64, error) { count, err := s.ReadEcShardNeedle(ecVolume.VolumeId, n) From 86cce3eb58f959066d2d11b8dde82a694e8f0999 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Tue, 25 Feb 2020 22:29:01 -0800 Subject: [PATCH 0157/2432] fix test --- weed/shell/command_ec_test.go | 1 - 1 file changed, 1 deletion(-) diff --git a/weed/shell/command_ec_test.go b/weed/shell/command_ec_test.go index ddd52303c..4fddcbea5 100644 --- a/weed/shell/command_ec_test.go +++ b/weed/shell/command_ec_test.go @@ -1,7 +1,6 @@ package shell import ( - "context" "fmt" "testing" From fd9612d66ef69ba699cd733eccf1ef9940c1d91f Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Tue, 25 Feb 2020 22:37:54 -0800 Subject: [PATCH 0158/2432] remove ctx --- weed/filesys/dir.go | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/weed/filesys/dir.go b/weed/filesys/dir.go index c897ed827..7dcd45f2e 100644 --- a/weed/filesys/dir.go +++ b/weed/filesys/dir.go @@ -46,7 +46,7 @@ func (dir *Dir) Attr(ctx context.Context, attr *fuse.Attr) error { return nil } - if err := dir.maybeLoadEntry(ctx); err != nil { + if err := dir.maybeLoadEntry(); err != nil { glog.V(3).Infof("dir Attr %s,err: %+v", dir.Path, err) return err } @@ -67,7 +67,7 @@ func (dir *Dir) Getxattr(ctx context.Context, req *fuse.GetxattrRequest, resp *f glog.V(4).Infof("dir Getxattr %s", dir.Path) - if err := dir.maybeLoadEntry(ctx); err != nil { + if err := dir.maybeLoadEntry(); err != nil { return err } @@ -332,7 +332,7 @@ func (dir *Dir) Setattr(ctx context.Context, req *fuse.SetattrRequest, resp *fus glog.V(3).Infof("%v dir setattr %+v", dir.Path, req) - if err := dir.maybeLoadEntry(ctx); err != nil { + if err := dir.maybeLoadEntry(); err != nil { return err } @@ -362,7 +362,7 @@ func (dir *Dir) Setxattr(ctx context.Context, req *fuse.SetxattrRequest) error { glog.V(4).Infof("dir Setxattr %s: %s", dir.Path, req.Name) - if err := dir.maybeLoadEntry(ctx); err != nil { + if err := dir.maybeLoadEntry(); err != nil { return err } @@ -380,7 +380,7 @@ func (dir *Dir) Removexattr(ctx context.Context, req *fuse.RemovexattrRequest) e glog.V(4).Infof("dir Removexattr %s: %s", dir.Path, req.Name) - if err := dir.maybeLoadEntry(ctx); err != nil { + if err := dir.maybeLoadEntry(); err != nil { return err } @@ -398,7 +398,7 @@ func (dir *Dir) Listxattr(ctx context.Context, req *fuse.ListxattrRequest, resp glog.V(4).Infof("dir Listxattr %s", dir.Path) - if err := dir.maybeLoadEntry(ctx); err != nil { + if err := dir.maybeLoadEntry(); err != nil { return err } @@ -416,7 +416,7 @@ func (dir *Dir) Forget() { dir.wfs.forgetNode(filer2.FullPath(dir.Path)) } -func (dir *Dir) maybeLoadEntry(ctx context.Context) error { +func (dir *Dir) maybeLoadEntry() error { if dir.entry == nil { parentDirPath, name := filer2.FullPath(dir.Path).DirAndName() entry, err := dir.wfs.maybeLoadEntry(parentDirPath, name) From 36d6595658fbd42634c3b3121a490e519bc579fb Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Tue, 25 Feb 2020 22:38:27 -0800 Subject: [PATCH 0159/2432] remove ctx --- weed/filesys/dir.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/weed/filesys/dir.go b/weed/filesys/dir.go index 7dcd45f2e..1b11ddb9e 100644 --- a/weed/filesys/dir.go +++ b/weed/filesys/dir.go @@ -354,7 +354,7 @@ func (dir *Dir) Setattr(ctx context.Context, req *fuse.SetattrRequest, resp *fus 
dir.wfs.cacheDelete(filer2.FullPath(dir.Path)) - return dir.saveEntry(ctx) + return dir.saveEntry() } @@ -372,7 +372,7 @@ func (dir *Dir) Setxattr(ctx context.Context, req *fuse.SetxattrRequest) error { dir.wfs.cacheDelete(filer2.FullPath(dir.Path)) - return dir.saveEntry(ctx) + return dir.saveEntry() } @@ -390,7 +390,7 @@ func (dir *Dir) Removexattr(ctx context.Context, req *fuse.RemovexattrRequest) e dir.wfs.cacheDelete(filer2.FullPath(dir.Path)) - return dir.saveEntry(ctx) + return dir.saveEntry() } @@ -428,7 +428,7 @@ func (dir *Dir) maybeLoadEntry() error { return nil } -func (dir *Dir) saveEntry(ctx context.Context) error { +func (dir *Dir) saveEntry() error { parentDir, name := filer2.FullPath(dir.Path).DirAndName() From b1f377f8227e97a4fae0db6d38cd045eddba38f6 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Tue, 25 Feb 2020 23:51:34 -0800 Subject: [PATCH 0160/2432] mount: fail fast when connecting to filer fix https://github.com/chrislusf/seaweedfs/issues/1034 --- weed/command/mount_std.go | 22 +++++++++++++++++++++- 1 file changed, 21 insertions(+), 1 deletion(-) diff --git a/weed/command/mount_std.go b/weed/command/mount_std.go index ba7fb852b..1e6444ae1 100644 --- a/weed/command/mount_std.go +++ b/weed/command/mount_std.go @@ -3,6 +3,7 @@ package command import ( + "context" "fmt" "os" "os/user" @@ -16,6 +17,7 @@ import ( "github.com/chrislusf/seaweedfs/weed/filesys" "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" "github.com/chrislusf/seaweedfs/weed/security" "github.com/chrislusf/seaweedfs/weed/util" "github.com/seaweedfs/fuse" @@ -131,6 +133,7 @@ func RunMount(filer, filerMountRootPath, dir, collection, replication, dataCente c.Close() }) + // parse filer grpc address filerGrpcAddress, err := parseFilerGrpcAddress(filer) if err != nil { glog.V(0).Infof("parseFilerGrpcAddress: %v", err) @@ -138,6 +141,23 @@ func RunMount(filer, filerMountRootPath, dir, collection, replication, dataCente return true } + // try to connect to filer, filerBucketsPath may be useful later + filerBucketsPath := "/buckets" + grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.client") + err = withFilerClient(filerGrpcAddress, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error { + resp, err := client.GetFilerConfiguration(context.Background(), &filer_pb.GetFilerConfigurationRequest{}) + if err != nil { + return fmt.Errorf("get filer %s configuration: %v", filerGrpcAddress, err) + } + filerBucketsPath = resp.DirBuckets + return nil + }) + if err != nil { + glog.Fatal(err) + return false + } + + // find mount point mountRoot := filerMountRootPath if mountRoot != "/" && strings.HasSuffix(mountRoot, "/") { mountRoot = mountRoot[0 : len(mountRoot)-1] @@ -147,7 +167,7 @@ func RunMount(filer, filerMountRootPath, dir, collection, replication, dataCente err = fs.Serve(c, filesys.NewSeaweedFileSystem(&filesys.Option{ FilerGrpcAddress: filerGrpcAddress, - GrpcDialOption: security.LoadClientTLS(util.GetViper(), "grpc.client"), + GrpcDialOption: grpcDialOption, FilerMountRootPath: mountRoot, Collection: collection, Replication: replication, From 543cf1c80e222f3ff4be5fcfa88d2a7e9c868de2 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Tue, 25 Feb 2020 23:59:30 -0800 Subject: [PATCH 0161/2432] fix compilation error --- weed/command/mount_std.go | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/weed/command/mount_std.go b/weed/command/mount_std.go index 1e6444ae1..e26b7c3f5 100644 --- a/weed/command/mount_std.go +++ 
b/weed/command/mount_std.go @@ -142,14 +142,12 @@ func RunMount(filer, filerMountRootPath, dir, collection, replication, dataCente } // try to connect to filer, filerBucketsPath may be useful later - filerBucketsPath := "/buckets" grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.client") err = withFilerClient(filerGrpcAddress, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error { - resp, err := client.GetFilerConfiguration(context.Background(), &filer_pb.GetFilerConfigurationRequest{}) + _, err := client.GetFilerConfiguration(context.Background(), &filer_pb.GetFilerConfigurationRequest{}) if err != nil { return fmt.Errorf("get filer %s configuration: %v", filerGrpcAddress, err) } - filerBucketsPath = resp.DirBuckets return nil }) if err != nil { From 0156e2975aa4aabe142301deb72cc2657eb79ee9 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Wed, 26 Feb 2020 16:46:01 -0800 Subject: [PATCH 0162/2432] mount: add mode to run external to SeaweedFS container cluster --- weed/command/mount.go | 30 ++++++++++++++++---------- weed/command/mount_std.go | 36 +++++++++++++++++--------------- weed/filer2/filer_client_util.go | 8 ++++--- weed/filesys/dirty_page.go | 1 + weed/filesys/wfs.go | 16 ++++++++++++++ weed/filesys/wfs_deletion.go | 9 ++++---- weed/server/webdav_server.go | 3 +++ weed/shell/command_fs_du.go | 3 +++ 8 files changed, 71 insertions(+), 35 deletions(-) diff --git a/weed/command/mount.go b/weed/command/mount.go index f09b285f7..4bdb3415a 100644 --- a/weed/command/mount.go +++ b/weed/command/mount.go @@ -7,17 +7,18 @@ import ( ) type MountOptions struct { - filer *string - filerMountRootPath *string - dir *string - dirListCacheLimit *int64 - collection *string - replication *string - ttlSec *int - chunkSizeLimitMB *int - dataCenter *string - allowOthers *bool - umaskString *string + filer *string + filerMountRootPath *string + dir *string + dirListCacheLimit *int64 + collection *string + replication *string + ttlSec *int + chunkSizeLimitMB *int + dataCenter *string + allowOthers *bool + umaskString *string + outsideContainerClusterMode *bool } var ( @@ -41,6 +42,7 @@ func init() { mountOptions.umaskString = cmdMount.Flag.String("umask", "022", "octal umask, e.g., 022, 0111") mountCpuProfile = cmdMount.Flag.String("cpuprofile", "", "cpu profile output file") mountMemProfile = cmdMount.Flag.String("memprofile", "", "memory profile output file") + mountOptions.outsideContainerClusterMode = cmdMount.Flag.Bool("outsideContainerClusterMode", false, "allows other users to access the file system") } var cmdMount = &Command{ @@ -58,6 +60,12 @@ var cmdMount = &Command{ On OS X, it requires OSXFUSE (http://osxfuse.github.com/). + If the SeaweedFS systemm runs in a container cluster, e.g. managed by kubernetes or docker compose, + the volume servers are not accessible by their own ip addresses. + In "outsideContainerClusterMode", the mount will use the filer ip address instead, assuming: + * All volume server containers are accessible through the same hostname or IP address as the filer. + * All volume server container ports are open external to the cluster. 
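+
+  For example, assuming the filer is reachable from the host at filer.example.com:8888
+  (the address is only an illustration), a mount run outside the container cluster could
+  be started with:
+
+    weed mount -filer=filer.example.com:8888 -dir=/mnt/seaweedfs -outsideContainerClusterMode
+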
+ `, } diff --git a/weed/command/mount_std.go b/weed/command/mount_std.go index e26b7c3f5..e8e3fb030 100644 --- a/weed/command/mount_std.go +++ b/weed/command/mount_std.go @@ -46,11 +46,12 @@ func runMount(cmd *Command, args []string) bool { *mountOptions.ttlSec, *mountOptions.dirListCacheLimit, os.FileMode(umask), + *mountOptions.outsideContainerClusterMode, ) } func RunMount(filer, filerMountRootPath, dir, collection, replication, dataCenter string, chunkSizeLimitMB int, - allowOthers bool, ttlSec int, dirListCacheLimit int64, umask os.FileMode) bool { + allowOthers bool, ttlSec int, dirListCacheLimit int64, umask os.FileMode, outsideContainerClusterMode bool) bool { util.LoadConfiguration("security", false) @@ -164,22 +165,23 @@ func RunMount(filer, filerMountRootPath, dir, collection, replication, dataCente daemonize.SignalOutcome(nil) err = fs.Serve(c, filesys.NewSeaweedFileSystem(&filesys.Option{ - FilerGrpcAddress: filerGrpcAddress, - GrpcDialOption: grpcDialOption, - FilerMountRootPath: mountRoot, - Collection: collection, - Replication: replication, - TtlSec: int32(ttlSec), - ChunkSizeLimit: int64(chunkSizeLimitMB) * 1024 * 1024, - DataCenter: dataCenter, - DirListCacheLimit: dirListCacheLimit, - EntryCacheTtl: 3 * time.Second, - MountUid: uid, - MountGid: gid, - MountMode: mountMode, - MountCtime: fileInfo.ModTime(), - MountMtime: time.Now(), - Umask: umask, + FilerGrpcAddress: filerGrpcAddress, + GrpcDialOption: grpcDialOption, + FilerMountRootPath: mountRoot, + Collection: collection, + Replication: replication, + TtlSec: int32(ttlSec), + ChunkSizeLimit: int64(chunkSizeLimitMB) * 1024 * 1024, + DataCenter: dataCenter, + DirListCacheLimit: dirListCacheLimit, + EntryCacheTtl: 3 * time.Second, + MountUid: uid, + MountGid: gid, + MountMode: mountMode, + MountCtime: fileInfo.ModTime(), + MountMtime: time.Now(), + Umask: umask, + OutsideContainerClusterMode: outsideContainerClusterMode, })) if err != nil { fuse.Unmount(dir) diff --git a/weed/filer2/filer_client_util.go b/weed/filer2/filer_client_util.go index 9e03d60c4..ab9db2992 100644 --- a/weed/filer2/filer_client_util.go +++ b/weed/filer2/filer_client_util.go @@ -23,6 +23,7 @@ func VolumeId(fileId string) string { type FilerClient interface { WithFilerClient(fn func(filer_pb.SeaweedFilerClient) error) error + AdjustedUrl(hostAndPort string) string } func ReadIntoBuffer(filerClient FilerClient, fullFilePath FullPath, buff []byte, chunkViews []*ChunkView, baseOffset int64) (totalRead int64, err error) { @@ -67,9 +68,10 @@ func ReadIntoBuffer(filerClient FilerClient, fullFilePath FullPath, buff []byte, return } + volumeServerAddress := filerClient.AdjustedUrl(locations.Locations[0].Url) var n int64 n, err = util.ReadUrl( - fmt.Sprintf("http://%s/%s", locations.Locations[0].Url, chunkView.FileId), + fmt.Sprintf("http://%s/%s", volumeServerAddress, chunkView.FileId), chunkView.Offset, int(chunkView.Size), buff[chunkView.LogicOffset-baseOffset:chunkView.LogicOffset-baseOffset+int64(chunkView.Size)], @@ -77,10 +79,10 @@ func ReadIntoBuffer(filerClient FilerClient, fullFilePath FullPath, buff []byte, if err != nil { - glog.V(0).Infof("%v read http://%s/%v %v bytes: %v", fullFilePath, locations.Locations[0].Url, chunkView.FileId, n, err) + glog.V(0).Infof("%v read http://%s/%v %v bytes: %v", fullFilePath, volumeServerAddress, chunkView.FileId, n, err) err = fmt.Errorf("failed to read http://%s/%s: %v", - locations.Locations[0].Url, chunkView.FileId, err) + volumeServerAddress, chunkView.FileId, err) return } diff --git 
a/weed/filesys/dirty_page.go b/weed/filesys/dirty_page.go index 9b0f96951..67e1d57ef 100644 --- a/weed/filesys/dirty_page.go +++ b/weed/filesys/dirty_page.go @@ -165,6 +165,7 @@ func (pages *ContinuousDirtyPages) saveToStorage(reader io.Reader, offset int64, } fileId, host, auth = resp.FileId, resp.Url, security.EncodedJwt(resp.Auth) + host = pages.f.wfs.AdjustedUrl(host) pages.collection, pages.replication = resp.Collection, resp.Replication return nil diff --git a/weed/filesys/wfs.go b/weed/filesys/wfs.go index 8f4225fb0..aa530f6aa 100644 --- a/weed/filesys/wfs.go +++ b/weed/filesys/wfs.go @@ -5,6 +5,7 @@ import ( "fmt" "math" "os" + "strings" "sync" "time" @@ -37,6 +38,9 @@ type Option struct { MountMode os.FileMode MountCtime time.Time MountMtime time.Time + + // whether the mount runs outside SeaweedFS containers + OutsideContainerClusterMode bool } var _ = fs.FS(&WFS{}) @@ -247,5 +251,17 @@ func (wfs *WFS) forgetNode(fullpath filer2.FullPath) { defer wfs.nodesLock.Unlock() delete(wfs.nodes, fullpath.AsInode()) +} + +func (wfs *WFS) AdjustedUrl(hostAndPort string) string { + if !wfs.option.OutsideContainerClusterMode { + return hostAndPort + } + commaIndex := strings.Index(hostAndPort, ":") + if commaIndex < 0 { + return hostAndPort + } + filerCommaIndex := strings.Index(wfs.option.FilerGrpcAddress, ":") + return fmt.Sprintf("%s:%s", wfs.option.FilerGrpcAddress[:filerCommaIndex], hostAndPort[commaIndex+1:]) } diff --git a/weed/filesys/wfs_deletion.go b/weed/filesys/wfs_deletion.go index f53e95d26..bf21b1808 100644 --- a/weed/filesys/wfs_deletion.go +++ b/weed/filesys/wfs_deletion.go @@ -3,11 +3,12 @@ package filesys import ( "context" + "google.golang.org/grpc" + "github.com/chrislusf/seaweedfs/weed/filer2" "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/operation" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" - "google.golang.org/grpc" ) func (wfs *WFS) deleteFileChunks(chunks []*filer_pb.FileChunk) { @@ -21,12 +22,12 @@ func (wfs *WFS) deleteFileChunks(chunks []*filer_pb.FileChunk) { } wfs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { - deleteFileIds(wfs.option.GrpcDialOption, client, fileIds) + wfs.deleteFileIds(wfs.option.GrpcDialOption, client, fileIds) return nil }) } -func deleteFileIds(grpcDialOption grpc.DialOption, client filer_pb.SeaweedFilerClient, fileIds []string) error { +func (wfs *WFS) deleteFileIds(grpcDialOption grpc.DialOption, client filer_pb.SeaweedFilerClient, fileIds []string) error { var vids []string for _, fileId := range fileIds { @@ -56,7 +57,7 @@ func deleteFileIds(grpcDialOption grpc.DialOption, client filer_pb.SeaweedFilerC } for _, loc := range locations.Locations { lr.Locations = append(lr.Locations, operation.Location{ - Url: loc.Url, + Url: wfs.AdjustedUrl(loc.Url), PublicUrl: loc.PublicUrl, }) } diff --git a/weed/server/webdav_server.go b/weed/server/webdav_server.go index 959e50128..ddd611724 100644 --- a/weed/server/webdav_server.go +++ b/weed/server/webdav_server.go @@ -104,6 +104,9 @@ func (fs *WebDavFileSystem) WithFilerClient(fn func(filer_pb.SeaweedFilerClient) }, fs.option.FilerGrpcAddress, fs.option.GrpcDialOption) } +func (fs *WebDavFileSystem) AdjustedUrl(hostAndPort string) string { + return hostAndPort +} func clearName(name string) (string, error) { slashed := strings.HasSuffix(name, "/") diff --git a/weed/shell/command_fs_du.go b/weed/shell/command_fs_du.go index a1e21bfa6..6c31ebdff 100644 --- a/weed/shell/command_fs_du.go +++ b/weed/shell/command_fs_du.go @@ -105,3 +105,6 @@ 
func (env *CommandEnv) getFilerClient(filerServer string, filerPort int64) *comm func (c *commandFilerClient) WithFilerClient(fn func(filer_pb.SeaweedFilerClient) error) error { return c.env.withFilerClient(c.filerServer, c.filerPort, fn) } +func (c *commandFilerClient) AdjustedUrl(hostAndPort string) string { + return hostAndPort +} From c4de81434711c6734d8dacef14ad5dfa45977d70 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Wed, 26 Feb 2020 16:46:29 -0800 Subject: [PATCH 0163/2432] docker: add cluster mode --- docker/Makefile | 13 +++++++- docker/local-cluster-compose.yml | 53 ++++++++++++++++++++++++++++++++ docker/local-dev-compose.yml | 44 +++++++++++--------------- docker/seaweedfs-compose.yml | 32 +++++++++---------- docker/seaweedfs-dev-compose.yml | 28 ++++++++--------- 5 files changed, 113 insertions(+), 57 deletions(-) create mode 100644 docker/local-cluster-compose.yml diff --git a/docker/Makefile b/docker/Makefile index 4993fb2a5..5a40d36d2 100644 --- a/docker/Makefile +++ b/docker/Makefile @@ -2,7 +2,18 @@ all: gen .PHONY : gen -gen: +gen: dev + +build: cd ../weed; GOOS=linux go build; mv weed ../docker/ + docker build --no-cache -t chrislusf/seaweedfs:local -f Dockerfile.go_build . + rm ./weed + +dev: build docker-compose -f local-dev-compose.yml -p seaweedfs up + +cluster: build + docker-compose -f local-cluster-compose.yml -p seaweedfs up + +clean: rm ./weed diff --git a/docker/local-cluster-compose.yml b/docker/local-cluster-compose.yml new file mode 100644 index 000000000..a5bd25fd6 --- /dev/null +++ b/docker/local-cluster-compose.yml @@ -0,0 +1,53 @@ +version: '2' + +services: + master0: + image: chrislusf/seaweedfs:local + ports: + - 9333:9333 + - 19333:19333 + command: "master -ip=master0 -port=9333 -peers=master0:9333,master1:9334,master2:9335" + master1: + image: chrislusf/seaweedfs:local + ports: + - 9334:9334 + - 19334:19334 + command: "master -ip=master1 -port=9334 -peers=master0:9333,master1:9334,master2:9335" + master2: + image: chrislusf/seaweedfs:local + ports: + - 9335:9335 + - 19335:19335 + command: "master -ip=master2 -port=9335 -peers=master0:9333,master1:9334,master2:9335" + volume: + image: chrislusf/seaweedfs:local + ports: + - 8080:8080 + - 18080:18080 + command: '-v=2 volume -max=5 -mserver="master0:9333,master1:9334,master2:9335" -port=8080 -ip=volume' + depends_on: + - master0 + - master1 + - master2 + filer: + image: chrislusf/seaweedfs:local + ports: + - 8888:8888 + - 18888:18888 + command: '-v=4 filer -master="master0:9333,master1:9334,master2:9335"' + depends_on: + - master0 + - master1 + - master2 + - volume + s3: + image: chrislusf/seaweedfs:local + ports: + - 8333:8333 + command: '-v=4 s3 -filer="filer:8888" -waitForFiler' + depends_on: + - master0 + - master1 + - master2 + - volume + - filer diff --git a/docker/local-dev-compose.yml b/docker/local-dev-compose.yml index c2f588a60..51bea642f 100644 --- a/docker/local-dev-compose.yml +++ b/docker/local-dev-compose.yml @@ -2,42 +2,34 @@ version: '2' services: master: - build: - context: . - dockerfile: Dockerfile.go_build + image: chrislusf/seaweedfs:local ports: - - 9333:9333 - - 19333:19333 + - 9333:9333 + - 19333:19333 command: "master -ip=master" volume: - build: - context: . - dockerfile: Dockerfile.go_build + image: chrislusf/seaweedfs:local ports: - - 8080:8080 - - 18080:18080 + - 8080:8080 + - 18080:18080 command: '-v=2 volume -max=5 -mserver="master:9333" -port=8080 -ip=volume' depends_on: - - master + - master filer: - build: - context: . 
- dockerfile: Dockerfile.go_build + image: chrislusf/seaweedfs:local ports: - - 8888:8888 - - 18888:18888 + - 8888:8888 + - 18888:18888 command: '-v=4 filer -master="master:9333"' depends_on: - - master - - volume + - master + - volume s3: - build: - context: . - dockerfile: Dockerfile.go_build + image: chrislusf/seaweedfs:local ports: - - 8333:8333 - command: '-v=4 s3 -filer="filer:8888"' + - 8333:8333 + command: '-v=4 s3 -filer="filer:8888" -waitForFiler' depends_on: - - master - - volume - - filer + - master + - volume + - filer diff --git a/docker/seaweedfs-compose.yml b/docker/seaweedfs-compose.yml index 7f0cbc6f9..329b6d4e7 100644 --- a/docker/seaweedfs-compose.yml +++ b/docker/seaweedfs-compose.yml @@ -4,28 +4,28 @@ services: master: image: chrislusf/seaweedfs # use a remote image ports: - - 9333:9333 - - 19333:19333 + - 9333:9333 + - 19333:19333 command: "master -ip=master" volume: image: chrislusf/seaweedfs # use a remote image ports: - - 8080:8080 - - 18080:18080 + - 8080:8080 + - 18080:18080 command: 'volume -max=15 -mserver="master:9333" -port=8080' depends_on: - - master + - master filer: image: chrislusf/seaweedfs # use a remote image ports: - - 8888:8888 - - 18888:18888 + - 8888:8888 + - 18888:18888 command: 'filer -master="master:9333"' tty: true stdin_open: true depends_on: - - master - - volume + - master + - volume cronjob: image: chrislusf/seaweedfs # use a remote image command: 'cronjob' @@ -34,14 +34,14 @@ services: CRON_SCHEDULE: '*/2 * * * * *' # Default: '*/5 * * * * *' WEED_MASTER: master:9333 # Default: localhost:9333 depends_on: - - master - - volume + - master + - volume s3: image: chrislusf/seaweedfs # use a remote image ports: - - 8333:8333 - command: 's3 -filer="filer:8888"' + - 8333:8333 + command: 's3 -filer="filer:8888" -waitForFiler' depends_on: - - master - - volume - - filer + - master + - volume + - filer diff --git a/docker/seaweedfs-dev-compose.yml b/docker/seaweedfs-dev-compose.yml index 765770084..a9e3c9e3d 100644 --- a/docker/seaweedfs-dev-compose.yml +++ b/docker/seaweedfs-dev-compose.yml @@ -4,32 +4,32 @@ services: master: image: chrislusf/seaweedfs:dev # use a remote dev image ports: - - 9333:9333 - - 19333:19333 + - 9333:9333 + - 19333:19333 command: "master -ip=master" volume: image: chrislusf/seaweedfs:dev # use a remote dev image ports: - - 8080:8080 - - 18080:18080 + - 8080:8080 + - 18080:18080 command: '-v=2 volume -max=5 -mserver="master:9333" -port=8080 -ip=volume' depends_on: - - master + - master filer: image: chrislusf/seaweedfs:dev # use a remote dev image ports: - - 8888:8888 - - 18888:18888 + - 8888:8888 + - 18888:18888 command: '-v=4 filer -master="master:9333"' depends_on: - - master - - volume + - master + - volume s3: image: chrislusf/seaweedfs:dev # use a remote dev image ports: - - 8333:8333 - command: '-v=4 s3 -filer="filer:8888"' + - 8333:8333 + command: '-v=4 s3 -filer="filer:8888" -waitForFiler' depends_on: - - master - - volume - - filer + - master + - volume + - filer From 556dd76303dd95eb2cfac9e27fc59b0a999a57e6 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Wed, 26 Feb 2020 16:49:47 -0800 Subject: [PATCH 0164/2432] s3: wait to connect to filer --- weed/command/s3.go | 25 +++++++++++++++---------- 1 file changed, 15 insertions(+), 10 deletions(-) diff --git a/weed/command/s3.go b/weed/command/s3.go index 9e8236a83..39d0c04fc 100644 --- a/weed/command/s3.go +++ b/weed/command/s3.go @@ -127,18 +127,23 @@ func (s3opt *S3Options) startS3Server() bool { grpcDialOption := security.LoadClientTLS(util.GetViper(), 
"grpc.client") - err = withFilerClient(filerGrpcAddress, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error { - resp, err := client.GetFilerConfiguration(context.Background(), &filer_pb.GetFilerConfigurationRequest{}) + for { + err = withFilerClient(filerGrpcAddress, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error { + resp, err := client.GetFilerConfiguration(context.Background(), &filer_pb.GetFilerConfigurationRequest{}) + if err != nil { + return fmt.Errorf("get filer %s configuration: %v", filerGrpcAddress, err) + } + filerBucketsPath = resp.DirBuckets + glog.V(0).Infof("S3 read filer buckets dir: %s", filerBucketsPath) + return nil + }) if err != nil { - return fmt.Errorf("get filer %s configuration: %v", filerGrpcAddress, err) + glog.V(0).Infof("wait to connect to filer %s grpc address %s", *s3opt.filer, filerGrpcAddress) + time.Sleep(time.Second) + } else { + glog.V(0).Infof("connected to filer %s grpc address %s", *s3opt.filer, filerGrpcAddress) + break } - filerBucketsPath = resp.DirBuckets - glog.V(0).Infof("S3 read filer buckets dir: %s", filerBucketsPath) - return nil - }) - if err != nil { - glog.Fatal(err) - return false } router := mux.NewRouter().SkipClean(true) From f69a1694e73bb2e24e4c0b0160bce6c133a7d0f8 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Wed, 26 Feb 2020 16:50:40 -0800 Subject: [PATCH 0165/2432] clean up waitForFiler option --- docker/local-cluster-compose.yml | 2 +- docker/local-dev-compose.yml | 2 +- docker/seaweedfs-compose.yml | 2 +- docker/seaweedfs-dev-compose.yml | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/docker/local-cluster-compose.yml b/docker/local-cluster-compose.yml index a5bd25fd6..0b6860fa1 100644 --- a/docker/local-cluster-compose.yml +++ b/docker/local-cluster-compose.yml @@ -44,7 +44,7 @@ services: image: chrislusf/seaweedfs:local ports: - 8333:8333 - command: '-v=4 s3 -filer="filer:8888" -waitForFiler' + command: '-v=4 s3 -filer="filer:8888"' depends_on: - master0 - master1 diff --git a/docker/local-dev-compose.yml b/docker/local-dev-compose.yml index 51bea642f..5ff42ed28 100644 --- a/docker/local-dev-compose.yml +++ b/docker/local-dev-compose.yml @@ -28,7 +28,7 @@ services: image: chrislusf/seaweedfs:local ports: - 8333:8333 - command: '-v=4 s3 -filer="filer:8888" -waitForFiler' + command: '-v=4 s3 -filer="filer:8888"' depends_on: - master - volume diff --git a/docker/seaweedfs-compose.yml b/docker/seaweedfs-compose.yml index 329b6d4e7..35509c541 100644 --- a/docker/seaweedfs-compose.yml +++ b/docker/seaweedfs-compose.yml @@ -40,7 +40,7 @@ services: image: chrislusf/seaweedfs # use a remote image ports: - 8333:8333 - command: 's3 -filer="filer:8888" -waitForFiler' + command: 's3 -filer="filer:8888"' depends_on: - master - volume diff --git a/docker/seaweedfs-dev-compose.yml b/docker/seaweedfs-dev-compose.yml index a9e3c9e3d..197510a9f 100644 --- a/docker/seaweedfs-dev-compose.yml +++ b/docker/seaweedfs-dev-compose.yml @@ -28,7 +28,7 @@ services: image: chrislusf/seaweedfs:dev # use a remote dev image ports: - 8333:8333 - command: '-v=4 s3 -filer="filer:8888" -waitForFiler' + command: '-v=4 s3 -filer="filer:8888"' depends_on: - master - volume From 37a3628b2e9dbd5149060f8688b34182b2d66f6c Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Wed, 26 Feb 2020 16:51:38 -0800 Subject: [PATCH 0166/2432] grpc add PermitWithoutStream to client and server options --- weed/util/grpc_client_server.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/weed/util/grpc_client_server.go 
b/weed/util/grpc_client_server.go index 4dace5e8b..dc896ccb4 100644 --- a/weed/util/grpc_client_server.go +++ b/weed/util/grpc_client_server.go @@ -30,6 +30,7 @@ func NewGrpcServer(opts ...grpc.ServerOption) *grpc.Server { Timeout: 20 * time.Second, // ping timeout }), grpc.KeepaliveEnforcementPolicy(keepalive.EnforcementPolicy{ MinTime: 60 * time.Second, // min time a client should wait before sending a ping + PermitWithoutStream: true, })) for _, opt := range opts { if opt != nil { @@ -48,6 +49,7 @@ func GrpcDial(ctx context.Context, address string, opts ...grpc.DialOption) (*gr grpc.WithKeepaliveParams(keepalive.ClientParameters{ Time: 30 * time.Second, // client ping server if no activity for this long Timeout: 20 * time.Second, + PermitWithoutStream: true, })) for _, opt := range opts { if opt != nil { From ed0acd17227ce4561ff6a2c77564de0bd8062c56 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Wed, 26 Feb 2020 16:52:57 -0800 Subject: [PATCH 0167/2432] go fmt --- weed/s3api/auth_signature_v4.go | 2 +- weed/s3api/auto_signature_v4_test.go | 1 - weed/s3api/s3api_auth.go | 2 +- weed/shell/command_fs_mv.go | 2 +- weed/storage/store.go | 2 +- weed/storage/volume_super_block.go | 2 +- weed/util/grpc_client_server.go | 6 +++--- 7 files changed, 8 insertions(+), 9 deletions(-) diff --git a/weed/s3api/auth_signature_v4.go b/weed/s3api/auth_signature_v4.go index 757016a55..cdfd8be1d 100644 --- a/weed/s3api/auth_signature_v4.go +++ b/weed/s3api/auth_signature_v4.go @@ -48,7 +48,7 @@ func (iam *IdentityAccessManagement) reqSignatureV4Verify(r *http.Request) (*Ide const ( emptySHA256 = "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" streamingContentSHA256 = "STREAMING-AWS4-HMAC-SHA256-PAYLOAD" - signV4ChunkedAlgorithm = "AWS4-HMAC-SHA256-PAYLOAD" + signV4ChunkedAlgorithm = "AWS4-HMAC-SHA256-PAYLOAD" // http Header "x-amz-content-sha256" == "UNSIGNED-PAYLOAD" indicates that the // client did not calculate sha256 of the payload. diff --git a/weed/s3api/auto_signature_v4_test.go b/weed/s3api/auto_signature_v4_test.go index 0502d105c..036b5c052 100644 --- a/weed/s3api/auto_signature_v4_test.go +++ b/weed/s3api/auto_signature_v4_test.go @@ -54,7 +54,6 @@ func TestIsRequestPresignedSignatureV4(t *testing.T) { } } - // Tests is requested authenticated function, tests replies for s3 errors. func TestIsReqAuthenticated(t *testing.T) { iam := NewIdentityAccessManagement("", "") diff --git a/weed/s3api/s3api_auth.go b/weed/s3api/s3api_auth.go index 43afbaae5..bf5cf5fab 100644 --- a/weed/s3api/s3api_auth.go +++ b/weed/s3api/s3api_auth.go @@ -26,7 +26,7 @@ func isRequestSignatureV4(r *http.Request) bool { // Verify if request has AWS Signature Version '2'. func isRequestSignatureV2(r *http.Request) bool { return !strings.HasPrefix(r.Header.Get("Authorization"), signV4Algorithm) && - strings.HasPrefix(r.Header.Get("Authorization"), signV2Algorithm) + strings.HasPrefix(r.Header.Get("Authorization"), signV2Algorithm) } // Verify if request has AWS PreSign Version '4'. 
diff --git a/weed/shell/command_fs_mv.go b/weed/shell/command_fs_mv.go index 78f797f6c..9b74e85e9 100644 --- a/weed/shell/command_fs_mv.go +++ b/weed/shell/command_fs_mv.go @@ -63,7 +63,7 @@ func (c *commandFsMv) Do(args []string, commandEnv *CommandEnv, writer io.Writer var targetDir, targetName string // moving a file or folder - if err == nil && respDestinationLookupEntry.Entry!= nil && respDestinationLookupEntry.Entry.IsDirectory { + if err == nil && respDestinationLookupEntry.Entry != nil && respDestinationLookupEntry.Entry.IsDirectory { // to a directory targetDir = filepath.ToSlash(filepath.Join(destinationDir, destinationName)) targetName = sourceName diff --git a/weed/storage/store.go b/weed/storage/store.go index 2d02e2f80..e29680f6f 100644 --- a/weed/storage/store.go +++ b/weed/storage/store.go @@ -358,7 +358,7 @@ func (s *Store) ConfigureVolume(i needle.VolumeId, replication string) error { } // load, modify, save baseFileName := strings.TrimSuffix(fileInfo.Name(), filepath.Ext(fileInfo.Name())) - vifFile := filepath.Join(location.Directory, baseFileName + ".vif") + vifFile := filepath.Join(location.Directory, baseFileName+".vif") volumeInfo, _, err := pb.MaybeLoadVolumeInfo(vifFile) if err != nil { return fmt.Errorf("volume %d fail to load vif", i) diff --git a/weed/storage/volume_super_block.go b/weed/storage/volume_super_block.go index 1d7f35595..5e913e062 100644 --- a/weed/storage/volume_super_block.go +++ b/weed/storage/volume_super_block.go @@ -37,7 +37,7 @@ func (v *Volume) maybeWriteSuperBlock() error { func (v *Volume) readSuperBlock() (err error) { v.SuperBlock, err = super_block.ReadSuperBlock(v.DataBackend) - if v.volumeInfo != nil && v.volumeInfo.Replication != ""{ + if v.volumeInfo != nil && v.volumeInfo.Replication != "" { if replication, err := super_block.NewReplicaPlacementFromString(v.volumeInfo.Replication); err != nil { return fmt.Errorf("Error parse volume %d replication %s : %v", v.Id, v.volumeInfo.Replication, err) } else { diff --git a/weed/util/grpc_client_server.go b/weed/util/grpc_client_server.go index dc896ccb4..d6a9ee3c3 100644 --- a/weed/util/grpc_client_server.go +++ b/weed/util/grpc_client_server.go @@ -29,7 +29,7 @@ func NewGrpcServer(opts ...grpc.ServerOption) *grpc.Server { Time: 10 * time.Second, // wait time before ping if no activity Timeout: 20 * time.Second, // ping timeout }), grpc.KeepaliveEnforcementPolicy(keepalive.EnforcementPolicy{ - MinTime: 60 * time.Second, // min time a client should wait before sending a ping + MinTime: 60 * time.Second, // min time a client should wait before sending a ping PermitWithoutStream: true, })) for _, opt := range opts { @@ -47,8 +47,8 @@ func GrpcDial(ctx context.Context, address string, opts ...grpc.DialOption) (*gr options = append(options, // grpc.WithInsecure(), grpc.WithKeepaliveParams(keepalive.ClientParameters{ - Time: 30 * time.Second, // client ping server if no activity for this long - Timeout: 20 * time.Second, + Time: 30 * time.Second, // client ping server if no activity for this long + Timeout: 20 * time.Second, PermitWithoutStream: true, })) for _, opt := range opts { From 8c1da714026ac5be61be928a2464735dbc37fcc8 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Wed, 26 Feb 2020 17:27:49 -0800 Subject: [PATCH 0168/2432] remove dead code --- weed/server/master_grpc_server.go | 12 ++---------- 1 file changed, 2 insertions(+), 10 deletions(-) diff --git a/weed/server/master_grpc_server.go b/weed/server/master_grpc_server.go index fcfd98f7b..d308130d6 100644 --- 
a/weed/server/master_grpc_server.go +++ b/weed/server/master_grpc_server.go @@ -3,16 +3,16 @@ package weed_server import ( "fmt" "net" - "strings" "time" "github.com/chrislusf/raft" + "google.golang.org/grpc/peer" + "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/pb/master_pb" "github.com/chrislusf/seaweedfs/weed/storage/backend" "github.com/chrislusf/seaweedfs/weed/storage/needle" "github.com/chrislusf/seaweedfs/weed/topology" - "google.golang.org/grpc/peer" ) func (ms *MasterServer) SendHeartbeat(stream master_pb.Seaweed_SendHeartbeatServer) error { @@ -61,14 +61,6 @@ func (ms *MasterServer) SendHeartbeat(stream master_pb.Seaweed_SendHeartbeatServ t.Sequence.SetMax(heartbeat.MaxFileKey) if dn == nil { - if heartbeat.Ip == "" { - if pr, ok := peer.FromContext(stream.Context()); ok { - if pr.Addr != net.Addr(nil) { - heartbeat.Ip = pr.Addr.String()[0:strings.LastIndex(pr.Addr.String(), ":")] - glog.V(0).Infof("remote IP address is detected as %v", heartbeat.Ip) - } - } - } dcName, rackName := t.Configuration.Locate(heartbeat.Ip, heartbeat.DataCenter, heartbeat.Rack) dc := t.GetOrCreateDataCenter(dcName) rack := dc.GetOrCreateRack(rackName) From 430b5a49e2abd9e3751e8c5840a690541ddc66ca Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Wed, 26 Feb 2020 19:35:00 -0800 Subject: [PATCH 0169/2432] unused code --- weed/storage/volume_vacuum.go | 1 - 1 file changed, 1 deletion(-) diff --git a/weed/storage/volume_vacuum.go b/weed/storage/volume_vacuum.go index 523b37e34..db9765cff 100644 --- a/weed/storage/volume_vacuum.go +++ b/weed/storage/volume_vacuum.go @@ -122,7 +122,6 @@ func (v *Volume) CommitCompact() error { //time.Sleep(20 * time.Second) os.RemoveAll(v.FileName() + ".ldb") - os.RemoveAll(v.FileName() + ".bdb") glog.V(3).Infof("Loading volume %d commit file...", v.Id) if e = v.load(true, false, v.needleMapKind, 0); e != nil { From 555413d9fc4837302ef1c5b2b921b406c9de6777 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Thu, 27 Feb 2020 00:07:13 -0800 Subject: [PATCH 0170/2432] weed queue starts --- other/java/client/src/main/proto/filer.proto | 1 + weed/command/command.go | 1 + weed/command/queue.go | 107 ++++ weed/command/scaffold.go | 6 + weed/filer2/filer.go | 3 +- weed/filer2/leveldb/leveldb_store_test.go | 4 +- weed/filer2/leveldb2/leveldb2_store_test.go | 4 +- weed/pb/Makefile | 1 + weed/pb/filer.proto | 1 + weed/pb/filer_pb/filer.pb.go | 223 +++++---- weed/pb/queue.proto | 64 +++ weed/pb/queue_pb/queue.pb.go | 497 +++++++++++++++++++ weed/server/filer_grpc_server.go | 3 +- weed/server/filer_server.go | 9 +- weed/server/queue_server.go | 49 ++ 15 files changed, 856 insertions(+), 117 deletions(-) create mode 100644 weed/command/queue.go create mode 100644 weed/pb/queue.proto create mode 100644 weed/pb/queue_pb/queue.pb.go create mode 100644 weed/server/queue_server.go diff --git a/other/java/client/src/main/proto/filer.proto b/other/java/client/src/main/proto/filer.proto index 04901770a..9ee552561 100644 --- a/other/java/client/src/main/proto/filer.proto +++ b/other/java/client/src/main/proto/filer.proto @@ -228,4 +228,5 @@ message GetFilerConfigurationResponse { string collection = 3; uint32 max_mb = 4; string dir_buckets = 5; + string dir_queues = 6; } diff --git a/weed/command/command.go b/weed/command/command.go index 79c00d4cd..6687469f1 100644 --- a/weed/command/command.go +++ b/weed/command/command.go @@ -20,6 +20,7 @@ var Commands = []*Command{ cmdS3, cmdUpload, cmdDownload, + cmdQueue, cmdScaffold, cmdShell, cmdVersion, diff --git 
a/weed/command/queue.go b/weed/command/queue.go new file mode 100644 index 000000000..d09d5d8b3 --- /dev/null +++ b/weed/command/queue.go @@ -0,0 +1,107 @@ +package command + +import ( + "context" + "fmt" + "strconv" + "time" + + "google.golang.org/grpc/reflection" + + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/pb/queue_pb" + "github.com/chrislusf/seaweedfs/weed/security" + weed_server "github.com/chrislusf/seaweedfs/weed/server" + + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/util" +) + +var ( + queueStandaloneOptions QueueOptions +) + +type QueueOptions struct { + filer *string + port *int + tlsPrivateKey *string + tlsCertificate *string + defaultTtl *string +} + +func init() { + cmdQueue.Run = runQueue // break init cycle + queueStandaloneOptions.filer = cmdQueue.Flag.String("filer", "localhost:8888", "filer server address") + queueStandaloneOptions.port = cmdQueue.Flag.Int("port", 17777, "queue server gRPC listen port") + queueStandaloneOptions.tlsPrivateKey = cmdQueue.Flag.String("key.file", "", "path to the TLS private key file") + queueStandaloneOptions.tlsCertificate = cmdQueue.Flag.String("cert.file", "", "path to the TLS certificate file") + queueStandaloneOptions.defaultTtl = cmdQueue.Flag.String("ttl", "1h", "time to live, e.g.: 1m, 1h, 1d, 1M, 1y") +} + +var cmdQueue = &Command{ + UsageLine: " queue [-port=17777] [-filer=]", + Short: "start a queue gRPC server that is backed by a filer", + Long: `start a queue gRPC server that is backed by a filer. + +`, +} + +func runQueue(cmd *Command, args []string) bool { + + util.LoadConfiguration("security", false) + + return queueStandaloneOptions.startQueueServer() + +} + +func (queueopt *QueueOptions) startQueueServer() bool { + + filerGrpcAddress, err := parseFilerGrpcAddress(*queueopt.filer) + if err != nil { + glog.Fatal(err) + return false + } + + filerQueuesPath := "/queues" + + grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.client") + + for { + err = withFilerClient(filerGrpcAddress, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error { + resp, err := client.GetFilerConfiguration(context.Background(), &filer_pb.GetFilerConfigurationRequest{}) + if err != nil { + return fmt.Errorf("get filer %s configuration: %v", filerGrpcAddress, err) + } + filerQueuesPath = resp.DirQueues + glog.V(0).Infof("Queue read filer queues dir: %s", filerQueuesPath) + return nil + }) + if err != nil { + glog.V(0).Infof("wait to connect to filer %s grpc address %s", *queueopt.filer, filerGrpcAddress) + time.Sleep(time.Second) + } else { + glog.V(0).Infof("connected to filer %s grpc address %s", *queueopt.filer, filerGrpcAddress) + break + } + } + + qs, err := weed_server.NewQueueServer(&weed_server.QueueServerOption{ + Filers: []string{*queueopt.filer}, + DefaultReplication: "", + MaxMB: 0, + Port: *queueopt.port, + }) + + // start grpc listener + grpcL, err := util.NewListener(":"+strconv.Itoa(*queueopt.port), 0) + if err != nil { + glog.Fatalf("failed to listen on grpc port %d: %v", *queueopt.port, err) + } + grpcS := util.NewGrpcServer(security.LoadServerTLS(util.GetViper(), "grpc.queue")) + queue_pb.RegisterSeaweedQueueServer(grpcS, qs) + reflection.Register(grpcS) + go grpcS.Serve(grpcL) + + return true + +} diff --git a/weed/command/scaffold.go b/weed/command/scaffold.go index 09f1cac6c..156aa8482 100644 --- a/weed/command/scaffold.go +++ b/weed/command/scaffold.go @@ -76,6 +76,8 @@ const ( recursive_delete = false # directories under 
this folder will be automatically creating a separate bucket buckets_folder = /buckets +# directories under this folder will be store message queue data +queues_folder = /queues #################################################### # The following are filer store options @@ -324,6 +326,10 @@ key = "" cert = "" key = "" +[grpc.queue] +cert = "" +key = "" + # use this for any place needs a grpc client # i.e., "weed backup|benchmark|filer.copy|filer.replicate|mount|s3|upload" [grpc.client] diff --git a/weed/filer2/filer.go b/weed/filer2/filer.go index 181a250f2..e70e013ae 100644 --- a/weed/filer2/filer.go +++ b/weed/filer2/filer.go @@ -31,10 +31,11 @@ type Filer struct { fileIdDeletionQueue *util.UnboundedQueue GrpcDialOption grpc.DialOption DirBucketsPath string + DirQueuesPath string buckets *FilerBuckets } -func NewFiler(masters []string, grpcDialOption grpc.DialOption, bucketFolder string) *Filer { +func NewFiler(masters []string, grpcDialOption grpc.DialOption) *Filer { f := &Filer{ directoryCache: ccache.New(ccache.Configure().MaxSize(1000).ItemsToPrune(100)), MasterClient: wdclient.NewMasterClient(grpcDialOption, "filer", masters), diff --git a/weed/filer2/leveldb/leveldb_store_test.go b/weed/filer2/leveldb/leveldb_store_test.go index aaed5a8ae..983e1cbe9 100644 --- a/weed/filer2/leveldb/leveldb_store_test.go +++ b/weed/filer2/leveldb/leveldb_store_test.go @@ -9,7 +9,7 @@ import ( ) func TestCreateAndFind(t *testing.T) { - filer := filer2.NewFiler(nil, nil, "") + filer := filer2.NewFiler(nil, nil) dir, _ := ioutil.TempDir("", "seaweedfs_filer_test") defer os.RemoveAll(dir) store := &LevelDBStore{} @@ -64,7 +64,7 @@ func TestCreateAndFind(t *testing.T) { } func TestEmptyRoot(t *testing.T) { - filer := filer2.NewFiler(nil, nil, "") + filer := filer2.NewFiler(nil, nil) dir, _ := ioutil.TempDir("", "seaweedfs_filer_test2") defer os.RemoveAll(dir) store := &LevelDBStore{} diff --git a/weed/filer2/leveldb2/leveldb2_store_test.go b/weed/filer2/leveldb2/leveldb2_store_test.go index e5146e8bd..58637b7b6 100644 --- a/weed/filer2/leveldb2/leveldb2_store_test.go +++ b/weed/filer2/leveldb2/leveldb2_store_test.go @@ -9,7 +9,7 @@ import ( ) func TestCreateAndFind(t *testing.T) { - filer := filer2.NewFiler(nil, nil, "") + filer := filer2.NewFiler(nil, nil) dir, _ := ioutil.TempDir("", "seaweedfs_filer_test") defer os.RemoveAll(dir) store := &LevelDB2Store{} @@ -64,7 +64,7 @@ func TestCreateAndFind(t *testing.T) { } func TestEmptyRoot(t *testing.T) { - filer := filer2.NewFiler(nil, nil, "") + filer := filer2.NewFiler(nil, nil) dir, _ := ioutil.TempDir("", "seaweedfs_filer_test2") defer os.RemoveAll(dir) store := &LevelDB2Store{} diff --git a/weed/pb/Makefile b/weed/pb/Makefile index edfcd9a72..6680b7ca2 100644 --- a/weed/pb/Makefile +++ b/weed/pb/Makefile @@ -7,5 +7,6 @@ gen: protoc volume_server.proto --go_out=plugins=grpc:./volume_server_pb protoc filer.proto --go_out=plugins=grpc:./filer_pb protoc iam.proto --go_out=plugins=grpc:./iam_pb + protoc queue.proto --go_out=plugins=grpc:./queue_pb # protoc filer.proto --java_out=../../other/java/client/src/main/java cp filer.proto ../../other/java/client/src/main/proto diff --git a/weed/pb/filer.proto b/weed/pb/filer.proto index 04901770a..9ee552561 100644 --- a/weed/pb/filer.proto +++ b/weed/pb/filer.proto @@ -228,4 +228,5 @@ message GetFilerConfigurationResponse { string collection = 3; uint32 max_mb = 4; string dir_buckets = 5; + string dir_queues = 6; } diff --git a/weed/pb/filer_pb/filer.pb.go b/weed/pb/filer_pb/filer.pb.go index d77e5b125..269abb8c7 
100644 --- a/weed/pb/filer_pb/filer.pb.go +++ b/weed/pb/filer_pb/filer.pb.go @@ -1013,6 +1013,7 @@ type GetFilerConfigurationResponse struct { Collection string `protobuf:"bytes,3,opt,name=collection" json:"collection,omitempty"` MaxMb uint32 `protobuf:"varint,4,opt,name=max_mb,json=maxMb" json:"max_mb,omitempty"` DirBuckets string `protobuf:"bytes,5,opt,name=dir_buckets,json=dirBuckets" json:"dir_buckets,omitempty"` + DirQueues string `protobuf:"bytes,6,opt,name=dir_queues,json=dirQueues" json:"dir_queues,omitempty"` } func (m *GetFilerConfigurationResponse) Reset() { *m = GetFilerConfigurationResponse{} } @@ -1055,6 +1056,13 @@ func (m *GetFilerConfigurationResponse) GetDirBuckets() string { return "" } +func (m *GetFilerConfigurationResponse) GetDirQueues() string { + if m != nil { + return m.DirQueues + } + return "" +} + func init() { proto.RegisterType((*LookupDirectoryEntryRequest)(nil), "filer_pb.LookupDirectoryEntryRequest") proto.RegisterType((*LookupDirectoryEntryResponse)(nil), "filer_pb.LookupDirectoryEntryResponse") @@ -1586,112 +1594,113 @@ var _SeaweedFiler_serviceDesc = grpc.ServiceDesc{ func init() { proto.RegisterFile("filer.proto", fileDescriptor0) } var fileDescriptor0 = []byte{ - // 1697 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xb4, 0x58, 0xdb, 0x6e, 0xdb, 0xc8, - 0x19, 0x36, 0x75, 0xe6, 0x2f, 0x29, 0xb1, 0x47, 0x76, 0xa2, 0xc8, 0x87, 0x3a, 0x74, 0x93, 0xba, - 0x48, 0xe0, 0x1a, 0x6e, 0x2e, 0x92, 0xa6, 0xbd, 0x48, 0x7c, 0x28, 0x8c, 0x3a, 0x07, 0xd0, 0x49, - 0x91, 0xa2, 0x40, 0x09, 0x9a, 0x1c, 0xc9, 0x53, 0x93, 0x1c, 0x75, 0x38, 0xb4, 0x9d, 0x3e, 0x4a, - 0x81, 0x5e, 0xf4, 0x19, 0x7a, 0xbb, 0xd8, 0x9b, 0xc5, 0x02, 0xfb, 0x1c, 0xfb, 0x00, 0x7b, 0xb9, - 0xd7, 0x8b, 0x99, 0x21, 0xa9, 0xa1, 0x28, 0xdb, 0xc9, 0x2e, 0x72, 0xc7, 0xf9, 0x4f, 0xf3, 0xcf, - 0xf7, 0x1f, 0x25, 0x68, 0x0f, 0x49, 0x80, 0xd9, 0xd6, 0x98, 0x51, 0x4e, 0x51, 0x4b, 0x1e, 0x9c, - 0xf1, 0x89, 0xf5, 0x06, 0x96, 0x8f, 0x28, 0x3d, 0x4b, 0xc6, 0x7b, 0x84, 0x61, 0x8f, 0x53, 0xf6, - 0x71, 0x3f, 0xe2, 0xec, 0xa3, 0x8d, 0xff, 0x95, 0xe0, 0x98, 0xa3, 0x15, 0x30, 0xfd, 0x8c, 0xd1, - 0x37, 0xd6, 0x8d, 0x4d, 0xd3, 0x9e, 0x10, 0x10, 0x82, 0x5a, 0xe4, 0x86, 0xb8, 0x5f, 0x91, 0x0c, - 0xf9, 0x6d, 0xed, 0xc3, 0xca, 0x6c, 0x83, 0xf1, 0x98, 0x46, 0x31, 0x46, 0x0f, 0xa0, 0x8e, 0x05, - 0x41, 0x5a, 0x6b, 0xef, 0xdc, 0xde, 0xca, 0x5c, 0xd9, 0x52, 0x72, 0x8a, 0x6b, 0x7d, 0x6d, 0x00, - 0x3a, 0x22, 0x31, 0x17, 0x44, 0x82, 0xe3, 0x4f, 0xf3, 0xe7, 0x0e, 0x34, 0xc6, 0x0c, 0x0f, 0xc9, - 0x65, 0xea, 0x51, 0x7a, 0x42, 0x8f, 0x61, 0x21, 0xe6, 0x2e, 0xe3, 0x07, 0x8c, 0x86, 0x07, 0x24, - 0xc0, 0xaf, 0x85, 0xd3, 0x55, 0x29, 0x52, 0x66, 0xa0, 0x2d, 0x40, 0x24, 0xf2, 0x82, 0x24, 0x26, - 0xe7, 0xf8, 0x38, 0xe3, 0xf6, 0x6b, 0xeb, 0xc6, 0x66, 0xcb, 0x9e, 0xc1, 0x41, 0x8b, 0x50, 0x0f, - 0x48, 0x48, 0x78, 0xbf, 0xbe, 0x6e, 0x6c, 0x76, 0x6d, 0x75, 0xb0, 0xfe, 0x08, 0xbd, 0x82, 0xff, - 0x9f, 0xf7, 0xfc, 0xff, 0x56, 0xa0, 0x2e, 0x09, 0x39, 0xc6, 0xc6, 0x04, 0x63, 0x74, 0x1f, 0x3a, - 0x24, 0x76, 0x26, 0x40, 0x54, 0xa4, 0x6f, 0x6d, 0x12, 0xe7, 0x98, 0xa3, 0x47, 0xd0, 0xf0, 0x4e, - 0x93, 0xe8, 0x2c, 0xee, 0x57, 0xd7, 0xab, 0x9b, 0xed, 0x9d, 0xde, 0xe4, 0x22, 0xf1, 0xd0, 0x5d, - 0xc1, 0xb3, 0x53, 0x11, 0xf4, 0x14, 0xc0, 0xe5, 0x9c, 0x91, 0x93, 0x84, 0xe3, 0x58, 0xbe, 0xb4, - 0xbd, 0xd3, 0xd7, 0x14, 0x92, 0x18, 0xbf, 0xc8, 0xf9, 0xb6, 0x26, 0x8b, 0x9e, 0x41, 0x0b, 0x5f, - 0x72, 0x1c, 0xf9, 0xd8, 0xef, 0xd7, 0xe5, 0x45, 0xab, 0x53, 0x2f, 0xda, 0xda, 0x4f, 0xf9, 0xea, - 0x7d, 0xb9, 0xf8, 0xe0, 0x39, 
0x74, 0x0b, 0x2c, 0x34, 0x0f, 0xd5, 0x33, 0x9c, 0x45, 0x55, 0x7c, - 0x0a, 0x64, 0xcf, 0xdd, 0x20, 0x51, 0x09, 0xd6, 0xb1, 0xd5, 0xe1, 0x0f, 0x95, 0xa7, 0x86, 0xb5, - 0x07, 0xe6, 0x41, 0x12, 0x04, 0xb9, 0xa2, 0x4f, 0x58, 0xa6, 0xe8, 0x13, 0x36, 0x41, 0xb9, 0x72, - 0x2d, 0xca, 0x5f, 0x19, 0xb0, 0xb0, 0x7f, 0x8e, 0x23, 0xfe, 0x9a, 0x72, 0x32, 0x24, 0x9e, 0xcb, - 0x09, 0x8d, 0xd0, 0x63, 0x30, 0x69, 0xe0, 0x3b, 0xd7, 0x86, 0xa9, 0x45, 0x83, 0xd4, 0xeb, 0xc7, - 0x60, 0x46, 0xf8, 0xc2, 0xb9, 0xf6, 0xba, 0x56, 0x84, 0x2f, 0x94, 0xf4, 0x06, 0x74, 0x7d, 0x1c, - 0x60, 0x8e, 0x9d, 0x3c, 0x3a, 0x22, 0x74, 0x1d, 0x45, 0xdc, 0x55, 0xe1, 0x78, 0x08, 0xb7, 0x85, - 0xc9, 0xb1, 0xcb, 0x70, 0xc4, 0x9d, 0xb1, 0xcb, 0x4f, 0x65, 0x4c, 0x4c, 0xbb, 0x1b, 0xe1, 0x8b, - 0xb7, 0x92, 0xfa, 0xd6, 0xe5, 0xa7, 0xd6, 0x8f, 0x06, 0x98, 0x79, 0x30, 0xd1, 0x5d, 0x68, 0x8a, - 0x6b, 0x1d, 0xe2, 0xa7, 0x48, 0x34, 0xc4, 0xf1, 0xd0, 0x17, 0x55, 0x41, 0x87, 0xc3, 0x18, 0x73, - 0xe9, 0x5e, 0xd5, 0x4e, 0x4f, 0x22, 0xb3, 0x62, 0xf2, 0x6f, 0x55, 0x08, 0x35, 0x5b, 0x7e, 0x0b, - 0xc4, 0x43, 0x4e, 0x42, 0x2c, 0x2f, 0xac, 0xda, 0xea, 0x80, 0x7a, 0x50, 0xc7, 0x0e, 0x77, 0x47, - 0x32, 0xc3, 0x4d, 0xbb, 0x86, 0xdf, 0xb9, 0x23, 0xf4, 0x6b, 0xb8, 0x15, 0xd3, 0x84, 0x79, 0xd8, - 0xc9, 0xae, 0x6d, 0x48, 0x6e, 0x47, 0x51, 0x0f, 0xd4, 0xe5, 0x16, 0x54, 0x87, 0xc4, 0xef, 0x37, - 0x25, 0x30, 0xf3, 0xc5, 0x24, 0x3c, 0xf4, 0x6d, 0xc1, 0x44, 0xbf, 0x03, 0xc8, 0x2d, 0xf9, 0xfd, - 0xd6, 0x15, 0xa2, 0x66, 0x66, 0xd7, 0xb7, 0x3e, 0x40, 0x23, 0x35, 0xbf, 0x0c, 0xe6, 0x39, 0x0d, - 0x92, 0x30, 0x7f, 0x76, 0xd7, 0x6e, 0x29, 0xc2, 0xa1, 0x8f, 0xee, 0x81, 0xec, 0x73, 0x8e, 0xc8, - 0xaa, 0x8a, 0x7c, 0xa4, 0x44, 0xe8, 0x2f, 0x58, 0x76, 0x0a, 0x8f, 0xd2, 0x33, 0xa2, 0x5e, 0xdf, - 0xb4, 0xd3, 0x93, 0xf5, 0x43, 0x05, 0x6e, 0x15, 0xd3, 0x5d, 0x5c, 0x21, 0xad, 0x48, 0xac, 0x0c, - 0x69, 0x46, 0x9a, 0x3d, 0x2e, 0xe0, 0x55, 0xd1, 0xf1, 0xca, 0x54, 0x42, 0xea, 0xab, 0x0b, 0xba, - 0x4a, 0xe5, 0x15, 0xf5, 0xb1, 0xc8, 0xd6, 0x84, 0xf8, 0x12, 0xe0, 0xae, 0x2d, 0x3e, 0x05, 0x65, - 0x44, 0xfc, 0xb4, 0x7d, 0x88, 0x4f, 0xe9, 0x1e, 0x93, 0x76, 0x1b, 0x2a, 0x64, 0xea, 0x24, 0x42, - 0x16, 0x0a, 0x6a, 0x53, 0xc5, 0x41, 0x7c, 0xa3, 0x75, 0x68, 0x33, 0x3c, 0x0e, 0xd2, 0xec, 0x95, - 0xf0, 0x99, 0xb6, 0x4e, 0x42, 0x6b, 0x00, 0x1e, 0x0d, 0x02, 0xec, 0x49, 0x01, 0x53, 0x0a, 0x68, - 0x14, 0x91, 0x39, 0x9c, 0x07, 0x4e, 0x8c, 0xbd, 0x3e, 0xac, 0x1b, 0x9b, 0x75, 0xbb, 0xc1, 0x79, - 0x70, 0x8c, 0x3d, 0xf1, 0x8e, 0x24, 0xc6, 0xcc, 0x91, 0x0d, 0xa8, 0x2d, 0xf5, 0x5a, 0x82, 0x20, - 0xdb, 0xe4, 0x2a, 0xc0, 0x88, 0xd1, 0x64, 0xac, 0xb8, 0x9d, 0xf5, 0xaa, 0xe8, 0xc5, 0x92, 0x22, - 0xd9, 0x0f, 0xe0, 0x56, 0xfc, 0x31, 0x0c, 0x48, 0x74, 0xe6, 0x70, 0x97, 0x8d, 0x30, 0xef, 0x77, - 0x55, 0x0e, 0xa7, 0xd4, 0x77, 0x92, 0x68, 0x8d, 0x01, 0xed, 0x32, 0xec, 0x72, 0xfc, 0x19, 0x63, - 0xe7, 0xd3, 0xaa, 0x1b, 0x2d, 0x41, 0x83, 0x3a, 0xf8, 0xd2, 0x0b, 0xd2, 0x22, 0xab, 0xd3, 0xfd, - 0x4b, 0x2f, 0xb0, 0x1e, 0x41, 0xaf, 0x70, 0x63, 0xda, 0x98, 0x17, 0xa1, 0x8e, 0x19, 0xa3, 0x59, - 0x1b, 0x51, 0x07, 0xeb, 0x6f, 0x80, 0xde, 0x8f, 0xfd, 0x2f, 0xe1, 0x9e, 0xb5, 0x04, 0xbd, 0x82, - 0x69, 0xe5, 0x87, 0xf5, 0xad, 0x01, 0x68, 0x4f, 0x76, 0x83, 0x5f, 0x36, 0x88, 0x45, 0x7d, 0x8a, - 0x21, 0xa1, 0xba, 0x8d, 0xef, 0x72, 0x37, 0x1d, 0x61, 0x1d, 0x12, 0x2b, 0xfb, 0x7b, 0x2e, 0x77, - 0xd3, 0x51, 0xc2, 0xb0, 0x97, 0x30, 0x31, 0xd5, 0x64, 0x12, 0xca, 0x51, 0x62, 0x67, 0x24, 0xf4, - 0x04, 0xee, 0x90, 0x51, 0x44, 0x19, 0x9e, 0x88, 0x39, 0x0a, 0xaa, 0x86, 0x14, 0x5e, 0x54, 0xdc, - 0x5c, 0x61, 0x5f, 0x22, 0xf7, 0x08, 0x7a, 0x85, 0x67, 
0x5c, 0x0b, 0xf3, 0x7f, 0x0c, 0xe8, 0xbf, - 0xe0, 0x34, 0x24, 0x9e, 0x8d, 0x85, 0xf3, 0x85, 0xa7, 0x6f, 0x40, 0x57, 0xf4, 0xe3, 0xe9, 0xe7, - 0x77, 0x68, 0xe0, 0x4f, 0xe6, 0xdd, 0x3d, 0x10, 0x2d, 0xd9, 0xd1, 0x50, 0x68, 0xd2, 0xc0, 0x97, - 0x99, 0xb8, 0x01, 0xa2, 0x6f, 0x6a, 0xfa, 0x6a, 0xf2, 0x77, 0x22, 0x7c, 0x51, 0xd0, 0x17, 0x42, - 0x52, 0x5f, 0x35, 0xdb, 0x66, 0x84, 0x2f, 0x84, 0xbe, 0xb5, 0x0c, 0xf7, 0x66, 0xf8, 0x96, 0x86, - 0xeb, 0x3b, 0x03, 0x7a, 0x2f, 0xe2, 0x98, 0x8c, 0xa2, 0xbf, 0xca, 0xb6, 0x93, 0x39, 0xbd, 0x08, - 0x75, 0x8f, 0x26, 0x11, 0x97, 0xce, 0xd6, 0x6d, 0x75, 0x98, 0xaa, 0xc4, 0x4a, 0xa9, 0x12, 0xa7, - 0x6a, 0xb9, 0x5a, 0xae, 0x65, 0xad, 0x56, 0x6b, 0x85, 0x5a, 0xfd, 0x15, 0xb4, 0x45, 0x90, 0x1d, - 0x0f, 0x47, 0x1c, 0xb3, 0xb4, 0x53, 0x83, 0x20, 0xed, 0x4a, 0x8a, 0x10, 0xd0, 0x27, 0x8a, 0x6a, - 0xd6, 0x30, 0x9e, 0x8c, 0x93, 0xef, 0x0d, 0x58, 0x2c, 0x3e, 0x25, 0x8d, 0xd9, 0x95, 0x93, 0x45, - 0xb4, 0x32, 0x16, 0xa4, 0xef, 0x10, 0x9f, 0xa2, 0x29, 0x8c, 0x93, 0x93, 0x80, 0x78, 0x8e, 0x60, - 0x28, 0xff, 0x4d, 0x45, 0x79, 0xcf, 0x82, 0x09, 0x2a, 0x35, 0x1d, 0x15, 0x04, 0x35, 0x37, 0xe1, - 0xa7, 0xd9, 0x74, 0x11, 0xdf, 0x53, 0x48, 0x35, 0x6e, 0x42, 0xaa, 0x59, 0x46, 0x2a, 0xcf, 0xb4, - 0x96, 0x9e, 0x69, 0x4f, 0xa0, 0xa7, 0xd6, 0xd3, 0x62, 0xb8, 0x56, 0x01, 0xf2, 0x39, 0x12, 0xf7, - 0x0d, 0xd5, 0xcc, 0xb2, 0x41, 0x12, 0x5b, 0x7f, 0x02, 0xf3, 0x88, 0x2a, 0xbb, 0x31, 0xda, 0x06, - 0x33, 0xc8, 0x0e, 0x52, 0xb4, 0xbd, 0x83, 0x26, 0x35, 0x9e, 0xc9, 0xd9, 0x13, 0x21, 0xeb, 0x39, - 0xb4, 0x32, 0x72, 0x86, 0x99, 0x71, 0x15, 0x66, 0x95, 0x29, 0xcc, 0xac, 0x6f, 0x0c, 0x58, 0x2c, - 0xba, 0x9c, 0x86, 0xe5, 0x3d, 0x74, 0xf3, 0x2b, 0x9c, 0xd0, 0x1d, 0xa7, 0xbe, 0x6c, 0xeb, 0xbe, - 0x94, 0xd5, 0x72, 0x07, 0xe3, 0x57, 0xee, 0x58, 0xe5, 0x72, 0x27, 0xd0, 0x48, 0x83, 0x77, 0xb0, - 0x50, 0x12, 0x99, 0xb1, 0x9b, 0xfd, 0x56, 0xdf, 0xcd, 0x0a, 0xfb, 0x65, 0xae, 0xad, 0x2f, 0x6c, - 0xcf, 0xe0, 0xae, 0x6a, 0x07, 0xbb, 0x79, 0x0c, 0x33, 0xec, 0x8b, 0xa1, 0x36, 0xa6, 0x43, 0x6d, - 0x0d, 0xa0, 0x5f, 0x56, 0x4d, 0xcb, 0x6f, 0x04, 0x0b, 0xc7, 0xdc, 0xe5, 0x24, 0xe6, 0xc4, 0xcb, - 0x7f, 0x24, 0x4c, 0xe5, 0x86, 0x71, 0xd3, 0x44, 0x2c, 0xd7, 0xe1, 0x3c, 0x54, 0x39, 0xcf, 0xf2, - 0x57, 0x7c, 0x8a, 0x28, 0x20, 0xfd, 0xa6, 0x34, 0x06, 0x5f, 0xe0, 0x2a, 0x91, 0x0f, 0x9c, 0x72, - 0x37, 0x50, 0x1b, 0x47, 0x4d, 0x6e, 0x1c, 0xa6, 0xa4, 0xc8, 0x95, 0x43, 0x0d, 0x65, 0x5f, 0x71, - 0xeb, 0x6a, 0x1f, 0x11, 0x04, 0xc9, 0x5c, 0x05, 0x90, 0xa5, 0xaa, 0xaa, 0xac, 0xa1, 0x74, 0x05, - 0x65, 0x57, 0x10, 0xac, 0x35, 0x58, 0xf9, 0x33, 0xe6, 0x62, 0x77, 0x62, 0xbb, 0x34, 0x1a, 0x92, - 0x51, 0xc2, 0x5c, 0x2d, 0x14, 0xd6, 0xff, 0x0d, 0x58, 0xbd, 0x42, 0x20, 0x7d, 0x70, 0x1f, 0x9a, - 0xa1, 0x1b, 0x73, 0xcc, 0xb2, 0x2a, 0xc9, 0x8e, 0xd3, 0x50, 0x54, 0x6e, 0x82, 0xa2, 0x5a, 0x82, - 0x62, 0x09, 0x1a, 0xa1, 0x7b, 0xe9, 0x84, 0x27, 0xe9, 0x72, 0x54, 0x0f, 0xdd, 0xcb, 0x57, 0x27, - 0xb2, 0xb3, 0x11, 0xe6, 0x9c, 0x24, 0xde, 0x19, 0xe6, 0x71, 0xde, 0xd9, 0x08, 0x7b, 0xa9, 0x28, - 0x3b, 0xff, 0x6b, 0x41, 0xe7, 0x18, 0xbb, 0x17, 0x18, 0xfb, 0xd2, 0x73, 0x34, 0xca, 0x2a, 0xa6, - 0xf8, 0x1b, 0x14, 0x3d, 0x98, 0x2e, 0x8d, 0x99, 0x3f, 0x7a, 0x07, 0x0f, 0x6f, 0x12, 0x4b, 0x93, - 0x6f, 0x0e, 0xbd, 0x86, 0xb6, 0xf6, 0x23, 0x0f, 0xad, 0x68, 0x8a, 0xa5, 0xdf, 0xae, 0x83, 0xd5, - 0x2b, 0xb8, 0x99, 0xb5, 0x6d, 0x03, 0x1d, 0x41, 0x5b, 0xdb, 0x4d, 0x74, 0x7b, 0xe5, 0x25, 0x49, - 0xb7, 0x37, 0x63, 0xa1, 0xb1, 0xe6, 0x84, 0x35, 0x6d, 0xc3, 0xd0, 0xad, 0x95, 0x77, 0x1a, 0xdd, - 0xda, 0xac, 0xb5, 0x44, 0x5a, 0xd3, 0x06, 0xba, 0x6e, 0xad, 0xbc, 0xae, 0xe8, 
0xd6, 0x66, 0x6c, - 0x01, 0xd6, 0x1c, 0xfa, 0x00, 0xbd, 0x63, 0xce, 0xb0, 0x1b, 0x4e, 0xd8, 0x53, 0x08, 0xfe, 0x0c, - 0xab, 0x9b, 0xc6, 0xb6, 0x81, 0xfe, 0x01, 0x0b, 0xa5, 0x71, 0x8d, 0xac, 0x89, 0xe6, 0x55, 0x7b, - 0xc6, 0x60, 0xe3, 0x5a, 0x99, 0xdc, 0xf3, 0x37, 0xd0, 0xd1, 0xa7, 0x24, 0xd2, 0x9c, 0x9a, 0xb1, - 0x08, 0x0c, 0xd6, 0xae, 0x62, 0xeb, 0x06, 0xf5, 0x46, 0xad, 0x1b, 0x9c, 0x31, 0xaa, 0x74, 0x83, - 0xb3, 0xfa, 0xbb, 0x35, 0x87, 0xfe, 0x0e, 0xf3, 0xd3, 0x0d, 0x13, 0xdd, 0x9f, 0x86, 0xae, 0xd4, - 0x87, 0x07, 0xd6, 0x75, 0x22, 0xb9, 0xf1, 0x43, 0x80, 0x49, 0x1f, 0x44, 0xcb, 0x13, 0x9d, 0x52, - 0x1f, 0x1e, 0xac, 0xcc, 0x66, 0xe6, 0xa6, 0xfe, 0x09, 0x4b, 0x33, 0x9b, 0x0d, 0xd2, 0x0a, 0xf0, - 0xba, 0x76, 0x35, 0xf8, 0xcd, 0x8d, 0x72, 0xd9, 0x5d, 0x2f, 0xd7, 0x60, 0x3e, 0x56, 0x2d, 0x62, - 0x18, 0x6f, 0x79, 0x01, 0xc1, 0x11, 0x7f, 0x09, 0x52, 0xe3, 0x2d, 0xa3, 0x9c, 0x9e, 0x34, 0xe4, - 0x1f, 0x63, 0xbf, 0xff, 0x29, 0x00, 0x00, 0xff, 0xff, 0x83, 0xa2, 0xc7, 0xb2, 0x27, 0x13, 0x00, + // 1713 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xb4, 0x58, 0xcb, 0x6e, 0xdb, 0xca, + 0x19, 0x36, 0x75, 0xe7, 0x2f, 0x29, 0xb1, 0x47, 0x76, 0xa2, 0xc8, 0x97, 0x3a, 0x74, 0x93, 0xba, + 0x48, 0xe0, 0x1a, 0x6e, 0x16, 0x49, 0xd3, 0x2e, 0x12, 0x5f, 0x0a, 0xa3, 0xce, 0xa5, 0x74, 0x52, + 0xa4, 0x28, 0x50, 0x82, 0x26, 0x47, 0xf2, 0xd4, 0x24, 0x47, 0x19, 0x0e, 0x6d, 0xa7, 0x8f, 0x52, + 0xa0, 0x8b, 0xbe, 0x47, 0xd1, 0x4d, 0x51, 0xa0, 0xeb, 0xf3, 0x08, 0xe7, 0x01, 0xce, 0xf2, 0xac, + 0x0f, 0x66, 0x86, 0xa4, 0x86, 0xa2, 0x6c, 0x27, 0xe7, 0x20, 0x3b, 0xce, 0x7f, 0x9b, 0x7f, 0xbe, + 0xff, 0x2a, 0x41, 0x7b, 0x48, 0x02, 0xcc, 0xb6, 0xc6, 0x8c, 0x72, 0x8a, 0x5a, 0xf2, 0xe0, 0x8c, + 0x4f, 0xac, 0x37, 0xb0, 0x7c, 0x44, 0xe9, 0x59, 0x32, 0xde, 0x23, 0x0c, 0x7b, 0x9c, 0xb2, 0x4f, + 0xfb, 0x11, 0x67, 0x9f, 0x6c, 0xfc, 0x31, 0xc1, 0x31, 0x47, 0x2b, 0x60, 0xfa, 0x19, 0xa3, 0x6f, + 0xac, 0x1b, 0x9b, 0xa6, 0x3d, 0x21, 0x20, 0x04, 0xb5, 0xc8, 0x0d, 0x71, 0xbf, 0x22, 0x19, 0xf2, + 0xdb, 0xda, 0x87, 0x95, 0xd9, 0x06, 0xe3, 0x31, 0x8d, 0x62, 0x8c, 0x1e, 0x40, 0x1d, 0x0b, 0x82, + 0xb4, 0xd6, 0xde, 0xb9, 0xbd, 0x95, 0xb9, 0xb2, 0xa5, 0xe4, 0x14, 0xd7, 0xfa, 0x8f, 0x01, 0xe8, + 0x88, 0xc4, 0x5c, 0x10, 0x09, 0x8e, 0x3f, 0xcf, 0x9f, 0x3b, 0xd0, 0x18, 0x33, 0x3c, 0x24, 0x97, + 0xa9, 0x47, 0xe9, 0x09, 0x3d, 0x86, 0x85, 0x98, 0xbb, 0x8c, 0x1f, 0x30, 0x1a, 0x1e, 0x90, 0x00, + 0xbf, 0x16, 0x4e, 0x57, 0xa5, 0x48, 0x99, 0x81, 0xb6, 0x00, 0x91, 0xc8, 0x0b, 0x92, 0x98, 0x9c, + 0xe3, 0xe3, 0x8c, 0xdb, 0xaf, 0xad, 0x1b, 0x9b, 0x2d, 0x7b, 0x06, 0x07, 0x2d, 0x42, 0x3d, 0x20, + 0x21, 0xe1, 0xfd, 0xfa, 0xba, 0xb1, 0xd9, 0xb5, 0xd5, 0xc1, 0xfa, 0x2d, 0xf4, 0x0a, 0xfe, 0x7f, + 0xd9, 0xf3, 0xff, 0x59, 0x81, 0xba, 0x24, 0xe4, 0x18, 0x1b, 0x13, 0x8c, 0xd1, 0x7d, 0xe8, 0x90, + 0xd8, 0x99, 0x00, 0x51, 0x91, 0xbe, 0xb5, 0x49, 0x9c, 0x63, 0x8e, 0x1e, 0x41, 0xc3, 0x3b, 0x4d, + 0xa2, 0xb3, 0xb8, 0x5f, 0x5d, 0xaf, 0x6e, 0xb6, 0x77, 0x7a, 0x93, 0x8b, 0xc4, 0x43, 0x77, 0x05, + 0xcf, 0x4e, 0x45, 0xd0, 0x53, 0x00, 0x97, 0x73, 0x46, 0x4e, 0x12, 0x8e, 0x63, 0xf9, 0xd2, 0xf6, + 0x4e, 0x5f, 0x53, 0x48, 0x62, 0xfc, 0x22, 0xe7, 0xdb, 0x9a, 0x2c, 0x7a, 0x06, 0x2d, 0x7c, 0xc9, + 0x71, 0xe4, 0x63, 0xbf, 0x5f, 0x97, 0x17, 0xad, 0x4e, 0xbd, 0x68, 0x6b, 0x3f, 0xe5, 0xab, 0xf7, + 0xe5, 0xe2, 0x83, 0xe7, 0xd0, 0x2d, 0xb0, 0xd0, 0x3c, 0x54, 0xcf, 0x70, 0x16, 0x55, 0xf1, 0x29, + 0x90, 0x3d, 0x77, 0x83, 0x44, 0x25, 0x58, 0xc7, 0x56, 0x87, 0xdf, 0x54, 0x9e, 0x1a, 0xd6, 0x1e, + 0x98, 0x07, 0x49, 0x10, 0xe4, 0x8a, 0x3e, 0x61, 0x99, 
0xa2, 0x4f, 0xd8, 0x04, 0xe5, 0xca, 0xb5, + 0x28, 0xff, 0xdb, 0x80, 0x85, 0xfd, 0x73, 0x1c, 0xf1, 0xd7, 0x94, 0x93, 0x21, 0xf1, 0x5c, 0x4e, + 0x68, 0x84, 0x1e, 0x83, 0x49, 0x03, 0xdf, 0xb9, 0x36, 0x4c, 0x2d, 0x1a, 0xa4, 0x5e, 0x3f, 0x06, + 0x33, 0xc2, 0x17, 0xce, 0xb5, 0xd7, 0xb5, 0x22, 0x7c, 0xa1, 0xa4, 0x37, 0xa0, 0xeb, 0xe3, 0x00, + 0x73, 0xec, 0xe4, 0xd1, 0x11, 0xa1, 0xeb, 0x28, 0xe2, 0xae, 0x0a, 0xc7, 0x43, 0xb8, 0x2d, 0x4c, + 0x8e, 0x5d, 0x86, 0x23, 0xee, 0x8c, 0x5d, 0x7e, 0x2a, 0x63, 0x62, 0xda, 0xdd, 0x08, 0x5f, 0xbc, + 0x95, 0xd4, 0xb7, 0x2e, 0x3f, 0xb5, 0xbe, 0x37, 0xc0, 0xcc, 0x83, 0x89, 0xee, 0x42, 0x53, 0x5c, + 0xeb, 0x10, 0x3f, 0x45, 0xa2, 0x21, 0x8e, 0x87, 0xbe, 0xa8, 0x0a, 0x3a, 0x1c, 0xc6, 0x98, 0x4b, + 0xf7, 0xaa, 0x76, 0x7a, 0x12, 0x99, 0x15, 0x93, 0xbf, 0xab, 0x42, 0xa8, 0xd9, 0xf2, 0x5b, 0x20, + 0x1e, 0x72, 0x12, 0x62, 0x79, 0x61, 0xd5, 0x56, 0x07, 0xd4, 0x83, 0x3a, 0x76, 0xb8, 0x3b, 0x92, + 0x19, 0x6e, 0xda, 0x35, 0xfc, 0xce, 0x1d, 0xa1, 0x9f, 0xc3, 0xad, 0x98, 0x26, 0xcc, 0xc3, 0x4e, + 0x76, 0x6d, 0x43, 0x72, 0x3b, 0x8a, 0x7a, 0xa0, 0x2e, 0xb7, 0xa0, 0x3a, 0x24, 0x7e, 0xbf, 0x29, + 0x81, 0x99, 0x2f, 0x26, 0xe1, 0xa1, 0x6f, 0x0b, 0x26, 0xfa, 0x15, 0x40, 0x6e, 0xc9, 0xef, 0xb7, + 0xae, 0x10, 0x35, 0x33, 0xbb, 0xbe, 0xf5, 0x01, 0x1a, 0xa9, 0xf9, 0x65, 0x30, 0xcf, 0x69, 0x90, + 0x84, 0xf9, 0xb3, 0xbb, 0x76, 0x4b, 0x11, 0x0e, 0x7d, 0x74, 0x0f, 0x64, 0x9f, 0x73, 0x44, 0x56, + 0x55, 0xe4, 0x23, 0x25, 0x42, 0x7f, 0xc0, 0xb2, 0x53, 0x78, 0x94, 0x9e, 0x11, 0xf5, 0xfa, 0xa6, + 0x9d, 0x9e, 0xac, 0xef, 0x2a, 0x70, 0xab, 0x98, 0xee, 0xe2, 0x0a, 0x69, 0x45, 0x62, 0x65, 0x48, + 0x33, 0xd2, 0xec, 0x71, 0x01, 0xaf, 0x8a, 0x8e, 0x57, 0xa6, 0x12, 0x52, 0x5f, 0x5d, 0xd0, 0x55, + 0x2a, 0xaf, 0xa8, 0x8f, 0x45, 0xb6, 0x26, 0xc4, 0x97, 0x00, 0x77, 0x6d, 0xf1, 0x29, 0x28, 0x23, + 0xe2, 0xa7, 0xed, 0x43, 0x7c, 0x4a, 0xf7, 0x98, 0xb4, 0xdb, 0x50, 0x21, 0x53, 0x27, 0x11, 0xb2, + 0x50, 0x50, 0x9b, 0x2a, 0x0e, 0xe2, 0x1b, 0xad, 0x43, 0x9b, 0xe1, 0x71, 0x90, 0x66, 0xaf, 0x84, + 0xcf, 0xb4, 0x75, 0x12, 0x5a, 0x03, 0xf0, 0x68, 0x10, 0x60, 0x4f, 0x0a, 0x98, 0x52, 0x40, 0xa3, + 0x88, 0xcc, 0xe1, 0x3c, 0x70, 0x62, 0xec, 0xf5, 0x61, 0xdd, 0xd8, 0xac, 0xdb, 0x0d, 0xce, 0x83, + 0x63, 0xec, 0x89, 0x77, 0x24, 0x31, 0x66, 0x8e, 0x6c, 0x40, 0x6d, 0xa9, 0xd7, 0x12, 0x04, 0xd9, + 0x26, 0x57, 0x01, 0x46, 0x8c, 0x26, 0x63, 0xc5, 0xed, 0xac, 0x57, 0x45, 0x2f, 0x96, 0x14, 0xc9, + 0x7e, 0x00, 0xb7, 0xe2, 0x4f, 0x61, 0x40, 0xa2, 0x33, 0x87, 0xbb, 0x6c, 0x84, 0x79, 0xbf, 0xab, + 0x72, 0x38, 0xa5, 0xbe, 0x93, 0x44, 0x6b, 0x0c, 0x68, 0x97, 0x61, 0x97, 0xe3, 0x2f, 0x18, 0x3b, + 0x9f, 0x57, 0xdd, 0x68, 0x09, 0x1a, 0xd4, 0xc1, 0x97, 0x5e, 0x90, 0x16, 0x59, 0x9d, 0xee, 0x5f, + 0x7a, 0x81, 0xf5, 0x08, 0x7a, 0x85, 0x1b, 0xd3, 0xc6, 0xbc, 0x08, 0x75, 0xcc, 0x18, 0xcd, 0xda, + 0x88, 0x3a, 0x58, 0x7f, 0x06, 0xf4, 0x7e, 0xec, 0x7f, 0x0d, 0xf7, 0xac, 0x25, 0xe8, 0x15, 0x4c, + 0x2b, 0x3f, 0xac, 0xff, 0x19, 0x80, 0xf6, 0x64, 0x37, 0xf8, 0x69, 0x83, 0x58, 0xd4, 0xa7, 0x18, + 0x12, 0xaa, 0xdb, 0xf8, 0x2e, 0x77, 0xd3, 0x11, 0xd6, 0x21, 0xb1, 0xb2, 0xbf, 0xe7, 0x72, 0x37, + 0x1d, 0x25, 0x0c, 0x7b, 0x09, 0x13, 0x53, 0x4d, 0x26, 0xa1, 0x1c, 0x25, 0x76, 0x46, 0x42, 0x4f, + 0xe0, 0x0e, 0x19, 0x45, 0x94, 0xe1, 0x89, 0x98, 0xa3, 0xa0, 0x6a, 0x48, 0xe1, 0x45, 0xc5, 0xcd, + 0x15, 0xf6, 0x25, 0x72, 0x8f, 0xa0, 0x57, 0x78, 0xc6, 0xb5, 0x30, 0xff, 0xc3, 0x80, 0xfe, 0x0b, + 0x4e, 0x43, 0xe2, 0xd9, 0x58, 0x38, 0x5f, 0x78, 0xfa, 0x06, 0x74, 0x45, 0x3f, 0x9e, 0x7e, 0x7e, + 0x87, 0x06, 0xfe, 0x64, 0xde, 0xdd, 0x03, 0xd1, 0x92, 0x1d, 0x0d, 0x85, 0x26, 
0x0d, 0x7c, 0x99, + 0x89, 0x1b, 0x20, 0xfa, 0xa6, 0xa6, 0xaf, 0x26, 0x7f, 0x27, 0xc2, 0x17, 0x05, 0x7d, 0x21, 0x24, + 0xf5, 0x55, 0xb3, 0x6d, 0x46, 0xf8, 0x42, 0xe8, 0x5b, 0xcb, 0x70, 0x6f, 0x86, 0x6f, 0x69, 0xb8, + 0xfe, 0x6f, 0x40, 0xef, 0x45, 0x1c, 0x93, 0x51, 0xf4, 0x27, 0xd9, 0x76, 0x32, 0xa7, 0x17, 0xa1, + 0xee, 0xd1, 0x24, 0xe2, 0xd2, 0xd9, 0xba, 0xad, 0x0e, 0x53, 0x95, 0x58, 0x29, 0x55, 0xe2, 0x54, + 0x2d, 0x57, 0xcb, 0xb5, 0xac, 0xd5, 0x6a, 0xad, 0x50, 0xab, 0x3f, 0x83, 0xb6, 0x08, 0xb2, 0xe3, + 0xe1, 0x88, 0x63, 0x96, 0x76, 0x6a, 0x10, 0xa4, 0x5d, 0x49, 0x11, 0x02, 0xfa, 0x44, 0x51, 0xcd, + 0x1a, 0xc6, 0x93, 0x71, 0xf2, 0xad, 0x01, 0x8b, 0xc5, 0xa7, 0xa4, 0x31, 0xbb, 0x72, 0xb2, 0x88, + 0x56, 0xc6, 0x82, 0xf4, 0x1d, 0xe2, 0x53, 0x34, 0x85, 0x71, 0x72, 0x12, 0x10, 0xcf, 0x11, 0x0c, + 0xe5, 0xbf, 0xa9, 0x28, 0xef, 0x59, 0x30, 0x41, 0xa5, 0xa6, 0xa3, 0x82, 0xa0, 0xe6, 0x26, 0xfc, + 0x34, 0x9b, 0x2e, 0xe2, 0x7b, 0x0a, 0xa9, 0xc6, 0x4d, 0x48, 0x35, 0xcb, 0x48, 0xe5, 0x99, 0xd6, + 0xd2, 0x33, 0xed, 0x09, 0xf4, 0xd4, 0x7a, 0x5a, 0x0c, 0xd7, 0x2a, 0x40, 0x3e, 0x47, 0xe2, 0xbe, + 0xa1, 0x9a, 0x59, 0x36, 0x48, 0x62, 0xeb, 0x77, 0x60, 0x1e, 0x51, 0x65, 0x37, 0x46, 0xdb, 0x60, + 0x06, 0xd9, 0x41, 0x8a, 0xb6, 0x77, 0xd0, 0xa4, 0xc6, 0x33, 0x39, 0x7b, 0x22, 0x64, 0x3d, 0x87, + 0x56, 0x46, 0xce, 0x30, 0x33, 0xae, 0xc2, 0xac, 0x32, 0x85, 0x99, 0xf5, 0x5f, 0x03, 0x16, 0x8b, + 0x2e, 0xa7, 0x61, 0x79, 0x0f, 0xdd, 0xfc, 0x0a, 0x27, 0x74, 0xc7, 0xa9, 0x2f, 0xdb, 0xba, 0x2f, + 0x65, 0xb5, 0xdc, 0xc1, 0xf8, 0x95, 0x3b, 0x56, 0xb9, 0xdc, 0x09, 0x34, 0xd2, 0xe0, 0x1d, 0x2c, + 0x94, 0x44, 0x66, 0xec, 0x66, 0xbf, 0xd4, 0x77, 0xb3, 0xc2, 0x7e, 0x99, 0x6b, 0xeb, 0x0b, 0xdb, + 0x33, 0xb8, 0xab, 0xda, 0xc1, 0x6e, 0x1e, 0xc3, 0x0c, 0xfb, 0x62, 0xa8, 0x8d, 0xe9, 0x50, 0x5b, + 0x03, 0xe8, 0x97, 0x55, 0xd3, 0xf2, 0x1b, 0xc1, 0xc2, 0x31, 0x77, 0x39, 0x89, 0x39, 0xf1, 0xf2, + 0x1f, 0x09, 0x53, 0xb9, 0x61, 0xdc, 0x34, 0x11, 0xcb, 0x75, 0x38, 0x0f, 0x55, 0xce, 0xb3, 0xfc, + 0x15, 0x9f, 0x22, 0x0a, 0x48, 0xbf, 0x29, 0x8d, 0xc1, 0x57, 0xb8, 0x4a, 0xe4, 0x03, 0xa7, 0xdc, + 0x0d, 0xd4, 0xc6, 0x51, 0x93, 0x1b, 0x87, 0x29, 0x29, 0x72, 0xe5, 0x50, 0x43, 0xd9, 0x57, 0xdc, + 0xba, 0xda, 0x47, 0x04, 0x41, 0x32, 0x57, 0x01, 0x64, 0xa9, 0xaa, 0x2a, 0x6b, 0x28, 0x5d, 0x41, + 0xd9, 0x15, 0x04, 0x6b, 0x0d, 0x56, 0x7e, 0x8f, 0xb9, 0xd8, 0x9d, 0xd8, 0x2e, 0x8d, 0x86, 0x64, + 0x94, 0x30, 0x57, 0x0b, 0x85, 0xf5, 0x8d, 0x01, 0xab, 0x57, 0x08, 0xa4, 0x0f, 0xee, 0x43, 0x33, + 0x74, 0x63, 0x8e, 0x59, 0x56, 0x25, 0xd9, 0x71, 0x1a, 0x8a, 0xca, 0x4d, 0x50, 0x54, 0x4b, 0x50, + 0x2c, 0x41, 0x23, 0x74, 0x2f, 0x9d, 0xf0, 0x24, 0x5d, 0x8e, 0xea, 0xa1, 0x7b, 0xf9, 0xea, 0x44, + 0x76, 0x36, 0xc2, 0x9c, 0x93, 0xc4, 0x3b, 0xc3, 0x3c, 0xce, 0x3b, 0x1b, 0x61, 0x2f, 0x15, 0x45, + 0x3c, 0x5a, 0x08, 0x7c, 0x4c, 0x70, 0x82, 0xe3, 0xb4, 0x57, 0x88, 0xe1, 0xf8, 0x47, 0x49, 0xd8, + 0xf9, 0x57, 0x0b, 0x3a, 0xc7, 0xd8, 0xbd, 0xc0, 0xd8, 0x97, 0x0f, 0x43, 0xa3, 0xac, 0xa0, 0x8a, + 0x3f, 0x51, 0xd1, 0x83, 0xe9, 0xca, 0x99, 0xf9, 0x9b, 0x78, 0xf0, 0xf0, 0x26, 0xb1, 0x34, 0x37, + 0xe7, 0xd0, 0x6b, 0x68, 0x6b, 0xbf, 0x01, 0xd1, 0x8a, 0xa6, 0x58, 0xfa, 0x69, 0x3b, 0x58, 0xbd, + 0x82, 0x9b, 0x59, 0xdb, 0x36, 0xd0, 0x11, 0xb4, 0xb5, 0xd5, 0x45, 0xb7, 0x57, 0xde, 0xa1, 0x74, + 0x7b, 0x33, 0xf6, 0x1d, 0x6b, 0x4e, 0x58, 0xd3, 0x16, 0x10, 0xdd, 0x5a, 0x79, 0xe5, 0xd1, 0xad, + 0xcd, 0xda, 0x5a, 0xa4, 0x35, 0x6d, 0xde, 0xeb, 0xd6, 0xca, 0xdb, 0x8c, 0x6e, 0x6d, 0xc6, 0x92, + 0x60, 0xcd, 0xa1, 0x0f, 0xd0, 0x3b, 0xe6, 0x0c, 0xbb, 0xe1, 0x84, 0x3d, 0x85, 0xe0, 0x8f, 0xb0, + 0xba, 
0x69, 0x6c, 0x1b, 0xe8, 0xaf, 0xb0, 0x50, 0x9a, 0xe6, 0xc8, 0x9a, 0x68, 0x5e, 0xb5, 0x86, + 0x0c, 0x36, 0xae, 0x95, 0xc9, 0x3d, 0x7f, 0x03, 0x1d, 0x7d, 0x88, 0x22, 0xcd, 0xa9, 0x19, 0x7b, + 0xc2, 0x60, 0xed, 0x2a, 0xb6, 0x6e, 0x50, 0xef, 0xe3, 0xba, 0xc1, 0x19, 0x93, 0x4c, 0x37, 0x38, + 0xab, 0xfd, 0x5b, 0x73, 0xe8, 0x2f, 0x30, 0x3f, 0xdd, 0x4f, 0xd1, 0xfd, 0x69, 0xe8, 0x4a, 0x6d, + 0x7a, 0x60, 0x5d, 0x27, 0x92, 0x1b, 0x3f, 0x04, 0x98, 0xb4, 0x49, 0xb4, 0x3c, 0xd1, 0x29, 0xb5, + 0xe9, 0xc1, 0xca, 0x6c, 0x66, 0x6e, 0xea, 0x6f, 0xb0, 0x34, 0xb3, 0x17, 0x21, 0xad, 0x00, 0xaf, + 0xeb, 0x66, 0x83, 0x5f, 0xdc, 0x28, 0x97, 0xdd, 0xf5, 0x72, 0x0d, 0xe6, 0x63, 0xd5, 0x22, 0x86, + 0xf1, 0x96, 0x17, 0x10, 0x1c, 0xf1, 0x97, 0x20, 0x35, 0xde, 0x32, 0xca, 0xe9, 0x49, 0x43, 0xfe, + 0x6f, 0xf6, 0xeb, 0x1f, 0x02, 0x00, 0x00, 0xff, 0xff, 0xdb, 0x25, 0xa7, 0x6f, 0x46, 0x13, 0x00, 0x00, } diff --git a/weed/pb/queue.proto b/weed/pb/queue.proto new file mode 100644 index 000000000..e212991d3 --- /dev/null +++ b/weed/pb/queue.proto @@ -0,0 +1,64 @@ +syntax = "proto3"; + +package queue_pb; + +option java_package = "seaweedfs.client"; +option java_outer_classname = "QueueProto"; + +////////////////////////////////////////////////// + +service SeaweedQueue { + + rpc StreamWrite (stream WriteMessageRequest) returns (stream WriteMessageResponse) { + } + + rpc StreamRead (ReadMessageRequest) returns (stream ReadMessageResponse) { + } + + rpc ConfigureTopic (ConfigureTopicRequest) returns (ConfigureTopicResponse) { + } + + rpc DeleteTopic (DeleteTopicRequest) returns (DeleteTopicResponse) { + } + +} + +////////////////////////////////////////////////// + + +message WriteMessageRequest { + string topic = 1; + int64 event_ns = 2; + bytes data = 3; +} + +message WriteMessageResponse { + string error = 1; + int64 ack_ns = 2; +} + +message ReadMessageRequest { + string topic = 1; + int64 start_ns = 2; +} + +message ReadMessageResponse { + string error = 1; + int64 event_ns = 2; + bytes data = 3; +} + +message ConfigureTopicRequest { + string topic = 1; + int64 ttl_seconds = 2; +} +message ConfigureTopicResponse { + string error = 1; +} + +message DeleteTopicRequest { + string topic = 1; +} +message DeleteTopicResponse { + string error = 1; +} diff --git a/weed/pb/queue_pb/queue.pb.go b/weed/pb/queue_pb/queue.pb.go new file mode 100644 index 000000000..16147a77a --- /dev/null +++ b/weed/pb/queue_pb/queue.pb.go @@ -0,0 +1,497 @@ +// Code generated by protoc-gen-go. +// source: queue.proto +// DO NOT EDIT! + +/* +Package queue_pb is a generated protocol buffer package. + +It is generated from these files: + queue.proto + +It has these top-level messages: + WriteMessageRequest + WriteMessageResponse + ReadMessageRequest + ReadMessageResponse + ConfigureTopicRequest + ConfigureTopicResponse + DeleteTopicRequest + DeleteTopicResponse +*/ +package queue_pb + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type WriteMessageRequest struct { + Topic string `protobuf:"bytes,1,opt,name=topic" json:"topic,omitempty"` + EventNs int64 `protobuf:"varint,2,opt,name=event_ns,json=eventNs" json:"event_ns,omitempty"` + Data []byte `protobuf:"bytes,3,opt,name=data,proto3" json:"data,omitempty"` +} + +func (m *WriteMessageRequest) Reset() { *m = WriteMessageRequest{} } +func (m *WriteMessageRequest) String() string { return proto.CompactTextString(m) } +func (*WriteMessageRequest) ProtoMessage() {} +func (*WriteMessageRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +func (m *WriteMessageRequest) GetTopic() string { + if m != nil { + return m.Topic + } + return "" +} + +func (m *WriteMessageRequest) GetEventNs() int64 { + if m != nil { + return m.EventNs + } + return 0 +} + +func (m *WriteMessageRequest) GetData() []byte { + if m != nil { + return m.Data + } + return nil +} + +type WriteMessageResponse struct { + Error string `protobuf:"bytes,1,opt,name=error" json:"error,omitempty"` + AckNs int64 `protobuf:"varint,2,opt,name=ack_ns,json=ackNs" json:"ack_ns,omitempty"` +} + +func (m *WriteMessageResponse) Reset() { *m = WriteMessageResponse{} } +func (m *WriteMessageResponse) String() string { return proto.CompactTextString(m) } +func (*WriteMessageResponse) ProtoMessage() {} +func (*WriteMessageResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } + +func (m *WriteMessageResponse) GetError() string { + if m != nil { + return m.Error + } + return "" +} + +func (m *WriteMessageResponse) GetAckNs() int64 { + if m != nil { + return m.AckNs + } + return 0 +} + +type ReadMessageRequest struct { + Topic string `protobuf:"bytes,1,opt,name=topic" json:"topic,omitempty"` + StartNs int64 `protobuf:"varint,2,opt,name=start_ns,json=startNs" json:"start_ns,omitempty"` +} + +func (m *ReadMessageRequest) Reset() { *m = ReadMessageRequest{} } +func (m *ReadMessageRequest) String() string { return proto.CompactTextString(m) } +func (*ReadMessageRequest) ProtoMessage() {} +func (*ReadMessageRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } + +func (m *ReadMessageRequest) GetTopic() string { + if m != nil { + return m.Topic + } + return "" +} + +func (m *ReadMessageRequest) GetStartNs() int64 { + if m != nil { + return m.StartNs + } + return 0 +} + +type ReadMessageResponse struct { + Error string `protobuf:"bytes,1,opt,name=error" json:"error,omitempty"` + EventNs int64 `protobuf:"varint,2,opt,name=event_ns,json=eventNs" json:"event_ns,omitempty"` + Data []byte `protobuf:"bytes,3,opt,name=data,proto3" json:"data,omitempty"` +} + +func (m *ReadMessageResponse) Reset() { *m = ReadMessageResponse{} } +func (m *ReadMessageResponse) String() string { return proto.CompactTextString(m) } +func (*ReadMessageResponse) ProtoMessage() {} +func (*ReadMessageResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } + +func (m *ReadMessageResponse) GetError() string { + if m != nil { + return m.Error + } + return "" +} + +func (m *ReadMessageResponse) GetEventNs() int64 { + if m != nil { + return m.EventNs + } + return 0 +} + +func (m *ReadMessageResponse) GetData() []byte { + if m != nil { + return m.Data + } + return nil +} + +type ConfigureTopicRequest struct { + Topic string `protobuf:"bytes,1,opt,name=topic" json:"topic,omitempty"` + TtlSeconds int64 `protobuf:"varint,2,opt,name=ttl_seconds,json=ttlSeconds" json:"ttl_seconds,omitempty"` +} + +func (m 
*ConfigureTopicRequest) Reset() { *m = ConfigureTopicRequest{} } +func (m *ConfigureTopicRequest) String() string { return proto.CompactTextString(m) } +func (*ConfigureTopicRequest) ProtoMessage() {} +func (*ConfigureTopicRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } + +func (m *ConfigureTopicRequest) GetTopic() string { + if m != nil { + return m.Topic + } + return "" +} + +func (m *ConfigureTopicRequest) GetTtlSeconds() int64 { + if m != nil { + return m.TtlSeconds + } + return 0 +} + +type ConfigureTopicResponse struct { + Error string `protobuf:"bytes,1,opt,name=error" json:"error,omitempty"` +} + +func (m *ConfigureTopicResponse) Reset() { *m = ConfigureTopicResponse{} } +func (m *ConfigureTopicResponse) String() string { return proto.CompactTextString(m) } +func (*ConfigureTopicResponse) ProtoMessage() {} +func (*ConfigureTopicResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} } + +func (m *ConfigureTopicResponse) GetError() string { + if m != nil { + return m.Error + } + return "" +} + +type DeleteTopicRequest struct { + Topic string `protobuf:"bytes,1,opt,name=topic" json:"topic,omitempty"` +} + +func (m *DeleteTopicRequest) Reset() { *m = DeleteTopicRequest{} } +func (m *DeleteTopicRequest) String() string { return proto.CompactTextString(m) } +func (*DeleteTopicRequest) ProtoMessage() {} +func (*DeleteTopicRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} } + +func (m *DeleteTopicRequest) GetTopic() string { + if m != nil { + return m.Topic + } + return "" +} + +type DeleteTopicResponse struct { + Error string `protobuf:"bytes,1,opt,name=error" json:"error,omitempty"` +} + +func (m *DeleteTopicResponse) Reset() { *m = DeleteTopicResponse{} } +func (m *DeleteTopicResponse) String() string { return proto.CompactTextString(m) } +func (*DeleteTopicResponse) ProtoMessage() {} +func (*DeleteTopicResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} } + +func (m *DeleteTopicResponse) GetError() string { + if m != nil { + return m.Error + } + return "" +} + +func init() { + proto.RegisterType((*WriteMessageRequest)(nil), "queue_pb.WriteMessageRequest") + proto.RegisterType((*WriteMessageResponse)(nil), "queue_pb.WriteMessageResponse") + proto.RegisterType((*ReadMessageRequest)(nil), "queue_pb.ReadMessageRequest") + proto.RegisterType((*ReadMessageResponse)(nil), "queue_pb.ReadMessageResponse") + proto.RegisterType((*ConfigureTopicRequest)(nil), "queue_pb.ConfigureTopicRequest") + proto.RegisterType((*ConfigureTopicResponse)(nil), "queue_pb.ConfigureTopicResponse") + proto.RegisterType((*DeleteTopicRequest)(nil), "queue_pb.DeleteTopicRequest") + proto.RegisterType((*DeleteTopicResponse)(nil), "queue_pb.DeleteTopicResponse") +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. 
+const _ = grpc.SupportPackageIsVersion4 + +// Client API for SeaweedQueue service + +type SeaweedQueueClient interface { + StreamWrite(ctx context.Context, opts ...grpc.CallOption) (SeaweedQueue_StreamWriteClient, error) + StreamRead(ctx context.Context, in *ReadMessageRequest, opts ...grpc.CallOption) (SeaweedQueue_StreamReadClient, error) + ConfigureTopic(ctx context.Context, in *ConfigureTopicRequest, opts ...grpc.CallOption) (*ConfigureTopicResponse, error) + DeleteTopic(ctx context.Context, in *DeleteTopicRequest, opts ...grpc.CallOption) (*DeleteTopicResponse, error) +} + +type seaweedQueueClient struct { + cc *grpc.ClientConn +} + +func NewSeaweedQueueClient(cc *grpc.ClientConn) SeaweedQueueClient { + return &seaweedQueueClient{cc} +} + +func (c *seaweedQueueClient) StreamWrite(ctx context.Context, opts ...grpc.CallOption) (SeaweedQueue_StreamWriteClient, error) { + stream, err := grpc.NewClientStream(ctx, &_SeaweedQueue_serviceDesc.Streams[0], c.cc, "/queue_pb.SeaweedQueue/StreamWrite", opts...) + if err != nil { + return nil, err + } + x := &seaweedQueueStreamWriteClient{stream} + return x, nil +} + +type SeaweedQueue_StreamWriteClient interface { + Send(*WriteMessageRequest) error + Recv() (*WriteMessageResponse, error) + grpc.ClientStream +} + +type seaweedQueueStreamWriteClient struct { + grpc.ClientStream +} + +func (x *seaweedQueueStreamWriteClient) Send(m *WriteMessageRequest) error { + return x.ClientStream.SendMsg(m) +} + +func (x *seaweedQueueStreamWriteClient) Recv() (*WriteMessageResponse, error) { + m := new(WriteMessageResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *seaweedQueueClient) StreamRead(ctx context.Context, in *ReadMessageRequest, opts ...grpc.CallOption) (SeaweedQueue_StreamReadClient, error) { + stream, err := grpc.NewClientStream(ctx, &_SeaweedQueue_serviceDesc.Streams[1], c.cc, "/queue_pb.SeaweedQueue/StreamRead", opts...) + if err != nil { + return nil, err + } + x := &seaweedQueueStreamReadClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type SeaweedQueue_StreamReadClient interface { + Recv() (*ReadMessageResponse, error) + grpc.ClientStream +} + +type seaweedQueueStreamReadClient struct { + grpc.ClientStream +} + +func (x *seaweedQueueStreamReadClient) Recv() (*ReadMessageResponse, error) { + m := new(ReadMessageResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *seaweedQueueClient) ConfigureTopic(ctx context.Context, in *ConfigureTopicRequest, opts ...grpc.CallOption) (*ConfigureTopicResponse, error) { + out := new(ConfigureTopicResponse) + err := grpc.Invoke(ctx, "/queue_pb.SeaweedQueue/ConfigureTopic", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *seaweedQueueClient) DeleteTopic(ctx context.Context, in *DeleteTopicRequest, opts ...grpc.CallOption) (*DeleteTopicResponse, error) { + out := new(DeleteTopicResponse) + err := grpc.Invoke(ctx, "/queue_pb.SeaweedQueue/DeleteTopic", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +// Server API for SeaweedQueue service + +type SeaweedQueueServer interface { + StreamWrite(SeaweedQueue_StreamWriteServer) error + StreamRead(*ReadMessageRequest, SeaweedQueue_StreamReadServer) error + ConfigureTopic(context.Context, *ConfigureTopicRequest) (*ConfigureTopicResponse, error) + DeleteTopic(context.Context, *DeleteTopicRequest) (*DeleteTopicResponse, error) +} + +func RegisterSeaweedQueueServer(s *grpc.Server, srv SeaweedQueueServer) { + s.RegisterService(&_SeaweedQueue_serviceDesc, srv) +} + +func _SeaweedQueue_StreamWrite_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(SeaweedQueueServer).StreamWrite(&seaweedQueueStreamWriteServer{stream}) +} + +type SeaweedQueue_StreamWriteServer interface { + Send(*WriteMessageResponse) error + Recv() (*WriteMessageRequest, error) + grpc.ServerStream +} + +type seaweedQueueStreamWriteServer struct { + grpc.ServerStream +} + +func (x *seaweedQueueStreamWriteServer) Send(m *WriteMessageResponse) error { + return x.ServerStream.SendMsg(m) +} + +func (x *seaweedQueueStreamWriteServer) Recv() (*WriteMessageRequest, error) { + m := new(WriteMessageRequest) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func _SeaweedQueue_StreamRead_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(ReadMessageRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(SeaweedQueueServer).StreamRead(m, &seaweedQueueStreamReadServer{stream}) +} + +type SeaweedQueue_StreamReadServer interface { + Send(*ReadMessageResponse) error + grpc.ServerStream +} + +type seaweedQueueStreamReadServer struct { + grpc.ServerStream +} + +func (x *seaweedQueueStreamReadServer) Send(m *ReadMessageResponse) error { + return x.ServerStream.SendMsg(m) +} + +func _SeaweedQueue_ConfigureTopic_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ConfigureTopicRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SeaweedQueueServer).ConfigureTopic(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/queue_pb.SeaweedQueue/ConfigureTopic", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SeaweedQueueServer).ConfigureTopic(ctx, req.(*ConfigureTopicRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _SeaweedQueue_DeleteTopic_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteTopicRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SeaweedQueueServer).DeleteTopic(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/queue_pb.SeaweedQueue/DeleteTopic", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SeaweedQueueServer).DeleteTopic(ctx, req.(*DeleteTopicRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _SeaweedQueue_serviceDesc = grpc.ServiceDesc{ + ServiceName: "queue_pb.SeaweedQueue", + HandlerType: (*SeaweedQueueServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "ConfigureTopic", + Handler: _SeaweedQueue_ConfigureTopic_Handler, + }, + { + MethodName: "DeleteTopic", + Handler: _SeaweedQueue_DeleteTopic_Handler, + }, + }, + 
Streams: []grpc.StreamDesc{ + { + StreamName: "StreamWrite", + Handler: _SeaweedQueue_StreamWrite_Handler, + ServerStreams: true, + ClientStreams: true, + }, + { + StreamName: "StreamRead", + Handler: _SeaweedQueue_StreamRead_Handler, + ServerStreams: true, + }, + }, + Metadata: "queue.proto", +} + +func init() { proto.RegisterFile("queue.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 380 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x94, 0x93, 0xd1, 0x6e, 0xda, 0x30, + 0x14, 0x86, 0x09, 0x0c, 0xc6, 0x4e, 0xd0, 0x34, 0x19, 0x98, 0x18, 0x1a, 0x10, 0xf9, 0x2a, 0xda, + 0xa4, 0x08, 0x6d, 0x6f, 0x00, 0xed, 0x5d, 0x89, 0xda, 0xd0, 0xaa, 0x52, 0x6f, 0x90, 0x49, 0x0e, + 0x28, 0x22, 0x4d, 0x82, 0xed, 0xb4, 0x6f, 0xda, 0xe7, 0xa9, 0xe2, 0x28, 0x22, 0x29, 0x10, 0xd1, + 0xbb, 0xfc, 0xb1, 0xfd, 0x9d, 0xdf, 0xff, 0x39, 0x06, 0x7d, 0x9f, 0x60, 0x82, 0x56, 0xcc, 0x23, + 0x19, 0x91, 0xb6, 0x12, 0xab, 0x78, 0x4d, 0x9f, 0xa0, 0xfb, 0xc8, 0x7d, 0x89, 0x0b, 0x14, 0x82, + 0x6d, 0xd1, 0xc1, 0x7d, 0x82, 0x42, 0x92, 0x1e, 0x34, 0x65, 0x14, 0xfb, 0xee, 0x40, 0x33, 0x34, + 0xf3, 0x9b, 0x93, 0x09, 0xf2, 0x0b, 0xda, 0xf8, 0x82, 0xa1, 0x5c, 0x85, 0x62, 0x50, 0x37, 0x34, + 0xb3, 0xe1, 0x7c, 0x55, 0xda, 0x16, 0x84, 0xc0, 0x17, 0x8f, 0x49, 0x36, 0x68, 0x18, 0x9a, 0xd9, + 0x71, 0xd4, 0x37, 0x9d, 0x43, 0xaf, 0xcc, 0x16, 0x71, 0x14, 0x0a, 0x4c, 0xe1, 0xc8, 0x79, 0xc4, + 0x73, 0xb8, 0x12, 0xa4, 0x0f, 0x2d, 0xe6, 0xee, 0x0e, 0xe8, 0x26, 0x73, 0x77, 0xb6, 0xa0, 0xd7, + 0x40, 0x1c, 0x64, 0xde, 0xa5, 0xfe, 0x84, 0x64, 0xbc, 0xe8, 0x4f, 0x69, 0x5b, 0xa4, 0xf7, 0x2c, + 0x61, 0x2a, 0xad, 0x7c, 0xf2, 0x9e, 0x36, 0xf4, 0xe7, 0x51, 0xb8, 0xf1, 0xb7, 0x09, 0xc7, 0xfb, + 0xd4, 0x48, 0xb5, 0xcb, 0x09, 0xe8, 0x52, 0x06, 0x2b, 0x81, 0x6e, 0x14, 0x7a, 0x79, 0x01, 0x90, + 0x32, 0x58, 0x66, 0x7f, 0xa8, 0x05, 0x3f, 0x3f, 0xf2, 0xaa, 0xec, 0xd2, 0x3f, 0x40, 0xae, 0x30, + 0x40, 0x79, 0x41, 0x71, 0xfa, 0x17, 0xba, 0xa5, 0xbd, 0x55, 0xe0, 0x7f, 0x6f, 0x75, 0xe8, 0x2c, + 0x91, 0xbd, 0x22, 0x7a, 0x77, 0xe9, 0xc0, 0x10, 0x07, 0xf4, 0xa5, 0xe4, 0xc8, 0x9e, 0x55, 0x5f, + 0xc9, 0xc8, 0xca, 0xe7, 0xc8, 0x3a, 0x31, 0x44, 0xc3, 0xf1, 0xb9, 0xe5, 0xac, 0x28, 0xad, 0x99, + 0xda, 0x54, 0x23, 0x0b, 0x80, 0x8c, 0x99, 0xf6, 0x87, 0xfc, 0x3e, 0x9c, 0x39, 0x6e, 0xfb, 0x70, + 0x74, 0x66, 0x35, 0x07, 0x4e, 0x35, 0xf2, 0x00, 0xdf, 0xcb, 0xe1, 0x91, 0xc9, 0xe1, 0xd0, 0xc9, + 0x36, 0x0d, 0x8d, 0xf3, 0x1b, 0x72, 0x30, 0xb9, 0x01, 0xbd, 0x90, 0x5b, 0xd1, 0xe6, 0x71, 0xf4, + 0x45, 0x9b, 0x27, 0xc2, 0xa6, 0xb5, 0xd9, 0x18, 0x7e, 0x88, 0x2c, 0xd7, 0x8d, 0xb0, 0xdc, 0xc0, + 0xc7, 0x50, 0xce, 0x40, 0x45, 0x7c, 0x9b, 0xbe, 0xcf, 0x75, 0x4b, 0x3d, 0xd3, 0xff, 0xef, 0x01, + 0x00, 0x00, 0xff, 0xff, 0x34, 0x84, 0x96, 0x74, 0xb5, 0x03, 0x00, 0x00, +} diff --git a/weed/server/filer_grpc_server.go b/weed/server/filer_grpc_server.go index 30a5cc9de..35539acca 100644 --- a/weed/server/filer_grpc_server.go +++ b/weed/server/filer_grpc_server.go @@ -336,6 +336,7 @@ func (fs *FilerServer) GetFilerConfiguration(ctx context.Context, req *filer_pb. 
Collection: fs.option.Collection, Replication: fs.option.DefaultReplication, MaxMb: uint32(fs.option.MaxMB), - DirBuckets: fs.option.DirBucketsPath, + DirBuckets: fs.filer.DirBucketsPath, + DirQueues: fs.filer.DirQueuesPath, }, nil } diff --git a/weed/server/filer_server.go b/weed/server/filer_server.go index 5fc038e17..bfb182dbe 100644 --- a/weed/server/filer_server.go +++ b/weed/server/filer_server.go @@ -46,7 +46,6 @@ type FilerOption struct { DisableHttp bool Port int recursiveDelete bool - DirBucketsPath string } type FilerServer struct { @@ -67,7 +66,7 @@ func NewFilerServer(defaultMux, readonlyMux *http.ServeMux, option *FilerOption) glog.Fatal("master list is required!") } - fs.filer = filer2.NewFiler(option.Masters, fs.grpcDialOption, option.DirBucketsPath) + fs.filer = filer2.NewFiler(option.Masters, fs.grpcDialOption) go fs.filer.KeepConnectedToMaster() @@ -84,7 +83,9 @@ func NewFilerServer(defaultMux, readonlyMux *http.ServeMux, option *FilerOption) fs.option.recursiveDelete = v.GetBool("filer.options.recursive_delete") v.Set("filer.option.buckets_folder", "/buckets") - fs.option.DirBucketsPath = v.GetString("filer.option.buckets_folder") + v.Set("filer.option.queues_folder", "/queues") + fs.filer.DirBucketsPath = v.GetString("filer.option.buckets_folder") + fs.filer.DirQueuesPath = v.GetString("filer.option.queues_folder") fs.filer.LoadConfiguration(v) notification.LoadConfiguration(v, "notification.") @@ -97,7 +98,7 @@ func NewFilerServer(defaultMux, readonlyMux *http.ServeMux, option *FilerOption) readonlyMux.HandleFunc("/", fs.readonlyFilerHandler) } - fs.filer.LoadBuckets(fs.option.DirBucketsPath) + fs.filer.LoadBuckets(fs.filer.DirBucketsPath) maybeStartMetrics(fs, option) diff --git a/weed/server/queue_server.go b/weed/server/queue_server.go new file mode 100644 index 000000000..078c76a30 --- /dev/null +++ b/weed/server/queue_server.go @@ -0,0 +1,49 @@ +package weed_server + +import ( + "context" + + "google.golang.org/grpc" + + "github.com/chrislusf/seaweedfs/weed/pb/queue_pb" + "github.com/chrislusf/seaweedfs/weed/security" + "github.com/chrislusf/seaweedfs/weed/util" +) + +type QueueServerOption struct { + Filers []string + DefaultReplication string + MaxMB int + Port int +} + +type QueueServer struct { + option *QueueServerOption + grpcDialOption grpc.DialOption +} + +func (q *QueueServer) ConfigureTopic(context.Context, *queue_pb.ConfigureTopicRequest) (*queue_pb.ConfigureTopicResponse, error) { + panic("implement me") +} + +func (q *QueueServer) DeleteTopic(context.Context, *queue_pb.DeleteTopicRequest) (*queue_pb.DeleteTopicResponse, error) { + panic("implement me") +} + +func (q *QueueServer) StreamWrite(queue_pb.SeaweedQueue_StreamWriteServer) error { + panic("implement me") +} + +func (q *QueueServer) StreamRead(*queue_pb.ReadMessageRequest, queue_pb.SeaweedQueue_StreamReadServer) error { + panic("implement me") +} + +func NewQueueServer(option *QueueServerOption) (qs *QueueServer, err error) { + + qs = &QueueServer{ + option: option, + grpcDialOption: security.LoadClientTLS(util.GetViper(), "grpc.queue"), + } + + return qs, nil +} From 757c7d67adaa58ca710bf39cfa87fd4fc23c756b Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Thu, 27 Feb 2020 00:07:25 -0800 Subject: [PATCH 0171/2432] avoid nil DataBackend --- weed/storage/volume_vacuum.go | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/weed/storage/volume_vacuum.go b/weed/storage/volume_vacuum.go index db9765cff..34e3db66b 100644 --- a/weed/storage/volume_vacuum.go +++ 
b/weed/storage/volume_vacuum.go @@ -91,8 +91,10 @@ func (v *Volume) CommitCompact() error { glog.V(3).Infof("Got volume %d committing lock...", v.Id) v.nm.Close() - if err := v.DataBackend.Close(); err != nil { - glog.V(0).Infof("fail to close volume %d", v.Id) + if v.DataBackend != nil { + if err := v.DataBackend.Close(); err != nil { + glog.V(0).Infof("fail to close volume %d", v.Id) + } } v.DataBackend = nil stats.VolumeServerVolumeCounter.WithLabelValues(v.Collection, "volume").Dec() From f87f2045b34ed2ff593cfb6dba258463e560e3cb Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Thu, 27 Feb 2020 00:59:35 -0800 Subject: [PATCH 0172/2432] delete old file first on windows fix https://github.com/chrislusf/seaweedfs/issues/1210 --- weed/storage/volume_vacuum.go | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/weed/storage/volume_vacuum.go b/weed/storage/volume_vacuum.go index 34e3db66b..185484477 100644 --- a/weed/storage/volume_vacuum.go +++ b/weed/storage/volume_vacuum.go @@ -3,6 +3,7 @@ package storage import ( "fmt" "os" + "runtime" "time" "github.com/chrislusf/seaweedfs/weed/glog" @@ -111,6 +112,10 @@ func (v *Volume) CommitCompact() error { return e } } else { + if runtime.GOOS == "windows" { + os.RemoveAll(v.FileName() + ".dat") + os.RemoveAll(v.FileName() + ".idx") + } var e error if e = os.Rename(v.FileName()+".cpd", v.FileName()+".dat"); e != nil { return fmt.Errorf("rename %s: %v", v.FileName()+".cpd", e) From 1df13d0d6d0fbc6e7aae3e580fbb08092d8b35f0 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Thu, 27 Feb 2020 13:15:21 -0800 Subject: [PATCH 0173/2432] adjust instructions --- weed/command/scaffold.go | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/weed/command/scaffold.go b/weed/command/scaffold.go index 156aa8482..fc7f8636d 100644 --- a/weed/command/scaffold.go +++ b/weed/command/scaffold.go @@ -387,6 +387,11 @@ sequencer_etcd_urls = "http://127.0.0.1:2379" bucket = "your_bucket_name" # an existing bucket # create this number of logical volumes if no more writable volumes +# count_x means how many copies of data. 
+# e.g.: +# 000 has only one copy, count_1 +# 010 and 001 has two copies, count_2 +# 011 has only 3 copies, count_3 [master.volume_growth] count_1 = 7 # create 1 x 7 = 7 actual volumes count_2 = 6 # create 2 x 6 = 12 actual volumes From 151114ff59295d06ad5573b88ba43570364c55d4 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Sun, 1 Mar 2020 17:57:37 -0800 Subject: [PATCH 0174/2432] volume: fix readonly status reporting --- weed/storage/volume.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/weed/storage/volume.go b/weed/storage/volume.go index acede66bf..88a5db4c5 100644 --- a/weed/storage/volume.go +++ b/weed/storage/volume.go @@ -215,7 +215,7 @@ func (v *Volume) ToVolumeInformationMessage() *master_pb.VolumeInformationMessag FileCount: v.FileCount(), DeleteCount: v.DeletedCount(), DeletedByteCount: v.DeletedSize(), - ReadOnly: v.noWriteOrDelete, + ReadOnly: v.noWriteOrDelete || v.noWriteCanDelete, ReplicaPlacement: uint32(v.ReplicaPlacement.Byte()), Version: uint32(v.Version()), Ttl: v.Ttl.ToUint32(), From 4532640ffd790ef71705b120803615b64713ee43 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Sun, 1 Mar 2020 17:59:12 -0800 Subject: [PATCH 0175/2432] 1.58 --- weed/util/constants.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/weed/util/constants.go b/weed/util/constants.go index 01227194f..11b6e0382 100644 --- a/weed/util/constants.go +++ b/weed/util/constants.go @@ -5,5 +5,5 @@ import ( ) var ( - VERSION = fmt.Sprintf("%s %d.%d", sizeLimit, 1, 57) + VERSION = fmt.Sprintf("%s %d.%d", sizeLimit, 1, 58) ) From 4dec5e0e4ae9da6d7749128a81d7010f7a3cd801 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Sun, 1 Mar 2020 18:23:07 -0800 Subject: [PATCH 0176/2432] fix dockerhub build --- docker/Dockerfile.go_build | 12 +++++++++--- docker/Dockerfile.local | 29 +++++++++++++++++++++++++++++ docker/Makefile | 2 +- 3 files changed, 39 insertions(+), 4 deletions(-) create mode 100644 docker/Dockerfile.local diff --git a/docker/Dockerfile.go_build b/docker/Dockerfile.go_build index b4a7b6504..306ce3aa1 100644 --- a/docker/Dockerfile.go_build +++ b/docker/Dockerfile.go_build @@ -1,9 +1,15 @@ +FROM frolvlad/alpine-glibc as builder +RUN apk add git go g++ +RUN mkdir -p /go/src/github.com/chrislusf/ +RUN git clone https://github.com/chrislusf/seaweedfs /go/src/github.com/chrislusf/seaweedfs +RUN cd /go/src/github.com/chrislusf/seaweedfs/weed && go install + FROM alpine AS final LABEL author="Chris Lu" -COPY ./weed /usr/bin/ +COPY --from=builder /root/go/bin/weed /usr/bin/ RUN mkdir -p /etc/seaweedfs -COPY ./filer.toml /etc/seaweedfs/filer.toml -COPY ./entrypoint.sh /entrypoint.sh +COPY --from=builder /go/src/github.com/chrislusf/seaweedfs/docker/filer.toml /etc/seaweedfs/filer.toml +COPY --from=builder /go/src/github.com/chrislusf/seaweedfs/docker/entrypoint.sh /entrypoint.sh # volume server gprc port EXPOSE 18080 diff --git a/docker/Dockerfile.local b/docker/Dockerfile.local new file mode 100644 index 000000000..b4a7b6504 --- /dev/null +++ b/docker/Dockerfile.local @@ -0,0 +1,29 @@ +FROM alpine AS final +LABEL author="Chris Lu" +COPY ./weed /usr/bin/ +RUN mkdir -p /etc/seaweedfs +COPY ./filer.toml /etc/seaweedfs/filer.toml +COPY ./entrypoint.sh /entrypoint.sh + +# volume server gprc port +EXPOSE 18080 +# volume server http port +EXPOSE 8080 +# filer server gprc port +EXPOSE 18888 +# filer server http port +EXPOSE 8888 +# master server shared gprc port +EXPOSE 19333 +# master server shared http port +EXPOSE 9333 +# s3 server http port +EXPOSE 8333 + +RUN mkdir -p 
/data/filerldb2 + +VOLUME /data + +RUN chmod +x /entrypoint.sh + +ENTRYPOINT ["/entrypoint.sh"] diff --git a/docker/Makefile b/docker/Makefile index 5a40d36d2..166188bc3 100644 --- a/docker/Makefile +++ b/docker/Makefile @@ -6,7 +6,7 @@ gen: dev build: cd ../weed; GOOS=linux go build; mv weed ../docker/ - docker build --no-cache -t chrislusf/seaweedfs:local -f Dockerfile.go_build . + docker build --no-cache -t chrislusf/seaweedfs:local -f Dockerfile.local . rm ./weed dev: build From 0ca68a2a6d87786dbebe87fbd4b786b3318c1dcb Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Sun, 1 Mar 2020 22:13:15 -0800 Subject: [PATCH 0177/2432] WIP --- weed/pb/queue.proto | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/weed/pb/queue.proto b/weed/pb/queue.proto index e212991d3..39b6ee05a 100644 --- a/weed/pb/queue.proto +++ b/weed/pb/queue.proto @@ -29,7 +29,8 @@ service SeaweedQueue { message WriteMessageRequest { string topic = 1; int64 event_ns = 2; - bytes data = 3; + bytes partition_key = 3; + bytes data = 4; } message WriteMessageResponse { @@ -51,6 +52,7 @@ message ReadMessageResponse { message ConfigureTopicRequest { string topic = 1; int64 ttl_seconds = 2; + int32 partition_count = 3; } message ConfigureTopicResponse { string error = 1; From 6a8484b4ae2615c2cc88e3a66d03aead3966ba7c Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Sun, 1 Mar 2020 22:13:47 -0800 Subject: [PATCH 0178/2432] master able to list all master clients by type --- weed/command/benchmark.go | 2 +- weed/command/filer.go | 2 +- weed/filer2/filer.go | 4 +- weed/filer2/leveldb/leveldb_store_test.go | 4 +- weed/filer2/leveldb2/leveldb2_store_test.go | 4 +- weed/pb/master.proto | 10 + weed/pb/master_pb/master.pb.go | 350 ++++++++++++-------- weed/pb/queue_pb/queue.pb.go | 79 +++-- weed/server/filer_server.go | 4 +- weed/server/master_grpc_server.go | 84 +++-- weed/server/master_server.go | 2 +- weed/shell/commands.go | 3 +- weed/stats/metrics.go | 2 +- weed/wdclient/masterclient.go | 9 +- 14 files changed, 353 insertions(+), 206 deletions(-) diff --git a/weed/command/benchmark.go b/weed/command/benchmark.go index c83db02b4..e85ab1b9b 100644 --- a/weed/command/benchmark.go +++ b/weed/command/benchmark.go @@ -127,7 +127,7 @@ func runBenchmark(cmd *Command, args []string) bool { defer pprof.StopCPUProfile() } - b.masterClient = wdclient.NewMasterClient(b.grpcDialOption, "client", strings.Split(*b.masters, ",")) + b.masterClient = wdclient.NewMasterClient(b.grpcDialOption, "client", 0, strings.Split(*b.masters, ",")) go b.masterClient.KeepConnectedToMaster() b.masterClient.WaitUntilConnected() diff --git a/weed/command/filer.go b/weed/command/filer.go index ea8392fac..31e65acea 100644 --- a/weed/command/filer.go +++ b/weed/command/filer.go @@ -109,7 +109,7 @@ func (fo *FilerOptions) startFiler() { DataCenter: *fo.dataCenter, DefaultLevelDbDir: defaultLevelDbDirectory, DisableHttp: *fo.disableHttp, - Port: *fo.port, + Port: uint32(*fo.port), }) if nfs_err != nil { glog.Fatalf("Filer startup error: %v", nfs_err) diff --git a/weed/filer2/filer.go b/weed/filer2/filer.go index e70e013ae..bf43c3c52 100644 --- a/weed/filer2/filer.go +++ b/weed/filer2/filer.go @@ -35,10 +35,10 @@ type Filer struct { buckets *FilerBuckets } -func NewFiler(masters []string, grpcDialOption grpc.DialOption) *Filer { +func NewFiler(masters []string, grpcDialOption grpc.DialOption, filerGrpcPort uint32) *Filer { f := &Filer{ directoryCache: ccache.New(ccache.Configure().MaxSize(1000).ItemsToPrune(100)), - MasterClient: 
wdclient.NewMasterClient(grpcDialOption, "filer", masters), + MasterClient: wdclient.NewMasterClient(grpcDialOption, "filer", filerGrpcPort, masters), fileIdDeletionQueue: util.NewUnboundedQueue(), GrpcDialOption: grpcDialOption, } diff --git a/weed/filer2/leveldb/leveldb_store_test.go b/weed/filer2/leveldb/leveldb_store_test.go index 983e1cbe9..497158420 100644 --- a/weed/filer2/leveldb/leveldb_store_test.go +++ b/weed/filer2/leveldb/leveldb_store_test.go @@ -9,7 +9,7 @@ import ( ) func TestCreateAndFind(t *testing.T) { - filer := filer2.NewFiler(nil, nil) + filer := filer2.NewFiler(nil, nil, 0) dir, _ := ioutil.TempDir("", "seaweedfs_filer_test") defer os.RemoveAll(dir) store := &LevelDBStore{} @@ -64,7 +64,7 @@ func TestCreateAndFind(t *testing.T) { } func TestEmptyRoot(t *testing.T) { - filer := filer2.NewFiler(nil, nil) + filer := filer2.NewFiler(nil, nil, 0) dir, _ := ioutil.TempDir("", "seaweedfs_filer_test2") defer os.RemoveAll(dir) store := &LevelDBStore{} diff --git a/weed/filer2/leveldb2/leveldb2_store_test.go b/weed/filer2/leveldb2/leveldb2_store_test.go index 58637b7b6..dc94f2ac7 100644 --- a/weed/filer2/leveldb2/leveldb2_store_test.go +++ b/weed/filer2/leveldb2/leveldb2_store_test.go @@ -9,7 +9,7 @@ import ( ) func TestCreateAndFind(t *testing.T) { - filer := filer2.NewFiler(nil, nil) + filer := filer2.NewFiler(nil, nil, 0) dir, _ := ioutil.TempDir("", "seaweedfs_filer_test") defer os.RemoveAll(dir) store := &LevelDB2Store{} @@ -64,7 +64,7 @@ func TestCreateAndFind(t *testing.T) { } func TestEmptyRoot(t *testing.T) { - filer := filer2.NewFiler(nil, nil) + filer := filer2.NewFiler(nil, nil, 0) dir, _ := ioutil.TempDir("", "seaweedfs_filer_test2") defer os.RemoveAll(dir) store := &LevelDB2Store{} diff --git a/weed/pb/master.proto b/weed/pb/master.proto index 9b1e884c7..4310b2602 100644 --- a/weed/pb/master.proto +++ b/weed/pb/master.proto @@ -25,6 +25,8 @@ service Seaweed { } rpc GetMasterConfiguration (GetMasterConfigurationRequest) returns (GetMasterConfigurationResponse) { } + rpc ListMasterClients (ListMasterClientsRequest) returns (ListMasterClientsResponse) { + } } ////////////////////////////////////////////////// @@ -112,6 +114,7 @@ message SuperBlockExtra { message KeepConnectedRequest { string name = 1; + uint32 grpc_port = 2; } message VolumeLocation { @@ -264,3 +267,10 @@ message GetMasterConfigurationResponse { string metrics_address = 1; uint32 metrics_interval_seconds = 2; } + +message ListMasterClientsRequest { + string client_type = 1; +} +message ListMasterClientsResponse { + repeated string grpc_addresses = 1; +} diff --git a/weed/pb/master_pb/master.pb.go b/weed/pb/master_pb/master.pb.go index ea4362c92..c33e2b768 100644 --- a/weed/pb/master_pb/master.pb.go +++ b/weed/pb/master_pb/master.pb.go @@ -42,6 +42,8 @@ It has these top-level messages: LookupEcVolumeResponse GetMasterConfigurationRequest GetMasterConfigurationResponse + ListMasterClientsRequest + ListMasterClientsResponse */ package master_pb @@ -543,7 +545,8 @@ func (m *SuperBlockExtra_ErasureCoding) GetVolumeIds() []uint32 { } type KeepConnectedRequest struct { - Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + GrpcPort uint32 `protobuf:"varint,2,opt,name=grpc_port,json=grpcPort" json:"grpc_port,omitempty"` } func (m *KeepConnectedRequest) Reset() { *m = KeepConnectedRequest{} } @@ -558,6 +561,13 @@ func (m *KeepConnectedRequest) GetName() string { return "" } +func (m *KeepConnectedRequest) GetGrpcPort() 
uint32 { + if m != nil { + return m.GrpcPort + } + return 0 +} + type VolumeLocation struct { Url string `protobuf:"bytes,1,opt,name=url" json:"url,omitempty"` PublicUrl string `protobuf:"bytes,2,opt,name=public_url,json=publicUrl" json:"public_url,omitempty"` @@ -1431,6 +1441,38 @@ func (m *GetMasterConfigurationResponse) GetMetricsIntervalSeconds() uint32 { return 0 } +type ListMasterClientsRequest struct { + ClientType string `protobuf:"bytes,1,opt,name=client_type,json=clientType" json:"client_type,omitempty"` +} + +func (m *ListMasterClientsRequest) Reset() { *m = ListMasterClientsRequest{} } +func (m *ListMasterClientsRequest) String() string { return proto.CompactTextString(m) } +func (*ListMasterClientsRequest) ProtoMessage() {} +func (*ListMasterClientsRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{33} } + +func (m *ListMasterClientsRequest) GetClientType() string { + if m != nil { + return m.ClientType + } + return "" +} + +type ListMasterClientsResponse struct { + GrpcAddresses []string `protobuf:"bytes,1,rep,name=grpc_addresses,json=grpcAddresses" json:"grpc_addresses,omitempty"` +} + +func (m *ListMasterClientsResponse) Reset() { *m = ListMasterClientsResponse{} } +func (m *ListMasterClientsResponse) String() string { return proto.CompactTextString(m) } +func (*ListMasterClientsResponse) ProtoMessage() {} +func (*ListMasterClientsResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{34} } + +func (m *ListMasterClientsResponse) GetGrpcAddresses() []string { + if m != nil { + return m.GrpcAddresses + } + return nil +} + func init() { proto.RegisterType((*Heartbeat)(nil), "master_pb.Heartbeat") proto.RegisterType((*HeartbeatResponse)(nil), "master_pb.HeartbeatResponse") @@ -1468,6 +1510,8 @@ func init() { proto.RegisterType((*LookupEcVolumeResponse_EcShardIdLocation)(nil), "master_pb.LookupEcVolumeResponse.EcShardIdLocation") proto.RegisterType((*GetMasterConfigurationRequest)(nil), "master_pb.GetMasterConfigurationRequest") proto.RegisterType((*GetMasterConfigurationResponse)(nil), "master_pb.GetMasterConfigurationResponse") + proto.RegisterType((*ListMasterClientsRequest)(nil), "master_pb.ListMasterClientsRequest") + proto.RegisterType((*ListMasterClientsResponse)(nil), "master_pb.ListMasterClientsResponse") } // Reference imports to suppress errors if they are not otherwise used. @@ -1491,6 +1535,7 @@ type SeaweedClient interface { VolumeList(ctx context.Context, in *VolumeListRequest, opts ...grpc.CallOption) (*VolumeListResponse, error) LookupEcVolume(ctx context.Context, in *LookupEcVolumeRequest, opts ...grpc.CallOption) (*LookupEcVolumeResponse, error) GetMasterConfiguration(ctx context.Context, in *GetMasterConfigurationRequest, opts ...grpc.CallOption) (*GetMasterConfigurationResponse, error) + ListMasterClients(ctx context.Context, in *ListMasterClientsRequest, opts ...grpc.CallOption) (*ListMasterClientsResponse, error) } type seaweedClient struct { @@ -1635,6 +1680,15 @@ func (c *seaweedClient) GetMasterConfiguration(ctx context.Context, in *GetMaste return out, nil } +func (c *seaweedClient) ListMasterClients(ctx context.Context, in *ListMasterClientsRequest, opts ...grpc.CallOption) (*ListMasterClientsResponse, error) { + out := new(ListMasterClientsResponse) + err := grpc.Invoke(ctx, "/master_pb.Seaweed/ListMasterClients", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + // Server API for Seaweed service type SeaweedServer interface { @@ -1648,6 +1702,7 @@ type SeaweedServer interface { VolumeList(context.Context, *VolumeListRequest) (*VolumeListResponse, error) LookupEcVolume(context.Context, *LookupEcVolumeRequest) (*LookupEcVolumeResponse, error) GetMasterConfiguration(context.Context, *GetMasterConfigurationRequest) (*GetMasterConfigurationResponse, error) + ListMasterClients(context.Context, *ListMasterClientsRequest) (*ListMasterClientsResponse, error) } func RegisterSeaweedServer(s *grpc.Server, srv SeaweedServer) { @@ -1850,6 +1905,24 @@ func _Seaweed_GetMasterConfiguration_Handler(srv interface{}, ctx context.Contex return interceptor(ctx, in, info, handler) } +func _Seaweed_ListMasterClients_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListMasterClientsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SeaweedServer).ListMasterClients(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/master_pb.Seaweed/ListMasterClients", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SeaweedServer).ListMasterClients(ctx, req.(*ListMasterClientsRequest)) + } + return interceptor(ctx, in, info, handler) +} + var _Seaweed_serviceDesc = grpc.ServiceDesc{ ServiceName: "master_pb.Seaweed", HandlerType: (*SeaweedServer)(nil), @@ -1886,6 +1959,10 @@ var _Seaweed_serviceDesc = grpc.ServiceDesc{ MethodName: "GetMasterConfiguration", Handler: _Seaweed_GetMasterConfiguration_Handler, }, + { + MethodName: "ListMasterClients", + Handler: _Seaweed_ListMasterClients_Handler, + }, }, Streams: []grpc.StreamDesc{ { @@ -1907,137 +1984,142 @@ var _Seaweed_serviceDesc = grpc.ServiceDesc{ func init() { proto.RegisterFile("master.proto", fileDescriptor0) } var fileDescriptor0 = []byte{ - // 2102 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xd4, 0x59, 0x4b, 0x6f, 0x1c, 0xc7, - 0x11, 0xd6, 0xec, 0x2e, 0x97, 0xbb, 0xb5, 0xef, 0x26, 0x45, 0xaf, 0xd6, 0x96, 0xb5, 0x1a, 0x07, - 0x30, 0xa5, 0x38, 0x8c, 0x43, 0x1b, 0x88, 0x91, 0xc4, 0x30, 0x24, 0x8a, 0x76, 0x08, 0x89, 0xb4, - 0x34, 0x54, 0x64, 0x20, 0x40, 0x30, 0xe9, 0x9d, 0x69, 0x92, 0x03, 0xce, 0x2b, 0xd3, 0xbd, 0x14, - 0xd7, 0xb9, 0x04, 0xc8, 0x31, 0xa7, 0x20, 0x87, 0xfc, 0x85, 0x5c, 0x72, 0x4a, 0xce, 0xbe, 0xe4, - 0x47, 0xe4, 0x7f, 0xe4, 0xea, 0x4b, 0xd0, 0xaf, 0x99, 0x9e, 0x7d, 0x90, 0xa6, 0x01, 0x1f, 0x74, - 0x9b, 0xae, 0xaa, 0xae, 0xae, 0xfe, 0xaa, 0xbb, 0xea, 0xeb, 0x5d, 0x68, 0x47, 0x98, 0x32, 0x92, - 0xed, 0xa4, 0x59, 0xc2, 0x12, 0xd4, 0x94, 0x23, 0x37, 0x9d, 0xd8, 0x7f, 0xa9, 0x43, 0xf3, 0xd7, - 0x04, 0x67, 0x6c, 0x42, 0x30, 0x43, 0x5d, 0xa8, 0x04, 0xe9, 0xd0, 0x1a, 0x5b, 0xdb, 0x4d, 0xa7, - 0x12, 0xa4, 0x08, 0x41, 0x2d, 0x4d, 0x32, 0x36, 0xac, 0x8c, 0xad, 0xed, 0x8e, 0x23, 0xbe, 0xd1, - 0x5d, 0x80, 0x74, 0x3a, 0x09, 0x03, 0xcf, 0x9d, 0x66, 0xe1, 0xb0, 0x2a, 0x6c, 0x9b, 0x52, 0xf2, - 0x9b, 0x2c, 0x44, 0xdb, 0xd0, 0x8f, 0xf0, 0xa5, 0x7b, 0x91, 0x84, 0xd3, 0x88, 0xb8, 0x5e, 0x32, - 0x8d, 0xd9, 0xb0, 0x26, 0xa6, 0x77, 0x23, 0x7c, 0xf9, 0x4a, 0x88, 0xf7, 0xb8, 0x14, 0x8d, 0x79, - 0x54, 0x97, 0xee, 0x49, 0x10, 0x12, 0xf7, 0x9c, 0xcc, 0x86, 0x6b, 0x63, 0x6b, 0xbb, 0xe6, 0x40, - 0x84, 0x2f, 0x3f, 0x0f, 0x42, 0xf2, 0x94, 0xcc, 0xd0, 0x3d, 0x68, 0xf9, 0x98, 0x61, 0xd7, 0x23, - 0x31, 0x23, 
0xd9, 0xb0, 0x2e, 0xd6, 0x02, 0x2e, 0xda, 0x13, 0x12, 0x1e, 0x5f, 0x86, 0xbd, 0xf3, - 0xe1, 0xba, 0xd0, 0x88, 0x6f, 0x1e, 0x1f, 0xf6, 0xa3, 0x20, 0x76, 0x45, 0xe4, 0x0d, 0xb1, 0x74, - 0x53, 0x48, 0x9e, 0xf3, 0xf0, 0x3f, 0x85, 0x75, 0x19, 0x1b, 0x1d, 0x36, 0xc7, 0xd5, 0xed, 0xd6, - 0xee, 0x7b, 0x3b, 0x39, 0x1a, 0x3b, 0x32, 0xbc, 0x83, 0xf8, 0x24, 0xc9, 0x22, 0xcc, 0x82, 0x24, - 0x3e, 0x24, 0x94, 0xe2, 0x53, 0xe2, 0xe8, 0x39, 0xe8, 0x00, 0x5a, 0x31, 0x79, 0xed, 0x6a, 0x17, - 0x20, 0x5c, 0x6c, 0x2f, 0xb8, 0x38, 0x3e, 0x4b, 0x32, 0xb6, 0xc4, 0x0f, 0xc4, 0xe4, 0xf5, 0x2b, - 0xe5, 0xea, 0x05, 0xf4, 0x7c, 0x12, 0x12, 0x46, 0xfc, 0xdc, 0x5d, 0xeb, 0x86, 0xee, 0xba, 0xca, - 0x81, 0x76, 0xf9, 0x23, 0xe8, 0x9e, 0x61, 0xea, 0xc6, 0x49, 0xee, 0xb1, 0x3d, 0xb6, 0xb6, 0x1b, - 0x4e, 0xfb, 0x0c, 0xd3, 0xa3, 0x44, 0x5b, 0x7d, 0x01, 0x4d, 0xe2, 0xb9, 0xf4, 0x0c, 0x67, 0x3e, - 0x1d, 0xf6, 0xc5, 0x92, 0x0f, 0x17, 0x96, 0xdc, 0xf7, 0x8e, 0xb9, 0xc1, 0x92, 0x45, 0x1b, 0x44, - 0xaa, 0x28, 0x3a, 0x82, 0x0e, 0x07, 0xa3, 0x70, 0x36, 0xb8, 0xb1, 0x33, 0x8e, 0xe6, 0xbe, 0xf6, - 0xf7, 0x0a, 0x06, 0x1a, 0x91, 0xc2, 0x27, 0xba, 0xb1, 0x4f, 0x0d, 0x6b, 0xee, 0xf7, 0x7d, 0xe8, - 0x2b, 0x58, 0x0a, 0xb7, 0x1b, 0x02, 0x98, 0x8e, 0x00, 0x46, 0x1b, 0xda, 0x7f, 0xaa, 0xc0, 0x20, - 0xbf, 0x0d, 0x0e, 0xa1, 0x69, 0x12, 0x53, 0x82, 0x1e, 0xc2, 0x40, 0x1d, 0x67, 0x1a, 0x7c, 0x4d, - 0xdc, 0x30, 0x88, 0x02, 0x26, 0x2e, 0x49, 0xcd, 0xe9, 0x49, 0xc5, 0x71, 0xf0, 0x35, 0x79, 0xc6, - 0xc5, 0x68, 0x0b, 0xea, 0x21, 0xc1, 0x3e, 0xc9, 0xc4, 0x9d, 0x69, 0x3a, 0x6a, 0x84, 0xde, 0x87, - 0x5e, 0x44, 0x58, 0x16, 0x78, 0xd4, 0xc5, 0xbe, 0x9f, 0x11, 0x4a, 0xd5, 0xd5, 0xe9, 0x2a, 0xf1, - 0x23, 0x29, 0x45, 0x9f, 0xc0, 0x50, 0x1b, 0x06, 0xfc, 0x8c, 0x5f, 0xe0, 0xd0, 0xa5, 0xc4, 0x4b, - 0x62, 0x9f, 0xaa, 0x7b, 0xb4, 0xa5, 0xf4, 0x07, 0x4a, 0x7d, 0x2c, 0xb5, 0xe8, 0x09, 0xf4, 0x29, - 0x4b, 0x32, 0x7c, 0x4a, 0xdc, 0x09, 0xf6, 0xce, 0x09, 0x9f, 0xb1, 0x26, 0xc0, 0xbb, 0x63, 0x80, - 0x77, 0x2c, 0x4d, 0x1e, 0x4b, 0x0b, 0xa7, 0x47, 0x4b, 0x63, 0x6a, 0x7f, 0x5b, 0x85, 0xe1, 0xaa, - 0x6b, 0x20, 0xea, 0x83, 0x2f, 0xb6, 0xde, 0x71, 0x2a, 0x81, 0xcf, 0xef, 0x1f, 0x87, 0x44, 0xec, - 0xb5, 0xe6, 0x88, 0x6f, 0xf4, 0x2e, 0x80, 0x97, 0x84, 0x21, 0xf1, 0xf8, 0x44, 0xb5, 0x49, 0x43, - 0xc2, 0xef, 0xa7, 0xb8, 0xf2, 0x45, 0x69, 0xa8, 0x39, 0x4d, 0x2e, 0x91, 0x55, 0xe1, 0x3e, 0xb4, - 0x65, 0xfa, 0x94, 0x81, 0xac, 0x0a, 0x2d, 0x29, 0x93, 0x26, 0x1f, 0x00, 0xd2, 0xc7, 0x64, 0x32, - 0xcb, 0x0d, 0xeb, 0xc2, 0xb0, 0xaf, 0x34, 0x8f, 0x67, 0xda, 0xfa, 0x6d, 0x68, 0x66, 0x04, 0xfb, - 0x6e, 0x12, 0x87, 0x33, 0x51, 0x28, 0x1a, 0x4e, 0x83, 0x0b, 0xbe, 0x8c, 0xc3, 0x19, 0xfa, 0x31, - 0x0c, 0x32, 0x92, 0x86, 0x81, 0x87, 0xdd, 0x34, 0xc4, 0x1e, 0x89, 0x48, 0xac, 0x6b, 0x46, 0x5f, - 0x29, 0x9e, 0x6b, 0x39, 0x1a, 0xc2, 0xfa, 0x05, 0xc9, 0x28, 0xdf, 0x56, 0x53, 0x98, 0xe8, 0x21, - 0xea, 0x43, 0x95, 0xb1, 0x70, 0x08, 0x42, 0xca, 0x3f, 0xd1, 0x03, 0xe8, 0x7b, 0x49, 0x94, 0x62, - 0x8f, 0xb9, 0x19, 0xb9, 0x08, 0xc4, 0xa4, 0x96, 0x50, 0xf7, 0x94, 0xdc, 0x51, 0x62, 0xbe, 0x9d, - 0x28, 0xf1, 0x83, 0x93, 0x80, 0xf8, 0x2e, 0x66, 0x2a, 0xd9, 0xe2, 0xe2, 0x56, 0x9d, 0xbe, 0xd6, - 0x3c, 0x62, 0x32, 0xcd, 0x68, 0x07, 0x36, 0x32, 0x12, 0x25, 0x8c, 0xb8, 0x3a, 0xd9, 0x31, 0x8e, - 0xc8, 0xb0, 0x23, 0x70, 0x1e, 0x48, 0x95, 0xca, 0xf1, 0x11, 0x8e, 0x08, 0xf7, 0x3e, 0x67, 0xcf, - 0x6b, 0x6d, 0x57, 0x98, 0xf7, 0x4b, 0xe6, 0x4f, 0xc9, 0xcc, 0xfe, 0x87, 0x05, 0x77, 0xaf, 0x2c, - 0x39, 0x0b, 0x47, 0xe0, 0xba, 0x74, 0xff, 0x50, 0x08, 0xdb, 0x53, 0xb8, 0x77, 0x4d, 0x21, 0xb8, - 0x26, 0xd6, 0xca, 0x42, 0xac, 0x36, 
0x74, 0x88, 0xe7, 0x06, 0xb1, 0x4f, 0x2e, 0xdd, 0x49, 0xc0, - 0xe4, 0x15, 0xed, 0x38, 0x2d, 0xe2, 0x1d, 0x70, 0xd9, 0xe3, 0x80, 0x51, 0xfb, 0x1b, 0x0b, 0xba, - 0xe5, 0x3b, 0xc4, 0x6f, 0x01, 0x9b, 0xa5, 0x44, 0xf5, 0x4d, 0xf1, 0xad, 0x96, 0xae, 0xa8, 0x4e, - 0xea, 0xa3, 0x03, 0x80, 0x34, 0x4b, 0x52, 0x92, 0xb1, 0x80, 0x70, 0xbf, 0xfc, 0x5a, 0x3e, 0x58, - 0x79, 0x2d, 0x77, 0x9e, 0xe7, 0xb6, 0xfb, 0x31, 0xcb, 0x66, 0x8e, 0x31, 0x79, 0xf4, 0x29, 0xf4, - 0xe6, 0xd4, 0x1c, 0x1d, 0x9e, 0x55, 0x19, 0x00, 0xff, 0x44, 0x9b, 0xb0, 0x76, 0x81, 0xc3, 0x29, - 0x51, 0x21, 0xc8, 0xc1, 0x2f, 0x2a, 0x9f, 0x58, 0xf6, 0x3a, 0xac, 0xed, 0x47, 0x29, 0x9b, 0xf1, - 0x9d, 0xf4, 0x8e, 0xa7, 0x29, 0xc9, 0x1e, 0x87, 0x89, 0x77, 0xbe, 0x7f, 0xc9, 0x32, 0x8c, 0xbe, - 0x84, 0x2e, 0xc9, 0x30, 0x9d, 0x66, 0xfc, 0x56, 0xf9, 0x41, 0x7c, 0x2a, 0x7c, 0x96, 0x5b, 0xd2, - 0xdc, 0x9c, 0x9d, 0x7d, 0x39, 0x61, 0x4f, 0xd8, 0x3b, 0x1d, 0x62, 0x0e, 0x47, 0xbf, 0x85, 0x4e, - 0x49, 0xcf, 0xc1, 0xe2, 0x0d, 0x5c, 0x65, 0x45, 0x7c, 0xf3, 0xa2, 0x99, 0xe2, 0x2c, 0x60, 0x33, - 0x45, 0x34, 0xd4, 0x88, 0x97, 0x0a, 0x55, 0x78, 0x03, 0x5f, 0x82, 0xd6, 0x71, 0x9a, 0x52, 0x72, - 0xe0, 0x53, 0xfb, 0x21, 0x6c, 0x3e, 0x25, 0x24, 0xdd, 0x4b, 0xe2, 0x98, 0x78, 0x8c, 0xf8, 0x0e, - 0xf9, 0xc3, 0x94, 0x50, 0xc6, 0x97, 0x10, 0x77, 0x42, 0xe5, 0x83, 0x7f, 0xdb, 0x7f, 0xb7, 0xa0, - 0x2b, 0x8f, 0xcb, 0xb3, 0xc4, 0x13, 0x87, 0x84, 0x83, 0xc6, 0x19, 0x8c, 0x02, 0x6d, 0x9a, 0x85, - 0x73, 0xd4, 0xa6, 0x32, 0x4f, 0x6d, 0xee, 0x40, 0x43, 0xf4, 0xfe, 0x22, 0x98, 0x75, 0xde, 0xce, - 0x03, 0x9f, 0x16, 0x55, 0xcb, 0x97, 0xea, 0x9a, 0x50, 0xb7, 0x74, 0x7b, 0xe6, 0x26, 0x45, 0x67, - 0x58, 0x33, 0x3b, 0x83, 0xfd, 0x12, 0x36, 0x9e, 0x25, 0xc9, 0xf9, 0x34, 0x95, 0xe1, 0xe9, 0x4d, - 0x94, 0xf7, 0x6e, 0x8d, 0xab, 0x3c, 0x96, 0x7c, 0xef, 0xd7, 0x1d, 0x65, 0xfb, 0x7f, 0x16, 0x6c, - 0x96, 0xdd, 0xaa, 0x66, 0xf6, 0x7b, 0xd8, 0xc8, 0xfd, 0xba, 0xa1, 0xc2, 0x42, 0x2e, 0xd0, 0xda, - 0xfd, 0xd0, 0x48, 0xf3, 0xb2, 0xd9, 0x9a, 0x20, 0xf9, 0x1a, 0x44, 0x67, 0x70, 0x31, 0x27, 0xa1, - 0xa3, 0x4b, 0xe8, 0xcf, 0x9b, 0xf1, 0x22, 0x9c, 0xaf, 0xaa, 0x10, 0x6f, 0xe8, 0x99, 0xe8, 0x67, - 0xd0, 0x2c, 0x02, 0xa9, 0x88, 0x40, 0x36, 0x4a, 0x81, 0xa8, 0xb5, 0x0a, 0x2b, 0x7e, 0xbc, 0x49, - 0x96, 0x25, 0x99, 0x2a, 0x38, 0x72, 0x60, 0xff, 0x12, 0x1a, 0xdf, 0x3b, 0xbb, 0xf6, 0xbf, 0x2a, - 0xd0, 0x79, 0x44, 0x69, 0x70, 0x1a, 0xeb, 0x14, 0x6c, 0xc2, 0x9a, 0x6c, 0x2d, 0xb2, 0xd7, 0xcb, - 0x01, 0x1a, 0x43, 0x4b, 0xd5, 0x2d, 0x03, 0x7a, 0x53, 0x74, 0x6d, 0x49, 0x54, 0xb5, 0xac, 0x26, - 0x43, 0xe3, 0xdd, 0x62, 0x8e, 0xe8, 0xae, 0xad, 0x24, 0xba, 0x75, 0x83, 0xe8, 0xbe, 0x0d, 0x4d, - 0x31, 0x29, 0x4e, 0x7c, 0xa2, 0x18, 0x70, 0x83, 0x0b, 0x8e, 0x12, 0x9f, 0xa0, 0x5d, 0xd8, 0x8a, - 0x48, 0x94, 0x64, 0x33, 0x37, 0xc2, 0xa9, 0xcb, 0x79, 0xb6, 0xe0, 0x2e, 0xd1, 0x44, 0xd5, 0x5e, - 0x24, 0xb5, 0x87, 0x38, 0x3d, 0xc4, 0x97, 0x9c, 0xbe, 0x1c, 0x4e, 0xd0, 0x2e, 0xdc, 0xfe, 0x2a, - 0x0b, 0x18, 0x9e, 0x84, 0xa4, 0xcc, 0xdf, 0x65, 0x2d, 0xde, 0xd0, 0x4a, 0x83, 0xc4, 0xdb, 0x7f, - 0xb3, 0xa0, 0xab, 0x51, 0x53, 0x27, 0xac, 0x0f, 0xd5, 0x93, 0x3c, 0xcb, 0xfc, 0x53, 0xe7, 0xa2, - 0xb2, 0x2a, 0x17, 0x0b, 0x8f, 0x88, 0x1c, 0xf9, 0x9a, 0x89, 0x7c, 0x9e, 0xf4, 0x35, 0x23, 0xe9, - 0x1c, 0x1a, 0x3c, 0x65, 0x67, 0x1a, 0x1a, 0xfe, 0x6d, 0x9f, 0xc2, 0xe0, 0x98, 0x61, 0x16, 0x50, - 0x16, 0x78, 0x54, 0xa7, 0x73, 0x2e, 0x71, 0xd6, 0x75, 0x89, 0xab, 0xac, 0x4a, 0x5c, 0x35, 0x4f, - 0x9c, 0xfd, 0x1f, 0x0b, 0x90, 0xb9, 0x92, 0x82, 0xe0, 0x07, 0x58, 0x8a, 0x43, 0xc6, 0x12, 0xc6, - 0xd9, 0x20, 0x67, 0x5c, 0x8a, 0x37, 0x09, 0x09, 0x4f, 0x1f, 
0x3f, 0x0d, 0x53, 0x4a, 0x7c, 0xa9, - 0x95, 0xa4, 0xa9, 0xc1, 0x05, 0x42, 0x59, 0xe6, 0x5c, 0xf5, 0x39, 0xce, 0x65, 0x3f, 0x82, 0x96, - 0xea, 0x3f, 0x2f, 0x79, 0xef, 0xba, 0x3e, 0x7a, 0x15, 0x5d, 0xa5, 0x00, 0x62, 0x0c, 0xb0, 0x57, - 0x44, 0xbf, 0xac, 0x02, 0xff, 0x11, 0x6e, 0x17, 0x16, 0xcf, 0x02, 0xca, 0x74, 0x5e, 0x3e, 0x86, - 0xad, 0x20, 0xf6, 0xc2, 0xa9, 0x4f, 0xdc, 0x98, 0x77, 0xf0, 0x30, 0x7f, 0xbc, 0x58, 0x82, 0xad, - 0x6d, 0x2a, 0xed, 0x91, 0x50, 0xea, 0x47, 0xcc, 0x07, 0x80, 0xf4, 0x2c, 0xe2, 0xe5, 0x33, 0x2a, - 0x62, 0x46, 0x5f, 0x69, 0xf6, 0x3d, 0x65, 0x6d, 0xbf, 0x80, 0xad, 0xf9, 0xc5, 0x55, 0xaa, 0x7e, - 0x0e, 0xad, 0x02, 0x76, 0x5d, 0x07, 0x6f, 0x1b, 0xe5, 0xa7, 0x98, 0xe7, 0x98, 0x96, 0xf6, 0x4f, - 0xe0, 0xad, 0x42, 0xf5, 0x44, 0x14, 0xfa, 0xab, 0x1a, 0xd0, 0x08, 0x86, 0x8b, 0xe6, 0x32, 0x06, - 0xfb, 0xaf, 0x55, 0x68, 0x3f, 0x51, 0x37, 0x97, 0xd3, 0x18, 0x83, 0xb8, 0x48, 0xf6, 0x70, 0x1f, - 0xda, 0xa5, 0x0b, 0x29, 0xf9, 0x76, 0xeb, 0xc2, 0x78, 0x4d, 0x2f, 0x7b, 0x77, 0x57, 0x85, 0xd9, - 0xfc, 0xbb, 0xfb, 0x21, 0x0c, 0x4e, 0x32, 0x42, 0x16, 0x9f, 0xe8, 0x35, 0xa7, 0xc7, 0x15, 0xa6, - 0xed, 0x0e, 0x6c, 0x60, 0x8f, 0x05, 0x17, 0x73, 0xd6, 0xf2, 0x7c, 0x0d, 0xa4, 0xca, 0xb4, 0xff, - 0x3c, 0x0f, 0x34, 0x88, 0x4f, 0x12, 0x3a, 0xac, 0x7f, 0xf7, 0x27, 0xb6, 0xda, 0x0d, 0xd7, 0x50, - 0xf4, 0x1c, 0xba, 0xfa, 0xa9, 0xa6, 0x3c, 0xad, 0xdf, 0xf8, 0x19, 0xd8, 0x26, 0x85, 0x8a, 0x1a, - 0xbc, 0xb9, 0xb4, 0x93, 0x86, 0xdc, 0x89, 0x54, 0x99, 0x85, 0xed, 0xdf, 0x15, 0x68, 0x38, 0xd8, - 0x3b, 0x7f, 0xb3, 0xf3, 0xf1, 0x19, 0xf4, 0xf2, 0x1e, 0x51, 0x4a, 0xc9, 0x5b, 0x06, 0x90, 0xe6, - 0xd1, 0x73, 0x3a, 0xbe, 0x31, 0x5a, 0x09, 0xdb, 0xfa, 0x2a, 0xd8, 0xfe, 0x59, 0x81, 0xee, 0x93, - 0xbc, 0x6f, 0xbd, 0xd9, 0xe0, 0xed, 0x02, 0xf0, 0x46, 0x5b, 0xc2, 0xcd, 0x24, 0x26, 0xfa, 0x78, - 0x38, 0xcd, 0x4c, 0x7d, 0xdd, 0x1c, 0xaf, 0x6f, 0x2a, 0xd0, 0x7e, 0x99, 0xa4, 0x49, 0x98, 0x9c, - 0xce, 0xde, 0x6c, 0xb4, 0xf6, 0x61, 0x60, 0x70, 0x98, 0x12, 0x68, 0x77, 0xe6, 0x0e, 0x5b, 0x71, - 0x38, 0x9c, 0x9e, 0x5f, 0x1a, 0xdf, 0x1c, 0xc0, 0x0d, 0x18, 0x28, 0x5e, 0x5f, 0xb4, 0x14, 0xfb, - 0xcf, 0x16, 0x20, 0x53, 0xaa, 0x6a, 0xfd, 0xaf, 0xa0, 0xc3, 0x14, 0xd6, 0x22, 0x3e, 0xf5, 0xb8, - 0x31, 0xef, 0x82, 0x99, 0x0b, 0xa7, 0xcd, 0xcc, 0xcc, 0xfc, 0x14, 0x36, 0x17, 0x7e, 0x06, 0xe2, - 0x84, 0x4a, 0x66, 0x64, 0x30, 0xf7, 0x4b, 0xd0, 0xe1, 0xc4, 0xfe, 0x18, 0x6e, 0x4b, 0x12, 0xad, - 0xfb, 0x90, 0xee, 0x0f, 0x0b, 0x6c, 0xb8, 0x53, 0xb0, 0x61, 0xfb, 0x5b, 0x0b, 0xb6, 0xe6, 0xa7, - 0xa9, 0xf8, 0xaf, 0x9a, 0x87, 0x30, 0x20, 0x55, 0x2f, 0x4d, 0x5e, 0x2f, 0xe9, 0xf4, 0x47, 0x0b, - 0xbc, 0x7e, 0xde, 0xf7, 0x8e, 0xae, 0xa3, 0x05, 0xb5, 0xef, 0xd3, 0xb2, 0x80, 0x8e, 0x30, 0x0c, - 0x16, 0xcc, 0xf8, 0xab, 0x48, 0xaf, 0xab, 0x62, 0x5a, 0x57, 0x13, 0xbf, 0x07, 0xb1, 0xb7, 0xef, - 0xc1, 0xdd, 0x2f, 0x08, 0x3b, 0x14, 0x36, 0x7b, 0x49, 0x7c, 0x12, 0x9c, 0x4e, 0x33, 0x69, 0x54, - 0xa4, 0xf6, 0xdd, 0x55, 0x16, 0x0a, 0xa6, 0x25, 0xbf, 0xb5, 0x59, 0x37, 0xfe, 0xad, 0xad, 0x72, - 0xd5, 0x6f, 0x6d, 0xbb, 0xff, 0xad, 0xc3, 0xfa, 0x31, 0xc1, 0xaf, 0x09, 0xe1, 0x4f, 0xfb, 0xce, - 0x31, 0x89, 0xfd, 0xe2, 0x57, 0xf4, 0x4d, 0x63, 0x8f, 0xb9, 0x74, 0xf4, 0xce, 0x32, 0x69, 0x4e, - 0x01, 0x6e, 0x6d, 0x5b, 0x1f, 0x5a, 0xe8, 0x05, 0x74, 0x4a, 0x2f, 0x5a, 0x74, 0xcf, 0x98, 0xb4, - 0xec, 0xad, 0x3b, 0xba, 0xb3, 0xd0, 0x10, 0x35, 0xaa, 0xb9, 0xcb, 0xb6, 0xf9, 0x92, 0x43, 0xef, - 0xae, 0x7c, 0xe2, 0x49, 0x87, 0xf7, 0xae, 0x79, 0x02, 0xda, 0xb7, 0xd0, 0x67, 0x50, 0x97, 0x94, - 0x1f, 0x0d, 0x0d, 0xe3, 0xd2, 0xdb, 0xa9, 0x14, 0x57, 0xf9, 0x7d, 0x60, 0xdf, 0x42, 
0x4f, 0x01, - 0x0a, 0xd2, 0x8c, 0xde, 0x29, 0xfd, 0x0c, 0x32, 0xc7, 0xda, 0x47, 0x77, 0x57, 0x68, 0x73, 0x67, - 0x5f, 0x41, 0xb7, 0x4c, 0xed, 0xd0, 0x78, 0x29, 0x7b, 0x33, 0xea, 0xc3, 0xe8, 0xfe, 0x15, 0x16, - 0xb9, 0xe3, 0xdf, 0x41, 0x7f, 0x9e, 0xb1, 0x21, 0x7b, 0xe9, 0xc4, 0x12, 0xfb, 0x1b, 0xbd, 0x77, - 0xa5, 0x8d, 0x09, 0x42, 0x51, 0xa2, 0x4a, 0x20, 0x2c, 0xd4, 0xb3, 0x12, 0x08, 0x8b, 0x75, 0x4d, - 0x82, 0x50, 0xbe, 0xd7, 0x25, 0x10, 0x96, 0x56, 0xa1, 0x12, 0x08, 0xcb, 0x8b, 0x82, 0x7d, 0x0b, - 0x25, 0xb0, 0xb5, 0xfc, 0xb6, 0x21, 0xf3, 0x27, 0xa1, 0x2b, 0xaf, 0xec, 0xe8, 0xc1, 0x77, 0xb0, - 0xd4, 0x0b, 0x4e, 0xea, 0xe2, 0x2f, 0xaa, 0x8f, 0xfe, 0x1f, 0x00, 0x00, 0xff, 0xff, 0x6c, 0x64, - 0x5c, 0xbc, 0xb2, 0x1a, 0x00, 0x00, + // 2183 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xd4, 0x59, 0xcd, 0x6f, 0x1b, 0xc7, + 0x15, 0xf7, 0x92, 0x94, 0x48, 0x3e, 0x7e, 0x8f, 0x64, 0x85, 0x66, 0x62, 0x8b, 0xde, 0xa4, 0x88, + 0xec, 0xa6, 0x6a, 0xaa, 0x04, 0x68, 0xd0, 0x34, 0x08, 0x2c, 0x59, 0x71, 0x05, 0x5b, 0x8a, 0xbd, + 0x72, 0x1d, 0xa0, 0x40, 0xb1, 0x19, 0xee, 0x8e, 0xa4, 0x85, 0xf6, 0xab, 0xbb, 0x43, 0x59, 0x4c, + 0x2f, 0x05, 0x7a, 0xec, 0xa9, 0xe8, 0xa1, 0xff, 0x42, 0x2f, 0x3d, 0xb5, 0x97, 0x5e, 0x72, 0xe9, + 0x7f, 0xd4, 0x6b, 0x2e, 0xc5, 0x7c, 0xed, 0xce, 0x2e, 0x49, 0x29, 0x0a, 0x90, 0x83, 0x6f, 0xbb, + 0xef, 0xbd, 0x79, 0xf3, 0xf6, 0xf7, 0xe6, 0xbd, 0xf9, 0x3d, 0x12, 0xda, 0x01, 0x4e, 0x29, 0x49, + 0xb6, 0xe3, 0x24, 0xa2, 0x11, 0x6a, 0x8a, 0x37, 0x3b, 0x9e, 0x98, 0x7f, 0x59, 0x85, 0xe6, 0x6f, + 0x08, 0x4e, 0xe8, 0x84, 0x60, 0x8a, 0xba, 0x50, 0xf1, 0xe2, 0xa1, 0x31, 0x36, 0xb6, 0x9a, 0x56, + 0xc5, 0x8b, 0x11, 0x82, 0x5a, 0x1c, 0x25, 0x74, 0x58, 0x19, 0x1b, 0x5b, 0x1d, 0x8b, 0x3f, 0xa3, + 0xbb, 0x00, 0xf1, 0x74, 0xe2, 0x7b, 0x8e, 0x3d, 0x4d, 0xfc, 0x61, 0x95, 0xdb, 0x36, 0x85, 0xe4, + 0xb7, 0x89, 0x8f, 0xb6, 0xa0, 0x1f, 0xe0, 0x4b, 0xfb, 0x22, 0xf2, 0xa7, 0x01, 0xb1, 0x9d, 0x68, + 0x1a, 0xd2, 0x61, 0x8d, 0x2f, 0xef, 0x06, 0xf8, 0xf2, 0x15, 0x17, 0xef, 0x31, 0x29, 0x1a, 0xb3, + 0xa8, 0x2e, 0xed, 0x13, 0xcf, 0x27, 0xf6, 0x39, 0x99, 0x0d, 0x57, 0xc6, 0xc6, 0x56, 0xcd, 0x82, + 0x00, 0x5f, 0x7e, 0xe1, 0xf9, 0xe4, 0x29, 0x99, 0xa1, 0x4d, 0x68, 0xb9, 0x98, 0x62, 0xdb, 0x21, + 0x21, 0x25, 0xc9, 0x70, 0x95, 0xef, 0x05, 0x4c, 0xb4, 0xc7, 0x25, 0x2c, 0xbe, 0x04, 0x3b, 0xe7, + 0xc3, 0x3a, 0xd7, 0xf0, 0x67, 0x16, 0x1f, 0x76, 0x03, 0x2f, 0xb4, 0x79, 0xe4, 0x0d, 0xbe, 0x75, + 0x93, 0x4b, 0x9e, 0xb3, 0xf0, 0x3f, 0x83, 0xba, 0x88, 0x2d, 0x1d, 0x36, 0xc7, 0xd5, 0xad, 0xd6, + 0xce, 0xbb, 0xdb, 0x19, 0x1a, 0xdb, 0x22, 0xbc, 0x83, 0xf0, 0x24, 0x4a, 0x02, 0x4c, 0xbd, 0x28, + 0x3c, 0x24, 0x69, 0x8a, 0x4f, 0x89, 0xa5, 0xd6, 0xa0, 0x03, 0x68, 0x85, 0xe4, 0xb5, 0xad, 0x5c, + 0x00, 0x77, 0xb1, 0x35, 0xe7, 0xe2, 0xf8, 0x2c, 0x4a, 0xe8, 0x02, 0x3f, 0x10, 0x92, 0xd7, 0xaf, + 0xa4, 0xab, 0x17, 0xd0, 0x73, 0x89, 0x4f, 0x28, 0x71, 0x33, 0x77, 0xad, 0x1b, 0xba, 0xeb, 0x4a, + 0x07, 0xca, 0xe5, 0x7b, 0xd0, 0x3d, 0xc3, 0xa9, 0x1d, 0x46, 0x99, 0xc7, 0xf6, 0xd8, 0xd8, 0x6a, + 0x58, 0xed, 0x33, 0x9c, 0x1e, 0x45, 0xca, 0xea, 0x09, 0x34, 0x89, 0x63, 0xa7, 0x67, 0x38, 0x71, + 0xd3, 0x61, 0x9f, 0x6f, 0xf9, 0x70, 0x6e, 0xcb, 0x7d, 0xe7, 0x98, 0x19, 0x2c, 0xd8, 0xb4, 0x41, + 0x84, 0x2a, 0x45, 0x47, 0xd0, 0x61, 0x60, 0xe4, 0xce, 0x06, 0x37, 0x76, 0xc6, 0xd0, 0xdc, 0x57, + 0xfe, 0x5e, 0xc1, 0x40, 0x21, 0x92, 0xfb, 0x44, 0x37, 0xf6, 0xa9, 0x60, 0xcd, 0xfc, 0xbe, 0x0f, + 0x7d, 0x09, 0x4b, 0xee, 0x76, 0x8d, 0x03, 0xd3, 0xe1, 0xc0, 0x28, 0x43, 0xf3, 0x4f, 0x15, 0x18, + 0x64, 0xd5, 0x60, 0x91, 
0x34, 0x8e, 0xc2, 0x94, 0xa0, 0x87, 0x30, 0x90, 0xc7, 0x39, 0xf5, 0xbe, + 0x21, 0xb6, 0xef, 0x05, 0x1e, 0xe5, 0x45, 0x52, 0xb3, 0x7a, 0x42, 0x71, 0xec, 0x7d, 0x43, 0x9e, + 0x31, 0x31, 0xda, 0x80, 0x55, 0x9f, 0x60, 0x97, 0x24, 0xbc, 0x66, 0x9a, 0x96, 0x7c, 0x43, 0xef, + 0x43, 0x2f, 0x20, 0x34, 0xf1, 0x9c, 0xd4, 0xc6, 0xae, 0x9b, 0x90, 0x34, 0x95, 0xa5, 0xd3, 0x95, + 0xe2, 0x47, 0x42, 0x8a, 0x3e, 0x81, 0xa1, 0x32, 0xf4, 0xd8, 0x19, 0xbf, 0xc0, 0xbe, 0x9d, 0x12, + 0x27, 0x0a, 0xdd, 0x54, 0xd6, 0xd1, 0x86, 0xd4, 0x1f, 0x48, 0xf5, 0xb1, 0xd0, 0xa2, 0xc7, 0xd0, + 0x4f, 0x69, 0x94, 0xe0, 0x53, 0x62, 0x4f, 0xb0, 0x73, 0x4e, 0xd8, 0x8a, 0x15, 0x0e, 0xde, 0x1d, + 0x0d, 0xbc, 0x63, 0x61, 0xb2, 0x2b, 0x2c, 0xac, 0x5e, 0x5a, 0x78, 0x4f, 0xcd, 0xef, 0xaa, 0x30, + 0x5c, 0x56, 0x06, 0xbc, 0x3f, 0xb8, 0xfc, 0xd3, 0x3b, 0x56, 0xc5, 0x73, 0x59, 0xfd, 0x31, 0x48, + 0xf8, 0xb7, 0xd6, 0x2c, 0xfe, 0x8c, 0xee, 0x01, 0x38, 0x91, 0xef, 0x13, 0x87, 0x2d, 0x94, 0x1f, + 0xa9, 0x49, 0x58, 0x7d, 0xf2, 0x92, 0xcf, 0x5b, 0x43, 0xcd, 0x6a, 0x32, 0x89, 0xe8, 0x0a, 0xf7, + 0xa1, 0x2d, 0xd2, 0x27, 0x0d, 0x44, 0x57, 0x68, 0x09, 0x99, 0x30, 0xf9, 0x00, 0x90, 0x3a, 0x26, + 0x93, 0x59, 0x66, 0xb8, 0xca, 0x0d, 0xfb, 0x52, 0xb3, 0x3b, 0x53, 0xd6, 0x6f, 0x43, 0x33, 0x21, + 0xd8, 0xb5, 0xa3, 0xd0, 0x9f, 0xf1, 0x46, 0xd1, 0xb0, 0x1a, 0x4c, 0xf0, 0x65, 0xe8, 0xcf, 0xd0, + 0x4f, 0x61, 0x90, 0x90, 0xd8, 0xf7, 0x1c, 0x6c, 0xc7, 0x3e, 0x76, 0x48, 0x40, 0x42, 0xd5, 0x33, + 0xfa, 0x52, 0xf1, 0x5c, 0xc9, 0xd1, 0x10, 0xea, 0x17, 0x24, 0x49, 0xd9, 0x67, 0x35, 0xb9, 0x89, + 0x7a, 0x45, 0x7d, 0xa8, 0x52, 0xea, 0x0f, 0x81, 0x4b, 0xd9, 0x23, 0x7a, 0x00, 0x7d, 0x27, 0x0a, + 0x62, 0xec, 0x50, 0x3b, 0x21, 0x17, 0x1e, 0x5f, 0xd4, 0xe2, 0xea, 0x9e, 0x94, 0x5b, 0x52, 0xcc, + 0x3e, 0x27, 0x88, 0x5c, 0xef, 0xc4, 0x23, 0xae, 0x8d, 0xa9, 0x4c, 0x36, 0x2f, 0xdc, 0xaa, 0xd5, + 0x57, 0x9a, 0x47, 0x54, 0xa4, 0x19, 0x6d, 0xc3, 0x5a, 0x42, 0x82, 0x88, 0x12, 0x5b, 0x25, 0x3b, + 0xc4, 0x01, 0x19, 0x76, 0x38, 0xce, 0x03, 0xa1, 0x92, 0x39, 0x3e, 0xc2, 0x01, 0x61, 0xde, 0x4b, + 0xf6, 0xac, 0xd7, 0x76, 0xb9, 0x79, 0xbf, 0x60, 0xfe, 0x94, 0xcc, 0xcc, 0x7f, 0x18, 0x70, 0xf7, + 0xca, 0x96, 0x33, 0x77, 0x04, 0xae, 0x4b, 0xf7, 0x8f, 0x85, 0xb0, 0x39, 0x85, 0xcd, 0x6b, 0x1a, + 0xc1, 0x35, 0xb1, 0x56, 0xe6, 0x62, 0x35, 0xa1, 0x43, 0x1c, 0xdb, 0x0b, 0x5d, 0x72, 0x69, 0x4f, + 0x3c, 0x2a, 0x4a, 0xb4, 0x63, 0xb5, 0x88, 0x73, 0xc0, 0x64, 0xbb, 0x1e, 0x4d, 0xcd, 0x6f, 0x0d, + 0xe8, 0x16, 0x6b, 0x88, 0x55, 0x01, 0x9d, 0xc5, 0x44, 0xde, 0x9b, 0xfc, 0x59, 0x6e, 0x5d, 0x91, + 0x37, 0xa9, 0x8b, 0x0e, 0x00, 0xe2, 0x24, 0x8a, 0x49, 0x42, 0x3d, 0xc2, 0xfc, 0xb2, 0xb2, 0x7c, + 0xb0, 0xb4, 0x2c, 0xb7, 0x9f, 0x67, 0xb6, 0xfb, 0x21, 0x4d, 0x66, 0x96, 0xb6, 0x78, 0xf4, 0x19, + 0xf4, 0x4a, 0x6a, 0x86, 0x0e, 0xcb, 0xaa, 0x08, 0x80, 0x3d, 0xa2, 0x75, 0x58, 0xb9, 0xc0, 0xfe, + 0x94, 0xc8, 0x10, 0xc4, 0xcb, 0xaf, 0x2a, 0x9f, 0x18, 0x66, 0x1d, 0x56, 0xf6, 0x83, 0x98, 0xce, + 0xd8, 0x97, 0xf4, 0x8e, 0xa7, 0x31, 0x49, 0x76, 0xfd, 0xc8, 0x39, 0xdf, 0xbf, 0xa4, 0x09, 0x46, + 0x5f, 0x42, 0x97, 0x24, 0x38, 0x9d, 0x26, 0xac, 0xaa, 0x5c, 0x2f, 0x3c, 0xe5, 0x3e, 0x8b, 0x57, + 0x52, 0x69, 0xcd, 0xf6, 0xbe, 0x58, 0xb0, 0xc7, 0xed, 0xad, 0x0e, 0xd1, 0x5f, 0x47, 0xbf, 0x83, + 0x4e, 0x41, 0xcf, 0xc0, 0x62, 0x17, 0xb8, 0xcc, 0x0a, 0x7f, 0x66, 0x4d, 0x33, 0xc6, 0x89, 0x47, + 0x67, 0x92, 0x68, 0xc8, 0x37, 0xd6, 0x2a, 0x64, 0xe3, 0xf5, 0x5c, 0x01, 0x5a, 0xc7, 0x6a, 0x0a, + 0xc9, 0x81, 0x9b, 0x9a, 0x4f, 0x60, 0xfd, 0x29, 0x21, 0xf1, 0x5e, 0x14, 0x86, 0xc4, 0xa1, 0xc4, + 0xb5, 0xc8, 0x1f, 0xa6, 0x24, 0xa5, 0x6c, 0x0b, 
0x5e, 0x13, 0x32, 0x1f, 0xec, 0x99, 0x75, 0x81, + 0xd3, 0x24, 0x76, 0x6c, 0x8d, 0xce, 0x34, 0x98, 0x80, 0x71, 0x02, 0xf3, 0xef, 0x06, 0x74, 0xc5, + 0x59, 0x7a, 0x16, 0x39, 0xfc, 0x04, 0x31, 0x44, 0x19, 0xbd, 0x91, 0x88, 0x4e, 0x13, 0xbf, 0xc4, + 0x7b, 0x2a, 0x65, 0xde, 0x73, 0x07, 0x1a, 0x9c, 0x18, 0xe4, 0x91, 0xd6, 0xd9, 0x5d, 0xef, 0xb9, + 0x69, 0xde, 0xd2, 0x5c, 0xa1, 0xae, 0x71, 0x75, 0x4b, 0xdd, 0xdd, 0xcc, 0x24, 0xbf, 0x36, 0x56, + 0xf4, 0x6b, 0xc3, 0x7c, 0x09, 0x6b, 0xcf, 0xa2, 0xe8, 0x7c, 0x1a, 0x8b, 0xf0, 0xd4, 0x17, 0x16, + 0x81, 0x31, 0xc6, 0x55, 0x16, 0x4b, 0x06, 0xcc, 0x75, 0xe7, 0xdc, 0xfc, 0x9f, 0x01, 0xeb, 0x45, + 0xb7, 0xf2, 0xa6, 0xfb, 0x1a, 0xd6, 0x32, 0xbf, 0xb6, 0x2f, 0xb1, 0x10, 0x1b, 0xb4, 0x76, 0x3e, + 0xd4, 0xce, 0xc0, 0xa2, 0xd5, 0x8a, 0x3d, 0xb9, 0x0a, 0x44, 0x6b, 0x70, 0x51, 0x92, 0xa4, 0xa3, + 0x4b, 0xe8, 0x97, 0xcd, 0x58, 0x6e, 0xb2, 0x5d, 0x25, 0xe2, 0x0d, 0xb5, 0x12, 0xfd, 0x02, 0x9a, + 0x79, 0x20, 0x15, 0x1e, 0xc8, 0x5a, 0x21, 0x10, 0xb9, 0x57, 0x6e, 0xc5, 0xce, 0x3e, 0x49, 0x92, + 0x28, 0x91, 0xdd, 0x48, 0xbc, 0x98, 0x9f, 0x42, 0xe3, 0x07, 0x67, 0xd7, 0xfc, 0x57, 0x05, 0x3a, + 0x8f, 0xd2, 0xd4, 0x3b, 0x0d, 0x55, 0x0a, 0xd6, 0x61, 0x45, 0xdc, 0x3b, 0x82, 0x08, 0x88, 0x17, + 0x34, 0x86, 0x96, 0x6c, 0x6a, 0x1a, 0xf4, 0xba, 0xe8, 0xda, 0x7e, 0x29, 0x1b, 0x5d, 0x4d, 0x84, + 0xc6, 0xae, 0x92, 0x12, 0x0b, 0x5e, 0x59, 0xca, 0x82, 0x57, 0x35, 0x16, 0xfc, 0x36, 0x34, 0xf9, + 0xa2, 0x30, 0x72, 0x89, 0xa4, 0xc7, 0x0d, 0x26, 0x38, 0x8a, 0x5c, 0x82, 0x76, 0x60, 0x23, 0x20, + 0x41, 0x94, 0xcc, 0xec, 0x00, 0xc7, 0x36, 0x23, 0xe1, 0x9c, 0xd8, 0x04, 0x13, 0xd9, 0x98, 0x91, + 0xd0, 0x1e, 0xe2, 0xf8, 0x10, 0x5f, 0x32, 0x6e, 0x73, 0x38, 0x41, 0x3b, 0x70, 0xfb, 0xab, 0xc4, + 0xa3, 0x78, 0xe2, 0x93, 0x22, 0xb9, 0x17, 0x8d, 0x7a, 0x4d, 0x29, 0x35, 0x86, 0x6f, 0xfe, 0xcd, + 0x80, 0xae, 0x42, 0x4d, 0x9e, 0xb0, 0x3e, 0x54, 0x4f, 0xb2, 0x2c, 0xb3, 0x47, 0x95, 0x8b, 0xca, + 0xb2, 0x5c, 0xcc, 0x4d, 0x18, 0x19, 0xf2, 0x35, 0x1d, 0xf9, 0x2c, 0xe9, 0x2b, 0x5a, 0xd2, 0x19, + 0x34, 0x78, 0x4a, 0xcf, 0x14, 0x34, 0xec, 0xd9, 0x3c, 0x85, 0xc1, 0x31, 0xc5, 0xd4, 0x4b, 0xa9, + 0xe7, 0xa4, 0x2a, 0x9d, 0xa5, 0xc4, 0x19, 0xd7, 0x25, 0xae, 0xb2, 0x2c, 0x71, 0xd5, 0x2c, 0x71, + 0xe6, 0x7f, 0x0d, 0x40, 0xfa, 0x4e, 0x12, 0x82, 0x1f, 0x61, 0x2b, 0x06, 0x19, 0x8d, 0x28, 0xa3, + 0x8a, 0x8c, 0x8e, 0x49, 0x52, 0xc5, 0x25, 0x2c, 0x7d, 0xec, 0x34, 0x4c, 0x53, 0xe2, 0x0a, 0xad, + 0x60, 0x54, 0x0d, 0x26, 0xe0, 0xca, 0x22, 0x21, 0x5b, 0x2d, 0x11, 0x32, 0xf3, 0x11, 0xb4, 0xe4, + 0xe5, 0xf4, 0x92, 0x5d, 0x6c, 0xd7, 0x47, 0x2f, 0xa3, 0xab, 0xe4, 0x40, 0x8c, 0x01, 0xf6, 0xf2, + 0xe8, 0x17, 0xb4, 0x67, 0xf3, 0x8f, 0x70, 0x3b, 0xb7, 0x78, 0xe6, 0xa5, 0x54, 0xe5, 0xe5, 0x63, + 0xd8, 0xf0, 0x42, 0xc7, 0x9f, 0xba, 0xc4, 0x0e, 0xd9, 0xf5, 0xee, 0x67, 0x93, 0x8d, 0xc1, 0xa9, + 0xdc, 0xba, 0xd4, 0x1e, 0x71, 0xa5, 0x9a, 0x70, 0x3e, 0x00, 0xa4, 0x56, 0x11, 0x27, 0x5b, 0x51, + 0xe1, 0x2b, 0xfa, 0x52, 0xb3, 0xef, 0x48, 0x6b, 0xf3, 0x05, 0x6c, 0x94, 0x37, 0x97, 0xa9, 0xfa, + 0x25, 0xb4, 0x72, 0xd8, 0x55, 0x1f, 0xbc, 0xad, 0xb5, 0x9f, 0x7c, 0x9d, 0xa5, 0x5b, 0x9a, 0x3f, + 0x83, 0xb7, 0x72, 0xd5, 0x63, 0xde, 0xe8, 0xaf, 0xb8, 0x9d, 0xcc, 0x11, 0x0c, 0xe7, 0xcd, 0x45, + 0x0c, 0xe6, 0x5f, 0xab, 0xd0, 0x7e, 0x2c, 0x2b, 0x97, 0x71, 0x1c, 0x8d, 0xd5, 0x08, 0x6a, 0x71, + 0x1f, 0xda, 0x85, 0x82, 0x14, 0x64, 0xbc, 0x75, 0xa1, 0x8d, 0xda, 0x8b, 0x86, 0xf2, 0x2a, 0x37, + 0x2b, 0x0f, 0xe5, 0x0f, 0x61, 0x70, 0x92, 0x10, 0x32, 0x3f, 0xbf, 0xd7, 0xac, 0x1e, 0x53, 0xe8, + 0xb6, 0xdb, 0xb0, 0x86, 0x1d, 0xea, 0x5d, 0x94, 0xac, 0xc5, 0xf9, 0x1a, 
0x08, 0x95, 0x6e, 0xff, + 0x45, 0x16, 0xa8, 0x17, 0x9e, 0x44, 0xe9, 0x70, 0xf5, 0xfb, 0xcf, 0xdf, 0xf2, 0x6b, 0x98, 0x26, + 0x45, 0xcf, 0xa1, 0xab, 0xe6, 0x38, 0xe9, 0xa9, 0x7e, 0xe3, 0x19, 0xb1, 0x4d, 0x72, 0x55, 0xaa, + 0x91, 0xea, 0xc2, 0x97, 0x34, 0xc4, 0x97, 0x08, 0x95, 0xde, 0xd8, 0xfe, 0x5d, 0x81, 0x86, 0x85, + 0x9d, 0xf3, 0x37, 0x3b, 0x1f, 0x9f, 0x43, 0x2f, 0xbb, 0x23, 0x0a, 0x29, 0x79, 0x4b, 0x03, 0x52, + 0x3f, 0x7a, 0x56, 0xc7, 0xd5, 0xde, 0x96, 0xc2, 0x56, 0x5f, 0x06, 0xdb, 0x3f, 0x2b, 0xd0, 0x7d, + 0x9c, 0xdd, 0x5b, 0x6f, 0x36, 0x78, 0x3b, 0x00, 0xec, 0xa2, 0x2d, 0xe0, 0xa6, 0x13, 0x13, 0x75, + 0x3c, 0xac, 0x66, 0x22, 0x9f, 0x6e, 0x8e, 0xd7, 0xb7, 0x15, 0x68, 0xbf, 0x8c, 0xe2, 0xc8, 0x8f, + 0x4e, 0x67, 0x6f, 0x36, 0x5a, 0xfb, 0x30, 0xd0, 0x38, 0x4c, 0x01, 0xb4, 0x3b, 0xa5, 0xc3, 0x96, + 0x1f, 0x0e, 0xab, 0xe7, 0x16, 0xde, 0x6f, 0x0e, 0xe0, 0x1a, 0x0c, 0x24, 0xaf, 0xcf, 0xaf, 0x14, + 0xf3, 0xcf, 0x06, 0x20, 0x5d, 0x2a, 0x7b, 0xfd, 0xaf, 0xa1, 0x43, 0x25, 0xd6, 0x3c, 0x3e, 0x39, + 0xf9, 0xe8, 0xb5, 0xa0, 0xe7, 0xc2, 0x6a, 0x53, 0x3d, 0x33, 0x3f, 0x87, 0xf5, 0xb9, 0xdf, 0x88, + 0x18, 0xa1, 0x12, 0x19, 0x19, 0x94, 0x7e, 0x26, 0x3a, 0x9c, 0x98, 0x1f, 0xc3, 0x6d, 0x41, 0xa2, + 0xd5, 0x3d, 0xa4, 0xee, 0x87, 0x39, 0x36, 0xdc, 0xc9, 0xd9, 0xb0, 0xf9, 0x9d, 0x01, 0x1b, 0xe5, + 0x65, 0x32, 0xfe, 0xab, 0xd6, 0x21, 0x0c, 0x48, 0xf6, 0x4b, 0x9d, 0xd7, 0x0b, 0x3a, 0xfd, 0xd1, + 0x1c, 0xaf, 0x2f, 0xfb, 0xde, 0x56, 0x7d, 0x34, 0xa7, 0xf6, 0xfd, 0xb4, 0x28, 0x48, 0x47, 0x18, + 0x06, 0x73, 0x66, 0x6c, 0x2a, 0x52, 0xfb, 0xca, 0x98, 0xea, 0x72, 0xe1, 0x0f, 0x20, 0xf6, 0xe6, + 0x26, 0xdc, 0x7d, 0x42, 0xe8, 0x21, 0xb7, 0xd9, 0x8b, 0xc2, 0x13, 0xef, 0x74, 0x9a, 0x08, 0xa3, + 0x3c, 0xb5, 0xf7, 0x96, 0x59, 0x48, 0x98, 0x16, 0xfc, 0x10, 0x67, 0xdc, 0xf8, 0x87, 0xb8, 0xca, + 0x55, 0x3f, 0xc4, 0x99, 0x9f, 0xc2, 0x90, 0x9d, 0x2c, 0x19, 0x85, 0xef, 0x91, 0x90, 0x66, 0x3c, + 0x73, 0x13, 0x5a, 0x0e, 0x97, 0xd8, 0xda, 0x4f, 0x06, 0x20, 0x44, 0x8c, 0x5f, 0x99, 0xbb, 0x70, + 0x67, 0xc1, 0x62, 0x19, 0xfc, 0x4f, 0xa0, 0xcb, 0xa7, 0x58, 0x19, 0x39, 0x51, 0xb3, 0x5f, 0x87, + 0x49, 0x1f, 0x29, 0xe1, 0xce, 0x7f, 0xea, 0x50, 0x3f, 0x26, 0xf8, 0x35, 0x21, 0x2e, 0x3a, 0x80, + 0xce, 0x31, 0x09, 0xdd, 0xfc, 0x37, 0xfe, 0x75, 0x0d, 0xe4, 0x4c, 0x3a, 0x7a, 0x67, 0x91, 0x34, + 0xe3, 0x20, 0xb7, 0xb6, 0x8c, 0x0f, 0x0d, 0xf4, 0x02, 0x3a, 0x85, 0x79, 0x1b, 0x6d, 0x6a, 0x8b, + 0x16, 0x4d, 0xe2, 0xa3, 0x3b, 0x73, 0x37, 0xb2, 0x4a, 0x6b, 0xe6, 0xb2, 0xad, 0x8f, 0x92, 0xe8, + 0xde, 0xd2, 0x19, 0x53, 0x38, 0xdc, 0xbc, 0x66, 0x06, 0x35, 0x6f, 0xa1, 0xcf, 0x61, 0x55, 0xcc, + 0x1c, 0x68, 0xa8, 0x19, 0x17, 0x86, 0xb7, 0x42, 0x5c, 0xc5, 0x01, 0xc5, 0xbc, 0x85, 0x9e, 0x02, + 0xe4, 0xac, 0x1d, 0xbd, 0x53, 0xf8, 0x91, 0xa6, 0x34, 0x36, 0x8c, 0xee, 0x2e, 0xd1, 0x66, 0xce, + 0xbe, 0x82, 0x6e, 0x91, 0x5b, 0xa2, 0xf1, 0x42, 0xfa, 0xa8, 0x35, 0xa8, 0xd1, 0xfd, 0x2b, 0x2c, + 0x32, 0xc7, 0xbf, 0x87, 0x7e, 0x99, 0x32, 0x22, 0x73, 0xe1, 0xc2, 0x02, 0xfd, 0x1c, 0xbd, 0x7b, + 0xa5, 0x8d, 0x0e, 0x42, 0xde, 0x23, 0x0b, 0x20, 0xcc, 0x35, 0xd4, 0x02, 0x08, 0xf3, 0x8d, 0x55, + 0x80, 0x50, 0x6c, 0x2c, 0x05, 0x10, 0x16, 0xb6, 0xc1, 0x02, 0x08, 0x8b, 0xbb, 0x92, 0x79, 0x0b, + 0x45, 0xb0, 0xb1, 0xb8, 0xdc, 0x91, 0xfe, 0x83, 0xd5, 0x95, 0x3d, 0x63, 0xf4, 0xe0, 0x7b, 0x58, + 0x66, 0x1b, 0x7e, 0x0d, 0x83, 0xb9, 0xea, 0x44, 0x3a, 0xa4, 0xcb, 0x0a, 0x7f, 0xf4, 0xde, 0xd5, + 0x46, 0x6a, 0x87, 0xc9, 0x2a, 0xff, 0x8b, 0xee, 0xa3, 0xff, 0x07, 0x00, 0x00, 0xff, 0xff, 0x6a, + 0xee, 0xfc, 0x90, 0xb2, 0x1b, 0x00, 0x00, } diff --git a/weed/pb/queue_pb/queue.pb.go 
b/weed/pb/queue_pb/queue.pb.go index 16147a77a..8ec4d62aa 100644 --- a/weed/pb/queue_pb/queue.pb.go +++ b/weed/pb/queue_pb/queue.pb.go @@ -41,9 +41,10 @@ var _ = math.Inf const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package type WriteMessageRequest struct { - Topic string `protobuf:"bytes,1,opt,name=topic" json:"topic,omitempty"` - EventNs int64 `protobuf:"varint,2,opt,name=event_ns,json=eventNs" json:"event_ns,omitempty"` - Data []byte `protobuf:"bytes,3,opt,name=data,proto3" json:"data,omitempty"` + Topic string `protobuf:"bytes,1,opt,name=topic" json:"topic,omitempty"` + EventNs int64 `protobuf:"varint,2,opt,name=event_ns,json=eventNs" json:"event_ns,omitempty"` + PartitionKey []byte `protobuf:"bytes,3,opt,name=partition_key,json=partitionKey,proto3" json:"partition_key,omitempty"` + Data []byte `protobuf:"bytes,4,opt,name=data,proto3" json:"data,omitempty"` } func (m *WriteMessageRequest) Reset() { *m = WriteMessageRequest{} } @@ -65,6 +66,13 @@ func (m *WriteMessageRequest) GetEventNs() int64 { return 0 } +func (m *WriteMessageRequest) GetPartitionKey() []byte { + if m != nil { + return m.PartitionKey + } + return nil +} + func (m *WriteMessageRequest) GetData() []byte { if m != nil { return m.Data @@ -153,8 +161,9 @@ func (m *ReadMessageResponse) GetData() []byte { } type ConfigureTopicRequest struct { - Topic string `protobuf:"bytes,1,opt,name=topic" json:"topic,omitempty"` - TtlSeconds int64 `protobuf:"varint,2,opt,name=ttl_seconds,json=ttlSeconds" json:"ttl_seconds,omitempty"` + Topic string `protobuf:"bytes,1,opt,name=topic" json:"topic,omitempty"` + TtlSeconds int64 `protobuf:"varint,2,opt,name=ttl_seconds,json=ttlSeconds" json:"ttl_seconds,omitempty"` + PartitionCount int32 `protobuf:"varint,3,opt,name=partition_count,json=partitionCount" json:"partition_count,omitempty"` } func (m *ConfigureTopicRequest) Reset() { *m = ConfigureTopicRequest{} } @@ -176,6 +185,13 @@ func (m *ConfigureTopicRequest) GetTtlSeconds() int64 { return 0 } +func (m *ConfigureTopicRequest) GetPartitionCount() int32 { + if m != nil { + return m.PartitionCount + } + return 0 +} + type ConfigureTopicResponse struct { Error string `protobuf:"bytes,1,opt,name=error" json:"error,omitempty"` } @@ -469,29 +485,32 @@ var _SeaweedQueue_serviceDesc = grpc.ServiceDesc{ func init() { proto.RegisterFile("queue.proto", fileDescriptor0) } var fileDescriptor0 = []byte{ - // 380 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x94, 0x93, 0xd1, 0x6e, 0xda, 0x30, - 0x14, 0x86, 0x09, 0x0c, 0xc6, 0x4e, 0xd0, 0x34, 0x19, 0x98, 0x18, 0x1a, 0x10, 0xf9, 0x2a, 0xda, - 0xa4, 0x08, 0x6d, 0x6f, 0x00, 0xed, 0x5d, 0x89, 0xda, 0xd0, 0xaa, 0x52, 0x6f, 0x90, 0x49, 0x0e, - 0x28, 0x22, 0x4d, 0x82, 0xed, 0xb4, 0x6f, 0xda, 0xe7, 0xa9, 0xe2, 0x28, 0x22, 0x29, 0x10, 0xd1, - 0xbb, 0xfc, 0xb1, 0xfd, 0x9d, 0xdf, 0xff, 0x39, 0x06, 0x7d, 0x9f, 0x60, 0x82, 0x56, 0xcc, 0x23, - 0x19, 0x91, 0xb6, 0x12, 0xab, 0x78, 0x4d, 0x9f, 0xa0, 0xfb, 0xc8, 0x7d, 0x89, 0x0b, 0x14, 0x82, - 0x6d, 0xd1, 0xc1, 0x7d, 0x82, 0x42, 0x92, 0x1e, 0x34, 0x65, 0x14, 0xfb, 0xee, 0x40, 0x33, 0x34, - 0xf3, 0x9b, 0x93, 0x09, 0xf2, 0x0b, 0xda, 0xf8, 0x82, 0xa1, 0x5c, 0x85, 0x62, 0x50, 0x37, 0x34, - 0xb3, 0xe1, 0x7c, 0x55, 0xda, 0x16, 0x84, 0xc0, 0x17, 0x8f, 0x49, 0x36, 0x68, 0x18, 0x9a, 0xd9, - 0x71, 0xd4, 0x37, 0x9d, 0x43, 0xaf, 0xcc, 0x16, 0x71, 0x14, 0x0a, 0x4c, 0xe1, 0xc8, 0x79, 0xc4, - 0x73, 0xb8, 0x12, 0xa4, 0x0f, 0x2d, 0xe6, 0xee, 0x0e, 0xe8, 0x26, 0x73, 0x77, 0xb6, 0xa0, 0xd7, - 0x40, 0x1c, 0x64, 0xde, 
0xa5, 0xfe, 0x84, 0x64, 0xbc, 0xe8, 0x4f, 0x69, 0x5b, 0xa4, 0xf7, 0x2c, - 0x61, 0x2a, 0xad, 0x7c, 0xf2, 0x9e, 0x36, 0xf4, 0xe7, 0x51, 0xb8, 0xf1, 0xb7, 0x09, 0xc7, 0xfb, - 0xd4, 0x48, 0xb5, 0xcb, 0x09, 0xe8, 0x52, 0x06, 0x2b, 0x81, 0x6e, 0x14, 0x7a, 0x79, 0x01, 0x90, - 0x32, 0x58, 0x66, 0x7f, 0xa8, 0x05, 0x3f, 0x3f, 0xf2, 0xaa, 0xec, 0xd2, 0x3f, 0x40, 0xae, 0x30, - 0x40, 0x79, 0x41, 0x71, 0xfa, 0x17, 0xba, 0xa5, 0xbd, 0x55, 0xe0, 0x7f, 0x6f, 0x75, 0xe8, 0x2c, - 0x91, 0xbd, 0x22, 0x7a, 0x77, 0xe9, 0xc0, 0x10, 0x07, 0xf4, 0xa5, 0xe4, 0xc8, 0x9e, 0x55, 0x5f, - 0xc9, 0xc8, 0xca, 0xe7, 0xc8, 0x3a, 0x31, 0x44, 0xc3, 0xf1, 0xb9, 0xe5, 0xac, 0x28, 0xad, 0x99, - 0xda, 0x54, 0x23, 0x0b, 0x80, 0x8c, 0x99, 0xf6, 0x87, 0xfc, 0x3e, 0x9c, 0x39, 0x6e, 0xfb, 0x70, - 0x74, 0x66, 0x35, 0x07, 0x4e, 0x35, 0xf2, 0x00, 0xdf, 0xcb, 0xe1, 0x91, 0xc9, 0xe1, 0xd0, 0xc9, - 0x36, 0x0d, 0x8d, 0xf3, 0x1b, 0x72, 0x30, 0xb9, 0x01, 0xbd, 0x90, 0x5b, 0xd1, 0xe6, 0x71, 0xf4, - 0x45, 0x9b, 0x27, 0xc2, 0xa6, 0xb5, 0xd9, 0x18, 0x7e, 0x88, 0x2c, 0xd7, 0x8d, 0xb0, 0xdc, 0xc0, - 0xc7, 0x50, 0xce, 0x40, 0x45, 0x7c, 0x9b, 0xbe, 0xcf, 0x75, 0x4b, 0x3d, 0xd3, 0xff, 0xef, 0x01, - 0x00, 0x00, 0xff, 0xff, 0x34, 0x84, 0x96, 0x74, 0xb5, 0x03, 0x00, 0x00, + // 429 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x8c, 0x53, 0xcd, 0x6e, 0xd3, 0x40, + 0x10, 0xae, 0x9b, 0xa6, 0x94, 0x49, 0x28, 0x68, 0xd2, 0xa2, 0x10, 0xd1, 0x36, 0x5a, 0x0e, 0x44, + 0x20, 0x59, 0x15, 0xbc, 0x41, 0x03, 0x27, 0x68, 0x04, 0x0e, 0x08, 0x89, 0x8b, 0xb5, 0xb5, 0xa7, + 0x95, 0x15, 0xb3, 0xeb, 0xee, 0x8e, 0xa9, 0x7a, 0xe2, 0x2d, 0x79, 0x1e, 0xe4, 0xb5, 0x5c, 0xdb, + 0x34, 0xb1, 0x7a, 0xf3, 0xcc, 0x78, 0xe7, 0xfb, 0xd9, 0x6f, 0x61, 0x70, 0x9d, 0x53, 0x4e, 0x7e, + 0x66, 0x34, 0x6b, 0xdc, 0x73, 0x45, 0x98, 0x5d, 0x88, 0x3f, 0x30, 0xfa, 0x61, 0x12, 0xa6, 0x73, + 0xb2, 0x56, 0x5e, 0x51, 0x40, 0xd7, 0x39, 0x59, 0xc6, 0x03, 0xe8, 0xb3, 0xce, 0x92, 0x68, 0xec, + 0x4d, 0xbd, 0xd9, 0xe3, 0xa0, 0x2c, 0xf0, 0x05, 0xec, 0xd1, 0x6f, 0x52, 0x1c, 0x2a, 0x3b, 0xde, + 0x9e, 0x7a, 0xb3, 0x5e, 0xf0, 0xc8, 0xd5, 0x0b, 0x8b, 0xaf, 0xe0, 0x49, 0x26, 0x0d, 0x27, 0x9c, + 0x68, 0x15, 0xae, 0xe8, 0x76, 0xdc, 0x9b, 0x7a, 0xb3, 0x61, 0x30, 0xbc, 0x6b, 0x7e, 0xa2, 0x5b, + 0x44, 0xd8, 0x89, 0x25, 0xcb, 0xf1, 0x8e, 0x9b, 0xb9, 0x6f, 0x31, 0x87, 0x83, 0x36, 0x01, 0x9b, + 0x69, 0x65, 0xa9, 0x60, 0x40, 0xc6, 0x68, 0x53, 0x31, 0x70, 0x05, 0x1e, 0xc2, 0xae, 0x8c, 0x56, + 0x35, 0x7e, 0x5f, 0x46, 0xab, 0x85, 0x15, 0x1f, 0x01, 0x03, 0x92, 0xf1, 0x43, 0x45, 0x58, 0x96, + 0xa6, 0x29, 0xc2, 0xd5, 0x0b, 0x2b, 0x7e, 0xc2, 0xa8, 0xb5, 0xa6, 0x93, 0x4a, 0x87, 0x19, 0x95, + 0xce, 0x5e, 0x43, 0xe7, 0x0d, 0x1c, 0xce, 0xb5, 0xba, 0x4c, 0xae, 0x72, 0x43, 0xdf, 0x0a, 0x22, + 0xdd, 0x2c, 0x4f, 0x60, 0xc0, 0x9c, 0x86, 0x96, 0x22, 0xad, 0xe2, 0x0a, 0x00, 0x98, 0xd3, 0x65, + 0xd9, 0xc1, 0xd7, 0xf0, 0xb4, 0x36, 0x3c, 0xd2, 0xb9, 0x62, 0x07, 0xd7, 0x0f, 0xf6, 0xef, 0xda, + 0xf3, 0xa2, 0x2b, 0x7c, 0x78, 0xfe, 0x3f, 0x70, 0x97, 0x2e, 0xf1, 0x06, 0xf0, 0x03, 0xa5, 0xc4, + 0x0f, 0x60, 0x29, 0xde, 0xc2, 0xa8, 0xf5, 0x6f, 0xd7, 0xe2, 0x77, 0x7f, 0xb7, 0x61, 0xb8, 0x24, + 0x79, 0x43, 0x14, 0x7f, 0x2d, 0xe2, 0x87, 0x01, 0x0c, 0x96, 0x6c, 0x48, 0xfe, 0x72, 0x01, 0xc0, + 0x23, 0xbf, 0x4a, 0xa5, 0xbf, 0x26, 0x92, 0x93, 0xe3, 0x4d, 0xe3, 0x12, 0x54, 0x6c, 0xcd, 0xbc, + 0x53, 0x0f, 0xcf, 0x01, 0xca, 0x9d, 0xc5, 0x45, 0xe2, 0xcb, 0xfa, 0xcc, 0xfd, 0x7c, 0x4c, 0x8e, + 0x36, 0x4c, 0xab, 0x85, 0xa7, 0x1e, 0x7e, 0x87, 0xfd, 0xb6, 0x79, 0x78, 0x52, 0x1f, 0x5a, 0x7b, + 0x9f, 0x93, 0xe9, 0xe6, 
0x1f, 0xaa, 0xc5, 0xf8, 0x19, 0x06, 0x0d, 0xdf, 0x9a, 0x34, 0xef, 0x5b, + 0xdf, 0xa4, 0xb9, 0xc6, 0x6c, 0xb1, 0x75, 0x76, 0x0c, 0xcf, 0x6c, 0xe9, 0xeb, 0xa5, 0xf5, 0xa3, + 0x34, 0x21, 0xc5, 0x67, 0xe0, 0x2c, 0xfe, 0x52, 0xbc, 0xf6, 0x8b, 0x5d, 0xf7, 0xe8, 0xdf, 0xff, + 0x0b, 0x00, 0x00, 0xff, 0xff, 0x7d, 0x3e, 0x14, 0xd8, 0x03, 0x04, 0x00, 0x00, } diff --git a/weed/server/filer_server.go b/weed/server/filer_server.go index bfb182dbe..57caeb6d4 100644 --- a/weed/server/filer_server.go +++ b/weed/server/filer_server.go @@ -44,7 +44,7 @@ type FilerOption struct { DataCenter string DefaultLevelDbDir string DisableHttp bool - Port int + Port uint32 recursiveDelete bool } @@ -66,7 +66,7 @@ func NewFilerServer(defaultMux, readonlyMux *http.ServeMux, option *FilerOption) glog.Fatal("master list is required!") } - fs.filer = filer2.NewFiler(option.Masters, fs.grpcDialOption) + fs.filer = filer2.NewFiler(option.Masters, fs.grpcDialOption, option.Port+10000) go fs.filer.KeepConnectedToMaster() diff --git a/weed/server/master_grpc_server.go b/weed/server/master_grpc_server.go index d308130d6..3f699a7a2 100644 --- a/weed/server/master_grpc_server.go +++ b/weed/server/master_grpc_server.go @@ -1,8 +1,10 @@ package weed_server import ( + "context" "fmt" "net" + "strings" "time" "github.com/chrislusf/raft" @@ -181,35 +183,13 @@ func (ms *MasterServer) KeepConnected(stream master_pb.Seaweed_KeepConnectedServ return ms.informNewLeader(stream) } - // remember client address - ctx := stream.Context() - // fmt.Printf("FromContext %+v\n", ctx) - pr, ok := peer.FromContext(ctx) - if !ok { - glog.Error("failed to get peer from ctx") - return fmt.Errorf("failed to get peer from ctx") - } - if pr.Addr == net.Addr(nil) { - glog.Error("failed to get peer address") - return fmt.Errorf("failed to get peer address") - } - - clientName := req.Name + pr.Addr.String() - glog.V(0).Infof("+ client %v", clientName) + peerAddress := findClientAddress(stream.Context(), req.GrpcPort) - messageChan := make(chan *master_pb.VolumeLocation) stopChan := make(chan bool) - ms.clientChansLock.Lock() - ms.clientChans[clientName] = messageChan - ms.clientChansLock.Unlock() + clientName, messageChan := ms.addClient(req.Name, peerAddress) - defer func() { - glog.V(0).Infof("- client %v", clientName) - ms.clientChansLock.Lock() - delete(ms.clientChans, clientName) - ms.clientChansLock.Unlock() - }() + defer ms.deleteClient(clientName) for _, message := range ms.Topo.ToVolumeLocations() { if err := stream.Send(message); err != nil { @@ -261,3 +241,57 @@ func (ms *MasterServer) informNewLeader(stream master_pb.Seaweed_KeepConnectedSe } return nil } + +func (ms *MasterServer) addClient(clientType string, clientAddress string) (clientName string, messageChan chan *master_pb.VolumeLocation) { + clientName = clientType + "@" + clientAddress + glog.V(0).Infof("+ client %v", clientName) + + messageChan = make(chan *master_pb.VolumeLocation) + + ms.clientChansLock.Lock() + ms.clientChans[clientName] = messageChan + ms.clientChansLock.Unlock() + return +} + +func (ms *MasterServer) deleteClient(clientName string) { + glog.V(0).Infof("- client %v", clientName) + ms.clientChansLock.Lock() + delete(ms.clientChans, clientName) + ms.clientChansLock.Unlock() +} + +func findClientAddress(ctx context.Context, grpcPort uint32) string { + // fmt.Printf("FromContext %+v\n", ctx) + pr, ok := peer.FromContext(ctx) + if !ok { + glog.Error("failed to get peer from ctx") + return "" + } + if pr.Addr == net.Addr(nil) { + glog.Error("failed to get peer address") + 
return "" + } + if grpcPort == 0 { + return pr.Addr.String() + } + if tcpAddr, ok := pr.Addr.(*net.TCPAddr); ok { + externalIP := tcpAddr.IP + return fmt.Sprintf("%s:%d", externalIP, grpcPort) + } + return pr.Addr.String() + +} + +func (ms *MasterServer ListMasterClients(ctx context.Context, req *master_pb.ListMasterClientsRequest) (*master_pb.ListMasterClientsResponse, error) { + resp := &master_pb.ListMasterClientsResponse{} + ms.clientChansLock.RLock() + defer ms.clientChansLock.RUnlock() + + for k := range ms.clientChans { + if strings.HasPrefix(k, req.ClientType+"@") { + resp.GrpcAddresses = append(resp.GrpcAddresses, k[len(req.ClientType)+1:]) + } + } + return resp, nil +} diff --git a/weed/server/master_server.go b/weed/server/master_server.go index 095008339..a9ae6b888 100644 --- a/weed/server/master_server.go +++ b/weed/server/master_server.go @@ -88,7 +88,7 @@ func NewMasterServer(r *mux.Router, option *MasterOption, peers []string) *Maste preallocateSize: preallocateSize, clientChans: make(map[string]chan *master_pb.VolumeLocation), grpcDialOption: grpcDialOption, - MasterClient: wdclient.NewMasterClient(grpcDialOption, "master", peers), + MasterClient: wdclient.NewMasterClient(grpcDialOption, "master", 0, peers), } ms.bounedLeaderChan = make(chan int, 16) diff --git a/weed/shell/commands.go b/weed/shell/commands.go index 31ca31bc3..a67a4e45e 100644 --- a/weed/shell/commands.go +++ b/weed/shell/commands.go @@ -44,8 +44,7 @@ var ( func NewCommandEnv(options ShellOptions) *CommandEnv { return &CommandEnv{ env: make(map[string]string), - MasterClient: wdclient.NewMasterClient( - options.GrpcDialOption, "shell", strings.Split(*options.Masters, ",")), + MasterClient: wdclient.NewMasterClient(options.GrpcDialOption, "shell", 0, strings.Split(*options.Masters, ",")), option: options, } } diff --git a/weed/stats/metrics.go b/weed/stats/metrics.go index a9624cd86..ee8763e84 100644 --- a/weed/stats/metrics.go +++ b/weed/stats/metrics.go @@ -136,7 +136,7 @@ func LoopPushingMetric(name, instance string, gatherer *prometheus.Registry, fnG } } -func SourceName(port int) string { +func SourceName(port uint32) string { hostname, err := os.Hostname() if err != nil { return "unknown" diff --git a/weed/wdclient/masterclient.go b/weed/wdclient/masterclient.go index f197fa6f2..0cf161a63 100644 --- a/weed/wdclient/masterclient.go +++ b/weed/wdclient/masterclient.go @@ -6,14 +6,16 @@ import ( "math/rand" "time" + "google.golang.org/grpc" + "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/pb/master_pb" "github.com/chrislusf/seaweedfs/weed/util" - "google.golang.org/grpc" ) type MasterClient struct { name string + grpcPort uint32 currentMaster string masters []string grpcDialOption grpc.DialOption @@ -21,9 +23,10 @@ type MasterClient struct { vidMap } -func NewMasterClient(grpcDialOption grpc.DialOption, clientName string, masters []string) *MasterClient { +func NewMasterClient(grpcDialOption grpc.DialOption, clientName string, clientGrpcPort uint32, masters []string) *MasterClient { return &MasterClient{ name: clientName, + grpcPort: clientGrpcPort, masters: masters, grpcDialOption: grpcDialOption, vidMap: newVidMap(), @@ -72,7 +75,7 @@ func (mc *MasterClient) tryConnectToMaster(master string) (nextHintedLeader stri return err } - if err = stream.Send(&master_pb.KeepConnectedRequest{Name: mc.name}); err != nil { + if err = stream.Send(&master_pb.KeepConnectedRequest{Name: mc.name, GrpcPort: mc.grpcPort}); err != nil { glog.V(0).Infof("%s failed to send to %s: %v", 
mc.name, master, err) return err } From dc40413847690fbcb304fa97bf6c3159beb21b37 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Sun, 1 Mar 2020 22:21:13 -0800 Subject: [PATCH 0179/2432] fix error --- weed/server/master_grpc_server.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/weed/server/master_grpc_server.go b/weed/server/master_grpc_server.go index 3f699a7a2..84087df8b 100644 --- a/weed/server/master_grpc_server.go +++ b/weed/server/master_grpc_server.go @@ -283,7 +283,7 @@ func findClientAddress(ctx context.Context, grpcPort uint32) string { } -func (ms *MasterServer ListMasterClients(ctx context.Context, req *master_pb.ListMasterClientsRequest) (*master_pb.ListMasterClientsResponse, error) { +func (ms *MasterServer) ListMasterClients(ctx context.Context, req *master_pb.ListMasterClientsRequest) (*master_pb.ListMasterClientsResponse, error) { resp := &master_pb.ListMasterClientsResponse{} ms.clientChansLock.RLock() defer ms.clientChansLock.RUnlock() From 9434ebc298e25474c9ab849f0099ba1f37d161c8 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Sun, 1 Mar 2020 22:25:00 -0800 Subject: [PATCH 0180/2432] update go version --- .travis.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.travis.yml b/.travis.yml index b7467ab8a..bad4a77f1 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,9 +1,9 @@ sudo: false language: go go: - - 1.11.x - 1.12.x - 1.13.x + - 1.14.x before_install: - export PATH=/home/travis/gopath/bin:$PATH @@ -45,4 +45,4 @@ deploy: on: tags: true repo: chrislusf/seaweedfs - go: 1.13.x + go: 1.14.x From b8ddf064c39b0d62a6790fb6bfff5226806efe15 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Sun, 1 Mar 2020 22:36:40 -0800 Subject: [PATCH 0181/2432] Update README.md --- README.md | 49 ++++++++++++++++++++++++------------------------- 1 file changed, 24 insertions(+), 25 deletions(-) diff --git a/README.md b/README.md index 8c32a4e58..0c8a5d9c7 100644 --- a/README.md +++ b/README.md @@ -461,50 +461,49 @@ My Own Unscientific Single Machine Results on Mac Book with Solid State Disk, CP Write 1 million 1KB file: ``` Concurrency Level: 16 -Time taken for tests: 88.796 seconds +Time taken for tests: 66.753 seconds Complete requests: 1048576 Failed requests: 0 -Total transferred: 1106764659 bytes -Requests per second: 11808.87 [#/sec] -Transfer rate: 12172.05 [Kbytes/sec] +Total transferred: 1106789009 bytes +Requests per second: 15708.23 [#/sec] +Transfer rate: 16191.69 [Kbytes/sec] Connection Times (ms) min avg max std -Total: 0.2 1.3 44.8 0.9 +Total: 0.3 1.0 84.3 0.9 Percentage of the requests served within a certain time (ms) - 50% 1.1 ms - 66% 1.3 ms - 75% 1.5 ms - 80% 1.7 ms - 90% 2.1 ms - 95% 2.6 ms - 98% 3.7 ms - 99% 4.6 ms - 100% 44.8 ms + 50% 0.8 ms + 66% 1.0 ms + 75% 1.1 ms + 80% 1.2 ms + 90% 1.4 ms + 95% 1.7 ms + 98% 2.1 ms + 99% 2.6 ms + 100% 84.3 ms ``` Randomly read 1 million files: ``` Concurrency Level: 16 -Time taken for tests: 34.263 seconds +Time taken for tests: 22.301 seconds Complete requests: 1048576 Failed requests: 0 -Total transferred: 1106762945 bytes -Requests per second: 30603.34 [#/sec] -Transfer rate: 31544.49 [Kbytes/sec] +Total transferred: 1106812873 bytes +Requests per second: 47019.38 [#/sec] +Transfer rate: 48467.57 [Kbytes/sec] Connection Times (ms) min avg max std -Total: 0.0 0.5 20.7 0.7 +Total: 0.0 0.3 54.1 0.2 Percentage of the requests served within a certain time (ms) - 50% 0.4 ms - 75% 0.5 ms - 95% 0.6 ms - 98% 0.8 ms - 99% 1.2 ms - 100% 20.7 ms + 50% 0.3 ms + 90% 0.4 ms + 98% 0.6 ms + 99% 0.7 ms + 100% 
54.1 ms ``` [Back to TOC](#table-of-contents) From 410bce3925fb47d6ba21f838fafed4ff7c892a90 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Sun, 1 Mar 2020 22:39:08 -0800 Subject: [PATCH 0182/2432] go fmt --- weed/pb/master_pb/master.pb.go | 20 +++++++++------ weed/pb/volume_server_pb/volume_server.pb.go | 26 ++++++++++++-------- weed/shell/commands.go | 4 +-- 3 files changed, 30 insertions(+), 20 deletions(-) diff --git a/weed/pb/master_pb/master.pb.go b/weed/pb/master_pb/master.pb.go index c33e2b768..95c9533a1 100644 --- a/weed/pb/master_pb/master.pb.go +++ b/weed/pb/master_pb/master.pb.go @@ -428,10 +428,12 @@ type VolumeEcShardInformationMessage struct { EcIndexBits uint32 `protobuf:"varint,3,opt,name=ec_index_bits,json=ecIndexBits" json:"ec_index_bits,omitempty"` } -func (m *VolumeEcShardInformationMessage) Reset() { *m = VolumeEcShardInformationMessage{} } -func (m *VolumeEcShardInformationMessage) String() string { return proto.CompactTextString(m) } -func (*VolumeEcShardInformationMessage) ProtoMessage() {} -func (*VolumeEcShardInformationMessage) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } +func (m *VolumeEcShardInformationMessage) Reset() { *m = VolumeEcShardInformationMessage{} } +func (m *VolumeEcShardInformationMessage) String() string { return proto.CompactTextString(m) } +func (*VolumeEcShardInformationMessage) ProtoMessage() {} +func (*VolumeEcShardInformationMessage) Descriptor() ([]byte, []int) { + return fileDescriptor0, []int{4} +} func (m *VolumeEcShardInformationMessage) GetId() uint32 { if m != nil { @@ -1422,10 +1424,12 @@ type GetMasterConfigurationResponse struct { MetricsIntervalSeconds uint32 `protobuf:"varint,2,opt,name=metrics_interval_seconds,json=metricsIntervalSeconds" json:"metrics_interval_seconds,omitempty"` } -func (m *GetMasterConfigurationResponse) Reset() { *m = GetMasterConfigurationResponse{} } -func (m *GetMasterConfigurationResponse) String() string { return proto.CompactTextString(m) } -func (*GetMasterConfigurationResponse) ProtoMessage() {} -func (*GetMasterConfigurationResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{32} } +func (m *GetMasterConfigurationResponse) Reset() { *m = GetMasterConfigurationResponse{} } +func (m *GetMasterConfigurationResponse) String() string { return proto.CompactTextString(m) } +func (*GetMasterConfigurationResponse) ProtoMessage() {} +func (*GetMasterConfigurationResponse) Descriptor() ([]byte, []int) { + return fileDescriptor0, []int{32} +} func (m *GetMasterConfigurationResponse) GetMetricsAddress() string { if m != nil { diff --git a/weed/pb/volume_server_pb/volume_server.pb.go b/weed/pb/volume_server_pb/volume_server.pb.go index 56baa0cf7..588b18f2e 100644 --- a/weed/pb/volume_server_pb/volume_server.pb.go +++ b/weed/pb/volume_server_pb/volume_server.pb.go @@ -1035,10 +1035,12 @@ func (m *VolumeEcShardsGenerateRequest) GetCollection() string { type VolumeEcShardsGenerateResponse struct { } -func (m *VolumeEcShardsGenerateResponse) Reset() { *m = VolumeEcShardsGenerateResponse{} } -func (m *VolumeEcShardsGenerateResponse) String() string { return proto.CompactTextString(m) } -func (*VolumeEcShardsGenerateResponse) ProtoMessage() {} -func (*VolumeEcShardsGenerateResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{41} } +func (m *VolumeEcShardsGenerateResponse) Reset() { *m = VolumeEcShardsGenerateResponse{} } +func (m *VolumeEcShardsGenerateResponse) String() string { return proto.CompactTextString(m) } +func (*VolumeEcShardsGenerateResponse) 
ProtoMessage() {} +func (*VolumeEcShardsGenerateResponse) Descriptor() ([]byte, []int) { + return fileDescriptor0, []int{41} +} type VolumeEcShardsRebuildRequest struct { VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` @@ -1411,10 +1413,12 @@ func (m *VolumeEcShardsToVolumeRequest) GetCollection() string { type VolumeEcShardsToVolumeResponse struct { } -func (m *VolumeEcShardsToVolumeResponse) Reset() { *m = VolumeEcShardsToVolumeResponse{} } -func (m *VolumeEcShardsToVolumeResponse) String() string { return proto.CompactTextString(m) } -func (*VolumeEcShardsToVolumeResponse) ProtoMessage() {} -func (*VolumeEcShardsToVolumeResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{57} } +func (m *VolumeEcShardsToVolumeResponse) Reset() { *m = VolumeEcShardsToVolumeResponse{} } +func (m *VolumeEcShardsToVolumeResponse) String() string { return proto.CompactTextString(m) } +func (*VolumeEcShardsToVolumeResponse) ProtoMessage() {} +func (*VolumeEcShardsToVolumeResponse) Descriptor() ([]byte, []int) { + return fileDescriptor0, []int{57} +} type ReadVolumeFileStatusRequest struct { VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` @@ -2081,8 +2085,10 @@ type QueryRequest_InputSerialization_JSONInput struct { func (m *QueryRequest_InputSerialization_JSONInput) Reset() { *m = QueryRequest_InputSerialization_JSONInput{} } -func (m *QueryRequest_InputSerialization_JSONInput) String() string { return proto.CompactTextString(m) } -func (*QueryRequest_InputSerialization_JSONInput) ProtoMessage() {} +func (m *QueryRequest_InputSerialization_JSONInput) String() string { + return proto.CompactTextString(m) +} +func (*QueryRequest_InputSerialization_JSONInput) ProtoMessage() {} func (*QueryRequest_InputSerialization_JSONInput) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{70, 1, 1} } diff --git a/weed/shell/commands.go b/weed/shell/commands.go index a67a4e45e..93a4c94bb 100644 --- a/weed/shell/commands.go +++ b/weed/shell/commands.go @@ -43,9 +43,9 @@ var ( func NewCommandEnv(options ShellOptions) *CommandEnv { return &CommandEnv{ - env: make(map[string]string), + env: make(map[string]string), MasterClient: wdclient.NewMasterClient(options.GrpcDialOption, "shell", 0, strings.Split(*options.Masters, ",")), - option: options, + option: options, } } From 7335e6219921fd3976179f6a74ea4a2dcdc43e1e Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Sun, 1 Mar 2020 23:37:02 -0800 Subject: [PATCH 0183/2432] volume: PUT also conditionally gzip compress --- weed/storage/needle/needle.go | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/weed/storage/needle/needle.go b/weed/storage/needle/needle.go index 494cc138e..022e8bf14 100644 --- a/weed/storage/needle/needle.go +++ b/weed/storage/needle/needle.go @@ -12,6 +12,7 @@ import ( "github.com/chrislusf/seaweedfs/weed/images" . 
"github.com/chrislusf/seaweedfs/weed/storage/types" + "github.com/chrislusf/seaweedfs/weed/util" ) const ( @@ -63,7 +64,7 @@ func ParseUpload(r *http.Request, sizeLimit int64) ( if r.Method == "POST" { fileName, data, mimeType, isGzipped, originalDataSize, isChunkedFile, e = parseMultipart(r, sizeLimit) } else { - isGzipped = false + isGzipped = r.Header.Get("Content-Encoding") == "gzip" mimeType = r.Header.Get("Content-Type") fileName = "" data, e = ioutil.ReadAll(io.LimitReader(r.Body, sizeLimit+1)) @@ -72,6 +73,16 @@ func ParseUpload(r *http.Request, sizeLimit int64) ( io.Copy(ioutil.Discard, r.Body) } r.Body.Close() + if isGzipped { + if unzipped, e := util.UnGzipData(data); e == nil { + originalDataSize = len(unzipped) + } + } else if shouldGzip, _ := util.IsGzippableFileType("", mimeType); shouldGzip { + if compressedData, err := util.GzipData(data); err == nil { + data = compressedData + isGzipped = true + } + } } if e != nil { return From 9b7dae803701dc6b2368a85c24cd818c7277e59d Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Mon, 2 Mar 2020 20:27:14 -0800 Subject: [PATCH 0184/2432] filer: fix uploaded chunked file size reporting bug. --- weed/server/filer_server_handlers_write_autochunk.go | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/weed/server/filer_server_handlers_write_autochunk.go b/weed/server/filer_server_handlers_write_autochunk.go index 25c0a4b4d..45cad480d 100644 --- a/weed/server/filer_server_handlers_write_autochunk.go +++ b/weed/server/filer_server_handlers_write_autochunk.go @@ -125,12 +125,13 @@ func (fs *FilerServer) doAutoChunk(ctx context.Context, w http.ResponseWriter, r glog.V(4).Infof("uploaded %s chunk %d to %s [%d,%d) of %d", fileName, len(fileChunks), fileId, chunkOffset, chunkOffset+int64(uploadedSize), contentLength) + // reset variables for the next chunk + chunkOffset = chunkOffset + int64(uploadedSize) + // if last chunk was not at full chunk size, but already exhausted the reader if uploadedSize < int64(chunkSize) { break } - // reset variables for the next chunk - chunkOffset = chunkOffset + int64(uploadedSize) } path := r.URL.Path From b058867852287c06d0b06a2bb07de83c1ccb9df9 Mon Sep 17 00:00:00 2001 From: LazyDBA247-Anyvision Date: Tue, 3 Mar 2020 09:56:39 +0200 Subject: [PATCH 0185/2432] SEAWEEDFS-K8S: helm chart seaweedfs on k8s deployment through helm chart & stateful sets --- k8s/README.md | 23 + k8s/seaweedfs/.helmignore | 22 + k8s/seaweedfs/Chart.yaml | 4 + k8s/seaweedfs/templates/_helpers.tpl | 114 ++ k8s/seaweedfs/templates/ca-cert.yaml | 14 + .../templates/cert-clusterissuer.yaml | 8 + k8s/seaweedfs/templates/client-cert.yaml | 33 + k8s/seaweedfs/templates/filer-cert.yaml | 33 + k8s/seaweedfs/templates/filer-service.yaml | 22 + .../templates/filer-statefulset.yaml | 210 +++ k8s/seaweedfs/templates/ingress.yaml | 59 + k8s/seaweedfs/templates/master-cert.yaml | 33 + k8s/seaweedfs/templates/master-service.yaml | 24 + .../templates/master-statefulset.yaml | 199 +++ k8s/seaweedfs/templates/s3-deployment.yaml | 159 ++ k8s/seaweedfs/templates/s3-service.yaml | 17 + .../templates/seaweefs-grafana-dashboard.yaml | 1352 +++++++++++++++++ .../templates/secret-seaweedfs-db.yaml | 14 + .../templates/security-configmap.yaml | 52 + k8s/seaweedfs/templates/service-account.yaml | 29 + k8s/seaweedfs/templates/volume-cert.yaml | 33 + k8s/seaweedfs/templates/volume-service.yaml | 22 + .../templates/volume-statefulset.yaml | 187 +++ k8s/seaweedfs/values.yaml | 314 ++++ 24 files changed, 2977 insertions(+) create mode 100644 
k8s/README.md
 create mode 100644 k8s/seaweedfs/.helmignore
 create mode 100644 k8s/seaweedfs/Chart.yaml
 create mode 100644 k8s/seaweedfs/templates/_helpers.tpl
 create mode 100644 k8s/seaweedfs/templates/ca-cert.yaml
 create mode 100644 k8s/seaweedfs/templates/cert-clusterissuer.yaml
 create mode 100644 k8s/seaweedfs/templates/client-cert.yaml
 create mode 100644 k8s/seaweedfs/templates/filer-cert.yaml
 create mode 100644 k8s/seaweedfs/templates/filer-service.yaml
 create mode 100644 k8s/seaweedfs/templates/filer-statefulset.yaml
 create mode 100644 k8s/seaweedfs/templates/ingress.yaml
 create mode 100644 k8s/seaweedfs/templates/master-cert.yaml
 create mode 100644 k8s/seaweedfs/templates/master-service.yaml
 create mode 100644 k8s/seaweedfs/templates/master-statefulset.yaml
 create mode 100644 k8s/seaweedfs/templates/s3-deployment.yaml
 create mode 100644 k8s/seaweedfs/templates/s3-service.yaml
 create mode 100644 k8s/seaweedfs/templates/seaweefs-grafana-dashboard.yaml
 create mode 100644 k8s/seaweedfs/templates/secret-seaweedfs-db.yaml
 create mode 100644 k8s/seaweedfs/templates/security-configmap.yaml
 create mode 100644 k8s/seaweedfs/templates/service-account.yaml
 create mode 100644 k8s/seaweedfs/templates/volume-cert.yaml
 create mode 100644 k8s/seaweedfs/templates/volume-service.yaml
 create mode 100644 k8s/seaweedfs/templates/volume-statefulset.yaml
 create mode 100644 k8s/seaweedfs/values.yaml
diff --git a/k8s/README.md b/k8s/README.md
new file mode 100644
index 000000000..5ec3ab407
--- /dev/null
+++ b/k8s/README.md
@@ -0,0 +1,23 @@
+## SEAWEEDFS - helm chart (2.x)
+
+### info:
+* master/filer/volume are stateful sets with anti-affinity on the hostname,
+so your deployment will be spread out and highly available.
+* the chart uses memsql (mysql-compatible) as the filer backend to enable HA (multiple filer instances)
+and the backup/HA capabilities memsql provides.
+* the mysql user/password are created in a k8s secret (secret-seaweedfs-db.yaml) and injected into the filer
+via ENV variables.
+* cert config exists and can be enabled, but has not been tested.
+
+### current instances config (AIO):
+1 instance of each type (master/filer/volume/s3)
+
+instances need node labels:
+* sw-volume: true (for the volume instances, a specific tag)
+* sw-backend: true (for all others, as they are less resource demanding)
+
+you can update the replica count for each node type in values.yaml,
+but you need to add more nodes with the corresponding label.
+
+most of the configuration is available through values.yaml
+
diff --git a/k8s/seaweedfs/.helmignore b/k8s/seaweedfs/.helmignore
new file mode 100644
index 000000000..50af03172
--- /dev/null
+++ b/k8s/seaweedfs/.helmignore
@@ -0,0 +1,22 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/
diff --git a/k8s/seaweedfs/Chart.yaml b/k8s/seaweedfs/Chart.yaml
new file mode 100644
index 000000000..00033e4c6
--- /dev/null
+++ b/k8s/seaweedfs/Chart.yaml
@@ -0,0 +1,4 @@
+apiVersion: v1
+description: SeaweedFS
+name: seaweedfs
+version: 1.57
\ No newline at end of file
diff --git a/k8s/seaweedfs/templates/_helpers.tpl b/k8s/seaweedfs/templates/_helpers.tpl
new file mode 100644
index 000000000..04a782f8b
--- /dev/null
+++ b/k8s/seaweedfs/templates/_helpers.tpl
@@ -0,0 +1,114 @@
+{{/*
+Create a default fully qualified app name.
+We truncate at 63 chars because some Kubernetes name fields are limited to +this (by the DNS naming spec). If release name contains chart name it will +be used as a full name. +*/}} +{{- define "seaweedfs.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "seaweedfs.chart" -}} +{{- printf "%s-helm" .Chart.Name | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Expand the name of the chart. +*/}} +{{- define "seaweedfs.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Inject extra environment vars in the format key:value, if populated +*/}} +{{- define "seaweedfs.extraEnvironmentVars" -}} +{{- if .extraEnvironmentVars -}} +{{- range $key, $value := .extraEnvironmentVars }} +- name: {{ $key }} + value: {{ $value | quote }} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* Return the proper filer image */}} +{{- define "filer.image" -}} +{{- if .Values.filer.imageOverride -}} +{{- $imageOverride := .Values.filer.imageOverride -}} +{{- printf "%s" $imageOverride -}} +{{- else -}} +{{- $registryName := default .Values.image.registry .Values.global.localRegistry | toString -}} +{{- $repositoryName := .Values.image.repository | toString -}} +{{- $name := .Values.global.imageName | toString -}} +{{- $tag := .Values.global.imageTag | toString -}} +{{- printf "%s%s%s:%s" $registryName $repositoryName $name $tag -}} +{{- end -}} +{{- end -}} + +{{/* Return the proper postgresqlSchema image */}} +{{- define "filer.dbSchema.image" -}} +{{- if .Values.filer.dbSchema.imageOverride -}} +{{- $imageOverride := .Values.filer.dbSchema.imageOverride -}} +{{- printf "%s" $imageOverride -}} +{{- else -}} +{{- $registryName := default .Values.global.registry .Values.global.localRegistry | toString -}} +{{- $repositoryName := .Values.global.repository | toString -}} +{{- $name := .Values.filer.dbSchema.imageName | toString -}} +{{- $tag := .Values.filer.dbSchema.imageTag | toString -}} +{{- printf "%s%s%s:%s" $registryName $repositoryName $name $tag -}} +{{- end -}} +{{- end -}} + +{{/* Return the proper master image */}} +{{- define "master.image" -}} +{{- if .Values.master.imageOverride -}} +{{- $imageOverride := .Values.master.imageOverride -}} +{{- printf "%s" $imageOverride -}} +{{- else -}} +{{- $registryName := default .Values.image.registry .Values.global.localRegistry | toString -}} +{{- $repositoryName := .Values.image.repository | toString -}} +{{- $name := .Values.global.imageName | toString -}} +{{- $tag := .Values.global.imageTag | toString -}} +{{- printf "%s%s%s:%s" $registryName $repositoryName $name $tag -}} +{{- end -}} +{{- end -}} + +{{/* Return the proper s3 image */}} +{{- define "s3.image" -}} +{{- if .Values.s3.imageOverride -}} +{{- $imageOverride := .Values.s3.imageOverride -}} +{{- printf "%s" $imageOverride -}} +{{- else -}} +{{- $registryName := default .Values.image.registry .Values.global.localRegistry | toString -}} +{{- $repositoryName := .Values.image.repository | toString -}} +{{- $name := .Values.global.imageName | toString -}} +{{- $tag := .Values.global.imageTag | 
toString -}} +{{- printf "%s%s%s:%s" $registryName $repositoryName $name $tag -}} +{{- end -}} +{{- end -}} + +{{/* Return the proper volume image */}} +{{- define "volume.image" -}} +{{- if .Values.volume.imageOverride -}} +{{- $imageOverride := .Values.volume.imageOverride -}} +{{- printf "%s" $imageOverride -}} +{{- else -}} +{{- $registryName := default .Values.image.registry .Values.global.localRegistry | toString -}} +{{- $repositoryName := .Values.image.repository | toString -}} +{{- $name := .Values.global.imageName | toString -}} +{{- $tag := .Values.global.imageTag | toString -}} +{{- printf "%s%s%s:%s" $registryName $repositoryName $name $tag -}} +{{- end -}} +{{- end -}} \ No newline at end of file diff --git a/k8s/seaweedfs/templates/ca-cert.yaml b/k8s/seaweedfs/templates/ca-cert.yaml new file mode 100644 index 000000000..056f01502 --- /dev/null +++ b/k8s/seaweedfs/templates/ca-cert.yaml @@ -0,0 +1,14 @@ +{{- if .Values.global.enableSecurity }} +apiVersion: certmanager.k8s.io/v1alpha1 +kind: Certificate +metadata: + name: {{ template "seaweedfs.name" . }}-ca-cert + namespace: {{ .Release.Namespace }} +spec: + secretName: {{ template "seaweedfs.name" . }}-ca-cert + commonName: "{{ template "seaweedfs.name" . }}-root-ca" + isCA: true + issuerRef: + name: {{ template "seaweedfs.name" . }}-clusterissuer + kind: ClusterIssuer +{{- end }} diff --git a/k8s/seaweedfs/templates/cert-clusterissuer.yaml b/k8s/seaweedfs/templates/cert-clusterissuer.yaml new file mode 100644 index 000000000..d0bd42593 --- /dev/null +++ b/k8s/seaweedfs/templates/cert-clusterissuer.yaml @@ -0,0 +1,8 @@ +{{- if .Values.global.enableSecurity }} +apiVersion: certmanager.k8s.io/v1alpha1 +kind: ClusterIssuer +metadata: + name: {{ template "seaweedfs.name" . }}-clusterissuer +spec: + selfSigned: {} +{{- end }} diff --git a/k8s/seaweedfs/templates/client-cert.yaml b/k8s/seaweedfs/templates/client-cert.yaml new file mode 100644 index 000000000..4d27b5659 --- /dev/null +++ b/k8s/seaweedfs/templates/client-cert.yaml @@ -0,0 +1,33 @@ +{{- if .Values.global.enableSecurity }} +apiVersion: certmanager.k8s.io/v1alpha1 +kind: Certificate +metadata: + name: {{ template "seaweedfs.name" . }}-client-cert + namespace: {{ .Release.Namespace }} +spec: + secretName: {{ template "seaweedfs.name" . }}-client-cert + issuerRef: + name: {{ template "seaweedfs.name" . }}-clusterissuer + kind: ClusterIssuer + commonName: {{ .Values.certificates.commonName }} + organization: + - "SeaweedFS CA" + dnsNames: + - '*.{{ .Release.Namespace }}' + - '*.{{ .Release.Namespace }}.svc' + - '*.{{ .Release.Namespace }}.svc.cluster.local' + - '*.{{ template "seaweedfs.name" . }}-master' + - '*.{{ template "seaweedfs.name" . }}-master.{{ .Release.Namespace }}' + - '*.{{ template "seaweedfs.name" . }}-master.{{ .Release.Namespace }}.svc' + - '*.{{ template "seaweedfs.name" . }}-master.{{ .Release.Namespace }}.svc.cluster.local' +{{- if .Values.certificates.ipAddresses }} + ipAddresses: + {{- range .Values.certificates.ipAddresses }} + - {{ . 
}} + {{- end }} +{{- end }} + keyAlgorithm: {{ .Values.certificates.keyAlgorithm }} + keySize: {{ .Values.certificates.keySize }} + duration: {{ .Values.certificates.duration }} + renewBefore: {{ .Values.certificates.renewBefore }} +{{- end }} diff --git a/k8s/seaweedfs/templates/filer-cert.yaml b/k8s/seaweedfs/templates/filer-cert.yaml new file mode 100644 index 000000000..855183c54 --- /dev/null +++ b/k8s/seaweedfs/templates/filer-cert.yaml @@ -0,0 +1,33 @@ +{{- if .Values.global.enableSecurity }} +apiVersion: certmanager.k8s.io/v1alpha1 +kind: Certificate +metadata: + name: {{ template "seaweedfs.name" . }}-filer-cert + namespace: {{ .Release.Namespace }} +spec: + secretName: {{ template "seaweedfs.name" . }}-filer-cert + issuerRef: + name: {{ template "seaweedfs.name" . }}-clusterissuer + kind: ClusterIssuer + commonName: {{ .Values.certificates.commonName }} + organization: + - "SeaweedFS CA" + dnsNames: + - '*.{{ .Release.Namespace }}' + - '*.{{ .Release.Namespace }}.svc' + - '*.{{ .Release.Namespace }}.svc.cluster.local' + - '*.{{ template "seaweedfs.name" . }}-master' + - '*.{{ template "seaweedfs.name" . }}-master.{{ .Release.Namespace }}' + - '*.{{ template "seaweedfs.name" . }}-master.{{ .Release.Namespace }}.svc' + - '*.{{ template "seaweedfs.name" . }}-master.{{ .Release.Namespace }}.svc.cluster.local' +{{- if .Values.certificates.ipAddresses }} + ipAddresses: + {{- range .Values.certificates.ipAddresses }} + - {{ . }} + {{- end }} +{{- end }} + keyAlgorithm: {{ .Values.certificates.keyAlgorithm }} + keySize: {{ .Values.certificates.keySize }} + duration: {{ .Values.certificates.duration }} + renewBefore: {{ .Values.certificates.renewBefore }} +{{- end }} diff --git a/k8s/seaweedfs/templates/filer-service.yaml b/k8s/seaweedfs/templates/filer-service.yaml new file mode 100644 index 000000000..493859e36 --- /dev/null +++ b/k8s/seaweedfs/templates/filer-service.yaml @@ -0,0 +1,22 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "seaweedfs.name" . }}-filer + namespace: {{ .Release.Namespace }} + labels: + app: {{ template "seaweedfs.name" . }} + component: filer +spec: + clusterIP: None + ports: + - name: "swfs-filer" + port: {{ .Values.filer.port }} + targetPort: {{ .Values.filer.port }} + protocol: TCP + - name: "swfs-filer-grpc" + port: {{ .Values.filer.grpcPort }} + targetPort: {{ .Values.filer.grpcPort }} + protocol: TCP + selector: + app: {{ template "seaweedfs.name" . }} + component: filer \ No newline at end of file diff --git a/k8s/seaweedfs/templates/filer-statefulset.yaml b/k8s/seaweedfs/templates/filer-statefulset.yaml new file mode 100644 index 000000000..6ee57edf7 --- /dev/null +++ b/k8s/seaweedfs/templates/filer-statefulset.yaml @@ -0,0 +1,210 @@ +{{- if .Values.filer.enabled }} +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ template "seaweedfs.name" . }}-filer + namespace: {{ .Release.Namespace }} + labels: + app: {{ template "seaweedfs.name" . }} + chart: {{ template "seaweedfs.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +spec: + serviceName: {{ template "seaweedfs.name" . }}-filer + podManagementPolicy: Parallel + replicas: {{ .Values.filer.replicas }} + {{- if (gt (int .Values.filer.updatePartition) 0) }} + updateStrategy: + type: RollingUpdate + rollingUpdate: + partition: {{ .Values.filer.updatePartition }} + {{- end }} + selector: + matchLabels: + app: {{ template "seaweedfs.name" . }} + chart: {{ template "seaweedfs.chart" . 
}} + release: {{ .Release.Name }} + component: filer + template: + metadata: + labels: + app: {{ template "seaweedfs.name" . }} + chart: {{ template "seaweedfs.chart" . }} + release: {{ .Release.Name }} + component: filer + spec: + restartPolicy: {{ default .Values.global.restartPolicy .Values.filer.restartPolicy }} + {{- if .Values.filer.affinity }} + affinity: + {{ tpl .Values.filer.affinity . | nindent 8 | trim }} + {{- end }} + {{- if .Values.filer.tolerations }} + tolerations: + {{ tpl .Values.filer.tolerations . | nindent 8 | trim }} + {{- end }} + {{- if .Values.global.imagePullSecrets }} + imagePullSecrets: + - name: {{ .Values.global.imagePullSecrets }} + {{- end }} + serviceAccountName: seaweefds-rw-sa #hack for delete pod master after migration + terminationGracePeriodSeconds: 60 + {{- if .Values.filer.priorityClassName }} + priorityClassName: {{ .Values.filer.priorityClassName | quote }} + {{- end }} + enableServiceLinks: false + containers: + - name: seaweedfs + image: {{ template "filer.image" . }} + imagePullPolicy: {{ default "IfNotPresent" .Values.global.imagePullPolicy }} + env: + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: WEED_MYSQL_USERNAME + valueFrom: + secretKeyRef: + name: secret-seaweedfs-db + key: user + - name: WEED_MYSQL_PASSWORD + valueFrom: + secretKeyRef: + name: secret-seaweedfs-db + key: password + - name: SEAWEEDFS_FULLNAME + value: "{{ template "seaweedfs.name" . }}" + {{- if .Values.filer.extraEnvironmentVars }} + {{- range $key, $value := .Values.filer.extraEnvironmentVars }} + - name: {{ $key }} + value: {{ $value | quote }} + {{- end }} + {{- end }} + command: + - "/bin/sh" + - "-ec" + - | + exec /usr/bin/weed -logdir=/logs \ + {{- if .Values.filer.loggingOverrideLevel }} + -v={{ .Values.filer.loggingOverrideLevel }} \ + {{- else }} + -v={{ .Values.global.loggingLevel }} \ + {{- end }} + filer \ + -port={{ .Values.filer.port }} \ + {{- if .Values.filer.redirectOnRead }} + -redirectOnRead \ + {{- end }} + {{- if .Values.filer.disableHttp }} + -disableHttp \ + {{- end }} + {{- if .Values.filer.disableDirListing }} + -disableDirListing \ + {{- end }} + -dirListLimit={{ .Values.filer.dirListLimit }} \ + -ip=${POD_IP} \ + -master={{ range $index := until (.Values.master.replicas | int) }}${SEAWEEDFS_FULLNAME}-master-{{ $index }}.${SEAWEEDFS_FULLNAME}-master:{{ $.Values.master.port }}{{ if lt $index (sub ($.Values.master.replicas | int) 1) }},{{ end }}{{ end }} + {{- if or (.Values.global.enableSecurity) (.Values.filer.extraVolumeMounts) }} + volumeMounts: + - name: seaweedfs-filer-log-volume + mountPath: "/logs/" + {{- if .Values.global.enableSecurity }} + - name: security-config + readOnly: true + mountPath: /etc/seaweedfs/security.toml + subPath: security.toml + - name: ca-cert + readOnly: true + mountPath: /usr/local/share/ca-certificates/ca/ + - name: master-cert + readOnly: true + mountPath: /usr/local/share/ca-certificates/master/ + - name: volume-cert + readOnly: true + mountPath: /usr/local/share/ca-certificates/volume/ + - name: filer-cert + readOnly: true + mountPath: /usr/local/share/ca-certificates/filer/ + - name: client-cert + readOnly: true + mountPath: /usr/local/share/ca-certificates/client/ + {{- end }} + {{ tpl .Values.filer.extraVolumeMounts . 
| nindent 12 | trim }} + {{- end }} + ports: + - containerPort: {{ .Values.filer.port }} + name: swfs-filer + - containerPort: {{ .Values.filer.grpcPort }} + #name: swfs-filer-grpc + readinessProbe: + httpGet: + path: / + port: {{ .Values.filer.port }} + scheme: HTTP + initialDelaySeconds: 10 + periodSeconds: 15 + successThreshold: 1 + failureThreshold: 100 + livenessProbe: + httpGet: + path: / + port: {{ .Values.filer.port }} + scheme: HTTP + initialDelaySeconds: 20 + periodSeconds: 30 + successThreshold: 1 + failureThreshold: 5 + {{- if .Values.filer.resources }} + resources: + {{ tpl .Values.filer.resources . | nindent 12 | trim }} + {{- end }} + volumes: + - name: seaweedfs-filer-log-volume + hostPath: + path: /storage/logs/seaweedfs/filer + type: DirectoryOrCreate + {{- if .Values.global.enableSecurity }} + - name: security-config + configMap: + name: {{ template "seaweedfs.name" . }}-security-config + - name: ca-cert + secret: + secretName: {{ template "seaweedfs.name" . }}-ca-cert + - name: master-cert + secret: + secretName: {{ template "seaweedfs.name" . }}-master-cert + - name: volume-cert + secret: + secretName: {{ template "seaweedfs.name" . }}-volume-cert + - name: filer-cert + secret: + secretName: {{ template "seaweedfs.name" . }}-filer-cert + - name: client-cert + secret: + secretName: {{ template "seaweedfs.name" . }}-client-cert + {{- end }} + {{ tpl .Values.filer.extraVolumes . | indent 8 | trim }} + {{- if .Values.filer.nodeSelector }} + nodeSelector: + {{ tpl .Values.filer.nodeSelector . | indent 8 | trim }} + {{- end }} +{{/* volumeClaimTemplates:*/}} +{{/* - metadata:*/}} +{{/* name: data-{{ .Release.Namespace }}*/}} +{{/* spec:*/}} +{{/* accessModes:*/}} +{{/* - ReadWriteOnce*/}} +{{/* resources:*/}} +{{/* requests:*/}} +{{/* storage: {{ .Values.filer.storage }}*/}} +{{/* {{- if .Values.filer.storageClass }}*/}} +{{/* storageClassName: {{ .Values.filer.storageClass }}*/}} +{{/* {{- end }}*/}} +{{- end }} diff --git a/k8s/seaweedfs/templates/ingress.yaml b/k8s/seaweedfs/templates/ingress.yaml new file mode 100644 index 000000000..dcd52c138 --- /dev/null +++ b/k8s/seaweedfs/templates/ingress.yaml @@ -0,0 +1,59 @@ +apiVersion: extensions/v1beta1 +kind: Ingress +metadata: + name: ingress-{{ template "seaweedfs.name" . }}-filer + annotations: + kubernetes.io/ingress.class: "nginx" + nginx.ingress.kubernetes.io/auth-type: "basic" + nginx.ingress.kubernetes.io/auth-secret: "default/ingress-basic-auth-secret" + nginx.ingress.kubernetes.io/auth-realm: 'Authentication Required - SW-Filer' + nginx.ingress.kubernetes.io/service-upstream: "true" + nginx.ingress.kubernetes.io/rewrite-target: /$1 + nginx.ingress.kubernetes.io/use-regex: "true" + nginx.ingress.kubernetes.io/enable-rewrite-log: "true" + nginx.ingress.kubernetes.io/ssl-redirect: "false" + nginx.ingress.kubernetes.io/force-ssl-redirect: "false" + nginx.ingress.kubernetes.io/configuration-snippet: | + sub_filter '' ' '; #add base url + sub_filter '="/' '="./'; #make absolute paths to relative + sub_filter '=/' '=./'; + sub_filter '/seaweedfsstatic' './seaweedfsstatic'; + sub_filter_once off; +spec: + rules: + - http: + paths: + - path: /sw-filer/?(.*) + backend: + serviceName: {{ template "seaweedfs.name" . }}-filer + servicePort: {{ .Values.filer.port }} +--- +apiVersion: extensions/v1beta1 +kind: Ingress +metadata: + name: ingress-{{ template "seaweedfs.name" . 
}}-master + annotations: + kubernetes.io/ingress.class: "nginx" + nginx.ingress.kubernetes.io/auth-type: "basic" + nginx.ingress.kubernetes.io/auth-secret: "default/ingress-basic-auth-secret" + nginx.ingress.kubernetes.io/auth-realm: 'Authentication Required - SW-Master' + nginx.ingress.kubernetes.io/service-upstream: "true" + nginx.ingress.kubernetes.io/rewrite-target: /$1 + nginx.ingress.kubernetes.io/use-regex: "true" + nginx.ingress.kubernetes.io/enable-rewrite-log: "true" + nginx.ingress.kubernetes.io/ssl-redirect: "false" + nginx.ingress.kubernetes.io/force-ssl-redirect: "false" + nginx.ingress.kubernetes.io/configuration-snippet: | + sub_filter '' ' '; #add base url + sub_filter '="/' '="./'; #make absolute paths to relative + sub_filter '=/' '=./'; + sub_filter '/seaweedfsstatic' './seaweedfsstatic'; + sub_filter_once off; +spec: + rules: + - http: + paths: + - path: /sw-master/?(.*) + backend: + serviceName: {{ template "seaweedfs.name" . }}-master + servicePort: {{ .Values.master.port }} diff --git a/k8s/seaweedfs/templates/master-cert.yaml b/k8s/seaweedfs/templates/master-cert.yaml new file mode 100644 index 000000000..a8b0fc1d1 --- /dev/null +++ b/k8s/seaweedfs/templates/master-cert.yaml @@ -0,0 +1,33 @@ +{{- if .Values.global.enableSecurity }} +apiVersion: certmanager.k8s.io/v1alpha1 +kind: Certificate +metadata: + name: {{ template "seaweedfs.name" . }}-master-cert + namespace: {{ .Release.Namespace }} +spec: + secretName: {{ template "seaweedfs.name" . }}-master-cert + issuerRef: + name: {{ template "seaweedfs.name" . }}-clusterissuer + kind: ClusterIssuer + commonName: {{ .Values.certificates.commonName }} + organization: + - "SeaweedFS CA" + dnsNames: + - '*.{{ .Release.Namespace }}' + - '*.{{ .Release.Namespace }}.svc' + - '*.{{ .Release.Namespace }}.svc.cluster.local' + - '*.{{ template "seaweedfs.name" . }}-master' + - '*.{{ template "seaweedfs.name" . }}-master.{{ .Release.Namespace }}' + - '*.{{ template "seaweedfs.name" . }}-master.{{ .Release.Namespace }}.svc' + - '*.{{ template "seaweedfs.name" . }}-master.{{ .Release.Namespace }}.svc.cluster.local' +{{- if .Values.certificates.ipAddresses }} + ipAddresses: + {{- range .Values.certificates.ipAddresses }} + - {{ . }} + {{- end }} +{{- end }} + keyAlgorithm: {{ .Values.certificates.keyAlgorithm }} + keySize: {{ .Values.certificates.keySize }} + duration: {{ .Values.certificates.duration }} + renewBefore: {{ .Values.certificates.renewBefore }} +{{- end }} diff --git a/k8s/seaweedfs/templates/master-service.yaml b/k8s/seaweedfs/templates/master-service.yaml new file mode 100644 index 000000000..f7603bd91 --- /dev/null +++ b/k8s/seaweedfs/templates/master-service.yaml @@ -0,0 +1,24 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "seaweedfs.name" . }}-master + namespace: {{ .Release.Namespace }} + labels: + app: {{ template "seaweedfs.name" . }} + component: master + annotations: + service.alpha.kubernetes.io/tolerate-unready-endpoints: "true" +spec: + clusterIP: None + ports: + - name: "swfs-master" + port: {{ .Values.master.port }} + targetPort: {{ .Values.master.port }} + protocol: TCP + - name: "swfs-master-grpc" + port: {{ .Values.master.grpcPort }} + targetPort: {{ .Values.master.grpcPort }} + protocol: TCP + selector: + app: {{ template "seaweedfs.name" . 
}} + component: master \ No newline at end of file diff --git a/k8s/seaweedfs/templates/master-statefulset.yaml b/k8s/seaweedfs/templates/master-statefulset.yaml new file mode 100644 index 000000000..87050534f --- /dev/null +++ b/k8s/seaweedfs/templates/master-statefulset.yaml @@ -0,0 +1,199 @@ +{{- if .Values.master.enabled }} +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ template "seaweedfs.name" . }}-master + namespace: {{ .Release.Namespace }} + labels: + app: {{ template "seaweedfs.name" . }} + chart: {{ template "seaweedfs.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +spec: + serviceName: {{ template "seaweedfs.name" . }}-master + podManagementPolicy: Parallel + replicas: {{ .Values.master.replicas }} + {{- if (gt (int .Values.master.updatePartition) 0) }} + updateStrategy: + type: RollingUpdate + rollingUpdate: + partition: {{ .Values.master.updatePartition }} + {{- end }} + selector: + matchLabels: + app: {{ template "seaweedfs.name" . }} + chart: {{ template "seaweedfs.chart" . }} + release: {{ .Release.Name }} + component: master + template: + metadata: + labels: + app: {{ template "seaweedfs.name" . }} + chart: {{ template "seaweedfs.chart" . }} + release: {{ .Release.Name }} + component: master + spec: + restartPolicy: {{ default .Values.global.restartPolicy .Values.master.restartPolicy }} + {{- if .Values.master.affinity }} + affinity: + {{ tpl .Values.master.affinity . | nindent 8 | trim }} + {{- end }} + {{- if .Values.master.tolerations }} + tolerations: + {{ tpl .Values.master.tolerations . | nindent 8 | trim }} + {{- end }} + {{- if .Values.global.imagePullSecrets }} + imagePullSecrets: + - name: {{ .Values.global.imagePullSecrets }} + {{- end }} + terminationGracePeriodSeconds: 60 + {{- if .Values.master.priorityClassName }} + priorityClassName: {{ .Values.master.priorityClassName | quote }} + {{- end }} + enableServiceLinks: false + containers: + - name: seaweedfs + image: {{ template "master.image" . }} + imagePullPolicy: {{ default "IfNotPresent" .Values.global.imagePullPolicy }} + env: + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: SEAWEEDFS_FULLNAME + value: "{{ template "seaweedfs.name" . 
}}" + command: + - "/bin/sh" + - "-ec" + - | + exec /usr/bin/weed -logdir=/logs \ + {{- if .Values.master.loggingOverrideLevel }} + -v={{ .Values.master.loggingOverrideLevel }} \ + {{- else }} + -v={{ .Values.global.loggingLevel }} \ + {{- end }} + master \ + -port={{ .Values.master.port }} \ + -mdir=/data \ + -ip.bind={{ .Values.master.ipBind }} \ + {{- if .Values.master.volumePreallocate }} + -volumePreallocate \ + {{- end }} + {{- if .Values.global.monitoring.enabled }} + -metrics.address="{{ .Values.global.monitoring.gatewayHost }}:{{ .Values.global.monitoring.gatewayPort }}" \ + {{- end }} + -volumeSizeLimitMB={{ .Values.master.volumeSizeLimitMB }} \ + {{- if .Values.master.disableHttp }} + -disableHttp \ + {{- end }} + -ip=${POD_NAME}.${SEAWEEDFS_FULLNAME}-master \ + -peers={{ range $index := until (.Values.master.replicas | int) }}${SEAWEEDFS_FULLNAME}-master-{{ $index }}.${SEAWEEDFS_FULLNAME}-master:{{ $.Values.master.port }}{{ if lt $index (sub ($.Values.master.replicas | int) 1) }},{{ end }}{{ end }} + volumeMounts: + - name : data-{{ .Release.Namespace }} + mountPath: /data + - name: seaweedfs-master-log-volume + mountPath: "/logs/" + {{- if .Values.global.enableSecurity }} + - name: security-config + readOnly: true + mountPath: /etc/seaweedfs/security.toml + subPath: security.toml + - name: ca-cert + readOnly: true + mountPath: /usr/local/share/ca-certificates/ca/ + - name: master-cert + readOnly: true + mountPath: /usr/local/share/ca-certificates/master/ + - name: volume-cert + readOnly: true + mountPath: /usr/local/share/ca-certificates/volume/ + - name: filer-cert + readOnly: true + mountPath: /usr/local/share/ca-certificates/filer/ + - name: client-cert + readOnly: true + mountPath: /usr/local/share/ca-certificates/client/ + {{- end }} + {{ tpl .Values.master.extraVolumeMounts . | nindent 12 | trim }} + ports: + - containerPort: {{ .Values.master.port }} + name: swfs-master + - containerPort: {{ .Values.master.grpcPort }} + #name: swfs-master-grpc + readinessProbe: + httpGet: + path: /cluster/status + port: {{ .Values.master.port }} + scheme: HTTP + initialDelaySeconds: 5 + periodSeconds: 15 + successThreshold: 2 + failureThreshold: 100 + livenessProbe: + httpGet: + path: /cluster/status + port: {{ .Values.master.port }} + scheme: HTTP + initialDelaySeconds: 20 + periodSeconds: 10 + successThreshold: 1 + failureThreshold: 6 + {{- if .Values.master.resources }} + resources: + {{ tpl .Values.master.resources . | nindent 12 | trim }} + {{- end }} + volumes: + - name: seaweedfs-master-log-volume + hostPath: + path: /storage/logs/seaweedfs/master + type: DirectoryOrCreate + - name: data-{{ .Release.Namespace }} + hostPath: + path: /ssd/seaweed-master/ + type: DirectoryOrCreate + {{- if .Values.global.enableSecurity }} + - name: security-config + configMap: + name: {{ template "seaweedfs.name" . }}-security-config + - name: ca-cert + secret: + secretName: {{ template "seaweedfs.name" . }}-ca-cert + - name: master-cert + secret: + secretName: {{ template "seaweedfs.name" . }}-master-cert + - name: volume-cert + secret: + secretName: {{ template "seaweedfs.name" . }}-volume-cert + - name: filer-cert + secret: + secretName: {{ template "seaweedfs.name" . }}-filer-cert + - name: client-cert + secret: + secretName: {{ template "seaweedfs.name" . }}-client-cert + {{- end }} + {{ tpl .Values.master.extraVolumes . | indent 8 | trim }} + {{- if .Values.master.nodeSelector }} + nodeSelector: + {{ tpl .Values.master.nodeSelector . 
| indent 8 | trim }} + {{- end }} +{{/* volumeClaimTemplates:*/}} +{{/* - metadata:*/}} +{{/* name: data-{{ .Release.Namespace }}*/}} +{{/* spec:*/}} +{{/* accessModes:*/}} +{{/* - ReadWriteOnce*/}} +{{/* resources:*/}} +{{/* requests:*/}} +{{/* storage: {{ .Values.master.storage }}*/}} +{{/* {{- if .Values.master.storageClass }}*/}} +{{/* storageClassName: {{ .Values.master.storageClass }}*/}} +{{/* {{- end }}*/}} +{{- end }} diff --git a/k8s/seaweedfs/templates/s3-deployment.yaml b/k8s/seaweedfs/templates/s3-deployment.yaml new file mode 100644 index 000000000..c124af287 --- /dev/null +++ b/k8s/seaweedfs/templates/s3-deployment.yaml @@ -0,0 +1,159 @@ +{{- if .Values.s3.enabled }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ template "seaweedfs.name" . }}-s3 + namespace: {{ .Release.Namespace }} + labels: + app: {{ template "seaweedfs.name" . }} + chart: {{ template "seaweedfs.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +spec: + serviceName: {{ template "seaweedfs.name" . }}-s3 + replicas: {{ .Values.s3.replicas }} + selector: + matchLabels: + app: {{ template "seaweedfs.name" . }} + chart: {{ template "seaweedfs.chart" . }} + release: {{ .Release.Name }} + component: s3 + template: + metadata: + labels: + app: {{ template "seaweedfs.name" . }} + chart: {{ template "seaweedfs.chart" . }} + release: {{ .Release.Name }} + component: s3 + spec: + restartPolicy: {{ default .Values.global.restartPolicy .Values.s3.restartPolicy }} + {{- if .Values.s3.tolerations }} + tolerations: + {{ tpl .Values.s3.tolerations . | nindent 8 | trim }} + {{- end }} + {{- if .Values.global.imagePullSecrets }} + imagePullSecrets: + - name: {{ .Values.global.imagePullSecrets }} + {{- end }} + terminationGracePeriodSeconds: 10 + {{- if .Values.s3.priorityClassName }} + priorityClassName: {{ .Values.s3.priorityClassName | quote }} + {{- end }} + enableServiceLinks: false + containers: + - name: seaweedfs + image: {{ template "s3.image" . }} + imagePullPolicy: {{ default "IfNotPresent" .Values.global.imagePullPolicy }} + env: + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: SEAWEEDFS_FULLNAME + value: "{{ template "seaweedfs.name" . }}" + command: + - "/bin/sh" + - "-ec" + - | + exec /usr/bin/weed \ + {{- if .Values.s3.loggingOverrideLevel }} + -v={{ .Values.s3.loggingOverrideLevel }} \ + {{- else }} + -v={{ .Values.global.loggingLevel }} \ + {{- end }} + s3 \ + -port={{ .Values.s3.port }} \ + {{- if .Values.global.enableSecurity }} + -cert.file=/usr/local/share/ca-certificates/client/tls.crt \ + -key.file=/usr/local/share/ca-certificates/client/tls.key \ + {{- end }} + -filer.dir.buckets={{ .Values.s3.filerDirBuckets }} \ + {{- if .Values.s3.domainName }} + -domainName={{ .Values.s3.domainName }} \ + {{- end }} + -filer={{ template "seaweedfs.name" . 
}}-filer:{{ .Values.filer.port }} + {{- if or (.Values.global.enableSecurity) (.Values.s3.extraVolumeMounts) }} + volumeMounts: + {{- if .Values.global.enableSecurity }} + - name: security-config + readOnly: true + mountPath: /etc/seaweedfs/security.toml + subPath: security.toml + - name: ca-cert + readOnly: true + mountPath: /usr/local/share/ca-certificates/ca/ + - name: master-cert + readOnly: true + mountPath: /usr/local/share/ca-certificates/master/ + - name: volume-cert + readOnly: true + mountPath: /usr/local/share/ca-certificates/volume/ + - name: filer-cert + readOnly: true + mountPath: /usr/local/share/ca-certificates/filer/ + - name: client-cert + readOnly: true + mountPath: /usr/local/share/ca-certificates/client/ + {{- end }} + {{ tpl .Values.s3.extraVolumeMounts . | nindent 12 | trim }} + {{- end }} + ports: + - containerPort: {{ .Values.s3.port }} + name: swfs-s3 + readinessProbe: + httpGet: + path: / + port: {{ .Values.s3.port }} + scheme: HTTP + initialDelaySeconds: 15 + periodSeconds: 15 + successThreshold: 1 + failureThreshold: 100 + livenessProbe: + httpGet: + path: / + port: {{ .Values.s3.port }} + scheme: HTTP + initialDelaySeconds: 20 + periodSeconds: 60 + successThreshold: 1 + failureThreshold: 20 + {{- if .Values.s3.resources }} + resources: + {{ tpl .Values.s3.resources . | nindent 12 | trim }} + {{- end }} + volumes: + {{- if .Values.global.enableSecurity }} + - name: security-config + configMap: + name: {{ template "seaweedfs.name" . }}-security-config + - name: ca-cert + secret: + secretName: {{ template "seaweedfs.name" . }}-ca-cert + - name: master-cert + secret: + secretName: {{ template "seaweedfs.name" . }}-master-cert + - name: volume-cert + secret: + secretName: {{ template "seaweedfs.name" . }}-volume-cert + - name: filer-cert + secret: + secretName: {{ template "seaweedfs.name" . }}-filer-cert + - name: client-cert + secret: + secretName: {{ template "seaweedfs.name" . }}-client-cert + {{- end }} + {{ tpl .Values.s3.extraVolumes . | indent 8 | trim }} + {{- if .Values.s3.nodeSelector }} + nodeSelector: + {{ tpl .Values.s3.nodeSelector . | indent 8 | trim }} + {{- end }} +{{- end }} diff --git a/k8s/seaweedfs/templates/s3-service.yaml b/k8s/seaweedfs/templates/s3-service.yaml new file mode 100644 index 000000000..b088e25fa --- /dev/null +++ b/k8s/seaweedfs/templates/s3-service.yaml @@ -0,0 +1,17 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "seaweedfs.name" . }}-s3 + namespace: {{ .Release.Namespace }} + labels: + app: {{ template "seaweedfs.name" . }} + component: s3 +spec: + ports: + - name: "swfs-s3" + port: {{ .Values.s3.port }} + targetPort: {{ .Values.s3.port }} + protocol: TCP + selector: + app: {{ template "seaweedfs.name" . 
}} + component: s3 \ No newline at end of file diff --git a/k8s/seaweedfs/templates/seaweefs-grafana-dashboard.yaml b/k8s/seaweedfs/templates/seaweefs-grafana-dashboard.yaml new file mode 100644 index 000000000..c943ea50f --- /dev/null +++ b/k8s/seaweedfs/templates/seaweefs-grafana-dashboard.yaml @@ -0,0 +1,1352 @@ +{{- if .Values.global.monitoring.enabled }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: seaweefsfs-grafana-dashboard + labels: + grafana_dashboard: "1" +data: + seaweedfs.json: |- + { + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": "Prometheus", + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "limit": 100, + "name": "Annotations & Alerts", + "showIn": 0, + "type": "dashboard" + } + ] + }, + "editable": true, + "gnetId": 10423, + "graphTooltip": 0, + "hideControls": false, + "id": null, + "links": [], + "refresh": "30s", + "rows": [ + { + "collapse": false, + "height": 251, + "panels": [ + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "editable": true, + "error": false, + "fill": 1, + "grid": {}, + "id": 46, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 4, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "histogram_quantile(0.80, sum(rate(SeaweedFS_filer_request_seconds_bucket[1m])) by (le))", + "format": "time_series", + "hide": false, + "intervalFactor": 2, + "legendFormat": "average", + "refId": "A", + "step": 60 + }, + { + "expr": "histogram_quantile(0.80, sum(rate(SeaweedFS_filer_request_seconds_bucket[1m])) by (le, type))", + "format": "time_series", + "hide": false, + "intervalFactor": 2, + "legendFormat": "{{`{{type}}`}}", + "refId": "B", + "step": 60 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Filer Request Duration 95th percentile", + "tooltip": { + "msResolution": true, + "shared": true, + "sort": 0, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "s", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "editable": true, + "error": false, + "fill": 1, + "grid": {}, + "id": 49, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 4, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "histogram_quantile(0.95, sum(rate(SeaweedFS_filer_request_seconds_bucket[1m])) by (le))", + "format": "time_series", + "hide": false, + "intervalFactor": 2, + "legendFormat": "average", + "refId": "A", + "step": 60 + }, + { + "expr": "histogram_quantile(0.95, 
sum(rate(SeaweedFS_filer_request_seconds_bucket[1m])) by (le, type))", + "format": "time_series", + "hide": false, + "intervalFactor": 2, + "legendFormat": "{{`{{type}}`}}", + "refId": "B", + "step": 60 + }, + { + "expr": "", + "format": "time_series", + "intervalFactor": 2, + "refId": "C" + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Filer Request Duration 95th percentile", + "tooltip": { + "msResolution": true, + "shared": true, + "sort": 0, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "s", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "editable": true, + "error": false, + "fill": 1, + "grid": {}, + "id": 45, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 4, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "histogram_quantile(0.99, sum(rate(SeaweedFS_filer_request_seconds_bucket[1m])) by (le))", + "format": "time_series", + "hide": false, + "intervalFactor": 2, + "legendFormat": "average", + "refId": "A", + "step": 60 + }, + { + "expr": "histogram_quantile(0.99, sum(rate(SeaweedFS_filer_request_seconds_bucket[1m])) by (le, type))", + "format": "time_series", + "hide": false, + "intervalFactor": 2, + "legendFormat": "{{`{{type}}`}}", + "refId": "B", + "step": 60 + }, + { + "expr": "", + "format": "time_series", + "intervalFactor": 2, + "refId": "C" + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Filer Request Duration 99th percentile", + "tooltip": { + "msResolution": true, + "shared": true, + "sort": 0, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "s", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "editable": true, + "error": false, + "fill": 0, + "grid": {}, + "id": 2, + "legend": { + "alignAsTable": true, + "avg": false, + "current": true, + "hideEmpty": true, + "hideZero": true, + "max": true, + "min": false, + "rightSide": true, + "show": true, + "sideWidth": 250, + "sort": "max", + "sortDesc": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "links": [], + "minSpan": 12, + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + { + "alias": "total", + "lines": false + } + ], + "spaceLength": 10, + "span": 12, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "rate(SeaweedFS_filer_request_total[1m]) * 5", + "format": "time_series", + "intervalFactor": 2, 
+ "legendFormat": "{{`{{type}}`}}", + "refId": "A", + "step": 30 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Filer QPS", + "tooltip": { + "msResolution": true, + "shared": true, + "sort": 2, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "Filer", + "titleSize": "h6" + }, + { + "collapse": false, + "height": 252, + "panels": [ + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "editable": true, + "error": false, + "fill": 1, + "grid": {}, + "id": 47, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 6, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "histogram_quantile(0.99, sum(rate(SeaweedFS_volumeServer_request_seconds_bucket[1m])) by (le, exported_instance))", + "format": "time_series", + "hide": false, + "intervalFactor": 2, + "legendFormat": "{{`{{exported_instance}}`}}", + "refId": "B" + }, + { + "expr": "histogram_quantile(0.99, sum(rate(SeaweedFS_volumeServer_request_seconds_bucket[1m])) by (le))", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "average", + "refId": "C" + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Volume Server Request Duration 99th percentile", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "s", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "editable": true, + "error": false, + "fill": 1, + "grid": {}, + "id": 40, + "legend": { + "alignAsTable": true, + "avg": false, + "current": false, + "hideEmpty": true, + "hideZero": true, + "max": false, + "min": false, + "rightSide": true, + "show": true, + "sort": "total", + "sortDesc": true, + "total": true, + "values": true + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 6, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(rate(SeaweedFS_volumeServer_request_total[1m])) by (type)", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{`{{type}}`}}", + "refId": "A", + "step": 4 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Volume Server QPS", + "tooltip": 
{ + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "fill": 1, + "id": 48, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 12, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(SeaweedFS_volumeServer_volumes) by (collection, type)", + "format": "time_series", + "hide": false, + "intervalFactor": 2, + "legendFormat": "{{`{{collection}}`}} {{`{{type}}`}}", + "refId": "A" + }, + { + "expr": "sum(SeaweedFS_volumeServer_max_volumes)", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "Total", + "refId": "B" + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Volume Count", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "fill": 1, + "id": 50, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 12, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(SeaweedFS_volumeServer_total_disk_size) by (collection, type)", + "format": "time_series", + "hide": false, + "intervalFactor": 2, + "legendFormat": "{{`{{collection}}`}} {{`{{type}}`}}", + "refId": "A" + }, + { + "expr": "sum(SeaweedFS_volumeServer_total_disk_size)", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "Total", + "refId": "B" + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Used Disk Space by Collection and Type", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "bytes", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "fill": 1, + "id": 51, + "legend": { + 
"avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 12, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(SeaweedFS_volumeServer_total_disk_size) by (exported_instance)", + "format": "time_series", + "hide": false, + "intervalFactor": 2, + "legendFormat": "{{`{{exported_instance}}`}}", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Used Disk Space by Host", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "bytes", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "Volume Server", + "titleSize": "h6" + }, + { + "collapse": false, + "height": 251, + "panels": [ + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "editable": true, + "error": false, + "fill": 1, + "grid": {}, + "id": 12, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 6, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "histogram_quantile(0.99, sum(rate(SeaweedFS_filerStore_request_seconds_bucket[1m])) by (le, type))", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{`{{type}}`}}", + "refId": "B" + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Filer Store Request Duration 99th percentile", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "s", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "editable": true, + "error": false, + "fill": 1, + "grid": {}, + "id": 14, + "legend": { + "alignAsTable": true, + "avg": true, + "current": true, + "hideEmpty": false, + "hideZero": false, + "max": false, + "min": false, + "rightSide": true, + "show": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 6, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": 
"sum(rate(SeaweedFS_filerStore_request_total [1m])) by (type)", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{`{{type}}`}}", + "refId": "B" + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Filer Store QPS", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "Filer Store", + "titleSize": "h6" + }, + { + "collapse": false, + "height": 242, + "panels": [ + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "editable": true, + "error": false, + "fill": 1, + "grid": {}, + "id": 52, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 6, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "go_memstats_alloc_bytes{exported_job=\"filer\"}", + "format": "time_series", + "hide": false, + "intervalFactor": 2, + "legendFormat": "bytes allocated", + "refId": "B" + }, + { + "expr": "rate(go_memstats_alloc_bytes_total{exported_job=\"filer\"}[30s])", + "format": "time_series", + "hide": false, + "intervalFactor": 2, + "legendFormat": "alloc rate", + "refId": "A" + }, + { + "expr": "go_memstats_stack_inuse_bytes{exported_job=\"filer\"}", + "format": "time_series", + "hide": false, + "intervalFactor": 2, + "legendFormat": "stack inuse", + "refId": "C" + }, + { + "expr": "go_memstats_heap_inuse_bytes{exported_job=\"filer\"}", + "format": "time_series", + "hide": false, + "intervalFactor": 2, + "legendFormat": "heap inuse", + "refId": "D" + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Filer Go Memory Stats", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "bytes", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "editable": true, + "error": false, + "fill": 1, + "grid": {}, + "id": 54, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 6, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": 
"go_gc_duration_seconds{exported_job=\"filer\"}", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{`{{quantile}}`}}", + "refId": "B" + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Filer Go GC duration quantiles", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "s", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "editable": true, + "error": false, + "fill": 1, + "grid": {}, + "id": 53, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 12, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "go_goroutines{exported_job=\"filer\"}", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{`{{exported_instance}}`}}", + "refId": "B" + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Filer Go Routines", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "none", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "Filer Instances", + "titleSize": "h6" + }, + { + "collapse": true, + "height": 250, + "panels": [], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": false, + "title": "Dashboard Row", + "titleSize": "h6" + } + ], + "schemaVersion": 14, + "style": "dark", + "tags": [], + "templating": { + "list": [] + }, + "time": { + "from": "now-15m", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "5s", + "10s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ], + "time_options": [ + "5m", + "15m", + "1h", + "6h", + "12h", + "24h", + "2d", + "7d", + "30d" + ] + }, + "timezone": "browser", + "title": "SeaweedFS", + "version": 3 + } +{{- end }} \ No newline at end of file diff --git a/k8s/seaweedfs/templates/secret-seaweedfs-db.yaml b/k8s/seaweedfs/templates/secret-seaweedfs-db.yaml new file mode 100644 index 000000000..c397824ad --- /dev/null +++ b/k8s/seaweedfs/templates/secret-seaweedfs-db.yaml @@ -0,0 +1,14 @@ +apiVersion: v1 +kind: Secret +type: Opaque +metadata: + name: secret-seaweedfs-db + namespace: {{ .Release.Namespace }} + annotations: + "helm.sh/resource-policy": keep + "helm.sh/hook": "pre-install" +stringData: + user: {{ "YourSWUser" | b64enc }} + password: "HardCodedPassword" + # better to random generate and create in DB + # password: {{ randAlphaNum 10 | sha256sum 
| b64enc | trunc 32 }}

diff --git a/k8s/seaweedfs/templates/security-configmap.yaml b/k8s/seaweedfs/templates/security-configmap.yaml
new file mode 100644
index 000000000..7d06614ec
--- /dev/null
+++ b/k8s/seaweedfs/templates/security-configmap.yaml
@@ -0,0 +1,52 @@
+{{- if .Values.global.enableSecurity }}
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: {{ template "seaweedfs.name" . }}-security-config
+  namespace: {{ .Release.Namespace }}
+  labels:
+    app: {{ template "seaweedfs.name" . }}
+    chart: {{ template "seaweedfs.chart" . }}
+    heritage: {{ .Release.Service }}
+    release: {{ .Release.Name }}
+data:
+  security.toml: |-
+    # this file is read by master, volume server, and filer
+
+    # the jwt signing key is read by master and volume server
+    # a jwt expires in 10 seconds
+    [jwt.signing]
+    key = "{{ randAlphaNum 10 | b64enc }}"
+
+    # all grpc tls authentications are mutual
+    # the values for the following ca, cert, and key are paths to the PEM files.
+    [grpc]
+    ca = "/usr/local/share/ca-certificates/ca/tls.crt"
+
+    [grpc.volume]
+    cert = "/usr/local/share/ca-certificates/volume/tls.crt"
+    key = "/usr/local/share/ca-certificates/volume/tls.key"
+
+    [grpc.master]
+    cert = "/usr/local/share/ca-certificates/master/tls.crt"
+    key = "/usr/local/share/ca-certificates/master/tls.key"
+
+    [grpc.filer]
+    cert = "/usr/local/share/ca-certificates/filer/tls.crt"
+    key = "/usr/local/share/ca-certificates/filer/tls.key"
+
+    # use this for any place that needs a grpc client
+    # i.e., "weed backup|benchmark|filer.copy|filer.replicate|mount|s3|upload"
+    [grpc.client]
+    cert = "/usr/local/share/ca-certificates/client/tls.crt"
+    key = "/usr/local/share/ca-certificates/client/tls.key"
+
+    # volume server https options
+    # Note: work in progress!
+    # this does not work with other clients, e.g., "weed filer|mount" etc, yet.
+    [https.client]
+    enabled = false
+    [https.volume]
+    cert = ""
+    key = ""
+{{- end }}
diff --git a/k8s/seaweedfs/templates/service-account.yaml b/k8s/seaweedfs/templates/service-account.yaml
new file mode 100644
index 000000000..e82ef7d62
--- /dev/null
+++ b/k8s/seaweedfs/templates/service-account.yaml
@@ -0,0 +1,29 @@
+#hack for delete pod master after migration
+---
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+  name: seaweefds-rw-cr
+rules:
+  - apiGroups: [""]
+    resources: ["pods"]
+    verbs: ["get", "list", "watch", "create", "update", "patch", "delete"]
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: seaweefds-rw-sa
+  namespace: {{ .Release.Namespace }}
+---
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+  name: system:serviceaccount:seaweefds-rw-sa:default
+subjects:
+- kind: ServiceAccount
+  name: seaweefds-rw-sa
+  namespace: {{ .Release.Namespace }}
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: seaweefds-rw-cr
\ No newline at end of file
diff --git a/k8s/seaweedfs/templates/volume-cert.yaml b/k8s/seaweedfs/templates/volume-cert.yaml
new file mode 100644
index 000000000..72c62a0f5
--- /dev/null
+++ b/k8s/seaweedfs/templates/volume-cert.yaml
@@ -0,0 +1,33 @@
+{{- if .Values.global.enableSecurity }}
+apiVersion: certmanager.k8s.io/v1alpha1
+kind: Certificate
+metadata:
+  name: {{ template "seaweedfs.name" . }}-volume-cert
+  namespace: {{ .Release.Namespace }}
+spec:
+  secretName: {{ template "seaweedfs.name" . }}-volume-cert
+  issuerRef:
+    name: {{ template "seaweedfs.name" . 
}}-clusterissuer + kind: ClusterIssuer + commonName: {{ .Values.certificates.commonName }} + organization: + - "SeaweedFS CA" + dnsNames: + - '*.{{ .Release.Namespace }}' + - '*.{{ .Release.Namespace }}.svc' + - '*.{{ .Release.Namespace }}.svc.cluster.local' + - '*.{{ template "seaweedfs.name" . }}-master' + - '*.{{ template "seaweedfs.name" . }}-master.{{ .Release.Namespace }}' + - '*.{{ template "seaweedfs.name" . }}-master.{{ .Release.Namespace }}.svc' + - '*.{{ template "seaweedfs.name" . }}-master.{{ .Release.Namespace }}.svc.cluster.local' +{{- if .Values.certificates.ipAddresses }} + ipAddresses: + {{- range .Values.certificates.ipAddresses }} + - {{ . }} + {{- end }} +{{- end }} + keyAlgorithm: {{ .Values.certificates.keyAlgorithm }} + keySize: {{ .Values.certificates.keySize }} + duration: {{ .Values.certificates.duration }} + renewBefore: {{ .Values.certificates.renewBefore }} +{{- end }} diff --git a/k8s/seaweedfs/templates/volume-service.yaml b/k8s/seaweedfs/templates/volume-service.yaml new file mode 100644 index 000000000..fc7716681 --- /dev/null +++ b/k8s/seaweedfs/templates/volume-service.yaml @@ -0,0 +1,22 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "seaweedfs.name" . }}-volume + namespace: {{ .Release.Namespace }} + labels: + app: {{ template "seaweedfs.name" . }} + component: volume +spec: + clusterIP: None + ports: + - name: "swfs-volume" + port: {{ .Values.volume.port }} + targetPort: {{ .Values.volume.port }} + protocol: TCP + - name: "swfs-volume-18080" + port: {{ .Values.volume.grpcPort }} + targetPort: {{ .Values.volume.grpcPort }} + protocol: TCP + selector: + app: {{ template "seaweedfs.name" . }} + component: volume \ No newline at end of file diff --git a/k8s/seaweedfs/templates/volume-statefulset.yaml b/k8s/seaweedfs/templates/volume-statefulset.yaml new file mode 100644 index 000000000..9c6ddcd9f --- /dev/null +++ b/k8s/seaweedfs/templates/volume-statefulset.yaml @@ -0,0 +1,187 @@ +{{- if .Values.volume.enabled }} +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ template "seaweedfs.name" . }}-volume + namespace: {{ .Release.Namespace }} + labels: + app: {{ template "seaweedfs.name" . }} + chart: {{ template "seaweedfs.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +spec: + serviceName: {{ template "seaweedfs.name" . }}-volume + replicas: {{ .Values.volume.replicas }} + selector: + matchLabels: + app: {{ template "seaweedfs.name" . }} + chart: {{ template "seaweedfs.chart" . }} + release: {{ .Release.Name }} + component: volume + template: + metadata: + labels: + app: {{ template "seaweedfs.name" . }} + chart: {{ template "seaweedfs.chart" . }} + release: {{ .Release.Name }} + component: volume + spec: + {{- if .Values.volume.affinity }} + affinity: + {{ tpl .Values.volume.affinity . | nindent 8 | trim }} + {{- end }} + restartPolicy: {{ default .Values.global.restartPolicy .Values.volume.restartPolicy }} + {{- if .Values.volume.tolerations }} + tolerations: + {{ tpl .Values.volume.tolerations . | nindent 8 | trim }} + {{- end }} + {{- if .Values.global.imagePullSecrets }} + imagePullSecrets: + - name: {{ .Values.global.imagePullSecrets }} + {{- end }} + terminationGracePeriodSeconds: 10 + {{- if .Values.volume.priorityClassName }} + priorityClassName: {{ .Values.volume.priorityClassName | quote }} + {{- end }} + enableServiceLinks: false + containers: + - name: seaweedfs + image: {{ template "volume.image" . 
}} + imagePullPolicy: {{ default "IfNotPresent" .Values.global.imagePullPolicy }} + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: HOST_IP + valueFrom: + fieldRef: + fieldPath: status.hostIP + - name: SEAWEEDFS_FULLNAME + value: "{{ template "seaweedfs.name" . }}" + command: + - "/bin/sh" + - "-ec" + - | + exec /usr/bin/weed -logdir=/logs \ + {{- if .Values.volume.loggingOverrideLevel }} + -v={{ .Values.volume.loggingOverrideLevel }} \ + {{- else }} + -v={{ .Values.global.loggingLevel }} \ + {{- end }} + volume \ + -port={{ .Values.volume.port }} \ + -dir={{ .Values.volume.dir }} \ + -max={{ .Values.volume.maxVolumes }} \ + {{- if .Values.volume.rack }} + -rack={{ .Values.volume.rack }} \ + {{- end }} + {{- if .Values.volume.dataCenter }} + -dataCenter={{ .Values.volume.dataCenter }} \ + {{- end }} + -ip.bind={{ .Values.volume.ipBind }} \ + -read.redirect={{ .Values.volume.readRedirect }} \ + {{- if .Values.volume.whiteList }} + -whiteList={{ .Values.volume.whiteList }} \ + {{- end }} + {{- if .Values.volume.imagesFixOrientation }} + -images.fix.orientation \ + {{- end }} + -ip=${POD_NAME}.${SEAWEEDFS_FULLNAME}-volume \ + -compactionMBps={{ .Values.volume.compactionMBps }} \ + -mserver={{ range $index := until (.Values.master.replicas | int) }}${SEAWEEDFS_FULLNAME}-master-{{ $index }}.${SEAWEEDFS_FULLNAME}-master:{{ $.Values.master.port }}{{ if lt $index (sub ($.Values.master.replicas | int) 1) }},{{ end }}{{ end }} + volumeMounts: + - name: seaweedfs-volume-storage + mountPath: "/data/" + - name: seaweedfs-volume-log-volume + mountPath: "/logs/" + {{- if .Values.global.enableSecurity }} + - name: security-config + readOnly: true + mountPath: /etc/seaweedfs/security.toml + subPath: security.toml + - name: ca-cert + readOnly: true + mountPath: /usr/local/share/ca-certificates/ca/ + - name: master-cert + readOnly: true + mountPath: /usr/local/share/ca-certificates/master/ + - name: volume-cert + readOnly: true + mountPath: /usr/local/share/ca-certificates/volume/ + - name: filer-cert + readOnly: true + mountPath: /usr/local/share/ca-certificates/filer/ + - name: client-cert + readOnly: true + mountPath: /usr/local/share/ca-certificates/client/ + {{- end }} + {{ tpl .Values.volume.extraVolumeMounts . | nindent 12 | trim }} + ports: + - containerPort: {{ .Values.volume.port }} + name: swfs-vol + - containerPort: {{ .Values.volume.grpcPort }} + #name: swfs-vol-grpc + readinessProbe: + httpGet: + path: /status + port: {{ .Values.volume.port }} + scheme: HTTP + initialDelaySeconds: 5 + periodSeconds: 15 + successThreshold: 1 + failureThreshold: 100 + livenessProbe: + httpGet: + path: /status + port: {{ .Values.volume.port }} + scheme: HTTP + initialDelaySeconds: 20 + periodSeconds: 30 + successThreshold: 1 + failureThreshold: 10 + {{- if .Values.volume.resources }} + resources: + {{ tpl .Values.volume.resources . | nindent 12 | trim }} + {{- end }} + volumes: + - name: seaweedfs-volume-log-volume + hostPath: + path: /storage/logs/seaweedfs/volume + type: DirectoryOrCreate + - name: seaweedfs-volume-storage + hostPath: + path: /storage/object_store/ + type: DirectoryOrCreate + {{- if .Values.global.enableSecurity }} + - name: security-config + configMap: + name: {{ template "seaweedfs.name" . }}-security-config + - name: ca-cert + secret: + secretName: {{ template "seaweedfs.name" . }}-ca-cert + - name: master-cert + secret: + secretName: {{ template "seaweedfs.name" . 
}}-master-cert + - name: volume-cert + secret: + secretName: {{ template "seaweedfs.name" . }}-volume-cert + - name: filer-cert + secret: + secretName: {{ template "seaweedfs.name" . }}-filer-cert + - name: client-cert + secret: + secretName: {{ template "seaweedfs.name" . }}-client-cert + {{- end }} + {{- if .Values.volume.extraVolumes }} + {{ tpl .Values.volume.extraVolumes . | indent 8 | trim }} + {{- end }} + {{- if .Values.volume.nodeSelector }} + nodeSelector: + {{ tpl .Values.volume.nodeSelector . | indent 8 | trim }} + {{- end }} +{{- end }} diff --git a/k8s/seaweedfs/values.yaml b/k8s/seaweedfs/values.yaml new file mode 100644 index 000000000..b7d93ff4a --- /dev/null +++ b/k8s/seaweedfs/values.yaml @@ -0,0 +1,314 @@ +# Available parameters and their default values for the SeaweedFS chart. + +global: + registry: "" + repository: "" + imageName: chrislusf/seaweedfs + imageTag: "1.58" + imagePullPolicy: IfNotPresent + imagePullSecrets: imagepullsecret + restartPolicy: Always + loggingLevel: 1 + enableSecurity: false + monitoring: + enabled: false + gatewayHost: null + gatewayPort: null + +image: + registry: "" + repository: "" + +master: + enabled: true + repository: null + imageName: null + imageTag: null + imageOverride: null + restartPolicy: null + replicas: 1 + port: 9333 + grpcPort: 19333 + ipBind: "0.0.0.0" + volumePreallocate: false + volumeSizeLimitMB: 30000 + loggingOverrideLevel: null + + # Disable http request, only gRpc operations are allowed + disableHttp: false + + extraVolumes: "" + extraVolumeMounts: "" + + # storage and storageClass are the settings for configuring stateful + # storage for the master pods. storage should be set to the disk size of + # the attached volume. storageClass is the class of storage which defaults + # to null (the Kube cluster will pick the default). + storage: 25Gi + storageClass: null + + # Resource requests, limits, etc. for the master cluster placement. This + # should map directly to the value of the resources field for a PodSpec, + # formatted as a multi-line string. By default no direct resource request + # is made. + resources: null + + # updatePartition is used to control a careful rolling update of SeaweedFS + # masters. + updatePartition: 0 + + # Affinity Settings + # Commenting out or setting as empty the affinity variable, will allow + # deployment to single node services such as Minikube + affinity: | + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + app: {{ template "seaweedfs.name" . }} + release: "{{ .Release.Name }}" + component: master + topologyKey: kubernetes.io/hostname + + # Toleration Settings for master pods + # This should be a multi-line string matching the Toleration array + # in a PodSpec. + tolerations: "" + + # nodeSelector labels for master pod assignment, formatted as a muli-line string. 
+  # ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector
+  # Example:
+  # nodeSelector: |
+  #   beta.kubernetes.io/arch: amd64
+  nodeSelector: |
+    sw-backend: "true"
+
+  # used to assign priority to master pods
+  # ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/
+  priorityClassName: ""
+
+
+volume:
+  enabled: true
+  repository: null
+  imageName: null
+  imageTag: null
+  imageOverride: null
+  restartPolicy: null
+  port: 8080
+  grpcPort: 18080
+  ipBind: "0.0.0.0"
+  replicas: 1
+  loggingOverrideLevel: null
+
+  # limit background compaction or copying speed in megabytes per second
+  compactionMBps: "40"
+
+  # Directories to store data files. dir[,dir]... (default "/tmp")
+  dir: "/data"
+
+  # Maximum number of volumes, count[,count]... (default "7")
+  maxVolumes: "10000"
+
+  # Volume server's rack name
+  rack: null
+
+  # Volume server's data center name
+  dataCenter: null
+
+  # Redirect moved or non-local volumes. (default true)
+  readRedirect: true
+
+  # Comma-separated IP addresses having write permission. No limit if empty.
+  whiteList: null
+
+  # Adjust jpg orientation when uploading.
+  imagesFixOrientation: false
+
+  extraVolumes: ""
+  extraVolumeMounts: ""
+
+  # Affinity Settings
+  # Commenting out or setting as empty the affinity variable, will allow
+  # deployment to single node services such as Minikube
+  affinity: |
+    podAntiAffinity:
+      requiredDuringSchedulingIgnoredDuringExecution:
+        - labelSelector:
+            matchLabels:
+              app: {{ template "seaweedfs.name" . }}
+              release: "{{ .Release.Name }}"
+              component: volume
+          topologyKey: kubernetes.io/hostname
+
+  # Resource requests, limits, etc. for the server cluster placement. This
+  # should map directly to the value of the resources field for a PodSpec,
+  # formatted as a multi-line string. By default no direct resource request
+  # is made.
+  resources: null
+
+  # Toleration Settings for server pods
+  # This should be a multi-line string matching the Toleration array
+  # in a PodSpec.
+  tolerations: ""
+
+  # nodeSelector labels for server pod assignment, formatted as a multi-line string.
+  # ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector
+  # Example:
+  # nodeSelector: |
+  #   beta.kubernetes.io/arch: amd64
+  nodeSelector: |
+    sw-volume: "true"
+
+  # used to assign priority to server pods
+  # ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/
+  priorityClassName: ""
+
+
+filer:
+  enabled: true
+  repository: null
+  imageName: null
+  imageTag: null
+  imageOverride: null
+  restartPolicy: null
+  replicas: 1
+  port: 8888
+  grpcPort: 18888
+  loggingOverrideLevel: null
+
+  # Whether to proxy or redirect to the volume server during file GET requests
+  redirectOnRead: false
+
+  # Limit sub dir listing size (default 100000)
+  dirListLimit: 100000
+
+  # Turn off directory listing
+  disableDirListing: false
+
+  # Disable http request, only gRpc operations are allowed
+  disableHttp: false
+
+  # storage and storageClass are the settings for configuring stateful
+  # storage for the filer pods. storage should be set to the disk size of
+  # the attached volume. storageClass is the class of storage which defaults
+  # to null (the Kube cluster will pick the default).
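+  # For example, to ask for a 50Gi volume from a specific storage class
+  # (the class name "fast-ssd" below is only a placeholder, not a chart default):
+  # storage: 50Gi
+  # storageClass: "fast-ssd"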
+  storage: 25Gi
+  storageClass: null
+
+  extraVolumes: ""
+  extraVolumeMounts: ""
+
+  # Affinity Settings
+  # Commenting out or setting as empty the affinity variable, will allow
+  # deployment to single node services such as Minikube
+  affinity: |
+    podAntiAffinity:
+      requiredDuringSchedulingIgnoredDuringExecution:
+        - labelSelector:
+            matchLabels:
+              app: {{ template "seaweedfs.name" . }}
+              release: "{{ .Release.Name }}"
+              component: filer
+          topologyKey: kubernetes.io/hostname
+
+  # updatePartition is used to control a careful rolling update of SeaweedFS
+  # filer servers.
+  updatePartition: 0
+
+  # Resource requests, limits, etc. for the server cluster placement. This
+  # should map directly to the value of the resources field for a PodSpec,
+  # formatted as a multi-line string. By default no direct resource request
+  # is made.
+  resources: null
+
+  # Toleration Settings for server pods
+  # This should be a multi-line string matching the Toleration array
+  # in a PodSpec.
+  tolerations: ""
+
+  # nodeSelector labels for server pod assignment, formatted as a multi-line string.
+  # ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector
+  # Example:
+  # nodeSelector: |
+  #   beta.kubernetes.io/arch: amd64
+  nodeSelector: |
+    sw-backend: "true"
+
+  # used to assign priority to server pods
+  # ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/
+  priorityClassName: ""
+
+  dbSchema:
+    imageName: db-schema
+    imageTag: "development"
+    imageOverride: ""
+
+  # extraEnvironmentVars is a list of extra environment variables to set on the stateful set.
+  extraEnvironmentVars:
+    WEED_MYSQL_ENABLED: "true"
+    WEED_MYSQL_HOSTNAME: "mysql-db-host"
+    WEED_MYSQL_PORT: "3306"
+    WEED_MYSQL_DATABASE: "sw-database"
+    WEED_MYSQL_CONNECTION_MAX_IDLE: "10"
+    WEED_MYSQL_CONNECTION_MAX_OPEN: "150"
+    # enable usage of memsql as filer backend
+    WEED_MYSQL_INTERPOLATEPARAMS: "true"
+    WEED_LEVELDB2_ENABLED: "false"
+    # with http DELETE, by default the filer would check whether a folder is empty.
+    # recursive_delete will delete all sub folders and files, similar to "rm -Rf"
+    WEED_FILER_OPTIONS_RECURSIVE_DELETE: "false"
+    # directories under this folder will automatically create a separate bucket
+    WEED_FILER_BUCKETS_FOLDER: "/buckets"
+    # directories under this folder will store message queue data
+    WEED_FILER_QUEUES_FOLDER: "/queues"
+
+s3:
+  enabled: true
+  repository: null
+  imageName: null
+  imageTag: null
+  restartPolicy: null
+  replicas: 1
+  port: 8333
+  loggingOverrideLevel: null
+
+  # Folder on filer to store all buckets (default "/buckets")
+  filerDirBuckets: "/buckets"
+
+  # Suffix of the host name, {bucket}.{domainName}
+  domainName: ""
+
+  extraVolumes: ""
+  extraVolumeMounts: ""
+
+  # Resource requests, limits, etc. for the server cluster placement. This
+  # should map directly to the value of the resources field for a PodSpec,
+  # formatted as a multi-line string. By default no direct resource request
+  # is made.
+  resources: null
+
+  # Toleration Settings for server pods
+  # This should be a multi-line string matching the Toleration array
+  # in a PodSpec.
+  tolerations: ""
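+  # For example (illustrative only; the "dedicated" taint key is a placeholder,
+  # not something this chart creates or requires):
+  # tolerations: |
+  #   - key: "dedicated"
+  #     operator: "Equal"
+  #     value: "seaweedfs"
+  #     effect: "NoSchedule"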
+ # ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector + # Example: + # nodeSelector: | + # beta.kubernetes.io/arch: amd64 + nodeSelector: | + sw-backend: "true" + + # used to assign priority to server pods + # ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/ + priorityClassName: "" + +certificates: + commonName: "SeaweedFS CA" + ipAddresses: [] + keyAlgorithm: rsa + keySize: 2048 + duration: 2160h # 90d + renewBefore: 360h # 15d From 0db304ed1ca3ccbbf16f5469b2516b03d1b5c263 Mon Sep 17 00:00:00 2001 From: LazyDBA247-Anyvision Date: Tue, 3 Mar 2020 10:13:57 +0200 Subject: [PATCH 0186/2432] FILER/S3: filer Dir Buckets updated config, to the new 1.58 --- k8s/seaweedfs/templates/s3-deployment.yaml | 1 - k8s/seaweedfs/values.yaml | 3 --- 2 files changed, 4 deletions(-) diff --git a/k8s/seaweedfs/templates/s3-deployment.yaml b/k8s/seaweedfs/templates/s3-deployment.yaml index c124af287..1bb3283f1 100644 --- a/k8s/seaweedfs/templates/s3-deployment.yaml +++ b/k8s/seaweedfs/templates/s3-deployment.yaml @@ -75,7 +75,6 @@ spec: -cert.file=/usr/local/share/ca-certificates/client/tls.crt \ -key.file=/usr/local/share/ca-certificates/client/tls.key \ {{- end }} - -filer.dir.buckets={{ .Values.s3.filerDirBuckets }} \ {{- if .Values.s3.domainName }} -domainName={{ .Values.s3.domainName }} \ {{- end }} diff --git a/k8s/seaweedfs/values.yaml b/k8s/seaweedfs/values.yaml index b7d93ff4a..1a265a4bf 100644 --- a/k8s/seaweedfs/values.yaml +++ b/k8s/seaweedfs/values.yaml @@ -273,9 +273,6 @@ s3: port: 8333 loggingOverrideLevel: null - # Folder on filer to store all buckets (default "/buckets") - filerDirBuckets: "/buckets" - # Suffix of the host name, {bucket}.{domainName} domainName: "" From 0e22ed44d7d3076ecd57d085589a73220d8dfd68 Mon Sep 17 00:00:00 2001 From: LazyDBA247-Anyvision Date: Tue, 3 Mar 2020 10:43:45 +0200 Subject: [PATCH 0187/2432] FILER: pass & db fix revert from white labeling --- k8s/seaweedfs/templates/secret-seaweedfs-db.yaml | 2 +- k8s/seaweedfs/values.yaml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/k8s/seaweedfs/templates/secret-seaweedfs-db.yaml b/k8s/seaweedfs/templates/secret-seaweedfs-db.yaml index c397824ad..c6132c9ea 100644 --- a/k8s/seaweedfs/templates/secret-seaweedfs-db.yaml +++ b/k8s/seaweedfs/templates/secret-seaweedfs-db.yaml @@ -8,7 +8,7 @@ metadata: "helm.sh/resource-policy": keep "helm.sh/hook": "pre-install" stringData: - user: {{ "YourSWUser" | b64enc }} + user: "YourSWUser" password: "HardCodedPassword" # better to random generate and create in DB # password: {{ randAlphaNum 10 | sha256sum | b64enc | trunc 32 }} diff --git a/k8s/seaweedfs/values.yaml b/k8s/seaweedfs/values.yaml index 1a265a4bf..7afc2dacf 100644 --- a/k8s/seaweedfs/values.yaml +++ b/k8s/seaweedfs/values.yaml @@ -249,7 +249,7 @@ filer: WEED_MYSQL_ENABLED: "true" WEED_MYSQL_HOSTNAME: "mysql-db-host" WEED_MYSQL_PORT: "3306" - WEED_MYSQL_DATABASE: "sw-database" + WEED_MYSQL_DATABASE: "sw_database" WEED_MYSQL_CONNECTION_MAX_IDLE: "10" WEED_MYSQL_CONNECTION_MAX_OPEN: "150" # enable usage of memsql as filer backend From e51ecb8621dfe1e850f630fc8dd4cb74b73bf62d Mon Sep 17 00:00:00 2001 From: Yoni Nakache <45972051+LazyDBA247-Anyvision@users.noreply.github.com> Date: Tue, 3 Mar 2020 12:46:18 +0200 Subject: [PATCH 0188/2432] align chart version with values.yaml --- k8s/seaweedfs/Chart.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/k8s/seaweedfs/Chart.yaml b/k8s/seaweedfs/Chart.yaml index 00033e4c6..d0c650c7c 
100644 --- a/k8s/seaweedfs/Chart.yaml +++ b/k8s/seaweedfs/Chart.yaml @@ -1,4 +1,4 @@ apiVersion: v1 description: SeaweedFS name: seaweedfs -version: 1.57 \ No newline at end of file +version: 1.58 From f90c43635d96cace1ab1ca965a56a082f880aa4b Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Wed, 4 Mar 2020 00:39:47 -0800 Subject: [PATCH 0189/2432] refactoring --- weed/command/command.go | 2 +- weed/command/filer.go | 3 +- weed/command/filer_copy.go | 20 +-- weed/command/master.go | 3 +- weed/command/mount.go | 21 --- weed/command/mount_std.go | 7 +- weed/command/msg_broker.go | 111 ++++++++++++++++ weed/command/queue.go | 107 ---------------- weed/command/s3.go | 5 +- weed/command/scaffold.go | 2 +- weed/command/volume.go | 3 +- weed/command/webdav.go | 3 +- weed/filesys/wfs.go | 4 +- weed/operation/grpc_client.go | 8 +- weed/{util => pb}/grpc_client_server.go | 55 +++++++- .../replication/sink/filersink/fetch_write.go | 4 +- weed/replication/source/filer_source.go | 3 +- weed/s3api/s3api_handlers.go | 4 +- weed/server/msg_broker_grpc_server.go | 23 ++++ weed/server/msg_broker_server.go | 121 ++++++++++++++++++ weed/server/queue_server.go | 49 ------- weed/server/raft_server.go | 11 +- weed/server/volume_grpc_client_to_master.go | 5 +- weed/server/webdav_server.go | 3 +- weed/shell/command_fs_du.go | 9 +- weed/wdclient/masterclient.go | 21 +-- 26 files changed, 361 insertions(+), 246 deletions(-) create mode 100644 weed/command/msg_broker.go delete mode 100644 weed/command/queue.go rename weed/{util => pb}/grpc_client_server.go (65%) create mode 100644 weed/server/msg_broker_grpc_server.go create mode 100644 weed/server/msg_broker_server.go delete mode 100644 weed/server/queue_server.go diff --git a/weed/command/command.go b/weed/command/command.go index 6687469f1..9dc51e922 100644 --- a/weed/command/command.go +++ b/weed/command/command.go @@ -20,7 +20,7 @@ var Commands = []*Command{ cmdS3, cmdUpload, cmdDownload, - cmdQueue, + cmdMsgBroker, cmdScaffold, cmdShell, cmdVersion, diff --git a/weed/command/filer.go b/weed/command/filer.go index 31e65acea..b5b595215 100644 --- a/weed/command/filer.go +++ b/weed/command/filer.go @@ -9,6 +9,7 @@ import ( "google.golang.org/grpc/reflection" "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" "github.com/chrislusf/seaweedfs/weed/security" "github.com/chrislusf/seaweedfs/weed/server" @@ -144,7 +145,7 @@ func (fo *FilerOptions) startFiler() { if err != nil { glog.Fatalf("failed to listen on grpc port %d: %v", grpcPort, err) } - grpcS := util.NewGrpcServer(security.LoadServerTLS(util.GetViper(), "grpc.filer")) + grpcS := pb.NewGrpcServer(security.LoadServerTLS(util.GetViper(), "grpc.filer")) filer_pb.RegisterSeaweedFilerServer(grpcS, fs) reflection.Register(grpcS) go grpcS.Serve(grpcL) diff --git a/weed/command/filer_copy.go b/weed/command/filer_copy.go index 18f41048b..3e7ae1db2 100644 --- a/weed/command/filer_copy.go +++ b/weed/command/filer_copy.go @@ -17,6 +17,7 @@ import ( "google.golang.org/grpc" "github.com/chrislusf/seaweedfs/weed/operation" + "github.com/chrislusf/seaweedfs/weed/pb" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" "github.com/chrislusf/seaweedfs/weed/security" "github.com/chrislusf/seaweedfs/weed/util" @@ -159,7 +160,7 @@ func runCopy(cmd *Command, args []string) bool { } func readFilerConfiguration(grpcDialOption grpc.DialOption, filerGrpcAddress string) (masters []string, collection, replication string, maxMB uint32, err error) { - err = 
withFilerClient(filerGrpcAddress, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error { + err = pb.WithGrpcFilerClient(filerGrpcAddress, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error { resp, err := client.GetFilerConfiguration(context.Background(), &filer_pb.GetFilerConfigurationRequest{}) if err != nil { return fmt.Errorf("get filer %s configuration: %v", filerGrpcAddress, err) @@ -274,7 +275,7 @@ func (worker *FileCopyWorker) uploadFileAsOne(task FileCopyTask, f *os.File) err if task.fileSize > 0 { // assign a volume - err := withFilerClient(worker.filerGrpcAddress, worker.options.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error { + err := pb.WithGrpcFilerClient(worker.filerGrpcAddress, worker.options.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error { request := &filer_pb.AssignVolumeRequest{ Count: 1, @@ -319,7 +320,7 @@ func (worker *FileCopyWorker) uploadFileAsOne(task FileCopyTask, f *os.File) err fmt.Printf("copied %s => http://%s%s%s\n", fileName, worker.filerHost, task.destinationUrlPath, fileName) } - if err := withFilerClient(worker.filerGrpcAddress, worker.options.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error { + if err := pb.WithGrpcFilerClient(worker.filerGrpcAddress, worker.options.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error { request := &filer_pb.CreateEntryRequest{ Directory: task.destinationUrlPath, Entry: &filer_pb.Entry{ @@ -375,7 +376,7 @@ func (worker *FileCopyWorker) uploadFileInChunks(task FileCopyTask, f *os.File, // assign a volume var assignResult *filer_pb.AssignVolumeResponse var assignError error - err := withFilerClient(worker.filerGrpcAddress, worker.options.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error { + err := pb.WithGrpcFilerClient(worker.filerGrpcAddress, worker.options.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error { request := &filer_pb.AssignVolumeRequest{ Count: 1, Replication: *worker.options.replication, @@ -447,7 +448,7 @@ func (worker *FileCopyWorker) uploadFileInChunks(task FileCopyTask, f *os.File, return uploadError } - if err := withFilerClient(worker.filerGrpcAddress, worker.options.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error { + if err := pb.WithGrpcFilerClient(worker.filerGrpcAddress, worker.options.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error { request := &filer_pb.CreateEntryRequest{ Directory: task.destinationUrlPath, Entry: &filer_pb.Entry{ @@ -496,12 +497,3 @@ func detectMimeType(f *os.File) string { mimeType := http.DetectContentType(head[:n]) return mimeType } - -func withFilerClient(filerAddress string, grpcDialOption grpc.DialOption, fn func(filer_pb.SeaweedFilerClient) error) error { - - return util.WithCachedGrpcClient(func(clientConn *grpc.ClientConn) error { - client := filer_pb.NewSeaweedFilerClient(clientConn) - return fn(client) - }, filerAddress, grpcDialOption) - -} diff --git a/weed/command/master.go b/weed/command/master.go index c4b11119b..1be60426f 100644 --- a/weed/command/master.go +++ b/weed/command/master.go @@ -12,6 +12,7 @@ import ( "google.golang.org/grpc/reflection" "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb" "github.com/chrislusf/seaweedfs/weed/pb/master_pb" "github.com/chrislusf/seaweedfs/weed/security" "github.com/chrislusf/seaweedfs/weed/server" @@ -129,7 +130,7 @@ func startMaster(masterOption MasterOptions, masterWhiteList []string) { glog.Fatalf("master failed to listen on grpc port %d: %v", grpcPort, 
err) } // Create your protocol servers. - grpcS := util.NewGrpcServer(security.LoadServerTLS(util.GetViper(), "grpc.master")) + grpcS := pb.NewGrpcServer(security.LoadServerTLS(util.GetViper(), "grpc.master")) master_pb.RegisterSeaweedServer(grpcS, ms) protobuf.RegisterRaftServer(grpcS, raftServer) reflection.Register(grpcS) diff --git a/weed/command/mount.go b/weed/command/mount.go index 4bdb3415a..e73cbee10 100644 --- a/weed/command/mount.go +++ b/weed/command/mount.go @@ -1,11 +1,5 @@ package command -import ( - "fmt" - "strconv" - "strings" -) - type MountOptions struct { filer *string filerMountRootPath *string @@ -69,18 +63,3 @@ var cmdMount = &Command{ `, } -func parseFilerGrpcAddress(filer string) (filerGrpcAddress string, err error) { - hostnameAndPort := strings.Split(filer, ":") - if len(hostnameAndPort) != 2 { - return "", fmt.Errorf("filer should have hostname:port format: %v", hostnameAndPort) - } - - filerPort, parseErr := strconv.ParseUint(hostnameAndPort[1], 10, 64) - if parseErr != nil { - return "", fmt.Errorf("filer port parse error: %v", parseErr) - } - - filerGrpcPort := int(filerPort) + 10000 - - return fmt.Sprintf("%s:%d", hostnameAndPort[0], filerGrpcPort), nil -} diff --git a/weed/command/mount_std.go b/weed/command/mount_std.go index e8e3fb030..b195bf143 100644 --- a/weed/command/mount_std.go +++ b/weed/command/mount_std.go @@ -17,6 +17,7 @@ import ( "github.com/chrislusf/seaweedfs/weed/filesys" "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" "github.com/chrislusf/seaweedfs/weed/security" "github.com/chrislusf/seaweedfs/weed/util" @@ -135,16 +136,16 @@ func RunMount(filer, filerMountRootPath, dir, collection, replication, dataCente }) // parse filer grpc address - filerGrpcAddress, err := parseFilerGrpcAddress(filer) + filerGrpcAddress, err := pb.ParseFilerGrpcAddress(filer) if err != nil { - glog.V(0).Infof("parseFilerGrpcAddress: %v", err) + glog.V(0).Infof("ParseFilerGrpcAddress: %v", err) daemonize.SignalOutcome(err) return true } // try to connect to filer, filerBucketsPath may be useful later grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.client") - err = withFilerClient(filerGrpcAddress, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error { + err = pb.WithGrpcFilerClient(filerGrpcAddress, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error { _, err := client.GetFilerConfiguration(context.Background(), &filer_pb.GetFilerConfigurationRequest{}) if err != nil { return fmt.Errorf("get filer %s configuration: %v", filerGrpcAddress, err) diff --git a/weed/command/msg_broker.go b/weed/command/msg_broker.go new file mode 100644 index 000000000..0d69a9a66 --- /dev/null +++ b/weed/command/msg_broker.go @@ -0,0 +1,111 @@ +package command + +import ( + "context" + "fmt" + "strconv" + "time" + + "google.golang.org/grpc/reflection" + + "github.com/chrislusf/seaweedfs/weed/pb" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/pb/queue_pb" + "github.com/chrislusf/seaweedfs/weed/security" + weed_server "github.com/chrislusf/seaweedfs/weed/server" + + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/util" +) + +var ( + messageBrokerStandaloneOptions QueueOptions +) + +type QueueOptions struct { + filer *string + port *int + tlsPrivateKey *string + tlsCertificate *string + defaultTtl *string +} + +func init() { + cmdMsgBroker.Run = runMsgBroker // break init cycle + 
messageBrokerStandaloneOptions.filer = cmdMsgBroker.Flag.String("filer", "localhost:8888", "filer server address") + messageBrokerStandaloneOptions.port = cmdMsgBroker.Flag.Int("port", 17777, "queue server gRPC listen port") + messageBrokerStandaloneOptions.tlsPrivateKey = cmdMsgBroker.Flag.String("key.file", "", "path to the TLS private key file") + messageBrokerStandaloneOptions.tlsCertificate = cmdMsgBroker.Flag.String("cert.file", "", "path to the TLS certificate file") + messageBrokerStandaloneOptions.defaultTtl = cmdMsgBroker.Flag.String("ttl", "1h", "time to live, e.g.: 1m, 1h, 1d, 1M, 1y") +} + +var cmdMsgBroker = &Command{ + UsageLine: "msg.broker [-port=17777] [-filer=]", + Short: " start a message queue broker", + Long: `start a message queue broker + + The broker can accept gRPC calls to write or read messages. The messages are stored via filer. + The brokers are stateless. To scale up, just add more brokers. + +`, +} + +func runMsgBroker(cmd *Command, args []string) bool { + + util.LoadConfiguration("security", false) + + return messageBrokerStandaloneOptions.startQueueServer() + +} + +func (msgBrokerOpt *QueueOptions) startQueueServer() bool { + + filerGrpcAddress, err := pb.ParseFilerGrpcAddress(*msgBrokerOpt.filer) + if err != nil { + glog.Fatal(err) + return false + } + + filerQueuesPath := "/queues" + + grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.client") + + for { + err = pb.WithGrpcFilerClient(filerGrpcAddress, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error { + resp, err := client.GetFilerConfiguration(context.Background(), &filer_pb.GetFilerConfigurationRequest{}) + if err != nil { + return fmt.Errorf("get filer %s configuration: %v", filerGrpcAddress, err) + } + filerQueuesPath = resp.DirQueues + glog.V(0).Infof("Queue read filer queues dir: %s", filerQueuesPath) + return nil + }) + if err != nil { + glog.V(0).Infof("wait to connect to filer %s grpc address %s", *msgBrokerOpt.filer, filerGrpcAddress) + time.Sleep(time.Second) + } else { + glog.V(0).Infof("connected to filer %s grpc address %s", *msgBrokerOpt.filer, filerGrpcAddress) + break + } + } + + qs, err := weed_server.NewMessageBroker(&weed_server.MessageBrokerOption{ + Filers: []string{*msgBrokerOpt.filer}, + DefaultReplication: "", + MaxMB: 0, + Port: *msgBrokerOpt.port, + }) + + // start grpc listener + grpcL, err := util.NewListener(":"+strconv.Itoa(*msgBrokerOpt.port), 0) + if err != nil { + glog.Fatalf("failed to listen on grpc port %d: %v", *msgBrokerOpt.port, err) + } + grpcS := pb.NewGrpcServer(security.LoadServerTLS(util.GetViper(), "grpc.msg_broker")) + queue_pb.RegisterSeaweedQueueServer(grpcS, qs) + reflection.Register(grpcS) + grpcS.Serve(grpcL) + + return true + +} diff --git a/weed/command/queue.go b/weed/command/queue.go deleted file mode 100644 index d09d5d8b3..000000000 --- a/weed/command/queue.go +++ /dev/null @@ -1,107 +0,0 @@ -package command - -import ( - "context" - "fmt" - "strconv" - "time" - - "google.golang.org/grpc/reflection" - - "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" - "github.com/chrislusf/seaweedfs/weed/pb/queue_pb" - "github.com/chrislusf/seaweedfs/weed/security" - weed_server "github.com/chrislusf/seaweedfs/weed/server" - - "github.com/chrislusf/seaweedfs/weed/glog" - "github.com/chrislusf/seaweedfs/weed/util" -) - -var ( - queueStandaloneOptions QueueOptions -) - -type QueueOptions struct { - filer *string - port *int - tlsPrivateKey *string - tlsCertificate *string - defaultTtl *string -} - -func init() { - cmdQueue.Run = 
runQueue // break init cycle - queueStandaloneOptions.filer = cmdQueue.Flag.String("filer", "localhost:8888", "filer server address") - queueStandaloneOptions.port = cmdQueue.Flag.Int("port", 17777, "queue server gRPC listen port") - queueStandaloneOptions.tlsPrivateKey = cmdQueue.Flag.String("key.file", "", "path to the TLS private key file") - queueStandaloneOptions.tlsCertificate = cmdQueue.Flag.String("cert.file", "", "path to the TLS certificate file") - queueStandaloneOptions.defaultTtl = cmdQueue.Flag.String("ttl", "1h", "time to live, e.g.: 1m, 1h, 1d, 1M, 1y") -} - -var cmdQueue = &Command{ - UsageLine: " queue [-port=17777] [-filer=]", - Short: "start a queue gRPC server that is backed by a filer", - Long: `start a queue gRPC server that is backed by a filer. - -`, -} - -func runQueue(cmd *Command, args []string) bool { - - util.LoadConfiguration("security", false) - - return queueStandaloneOptions.startQueueServer() - -} - -func (queueopt *QueueOptions) startQueueServer() bool { - - filerGrpcAddress, err := parseFilerGrpcAddress(*queueopt.filer) - if err != nil { - glog.Fatal(err) - return false - } - - filerQueuesPath := "/queues" - - grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.client") - - for { - err = withFilerClient(filerGrpcAddress, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error { - resp, err := client.GetFilerConfiguration(context.Background(), &filer_pb.GetFilerConfigurationRequest{}) - if err != nil { - return fmt.Errorf("get filer %s configuration: %v", filerGrpcAddress, err) - } - filerQueuesPath = resp.DirQueues - glog.V(0).Infof("Queue read filer queues dir: %s", filerQueuesPath) - return nil - }) - if err != nil { - glog.V(0).Infof("wait to connect to filer %s grpc address %s", *queueopt.filer, filerGrpcAddress) - time.Sleep(time.Second) - } else { - glog.V(0).Infof("connected to filer %s grpc address %s", *queueopt.filer, filerGrpcAddress) - break - } - } - - qs, err := weed_server.NewQueueServer(&weed_server.QueueServerOption{ - Filers: []string{*queueopt.filer}, - DefaultReplication: "", - MaxMB: 0, - Port: *queueopt.port, - }) - - // start grpc listener - grpcL, err := util.NewListener(":"+strconv.Itoa(*queueopt.port), 0) - if err != nil { - glog.Fatalf("failed to listen on grpc port %d: %v", *queueopt.port, err) - } - grpcS := util.NewGrpcServer(security.LoadServerTLS(util.GetViper(), "grpc.queue")) - queue_pb.RegisterSeaweedQueueServer(grpcS, qs) - reflection.Register(grpcS) - go grpcS.Serve(grpcL) - - return true - -} diff --git a/weed/command/s3.go b/weed/command/s3.go index 39d0c04fc..cd4018fbc 100644 --- a/weed/command/s3.go +++ b/weed/command/s3.go @@ -6,6 +6,7 @@ import ( "net/http" "time" + "github.com/chrislusf/seaweedfs/weed/pb" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" "github.com/chrislusf/seaweedfs/weed/security" @@ -117,7 +118,7 @@ func runS3(cmd *Command, args []string) bool { func (s3opt *S3Options) startS3Server() bool { - filerGrpcAddress, err := parseFilerGrpcAddress(*s3opt.filer) + filerGrpcAddress, err := pb.ParseFilerGrpcAddress(*s3opt.filer) if err != nil { glog.Fatal(err) return false @@ -128,7 +129,7 @@ func (s3opt *S3Options) startS3Server() bool { grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.client") for { - err = withFilerClient(filerGrpcAddress, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error { + err = pb.WithGrpcFilerClient(filerGrpcAddress, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error { resp, err := 
client.GetFilerConfiguration(context.Background(), &filer_pb.GetFilerConfigurationRequest{}) if err != nil { return fmt.Errorf("get filer %s configuration: %v", filerGrpcAddress, err) diff --git a/weed/command/scaffold.go b/weed/command/scaffold.go index fc7f8636d..5b246b7c0 100644 --- a/weed/command/scaffold.go +++ b/weed/command/scaffold.go @@ -326,7 +326,7 @@ key = "" cert = "" key = "" -[grpc.queue] +[grpc.msg_broker] cert = "" key = "" diff --git a/weed/command/volume.go b/weed/command/volume.go index 9d665d143..4773d8a55 100644 --- a/weed/command/volume.go +++ b/weed/command/volume.go @@ -13,6 +13,7 @@ import ( "github.com/spf13/viper" "google.golang.org/grpc" + "github.com/chrislusf/seaweedfs/weed/pb" "github.com/chrislusf/seaweedfs/weed/security" "github.com/chrislusf/seaweedfs/weed/util/httpdown" @@ -234,7 +235,7 @@ func (v VolumeServerOptions) startGrpcService(vs volume_server_pb.VolumeServerSe if err != nil { glog.Fatalf("failed to listen on grpc port %d: %v", grpcPort, err) } - grpcS := util.NewGrpcServer(security.LoadServerTLS(util.GetViper(), "grpc.volume")) + grpcS := pb.NewGrpcServer(security.LoadServerTLS(util.GetViper(), "grpc.volume")) volume_server_pb.RegisterVolumeServerServer(grpcS, vs) reflection.Register(grpcS) go func() { diff --git a/weed/command/webdav.go b/weed/command/webdav.go index 4d5752247..ba88a17be 100644 --- a/weed/command/webdav.go +++ b/weed/command/webdav.go @@ -8,6 +8,7 @@ import ( "time" "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb" "github.com/chrislusf/seaweedfs/weed/security" "github.com/chrislusf/seaweedfs/weed/server" "github.com/chrislusf/seaweedfs/weed/util" @@ -54,7 +55,7 @@ func runWebDav(cmd *Command, args []string) bool { func (wo *WebDavOption) startWebDav() bool { - filerGrpcAddress, err := parseFilerGrpcAddress(*wo.filer) + filerGrpcAddress, err := pb.ParseFilerGrpcAddress(*wo.filer) if err != nil { glog.Fatal(err) return false diff --git a/weed/filesys/wfs.go b/weed/filesys/wfs.go index aa530f6aa..83826fed5 100644 --- a/weed/filesys/wfs.go +++ b/weed/filesys/wfs.go @@ -14,8 +14,8 @@ import ( "github.com/chrislusf/seaweedfs/weed/filer2" "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" - "github.com/chrislusf/seaweedfs/weed/util" "github.com/seaweedfs/fuse" "github.com/seaweedfs/fuse/fs" ) @@ -93,7 +93,7 @@ func (wfs *WFS) Root() (fs.Node, error) { func (wfs *WFS) WithFilerClient(fn func(filer_pb.SeaweedFilerClient) error) error { - err := util.WithCachedGrpcClient(func(grpcConnection *grpc.ClientConn) error { + err := pb.WithCachedGrpcClient(func(grpcConnection *grpc.ClientConn) error { client := filer_pb.NewSeaweedFilerClient(grpcConnection) return fn(client) }, wfs.option.FilerGrpcAddress, wfs.option.GrpcDialOption) diff --git a/weed/operation/grpc_client.go b/weed/operation/grpc_client.go index 7eed66503..dccf85da4 100644 --- a/weed/operation/grpc_client.go +++ b/weed/operation/grpc_client.go @@ -8,9 +8,9 @@ import ( "google.golang.org/grpc" "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb" "github.com/chrislusf/seaweedfs/weed/pb/master_pb" "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb" - "github.com/chrislusf/seaweedfs/weed/util" ) func WithVolumeServerClient(volumeServer string, grpcDialOption grpc.DialOption, fn func(volume_server_pb.VolumeServerClient) error) error { @@ -20,7 +20,7 @@ func WithVolumeServerClient(volumeServer string, grpcDialOption 
grpc.DialOption, return err } - return util.WithCachedGrpcClient(func(grpcConnection *grpc.ClientConn) error { + return pb.WithCachedGrpcClient(func(grpcConnection *grpc.ClientConn) error { client := volume_server_pb.NewVolumeServerClient(grpcConnection) return fn(client) }, grpcAddress, grpcDialOption) @@ -39,12 +39,12 @@ func toVolumeServerGrpcAddress(volumeServer string) (grpcAddress string, err err func WithMasterServerClient(masterServer string, grpcDialOption grpc.DialOption, fn func(masterClient master_pb.SeaweedClient) error) error { - masterGrpcAddress, parseErr := util.ParseServerToGrpcAddress(masterServer) + masterGrpcAddress, parseErr := pb.ParseServerToGrpcAddress(masterServer) if parseErr != nil { return fmt.Errorf("failed to parse master grpc %v: %v", masterServer, parseErr) } - return util.WithCachedGrpcClient(func(grpcConnection *grpc.ClientConn) error { + return pb.WithCachedGrpcClient(func(grpcConnection *grpc.ClientConn) error { client := master_pb.NewSeaweedClient(grpcConnection) return fn(client) }, masterGrpcAddress, grpcDialOption) diff --git a/weed/util/grpc_client_server.go b/weed/pb/grpc_client_server.go similarity index 65% rename from weed/util/grpc_client_server.go rename to weed/pb/grpc_client_server.go index d6a9ee3c3..4b5f9eff3 100644 --- a/weed/util/grpc_client_server.go +++ b/weed/pb/grpc_client_server.go @@ -1,4 +1,4 @@ -package util +package pb import ( "context" @@ -11,6 +11,9 @@ import ( "google.golang.org/grpc" "google.golang.org/grpc/keepalive" + + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/pb/master_pb" ) var ( @@ -127,3 +130,53 @@ func ServerToGrpcAddress(server string) (serverGrpcAddress string) { return fmt.Sprintf("%s:%d", hostnameAndPort[0], grpcPort) } + +func WithMasterClient(master string, grpcDialOption grpc.DialOption, fn func(client master_pb.SeaweedClient) error) error { + + masterGrpcAddress, parseErr := ParseServerToGrpcAddress(master) + if parseErr != nil { + return fmt.Errorf("failed to parse master grpc %v: %v", master, parseErr) + } + + return WithCachedGrpcClient(func(grpcConnection *grpc.ClientConn) error { + client := master_pb.NewSeaweedClient(grpcConnection) + return fn(client) + }, masterGrpcAddress, grpcDialOption) + +} + +func WithFilerClient(filer string, grpcDialOption grpc.DialOption, fn func(client filer_pb.SeaweedFilerClient) error) error { + + filerGrpcAddress, parseErr := ParseServerToGrpcAddress(filer) + if parseErr != nil { + return fmt.Errorf("failed to parse filer grpc %v: %v", filer, parseErr) + } + + return WithGrpcFilerClient(filerGrpcAddress, grpcDialOption, fn) + +} + +func WithGrpcFilerClient(filerGrpcAddress string, grpcDialOption grpc.DialOption, fn func(client filer_pb.SeaweedFilerClient) error) error { + + return WithCachedGrpcClient(func(grpcConnection *grpc.ClientConn) error { + client := filer_pb.NewSeaweedFilerClient(grpcConnection) + return fn(client) + }, filerGrpcAddress, grpcDialOption) + +} + +func ParseFilerGrpcAddress(filer string) (filerGrpcAddress string, err error) { + hostnameAndPort := strings.Split(filer, ":") + if len(hostnameAndPort) != 2 { + return "", fmt.Errorf("filer should have hostname:port format: %v", hostnameAndPort) + } + + filerPort, parseErr := strconv.ParseUint(hostnameAndPort[1], 10, 64) + if parseErr != nil { + return "", fmt.Errorf("filer port parse error: %v", parseErr) + } + + filerGrpcPort := int(filerPort) + 10000 + + return fmt.Sprintf("%s:%d", hostnameAndPort[0], filerGrpcPort), nil +} diff --git 
a/weed/replication/sink/filersink/fetch_write.go b/weed/replication/sink/filersink/fetch_write.go index 954e951c9..232b68fec 100644 --- a/weed/replication/sink/filersink/fetch_write.go +++ b/weed/replication/sink/filersink/fetch_write.go @@ -10,9 +10,9 @@ import ( "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/operation" + "github.com/chrislusf/seaweedfs/weed/pb" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" "github.com/chrislusf/seaweedfs/weed/security" - "github.com/chrislusf/seaweedfs/weed/util" ) func (fs *FilerSink) replicateChunks(sourceChunks []*filer_pb.FileChunk, dir string) (replicatedChunks []*filer_pb.FileChunk, err error) { @@ -111,7 +111,7 @@ func (fs *FilerSink) fetchAndWrite(sourceChunk *filer_pb.FileChunk, dir string) func (fs *FilerSink) withFilerClient(fn func(filer_pb.SeaweedFilerClient) error) error { - return util.WithCachedGrpcClient(func(grpcConnection *grpc.ClientConn) error { + return pb.WithCachedGrpcClient(func(grpcConnection *grpc.ClientConn) error { client := filer_pb.NewSeaweedFilerClient(grpcConnection) return fn(client) }, fs.grpcAddress, fs.grpcDialOption) diff --git a/weed/replication/source/filer_source.go b/weed/replication/source/filer_source.go index 11eb3afa1..90bcffdf0 100644 --- a/weed/replication/source/filer_source.go +++ b/weed/replication/source/filer_source.go @@ -9,6 +9,7 @@ import ( "google.golang.org/grpc" + "github.com/chrislusf/seaweedfs/weed/pb" "github.com/chrislusf/seaweedfs/weed/security" "github.com/chrislusf/seaweedfs/weed/glog" @@ -92,7 +93,7 @@ func (fs *FilerSource) ReadPart(part string) (filename string, header http.Heade func (fs *FilerSource) withFilerClient(fn func(filer_pb.SeaweedFilerClient) error) error { - return util.WithCachedGrpcClient(func(grpcConnection *grpc.ClientConn) error { + return pb.WithCachedGrpcClient(func(grpcConnection *grpc.ClientConn) error { client := filer_pb.NewSeaweedFilerClient(grpcConnection) return fn(client) }, fs.grpcAddress, fs.grpcDialOption) diff --git a/weed/s3api/s3api_handlers.go b/weed/s3api/s3api_handlers.go index 81a260a63..d7212d5e3 100644 --- a/weed/s3api/s3api_handlers.go +++ b/weed/s3api/s3api_handlers.go @@ -12,8 +12,8 @@ import ( "google.golang.org/grpc" "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" - "github.com/chrislusf/seaweedfs/weed/util" ) type mimeType string @@ -40,7 +40,7 @@ func encodeResponse(response interface{}) []byte { func (s3a *S3ApiServer) withFilerClient(fn func(filer_pb.SeaweedFilerClient) error) error { - return util.WithCachedGrpcClient(func(grpcConnection *grpc.ClientConn) error { + return pb.WithCachedGrpcClient(func(grpcConnection *grpc.ClientConn) error { client := filer_pb.NewSeaweedFilerClient(grpcConnection) return fn(client) }, s3a.option.FilerGrpcAddress, s3a.option.GrpcDialOption) diff --git a/weed/server/msg_broker_grpc_server.go b/weed/server/msg_broker_grpc_server.go new file mode 100644 index 000000000..8b13aac76 --- /dev/null +++ b/weed/server/msg_broker_grpc_server.go @@ -0,0 +1,23 @@ +package weed_server + +import ( + "context" + + "github.com/chrislusf/seaweedfs/weed/pb/queue_pb" +) + +func (broker *MessageBroker) ConfigureTopic(context.Context, *queue_pb.ConfigureTopicRequest) (*queue_pb.ConfigureTopicResponse, error) { + panic("implement me") +} + +func (broker *MessageBroker) DeleteTopic(context.Context, *queue_pb.DeleteTopicRequest) (*queue_pb.DeleteTopicResponse, error) { + panic("implement me") +} + +func 
(broker *MessageBroker) StreamWrite(queue_pb.SeaweedQueue_StreamWriteServer) error { + panic("implement me") +} + +func (broker *MessageBroker) StreamRead(*queue_pb.ReadMessageRequest, queue_pb.SeaweedQueue_StreamReadServer) error { + panic("implement me") +} diff --git a/weed/server/msg_broker_server.go b/weed/server/msg_broker_server.go new file mode 100644 index 000000000..a9d908581 --- /dev/null +++ b/weed/server/msg_broker_server.go @@ -0,0 +1,121 @@ +package weed_server + +import ( + "context" + "fmt" + "time" + + "google.golang.org/grpc" + + "github.com/chrislusf/seaweedfs/weed/pb" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/pb/master_pb" + "github.com/chrislusf/seaweedfs/weed/security" + "github.com/chrislusf/seaweedfs/weed/util" +) + +type MessageBrokerOption struct { + Filers []string + DefaultReplication string + MaxMB int + Port int +} + +type MessageBroker struct { + option *MessageBrokerOption + grpcDialOption grpc.DialOption +} + +func NewMessageBroker(option *MessageBrokerOption) (messageBroker *MessageBroker, err error) { + + messageBroker = &MessageBroker{ + option: option, + grpcDialOption: security.LoadClientTLS(util.GetViper(), "grpc.msg_broker"), + } + + go messageBroker.loopForEver() + + return messageBroker, nil +} + +func (broker *MessageBroker) loopForEver() { + + for { + broker.checkPeers() + time.Sleep(3 * time.Second) + } + +} + +func (broker *MessageBroker) checkPeers() { + + // contact a filer about masters + var masters []string + for _, filer := range broker.option.Filers { + err := broker.withFilerClient(filer, func(client filer_pb.SeaweedFilerClient) error { + resp, err := client.GetFilerConfiguration(context.Background(), &filer_pb.GetFilerConfigurationRequest{}) + if err != nil { + return err + } + masters = append(masters, resp.Masters...) + return nil + }) + if err != nil { + fmt.Printf("failed to read masters from %+v: %v\n", broker.option.Filers, err) + return + } + } + + // contact each masters for filers + var filers []string + for _, master := range masters { + err := broker.withMasterClient(master, func(client master_pb.SeaweedClient) error { + resp, err := client.ListMasterClients(context.Background(), &master_pb.ListMasterClientsRequest{ + ClientType: "filer", + }) + if err != nil { + return err + } + + fmt.Printf("filers: %+v\n", resp.GrpcAddresses) + filers = append(filers, resp.GrpcAddresses...) + + return nil + }) + if err != nil { + fmt.Printf("failed to list filers: %v\n", err) + return + } + } + + // contact each filer about brokers + for _, filer := range filers { + err := broker.withFilerClient(filer, func(client filer_pb.SeaweedFilerClient) error { + resp, err := client.GetFilerConfiguration(context.Background(), &filer_pb.GetFilerConfigurationRequest{}) + if err != nil { + return err + } + masters = append(masters, resp.Masters...) 
+ return nil + }) + if err != nil { + fmt.Printf("failed to read masters from %+v: %v\n", broker.option.Filers, err) + return + } + } + +} + +func (broker *MessageBroker) withFilerClient(filer string, fn func(filer_pb.SeaweedFilerClient) error) error { + + return pb.WithFilerClient(filer, broker.grpcDialOption, fn) + +} + +func (broker *MessageBroker) withMasterClient(master string, fn func(client master_pb.SeaweedClient) error) error { + + return pb.WithMasterClient(master, broker.grpcDialOption, func(client master_pb.SeaweedClient) error { + return fn(client) + }) + +} diff --git a/weed/server/queue_server.go b/weed/server/queue_server.go deleted file mode 100644 index 078c76a30..000000000 --- a/weed/server/queue_server.go +++ /dev/null @@ -1,49 +0,0 @@ -package weed_server - -import ( - "context" - - "google.golang.org/grpc" - - "github.com/chrislusf/seaweedfs/weed/pb/queue_pb" - "github.com/chrislusf/seaweedfs/weed/security" - "github.com/chrislusf/seaweedfs/weed/util" -) - -type QueueServerOption struct { - Filers []string - DefaultReplication string - MaxMB int - Port int -} - -type QueueServer struct { - option *QueueServerOption - grpcDialOption grpc.DialOption -} - -func (q *QueueServer) ConfigureTopic(context.Context, *queue_pb.ConfigureTopicRequest) (*queue_pb.ConfigureTopicResponse, error) { - panic("implement me") -} - -func (q *QueueServer) DeleteTopic(context.Context, *queue_pb.DeleteTopicRequest) (*queue_pb.DeleteTopicResponse, error) { - panic("implement me") -} - -func (q *QueueServer) StreamWrite(queue_pb.SeaweedQueue_StreamWriteServer) error { - panic("implement me") -} - -func (q *QueueServer) StreamRead(*queue_pb.ReadMessageRequest, queue_pb.SeaweedQueue_StreamReadServer) error { - panic("implement me") -} - -func NewQueueServer(option *QueueServerOption) (qs *QueueServer, err error) { - - qs = &QueueServer{ - option: option, - grpcDialOption: security.LoadClientTLS(util.GetViper(), "grpc.queue"), - } - - return qs, nil -} diff --git a/weed/server/raft_server.go b/weed/server/raft_server.go index 53289f1c1..0381c7feb 100644 --- a/weed/server/raft_server.go +++ b/weed/server/raft_server.go @@ -2,8 +2,6 @@ package weed_server import ( "encoding/json" - "github.com/chrislusf/seaweedfs/weed/util" - "google.golang.org/grpc" "io/ioutil" "os" "path" @@ -11,7 +9,12 @@ import ( "sort" "time" + "google.golang.org/grpc" + + "github.com/chrislusf/seaweedfs/weed/pb" + "github.com/chrislusf/raft" + "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/topology" ) @@ -61,7 +64,7 @@ func NewRaftServer(grpcDialOption grpc.DialOption, peers []string, serverAddr, d s.raftServer.Start() for _, peer := range s.peers { - s.raftServer.AddPeer(peer, util.ServerToGrpcAddress(peer)) + s.raftServer.AddPeer(peer, pb.ServerToGrpcAddress(peer)) } s.GrpcServer = raft.NewGrpcServer(s.raftServer) @@ -72,7 +75,7 @@ func NewRaftServer(grpcDialOption grpc.DialOption, peers []string, serverAddr, d _, err := s.raftServer.Do(&raft.DefaultJoinCommand{ Name: s.raftServer.Name(), - ConnectionString: util.ServerToGrpcAddress(s.serverAddr), + ConnectionString: pb.ServerToGrpcAddress(s.serverAddr), }) if err != nil { diff --git a/weed/server/volume_grpc_client_to_master.go b/weed/server/volume_grpc_client_to_master.go index 2168afee7..1f4d9df10 100644 --- a/weed/server/volume_grpc_client_to_master.go +++ b/weed/server/volume_grpc_client_to_master.go @@ -7,6 +7,7 @@ import ( "google.golang.org/grpc" + "github.com/chrislusf/seaweedfs/weed/pb" "github.com/chrislusf/seaweedfs/weed/security" 
"github.com/chrislusf/seaweedfs/weed/storage/backend" "github.com/chrislusf/seaweedfs/weed/storage/erasure_coding" @@ -36,7 +37,7 @@ func (vs *VolumeServer) heartbeat() { if newLeader != "" { master = newLeader } - masterGrpcAddress, parseErr := util.ParseServerToGrpcAddress(master) + masterGrpcAddress, parseErr := pb.ParseServerToGrpcAddress(master) if parseErr != nil { glog.V(0).Infof("failed to parse master grpc %v: %v", masterGrpcAddress, parseErr) continue @@ -55,7 +56,7 @@ func (vs *VolumeServer) heartbeat() { func (vs *VolumeServer) doHeartbeat(masterNode, masterGrpcAddress string, grpcDialOption grpc.DialOption, sleepInterval time.Duration) (newLeader string, err error) { - grpcConection, err := util.GrpcDial(context.Background(), masterGrpcAddress, grpcDialOption) + grpcConection, err := pb.GrpcDial(context.Background(), masterGrpcAddress, grpcDialOption) if err != nil { return "", fmt.Errorf("fail to dial %s : %v", masterNode, err) } diff --git a/weed/server/webdav_server.go b/weed/server/webdav_server.go index ddd611724..a07f6be01 100644 --- a/weed/server/webdav_server.go +++ b/weed/server/webdav_server.go @@ -14,6 +14,7 @@ import ( "google.golang.org/grpc" "github.com/chrislusf/seaweedfs/weed/operation" + "github.com/chrislusf/seaweedfs/weed/pb" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" "github.com/chrislusf/seaweedfs/weed/util" @@ -98,7 +99,7 @@ func NewWebDavFileSystem(option *WebDavOption) (webdav.FileSystem, error) { func (fs *WebDavFileSystem) WithFilerClient(fn func(filer_pb.SeaweedFilerClient) error) error { - return util.WithCachedGrpcClient(func(grpcConnection *grpc.ClientConn) error { + return pb.WithCachedGrpcClient(func(grpcConnection *grpc.ClientConn) error { client := filer_pb.NewSeaweedFilerClient(grpcConnection) return fn(client) }, fs.option.FilerGrpcAddress, fs.option.GrpcDialOption) diff --git a/weed/shell/command_fs_du.go b/weed/shell/command_fs_du.go index 6c31ebdff..ca2f22b57 100644 --- a/weed/shell/command_fs_du.go +++ b/weed/shell/command_fs_du.go @@ -4,11 +4,9 @@ import ( "fmt" "io" - "google.golang.org/grpc" - "github.com/chrislusf/seaweedfs/weed/filer2" + "github.com/chrislusf/seaweedfs/weed/pb" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" - "github.com/chrislusf/seaweedfs/weed/util" ) func init() { @@ -82,10 +80,7 @@ func duTraverseDirectory(writer io.Writer, filerClient filer2.FilerClient, dir, func (env *CommandEnv) withFilerClient(filerServer string, filerPort int64, fn func(filer_pb.SeaweedFilerClient) error) error { filerGrpcAddress := fmt.Sprintf("%s:%d", filerServer, filerPort+10000) - return util.WithCachedGrpcClient(func(grpcConnection *grpc.ClientConn) error { - client := filer_pb.NewSeaweedFilerClient(grpcConnection) - return fn(client) - }, filerGrpcAddress, env.option.GrpcDialOption) + return pb.WithGrpcFilerClient(filerGrpcAddress, env.option.GrpcDialOption, fn) } diff --git a/weed/wdclient/masterclient.go b/weed/wdclient/masterclient.go index 0cf161a63..301f20615 100644 --- a/weed/wdclient/masterclient.go +++ b/weed/wdclient/masterclient.go @@ -2,15 +2,14 @@ package wdclient import ( "context" - "fmt" "math/rand" "time" "google.golang.org/grpc" "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb" "github.com/chrislusf/seaweedfs/weed/pb/master_pb" - "github.com/chrislusf/seaweedfs/weed/util" ) type MasterClient struct { @@ -67,7 +66,7 @@ func (mc *MasterClient) tryAllMasters() { func (mc *MasterClient) tryConnectToMaster(master string) (nextHintedLeader string) { glog.V(1).Infof("%s 
Connecting to master %v", mc.name, master) - gprcErr := withMasterClient(master, mc.grpcDialOption, func(client master_pb.SeaweedClient) error { + gprcErr := pb.WithMasterClient(master, mc.grpcDialOption, func(client master_pb.SeaweedClient) error { stream, err := client.KeepConnected(context.Background()) if err != nil { @@ -119,22 +118,8 @@ func (mc *MasterClient) tryConnectToMaster(master string) (nextHintedLeader stri return } -func withMasterClient(master string, grpcDialOption grpc.DialOption, fn func(client master_pb.SeaweedClient) error) error { - - masterGrpcAddress, parseErr := util.ParseServerToGrpcAddress(master) - if parseErr != nil { - return fmt.Errorf("failed to parse master grpc %v: %v", master, parseErr) - } - - return util.WithCachedGrpcClient(func(grpcConnection *grpc.ClientConn) error { - client := master_pb.NewSeaweedClient(grpcConnection) - return fn(client) - }, masterGrpcAddress, grpcDialOption) - -} - func (mc *MasterClient) WithClient(fn func(client master_pb.SeaweedClient) error) error { - return withMasterClient(mc.currentMaster, mc.grpcDialOption, func(client master_pb.SeaweedClient) error { + return pb.WithMasterClient(mc.currentMaster, mc.grpcDialOption, func(client master_pb.SeaweedClient) error { return fn(client) }) } From e0316052489c6b379e1b525da130c94581f32381 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Wed, 4 Mar 2020 00:44:27 -0800 Subject: [PATCH 0190/2432] purge code --- weed/command/msg_broker.go | 4 ---- 1 file changed, 4 deletions(-) diff --git a/weed/command/msg_broker.go b/weed/command/msg_broker.go index 0d69a9a66..21551df9e 100644 --- a/weed/command/msg_broker.go +++ b/weed/command/msg_broker.go @@ -25,8 +25,6 @@ var ( type QueueOptions struct { filer *string port *int - tlsPrivateKey *string - tlsCertificate *string defaultTtl *string } @@ -34,8 +32,6 @@ func init() { cmdMsgBroker.Run = runMsgBroker // break init cycle messageBrokerStandaloneOptions.filer = cmdMsgBroker.Flag.String("filer", "localhost:8888", "filer server address") messageBrokerStandaloneOptions.port = cmdMsgBroker.Flag.Int("port", 17777, "queue server gRPC listen port") - messageBrokerStandaloneOptions.tlsPrivateKey = cmdMsgBroker.Flag.String("key.file", "", "path to the TLS private key file") - messageBrokerStandaloneOptions.tlsCertificate = cmdMsgBroker.Flag.String("cert.file", "", "path to the TLS certificate file") messageBrokerStandaloneOptions.defaultTtl = cmdMsgBroker.Flag.String("ttl", "1h", "time to live, e.g.: 1m, 1h, 1d, 1M, 1y") } From 465d18930bafc7123713909d59d5fe0f39aabee4 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Thu, 5 Mar 2020 10:35:21 -0800 Subject: [PATCH 0191/2432] filer redis: directory listing follow ttl fix https://github.com/chrislusf/seaweedfs/issues/1217 --- weed/filer2/redis/universal_redis_store.go | 18 ++++++++++++++---- 1 file changed, 14 insertions(+), 4 deletions(-) diff --git a/weed/filer2/redis/universal_redis_store.go b/weed/filer2/redis/universal_redis_store.go index 62257e91e..2162be733 100644 --- a/weed/filer2/redis/universal_redis_store.go +++ b/weed/filer2/redis/universal_redis_store.go @@ -3,12 +3,14 @@ package redis import ( "context" "fmt" - "github.com/chrislusf/seaweedfs/weed/filer2" - "github.com/chrislusf/seaweedfs/weed/glog" - "github.com/go-redis/redis" "sort" "strings" "time" + + "github.com/go-redis/redis" + + "github.com/chrislusf/seaweedfs/weed/filer2" + "github.com/chrislusf/seaweedfs/weed/glog" ) const ( @@ -120,7 +122,8 @@ func (store *UniversalRedisStore) DeleteFolderChildren(ctx context.Context, full func 
(store *UniversalRedisStore) ListDirectoryEntries(ctx context.Context, fullpath filer2.FullPath, startFileName string, inclusive bool, limit int) (entries []*filer2.Entry, err error) { - members, err := store.Client.SMembers(genDirectoryListKey(string(fullpath))).Result() + dirListKey := genDirectoryListKey(string(fullpath)) + members, err := store.Client.SMembers(dirListKey).Result() if err != nil { return nil, fmt.Errorf("list %s : %v", fullpath, err) } @@ -159,6 +162,13 @@ func (store *UniversalRedisStore) ListDirectoryEntries(ctx context.Context, full if err != nil { glog.V(0).Infof("list %s : %v", path, err) } else { + if entry.TtlSec > 0 { + if entry.Attr.Crtime.Add(time.Duration(entry.TtlSec) * time.Second).Before(time.Now()) { + store.Client.Del(string(path)).Result() + store.Client.SRem(dirListKey, fileName).Result() + continue + } + } entries = append(entries, entry) } } From 31c481e3fce94a1a3872434a9907a574cb2679e1 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Thu, 5 Mar 2020 16:00:20 -0800 Subject: [PATCH 0192/2432] fix typo --- weed/command/mount.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/weed/command/mount.go b/weed/command/mount.go index e73cbee10..792845083 100644 --- a/weed/command/mount.go +++ b/weed/command/mount.go @@ -54,7 +54,7 @@ var cmdMount = &Command{ On OS X, it requires OSXFUSE (http://osxfuse.github.com/). - If the SeaweedFS systemm runs in a container cluster, e.g. managed by kubernetes or docker compose, + If the SeaweedFS system runs in a container cluster, e.g. managed by kubernetes or docker compose, the volume servers are not accessible by their own ip addresses. In "outsideContainerClusterMode", the mount will use the filer ip address instead, assuming: * All volume server containers are accessible through the same hostname or IP address as the filer. 
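For illustration, the lazy TTL handling added to the redis store in the directory-listing fix above reduces to one time comparison: an entry with a positive TtlSec is treated as expired once Crtime plus TtlSec is in the past, and is dropped from the listing. A minimal standalone Go sketch of that check follows; the isExpired helper and the sample values are illustrative only and are not part of the patch.

package main

import (
	"fmt"
	"time"
)

// isExpired mirrors the condition used in ListDirectoryEntries:
// an entry with a positive TTL is considered gone once crtime + ttl has passed.
func isExpired(crtime time.Time, ttlSec int32) bool {
	if ttlSec <= 0 {
		return false // no TTL configured, the entry never expires
	}
	return crtime.Add(time.Duration(ttlSec) * time.Second).Before(time.Now())
}

func main() {
	created := time.Now().Add(-2 * time.Hour)
	fmt.Println(isExpired(created, 3600)) // true: 1h TTL, created 2h ago
	fmt.Println(isExpired(created, 0))    // false: no TTL set
}

In the patch itself this check additionally deletes the expired entry key and removes it from the directory set (Del and SRem), so expired files disappear from listings without a separate cleanup pass.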
From 13e215ee5cb5f4c2873f89c263d8c970e9978b19 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Fri, 6 Mar 2020 00:49:47 -0800 Subject: [PATCH 0193/2432] filer: option to encrypt data on volume server --- other/java/client/src/main/proto/filer.proto | 2 + .../repeated_vacuum/repeated_vacuum.go | 2 +- weed/command/filer.go | 3 + weed/command/filer_copy.go | 36 +-- weed/command/mount_std.go | 5 +- weed/command/scaffold.go | 4 +- weed/command/server.go | 1 + weed/command/webdav.go | 40 ++- weed/filer2/filechunks.go | 30 +-- weed/filer2/filer.go | 1 + weed/filer2/filer_client_util.go | 7 +- weed/filer2/stream.go | 2 +- weed/filesys/dirty_page.go | 13 +- weed/filesys/wfs.go | 5 +- weed/operation/submit.go | 7 +- weed/operation/upload_content.go | 47 +++- weed/pb/filer.proto | 2 + weed/pb/filer_pb/filer.pb.go | 235 ++++++++++-------- weed/pb/master_pb/master.pb.go | 20 +- weed/pb/volume_server_pb/volume_server.pb.go | 26 +- weed/replication/sink/azuresink/azure_sink.go | 2 +- weed/replication/sink/b2sink/b2_sink.go | 2 +- .../replication/sink/filersink/fetch_write.go | 5 +- weed/replication/sink/gcssink/gcs_sink.go | 5 +- weed/replication/sink/s3sink/s3_write.go | 2 +- weed/server/common.go | 2 +- weed/server/filer_grpc_server.go | 1 + weed/server/filer_server.go | 2 + weed/server/filer_server_handlers_read.go | 25 +- .../filer_server_handlers_write_autochunk.go | 2 +- weed/server/webdav_server.go | 14 +- weed/topology/store_replicate.go | 5 +- weed/util/cipher.go | 60 +++++ weed/util/http_util.go | 51 +++- 34 files changed, 419 insertions(+), 247 deletions(-) create mode 100644 weed/util/cipher.go diff --git a/other/java/client/src/main/proto/filer.proto b/other/java/client/src/main/proto/filer.proto index 9ee552561..5983c84d8 100644 --- a/other/java/client/src/main/proto/filer.proto +++ b/other/java/client/src/main/proto/filer.proto @@ -99,6 +99,7 @@ message FileChunk { string source_file_id = 6; // to be deprecated FileId fid = 7; FileId source_fid = 8; + bytes cipher_key = 9; } message FileId { @@ -229,4 +230,5 @@ message GetFilerConfigurationResponse { uint32 max_mb = 4; string dir_buckets = 5; string dir_queues = 6; + bool cipher = 7; } diff --git a/unmaintained/repeated_vacuum/repeated_vacuum.go b/unmaintained/repeated_vacuum/repeated_vacuum.go index 718b6faa1..4a0464eda 100644 --- a/unmaintained/repeated_vacuum/repeated_vacuum.go +++ b/unmaintained/repeated_vacuum/repeated_vacuum.go @@ -35,7 +35,7 @@ func main() { targetUrl := fmt.Sprintf("http://%s/%s", assignResult.Url, assignResult.Fid) - _, err = operation.Upload(targetUrl, fmt.Sprintf("test%d", i), reader, false, "", nil, assignResult.Auth) + _, err = operation.Upload(targetUrl, fmt.Sprintf("test%d", i), false, reader, false, "", nil, assignResult.Auth) if err != nil { log.Fatalf("upload: %v", err) } diff --git a/weed/command/filer.go b/weed/command/filer.go index b5b595215..327ee8316 100644 --- a/weed/command/filer.go +++ b/weed/command/filer.go @@ -34,6 +34,7 @@ type FilerOptions struct { dataCenter *string enableNotification *bool disableHttp *bool + cipher *bool // default leveldb directory, used in "weed server" mode defaultLevelDbDirectory *string @@ -53,6 +54,7 @@ func init() { f.dirListingLimit = cmdFiler.Flag.Int("dirListLimit", 100000, "limit sub dir listing size") f.dataCenter = cmdFiler.Flag.String("dataCenter", "", "prefer to write to volumes in this data center") f.disableHttp = cmdFiler.Flag.Bool("disableHttp", false, "disable http request, only gRpc operations are allowed") + f.cipher = 
cmdFiler.Flag.Bool("encryptVolumeData", false, "encrypt data on volume servers") } var cmdFiler = &Command{ @@ -111,6 +113,7 @@ func (fo *FilerOptions) startFiler() { DefaultLevelDbDir: defaultLevelDbDirectory, DisableHttp: *fo.disableHttp, Port: uint32(*fo.port), + Cipher: *fo.cipher, }) if nfs_err != nil { glog.Fatalf("Filer startup error: %v", nfs_err) diff --git a/weed/command/filer_copy.go b/weed/command/filer_copy.go index 3e7ae1db2..8c01cfd74 100644 --- a/weed/command/filer_copy.go +++ b/weed/command/filer_copy.go @@ -41,6 +41,7 @@ type CopyOptions struct { compressionLevel *int grpcDialOption grpc.DialOption masters []string + cipher bool } func init() { @@ -108,7 +109,7 @@ func runCopy(cmd *Command, args []string) bool { filerGrpcAddress := fmt.Sprintf("%s:%d", filerUrl.Hostname(), filerGrpcPort) copy.grpcDialOption = security.LoadClientTLS(util.GetViper(), "grpc.client") - masters, collection, replication, maxMB, err := readFilerConfiguration(copy.grpcDialOption, filerGrpcAddress) + masters, collection, replication, maxMB, cipher, err := readFilerConfiguration(copy.grpcDialOption, filerGrpcAddress) if err != nil { fmt.Printf("read from filer %s: %v\n", filerGrpcAddress, err) return false @@ -123,6 +124,7 @@ func runCopy(cmd *Command, args []string) bool { *copy.maxMB = int(maxMB) } copy.masters = masters + copy.cipher = cipher if *cmdCopy.IsDebug { util.SetupProfiling("filer.copy.cpu.pprof", "filer.copy.mem.pprof") @@ -159,13 +161,14 @@ func runCopy(cmd *Command, args []string) bool { return true } -func readFilerConfiguration(grpcDialOption grpc.DialOption, filerGrpcAddress string) (masters []string, collection, replication string, maxMB uint32, err error) { +func readFilerConfiguration(grpcDialOption grpc.DialOption, filerGrpcAddress string) (masters []string, collection, replication string, maxMB uint32, cipher bool, err error) { err = pb.WithGrpcFilerClient(filerGrpcAddress, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error { resp, err := client.GetFilerConfiguration(context.Background(), &filer_pb.GetFilerConfigurationRequest{}) if err != nil { return fmt.Errorf("get filer %s configuration: %v", filerGrpcAddress, err) } masters, collection, replication, maxMB = resp.Masters, resp.Collection, resp.Replication, resp.MaxMb + cipher = resp.Cipher return nil }) return @@ -300,7 +303,7 @@ func (worker *FileCopyWorker) uploadFileAsOne(task FileCopyTask, f *os.File) err targetUrl := "http://" + assignResult.Url + "/" + assignResult.FileId - uploadResult, err := operation.UploadWithLocalCompressionLevel(targetUrl, fileName, f, false, mimeType, nil, security.EncodedJwt(assignResult.Auth), *worker.options.compressionLevel) + uploadResult, err := operation.UploadWithLocalCompressionLevel(targetUrl, fileName, worker.options.cipher, f, false, mimeType, nil, security.EncodedJwt(assignResult.Auth), *worker.options.compressionLevel) if err != nil { return fmt.Errorf("upload data %v to %s: %v\n", fileName, targetUrl, err) } @@ -310,11 +313,12 @@ func (worker *FileCopyWorker) uploadFileAsOne(task FileCopyTask, f *os.File) err fmt.Printf("uploaded %s to %s\n", fileName, targetUrl) chunks = append(chunks, &filer_pb.FileChunk{ - FileId: assignResult.FileId, - Offset: 0, - Size: uint64(uploadResult.Size), - Mtime: time.Now().UnixNano(), - ETag: uploadResult.ETag, + FileId: assignResult.FileId, + Offset: 0, + Size: uint64(uploadResult.Size), + Mtime: time.Now().UnixNano(), + ETag: uploadResult.ETag, + CipherKey: uploadResult.CipherKey, }) fmt.Printf("copied %s => http://%s%s%s\n", 
fileName, worker.filerHost, task.destinationUrlPath, fileName) @@ -409,10 +413,7 @@ func (worker *FileCopyWorker) uploadFileInChunks(task FileCopyTask, f *os.File, replication = assignResult.Replication } - uploadResult, err := operation.Upload(targetUrl, - fileName+"-"+strconv.FormatInt(i+1, 10), - io.NewSectionReader(f, i*chunkSize, chunkSize), - false, "", nil, security.EncodedJwt(assignResult.Auth)) + uploadResult, err := operation.Upload(targetUrl, fileName+"-"+strconv.FormatInt(i+1, 10), false, io.NewSectionReader(f, i*chunkSize, chunkSize), false, "", nil, security.EncodedJwt(assignResult.Auth)) if err != nil { uploadError = fmt.Errorf("upload data %v to %s: %v\n", fileName, targetUrl, err) return @@ -422,11 +423,12 @@ func (worker *FileCopyWorker) uploadFileInChunks(task FileCopyTask, f *os.File, return } chunksChan <- &filer_pb.FileChunk{ - FileId: assignResult.FileId, - Offset: i * chunkSize, - Size: uint64(uploadResult.Size), - Mtime: time.Now().UnixNano(), - ETag: uploadResult.ETag, + FileId: assignResult.FileId, + Offset: i * chunkSize, + Size: uint64(uploadResult.Size), + Mtime: time.Now().UnixNano(), + ETag: uploadResult.ETag, + CipherKey: uploadResult.CipherKey, } fmt.Printf("uploaded %s-%d to %s [%d,%d)\n", fileName, i+1, targetUrl, i*chunkSize, i*chunkSize+int64(uploadResult.Size)) }(i) diff --git a/weed/command/mount_std.go b/weed/command/mount_std.go index b195bf143..9177091a5 100644 --- a/weed/command/mount_std.go +++ b/weed/command/mount_std.go @@ -145,11 +145,13 @@ func RunMount(filer, filerMountRootPath, dir, collection, replication, dataCente // try to connect to filer, filerBucketsPath may be useful later grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.client") + var cipher bool err = pb.WithGrpcFilerClient(filerGrpcAddress, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error { - _, err := client.GetFilerConfiguration(context.Background(), &filer_pb.GetFilerConfigurationRequest{}) + resp, err := client.GetFilerConfiguration(context.Background(), &filer_pb.GetFilerConfigurationRequest{}) if err != nil { return fmt.Errorf("get filer %s configuration: %v", filerGrpcAddress, err) } + cipher = resp.Cipher return nil }) if err != nil { @@ -183,6 +185,7 @@ func RunMount(filer, filerMountRootPath, dir, collection, replication, dataCente MountMtime: time.Now(), Umask: umask, OutsideContainerClusterMode: outsideContainerClusterMode, + Cipher: cipher, })) if err != nil { fuse.Unmount(dir) diff --git a/weed/command/scaffold.go b/weed/command/scaffold.go index 5b246b7c0..f4a08fb51 100644 --- a/weed/command/scaffold.go +++ b/weed/command/scaffold.go @@ -75,9 +75,9 @@ const ( # recursive_delete will delete all sub folders and files, similar to "rm -Rf" recursive_delete = false # directories under this folder will be automatically creating a separate bucket -buckets_folder = /buckets +buckets_folder = "/buckets" # directories under this folder will be store message queue data -queues_folder = /queues +queues_folder = "/queues" #################################################### # The following are filer store options diff --git a/weed/command/server.go b/weed/command/server.go index c9d27555c..f45429193 100644 --- a/weed/command/server.go +++ b/weed/command/server.go @@ -82,6 +82,7 @@ func init() { filerOptions.disableDirListing = cmdServer.Flag.Bool("filer.disableDirListing", false, "turn off directory listing") filerOptions.maxMB = cmdServer.Flag.Int("filer.maxMB", 32, "split files larger than the limit") filerOptions.dirListingLimit = 
cmdServer.Flag.Int("filer.dirListLimit", 1000, "limit sub dir listing size") + filerOptions.cipher = cmdServer.Flag.Bool("filer.encryptVolumeData", false, "encrypt data on volume servers") serverOptions.v.port = cmdServer.Flag.Int("volume.port", 8080, "volume server http listen port") serverOptions.v.publicPort = cmdServer.Flag.Int("volume.port.public", 0, "volume server public port") diff --git a/weed/command/webdav.go b/weed/command/webdav.go index ba88a17be..4f5d5f5ce 100644 --- a/weed/command/webdav.go +++ b/weed/command/webdav.go @@ -1,6 +1,7 @@ package command import ( + "context" "fmt" "net/http" "os/user" @@ -9,6 +10,7 @@ import ( "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/pb" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" "github.com/chrislusf/seaweedfs/weed/security" "github.com/chrislusf/seaweedfs/weed/server" "github.com/chrislusf/seaweedfs/weed/util" @@ -55,12 +57,6 @@ func runWebDav(cmd *Command, args []string) bool { func (wo *WebDavOption) startWebDav() bool { - filerGrpcAddress, err := pb.ParseFilerGrpcAddress(*wo.filer) - if err != nil { - glog.Fatal(err) - return false - } - // detect current user uid, gid := uint32(0), uint32(0) if u, err := user.Current(); err == nil { @@ -72,13 +68,43 @@ func (wo *WebDavOption) startWebDav() bool { } } + // parse filer grpc address + filerGrpcAddress, err := pb.ParseFilerGrpcAddress(*wo.filer) + if err != nil { + glog.Fatal(err) + return false + } + + grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.client") + + var cipher bool + // connect to filer + for { + err = pb.WithGrpcFilerClient(filerGrpcAddress, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error { + resp, err := client.GetFilerConfiguration(context.Background(), &filer_pb.GetFilerConfigurationRequest{}) + if err != nil { + return fmt.Errorf("get filer %s configuration: %v", filerGrpcAddress, err) + } + cipher = resp.Cipher + return nil + }) + if err != nil { + glog.V(0).Infof("wait to connect to filer %s grpc address %s", *wo.filer, filerGrpcAddress) + time.Sleep(time.Second) + } else { + glog.V(0).Infof("connected to filer %s grpc address %s", *wo.filer, filerGrpcAddress) + break + } + } + ws, webdavServer_err := weed_server.NewWebDavServer(&weed_server.WebDavOption{ Filer: *wo.filer, FilerGrpcAddress: filerGrpcAddress, - GrpcDialOption: security.LoadClientTLS(util.GetViper(), "grpc.client"), + GrpcDialOption: grpcDialOption, Collection: *wo.collection, Uid: uid, Gid: gid, + Cipher: cipher, }) if webdavServer_err != nil { glog.Fatalf("WebDav Server startup error: %v", webdavServer_err) diff --git a/weed/filer2/filechunks.go b/weed/filer2/filechunks.go index b5876df82..98a965337 100644 --- a/weed/filer2/filechunks.go +++ b/weed/filer2/filechunks.go @@ -71,6 +71,7 @@ type ChunkView struct { Size uint64 LogicOffset int64 IsFullChunk bool + CipherKey []byte } func ViewFromChunks(chunks []*filer_pb.FileChunk, offset int64, size int) (views []*ChunkView) { @@ -94,6 +95,7 @@ func ViewFromVisibleIntervals(visibles []VisibleInterval, offset int64, size int Size: uint64(min(chunk.stop, stop) - offset), LogicOffset: offset, IsFullChunk: isFullChunk, + CipherKey: chunk.cipherKey, }) offset = min(chunk.stop, stop) } @@ -120,13 +122,7 @@ var bufPool = sync.Pool{ func MergeIntoVisibles(visibles, newVisibles []VisibleInterval, chunk *filer_pb.FileChunk) []VisibleInterval { - newV := newVisibleInterval( - chunk.Offset, - chunk.Offset+int64(chunk.Size), - chunk.GetFileIdString(), - chunk.Mtime, - true, - ) + newV := 
newVisibleInterval(chunk.Offset, chunk.Offset+int64(chunk.Size), chunk.GetFileIdString(), chunk.Mtime, true, chunk.CipherKey) length := len(visibles) if length == 0 { @@ -140,23 +136,11 @@ func MergeIntoVisibles(visibles, newVisibles []VisibleInterval, chunk *filer_pb. logPrintf(" before", visibles) for _, v := range visibles { if v.start < chunk.Offset && chunk.Offset < v.stop { - newVisibles = append(newVisibles, newVisibleInterval( - v.start, - chunk.Offset, - v.fileId, - v.modifiedTime, - false, - )) + newVisibles = append(newVisibles, newVisibleInterval(v.start, chunk.Offset, v.fileId, v.modifiedTime, false, v.cipherKey)) } chunkStop := chunk.Offset + int64(chunk.Size) if v.start < chunkStop && chunkStop < v.stop { - newVisibles = append(newVisibles, newVisibleInterval( - chunkStop, - v.stop, - v.fileId, - v.modifiedTime, - false, - )) + newVisibles = append(newVisibles, newVisibleInterval(chunkStop, v.stop, v.fileId, v.modifiedTime, false, v.cipherKey)) } if chunkStop <= v.start || v.stop <= chunk.Offset { newVisibles = append(newVisibles, v) @@ -208,15 +192,17 @@ type VisibleInterval struct { modifiedTime int64 fileId string isFullChunk bool + cipherKey []byte } -func newVisibleInterval(start, stop int64, fileId string, modifiedTime int64, isFullChunk bool) VisibleInterval { +func newVisibleInterval(start, stop int64, fileId string, modifiedTime int64, isFullChunk bool, cipherKey []byte) VisibleInterval { return VisibleInterval{ start: start, stop: stop, fileId: fileId, modifiedTime: modifiedTime, isFullChunk: isFullChunk, + cipherKey: cipherKey, } } diff --git a/weed/filer2/filer.go b/weed/filer2/filer.go index bf43c3c52..cbb14a5c1 100644 --- a/weed/filer2/filer.go +++ b/weed/filer2/filer.go @@ -33,6 +33,7 @@ type Filer struct { DirBucketsPath string DirQueuesPath string buckets *FilerBuckets + Cipher bool } func NewFiler(masters []string, grpcDialOption grpc.DialOption, filerGrpcPort uint32) *Filer { diff --git a/weed/filer2/filer_client_util.go b/weed/filer2/filer_client_util.go index ab9db2992..e80c4bf36 100644 --- a/weed/filer2/filer_client_util.go +++ b/weed/filer2/filer_client_util.go @@ -70,12 +70,7 @@ func ReadIntoBuffer(filerClient FilerClient, fullFilePath FullPath, buff []byte, volumeServerAddress := filerClient.AdjustedUrl(locations.Locations[0].Url) var n int64 - n, err = util.ReadUrl( - fmt.Sprintf("http://%s/%s", volumeServerAddress, chunkView.FileId), - chunkView.Offset, - int(chunkView.Size), - buff[chunkView.LogicOffset-baseOffset:chunkView.LogicOffset-baseOffset+int64(chunkView.Size)], - !chunkView.IsFullChunk) + n, err = util.ReadUrl(fmt.Sprintf("http://%s/%s", volumeServerAddress, chunkView.FileId), chunkView.CipherKey, chunkView.IsFullChunk, chunkView.Offset, int(chunkView.Size), buff[chunkView.LogicOffset-baseOffset:chunkView.LogicOffset-baseOffset+int64(chunkView.Size)]) if err != nil { diff --git a/weed/filer2/stream.go b/weed/filer2/stream.go index 01b87cad1..0a1f943ea 100644 --- a/weed/filer2/stream.go +++ b/weed/filer2/stream.go @@ -27,7 +27,7 @@ func StreamContent(masterClient *wdclient.MasterClient, w io.Writer, chunks []*f for _, chunkView := range chunkViews { urlString := fileId2Url[chunkView.FileId] - _, err := util.ReadUrlAsStream(urlString, chunkView.Offset, int(chunkView.Size), func(data []byte) { + err := util.ReadUrlAsStream(urlString, chunkView.CipherKey, chunkView.IsFullChunk, chunkView.Offset, int(chunkView.Size), func(data []byte) { w.Write(data) }) if err != nil { diff --git a/weed/filesys/dirty_page.go b/weed/filesys/dirty_page.go index 
67e1d57ef..ff3b8f885 100644 --- a/weed/filesys/dirty_page.go +++ b/weed/filesys/dirty_page.go @@ -174,7 +174,7 @@ func (pages *ContinuousDirtyPages) saveToStorage(reader io.Reader, offset int64, } fileUrl := fmt.Sprintf("http://%s/%s", host, fileId) - uploadResult, err := operation.Upload(fileUrl, pages.f.Name, reader, false, "", nil, auth) + uploadResult, err := operation.Upload(fileUrl, pages.f.Name, pages.f.wfs.option.Cipher, reader, false, "", nil, auth) if err != nil { glog.V(0).Infof("upload data %v to %s: %v", pages.f.Name, fileUrl, err) return nil, fmt.Errorf("upload data: %v", err) @@ -185,11 +185,12 @@ func (pages *ContinuousDirtyPages) saveToStorage(reader io.Reader, offset int64, } return &filer_pb.FileChunk{ - FileId: fileId, - Offset: offset, - Size: uint64(size), - Mtime: time.Now().UnixNano(), - ETag: uploadResult.ETag, + FileId: fileId, + Offset: offset, + Size: uint64(size), + Mtime: time.Now().UnixNano(), + ETag: uploadResult.ETag, + CipherKey: uploadResult.CipherKey, }, nil } diff --git a/weed/filesys/wfs.go b/weed/filesys/wfs.go index 83826fed5..77438b58e 100644 --- a/weed/filesys/wfs.go +++ b/weed/filesys/wfs.go @@ -39,8 +39,9 @@ type Option struct { MountCtime time.Time MountMtime time.Time - // whether the mount runs outside SeaweedFS containers - OutsideContainerClusterMode bool + OutsideContainerClusterMode bool // whether the mount runs outside SeaweedFS containers + Cipher bool // whether encrypt data on volume server + } var _ = fs.FS(&WFS{}) diff --git a/weed/operation/submit.go b/weed/operation/submit.go index 62f067430..8c7ed5d7b 100644 --- a/weed/operation/submit.go +++ b/weed/operation/submit.go @@ -189,7 +189,7 @@ func (fi FilePart) Upload(maxMB int, master string, jwt security.EncodedJwt, grp cm.DeleteChunks(master, grpcDialOption) } } else { - ret, e := Upload(fileUrl, baseName, fi.Reader, false, fi.MimeType, nil, jwt) + ret, e := Upload(fileUrl, baseName, false, fi.Reader, false, fi.MimeType, nil, jwt) if e != nil { return 0, e } @@ -202,8 +202,7 @@ func upload_one_chunk(filename string, reader io.Reader, master, fileUrl string, jwt security.EncodedJwt, ) (size uint32, e error) { glog.V(4).Info("Uploading part ", filename, " to ", fileUrl, "...") - uploadResult, uploadError := Upload(fileUrl, filename, reader, false, - "", nil, jwt) + uploadResult, uploadError := Upload(fileUrl, filename, false, reader, false, "", nil, jwt) if uploadError != nil { return 0, uploadError } @@ -221,6 +220,6 @@ func upload_chunked_file_manifest(fileUrl string, manifest *ChunkManifest, jwt s q := u.Query() q.Set("cm", "true") u.RawQuery = q.Encode() - _, e = Upload(u.String(), manifest.Name, bufReader, false, "application/json", nil, jwt) + _, e = Upload(u.String(), manifest.Name, false, bufReader, false, "application/json", nil, jwt) return e } diff --git a/weed/operation/upload_content.go b/weed/operation/upload_content.go index c387d0230..ba15aea78 100644 --- a/weed/operation/upload_content.go +++ b/weed/operation/upload_content.go @@ -22,10 +22,11 @@ import ( ) type UploadResult struct { - Name string `json:"name,omitempty"` - Size uint32 `json:"size,omitempty"` - Error string `json:"error,omitempty"` - ETag string `json:"eTag,omitempty"` + Name string `json:"name,omitempty"` + Size uint32 `json:"size,omitempty"` + Error string `json:"error,omitempty"` + ETag string `json:"eTag,omitempty"` + CipherKey []byte `json:"cipherKey,omitempty"` } var ( @@ -41,22 +42,22 @@ func init() { var fileNameEscaper = strings.NewReplacer("\\", "\\\\", "\"", "\\\"") // Upload sends a POST 
request to a volume server to upload the content with adjustable compression level -func UploadWithLocalCompressionLevel(uploadUrl string, filename string, reader io.Reader, isGzipped bool, mtype string, pairMap map[string]string, jwt security.EncodedJwt, compressionLevel int) (*UploadResult, error) { +func UploadWithLocalCompressionLevel(uploadUrl string, filename string, cipher bool, reader io.Reader, isGzipped bool, mtype string, pairMap map[string]string, jwt security.EncodedJwt, compressionLevel int) (*UploadResult, error) { if compressionLevel < 1 { compressionLevel = 1 } if compressionLevel > 9 { compressionLevel = 9 } - return doUpload(uploadUrl, filename, reader, isGzipped, mtype, pairMap, compressionLevel, jwt) + return doUpload(uploadUrl, filename, cipher, reader, isGzipped, mtype, pairMap, compressionLevel, jwt) } // Upload sends a POST request to a volume server to upload the content with fast compression -func Upload(uploadUrl string, filename string, reader io.Reader, isGzipped bool, mtype string, pairMap map[string]string, jwt security.EncodedJwt) (*UploadResult, error) { - return doUpload(uploadUrl, filename, reader, isGzipped, mtype, pairMap, flate.BestSpeed, jwt) +func Upload(uploadUrl string, filename string, cipher bool, reader io.Reader, isGzipped bool, mtype string, pairMap map[string]string, jwt security.EncodedJwt) (*UploadResult, error) { + return doUpload(uploadUrl, filename, cipher, reader, isGzipped, mtype, pairMap, flate.BestSpeed, jwt) } -func doUpload(uploadUrl string, filename string, reader io.Reader, isGzipped bool, mtype string, pairMap map[string]string, compression int, jwt security.EncodedJwt) (*UploadResult, error) { +func doUpload(uploadUrl string, filename string, cipher bool, reader io.Reader, isGzipped bool, mtype string, pairMap map[string]string, compression int, jwt security.EncodedJwt) (*UploadResult, error) { contentIsGzipped := isGzipped shouldGzipNow := false if !isGzipped { @@ -65,7 +66,25 @@ func doUpload(uploadUrl string, filename string, reader io.Reader, isGzipped boo contentIsGzipped = true } } - return upload_content(uploadUrl, func(w io.Writer) (err error) { + // encrypt data + var cipherKey util.CipherKey + var clearDataLen int + if cipher { + clearData, err := ioutil.ReadAll(reader) + if err != nil { + return nil, fmt.Errorf("read raw input: %v", err) + } + clearDataLen = len(clearData) + cipherKey = util.GenCipherKey() + encryptedData, err := util.Encrypt(clearData, cipherKey) + if err != nil { + return nil, fmt.Errorf("encrypt input: %v", err) + } + reader = bytes.NewReader(encryptedData) + } + + // upload data + uploadResult, err := upload_content(uploadUrl, func(w io.Writer) (err error) { if shouldGzipNow { gzWriter, _ := gzip.NewWriterLevel(w, compression) _, err = io.Copy(gzWriter, reader) @@ -75,6 +94,14 @@ func doUpload(uploadUrl string, filename string, reader io.Reader, isGzipped boo } return }, filename, contentIsGzipped, mtype, pairMap, jwt) + + // remember cipher key + if uploadResult != nil && cipherKey != nil { + uploadResult.CipherKey = cipherKey + uploadResult.Size = uint32(clearDataLen) + } + + return uploadResult, err } func upload_content(uploadUrl string, fillBufferFunction func(w io.Writer) error, filename string, isGzipped bool, mtype string, pairMap map[string]string, jwt security.EncodedJwt) (*UploadResult, error) { diff --git a/weed/pb/filer.proto b/weed/pb/filer.proto index 9ee552561..5983c84d8 100644 --- a/weed/pb/filer.proto +++ b/weed/pb/filer.proto @@ -99,6 +99,7 @@ message FileChunk { string 
source_file_id = 6; // to be deprecated FileId fid = 7; FileId source_fid = 8; + bytes cipher_key = 9; } message FileId { @@ -229,4 +230,5 @@ message GetFilerConfigurationResponse { uint32 max_mb = 4; string dir_buckets = 5; string dir_queues = 6; + bool cipher = 7; } diff --git a/weed/pb/filer_pb/filer.pb.go b/weed/pb/filer_pb/filer.pb.go index 269abb8c7..92a12321c 100644 --- a/weed/pb/filer_pb/filer.pb.go +++ b/weed/pb/filer_pb/filer.pb.go @@ -287,6 +287,7 @@ type FileChunk struct { SourceFileId string `protobuf:"bytes,6,opt,name=source_file_id,json=sourceFileId" json:"source_file_id,omitempty"` Fid *FileId `protobuf:"bytes,7,opt,name=fid" json:"fid,omitempty"` SourceFid *FileId `protobuf:"bytes,8,opt,name=source_fid,json=sourceFid" json:"source_fid,omitempty"` + CipherKey []byte `protobuf:"bytes,9,opt,name=cipher_key,json=cipherKey,proto3" json:"cipher_key,omitempty"` } func (m *FileChunk) Reset() { *m = FileChunk{} } @@ -350,6 +351,13 @@ func (m *FileChunk) GetSourceFid() *FileId { return nil } +func (m *FileChunk) GetCipherKey() []byte { + if m != nil { + return m.CipherKey + } + return nil +} + type FileId struct { VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` FileKey uint64 `protobuf:"varint,2,opt,name=file_key,json=fileKey" json:"file_key,omitempty"` @@ -1014,6 +1022,7 @@ type GetFilerConfigurationResponse struct { MaxMb uint32 `protobuf:"varint,4,opt,name=max_mb,json=maxMb" json:"max_mb,omitempty"` DirBuckets string `protobuf:"bytes,5,opt,name=dir_buckets,json=dirBuckets" json:"dir_buckets,omitempty"` DirQueues string `protobuf:"bytes,6,opt,name=dir_queues,json=dirQueues" json:"dir_queues,omitempty"` + Cipher bool `protobuf:"varint,7,opt,name=cipher" json:"cipher,omitempty"` } func (m *GetFilerConfigurationResponse) Reset() { *m = GetFilerConfigurationResponse{} } @@ -1063,6 +1072,13 @@ func (m *GetFilerConfigurationResponse) GetDirQueues() string { return "" } +func (m *GetFilerConfigurationResponse) GetCipher() bool { + if m != nil { + return m.Cipher + } + return false +} + func init() { proto.RegisterType((*LookupDirectoryEntryRequest)(nil), "filer_pb.LookupDirectoryEntryRequest") proto.RegisterType((*LookupDirectoryEntryResponse)(nil), "filer_pb.LookupDirectoryEntryResponse") @@ -1594,113 +1610,114 @@ var _SeaweedFiler_serviceDesc = grpc.ServiceDesc{ func init() { proto.RegisterFile("filer.proto", fileDescriptor0) } var fileDescriptor0 = []byte{ - // 1713 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xb4, 0x58, 0xcb, 0x6e, 0xdb, 0xca, - 0x19, 0x36, 0x75, 0xe7, 0x2f, 0x29, 0xb1, 0x47, 0x76, 0xa2, 0xc8, 0x97, 0x3a, 0x74, 0x93, 0xba, - 0x48, 0xe0, 0x1a, 0x6e, 0x16, 0x49, 0xd3, 0x2e, 0x12, 0x5f, 0x0a, 0xa3, 0xce, 0xa5, 0x74, 0x52, - 0xa4, 0x28, 0x50, 0x82, 0x26, 0x47, 0xf2, 0xd4, 0x24, 0x47, 0x19, 0x0e, 0x6d, 0xa7, 0x8f, 0x52, - 0xa0, 0x8b, 0xbe, 0x47, 0xd1, 0x4d, 0x51, 0xa0, 0xeb, 0xf3, 0x08, 0xe7, 0x01, 0xce, 0xf2, 0xac, - 0x0f, 0x66, 0x86, 0xa4, 0x86, 0xa2, 0x6c, 0x27, 0xe7, 0x20, 0x3b, 0xce, 0x7f, 0x9b, 0x7f, 0xbe, - 0xff, 0x2a, 0x41, 0x7b, 0x48, 0x02, 0xcc, 0xb6, 0xc6, 0x8c, 0x72, 0x8a, 0x5a, 0xf2, 0xe0, 0x8c, - 0x4f, 0xac, 0x37, 0xb0, 0x7c, 0x44, 0xe9, 0x59, 0x32, 0xde, 0x23, 0x0c, 0x7b, 0x9c, 0xb2, 0x4f, - 0xfb, 0x11, 0x67, 0x9f, 0x6c, 0xfc, 0x31, 0xc1, 0x31, 0x47, 0x2b, 0x60, 0xfa, 0x19, 0xa3, 0x6f, - 0xac, 0x1b, 0x9b, 0xa6, 0x3d, 0x21, 0x20, 0x04, 0xb5, 0xc8, 0x0d, 0x71, 0xbf, 0x22, 0x19, 0xf2, - 0xdb, 0xda, 0x87, 0x95, 0xd9, 0x06, 0xe3, 0x31, 0x8d, 0x62, 0x8c, 
0x1e, 0x40, 0x1d, 0x0b, 0x82, - 0xb4, 0xd6, 0xde, 0xb9, 0xbd, 0x95, 0xb9, 0xb2, 0xa5, 0xe4, 0x14, 0xd7, 0xfa, 0x8f, 0x01, 0xe8, - 0x88, 0xc4, 0x5c, 0x10, 0x09, 0x8e, 0x3f, 0xcf, 0x9f, 0x3b, 0xd0, 0x18, 0x33, 0x3c, 0x24, 0x97, - 0xa9, 0x47, 0xe9, 0x09, 0x3d, 0x86, 0x85, 0x98, 0xbb, 0x8c, 0x1f, 0x30, 0x1a, 0x1e, 0x90, 0x00, - 0xbf, 0x16, 0x4e, 0x57, 0xa5, 0x48, 0x99, 0x81, 0xb6, 0x00, 0x91, 0xc8, 0x0b, 0x92, 0x98, 0x9c, - 0xe3, 0xe3, 0x8c, 0xdb, 0xaf, 0xad, 0x1b, 0x9b, 0x2d, 0x7b, 0x06, 0x07, 0x2d, 0x42, 0x3d, 0x20, - 0x21, 0xe1, 0xfd, 0xfa, 0xba, 0xb1, 0xd9, 0xb5, 0xd5, 0xc1, 0xfa, 0x2d, 0xf4, 0x0a, 0xfe, 0x7f, - 0xd9, 0xf3, 0xff, 0x59, 0x81, 0xba, 0x24, 0xe4, 0x18, 0x1b, 0x13, 0x8c, 0xd1, 0x7d, 0xe8, 0x90, - 0xd8, 0x99, 0x00, 0x51, 0x91, 0xbe, 0xb5, 0x49, 0x9c, 0x63, 0x8e, 0x1e, 0x41, 0xc3, 0x3b, 0x4d, - 0xa2, 0xb3, 0xb8, 0x5f, 0x5d, 0xaf, 0x6e, 0xb6, 0x77, 0x7a, 0x93, 0x8b, 0xc4, 0x43, 0x77, 0x05, - 0xcf, 0x4e, 0x45, 0xd0, 0x53, 0x00, 0x97, 0x73, 0x46, 0x4e, 0x12, 0x8e, 0x63, 0xf9, 0xd2, 0xf6, - 0x4e, 0x5f, 0x53, 0x48, 0x62, 0xfc, 0x22, 0xe7, 0xdb, 0x9a, 0x2c, 0x7a, 0x06, 0x2d, 0x7c, 0xc9, - 0x71, 0xe4, 0x63, 0xbf, 0x5f, 0x97, 0x17, 0xad, 0x4e, 0xbd, 0x68, 0x6b, 0x3f, 0xe5, 0xab, 0xf7, - 0xe5, 0xe2, 0x83, 0xe7, 0xd0, 0x2d, 0xb0, 0xd0, 0x3c, 0x54, 0xcf, 0x70, 0x16, 0x55, 0xf1, 0x29, - 0x90, 0x3d, 0x77, 0x83, 0x44, 0x25, 0x58, 0xc7, 0x56, 0x87, 0xdf, 0x54, 0x9e, 0x1a, 0xd6, 0x1e, - 0x98, 0x07, 0x49, 0x10, 0xe4, 0x8a, 0x3e, 0x61, 0x99, 0xa2, 0x4f, 0xd8, 0x04, 0xe5, 0xca, 0xb5, - 0x28, 0xff, 0xdb, 0x80, 0x85, 0xfd, 0x73, 0x1c, 0xf1, 0xd7, 0x94, 0x93, 0x21, 0xf1, 0x5c, 0x4e, - 0x68, 0x84, 0x1e, 0x83, 0x49, 0x03, 0xdf, 0xb9, 0x36, 0x4c, 0x2d, 0x1a, 0xa4, 0x5e, 0x3f, 0x06, - 0x33, 0xc2, 0x17, 0xce, 0xb5, 0xd7, 0xb5, 0x22, 0x7c, 0xa1, 0xa4, 0x37, 0xa0, 0xeb, 0xe3, 0x00, - 0x73, 0xec, 0xe4, 0xd1, 0x11, 0xa1, 0xeb, 0x28, 0xe2, 0xae, 0x0a, 0xc7, 0x43, 0xb8, 0x2d, 0x4c, - 0x8e, 0x5d, 0x86, 0x23, 0xee, 0x8c, 0x5d, 0x7e, 0x2a, 0x63, 0x62, 0xda, 0xdd, 0x08, 0x5f, 0xbc, - 0x95, 0xd4, 0xb7, 0x2e, 0x3f, 0xb5, 0xbe, 0x37, 0xc0, 0xcc, 0x83, 0x89, 0xee, 0x42, 0x53, 0x5c, - 0xeb, 0x10, 0x3f, 0x45, 0xa2, 0x21, 0x8e, 0x87, 0xbe, 0xa8, 0x0a, 0x3a, 0x1c, 0xc6, 0x98, 0x4b, - 0xf7, 0xaa, 0x76, 0x7a, 0x12, 0x99, 0x15, 0x93, 0xbf, 0xab, 0x42, 0xa8, 0xd9, 0xf2, 0x5b, 0x20, - 0x1e, 0x72, 0x12, 0x62, 0x79, 0x61, 0xd5, 0x56, 0x07, 0xd4, 0x83, 0x3a, 0x76, 0xb8, 0x3b, 0x92, - 0x19, 0x6e, 0xda, 0x35, 0xfc, 0xce, 0x1d, 0xa1, 0x9f, 0xc3, 0xad, 0x98, 0x26, 0xcc, 0xc3, 0x4e, - 0x76, 0x6d, 0x43, 0x72, 0x3b, 0x8a, 0x7a, 0xa0, 0x2e, 0xb7, 0xa0, 0x3a, 0x24, 0x7e, 0xbf, 0x29, - 0x81, 0x99, 0x2f, 0x26, 0xe1, 0xa1, 0x6f, 0x0b, 0x26, 0xfa, 0x15, 0x40, 0x6e, 0xc9, 0xef, 0xb7, - 0xae, 0x10, 0x35, 0x33, 0xbb, 0xbe, 0xf5, 0x01, 0x1a, 0xa9, 0xf9, 0x65, 0x30, 0xcf, 0x69, 0x90, - 0x84, 0xf9, 0xb3, 0xbb, 0x76, 0x4b, 0x11, 0x0e, 0x7d, 0x74, 0x0f, 0x64, 0x9f, 0x73, 0x44, 0x56, - 0x55, 0xe4, 0x23, 0x25, 0x42, 0x7f, 0xc0, 0xb2, 0x53, 0x78, 0x94, 0x9e, 0x11, 0xf5, 0xfa, 0xa6, - 0x9d, 0x9e, 0xac, 0xef, 0x2a, 0x70, 0xab, 0x98, 0xee, 0xe2, 0x0a, 0x69, 0x45, 0x62, 0x65, 0x48, - 0x33, 0xd2, 0xec, 0x71, 0x01, 0xaf, 0x8a, 0x8e, 0x57, 0xa6, 0x12, 0x52, 0x5f, 0x5d, 0xd0, 0x55, - 0x2a, 0xaf, 0xa8, 0x8f, 0x45, 0xb6, 0x26, 0xc4, 0x97, 0x00, 0x77, 0x6d, 0xf1, 0x29, 0x28, 0x23, - 0xe2, 0xa7, 0xed, 0x43, 0x7c, 0x4a, 0xf7, 0x98, 0xb4, 0xdb, 0x50, 0x21, 0x53, 0x27, 0x11, 0xb2, - 0x50, 0x50, 0x9b, 0x2a, 0x0e, 0xe2, 0x1b, 0xad, 0x43, 0x9b, 0xe1, 0x71, 0x90, 0x66, 0xaf, 0x84, - 0xcf, 0xb4, 0x75, 0x12, 0x5a, 0x03, 0xf0, 0x68, 0x10, 0x60, 0x4f, 0x0a, 0x98, 0x52, 0x40, 
0xa3, - 0x88, 0xcc, 0xe1, 0x3c, 0x70, 0x62, 0xec, 0xf5, 0x61, 0xdd, 0xd8, 0xac, 0xdb, 0x0d, 0xce, 0x83, - 0x63, 0xec, 0x89, 0x77, 0x24, 0x31, 0x66, 0x8e, 0x6c, 0x40, 0x6d, 0xa9, 0xd7, 0x12, 0x04, 0xd9, - 0x26, 0x57, 0x01, 0x46, 0x8c, 0x26, 0x63, 0xc5, 0xed, 0xac, 0x57, 0x45, 0x2f, 0x96, 0x14, 0xc9, - 0x7e, 0x00, 0xb7, 0xe2, 0x4f, 0x61, 0x40, 0xa2, 0x33, 0x87, 0xbb, 0x6c, 0x84, 0x79, 0xbf, 0xab, - 0x72, 0x38, 0xa5, 0xbe, 0x93, 0x44, 0x6b, 0x0c, 0x68, 0x97, 0x61, 0x97, 0xe3, 0x2f, 0x18, 0x3b, - 0x9f, 0x57, 0xdd, 0x68, 0x09, 0x1a, 0xd4, 0xc1, 0x97, 0x5e, 0x90, 0x16, 0x59, 0x9d, 0xee, 0x5f, - 0x7a, 0x81, 0xf5, 0x08, 0x7a, 0x85, 0x1b, 0xd3, 0xc6, 0xbc, 0x08, 0x75, 0xcc, 0x18, 0xcd, 0xda, - 0x88, 0x3a, 0x58, 0x7f, 0x06, 0xf4, 0x7e, 0xec, 0x7f, 0x0d, 0xf7, 0xac, 0x25, 0xe8, 0x15, 0x4c, - 0x2b, 0x3f, 0xac, 0xff, 0x19, 0x80, 0xf6, 0x64, 0x37, 0xf8, 0x69, 0x83, 0x58, 0xd4, 0xa7, 0x18, - 0x12, 0xaa, 0xdb, 0xf8, 0x2e, 0x77, 0xd3, 0x11, 0xd6, 0x21, 0xb1, 0xb2, 0xbf, 0xe7, 0x72, 0x37, - 0x1d, 0x25, 0x0c, 0x7b, 0x09, 0x13, 0x53, 0x4d, 0x26, 0xa1, 0x1c, 0x25, 0x76, 0x46, 0x42, 0x4f, - 0xe0, 0x0e, 0x19, 0x45, 0x94, 0xe1, 0x89, 0x98, 0xa3, 0xa0, 0x6a, 0x48, 0xe1, 0x45, 0xc5, 0xcd, - 0x15, 0xf6, 0x25, 0x72, 0x8f, 0xa0, 0x57, 0x78, 0xc6, 0xb5, 0x30, 0xff, 0xc3, 0x80, 0xfe, 0x0b, - 0x4e, 0x43, 0xe2, 0xd9, 0x58, 0x38, 0x5f, 0x78, 0xfa, 0x06, 0x74, 0x45, 0x3f, 0x9e, 0x7e, 0x7e, - 0x87, 0x06, 0xfe, 0x64, 0xde, 0xdd, 0x03, 0xd1, 0x92, 0x1d, 0x0d, 0x85, 0x26, 0x0d, 0x7c, 0x99, - 0x89, 0x1b, 0x20, 0xfa, 0xa6, 0xa6, 0xaf, 0x26, 0x7f, 0x27, 0xc2, 0x17, 0x05, 0x7d, 0x21, 0x24, - 0xf5, 0x55, 0xb3, 0x6d, 0x46, 0xf8, 0x42, 0xe8, 0x5b, 0xcb, 0x70, 0x6f, 0x86, 0x6f, 0x69, 0xb8, - 0xfe, 0x6f, 0x40, 0xef, 0x45, 0x1c, 0x93, 0x51, 0xf4, 0x27, 0xd9, 0x76, 0x32, 0xa7, 0x17, 0xa1, - 0xee, 0xd1, 0x24, 0xe2, 0xd2, 0xd9, 0xba, 0xad, 0x0e, 0x53, 0x95, 0x58, 0x29, 0x55, 0xe2, 0x54, - 0x2d, 0x57, 0xcb, 0xb5, 0xac, 0xd5, 0x6a, 0xad, 0x50, 0xab, 0x3f, 0x83, 0xb6, 0x08, 0xb2, 0xe3, - 0xe1, 0x88, 0x63, 0x96, 0x76, 0x6a, 0x10, 0xa4, 0x5d, 0x49, 0x11, 0x02, 0xfa, 0x44, 0x51, 0xcd, - 0x1a, 0xc6, 0x93, 0x71, 0xf2, 0xad, 0x01, 0x8b, 0xc5, 0xa7, 0xa4, 0x31, 0xbb, 0x72, 0xb2, 0x88, - 0x56, 0xc6, 0x82, 0xf4, 0x1d, 0xe2, 0x53, 0x34, 0x85, 0x71, 0x72, 0x12, 0x10, 0xcf, 0x11, 0x0c, - 0xe5, 0xbf, 0xa9, 0x28, 0xef, 0x59, 0x30, 0x41, 0xa5, 0xa6, 0xa3, 0x82, 0xa0, 0xe6, 0x26, 0xfc, - 0x34, 0x9b, 0x2e, 0xe2, 0x7b, 0x0a, 0xa9, 0xc6, 0x4d, 0x48, 0x35, 0xcb, 0x48, 0xe5, 0x99, 0xd6, - 0xd2, 0x33, 0xed, 0x09, 0xf4, 0xd4, 0x7a, 0x5a, 0x0c, 0xd7, 0x2a, 0x40, 0x3e, 0x47, 0xe2, 0xbe, - 0xa1, 0x9a, 0x59, 0x36, 0x48, 0x62, 0xeb, 0x77, 0x60, 0x1e, 0x51, 0x65, 0x37, 0x46, 0xdb, 0x60, - 0x06, 0xd9, 0x41, 0x8a, 0xb6, 0x77, 0xd0, 0xa4, 0xc6, 0x33, 0x39, 0x7b, 0x22, 0x64, 0x3d, 0x87, - 0x56, 0x46, 0xce, 0x30, 0x33, 0xae, 0xc2, 0xac, 0x32, 0x85, 0x99, 0xf5, 0x5f, 0x03, 0x16, 0x8b, - 0x2e, 0xa7, 0x61, 0x79, 0x0f, 0xdd, 0xfc, 0x0a, 0x27, 0x74, 0xc7, 0xa9, 0x2f, 0xdb, 0xba, 0x2f, - 0x65, 0xb5, 0xdc, 0xc1, 0xf8, 0x95, 0x3b, 0x56, 0xb9, 0xdc, 0x09, 0x34, 0xd2, 0xe0, 0x1d, 0x2c, - 0x94, 0x44, 0x66, 0xec, 0x66, 0xbf, 0xd4, 0x77, 0xb3, 0xc2, 0x7e, 0x99, 0x6b, 0xeb, 0x0b, 0xdb, - 0x33, 0xb8, 0xab, 0xda, 0xc1, 0x6e, 0x1e, 0xc3, 0x0c, 0xfb, 0x62, 0xa8, 0x8d, 0xe9, 0x50, 0x5b, - 0x03, 0xe8, 0x97, 0x55, 0xd3, 0xf2, 0x1b, 0xc1, 0xc2, 0x31, 0x77, 0x39, 0x89, 0x39, 0xf1, 0xf2, - 0x1f, 0x09, 0x53, 0xb9, 0x61, 0xdc, 0x34, 0x11, 0xcb, 0x75, 0x38, 0x0f, 0x55, 0xce, 0xb3, 0xfc, - 0x15, 0x9f, 0x22, 0x0a, 0x48, 0xbf, 0x29, 0x8d, 0xc1, 0x57, 0xb8, 0x4a, 0xe4, 0x03, 0xa7, 0xdc, - 0x0d, 0xd4, 0xc6, 
0x51, 0x93, 0x1b, 0x87, 0x29, 0x29, 0x72, 0xe5, 0x50, 0x43, 0xd9, 0x57, 0xdc, - 0xba, 0xda, 0x47, 0x04, 0x41, 0x32, 0x57, 0x01, 0x64, 0xa9, 0xaa, 0x2a, 0x6b, 0x28, 0x5d, 0x41, - 0xd9, 0x15, 0x04, 0x6b, 0x0d, 0x56, 0x7e, 0x8f, 0xb9, 0xd8, 0x9d, 0xd8, 0x2e, 0x8d, 0x86, 0x64, - 0x94, 0x30, 0x57, 0x0b, 0x85, 0xf5, 0x8d, 0x01, 0xab, 0x57, 0x08, 0xa4, 0x0f, 0xee, 0x43, 0x33, - 0x74, 0x63, 0x8e, 0x59, 0x56, 0x25, 0xd9, 0x71, 0x1a, 0x8a, 0xca, 0x4d, 0x50, 0x54, 0x4b, 0x50, - 0x2c, 0x41, 0x23, 0x74, 0x2f, 0x9d, 0xf0, 0x24, 0x5d, 0x8e, 0xea, 0xa1, 0x7b, 0xf9, 0xea, 0x44, - 0x76, 0x36, 0xc2, 0x9c, 0x93, 0xc4, 0x3b, 0xc3, 0x3c, 0xce, 0x3b, 0x1b, 0x61, 0x2f, 0x15, 0x45, - 0x3c, 0x5a, 0x08, 0x7c, 0x4c, 0x70, 0x82, 0xe3, 0xb4, 0x57, 0x88, 0xe1, 0xf8, 0x47, 0x49, 0xd8, - 0xf9, 0x57, 0x0b, 0x3a, 0xc7, 0xd8, 0xbd, 0xc0, 0xd8, 0x97, 0x0f, 0x43, 0xa3, 0xac, 0xa0, 0x8a, - 0x3f, 0x51, 0xd1, 0x83, 0xe9, 0xca, 0x99, 0xf9, 0x9b, 0x78, 0xf0, 0xf0, 0x26, 0xb1, 0x34, 0x37, - 0xe7, 0xd0, 0x6b, 0x68, 0x6b, 0xbf, 0x01, 0xd1, 0x8a, 0xa6, 0x58, 0xfa, 0x69, 0x3b, 0x58, 0xbd, - 0x82, 0x9b, 0x59, 0xdb, 0x36, 0xd0, 0x11, 0xb4, 0xb5, 0xd5, 0x45, 0xb7, 0x57, 0xde, 0xa1, 0x74, - 0x7b, 0x33, 0xf6, 0x1d, 0x6b, 0x4e, 0x58, 0xd3, 0x16, 0x10, 0xdd, 0x5a, 0x79, 0xe5, 0xd1, 0xad, - 0xcd, 0xda, 0x5a, 0xa4, 0x35, 0x6d, 0xde, 0xeb, 0xd6, 0xca, 0xdb, 0x8c, 0x6e, 0x6d, 0xc6, 0x92, - 0x60, 0xcd, 0xa1, 0x0f, 0xd0, 0x3b, 0xe6, 0x0c, 0xbb, 0xe1, 0x84, 0x3d, 0x85, 0xe0, 0x8f, 0xb0, - 0xba, 0x69, 0x6c, 0x1b, 0xe8, 0xaf, 0xb0, 0x50, 0x9a, 0xe6, 0xc8, 0x9a, 0x68, 0x5e, 0xb5, 0x86, - 0x0c, 0x36, 0xae, 0x95, 0xc9, 0x3d, 0x7f, 0x03, 0x1d, 0x7d, 0x88, 0x22, 0xcd, 0xa9, 0x19, 0x7b, - 0xc2, 0x60, 0xed, 0x2a, 0xb6, 0x6e, 0x50, 0xef, 0xe3, 0xba, 0xc1, 0x19, 0x93, 0x4c, 0x37, 0x38, - 0xab, 0xfd, 0x5b, 0x73, 0xe8, 0x2f, 0x30, 0x3f, 0xdd, 0x4f, 0xd1, 0xfd, 0x69, 0xe8, 0x4a, 0x6d, - 0x7a, 0x60, 0x5d, 0x27, 0x92, 0x1b, 0x3f, 0x04, 0x98, 0xb4, 0x49, 0xb4, 0x3c, 0xd1, 0x29, 0xb5, - 0xe9, 0xc1, 0xca, 0x6c, 0x66, 0x6e, 0xea, 0x6f, 0xb0, 0x34, 0xb3, 0x17, 0x21, 0xad, 0x00, 0xaf, - 0xeb, 0x66, 0x83, 0x5f, 0xdc, 0x28, 0x97, 0xdd, 0xf5, 0x72, 0x0d, 0xe6, 0x63, 0xd5, 0x22, 0x86, - 0xf1, 0x96, 0x17, 0x10, 0x1c, 0xf1, 0x97, 0x20, 0x35, 0xde, 0x32, 0xca, 0xe9, 0x49, 0x43, 0xfe, - 0x6f, 0xf6, 0xeb, 0x1f, 0x02, 0x00, 0x00, 0xff, 0xff, 0xdb, 0x25, 0xa7, 0x6f, 0x46, 0x13, 0x00, - 0x00, + // 1742 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xb4, 0x58, 0x4f, 0x6f, 0xdc, 0xc6, + 0x15, 0x37, 0xf7, 0x3f, 0xdf, 0xee, 0x3a, 0xd2, 0xac, 0x9c, 0xac, 0xd7, 0x92, 0xab, 0xd0, 0x75, + 0xaa, 0xc2, 0x86, 0x6a, 0xa8, 0x39, 0x24, 0x4d, 0x7b, 0xb0, 0x65, 0xb9, 0x30, 0x62, 0x3b, 0x2e, + 0x65, 0x17, 0x29, 0x0a, 0x94, 0xa0, 0xc8, 0xd9, 0xd5, 0x54, 0x24, 0x87, 0x19, 0x0e, 0x2d, 0xb9, + 0xdf, 0xa3, 0x97, 0x02, 0x3d, 0xf4, 0x7b, 0x14, 0xbd, 0x14, 0x05, 0xfa, 0x39, 0x7a, 0xec, 0xa1, + 0x9f, 0xa1, 0x98, 0x37, 0x24, 0x77, 0xb8, 0x5c, 0x49, 0x49, 0x8b, 0xdc, 0x38, 0xef, 0xdf, 0xbc, + 0xf9, 0xbd, 0xbf, 0xbb, 0x30, 0x9c, 0xb3, 0x88, 0x8a, 0xfd, 0x54, 0x70, 0xc9, 0xc9, 0x00, 0x0f, + 0x5e, 0x7a, 0xe2, 0x7c, 0x05, 0x77, 0x5e, 0x70, 0x7e, 0x96, 0xa7, 0x4f, 0x99, 0xa0, 0x81, 0xe4, + 0xe2, 0xfd, 0x51, 0x22, 0xc5, 0x7b, 0x97, 0x7e, 0x93, 0xd3, 0x4c, 0x92, 0x6d, 0xb0, 0xc3, 0x92, + 0x31, 0xb5, 0x76, 0xad, 0x3d, 0xdb, 0x5d, 0x12, 0x08, 0x81, 0x4e, 0xe2, 0xc7, 0x74, 0xda, 0x42, + 0x06, 0x7e, 0x3b, 0x47, 0xb0, 0xbd, 0xde, 0x60, 0x96, 0xf2, 0x24, 0xa3, 0xe4, 0x3e, 0x74, 0xa9, + 0x22, 0xa0, 0xb5, 0xe1, 0xc1, 0x07, 0xfb, 0xa5, 0x2b, 0xfb, 0x5a, 0x4e, 0x73, 0x9d, 
0xbf, 0x59, + 0x40, 0x5e, 0xb0, 0x4c, 0x2a, 0x22, 0xa3, 0xd9, 0xb7, 0xf3, 0xe7, 0x43, 0xe8, 0xa5, 0x82, 0xce, + 0xd9, 0x45, 0xe1, 0x51, 0x71, 0x22, 0x0f, 0x61, 0x33, 0x93, 0xbe, 0x90, 0xcf, 0x04, 0x8f, 0x9f, + 0xb1, 0x88, 0xbe, 0x52, 0x4e, 0xb7, 0x51, 0xa4, 0xc9, 0x20, 0xfb, 0x40, 0x58, 0x12, 0x44, 0x79, + 0xc6, 0xde, 0xd1, 0xe3, 0x92, 0x3b, 0xed, 0xec, 0x5a, 0x7b, 0x03, 0x77, 0x0d, 0x87, 0x6c, 0x41, + 0x37, 0x62, 0x31, 0x93, 0xd3, 0xee, 0xae, 0xb5, 0x37, 0x76, 0xf5, 0xc1, 0xf9, 0x39, 0x4c, 0x6a, + 0xfe, 0x7f, 0xb7, 0xe7, 0xff, 0xb9, 0x05, 0x5d, 0x24, 0x54, 0x18, 0x5b, 0x4b, 0x8c, 0xc9, 0xc7, + 0x30, 0x62, 0x99, 0xb7, 0x04, 0xa2, 0x85, 0xbe, 0x0d, 0x59, 0x56, 0x61, 0x4e, 0x1e, 0x40, 0x2f, + 0x38, 0xcd, 0x93, 0xb3, 0x6c, 0xda, 0xde, 0x6d, 0xef, 0x0d, 0x0f, 0x26, 0xcb, 0x8b, 0xd4, 0x43, + 0x0f, 0x15, 0xcf, 0x2d, 0x44, 0xc8, 0x67, 0x00, 0xbe, 0x94, 0x82, 0x9d, 0xe4, 0x92, 0x66, 0xf8, + 0xd2, 0xe1, 0xc1, 0xd4, 0x50, 0xc8, 0x33, 0xfa, 0xb8, 0xe2, 0xbb, 0x86, 0x2c, 0xf9, 0x1c, 0x06, + 0xf4, 0x42, 0xd2, 0x24, 0xa4, 0xe1, 0xb4, 0x8b, 0x17, 0xed, 0xac, 0xbc, 0x68, 0xff, 0xa8, 0xe0, + 0xeb, 0xf7, 0x55, 0xe2, 0xb3, 0x2f, 0x60, 0x5c, 0x63, 0x91, 0x0d, 0x68, 0x9f, 0xd1, 0x32, 0xaa, + 0xea, 0x53, 0x21, 0xfb, 0xce, 0x8f, 0x72, 0x9d, 0x60, 0x23, 0x57, 0x1f, 0x7e, 0xd6, 0xfa, 0xcc, + 0x72, 0x9e, 0x82, 0xfd, 0x2c, 0x8f, 0xa2, 0x4a, 0x31, 0x64, 0xa2, 0x54, 0x0c, 0x99, 0x58, 0xa2, + 0xdc, 0xba, 0x12, 0xe5, 0xbf, 0x5a, 0xb0, 0x79, 0xf4, 0x8e, 0x26, 0xf2, 0x15, 0x97, 0x6c, 0xce, + 0x02, 0x5f, 0x32, 0x9e, 0x90, 0x87, 0x60, 0xf3, 0x28, 0xf4, 0xae, 0x0c, 0xd3, 0x80, 0x47, 0x85, + 0xd7, 0x0f, 0xc1, 0x4e, 0xe8, 0xb9, 0x77, 0xe5, 0x75, 0x83, 0x84, 0x9e, 0x6b, 0xe9, 0x7b, 0x30, + 0x0e, 0x69, 0x44, 0x25, 0xf5, 0xaa, 0xe8, 0xa8, 0xd0, 0x8d, 0x34, 0xf1, 0x50, 0x87, 0xe3, 0x13, + 0xf8, 0x40, 0x99, 0x4c, 0x7d, 0x41, 0x13, 0xe9, 0xa5, 0xbe, 0x3c, 0xc5, 0x98, 0xd8, 0xee, 0x38, + 0xa1, 0xe7, 0xaf, 0x91, 0xfa, 0xda, 0x97, 0xa7, 0xce, 0x1f, 0x5b, 0x60, 0x57, 0xc1, 0x24, 0x1f, + 0x41, 0x5f, 0x5d, 0xeb, 0xb1, 0xb0, 0x40, 0xa2, 0xa7, 0x8e, 0xcf, 0x43, 0x55, 0x15, 0x7c, 0x3e, + 0xcf, 0xa8, 0x44, 0xf7, 0xda, 0x6e, 0x71, 0x52, 0x99, 0x95, 0xb1, 0x3f, 0xe8, 0x42, 0xe8, 0xb8, + 0xf8, 0xad, 0x10, 0x8f, 0x25, 0x8b, 0x29, 0x5e, 0xd8, 0x76, 0xf5, 0x81, 0x4c, 0xa0, 0x4b, 0x3d, + 0xe9, 0x2f, 0x30, 0xc3, 0x6d, 0xb7, 0x43, 0xdf, 0xf8, 0x0b, 0xf2, 0x43, 0xb8, 0x99, 0xf1, 0x5c, + 0x04, 0xd4, 0x2b, 0xaf, 0xed, 0x21, 0x77, 0xa4, 0xa9, 0xcf, 0xf4, 0xe5, 0x0e, 0xb4, 0xe7, 0x2c, + 0x9c, 0xf6, 0x11, 0x98, 0x8d, 0x7a, 0x12, 0x3e, 0x0f, 0x5d, 0xc5, 0x24, 0x3f, 0x01, 0xa8, 0x2c, + 0x85, 0xd3, 0xc1, 0x25, 0xa2, 0x76, 0x69, 0x37, 0x24, 0x3b, 0x00, 0x01, 0x4b, 0x4f, 0xa9, 0xf0, + 0x54, 0xc2, 0xd8, 0x98, 0x1c, 0xb6, 0xa6, 0x7c, 0x49, 0xdf, 0x3b, 0x5f, 0x43, 0xaf, 0xb8, 0xfd, + 0x0e, 0xd8, 0xef, 0x78, 0x94, 0xc7, 0x15, 0x2a, 0x63, 0x77, 0xa0, 0x09, 0xcf, 0x43, 0x72, 0x1b, + 0xb0, 0x0d, 0xa2, 0x8d, 0x16, 0x62, 0x80, 0x00, 0x7e, 0x49, 0xb1, 0x91, 0x04, 0x9c, 0x9f, 0x31, + 0x0d, 0x4e, 0xdf, 0x2d, 0x4e, 0xce, 0x7f, 0x5a, 0x70, 0xb3, 0x5e, 0x0d, 0xea, 0x0a, 0xb4, 0x82, + 0x50, 0x5a, 0x68, 0x06, 0xcd, 0x1e, 0xd7, 0xe0, 0x6c, 0x99, 0x70, 0x96, 0x2a, 0x31, 0x0f, 0xf5, + 0x05, 0x63, 0xad, 0xf2, 0x92, 0x87, 0x54, 0x25, 0x73, 0xce, 0x42, 0xc4, 0x7f, 0xec, 0xaa, 0x4f, + 0x45, 0x59, 0xb0, 0xb0, 0xe8, 0x2e, 0xea, 0x13, 0xdd, 0x13, 0x68, 0xb7, 0xa7, 0x23, 0xaa, 0x4f, + 0x2a, 0xa2, 0xb1, 0xa2, 0xf6, 0x75, 0x98, 0xd4, 0x37, 0xd9, 0x85, 0xa1, 0xa0, 0x69, 0x54, 0x24, + 0x37, 0xa2, 0x6b, 0xbb, 0x26, 0x89, 0xdc, 0x05, 0x08, 0x78, 0x14, 0xd1, 0x00, 0x05, 0x6c, 0x14, + 0x30, 0x28, 
0x2a, 0xb1, 0xa4, 0x8c, 0xbc, 0x8c, 0x06, 0x53, 0xd8, 0xb5, 0xf6, 0xba, 0x6e, 0x4f, + 0xca, 0xe8, 0x98, 0x06, 0xea, 0x1d, 0x79, 0x46, 0x85, 0x87, 0xfd, 0x69, 0x88, 0x7a, 0x03, 0x45, + 0xc0, 0x2e, 0xba, 0x03, 0xb0, 0x10, 0x3c, 0x4f, 0x35, 0x77, 0xb4, 0xdb, 0x56, 0xad, 0x1a, 0x29, + 0xc8, 0xbe, 0x0f, 0x37, 0xb3, 0xf7, 0x71, 0xc4, 0x92, 0x33, 0x4f, 0xfa, 0x62, 0x41, 0xe5, 0x74, + 0xac, 0x53, 0xbc, 0xa0, 0xbe, 0x41, 0xa2, 0x93, 0x02, 0x39, 0x14, 0xd4, 0x97, 0xf4, 0x3b, 0x4c, + 0xa5, 0x6f, 0x57, 0xfc, 0xe4, 0x16, 0xf4, 0xb8, 0x47, 0x2f, 0x82, 0xa8, 0xa8, 0xc1, 0x2e, 0x3f, + 0xba, 0x08, 0x22, 0xe7, 0x01, 0x4c, 0x6a, 0x37, 0x16, 0x7d, 0x7b, 0x0b, 0xba, 0x54, 0x08, 0x5e, + 0x76, 0x19, 0x7d, 0x70, 0x7e, 0x03, 0xe4, 0x6d, 0x1a, 0x7e, 0x1f, 0xee, 0x39, 0xb7, 0x60, 0x52, + 0x33, 0xad, 0xfd, 0x70, 0xfe, 0x61, 0x01, 0x79, 0x8a, 0xcd, 0xe2, 0xff, 0x9b, 0xd3, 0xaa, 0x7c, + 0xd5, 0x0c, 0xd1, 0xcd, 0x28, 0xf4, 0xa5, 0x5f, 0x4c, 0xb8, 0x11, 0xcb, 0xb4, 0xfd, 0xa7, 0xbe, + 0xf4, 0x8b, 0x49, 0x23, 0x68, 0x90, 0x0b, 0x35, 0xf4, 0x30, 0x09, 0x71, 0xd2, 0xb8, 0x25, 0x89, + 0x7c, 0x0a, 0x1f, 0xb2, 0x45, 0xc2, 0x05, 0x5d, 0x8a, 0x79, 0x1a, 0xaa, 0x1e, 0x0a, 0x6f, 0x69, + 0x6e, 0xa5, 0x70, 0x84, 0xc8, 0x3d, 0x80, 0x49, 0xed, 0x19, 0x57, 0xc2, 0xfc, 0x27, 0x0b, 0xa6, + 0x8f, 0x25, 0x8f, 0x59, 0xe0, 0x52, 0xe5, 0x7c, 0xed, 0xe9, 0xf7, 0x60, 0xac, 0xda, 0xf5, 0xea, + 0xf3, 0x47, 0x3c, 0x0a, 0x97, 0xe3, 0xf0, 0x36, 0xa8, 0x8e, 0xed, 0x19, 0x28, 0xf4, 0x79, 0x14, + 0x62, 0x26, 0xde, 0x03, 0xd5, 0x56, 0x0d, 0x7d, 0xbd, 0x18, 0x8c, 0x12, 0x7a, 0x5e, 0xd3, 0x57, + 0x42, 0xa8, 0xaf, 0x7b, 0x71, 0x3f, 0xa1, 0xe7, 0x4a, 0xdf, 0xb9, 0x03, 0xb7, 0xd7, 0xf8, 0x56, + 0x84, 0xeb, 0x9f, 0x16, 0x4c, 0x1e, 0x67, 0x19, 0x5b, 0x24, 0xbf, 0xc6, 0xb6, 0x53, 0x3a, 0xbd, + 0x05, 0xdd, 0x80, 0xe7, 0x89, 0x44, 0x67, 0xbb, 0xae, 0x3e, 0xac, 0x54, 0x62, 0xab, 0x51, 0x89, + 0x2b, 0xb5, 0xdc, 0x6e, 0xd6, 0xb2, 0x51, 0xab, 0x9d, 0x5a, 0xad, 0xfe, 0x00, 0x86, 0x2a, 0xc8, + 0x5e, 0x40, 0x13, 0x49, 0x45, 0xd1, 0xc8, 0x41, 0x91, 0x0e, 0x91, 0xa2, 0x04, 0xcc, 0x81, 0xa3, + 0x7b, 0x39, 0xa4, 0xcb, 0x69, 0xf3, 0x2f, 0x0b, 0xb6, 0xea, 0x4f, 0x29, 0x62, 0x76, 0xe9, 0xe0, + 0x51, 0xad, 0x4c, 0x44, 0xc5, 0x3b, 0xd4, 0xa7, 0x6a, 0x0a, 0x69, 0x7e, 0x12, 0xb1, 0xc0, 0x53, + 0x0c, 0xed, 0xbf, 0xad, 0x29, 0x6f, 0x45, 0xb4, 0x44, 0xa5, 0x63, 0xa2, 0x42, 0xa0, 0xe3, 0xe7, + 0xf2, 0xb4, 0x1c, 0x3e, 0xea, 0x7b, 0x05, 0xa9, 0xde, 0x75, 0x48, 0xf5, 0x9b, 0x48, 0x55, 0x99, + 0x36, 0x30, 0x33, 0xed, 0x53, 0x98, 0xe8, 0xed, 0xb5, 0x1e, 0xae, 0x1d, 0x80, 0x6a, 0x8e, 0x64, + 0x53, 0x4b, 0x37, 0xb3, 0x72, 0x90, 0x64, 0xce, 0x2f, 0xc0, 0x7e, 0xc1, 0xb5, 0xdd, 0x8c, 0x3c, + 0x02, 0x3b, 0x2a, 0x0f, 0x28, 0x3a, 0x3c, 0x20, 0xcb, 0x1a, 0x2f, 0xe5, 0xdc, 0xa5, 0x90, 0xf3, + 0x05, 0x0c, 0x4a, 0x72, 0x89, 0x99, 0x75, 0x19, 0x66, 0xad, 0x15, 0xcc, 0x9c, 0xbf, 0x5b, 0xb0, + 0x55, 0x77, 0xb9, 0x08, 0xcb, 0x5b, 0x18, 0x57, 0x57, 0x78, 0xb1, 0x9f, 0x16, 0xbe, 0x3c, 0x32, + 0x7d, 0x69, 0xaa, 0x55, 0x0e, 0x66, 0x2f, 0xfd, 0x54, 0xe7, 0xf2, 0x28, 0x32, 0x48, 0xb3, 0x37, + 0xb0, 0xd9, 0x10, 0x59, 0xb3, 0xba, 0xfd, 0xd8, 0x5c, 0xdd, 0x6a, 0xeb, 0x67, 0xa5, 0x6d, 0xee, + 0x73, 0x9f, 0xc3, 0x47, 0xba, 0x1d, 0x1c, 0x56, 0x31, 0x2c, 0xb1, 0xaf, 0x87, 0xda, 0x5a, 0x0d, + 0xb5, 0x33, 0x83, 0x69, 0x53, 0xb5, 0x28, 0xbf, 0x05, 0x6c, 0x1e, 0x4b, 0x5f, 0xb2, 0x4c, 0xb2, + 0xa0, 0xfa, 0x0d, 0xb1, 0x92, 0x1b, 0xd6, 0x75, 0x13, 0xb1, 0x59, 0x87, 0x1b, 0xd0, 0x96, 0xb2, + 0xcc, 0x5f, 0xf5, 0xa9, 0xa2, 0x40, 0xcc, 0x9b, 0x8a, 0x18, 0x7c, 0x0f, 0x57, 0xa9, 0x7c, 0x90, + 0x5c, 0xfa, 0x91, 0xde, 0x38, 0x3a, 
0xb8, 0x71, 0xd8, 0x48, 0xc1, 0x95, 0x43, 0x0f, 0xe5, 0x50, + 0x73, 0xbb, 0x7a, 0x1f, 0x51, 0x04, 0x64, 0xee, 0x00, 0x60, 0xa9, 0xea, 0x2a, 0xeb, 0x69, 0x5d, + 0x45, 0x39, 0x54, 0x04, 0xe7, 0x2e, 0x6c, 0xff, 0x92, 0x4a, 0xb5, 0x3b, 0x89, 0x43, 0x9e, 0xcc, + 0xd9, 0x22, 0x17, 0xbe, 0x11, 0x0a, 0xe7, 0xdf, 0x16, 0xec, 0x5c, 0x22, 0x50, 0x3c, 0x78, 0x0a, + 0xfd, 0xd8, 0xcf, 0x24, 0x15, 0x65, 0x95, 0x94, 0xc7, 0x55, 0x28, 0x5a, 0xd7, 0x41, 0xd1, 0x6e, + 0x40, 0x71, 0x0b, 0x7a, 0xb1, 0x7f, 0xe1, 0xc5, 0x27, 0xc5, 0x72, 0xd4, 0x8d, 0xfd, 0x8b, 0x97, + 0x27, 0xd8, 0xd9, 0x98, 0xf0, 0x4e, 0xf2, 0xe0, 0x8c, 0xca, 0xac, 0xea, 0x6c, 0x4c, 0x3c, 0xd1, + 0x14, 0xf5, 0x68, 0x25, 0xf0, 0x4d, 0x4e, 0x73, 0x9a, 0x15, 0xbd, 0x42, 0x0d, 0xc7, 0x5f, 0x21, + 0x01, 0x97, 0x29, 0x5c, 0x1d, 0xb1, 0x4b, 0x0c, 0xdc, 0xe2, 0x74, 0xf0, 0x97, 0x01, 0x8c, 0x8e, + 0xa9, 0x7f, 0x4e, 0x69, 0x88, 0x0f, 0x26, 0x8b, 0xb2, 0xd0, 0xea, 0xbf, 0x6c, 0xc9, 0xfd, 0xd5, + 0x8a, 0x5a, 0xfb, 0x53, 0x7a, 0xf6, 0xc9, 0x75, 0x62, 0x45, 0xce, 0xde, 0x20, 0xaf, 0x60, 0x68, + 0xfc, 0x74, 0x24, 0xdb, 0x86, 0x62, 0xe3, 0x17, 0xf1, 0x6c, 0xe7, 0x12, 0x6e, 0x69, 0xed, 0x91, + 0x45, 0x5e, 0xc0, 0xd0, 0x58, 0x69, 0x4c, 0x7b, 0xcd, 0xdd, 0xca, 0xb4, 0xb7, 0x66, 0x0f, 0x72, + 0x6e, 0x28, 0x6b, 0xc6, 0x62, 0x62, 0x5a, 0x6b, 0xae, 0x42, 0xa6, 0xb5, 0x75, 0xdb, 0x0c, 0x5a, + 0x33, 0xf6, 0x00, 0xd3, 0x5a, 0x73, 0xcb, 0x31, 0xad, 0xad, 0x59, 0x1e, 0x9c, 0x1b, 0xe4, 0x6b, + 0x98, 0x1c, 0x4b, 0x41, 0xfd, 0x78, 0xc9, 0x5e, 0x41, 0xf0, 0x7f, 0xb0, 0xba, 0x67, 0x3d, 0xb2, + 0xc8, 0xef, 0x60, 0xb3, 0x31, 0xe5, 0x89, 0xb3, 0xd4, 0xbc, 0x6c, 0x3d, 0x99, 0xdd, 0xbb, 0x52, + 0xa6, 0xf2, 0xfc, 0x2b, 0x18, 0x99, 0xc3, 0x95, 0x18, 0x4e, 0xad, 0xd9, 0x1f, 0x66, 0x77, 0x2f, + 0x63, 0x9b, 0x06, 0xcd, 0xfe, 0x6e, 0x1a, 0x5c, 0x33, 0xe1, 0x4c, 0x83, 0xeb, 0xc6, 0x82, 0x73, + 0x83, 0xfc, 0x16, 0x36, 0x56, 0xfb, 0x2c, 0xf9, 0x78, 0x15, 0xba, 0x46, 0xfb, 0x9e, 0x39, 0x57, + 0x89, 0x54, 0xc6, 0x9f, 0x03, 0x2c, 0xdb, 0x27, 0xb9, 0xb3, 0xd4, 0x69, 0xb4, 0xef, 0xd9, 0xf6, + 0x7a, 0x66, 0x65, 0xea, 0xf7, 0x70, 0x6b, 0x6d, 0x8f, 0x22, 0x46, 0x01, 0x5e, 0xd5, 0xe5, 0x66, + 0x3f, 0xba, 0x56, 0xae, 0xbc, 0xeb, 0xc9, 0x5d, 0xd8, 0xc8, 0x74, 0x8b, 0x98, 0x67, 0xfb, 0x41, + 0xc4, 0x68, 0x22, 0x9f, 0x00, 0x6a, 0xbc, 0x16, 0x5c, 0xf2, 0x93, 0x1e, 0xfe, 0xdd, 0xf6, 0xd3, + 0xff, 0x06, 0x00, 0x00, 0xff, 0xff, 0xf6, 0xb5, 0x1d, 0x19, 0x7d, 0x13, 0x00, 0x00, } diff --git a/weed/pb/master_pb/master.pb.go b/weed/pb/master_pb/master.pb.go index 95c9533a1..c33e2b768 100644 --- a/weed/pb/master_pb/master.pb.go +++ b/weed/pb/master_pb/master.pb.go @@ -428,12 +428,10 @@ type VolumeEcShardInformationMessage struct { EcIndexBits uint32 `protobuf:"varint,3,opt,name=ec_index_bits,json=ecIndexBits" json:"ec_index_bits,omitempty"` } -func (m *VolumeEcShardInformationMessage) Reset() { *m = VolumeEcShardInformationMessage{} } -func (m *VolumeEcShardInformationMessage) String() string { return proto.CompactTextString(m) } -func (*VolumeEcShardInformationMessage) ProtoMessage() {} -func (*VolumeEcShardInformationMessage) Descriptor() ([]byte, []int) { - return fileDescriptor0, []int{4} -} +func (m *VolumeEcShardInformationMessage) Reset() { *m = VolumeEcShardInformationMessage{} } +func (m *VolumeEcShardInformationMessage) String() string { return proto.CompactTextString(m) } +func (*VolumeEcShardInformationMessage) ProtoMessage() {} +func (*VolumeEcShardInformationMessage) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } func (m *VolumeEcShardInformationMessage) GetId() uint32 { if m != 
nil { @@ -1424,12 +1422,10 @@ type GetMasterConfigurationResponse struct { MetricsIntervalSeconds uint32 `protobuf:"varint,2,opt,name=metrics_interval_seconds,json=metricsIntervalSeconds" json:"metrics_interval_seconds,omitempty"` } -func (m *GetMasterConfigurationResponse) Reset() { *m = GetMasterConfigurationResponse{} } -func (m *GetMasterConfigurationResponse) String() string { return proto.CompactTextString(m) } -func (*GetMasterConfigurationResponse) ProtoMessage() {} -func (*GetMasterConfigurationResponse) Descriptor() ([]byte, []int) { - return fileDescriptor0, []int{32} -} +func (m *GetMasterConfigurationResponse) Reset() { *m = GetMasterConfigurationResponse{} } +func (m *GetMasterConfigurationResponse) String() string { return proto.CompactTextString(m) } +func (*GetMasterConfigurationResponse) ProtoMessage() {} +func (*GetMasterConfigurationResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{32} } func (m *GetMasterConfigurationResponse) GetMetricsAddress() string { if m != nil { diff --git a/weed/pb/volume_server_pb/volume_server.pb.go b/weed/pb/volume_server_pb/volume_server.pb.go index 588b18f2e..56baa0cf7 100644 --- a/weed/pb/volume_server_pb/volume_server.pb.go +++ b/weed/pb/volume_server_pb/volume_server.pb.go @@ -1035,12 +1035,10 @@ func (m *VolumeEcShardsGenerateRequest) GetCollection() string { type VolumeEcShardsGenerateResponse struct { } -func (m *VolumeEcShardsGenerateResponse) Reset() { *m = VolumeEcShardsGenerateResponse{} } -func (m *VolumeEcShardsGenerateResponse) String() string { return proto.CompactTextString(m) } -func (*VolumeEcShardsGenerateResponse) ProtoMessage() {} -func (*VolumeEcShardsGenerateResponse) Descriptor() ([]byte, []int) { - return fileDescriptor0, []int{41} -} +func (m *VolumeEcShardsGenerateResponse) Reset() { *m = VolumeEcShardsGenerateResponse{} } +func (m *VolumeEcShardsGenerateResponse) String() string { return proto.CompactTextString(m) } +func (*VolumeEcShardsGenerateResponse) ProtoMessage() {} +func (*VolumeEcShardsGenerateResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{41} } type VolumeEcShardsRebuildRequest struct { VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` @@ -1413,12 +1411,10 @@ func (m *VolumeEcShardsToVolumeRequest) GetCollection() string { type VolumeEcShardsToVolumeResponse struct { } -func (m *VolumeEcShardsToVolumeResponse) Reset() { *m = VolumeEcShardsToVolumeResponse{} } -func (m *VolumeEcShardsToVolumeResponse) String() string { return proto.CompactTextString(m) } -func (*VolumeEcShardsToVolumeResponse) ProtoMessage() {} -func (*VolumeEcShardsToVolumeResponse) Descriptor() ([]byte, []int) { - return fileDescriptor0, []int{57} -} +func (m *VolumeEcShardsToVolumeResponse) Reset() { *m = VolumeEcShardsToVolumeResponse{} } +func (m *VolumeEcShardsToVolumeResponse) String() string { return proto.CompactTextString(m) } +func (*VolumeEcShardsToVolumeResponse) ProtoMessage() {} +func (*VolumeEcShardsToVolumeResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{57} } type ReadVolumeFileStatusRequest struct { VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` @@ -2085,10 +2081,8 @@ type QueryRequest_InputSerialization_JSONInput struct { func (m *QueryRequest_InputSerialization_JSONInput) Reset() { *m = QueryRequest_InputSerialization_JSONInput{} } -func (m *QueryRequest_InputSerialization_JSONInput) String() string { - return proto.CompactTextString(m) -} -func 
(*QueryRequest_InputSerialization_JSONInput) ProtoMessage() {} +func (m *QueryRequest_InputSerialization_JSONInput) String() string { return proto.CompactTextString(m) } +func (*QueryRequest_InputSerialization_JSONInput) ProtoMessage() {} func (*QueryRequest_InputSerialization_JSONInput) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{70, 1, 1} } diff --git a/weed/replication/sink/azuresink/azure_sink.go b/weed/replication/sink/azuresink/azure_sink.go index f7c0eef0d..66e7b9c3e 100644 --- a/weed/replication/sink/azuresink/azure_sink.go +++ b/weed/replication/sink/azuresink/azure_sink.go @@ -115,7 +115,7 @@ func (g *AzureSink) CreateEntry(key string, entry *filer_pb.Entry) error { } var writeErr error - _, readErr := util.ReadUrlAsStream(fileUrl, chunk.Offset, int(chunk.Size), func(data []byte) { + readErr := util.ReadUrlAsStream(fileUrl, nil, chunk.IsFullChunk, chunk.Offset, int(chunk.Size), func(data []byte) { _, writeErr = appendBlobURL.AppendBlock(context.Background(), bytes.NewReader(data), azblob.AppendBlobAccessConditions{}, nil) }) diff --git a/weed/replication/sink/b2sink/b2_sink.go b/weed/replication/sink/b2sink/b2_sink.go index e687170dd..0f8313638 100644 --- a/weed/replication/sink/b2sink/b2_sink.go +++ b/weed/replication/sink/b2sink/b2_sink.go @@ -103,7 +103,7 @@ func (g *B2Sink) CreateEntry(key string, entry *filer_pb.Entry) error { } var writeErr error - _, readErr := util.ReadUrlAsStream(fileUrl, chunk.Offset, int(chunk.Size), func(data []byte) { + readErr := util.ReadUrlAsStream(fileUrl, nil, chunk.IsFullChunk, chunk.Offset, int(chunk.Size), func(data []byte) { _, err := writer.Write(data) if err != nil { writeErr = err diff --git a/weed/replication/sink/filersink/fetch_write.go b/weed/replication/sink/filersink/fetch_write.go index 232b68fec..512fbd46b 100644 --- a/weed/replication/sink/filersink/fetch_write.go +++ b/weed/replication/sink/filersink/fetch_write.go @@ -50,6 +50,7 @@ func (fs *FilerSink) replicateOneChunk(sourceChunk *filer_pb.FileChunk, dir stri Mtime: sourceChunk.Mtime, ETag: sourceChunk.ETag, SourceFileId: sourceChunk.GetFileIdString(), + CipherKey: sourceChunk.CipherKey, }, nil } @@ -95,8 +96,8 @@ func (fs *FilerSink) fetchAndWrite(sourceChunk *filer_pb.FileChunk, dir string) glog.V(4).Infof("replicating %s to %s header:%+v", filename, fileUrl, header) - uploadResult, err := operation.Upload(fileUrl, filename, readCloser, - "gzip" == header.Get("Content-Encoding"), header.Get("Content-Type"), nil, auth) + // fetch data as is, regardless whether it is encrypted or not + uploadResult, err := operation.Upload(fileUrl, filename, false, readCloser, "gzip" == header.Get("Content-Encoding"), header.Get("Content-Type"), nil, auth) if err != nil { glog.V(0).Infof("upload data %v to %s: %v", filename, fileUrl, err) return "", fmt.Errorf("upload data: %v", err) diff --git a/weed/replication/sink/gcssink/gcs_sink.go b/weed/replication/sink/gcssink/gcs_sink.go index d5b1e137f..dd8567e0e 100644 --- a/weed/replication/sink/gcssink/gcs_sink.go +++ b/weed/replication/sink/gcssink/gcs_sink.go @@ -6,13 +6,14 @@ import ( "os" "cloud.google.com/go/storage" + "google.golang.org/api/option" + "github.com/chrislusf/seaweedfs/weed/filer2" "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" "github.com/chrislusf/seaweedfs/weed/replication/sink" "github.com/chrislusf/seaweedfs/weed/replication/source" "github.com/chrislusf/seaweedfs/weed/util" - "google.golang.org/api/option" ) type GcsSink struct { @@ -100,7 +101,7 @@ func (g 
*GcsSink) CreateEntry(key string, entry *filer_pb.Entry) error { return err } - _, err = util.ReadUrlAsStream(fileUrl, chunk.Offset, int(chunk.Size), func(data []byte) { + err = util.ReadUrlAsStream(fileUrl, nil, chunk.IsFullChunk, chunk.Offset, int(chunk.Size), func(data []byte) { wc.Write(data) }) diff --git a/weed/replication/sink/s3sink/s3_write.go b/weed/replication/sink/s3sink/s3_write.go index 60885c30c..dcc041642 100644 --- a/weed/replication/sink/s3sink/s3_write.go +++ b/weed/replication/sink/s3sink/s3_write.go @@ -162,6 +162,6 @@ func (s3sink *S3Sink) buildReadSeeker(chunk *filer2.ChunkView) (io.ReadSeeker, e return nil, err } buf := make([]byte, chunk.Size) - util.ReadUrl(fileUrl, chunk.Offset, int(chunk.Size), buf, true) + util.ReadUrl(fileUrl, nil, false, chunk.Offset, int(chunk.Size), buf) return bytes.NewReader(buf), nil } diff --git a/weed/server/common.go b/weed/server/common.go index 31a9a73b8..d7ab8d1ee 100644 --- a/weed/server/common.go +++ b/weed/server/common.go @@ -134,7 +134,7 @@ func submitForClientHandler(w http.ResponseWriter, r *http.Request, masterUrl st } debug("upload file to store", url) - uploadResult, err := operation.Upload(url, fname, bytes.NewReader(data), isGzipped, mimeType, pairMap, assignResult.Auth) + uploadResult, err := operation.Upload(url, fname, false, bytes.NewReader(data), isGzipped, mimeType, pairMap, assignResult.Auth) if err != nil { writeJsonError(w, r, http.StatusInternalServerError, err) return diff --git a/weed/server/filer_grpc_server.go b/weed/server/filer_grpc_server.go index 35539acca..b000bf3a6 100644 --- a/weed/server/filer_grpc_server.go +++ b/weed/server/filer_grpc_server.go @@ -338,5 +338,6 @@ func (fs *FilerServer) GetFilerConfiguration(ctx context.Context, req *filer_pb. MaxMb: uint32(fs.option.MaxMB), DirBuckets: fs.filer.DirBucketsPath, DirQueues: fs.filer.DirQueuesPath, + Cipher: fs.filer.Cipher, }, nil } diff --git a/weed/server/filer_server.go b/weed/server/filer_server.go index 57caeb6d4..4c493b6b3 100644 --- a/weed/server/filer_server.go +++ b/weed/server/filer_server.go @@ -46,6 +46,7 @@ type FilerOption struct { DisableHttp bool Port uint32 recursiveDelete bool + Cipher bool } type FilerServer struct { @@ -67,6 +68,7 @@ func NewFilerServer(defaultMux, readonlyMux *http.ServeMux, option *FilerOption) } fs.filer = filer2.NewFiler(option.Masters, fs.grpcDialOption, option.Port+10000) + fs.filer.Cipher = option.Cipher go fs.filer.KeepConnectedToMaster() diff --git a/weed/server/filer_server_handlers_read.go b/weed/server/filer_server_handlers_read.go index ba21298ba..aff2b9159 100644 --- a/weed/server/filer_server_handlers_read.go +++ b/weed/server/filer_server_handlers_read.go @@ -14,6 +14,7 @@ import ( "github.com/chrislusf/seaweedfs/weed/filer2" "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" "github.com/chrislusf/seaweedfs/weed/stats" "github.com/chrislusf/seaweedfs/weed/util" ) @@ -93,7 +94,7 @@ func (fs *FilerServer) handleSingleChunk(w http.ResponseWriter, r *http.Request, return } - if fs.option.RedirectOnRead { + if fs.option.RedirectOnRead && entry.Chunks[0].CipherKey == nil { stats.FilerRequestCounter.WithLabelValues("redirect").Inc() http.Redirect(w, r, urlString, http.StatusFound) return @@ -136,7 +137,27 @@ func (fs *FilerServer) handleSingleChunk(w http.ResponseWriter, r *http.Request, w.Header().Set("Content-Type", entry.Attr.Mime) } w.WriteHeader(resp.StatusCode) - io.Copy(w, resp.Body) + if entry.Chunks[0].CipherKey == nil { + io.Copy(w, resp.Body) + } 
else { + fs.writeEncryptedChunk(w, resp, entry.Chunks[0]) + } +} + +func (fs *FilerServer) writeEncryptedChunk(w http.ResponseWriter, resp *http.Response, chunk *filer_pb.FileChunk) { + encryptedData, err := ioutil.ReadAll(resp.Body) + if err != nil { + glog.V(1).Infof("read encrypted %s failed, err: %v", chunk.FileId, err) + w.WriteHeader(http.StatusNotFound) + return + } + decryptedData, err := util.Decrypt(encryptedData, util.CipherKey(chunk.CipherKey)) + if err != nil { + glog.V(1).Infof("decrypt %s failed, err: %v", chunk.FileId, err) + w.WriteHeader(http.StatusNotFound) + return + } + w.Write(decryptedData) } func (fs *FilerServer) handleMultipleChunks(w http.ResponseWriter, r *http.Request, entry *filer2.Entry) { diff --git a/weed/server/filer_server_handlers_write_autochunk.go b/weed/server/filer_server_handlers_write_autochunk.go index 45cad480d..c8eadf82a 100644 --- a/weed/server/filer_server_handlers_write_autochunk.go +++ b/weed/server/filer_server_handlers_write_autochunk.go @@ -182,7 +182,7 @@ func (fs *FilerServer) doUpload(urlLocation string, w http.ResponseWriter, r *ht stats.FilerRequestHistogram.WithLabelValues("postAutoChunkUpload").Observe(time.Since(start).Seconds()) }() - uploadResult, uploadError := operation.Upload(urlLocation, fileName, limitedReader, false, contentType, nil, auth) + uploadResult, uploadError := operation.Upload(urlLocation, fileName, fs.option.Cipher, limitedReader, false, contentType, nil, auth) if uploadError != nil { return 0, uploadError } diff --git a/weed/server/webdav_server.go b/weed/server/webdav_server.go index a07f6be01..9451fdf99 100644 --- a/weed/server/webdav_server.go +++ b/weed/server/webdav_server.go @@ -32,6 +32,7 @@ type WebDavOption struct { Collection string Uid uint32 Gid uint32 + Cipher bool } type WebDavServer struct { @@ -418,7 +419,7 @@ func (f *WebDavFile) Write(buf []byte) (int, error) { fileUrl := fmt.Sprintf("http://%s/%s", host, fileId) bufReader := bytes.NewReader(buf) - uploadResult, err := operation.Upload(fileUrl, f.name, bufReader, false, "", nil, auth) + uploadResult, err := operation.Upload(fileUrl, f.name, f.fs.option.Cipher, bufReader, false, "", nil, auth) if err != nil { glog.V(0).Infof("upload data %v to %s: %v", f.name, fileUrl, err) return 0, fmt.Errorf("upload data: %v", err) @@ -429,11 +430,12 @@ func (f *WebDavFile) Write(buf []byte) (int, error) { } chunk := &filer_pb.FileChunk{ - FileId: fileId, - Offset: f.off, - Size: uint64(len(buf)), - Mtime: time.Now().UnixNano(), - ETag: uploadResult.ETag, + FileId: fileId, + Offset: f.off, + Size: uint64(len(buf)), + Mtime: time.Now().UnixNano(), + ETag: uploadResult.ETag, + CipherKey: uploadResult.CipherKey, } f.entry.Chunks = append(f.entry.Chunks, chunk) diff --git a/weed/topology/store_replicate.go b/weed/topology/store_replicate.go index b195b48ed..332ec4d65 100644 --- a/weed/topology/store_replicate.go +++ b/weed/topology/store_replicate.go @@ -72,9 +72,8 @@ func ReplicatedWrite(masterNode string, s *storage.Store, } } - _, err := operation.Upload(u.String(), - string(n.Name), bytes.NewReader(n.Data), n.IsGzipped(), string(n.Mime), - pairMap, jwt) + // volume server do not know about encryption + _, err := operation.Upload(u.String(), string(n.Name), false, bytes.NewReader(n.Data), n.IsGzipped(), string(n.Mime), pairMap, jwt) return err }); err != nil { size = 0 diff --git a/weed/util/cipher.go b/weed/util/cipher.go new file mode 100644 index 000000000..f044c2ca3 --- /dev/null +++ b/weed/util/cipher.go @@ -0,0 +1,60 @@ +package util + +import ( + 
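	// The helpers in this new file back the chunk encryption wired up elsewhere in this patch:
	// GenCipherKey draws a fresh random 256-bit AES key per upload, Encrypt seals the data with
	// AES-GCM and prepends the nonce to the ciphertext, and Decrypt reads that nonce back off
	// the front, so the CipherKey stored on the chunk is all a reader needs.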
"crypto/aes" + "crypto/cipher" + "crypto/rand" + "errors" + "io" + + "github.com/chrislusf/seaweedfs/weed/glog" +) + +type CipherKey []byte + +func GenCipherKey() CipherKey { + key := make([]byte, 32) + if _, err := io.ReadFull(rand.Reader, key); err != nil { + glog.Fatalf("random key gen: %v", err) + } + return CipherKey(key) +} + +func Encrypt(plaintext []byte, key CipherKey) ([]byte, error) { + c, err := aes.NewCipher(key) + if err != nil { + return nil, err + } + + gcm, err := cipher.NewGCM(c) + if err != nil { + return nil, err + } + + nonce := make([]byte, gcm.NonceSize()) + if _, err = io.ReadFull(rand.Reader, nonce); err != nil { + return nil, err + } + + return gcm.Seal(nonce, nonce, plaintext, nil), nil +} + +func Decrypt(ciphertext []byte, key CipherKey) ([]byte, error) { + c, err := aes.NewCipher(key) + if err != nil { + return nil, err + } + + gcm, err := cipher.NewGCM(c) + if err != nil { + return nil, err + } + + nonceSize := gcm.NonceSize() + if len(ciphertext) < nonceSize { + return nil, errors.New("ciphertext too short") + } + + nonce, ciphertext := ciphertext[:nonceSize], ciphertext[nonceSize:] + return gcm.Open(nil, nonce, ciphertext, nil) +} diff --git a/weed/util/http_util.go b/weed/util/http_util.go index f819d8497..833db910c 100644 --- a/weed/util/http_util.go +++ b/weed/util/http_util.go @@ -189,13 +189,21 @@ func NormalizeUrl(url string) string { return "http://" + url } -func ReadUrl(fileUrl string, offset int64, size int, buf []byte, isReadRange bool) (int64, error) { +func ReadUrl(fileUrl string, cipherKey []byte, isFullChunk bool, offset int64, size int, buf []byte) (int64, error) { + + if cipherKey != nil { + var n int + err := readEncryptedUrl(fileUrl, cipherKey, offset, size, func(data []byte) { + n = copy(buf, data) + }) + return int64(n), err + } req, err := http.NewRequest("GET", fileUrl, nil) if err != nil { return 0, err } - if isReadRange { + if !isFullChunk { req.Header.Add("Range", fmt.Sprintf("bytes=%d-%d", offset, offset+int64(size)-1)) } else { req.Header.Set("Accept-Encoding", "gzip") @@ -250,43 +258,64 @@ func ReadUrl(fileUrl string, offset int64, size int, buf []byte, isReadRange boo return n, err } -func ReadUrlAsStream(fileUrl string, offset int64, size int, fn func(data []byte)) (int64, error) { +func ReadUrlAsStream(fileUrl string, cipherKey []byte, isFullChunk bool, offset int64, size int, fn func(data []byte)) error { + + if cipherKey != nil { + return readEncryptedUrl(fileUrl, cipherKey, offset, size, fn) + } req, err := http.NewRequest("GET", fileUrl, nil) if err != nil { - return 0, err + return err + } + + if !isFullChunk { + req.Header.Add("Range", fmt.Sprintf("bytes=%d-%d", offset, offset+int64(size)-1)) } - req.Header.Add("Range", fmt.Sprintf("bytes=%d-%d", offset, offset+int64(size)-1)) r, err := client.Do(req) if err != nil { - return 0, err + return err } defer CloseResponse(r) if r.StatusCode >= 400 { - return 0, fmt.Errorf("%s: %s", fileUrl, r.Status) + return fmt.Errorf("%s: %s", fileUrl, r.Status) } var ( m int - n int64 ) buf := make([]byte, 64*1024) for { m, err = r.Body.Read(buf) fn(buf[:m]) - n += int64(m) if err == io.EOF { - return n, nil + return nil } if err != nil { - return n, err + return err } } } +func readEncryptedUrl(fileUrl string, cipherKey []byte, offset int64, size int, fn func(data []byte)) error { + encryptedData, err := Get(fileUrl) + if err != nil { + return fmt.Errorf("fetch %s: %v", fileUrl, err) + } + decryptedData, err := Decrypt(encryptedData, CipherKey(cipherKey)) + if err != nil { + return 
fmt.Errorf("decrypt %s: %v", fileUrl, err) + } + if len(decryptedData) < int(offset)+size { + return fmt.Errorf("read decrypted %s size %d [%d, %d)", fileUrl, len(decryptedData), offset, int(offset)+size) + } + fn(decryptedData[int(offset) : int(offset)+size]) + return nil +} + func ReadUrlAsReaderCloser(fileUrl string, rangeHeader string) (io.ReadCloser, error) { req, err := http.NewRequest("GET", fileUrl, nil) From 40f70481cde0965374820852d630d2f34db4fb09 Mon Sep 17 00:00:00 2001 From: zhangsong Date: Thu, 5 Mar 2020 13:41:52 +0800 Subject: [PATCH 0194/2432] schedule new volume by free volume number of nodes --- weed/topology/node.go | 78 ++++++++++++++------------ weed/topology/volume_growth.go | 6 +- weed/topology/volume_growth_test.go | 87 +++++++++++++++++++++++++++++ 3 files changed, 133 insertions(+), 38 deletions(-) diff --git a/weed/topology/node.go b/weed/topology/node.go index 572a89d4d..501a22623 100644 --- a/weed/topology/node.go +++ b/weed/topology/node.go @@ -62,56 +62,64 @@ type NodeImpl struct { } // the first node must satisfy filterFirstNodeFn(), the rest nodes must have one free slot -func (n *NodeImpl) RandomlyPickNodes(numberOfNodes int, filterFirstNodeFn func(dn Node) error) (firstNode Node, restNodes []Node, err error) { - candidates := make([]Node, 0, len(n.children)) +func (n *NodeImpl) PickNodesByWeight(numberOfNodes int, filterFirstNodeFn func(dn Node) error) (firstNode Node, restNodes []Node, err error) { + var totalWeights int64 var errs []string n.RLock() + candidates := make([]Node, 0, len(n.children)) + candidatesWeights := make([]int64, 0, len(n.children)) + //pick nodes which has enough free volumes as candidates, and use free volumes number as node weight. for _, node := range n.children { - if err := filterFirstNodeFn(node); err == nil { - candidates = append(candidates, node) - } else { - errs = append(errs, string(node.Id())+":"+err.Error()) + if node.FreeSpace() <= 0 { + continue } + totalWeights += node.FreeSpace() + candidates = append(candidates, node) + candidatesWeights = append(candidatesWeights, node.FreeSpace()) } n.RUnlock() - if len(candidates) == 0 { - return nil, nil, errors.New("No matching data node found! \n" + strings.Join(errs, "\n")) + if len(candidates) < numberOfNodes { + glog.V(2).Infoln(n.Id(), "failed to pick", numberOfNodes, "from ", len(candidates), "node candidates") + return nil, nil, errors.New("No enough data node found!") } - firstNode = candidates[rand.Intn(len(candidates))] - glog.V(2).Infoln(n.Id(), "picked main node:", firstNode.Id()) - restNodes = make([]Node, numberOfNodes-1) - candidates = candidates[:0] - n.RLock() - for _, node := range n.children { - if node.Id() == firstNode.Id() { - continue - } - if node.FreeSpace() <= 0 { - continue + //pick nodes randomly by weights, the node picked earlier has higher final weights + sortedCandidates := make([]Node, 0, len(candidates)) + for i:=0; i=lastWeights) && (weightsInterval= numberOfNodes-1 { + restNodes = sortedCandidates[:numberOfNodes-1] + } else { + restNodes = append(restNodes, sortedCandidates[:k]...) + restNodes = append(restNodes, sortedCandidates[k+1:numberOfNodes]...) 
} + ret = true + break } else { - r := rand.Intn(k + 1) - if r < len(restNodes) { - restNodes[r] = node - } + errs = append(errs, string(node.Id())+":"+err.Error()) } } + n.RUnlock() if !ret { - glog.V(2).Infoln(n.Id(), "failed to pick", numberOfNodes-1, "from rest", len(candidates), "node candidates") - err = errors.New("No enough data node found!") + return nil, nil, errors.New("No matching data node found! \n" + strings.Join(errs, "\n")) } return } diff --git a/weed/topology/volume_growth.go b/weed/topology/volume_growth.go index 781a34ba3..446c88f60 100644 --- a/weed/topology/volume_growth.go +++ b/weed/topology/volume_growth.go @@ -112,7 +112,7 @@ func (vg *VolumeGrowth) findAndGrow(grpcDialOption grpc.DialOption, topo *Topolo func (vg *VolumeGrowth) findEmptySlotsForOneVolume(topo *Topology, option *VolumeGrowOption) (servers []*DataNode, err error) { //find main datacenter and other data centers rp := option.ReplicaPlacement - mainDataCenter, otherDataCenters, dc_err := topo.RandomlyPickNodes(rp.DiffDataCenterCount+1, func(node Node) error { + mainDataCenter, otherDataCenters, dc_err := topo.PickNodesByWeight(rp.DiffDataCenterCount+1, func(node Node) error { if option.DataCenter != "" && node.IsDataCenter() && node.Id() != NodeId(option.DataCenter) { return fmt.Errorf("Not matching preferred data center:%s", option.DataCenter) } @@ -144,7 +144,7 @@ func (vg *VolumeGrowth) findEmptySlotsForOneVolume(topo *Topology, option *Volum } //find main rack and other racks - mainRack, otherRacks, rackErr := mainDataCenter.(*DataCenter).RandomlyPickNodes(rp.DiffRackCount+1, func(node Node) error { + mainRack, otherRacks, rackErr := mainDataCenter.(*DataCenter).PickNodesByWeight(rp.DiffRackCount+1, func(node Node) error { if option.Rack != "" && node.IsRack() && node.Id() != NodeId(option.Rack) { return fmt.Errorf("Not matching preferred rack:%s", option.Rack) } @@ -171,7 +171,7 @@ func (vg *VolumeGrowth) findEmptySlotsForOneVolume(topo *Topology, option *Volum } //find main rack and other racks - mainServer, otherServers, serverErr := mainRack.(*Rack).RandomlyPickNodes(rp.SameRackCount+1, func(node Node) error { + mainServer, otherServers, serverErr := mainRack.(*Rack).PickNodesByWeight(rp.SameRackCount+1, func(node Node) error { if option.DataNode != "" && node.IsDataNode() && node.Id() != NodeId(option.DataNode) { return fmt.Errorf("Not matching preferred data node:%s", option.DataNode) } diff --git a/weed/topology/volume_growth_test.go b/weed/topology/volume_growth_test.go index a004dc210..bca6a8fc7 100644 --- a/weed/topology/volume_growth_test.go +++ b/weed/topology/volume_growth_test.go @@ -253,3 +253,90 @@ func TestReplication011(t *testing.T) { fmt.Println("assigned node :", server.Id()) } } + +var topologyLayout3 = ` +{ + "dc1":{ + "rack1":{ + "server111":{ + "volumes":[], + "limit":2000 + } + } + }, + "dc2":{ + "rack2":{ + "server222":{ + "volumes":[], + "limit":2000 + } + } + }, + "dc3":{ + "rack3":{ + "server333":{ + "volumes":[], + "limit":1000 + } + } + }, + "dc4":{ + "rack4":{ + "server444":{ + "volumes":[], + "limit":1000 + } + } + }, + "dc5":{ + "rack5":{ + "server555":{ + "volumes":[], + "limit":500 + } + } + }, + "dc6":{ + "rack6":{ + "server666":{ + "volumes":[], + "limit":500 + } + } + } +} +` + +func TestFindEmptySlotsForOneVolumeScheduleByWeight(t *testing.T) { + topo := setup(topologyLayout3) + vg := NewDefaultVolumeGrowth() + rp, _ := super_block.NewReplicaPlacementFromString("100") + volumeGrowOption := &VolumeGrowOption{ + Collection: "Weight", + ReplicaPlacement: rp, 
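The selection loop above is a weighted sample without replacement: each child node's free volume slot count is its weight, a random point in [0, totalWeights) is drawn, and the node whose weight interval contains that point is taken and removed from the pool before the next draw. A self-contained sketch of the same idea; the function and variable names here are illustrative, not the topology types used in node.go:

package main

import (
	"fmt"
	"math/rand"
)

// pickByWeight draws count indexes from weights without replacement; at each
// draw the remaining items are chosen with probability proportional to weight.
func pickByWeight(weights []int64, count int) []int {
	remaining := append([]int64(nil), weights...)
	var total int64
	for _, w := range remaining {
		total += w
	}

	var picked []int
	for len(picked) < count && total > 0 {
		point := rand.Int63n(total) // random point in [0, total)
		var acc int64
		for i, w := range remaining {
			if w == 0 {
				continue // already picked, or had no free slots to begin with
			}
			if point < acc+w {
				picked = append(picked, i)
				total -= w
				remaining[i] = 0
				break
			}
			acc += w
		}
	}
	return picked
}

func main() {
	// free volume slots per node, e.g. the six single-server data centers in the test below
	fmt.Println(pickByWeight([]int64{2000, 2000, 1000, 1000, 500, 500}, 2))
}

With the test layout's limits of 2000, 2000, 1000, 1000, 500 and 500, this is also why server111 and server222 should each collect roughly four times as many assignments as server555 and server666.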
+ DataCenter: "", + Rack: "", + DataNode: "", + } + + distribution := map[NodeId]int{} + // assign 1000 volumes + for i:=0;i<1000 ;i++ { + servers, err := vg.findEmptySlotsForOneVolume(topo, volumeGrowOption) + if err != nil { + fmt.Println("finding empty slots error :", err) + t.Fail() + } + for _, server := range servers { + fmt.Println("assigned node :", server.Id()) + if _, ok := distribution[server.id]; !ok { + distribution[server.id] = 0 + } + distribution[server.id] += 1 + } + } + + for k, v := range distribution { + fmt.Println(k, "%s : %d", k, v) + } +} \ No newline at end of file From ea1169dc8021172a5d14e618b041efb56db98de5 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Sat, 7 Mar 2020 06:06:58 -0800 Subject: [PATCH 0195/2432] filer cipher: single chunk http POST and PUT and read --- weed/operation/upload_content.go | 12 +- weed/server/common.go | 14 +- weed/server/filer_server_handlers_read.go | 11 +- weed/server/filer_server_handlers_write.go | 19 +- .../filer_server_handlers_write_autochunk.go | 28 ++- .../filer_server_handlers_write_cipher.go | 103 +++++++++++ weed/storage/needle/needle.go | 74 ++------ weed/storage/needle/needle_parse_multipart.go | 118 ------------- weed/storage/needle/needle_parse_upload.go | 166 ++++++++++++++++++ weed/util/cipher.go | 21 +++ 10 files changed, 354 insertions(+), 212 deletions(-) create mode 100644 weed/server/filer_server_handlers_write_cipher.go delete mode 100644 weed/storage/needle/needle_parse_multipart.go create mode 100644 weed/storage/needle/needle_parse_upload.go diff --git a/weed/operation/upload_content.go b/weed/operation/upload_content.go index ba15aea78..884933f18 100644 --- a/weed/operation/upload_content.go +++ b/weed/operation/upload_content.go @@ -69,18 +69,12 @@ func doUpload(uploadUrl string, filename string, cipher bool, reader io.Reader, // encrypt data var cipherKey util.CipherKey var clearDataLen int + var err error if cipher { - clearData, err := ioutil.ReadAll(reader) + cipherKey, reader, clearDataLen, _, err = util.EncryptReader(reader) if err != nil { - return nil, fmt.Errorf("read raw input: %v", err) + return nil, err } - clearDataLen = len(clearData) - cipherKey = util.GenCipherKey() - encryptedData, err := util.Encrypt(clearData, cipherKey) - if err != nil { - return nil, fmt.Errorf("encrypt input: %v", err) - } - reader = bytes.NewReader(encryptedData) } // upload data diff --git a/weed/server/common.go b/weed/server/common.go index d7ab8d1ee..f88533c24 100644 --- a/weed/server/common.go +++ b/weed/server/common.go @@ -99,13 +99,13 @@ func submitForClientHandler(w http.ResponseWriter, r *http.Request, masterUrl st } debug("parsing upload file...") - fname, data, mimeType, pairMap, isGzipped, originalDataSize, lastModified, _, _, pe := needle.ParseUpload(r, 256*1024*1024) + pu, pe := needle.ParseUpload(r, 256*1024*1024) if pe != nil { writeJsonError(w, r, http.StatusBadRequest, pe) return } - debug("assigning file id for", fname) + debug("assigning file id for", pu.FileName) r.ParseForm() count := uint64(1) if r.FormValue("count") != "" { @@ -129,21 +129,21 @@ func submitForClientHandler(w http.ResponseWriter, r *http.Request, masterUrl st } url := "http://" + assignResult.Url + "/" + assignResult.Fid - if lastModified != 0 { - url = url + "?ts=" + strconv.FormatUint(lastModified, 10) + if pu.ModifiedTime != 0 { + url = url + "?ts=" + strconv.FormatUint(pu.ModifiedTime, 10) } debug("upload file to store", url) - uploadResult, err := operation.Upload(url, fname, false, bytes.NewReader(data), isGzipped, 
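doUpload above now delegates the buffer-and-encrypt step to util.EncryptReader, which reads the whole clear stream, generates a fresh key, and returns a reader over the ciphertext together with both lengths. A small usage sketch, assuming the helper is imported from weed/util as added later in this patch; the sample input is illustrative:

package main

import (
	"fmt"
	"io/ioutil"
	"log"
	"strings"

	"github.com/chrislusf/seaweedfs/weed/util"
)

func main() {
	clear := strings.NewReader("some file content")

	key, encrypted, clearLen, encryptedLen, err := util.EncryptReader(clear)
	if err != nil {
		log.Fatalf("encrypt reader: %v", err)
	}
	defer encrypted.Close()

	// the ciphertext is nonce + data + GCM tag, so it is slightly longer than the input
	fmt.Printf("clear=%d encrypted=%d\n", clearLen, encryptedLen)

	// round trip: reading the encrypted stream and decrypting with the returned key
	// recovers the original bytes
	sealed, _ := ioutil.ReadAll(encrypted)
	plain, err := util.Decrypt(sealed, key)
	if err != nil {
		log.Fatalf("decrypt: %v", err)
	}
	fmt.Printf("%s\n", plain)
}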
mimeType, pairMap, assignResult.Auth) + uploadResult, err := operation.Upload(url, pu.FileName, false, bytes.NewReader(pu.Data), pu.IsGzipped, pu.MimeType, pu.PairMap, assignResult.Auth) if err != nil { writeJsonError(w, r, http.StatusInternalServerError, err) return } - m["fileName"] = fname + m["fileName"] = pu.FileName m["fid"] = assignResult.Fid m["fileUrl"] = assignResult.PublicUrl + "/" + assignResult.Fid - m["size"] = originalDataSize + m["size"] = pu.OriginalDataSize m["eTag"] = uploadResult.ETag writeJsonQuiet(w, r, http.StatusCreated, m) return diff --git a/weed/server/filer_server_handlers_read.go b/weed/server/filer_server_handlers_read.go index aff2b9159..796fd9c1c 100644 --- a/weed/server/filer_server_handlers_read.go +++ b/weed/server/filer_server_handlers_read.go @@ -2,6 +2,7 @@ package weed_server import ( "context" + "fmt" "io" "io/ioutil" "mime" @@ -14,7 +15,6 @@ import ( "github.com/chrislusf/seaweedfs/weed/filer2" "github.com/chrislusf/seaweedfs/weed/glog" - "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" "github.com/chrislusf/seaweedfs/weed/stats" "github.com/chrislusf/seaweedfs/weed/util" ) @@ -136,15 +136,16 @@ func (fs *FilerServer) handleSingleChunk(w http.ResponseWriter, r *http.Request, if entry.Attr.Mime != "" { w.Header().Set("Content-Type", entry.Attr.Mime) } - w.WriteHeader(resp.StatusCode) if entry.Chunks[0].CipherKey == nil { + w.WriteHeader(resp.StatusCode) io.Copy(w, resp.Body) } else { - fs.writeEncryptedChunk(w, resp, entry.Chunks[0]) + fs.writeEncryptedChunk(w, resp, entry) } } -func (fs *FilerServer) writeEncryptedChunk(w http.ResponseWriter, resp *http.Response, chunk *filer_pb.FileChunk) { +func (fs *FilerServer) writeEncryptedChunk(w http.ResponseWriter, resp *http.Response, entry *filer2.Entry) { + chunk := entry.Chunks[0] encryptedData, err := ioutil.ReadAll(resp.Body) if err != nil { glog.V(1).Infof("read encrypted %s failed, err: %v", chunk.FileId, err) @@ -157,6 +158,8 @@ func (fs *FilerServer) writeEncryptedChunk(w http.ResponseWriter, resp *http.Res w.WriteHeader(http.StatusNotFound) return } + w.Header().Set("Content-Length", fmt.Sprintf("%d", chunk.Size)) + w.WriteHeader(resp.StatusCode) w.Write(decryptedData) } diff --git a/weed/server/filer_server_handlers_write.go b/weed/server/filer_server_handlers_write.go index bb5f28663..b36333447 100644 --- a/weed/server/filer_server_handlers_write.go +++ b/weed/server/filer_server_handlers_write.go @@ -90,10 +90,22 @@ func (fs *FilerServer) PostHandler(w http.ResponseWriter, r *http.Request) { return } + if fs.option.Cipher { + reply, err := fs.encrypt(ctx, w, r, replication, collection, dataCenter) + if err != nil { + writeJsonError(w, r, http.StatusInternalServerError, err) + } else if reply != nil { + writeJsonQuiet(w, r, http.StatusCreated, reply) + } + + return + } + fileId, urlLocation, auth, err := fs.assignNewFileInfo(w, r, replication, collection, dataCenter) if err != nil || fileId == "" || urlLocation == "" { glog.V(0).Infof("fail to allocate volume for %s, collection:%s, datacenter:%s", r.URL.Path, collection, dataCenter) + writeJsonError(w, r, http.StatusInternalServerError, fmt.Errorf("fail to allocate volume for %s, collection:%s, datacenter:%s", r.URL.Path, collection, dataCenter)) return } @@ -134,7 +146,7 @@ func (fs *FilerServer) PostHandler(w http.ResponseWriter, r *http.Request) { // update metadata in filer store func (fs *FilerServer) updateFilerStore(ctx context.Context, r *http.Request, w http.ResponseWriter, - replication string, collection string, ret 
operation.UploadResult, fileId string) (err error) { + replication string, collection string, ret *operation.UploadResult, fileId string) (err error) { stats.FilerRequestCounter.WithLabelValues("postStoreWrite").Inc() start := time.Now() @@ -198,12 +210,14 @@ func (fs *FilerServer) updateFilerStore(ctx context.Context, r *http.Request, w } // send request to volume server -func (fs *FilerServer) uploadToVolumeServer(r *http.Request, u *url.URL, auth security.EncodedJwt, w http.ResponseWriter, fileId string) (ret operation.UploadResult, err error) { +func (fs *FilerServer) uploadToVolumeServer(r *http.Request, u *url.URL, auth security.EncodedJwt, w http.ResponseWriter, fileId string) (ret *operation.UploadResult, err error) { stats.FilerRequestCounter.WithLabelValues("postUpload").Inc() start := time.Now() defer func() { stats.FilerRequestHistogram.WithLabelValues("postUpload").Observe(time.Since(start).Seconds()) }() + ret = &operation.UploadResult{} + request := &http.Request{ Method: r.Method, URL: u, @@ -215,6 +229,7 @@ func (fs *FilerServer) uploadToVolumeServer(r *http.Request, u *url.URL, auth se Host: r.Host, ContentLength: r.ContentLength, } + if auth != "" { request.Header.Set("Authorization", "BEARER "+string(auth)) } diff --git a/weed/server/filer_server_handlers_write_autochunk.go b/weed/server/filer_server_handlers_write_autochunk.go index c8eadf82a..1c7891353 100644 --- a/weed/server/filer_server_handlers_write_autochunk.go +++ b/weed/server/filer_server_handlers_write_autochunk.go @@ -103,33 +103,35 @@ func (fs *FilerServer) doAutoChunk(ctx context.Context, w http.ResponseWriter, r // upload the chunk to the volume server chunkName := fileName + "_chunk_" + strconv.FormatInt(int64(len(fileChunks)+1), 10) - uploadedSize, uploadErr := fs.doUpload(urlLocation, w, r, limitedReader, chunkName, "", fileId, auth) + uploadResult, uploadErr := fs.doUpload(urlLocation, w, r, limitedReader, chunkName, "", fileId, auth) if uploadErr != nil { return nil, uploadErr } // if last chunk exhausted the reader exactly at the border - if uploadedSize == 0 { + if uploadResult.Size == 0 { break } // Save to chunk manifest structure fileChunks = append(fileChunks, &filer_pb.FileChunk{ - FileId: fileId, - Offset: chunkOffset, - Size: uint64(uploadedSize), - Mtime: time.Now().UnixNano(), + FileId: fileId, + Offset: chunkOffset, + Size: uint64(uploadResult.Size), + Mtime: time.Now().UnixNano(), + ETag: uploadResult.ETag, + CipherKey: uploadResult.CipherKey, }, ) - glog.V(4).Infof("uploaded %s chunk %d to %s [%d,%d) of %d", fileName, len(fileChunks), fileId, chunkOffset, chunkOffset+int64(uploadedSize), contentLength) + glog.V(4).Infof("uploaded %s chunk %d to %s [%d,%d) of %d", fileName, len(fileChunks), fileId, chunkOffset, chunkOffset+int64(uploadResult.Size), contentLength) // reset variables for the next chunk - chunkOffset = chunkOffset + int64(uploadedSize) + chunkOffset = chunkOffset + int64(uploadResult.Size) // if last chunk was not at full chunk size, but already exhausted the reader - if uploadedSize < int64(chunkSize) { + if int64(uploadResult.Size) < int64(chunkSize) { break } } @@ -174,7 +176,7 @@ func (fs *FilerServer) doAutoChunk(ctx context.Context, w http.ResponseWriter, r } func (fs *FilerServer) doUpload(urlLocation string, w http.ResponseWriter, r *http.Request, - limitedReader io.Reader, fileName string, contentType string, fileId string, auth security.EncodedJwt) (size int64, err error) { + limitedReader io.Reader, fileName string, contentType string, fileId string, auth 
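Because doUpload now hands back the full *operation.UploadResult instead of just a byte count, the auto-chunking loop can record the per-chunk ETag and CipherKey directly in the chunk metadata, so every chunk carries its own key. The mapping is essentially the helper below; the package and function names are illustrative, while the fields mirror the FileChunk literal above:

package example

import (
	"time"

	"github.com/chrislusf/seaweedfs/weed/operation"
	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
)

// toFileChunk mirrors what doAutoChunk stores for each uploaded chunk.
func toFileChunk(fileId string, offset int64, res *operation.UploadResult) *filer_pb.FileChunk {
	return &filer_pb.FileChunk{
		FileId:    fileId,
		Offset:    offset,
		Size:      uint64(res.Size),
		Mtime:     time.Now().UnixNano(),
		ETag:      res.ETag,
		CipherKey: res.CipherKey, // nil when the filer runs without the cipher option
	}
}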
security.EncodedJwt) (*operation.UploadResult, error) { stats.FilerRequestCounter.WithLabelValues("postAutoChunkUpload").Inc() start := time.Now() @@ -182,9 +184,5 @@ func (fs *FilerServer) doUpload(urlLocation string, w http.ResponseWriter, r *ht stats.FilerRequestHistogram.WithLabelValues("postAutoChunkUpload").Observe(time.Since(start).Seconds()) }() - uploadResult, uploadError := operation.Upload(urlLocation, fileName, fs.option.Cipher, limitedReader, false, contentType, nil, auth) - if uploadError != nil { - return 0, uploadError - } - return int64(uploadResult.Size), nil + return operation.Upload(urlLocation, fileName, fs.option.Cipher, limitedReader, false, contentType, nil, auth) } diff --git a/weed/server/filer_server_handlers_write_cipher.go b/weed/server/filer_server_handlers_write_cipher.go new file mode 100644 index 000000000..e65915971 --- /dev/null +++ b/weed/server/filer_server_handlers_write_cipher.go @@ -0,0 +1,103 @@ +package weed_server + +import ( + "bytes" + "context" + "fmt" + "net/http" + "strings" + "time" + + "github.com/chrislusf/seaweedfs/weed/filer2" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/operation" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/storage/needle" + "github.com/chrislusf/seaweedfs/weed/util" +) + +// handling single chunk POST or PUT upload +func (fs *FilerServer) encrypt(ctx context.Context, w http.ResponseWriter, r *http.Request, + replication string, collection string, dataCenter string) (filerResult *FilerPostResult, err error) { + + fileId, urlLocation, auth, err := fs.assignNewFileInfo(w, r, replication, collection, dataCenter) + + if err != nil || fileId == "" || urlLocation == "" { + return nil, fmt.Errorf("fail to allocate volume for %s, collection:%s, datacenter:%s", r.URL.Path, collection, dataCenter) + } + + glog.V(4).Infof("write %s to %v", r.URL.Path, urlLocation) + + // Note: gzip(cipher(data)), cipher data first, then gzip + + sizeLimit := int64(fs.option.MaxMB) * 1024 * 1024 + + pu, err := needle.ParseUpload(r, sizeLimit) + data := pu.Data + uncompressedData := pu.Data + cipherKey := util.GenCipherKey() + if pu.IsGzipped { + uncompressedData = pu.UncompressedData + data, err = util.Encrypt(pu.UncompressedData, cipherKey) + if err != nil { + return nil, fmt.Errorf("encrypt input: %v", err) + } + } + if pu.MimeType == "" { + pu.MimeType = http.DetectContentType(uncompressedData) + } + + uploadResult, uploadError := operation.Upload(urlLocation, pu.FileName, true, bytes.NewReader(data), pu.IsGzipped, "", pu.PairMap, auth) + if uploadError != nil { + return nil, fmt.Errorf("upload to volume server: %v", uploadError) + } + + // Save to chunk manifest structure + fileChunks := []*filer_pb.FileChunk{ + { + FileId: fileId, + Offset: 0, + Size: uint64(uploadResult.Size), + Mtime: time.Now().UnixNano(), + ETag: uploadResult.ETag, + CipherKey: uploadResult.CipherKey, + }, + } + + path := r.URL.Path + if strings.HasSuffix(path, "/") { + if pu.FileName != "" { + path += pu.FileName + } + } + + entry := &filer2.Entry{ + FullPath: filer2.FullPath(path), + Attr: filer2.Attr{ + Mtime: time.Now(), + Crtime: time.Now(), + Mode: 0660, + Uid: OS_UID, + Gid: OS_GID, + Replication: replication, + Collection: collection, + TtlSec: int32(util.ParseInt(r.URL.Query().Get("ttl"), 0)), + Mime: pu.MimeType, + }, + Chunks: fileChunks, + } + + filerResult = &FilerPostResult{ + Name: pu.FileName, + Size: int64(pu.OriginalDataSize), + } + + if dbErr := 
fs.filer.CreateEntry(ctx, entry, false); dbErr != nil { + fs.filer.DeleteChunks(entry.Chunks) + err = dbErr + filerResult.Error = dbErr.Error() + return + } + + return +} diff --git a/weed/storage/needle/needle.go b/weed/storage/needle/needle.go index 022e8bf14..f906062de 100644 --- a/weed/storage/needle/needle.go +++ b/weed/storage/needle/needle.go @@ -3,8 +3,6 @@ package needle import ( "encoding/json" "fmt" - "io" - "io/ioutil" "net/http" "strconv" "strings" @@ -12,7 +10,6 @@ import ( "github.com/chrislusf/seaweedfs/weed/images" . "github.com/chrislusf/seaweedfs/weed/storage/types" - "github.com/chrislusf/seaweedfs/weed/util" ) const ( @@ -51,67 +48,30 @@ func (n *Needle) String() (str string) { return } -func ParseUpload(r *http.Request, sizeLimit int64) ( - fileName string, data []byte, mimeType string, pairMap map[string]string, isGzipped bool, originalDataSize int, - modifiedTime uint64, ttl *TTL, isChunkedFile bool, e error) { - pairMap = make(map[string]string) - for k, v := range r.Header { - if len(v) > 0 && strings.HasPrefix(k, PairNamePrefix) { - pairMap[k] = v[0] - } - } - - if r.Method == "POST" { - fileName, data, mimeType, isGzipped, originalDataSize, isChunkedFile, e = parseMultipart(r, sizeLimit) - } else { - isGzipped = r.Header.Get("Content-Encoding") == "gzip" - mimeType = r.Header.Get("Content-Type") - fileName = "" - data, e = ioutil.ReadAll(io.LimitReader(r.Body, sizeLimit+1)) - originalDataSize = len(data) - if e == io.EOF || int64(originalDataSize) == sizeLimit+1 { - io.Copy(ioutil.Discard, r.Body) - } - r.Body.Close() - if isGzipped { - if unzipped, e := util.UnGzipData(data); e == nil { - originalDataSize = len(unzipped) - } - } else if shouldGzip, _ := util.IsGzippableFileType("", mimeType); shouldGzip { - if compressedData, err := util.GzipData(data); err == nil { - data = compressedData - isGzipped = true - } - } - } - if e != nil { - return - } - modifiedTime, _ = strconv.ParseUint(r.FormValue("ts"), 10, 64) - ttl, _ = ReadTTL(r.FormValue("ttl")) - - return -} func CreateNeedleFromRequest(r *http.Request, fixJpgOrientation bool, sizeLimit int64) (n *Needle, originalSize int, e error) { - var pairMap map[string]string - fname, mimeType, isGzipped, isChunkedFile := "", "", false, false n = new(Needle) - fname, n.Data, mimeType, pairMap, isGzipped, originalSize, n.LastModified, n.Ttl, isChunkedFile, e = ParseUpload(r, sizeLimit) + pu, e := ParseUpload(r, sizeLimit) if e != nil { return } - if len(fname) < 256 { - n.Name = []byte(fname) + n.Data = pu.Data + originalSize = pu.OriginalDataSize + n.LastModified = pu.ModifiedTime + n.Ttl = pu.Ttl + + + if len(pu.FileName) < 256 { + n.Name = []byte(pu.FileName) n.SetHasName() } - if len(mimeType) < 256 { - n.Mime = []byte(mimeType) + if len(pu.MimeType) < 256 { + n.Mime = []byte(pu.MimeType) n.SetHasMime() } - if len(pairMap) != 0 { + if len(pu.PairMap) != 0 { trimmedPairMap := make(map[string]string) - for k, v := range pairMap { + for k, v := range pu.PairMap { trimmedPairMap[k[len(PairNamePrefix):]] = v } @@ -122,7 +82,7 @@ func CreateNeedleFromRequest(r *http.Request, fixJpgOrientation bool, sizeLimit n.SetHasPairs() } } - if isGzipped { + if pu.IsGzipped { n.SetGzipped() } if n.LastModified == 0 { @@ -133,13 +93,13 @@ func CreateNeedleFromRequest(r *http.Request, fixJpgOrientation bool, sizeLimit n.SetHasTtl() } - if isChunkedFile { + if pu.IsChunkedFile { n.SetIsChunkManifest() } if fixJpgOrientation { - loweredName := strings.ToLower(fname) - if mimeType == "image/jpeg" || strings.HasSuffix(loweredName, 
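End to end, the new encrypt handler means an HTTP client never sees the cipher: it writes and reads clear text through the filer, while the volume server only ever stores the sealed chunk and the per-chunk key lives in the filer metadata. A rough client-side sketch, assuming a filer with the cipher option enabled is listening on localhost:8888; the host, port, path and content are all illustrative:

package main

import (
	"bytes"
	"fmt"
	"io/ioutil"
	"log"
	"net/http"
)

func main() {
	const fileUrl = "http://localhost:8888/private/notes.txt"

	// single-chunk PUT of raw content; a multipart POST goes through the same handler
	req, err := http.NewRequest("PUT", fileUrl, bytes.NewReader([]byte("secret content")))
	if err != nil {
		log.Fatal(err)
	}
	req.Header.Set("Content-Type", "text/plain")
	putResp, err := http.DefaultClient.Do(req)
	if err != nil {
		log.Fatal(err)
	}
	putResp.Body.Close()

	// reading back through the filer returns the decrypted bytes
	getResp, err := http.Get(fileUrl)
	if err != nil {
		log.Fatal(err)
	}
	defer getResp.Body.Close()
	data, _ := ioutil.ReadAll(getResp.Body)
	fmt.Printf("%s\n", data) // secret content
}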
".jpg") || strings.HasSuffix(loweredName, ".jpeg") { + loweredName := strings.ToLower(pu.FileName) + if pu.MimeType == "image/jpeg" || strings.HasSuffix(loweredName, ".jpg") || strings.HasSuffix(loweredName, ".jpeg") { n.Data = images.FixJpgOrientation(n.Data) } } diff --git a/weed/storage/needle/needle_parse_multipart.go b/weed/storage/needle/needle_parse_multipart.go deleted file mode 100644 index 8c9032f5f..000000000 --- a/weed/storage/needle/needle_parse_multipart.go +++ /dev/null @@ -1,118 +0,0 @@ -package needle - -import ( - "fmt" - "io" - "io/ioutil" - "mime" - "net/http" - "path" - "strconv" - "strings" - - "github.com/chrislusf/seaweedfs/weed/glog" - "github.com/chrislusf/seaweedfs/weed/util" -) - -func parseMultipart(r *http.Request, sizeLimit int64) ( - fileName string, data []byte, mimeType string, isGzipped bool, originalDataSize int, isChunkedFile bool, e error) { - defer func() { - if e != nil && r.Body != nil { - io.Copy(ioutil.Discard, r.Body) - r.Body.Close() - } - }() - form, fe := r.MultipartReader() - if fe != nil { - glog.V(0).Infoln("MultipartReader [ERROR]", fe) - e = fe - return - } - - //first multi-part item - part, fe := form.NextPart() - if fe != nil { - glog.V(0).Infoln("Reading Multi part [ERROR]", fe) - e = fe - return - } - - fileName = part.FileName() - if fileName != "" { - fileName = path.Base(fileName) - } - - data, e = ioutil.ReadAll(io.LimitReader(part, sizeLimit+1)) - if e != nil { - glog.V(0).Infoln("Reading Content [ERROR]", e) - return - } - if len(data) == int(sizeLimit)+1 { - e = fmt.Errorf("file over the limited %d bytes", sizeLimit) - return - } - - //if the filename is empty string, do a search on the other multi-part items - for fileName == "" { - part2, fe := form.NextPart() - if fe != nil { - break // no more or on error, just safely break - } - - fName := part2.FileName() - - //found the first multi-part has filename - if fName != "" { - data2, fe2 := ioutil.ReadAll(io.LimitReader(part2, sizeLimit+1)) - if fe2 != nil { - glog.V(0).Infoln("Reading Content [ERROR]", fe2) - e = fe2 - return - } - if len(data) == int(sizeLimit)+1 { - e = fmt.Errorf("file over the limited %d bytes", sizeLimit) - return - } - - //update - data = data2 - fileName = path.Base(fName) - break - } - } - - originalDataSize = len(data) - - isChunkedFile, _ = strconv.ParseBool(r.FormValue("cm")) - - if !isChunkedFile { - - dotIndex := strings.LastIndex(fileName, ".") - ext, mtype := "", "" - if dotIndex > 0 { - ext = strings.ToLower(fileName[dotIndex:]) - mtype = mime.TypeByExtension(ext) - } - contentType := part.Header.Get("Content-Type") - if contentType != "" && mtype != contentType { - mimeType = contentType //only return mime type if not deductable - mtype = contentType - } - - if part.Header.Get("Content-Encoding") == "gzip" { - if unzipped, e := util.UnGzipData(data); e == nil { - originalDataSize = len(unzipped) - } - isGzipped = true - } else if util.IsGzippable(ext, mtype, data) { - if compressedData, err := util.GzipData(data); err == nil { - if len(data) > len(compressedData) { - data = compressedData - isGzipped = true - } - } - } - } - - return -} diff --git a/weed/storage/needle/needle_parse_upload.go b/weed/storage/needle/needle_parse_upload.go new file mode 100644 index 000000000..85526aaa8 --- /dev/null +++ b/weed/storage/needle/needle_parse_upload.go @@ -0,0 +1,166 @@ +package needle + +import ( + "fmt" + "io" + "io/ioutil" + "mime" + "net/http" + "path" + "strconv" + "strings" + + "github.com/chrislusf/seaweedfs/weed/glog" + 
"github.com/chrislusf/seaweedfs/weed/util" +) + +type ParsedUpload struct { + FileName string + Data []byte + MimeType string + PairMap map[string]string + IsGzipped bool + OriginalDataSize int + ModifiedTime uint64 + Ttl *TTL + IsChunkedFile bool + UncompressedData []byte +} + +func ParseUpload(r *http.Request, sizeLimit int64) (pu *ParsedUpload, e error) { + pu = &ParsedUpload{} + pu.PairMap = make(map[string]string) + for k, v := range r.Header { + if len(v) > 0 && strings.HasPrefix(k, PairNamePrefix) { + pu.PairMap[k] = v[0] + } + } + + if r.Method == "POST" { + e = parseMultipart(r, sizeLimit, pu) + } else { + e = parsePut(r, sizeLimit, pu) + } + if e != nil { + return + } + + pu.ModifiedTime, _ = strconv.ParseUint(r.FormValue("ts"), 10, 64) + pu.Ttl, _ = ReadTTL(r.FormValue("ttl")) + + pu.OriginalDataSize = len(pu.Data) + pu.UncompressedData = pu.Data + if pu.IsGzipped { + if unzipped, e := util.UnGzipData(pu.Data); e == nil { + pu.OriginalDataSize = len(unzipped) + pu.UncompressedData = unzipped + } + } else if shouldGzip, _ := util.IsGzippableFileType("", pu.MimeType); shouldGzip { + if compressedData, err := util.GzipData(pu.Data); err == nil { + pu.Data = compressedData + pu.IsGzipped = true + } + } + + return +} + +func parsePut(r *http.Request, sizeLimit int64, pu *ParsedUpload) (e error) { + pu.IsGzipped = r.Header.Get("Content-Encoding") == "gzip" + pu.MimeType = r.Header.Get("Content-Type") + pu.FileName = "" + pu.Data, e = ioutil.ReadAll(io.LimitReader(r.Body, sizeLimit+1)) + if e == io.EOF || int64(pu.OriginalDataSize) == sizeLimit+1 { + io.Copy(ioutil.Discard, r.Body) + } + r.Body.Close() + return nil +} + +func parseMultipart(r *http.Request, sizeLimit int64, pu *ParsedUpload) (e error) { + defer func() { + if e != nil && r.Body != nil { + io.Copy(ioutil.Discard, r.Body) + r.Body.Close() + } + }() + form, fe := r.MultipartReader() + if fe != nil { + glog.V(0).Infoln("MultipartReader [ERROR]", fe) + e = fe + return + } + + //first multi-part item + part, fe := form.NextPart() + if fe != nil { + glog.V(0).Infoln("Reading Multi part [ERROR]", fe) + e = fe + return + } + + pu.FileName = part.FileName() + if pu.FileName != "" { + pu.FileName = path.Base(pu.FileName) + } + + pu.Data, e = ioutil.ReadAll(io.LimitReader(part, sizeLimit+1)) + if e != nil { + glog.V(0).Infoln("Reading Content [ERROR]", e) + return + } + if len(pu.Data) == int(sizeLimit)+1 { + e = fmt.Errorf("file over the limited %d bytes", sizeLimit) + return + } + + //if the filename is empty string, do a search on the other multi-part items + for pu.FileName == "" { + part2, fe := form.NextPart() + if fe != nil { + break // no more or on error, just safely break + } + + fName := part2.FileName() + + //found the first multi-part has filename + if fName != "" { + data2, fe2 := ioutil.ReadAll(io.LimitReader(part2, sizeLimit+1)) + if fe2 != nil { + glog.V(0).Infoln("Reading Content [ERROR]", fe2) + e = fe2 + return + } + if len(data2) == int(sizeLimit)+1 { + e = fmt.Errorf("file over the limited %d bytes", sizeLimit) + return + } + + //update + pu.Data = data2 + pu.FileName = path.Base(fName) + break + } + } + + pu.IsChunkedFile, _ = strconv.ParseBool(r.FormValue("cm")) + + if !pu.IsChunkedFile { + + dotIndex := strings.LastIndex(pu.FileName, ".") + ext, mtype := "", "" + if dotIndex > 0 { + ext = strings.ToLower(pu.FileName[dotIndex:]) + mtype = mime.TypeByExtension(ext) + } + contentType := part.Header.Get("Content-Type") + if contentType != "" && contentType != "application/octet-stream" && mtype != contentType { 
+ pu.MimeType = contentType //only return mime type if not deductable + mtype = contentType + } + + pu.IsGzipped = part.Header.Get("Content-Encoding") == "gzip" + } + + return +} diff --git a/weed/util/cipher.go b/weed/util/cipher.go index f044c2ca3..7bcb6559a 100644 --- a/weed/util/cipher.go +++ b/weed/util/cipher.go @@ -1,11 +1,14 @@ package util import ( + "bytes" "crypto/aes" "crypto/cipher" "crypto/rand" "errors" + "fmt" "io" + "io/ioutil" "github.com/chrislusf/seaweedfs/weed/glog" ) @@ -58,3 +61,21 @@ func Decrypt(ciphertext []byte, key CipherKey) ([]byte, error) { nonce, ciphertext := ciphertext[:nonceSize], ciphertext[nonceSize:] return gcm.Open(nil, nonce, ciphertext, nil) } + +func EncryptReader(clearReader io.Reader) (cipherKey CipherKey, encryptedReader io.ReadCloser, clearDataLen, encryptedDataLen int, err error) { + clearData, err := ioutil.ReadAll(clearReader) + if err != nil { + err = fmt.Errorf("read raw input: %v", err) + return + } + clearDataLen = len(clearData) + cipherKey = GenCipherKey() + encryptedData, err := Encrypt(clearData, cipherKey) + if err != nil { + err = fmt.Errorf("encrypt input: %v", err) + return + } + encryptedDataLen = len(encryptedData) + encryptedReader = ioutil.NopCloser(bytes.NewReader(encryptedData)) + return +} From dba35404e4951a2c428acd139d015b08de7ffd19 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Sat, 7 Mar 2020 06:07:25 -0800 Subject: [PATCH 0196/2432] filer: HEAD response add content type --- weed/server/filer_server_handlers_read.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/weed/server/filer_server_handlers_read.go b/weed/server/filer_server_handlers_read.go index 796fd9c1c..ca687ca5d 100644 --- a/weed/server/filer_server_handlers_read.go +++ b/weed/server/filer_server_handlers_read.go @@ -70,6 +70,9 @@ func (fs *FilerServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request, if r.Method == "HEAD" { w.Header().Set("Content-Length", strconv.FormatInt(int64(filer2.TotalSize(entry.Chunks)), 10)) w.Header().Set("Last-Modified", entry.Attr.Mtime.Format(http.TimeFormat)) + if entry.Attr.Mime != "" { + w.Header().Set("Content-Type", entry.Attr.Mime) + } setEtag(w, filer2.ETag(entry.Chunks)) return } From 8c3e25b38a66629cf4ef38bb1b7f09daab955bd6 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Sat, 7 Mar 2020 06:07:56 -0800 Subject: [PATCH 0197/2432] mimee type default "application/octet-stream" to empty string --- weed/command/filer_copy.go | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/weed/command/filer_copy.go b/weed/command/filer_copy.go index 8c01cfd74..fa948249a 100644 --- a/weed/command/filer_copy.go +++ b/weed/command/filer_copy.go @@ -493,9 +493,12 @@ func detectMimeType(f *os.File) string { } if err != nil { fmt.Printf("read head of %v: %v\n", f.Name(), err) - return "application/octet-stream" + return "" } f.Seek(0, io.SeekStart) mimeType := http.DetectContentType(head[:n]) + if mimeType == "application/octet-stream" { + return "" + } return mimeType } From 564629444befcdd7f71dade1713199b212243226 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Sat, 7 Mar 2020 06:12:20 -0800 Subject: [PATCH 0198/2432] format --- weed/topology/volume_growth_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/weed/topology/volume_growth_test.go b/weed/topology/volume_growth_test.go index bca6a8fc7..83671cd2b 100644 --- a/weed/topology/volume_growth_test.go +++ b/weed/topology/volume_growth_test.go @@ -328,7 +328,7 @@ func TestFindEmptySlotsForOneVolumeScheduleByWeight(t *testing.T) { 
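needle.ParseUpload now returns a single *ParsedUpload instead of ten separate values, which is what lets the volume server, the filer and the cipher path share one parsing routine. A minimal sketch of a caller outside the volume server, assuming only the exported ParseUpload and ParsedUpload shown above; the handler, port and size limit are illustrative:

package main

import (
	"fmt"
	"net/http"

	"github.com/chrislusf/seaweedfs/weed/storage/needle"
)

// uploadHandler expects either a multipart POST or a raw PUT body.
func uploadHandler(w http.ResponseWriter, r *http.Request) {
	pu, err := needle.ParseUpload(r, 32*1024*1024) // 32MB limit, illustrative
	if err != nil {
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	// pu.Data may have been gzipped for storage; pu.UncompressedData keeps the clear bytes
	fmt.Fprintf(w, "received %s (%d bytes, mime %q, gzipped=%v)\n",
		pu.FileName, pu.OriginalDataSize, pu.MimeType, pu.IsGzipped)
}

func main() {
	http.HandleFunc("/upload", uploadHandler)
	http.ListenAndServe(":8080", nil)
}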
t.Fail() } for _, server := range servers { - fmt.Println("assigned node :", server.Id()) + // fmt.Println("assigned node :", server.Id()) if _, ok := distribution[server.id]; !ok { distribution[server.id] = 0 } @@ -337,6 +337,6 @@ func TestFindEmptySlotsForOneVolumeScheduleByWeight(t *testing.T) { } for k, v := range distribution { - fmt.Println(k, "%s : %d", k, v) + fmt.Printf("%s : %d\n", k, v) } } \ No newline at end of file From 1ae83c29389c64fedabaa5e401154c5b57add40c Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Sat, 7 Mar 2020 06:12:57 -0800 Subject: [PATCH 0199/2432] go fmt --- weed/command/mount.go | 1 - weed/command/msg_broker.go | 6 ++--- weed/pb/master_pb/master.pb.go | 20 +++++++++------ weed/pb/volume_server_pb/volume_server.pb.go | 26 ++++++++++++-------- weed/storage/needle/needle.go | 2 -- weed/topology/node.go | 4 +-- weed/topology/volume_growth_test.go | 4 +-- 7 files changed, 35 insertions(+), 28 deletions(-) diff --git a/weed/command/mount.go b/weed/command/mount.go index 792845083..f1448c6cc 100644 --- a/weed/command/mount.go +++ b/weed/command/mount.go @@ -62,4 +62,3 @@ var cmdMount = &Command{ `, } - diff --git a/weed/command/msg_broker.go b/weed/command/msg_broker.go index 21551df9e..3e13b4730 100644 --- a/weed/command/msg_broker.go +++ b/weed/command/msg_broker.go @@ -23,9 +23,9 @@ var ( ) type QueueOptions struct { - filer *string - port *int - defaultTtl *string + filer *string + port *int + defaultTtl *string } func init() { diff --git a/weed/pb/master_pb/master.pb.go b/weed/pb/master_pb/master.pb.go index c33e2b768..95c9533a1 100644 --- a/weed/pb/master_pb/master.pb.go +++ b/weed/pb/master_pb/master.pb.go @@ -428,10 +428,12 @@ type VolumeEcShardInformationMessage struct { EcIndexBits uint32 `protobuf:"varint,3,opt,name=ec_index_bits,json=ecIndexBits" json:"ec_index_bits,omitempty"` } -func (m *VolumeEcShardInformationMessage) Reset() { *m = VolumeEcShardInformationMessage{} } -func (m *VolumeEcShardInformationMessage) String() string { return proto.CompactTextString(m) } -func (*VolumeEcShardInformationMessage) ProtoMessage() {} -func (*VolumeEcShardInformationMessage) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } +func (m *VolumeEcShardInformationMessage) Reset() { *m = VolumeEcShardInformationMessage{} } +func (m *VolumeEcShardInformationMessage) String() string { return proto.CompactTextString(m) } +func (*VolumeEcShardInformationMessage) ProtoMessage() {} +func (*VolumeEcShardInformationMessage) Descriptor() ([]byte, []int) { + return fileDescriptor0, []int{4} +} func (m *VolumeEcShardInformationMessage) GetId() uint32 { if m != nil { @@ -1422,10 +1424,12 @@ type GetMasterConfigurationResponse struct { MetricsIntervalSeconds uint32 `protobuf:"varint,2,opt,name=metrics_interval_seconds,json=metricsIntervalSeconds" json:"metrics_interval_seconds,omitempty"` } -func (m *GetMasterConfigurationResponse) Reset() { *m = GetMasterConfigurationResponse{} } -func (m *GetMasterConfigurationResponse) String() string { return proto.CompactTextString(m) } -func (*GetMasterConfigurationResponse) ProtoMessage() {} -func (*GetMasterConfigurationResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{32} } +func (m *GetMasterConfigurationResponse) Reset() { *m = GetMasterConfigurationResponse{} } +func (m *GetMasterConfigurationResponse) String() string { return proto.CompactTextString(m) } +func (*GetMasterConfigurationResponse) ProtoMessage() {} +func (*GetMasterConfigurationResponse) Descriptor() ([]byte, []int) { + return 
fileDescriptor0, []int{32} +} func (m *GetMasterConfigurationResponse) GetMetricsAddress() string { if m != nil { diff --git a/weed/pb/volume_server_pb/volume_server.pb.go b/weed/pb/volume_server_pb/volume_server.pb.go index 56baa0cf7..588b18f2e 100644 --- a/weed/pb/volume_server_pb/volume_server.pb.go +++ b/weed/pb/volume_server_pb/volume_server.pb.go @@ -1035,10 +1035,12 @@ func (m *VolumeEcShardsGenerateRequest) GetCollection() string { type VolumeEcShardsGenerateResponse struct { } -func (m *VolumeEcShardsGenerateResponse) Reset() { *m = VolumeEcShardsGenerateResponse{} } -func (m *VolumeEcShardsGenerateResponse) String() string { return proto.CompactTextString(m) } -func (*VolumeEcShardsGenerateResponse) ProtoMessage() {} -func (*VolumeEcShardsGenerateResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{41} } +func (m *VolumeEcShardsGenerateResponse) Reset() { *m = VolumeEcShardsGenerateResponse{} } +func (m *VolumeEcShardsGenerateResponse) String() string { return proto.CompactTextString(m) } +func (*VolumeEcShardsGenerateResponse) ProtoMessage() {} +func (*VolumeEcShardsGenerateResponse) Descriptor() ([]byte, []int) { + return fileDescriptor0, []int{41} +} type VolumeEcShardsRebuildRequest struct { VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` @@ -1411,10 +1413,12 @@ func (m *VolumeEcShardsToVolumeRequest) GetCollection() string { type VolumeEcShardsToVolumeResponse struct { } -func (m *VolumeEcShardsToVolumeResponse) Reset() { *m = VolumeEcShardsToVolumeResponse{} } -func (m *VolumeEcShardsToVolumeResponse) String() string { return proto.CompactTextString(m) } -func (*VolumeEcShardsToVolumeResponse) ProtoMessage() {} -func (*VolumeEcShardsToVolumeResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{57} } +func (m *VolumeEcShardsToVolumeResponse) Reset() { *m = VolumeEcShardsToVolumeResponse{} } +func (m *VolumeEcShardsToVolumeResponse) String() string { return proto.CompactTextString(m) } +func (*VolumeEcShardsToVolumeResponse) ProtoMessage() {} +func (*VolumeEcShardsToVolumeResponse) Descriptor() ([]byte, []int) { + return fileDescriptor0, []int{57} +} type ReadVolumeFileStatusRequest struct { VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` @@ -2081,8 +2085,10 @@ type QueryRequest_InputSerialization_JSONInput struct { func (m *QueryRequest_InputSerialization_JSONInput) Reset() { *m = QueryRequest_InputSerialization_JSONInput{} } -func (m *QueryRequest_InputSerialization_JSONInput) String() string { return proto.CompactTextString(m) } -func (*QueryRequest_InputSerialization_JSONInput) ProtoMessage() {} +func (m *QueryRequest_InputSerialization_JSONInput) String() string { + return proto.CompactTextString(m) +} +func (*QueryRequest_InputSerialization_JSONInput) ProtoMessage() {} func (*QueryRequest_InputSerialization_JSONInput) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{70, 1, 1} } diff --git a/weed/storage/needle/needle.go b/weed/storage/needle/needle.go index f906062de..d3969e868 100644 --- a/weed/storage/needle/needle.go +++ b/weed/storage/needle/needle.go @@ -48,7 +48,6 @@ func (n *Needle) String() (str string) { return } - func CreateNeedleFromRequest(r *http.Request, fixJpgOrientation bool, sizeLimit int64) (n *Needle, originalSize int, e error) { n = new(Needle) pu, e := ParseUpload(r, sizeLimit) @@ -60,7 +59,6 @@ func CreateNeedleFromRequest(r *http.Request, fixJpgOrientation bool, sizeLimit n.LastModified = 
pu.ModifiedTime n.Ttl = pu.Ttl - if len(pu.FileName) < 256 { n.Name = []byte(pu.FileName) n.SetHasName() diff --git a/weed/topology/node.go b/weed/topology/node.go index 501a22623..ceeb96d60 100644 --- a/weed/topology/node.go +++ b/weed/topology/node.go @@ -85,11 +85,11 @@ func (n *NodeImpl) PickNodesByWeight(numberOfNodes int, filterFirstNodeFn func(d //pick nodes randomly by weights, the node picked earlier has higher final weights sortedCandidates := make([]Node, 0, len(candidates)) - for i:=0; i=lastWeights) && (weightsInterval= lastWeights) && (weightsInterval < lastWeights+weights) { sortedCandidates = append(sortedCandidates, candidates[k]) candidatesWeights[k] = 0 totalWeights -= weights diff --git a/weed/topology/volume_growth_test.go b/weed/topology/volume_growth_test.go index 83671cd2b..6ff5be0eb 100644 --- a/weed/topology/volume_growth_test.go +++ b/weed/topology/volume_growth_test.go @@ -321,7 +321,7 @@ func TestFindEmptySlotsForOneVolumeScheduleByWeight(t *testing.T) { distribution := map[NodeId]int{} // assign 1000 volumes - for i:=0;i<1000 ;i++ { + for i := 0; i < 1000; i++ { servers, err := vg.findEmptySlotsForOneVolume(topo, volumeGrowOption) if err != nil { fmt.Println("finding empty slots error :", err) @@ -339,4 +339,4 @@ func TestFindEmptySlotsForOneVolumeScheduleByWeight(t *testing.T) { for k, v := range distribution { fmt.Printf("%s : %d\n", k, v) } -} \ No newline at end of file +} From 0375ce2c2e77c2529569e8d5289f54434be09348 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Sat, 7 Mar 2020 07:25:15 -0800 Subject: [PATCH 0200/2432] filer: set mime type from volume server --- weed/operation/upload_content.go | 1 + weed/server/filer_server_handlers_write.go | 7 +++++-- weed/server/volume_server_handlers_write.go | 1 + 3 files changed, 7 insertions(+), 2 deletions(-) diff --git a/weed/operation/upload_content.go b/weed/operation/upload_content.go index 884933f18..a6dde973f 100644 --- a/weed/operation/upload_content.go +++ b/weed/operation/upload_content.go @@ -27,6 +27,7 @@ type UploadResult struct { Error string `json:"error,omitempty"` ETag string `json:"eTag,omitempty"` CipherKey []byte `json:"cipherKey,omitempty"` + Mime string `json:"mime,omitempty"` } var ( diff --git a/weed/server/filer_server_handlers_write.go b/weed/server/filer_server_handlers_write.go index b36333447..01f46a53b 100644 --- a/weed/server/filer_server_handlers_write.go +++ b/weed/server/filer_server_handlers_write.go @@ -186,6 +186,7 @@ func (fs *FilerServer) updateFilerStore(ctx context.Context, r *http.Request, w Replication: replication, Collection: collection, TtlSec: int32(util.ParseInt(r.URL.Query().Get("ttl"), 0)), + Mime: ret.Mime, }, Chunks: []*filer_pb.FileChunk{{ FileId: fileId, @@ -194,8 +195,10 @@ func (fs *FilerServer) updateFilerStore(ctx context.Context, r *http.Request, w ETag: ret.ETag, }}, } - if ext := filenamePath.Ext(path); ext != "" { - entry.Attr.Mime = mime.TypeByExtension(ext) + if entry.Attr.Mime == "" { + if ext := filenamePath.Ext(path); ext != "" { + entry.Attr.Mime = mime.TypeByExtension(ext) + } } // glog.V(4).Infof("saving %s => %+v", path, entry) if dbErr := fs.filer.CreateEntry(ctx, entry, false); dbErr != nil { diff --git a/weed/server/volume_server_handlers_write.go b/weed/server/volume_server_handlers_write.go index b6a242641..101be4c43 100644 --- a/weed/server/volume_server_handlers_write.go +++ b/weed/server/volume_server_handlers_write.go @@ -68,6 +68,7 @@ func (vs *VolumeServer) PostHandler(w http.ResponseWriter, r *http.Request) { } ret.Size = 
uint32(originalSize) ret.ETag = needle.Etag() + ret.Mime = string(needle.Mime) setEtag(w, ret.ETag) writeJsonQuiet(w, r, httpStatus, ret) } From 9228ff192cb5c28833cfd4c3a9bb7828924425ee Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Sat, 7 Mar 2020 11:08:57 -0800 Subject: [PATCH 0201/2432] encryption works --- unmaintained/repeated_vacuum/repeated_vacuum.go | 2 +- weed/operation/upload_content.go | 2 +- weed/server/filer_server_handlers_write_cipher.go | 8 +------- 3 files changed, 3 insertions(+), 9 deletions(-) diff --git a/unmaintained/repeated_vacuum/repeated_vacuum.go b/unmaintained/repeated_vacuum/repeated_vacuum.go index 4a0464eda..ebe5d8225 100644 --- a/unmaintained/repeated_vacuum/repeated_vacuum.go +++ b/unmaintained/repeated_vacuum/repeated_vacuum.go @@ -35,7 +35,7 @@ func main() { targetUrl := fmt.Sprintf("http://%s/%s", assignResult.Url, assignResult.Fid) - _, err = operation.Upload(targetUrl, fmt.Sprintf("test%d", i), false, reader, false, "", nil, assignResult.Auth) + _, err = operation.Upload(targetUrl, fmt.Sprintf("test%d", i), false, reader, false, "bench/test", nil, assignResult.Auth) if err != nil { log.Fatalf("upload: %v", err) } diff --git a/weed/operation/upload_content.go b/weed/operation/upload_content.go index a6dde973f..0ea39e306 100644 --- a/weed/operation/upload_content.go +++ b/weed/operation/upload_content.go @@ -62,7 +62,7 @@ func doUpload(uploadUrl string, filename string, cipher bool, reader io.Reader, contentIsGzipped := isGzipped shouldGzipNow := false if !isGzipped { - if shouldBeZipped, iAmSure := util.IsGzippableFileType(filepath.Base(filename), mtype); iAmSure && shouldBeZipped { + if shouldBeZipped, iAmSure := util.IsGzippableFileType(filepath.Base(filename), mtype); mtype == "" || iAmSure && shouldBeZipped { shouldGzipNow = true contentIsGzipped = true } diff --git a/weed/server/filer_server_handlers_write_cipher.go b/weed/server/filer_server_handlers_write_cipher.go index e65915971..bd2b52fb3 100644 --- a/weed/server/filer_server_handlers_write_cipher.go +++ b/weed/server/filer_server_handlers_write_cipher.go @@ -33,21 +33,15 @@ func (fs *FilerServer) encrypt(ctx context.Context, w http.ResponseWriter, r *ht sizeLimit := int64(fs.option.MaxMB) * 1024 * 1024 pu, err := needle.ParseUpload(r, sizeLimit) - data := pu.Data uncompressedData := pu.Data - cipherKey := util.GenCipherKey() if pu.IsGzipped { uncompressedData = pu.UncompressedData - data, err = util.Encrypt(pu.UncompressedData, cipherKey) - if err != nil { - return nil, fmt.Errorf("encrypt input: %v", err) - } } if pu.MimeType == "" { pu.MimeType = http.DetectContentType(uncompressedData) } - uploadResult, uploadError := operation.Upload(urlLocation, pu.FileName, true, bytes.NewReader(data), pu.IsGzipped, "", pu.PairMap, auth) + uploadResult, uploadError := operation.Upload(urlLocation, pu.FileName, true, bytes.NewReader(uncompressedData), false, pu.MimeType, pu.PairMap, auth) if uploadError != nil { return nil, fmt.Errorf("upload to volume server: %v", uploadError) } From 936e7cdbfb02e719058fa341772f2f8afee2a9f1 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Sat, 7 Mar 2020 12:46:00 -0800 Subject: [PATCH 0202/2432] pass in filer.copy cipher option --- weed/command/filer_copy.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/weed/command/filer_copy.go b/weed/command/filer_copy.go index fa948249a..50a120875 100644 --- a/weed/command/filer_copy.go +++ b/weed/command/filer_copy.go @@ -413,7 +413,7 @@ func (worker *FileCopyWorker) uploadFileInChunks(task FileCopyTask, f *os.File, 
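With the cipher flag threaded all the way through, operation.Upload encrypts on the client side and reports the generated key in UploadResult.CipherKey; the caller has to persist that key (the filer keeps it on the FileChunk), otherwise the stored bytes cannot be decrypted later. A rough sketch of a direct upload against an already assigned file id; the target URL, file id, name and empty JWT are placeholders, since a real caller would get them from an Assign call against the master:

package main

import (
	"fmt"
	"log"
	"strings"

	"github.com/chrislusf/seaweedfs/weed/operation"
)

func main() {
	// placeholder volume server location and fid, normally returned by an assign request
	targetUrl := "http://127.0.0.1:8080/3,01637037d6"

	result, err := operation.Upload(targetUrl, "notes.txt", true /* cipher */, strings.NewReader("secret content"), false, "text/plain", nil, "")
	if err != nil {
		log.Fatalf("upload: %v", err)
	}
	fmt.Printf("etag=%s cipher key=%d bytes\n", result.ETag, len(result.CipherKey))
}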
replication = assignResult.Replication } - uploadResult, err := operation.Upload(targetUrl, fileName+"-"+strconv.FormatInt(i+1, 10), false, io.NewSectionReader(f, i*chunkSize, chunkSize), false, "", nil, security.EncodedJwt(assignResult.Auth)) + uploadResult, err := operation.Upload(targetUrl, fileName+"-"+strconv.FormatInt(i+1, 10), worker.options.cipher, io.NewSectionReader(f, i*chunkSize, chunkSize), false, "", nil, security.EncodedJwt(assignResult.Auth)) if err != nil { uploadError = fmt.Errorf("upload data %v to %s: %v\n", fileName, targetUrl, err) return From 8645283a7b8a50485390267be9f83b83707f6161 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Sat, 7 Mar 2020 16:51:46 -0800 Subject: [PATCH 0203/2432] fuse mount: avoid lookup nil entry fix https://github.com/chrislusf/seaweedfs/issues/1221 --- weed/filer2/filer_client_util.go | 4 ++-- weed/filesys/xattr.go | 9 +++---- weed/pb/filer_pb/filer_pb_helper.go | 24 +++++++++++++++++++ weed/replication/sink/filersink/filer_sink.go | 7 ++---- weed/s3api/filer_util.go | 11 +++++---- weed/s3api/s3api_bucket_handlers.go | 6 ++++- weed/shell/command_fs_cat.go | 6 +---- weed/shell/command_fs_meta_cat.go | 5 +--- weed/shell/command_fs_mv.go | 4 ++-- weed/shell/commands.go | 6 +---- 10 files changed, 47 insertions(+), 35 deletions(-) diff --git a/weed/filer2/filer_client_util.go b/weed/filer2/filer_client_util.go index e80c4bf36..04da240b6 100644 --- a/weed/filer2/filer_client_util.go +++ b/weed/filer2/filer_client_util.go @@ -102,9 +102,9 @@ func GetEntry(filerClient FilerClient, fullFilePath FullPath) (entry *filer_pb.E } // glog.V(3).Infof("read %s request: %v", fullFilePath, request) - resp, err := client.LookupDirectoryEntry(context.Background(), request) + resp, err := filer_pb.LookupEntry(client, request) if err != nil { - if err == ErrNotFound || strings.Contains(err.Error(), ErrNotFound.Error()) { + if err == ErrNotFound { return nil } glog.V(3).Infof("read %s %v: %v", fullFilePath, resp, err) diff --git a/weed/filesys/xattr.go b/weed/filesys/xattr.go index 3ccecdf98..993e7ad93 100644 --- a/weed/filesys/xattr.go +++ b/weed/filesys/xattr.go @@ -1,9 +1,6 @@ package filesys import ( - "context" - "strings" - "github.com/chrislusf/seaweedfs/weed/filer2" "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" @@ -124,9 +121,9 @@ func (wfs *WFS) maybeLoadEntry(dir, name string) (entry *filer_pb.Entry, err err Directory: dir, } - resp, err := client.LookupDirectoryEntry(context.Background(), request) - if err != nil || resp == nil || resp.Entry == nil { - if err == filer2.ErrNotFound || strings.Contains(err.Error(), filer2.ErrNotFound.Error()) { + resp, err := filer_pb.LookupEntry(client, request) + if err != nil { + if err == filer2.ErrNotFound { glog.V(3).Infof("file attr read not found file %v: %v", request, err) return fuse.ENOENT } diff --git a/weed/pb/filer_pb/filer_pb_helper.go b/weed/pb/filer_pb/filer_pb_helper.go index 408caadcd..b69a83354 100644 --- a/weed/pb/filer_pb/filer_pb_helper.go +++ b/weed/pb/filer_pb/filer_pb_helper.go @@ -3,7 +3,9 @@ package filer_pb import ( "context" "fmt" + "strings" + "github.com/chrislusf/seaweedfs/weed/filer2" "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/storage/needle" ) @@ -84,3 +86,25 @@ func CreateEntry(client SeaweedFilerClient, request *CreateEntryRequest) error { } return nil } + +func LookupEntry(client SeaweedFilerClient, request *LookupDirectoryEntryRequest) (*LookupDirectoryEntryResponse, error) { + resp, err := 
filer_pb.LookupEntry(client, request) + if err != nil { + if err == filer2.ErrNotFound || strings.Contains(err.Error(), ErrNotFound.Error()) { + return nil, filer2.ErrNotFound + } + glog.V(3).Infof("read %s/%v: %v", request.Directory, request.Entry.Name, err) + return nil, fmt.Errorf("LookupEntry1: %v", err) + } + if resp.Error != "" && strings.Contains(resp.Error, ErrNotFound.Error()) { + return nil, filer2.ErrNotFound + } + if resp.Error != "" { + glog.V(3).Infof("lookup %s/%v: %v", request.Directory, request.Entry.Name, err) + return nil, fmt.Errorf("LookupEntry2: %v", err) + } + if resp.Entry == nil { + return nil, filer2.ErrNotFound + } + return resp, nil +} diff --git a/weed/replication/sink/filersink/filer_sink.go b/weed/replication/sink/filersink/filer_sink.go index 6b82b90df..838c2c441 100644 --- a/weed/replication/sink/filersink/filer_sink.go +++ b/weed/replication/sink/filersink/filer_sink.go @@ -98,7 +98,7 @@ func (fs *FilerSink) CreateEntry(key string, entry *filer_pb.Entry) error { Name: name, } glog.V(1).Infof("lookup: %v", lookupRequest) - if resp, err := client.LookupDirectoryEntry(context.Background(), lookupRequest); err == nil && resp.Entry != nil { + if resp, err := filer_pb.LookupEntry(client, lookupRequest); err == nil { if filer2.ETag(resp.Entry.Chunks) == filer2.ETag(entry.Chunks) { glog.V(0).Infof("already replicated %s", key) return nil @@ -148,14 +148,11 @@ func (fs *FilerSink) UpdateEntry(key string, oldEntry *filer_pb.Entry, newParent } glog.V(4).Infof("lookup entry: %v", request) - resp, err := client.LookupDirectoryEntry(context.Background(), request) + resp, err := filer_pb.LookupEntry(client, request) if err != nil { glog.V(0).Infof("lookup %s: %v", key, err) return err } - if resp.Entry == nil { - return filer2.ErrNotFound - } existingEntry = resp.Entry diff --git a/weed/s3api/filer_util.go b/weed/s3api/filer_util.go index b94b30a87..d43ca8e5d 100644 --- a/weed/s3api/filer_util.go +++ b/weed/s3api/filer_util.go @@ -8,6 +8,7 @@ import ( "strings" "time" + "github.com/chrislusf/seaweedfs/weed/filer2" "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" ) @@ -202,15 +203,15 @@ func (s3a *S3ApiServer) exists(parentDirectoryPath string, entryName string, isD } glog.V(4).Infof("exists entry %v/%v: %v", parentDirectoryPath, entryName, request) - resp, err := client.LookupDirectoryEntry(context.Background(), request) + resp, err := filer_pb.LookupEntry(client, request) if err != nil { + if err == filer2.ErrNotFound { + exists = false + return nil + } glog.V(0).Infof("exists entry %v: %v", request, err) return fmt.Errorf("exists entry %s/%s: %v", parentDirectoryPath, entryName, err) } - if resp.Entry == nil { - exists = false - return nil - } exists = resp.Entry.IsDirectory == isDirectory diff --git a/weed/s3api/s3api_bucket_handlers.go b/weed/s3api/s3api_bucket_handlers.go index a40c6244c..c165ae2c4 100644 --- a/weed/s3api/s3api_bucket_handlers.go +++ b/weed/s3api/s3api_bucket_handlers.go @@ -13,6 +13,7 @@ import ( "github.com/aws/aws-sdk-go/service/s3" "github.com/gorilla/mux" + "github.com/chrislusf/seaweedfs/weed/filer2" "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" ) @@ -117,7 +118,10 @@ func (s3a *S3ApiServer) HeadBucketHandler(w http.ResponseWriter, r *http.Request } glog.V(1).Infof("lookup bucket: %v", request) - if resp, err := client.LookupDirectoryEntry(context.Background(), request); err != nil || resp.Entry == nil { + if _, err := filer_pb.LookupEntry(client, 
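The point of the LookupEntry wrapper is that every call site gets one way to tell "the entry does not exist" apart from a real RPC failure, instead of re-checking resp.Entry == nil and string-matching the error. With the sentinel living in filer_pb (the dependency-loop fix below), a caller looks roughly like this; the directory, bucket name and helper function are illustrative:

package example

import (
	"fmt"

	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
)

// bucketExists reports whether /buckets/<name> exists on the filer,
// using the normalized not-found sentinel instead of string matching.
func bucketExists(client filer_pb.SeaweedFilerClient, name string) (bool, error) {
	resp, err := filer_pb.LookupEntry(client, &filer_pb.LookupDirectoryEntryRequest{
		Directory: "/buckets",
		Name:      name,
	})
	if err == filer_pb.ErrNotFound {
		return false, nil // absent, but not a failure
	}
	if err != nil {
		return false, fmt.Errorf("lookup /buckets/%s: %v", name, err)
	}
	return resp.Entry.IsDirectory, nil
}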
request); err != nil { + if err == filer2.ErrNotFound { + return filer2.ErrNotFound + } return fmt.Errorf("lookup bucket %s/%s: %v", s3a.option.BucketsPath, bucket, err) } diff --git a/weed/shell/command_fs_cat.go b/weed/shell/command_fs_cat.go index 8364e0de1..3db487979 100644 --- a/weed/shell/command_fs_cat.go +++ b/weed/shell/command_fs_cat.go @@ -1,7 +1,6 @@ package shell import ( - "context" "fmt" "io" "math" @@ -50,13 +49,10 @@ func (c *commandFsCat) Do(args []string, commandEnv *CommandEnv, writer io.Write Name: name, Directory: dir, } - respLookupEntry, err := client.LookupDirectoryEntry(context.Background(), request) + respLookupEntry, err := filer_pb.LookupEntry(client, request) if err != nil { return err } - if respLookupEntry.Entry == nil { - return fmt.Errorf("file not found: %s", path) - } return filer2.StreamContent(commandEnv.MasterClient, writer, respLookupEntry.Entry.Chunks, 0, math.MaxInt32) diff --git a/weed/shell/command_fs_meta_cat.go b/weed/shell/command_fs_meta_cat.go index ec5a093df..52e2ee6c0 100644 --- a/weed/shell/command_fs_meta_cat.go +++ b/weed/shell/command_fs_meta_cat.go @@ -49,13 +49,10 @@ func (c *commandFsMetaCat) Do(args []string, commandEnv *CommandEnv, writer io.W Name: name, Directory: dir, } - respLookupEntry, err := client.LookupDirectoryEntry(context.Background(), request) + respLookupEntry, err := filer_pb.LookupEntry(client, request) if err != nil { return err } - if respLookupEntry.Entry == nil { - return fmt.Errorf("file not found: %s", path) - } m := jsonpb.Marshaler{ EmitDefaults: true, diff --git a/weed/shell/command_fs_mv.go b/weed/shell/command_fs_mv.go index 9b74e85e9..85275058e 100644 --- a/weed/shell/command_fs_mv.go +++ b/weed/shell/command_fs_mv.go @@ -58,12 +58,12 @@ func (c *commandFsMv) Do(args []string, commandEnv *CommandEnv, writer io.Writer Name: destinationDir, Directory: destinationName, } - respDestinationLookupEntry, err := client.LookupDirectoryEntry(context.Background(), destinationRequest) + respDestinationLookupEntry, err := filer_pb.LookupEntry(client, destinationRequest) var targetDir, targetName string // moving a file or folder - if err == nil && respDestinationLookupEntry.Entry != nil && respDestinationLookupEntry.Entry.IsDirectory { + if err == nil && respDestinationLookupEntry.Entry.IsDirectory { // to a directory targetDir = filepath.ToSlash(filepath.Join(destinationDir, destinationName)) targetName = sourceName diff --git a/weed/shell/commands.go b/weed/shell/commands.go index 93a4c94bb..2239fa435 100644 --- a/weed/shell/commands.go +++ b/weed/shell/commands.go @@ -71,7 +71,7 @@ func (ce *CommandEnv) checkDirectory(filerServer string, filerPort int64, path s return ce.withFilerClient(filerServer, filerPort, func(client filer_pb.SeaweedFilerClient) error { - resp, lookupErr := client.LookupDirectoryEntry(context.Background(), &filer_pb.LookupDirectoryEntryRequest{ + resp, lookupErr := filer_pb.LookupEntry(client, &filer_pb.LookupDirectoryEntryRequest{ Directory: dir, Name: name, }) @@ -79,10 +79,6 @@ func (ce *CommandEnv) checkDirectory(filerServer string, filerPort int64, path s return lookupErr } - if resp.Entry == nil { - return fmt.Errorf("entry not found") - } - if !resp.Entry.IsDirectory { return fmt.Errorf("not a directory") } From afb20de14cd597c4651a409ac3129f854f6bd1c5 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Sat, 7 Mar 2020 17:01:39 -0800 Subject: [PATCH 0204/2432] breaks dependency loop --- .../filer2/abstract_sql/abstract_sql_store.go | 3 ++- weed/filer2/cassandra/cassandra_store.go | 9 
+++++--- weed/filer2/etcd/etcd_store.go | 6 ++++-- weed/filer2/filer.go | 3 ++- weed/filer2/filer_client_util.go | 2 +- weed/filer2/filerstore.go | 3 --- weed/filer2/leveldb/leveldb_store.go | 3 ++- weed/filer2/leveldb2/leveldb2_store.go | 3 ++- weed/filer2/redis/universal_redis_store.go | 3 ++- weed/filer2/tikv/tikv_store.go | 3 ++- weed/filesys/xattr.go | 2 +- weed/pb/filer_pb/filer_pb_helper.go | 21 +++++++------------ weed/s3api/filer_util.go | 3 +-- weed/s3api/s3api_bucket_handlers.go | 5 ++--- weed/server/filer_grpc_server.go | 2 +- weed/server/filer_server_handlers_read.go | 3 ++- weed/server/filer_server_handlers_write.go | 2 +- weed/shell/command_fs_meta_cat.go | 1 - weed/shell/commands.go | 1 - 19 files changed, 39 insertions(+), 39 deletions(-) diff --git a/weed/filer2/abstract_sql/abstract_sql_store.go b/weed/filer2/abstract_sql/abstract_sql_store.go index 47fe507a1..864c858d3 100644 --- a/weed/filer2/abstract_sql/abstract_sql_store.go +++ b/weed/filer2/abstract_sql/abstract_sql_store.go @@ -7,6 +7,7 @@ import ( "github.com/chrislusf/seaweedfs/weed/filer2" "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" "github.com/chrislusf/seaweedfs/weed/util" ) @@ -104,7 +105,7 @@ func (store *AbstractSqlStore) FindEntry(ctx context.Context, fullpath filer2.Fu row := store.getTxOrDB(ctx).QueryRowContext(ctx, store.SqlFind, util.HashStringToLong(dir), name, dir) var data []byte if err := row.Scan(&data); err != nil { - return nil, filer2.ErrNotFound + return nil, filer_pb.ErrNotFound } entry := &filer2.Entry{ diff --git a/weed/filer2/cassandra/cassandra_store.go b/weed/filer2/cassandra/cassandra_store.go index f81ef946f..6f25fffec 100644 --- a/weed/filer2/cassandra/cassandra_store.go +++ b/weed/filer2/cassandra/cassandra_store.go @@ -3,10 +3,13 @@ package cassandra import ( "context" "fmt" + + "github.com/gocql/gocql" + "github.com/chrislusf/seaweedfs/weed/filer2" "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" "github.com/chrislusf/seaweedfs/weed/util" - "github.com/gocql/gocql" ) func init() { @@ -80,12 +83,12 @@ func (store *CassandraStore) FindEntry(ctx context.Context, fullpath filer2.Full "SELECT meta FROM filemeta WHERE directory=? 
AND name=?", dir, name).Consistency(gocql.One).Scan(&data); err != nil { if err != gocql.ErrNotFound { - return nil, filer2.ErrNotFound + return nil, filer_pb.ErrNotFound } } if len(data) == 0 { - return nil, filer2.ErrNotFound + return nil, filer_pb.ErrNotFound } entry = &filer2.Entry{ diff --git a/weed/filer2/etcd/etcd_store.go b/weed/filer2/etcd/etcd_store.go index 0f0c01426..83a6ddc5d 100644 --- a/weed/filer2/etcd/etcd_store.go +++ b/weed/filer2/etcd/etcd_store.go @@ -6,10 +6,12 @@ import ( "strings" "time" + "go.etcd.io/etcd/clientv3" + "github.com/chrislusf/seaweedfs/weed/filer2" "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" weed_util "github.com/chrislusf/seaweedfs/weed/util" - "go.etcd.io/etcd/clientv3" ) const ( @@ -99,7 +101,7 @@ func (store *EtcdStore) FindEntry(ctx context.Context, fullpath filer2.FullPath) } if len(resp.Kvs) == 0 { - return nil, filer2.ErrNotFound + return nil, filer_pb.ErrNotFound } entry = &filer2.Entry{ diff --git a/weed/filer2/filer.go b/weed/filer2/filer.go index cbb14a5c1..0b6a5c96e 100644 --- a/weed/filer2/filer.go +++ b/weed/filer2/filer.go @@ -13,6 +13,7 @@ import ( "github.com/karlseguin/ccache" "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" "github.com/chrislusf/seaweedfs/weed/util" "github.com/chrislusf/seaweedfs/weed/wdclient" ) @@ -126,7 +127,7 @@ func (f *Filer) CreateEntry(ctx context.Context, entry *Entry, o_excl bool) erro glog.V(2).Infof("create directory: %s %v", dirPath, dirEntry.Mode) mkdirErr := f.store.InsertEntry(ctx, dirEntry) if mkdirErr != nil { - if _, err := f.FindEntry(ctx, FullPath(dirPath)); err == ErrNotFound { + if _, err := f.FindEntry(ctx, FullPath(dirPath)); err == filer_pb.ErrNotFound { glog.V(3).Infof("mkdir %s: %v", dirPath, mkdirErr) return fmt.Errorf("mkdir %s: %v", dirPath, mkdirErr) } diff --git a/weed/filer2/filer_client_util.go b/weed/filer2/filer_client_util.go index 04da240b6..98bb57779 100644 --- a/weed/filer2/filer_client_util.go +++ b/weed/filer2/filer_client_util.go @@ -104,7 +104,7 @@ func GetEntry(filerClient FilerClient, fullFilePath FullPath) (entry *filer_pb.E // glog.V(3).Infof("read %s request: %v", fullFilePath, request) resp, err := filer_pb.LookupEntry(client, request) if err != nil { - if err == ErrNotFound { + if err == filer_pb.ErrNotFound { return nil } glog.V(3).Infof("read %s %v: %v", fullFilePath, resp, err) diff --git a/weed/filer2/filerstore.go b/weed/filer2/filerstore.go index ae25534ed..f724f79c2 100644 --- a/weed/filer2/filerstore.go +++ b/weed/filer2/filerstore.go @@ -2,7 +2,6 @@ package filer2 import ( "context" - "errors" "time" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" @@ -28,8 +27,6 @@ type FilerStore interface { RollbackTransaction(ctx context.Context) error } -var ErrNotFound = errors.New("filer: no entry is found in filer store") - type FilerStoreWrapper struct { actualStore FilerStore } diff --git a/weed/filer2/leveldb/leveldb_store.go b/weed/filer2/leveldb/leveldb_store.go index 44e6ac0eb..807fcb56f 100644 --- a/weed/filer2/leveldb/leveldb_store.go +++ b/weed/filer2/leveldb/leveldb_store.go @@ -11,6 +11,7 @@ import ( "github.com/chrislusf/seaweedfs/weed/filer2" "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" weed_util "github.com/chrislusf/seaweedfs/weed/util" ) @@ -94,7 +95,7 @@ func (store *LevelDBStore) FindEntry(ctx context.Context, fullpath filer2.FullPa data, err := store.db.Get(key, nil) if err == leveldb.ErrNotFound { - 
return nil, filer2.ErrNotFound + return nil, filer_pb.ErrNotFound } if err != nil { return nil, fmt.Errorf("get %s : %v", entry.FullPath, err) diff --git a/weed/filer2/leveldb2/leveldb2_store.go b/weed/filer2/leveldb2/leveldb2_store.go index 358d4d92a..0b07c6833 100644 --- a/weed/filer2/leveldb2/leveldb2_store.go +++ b/weed/filer2/leveldb2/leveldb2_store.go @@ -14,6 +14,7 @@ import ( "github.com/chrislusf/seaweedfs/weed/filer2" "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" weed_util "github.com/chrislusf/seaweedfs/weed/util" ) @@ -104,7 +105,7 @@ func (store *LevelDB2Store) FindEntry(ctx context.Context, fullpath filer2.FullP data, err := store.dbs[partitionId].Get(key, nil) if err == leveldb.ErrNotFound { - return nil, filer2.ErrNotFound + return nil, filer_pb.ErrNotFound } if err != nil { return nil, fmt.Errorf("get %s : %v", entry.FullPath, err) diff --git a/weed/filer2/redis/universal_redis_store.go b/weed/filer2/redis/universal_redis_store.go index 2162be733..c5b9d9416 100644 --- a/weed/filer2/redis/universal_redis_store.go +++ b/weed/filer2/redis/universal_redis_store.go @@ -11,6 +11,7 @@ import ( "github.com/chrislusf/seaweedfs/weed/filer2" "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" ) const ( @@ -64,7 +65,7 @@ func (store *UniversalRedisStore) FindEntry(ctx context.Context, fullpath filer2 data, err := store.Client.Get(string(fullpath)).Result() if err == redis.Nil { - return nil, filer2.ErrNotFound + return nil, filer_pb.ErrNotFound } if err != nil { diff --git a/weed/filer2/tikv/tikv_store.go b/weed/filer2/tikv/tikv_store.go index 24e05e3ad..accd4f169 100644 --- a/weed/filer2/tikv/tikv_store.go +++ b/weed/filer2/tikv/tikv_store.go @@ -12,6 +12,7 @@ import ( "github.com/chrislusf/seaweedfs/weed/filer2" "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" weed_util "github.com/chrislusf/seaweedfs/weed/util" "github.com/pingcap/tidb/kv" @@ -110,7 +111,7 @@ func (store *TikvStore) FindEntry(ctx context.Context, fullpath filer2.FullPath) data, err := store.getTx(ctx).Get(ctx, key) if err == kv.ErrNotExist { - return nil, filer2.ErrNotFound + return nil, filer_pb.ErrNotFound } if err != nil { return nil, fmt.Errorf("get %s : %v", entry.FullPath, err) diff --git a/weed/filesys/xattr.go b/weed/filesys/xattr.go index 993e7ad93..af154a7ee 100644 --- a/weed/filesys/xattr.go +++ b/weed/filesys/xattr.go @@ -123,7 +123,7 @@ func (wfs *WFS) maybeLoadEntry(dir, name string) (entry *filer_pb.Entry, err err resp, err := filer_pb.LookupEntry(client, request) if err != nil { - if err == filer2.ErrNotFound { + if err == filer_pb.ErrNotFound { glog.V(3).Infof("file attr read not found file %v: %v", request, err) return fuse.ENOENT } diff --git a/weed/pb/filer_pb/filer_pb_helper.go b/weed/pb/filer_pb/filer_pb_helper.go index b69a83354..96ab2154f 100644 --- a/weed/pb/filer_pb/filer_pb_helper.go +++ b/weed/pb/filer_pb/filer_pb_helper.go @@ -2,10 +2,10 @@ package filer_pb import ( "context" + "errors" "fmt" "strings" - "github.com/chrislusf/seaweedfs/weed/filer2" "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/storage/needle" ) @@ -88,23 +88,18 @@ func CreateEntry(client SeaweedFilerClient, request *CreateEntryRequest) error { } func LookupEntry(client SeaweedFilerClient, request *LookupDirectoryEntryRequest) (*LookupDirectoryEntryResponse, error) { - resp, err := filer_pb.LookupEntry(client, request) + resp, err := 
client.LookupDirectoryEntry(context.Background(), request) if err != nil { - if err == filer2.ErrNotFound || strings.Contains(err.Error(), ErrNotFound.Error()) { - return nil, filer2.ErrNotFound + if err == ErrNotFound || strings.Contains(err.Error(), ErrNotFound.Error()) { + return nil, ErrNotFound } - glog.V(3).Infof("read %s/%v: %v", request.Directory, request.Entry.Name, err) + glog.V(3).Infof("read %s/%v: %v", request.Directory, request.Name, err) return nil, fmt.Errorf("LookupEntry1: %v", err) } - if resp.Error != "" && strings.Contains(resp.Error, ErrNotFound.Error()) { - return nil, filer2.ErrNotFound - } - if resp.Error != "" { - glog.V(3).Infof("lookup %s/%v: %v", request.Directory, request.Entry.Name, err) - return nil, fmt.Errorf("LookupEntry2: %v", err) - } if resp.Entry == nil { - return nil, filer2.ErrNotFound + return nil, ErrNotFound } return resp, nil } + +var ErrNotFound = errors.New("filer: no entry is found in filer store") diff --git a/weed/s3api/filer_util.go b/weed/s3api/filer_util.go index d43ca8e5d..ec1eedcb4 100644 --- a/weed/s3api/filer_util.go +++ b/weed/s3api/filer_util.go @@ -8,7 +8,6 @@ import ( "strings" "time" - "github.com/chrislusf/seaweedfs/weed/filer2" "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" ) @@ -205,7 +204,7 @@ func (s3a *S3ApiServer) exists(parentDirectoryPath string, entryName string, isD glog.V(4).Infof("exists entry %v/%v: %v", parentDirectoryPath, entryName, request) resp, err := filer_pb.LookupEntry(client, request) if err != nil { - if err == filer2.ErrNotFound { + if err == filer_pb.ErrNotFound { exists = false return nil } diff --git a/weed/s3api/s3api_bucket_handlers.go b/weed/s3api/s3api_bucket_handlers.go index c165ae2c4..3e5089bed 100644 --- a/weed/s3api/s3api_bucket_handlers.go +++ b/weed/s3api/s3api_bucket_handlers.go @@ -13,7 +13,6 @@ import ( "github.com/aws/aws-sdk-go/service/s3" "github.com/gorilla/mux" - "github.com/chrislusf/seaweedfs/weed/filer2" "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" ) @@ -119,8 +118,8 @@ func (s3a *S3ApiServer) HeadBucketHandler(w http.ResponseWriter, r *http.Request glog.V(1).Infof("lookup bucket: %v", request) if _, err := filer_pb.LookupEntry(client, request); err != nil { - if err == filer2.ErrNotFound { - return filer2.ErrNotFound + if err == filer_pb.ErrNotFound { + return filer_pb.ErrNotFound } return fmt.Errorf("lookup bucket %s/%s: %v", s3a.option.BucketsPath, bucket, err) } diff --git a/weed/server/filer_grpc_server.go b/weed/server/filer_grpc_server.go index b000bf3a6..b904c1393 100644 --- a/weed/server/filer_grpc_server.go +++ b/weed/server/filer_grpc_server.go @@ -19,7 +19,7 @@ import ( func (fs *FilerServer) LookupDirectoryEntry(ctx context.Context, req *filer_pb.LookupDirectoryEntryRequest) (*filer_pb.LookupDirectoryEntryResponse, error) { entry, err := fs.filer.FindEntry(ctx, filer2.FullPath(filepath.ToSlash(filepath.Join(req.Directory, req.Name)))) - if err == filer2.ErrNotFound { + if err == filer_pb.ErrNotFound { return &filer_pb.LookupDirectoryEntryResponse{}, nil } if err != nil { diff --git a/weed/server/filer_server_handlers_read.go b/weed/server/filer_server_handlers_read.go index ca687ca5d..1864a26d1 100644 --- a/weed/server/filer_server_handlers_read.go +++ b/weed/server/filer_server_handlers_read.go @@ -15,6 +15,7 @@ import ( "github.com/chrislusf/seaweedfs/weed/filer2" "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" 
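+	// filer_pb now hosts the shared ErrNotFound sentinel (moved out of filer2 by this
+	// patch), which the lookup error check further down compares against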
"github.com/chrislusf/seaweedfs/weed/stats" "github.com/chrislusf/seaweedfs/weed/util" ) @@ -33,7 +34,7 @@ func (fs *FilerServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request, fs.listDirectoryHandler(w, r) return } - if err == filer2.ErrNotFound { + if err == filer_pb.ErrNotFound { glog.V(1).Infof("Not found %s: %v", path, err) stats.FilerRequestCounter.WithLabelValues("read.notfound").Inc() w.WriteHeader(http.StatusNotFound) diff --git a/weed/server/filer_server_handlers_write.go b/weed/server/filer_server_handlers_write.go index 01f46a53b..be6bb40f2 100644 --- a/weed/server/filer_server_handlers_write.go +++ b/weed/server/filer_server_handlers_write.go @@ -307,7 +307,7 @@ func (fs *FilerServer) DeleteHandler(w http.ResponseWriter, r *http.Request) { if err != nil { glog.V(1).Infoln("deleting", r.URL.Path, ":", err.Error()) httpStatus := http.StatusInternalServerError - if err == filer2.ErrNotFound { + if err == filer_pb.ErrNotFound { httpStatus = http.StatusNotFound } writeJsonError(w, r, httpStatus, err) diff --git a/weed/shell/command_fs_meta_cat.go b/weed/shell/command_fs_meta_cat.go index 52e2ee6c0..cd1ffb6fd 100644 --- a/weed/shell/command_fs_meta_cat.go +++ b/weed/shell/command_fs_meta_cat.go @@ -1,7 +1,6 @@ package shell import ( - "context" "fmt" "io" diff --git a/weed/shell/commands.go b/weed/shell/commands.go index 2239fa435..b8832ad93 100644 --- a/weed/shell/commands.go +++ b/weed/shell/commands.go @@ -1,7 +1,6 @@ package shell import ( - "context" "fmt" "io" "net/url" From 1a75fbfce05e580f70e5a4a68a7d748c6e2a25e6 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Sat, 7 Mar 2020 18:06:48 -0800 Subject: [PATCH 0205/2432] refactoring --- weed/server/filer_server_handlers_read.go | 14 +++- weed/server/volume_server_handlers_read.go | 90 ++-------------------- 2 files changed, 15 insertions(+), 89 deletions(-) diff --git a/weed/server/filer_server_handlers_read.go b/weed/server/filer_server_handlers_read.go index 1864a26d1..232609e6e 100644 --- a/weed/server/filer_server_handlers_read.go +++ b/weed/server/filer_server_handlers_read.go @@ -182,11 +182,18 @@ func (fs *FilerServer) handleMultipleChunks(w http.ResponseWriter, r *http.Reque totalSize := int64(filer2.TotalSize(entry.Chunks)) + processRangeRequst(r, w, totalSize, mimeType, func(writer io.Writer, offset int64, size int64) error { + return fs.writeContent(writer, entry, offset, int(size)) + }) + +} + +func processRangeRequst(r *http.Request, w http.ResponseWriter, totalSize int64, mimeType string, writeFn func(writer io.Writer, offset int64, size int64) error) { rangeReq := r.Header.Get("Range") if rangeReq == "" { w.Header().Set("Content-Length", strconv.FormatInt(totalSize, 10)) - if err := fs.writeContent(w, entry, 0, int(totalSize)); err != nil { + if err := writeFn(w, 0, totalSize); err != nil { http.Error(w, err.Error(), http.StatusInternalServerError) return } @@ -227,7 +234,7 @@ func (fs *FilerServer) handleMultipleChunks(w http.ResponseWriter, r *http.Reque w.Header().Set("Content-Range", ra.contentRange(totalSize)) w.WriteHeader(http.StatusPartialContent) - err = fs.writeContent(w, entry, ra.start, int(ra.length)) + err = writeFn(w, ra.start, ra.length) if err != nil { http.Error(w, err.Error(), http.StatusInternalServerError) return @@ -255,7 +262,7 @@ func (fs *FilerServer) handleMultipleChunks(w http.ResponseWriter, r *http.Reque pw.CloseWithError(e) return } - if e = fs.writeContent(part, entry, ra.start, int(ra.length)); e != nil { + if e = writeFn(part, ra.start, ra.length); e != nil { 
pw.CloseWithError(e) return } @@ -271,7 +278,6 @@ func (fs *FilerServer) handleMultipleChunks(w http.ResponseWriter, r *http.Reque http.Error(w, "Internal Error", http.StatusInternalServerError) return } - } func (fs *FilerServer) writeContent(w io.Writer, entry *filer2.Entry, offset int64, size int) error { diff --git a/weed/server/volume_server_handlers_read.go b/weed/server/volume_server_handlers_read.go index 40a951a74..99cb1e3da 100644 --- a/weed/server/volume_server_handlers_read.go +++ b/weed/server/volume_server_handlers_read.go @@ -7,7 +7,6 @@ import ( "fmt" "io" "mime" - "mime/multipart" "net/http" "net/url" "path" @@ -248,92 +247,13 @@ func writeResponseContent(filename, mimeType string, rs io.ReadSeeker, w http.Re w.Header().Set("Content-Length", strconv.FormatInt(totalSize, 10)) return nil } - rangeReq := r.Header.Get("Range") - if rangeReq == "" { - w.Header().Set("Content-Length", strconv.FormatInt(totalSize, 10)) - if _, e = rs.Seek(0, 0); e != nil { - return e - } - _, e = io.Copy(w, rs) - return e - } - //the rest is dealing with partial content request - //mostly copy from src/pkg/net/http/fs.go - ranges, err := parseRange(rangeReq, totalSize) - if err != nil { - http.Error(w, err.Error(), http.StatusRequestedRangeNotSatisfiable) - return nil - } - if sumRangesSize(ranges) > totalSize { - // The total number of bytes in all the ranges - // is larger than the size of the file by - // itself, so this is probably an attack, or a - // dumb client. Ignore the range request. - return nil - } - if len(ranges) == 0 { - return nil - } - if len(ranges) == 1 { - // RFC 2616, Section 14.16: - // "When an HTTP message includes the content of a single - // range (for example, a response to a request for a - // single range, or to a request for a set of ranges - // that overlap without any holes), this content is - // transmitted with a Content-Range header, and a - // Content-Length header showing the number of bytes - // actually transferred. - // ... - // A response to a request for a single range MUST NOT - // be sent using the multipart/byteranges media type." - ra := ranges[0] - w.Header().Set("Content-Length", strconv.FormatInt(ra.length, 10)) - w.Header().Set("Content-Range", ra.contentRange(totalSize)) - w.WriteHeader(http.StatusPartialContent) - if _, e = rs.Seek(ra.start, 0); e != nil { + processRangeRequst(r, w, totalSize, mimeType, func(writer io.Writer, offset int64, size int64) error { + if _, e = rs.Seek(offset, 0); e != nil { return e } - - _, e = io.CopyN(w, rs, ra.length) + _, e = io.CopyN(writer, rs, size) return e - } - // process multiple ranges - for _, ra := range ranges { - if ra.start > totalSize { - http.Error(w, "Out of Range", http.StatusRequestedRangeNotSatisfiable) - return nil - } - } - sendSize := rangesMIMESize(ranges, mimeType, totalSize) - pr, pw := io.Pipe() - mw := multipart.NewWriter(pw) - w.Header().Set("Content-Type", "multipart/byteranges; boundary="+mw.Boundary()) - sendContent := pr - defer pr.Close() // cause writing goroutine to fail and exit if CopyN doesn't finish. 
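The essence of this refactor is that HTTP Range handling is now driven through a write callback of the form func(writer io.Writer, offset, size int64) error: the filer's handleMultipleChunks passes a closure that streams chunk content, while the volume server's writeResponseContent (continued just below) passes one that seeks its ReadSeeker and copies the requested window. What follows is a trimmed-down, self-contained sketch of that callback shape; the names serveWithRange and writeRangeFn are illustrative, only the no-Range and single-range cases are handled, and the shared helper in the patch additionally emits multipart/byteranges responses for multi-range requests.

package main

import (
	"bytes"
	"fmt"
	"io"
	"net/http"
	"strconv"
)

// writeRangeFn writes size bytes of the underlying content, starting at offset, to w.
// The range logic never needs to know whether the bytes come from a local file or
// from remote chunks; that knowledge lives entirely in the callback.
type writeRangeFn func(w io.Writer, offset, size int64) error

// serveWithRange answers a request with either the full content or a single byte range.
func serveWithRange(w http.ResponseWriter, r *http.Request, totalSize int64, writeFn writeRangeFn) {
	rangeReq := r.Header.Get("Range")
	if rangeReq == "" {
		w.Header().Set("Content-Length", strconv.FormatInt(totalSize, 10))
		if err := writeFn(w, 0, totalSize); err != nil {
			http.Error(w, err.Error(), http.StatusInternalServerError)
		}
		return
	}

	// Only the simple "bytes=start-end" form is accepted in this sketch.
	var start, end int64
	if _, err := fmt.Sscanf(rangeReq, "bytes=%d-%d", &start, &end); err != nil || start > end || end >= totalSize {
		http.Error(w, "invalid range", http.StatusRequestedRangeNotSatisfiable)
		return
	}
	length := end - start + 1
	w.Header().Set("Content-Length", strconv.FormatInt(length, 10))
	w.Header().Set("Content-Range", fmt.Sprintf("bytes %d-%d/%d", start, end, totalSize))
	w.WriteHeader(http.StatusPartialContent)
	// Errors past this point cannot change the status line; they only abort the body.
	if err := writeFn(w, start, length); err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
	}
}

func main() {
	content := []byte("hello, range requests")

	// A volume-server-style callback: seek into a ReadSeeker and copy the requested window.
	fromReadSeeker := func(w io.Writer, offset, size int64) error {
		rs := bytes.NewReader(content)
		if _, err := rs.Seek(offset, io.SeekStart); err != nil {
			return err
		}
		_, err := io.CopyN(w, rs, size)
		return err
	}

	http.HandleFunc("/blob", func(w http.ResponseWriter, r *http.Request) {
		serveWithRange(w, r, int64(len(content)), fromReadSeeker)
	})
	http.ListenAndServe(":8080", nil)
}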
- go func() { - for _, ra := range ranges { - part, e := mw.CreatePart(ra.mimeHeader(mimeType, totalSize)) - if e != nil { - pw.CloseWithError(e) - return - } - if _, e = rs.Seek(ra.start, 0); e != nil { - pw.CloseWithError(e) - return - } - if _, e = io.CopyN(part, rs, ra.length); e != nil { - pw.CloseWithError(e) - return - } - } - mw.Close() - pw.Close() - }() - if w.Header().Get("Content-Encoding") == "" { - w.Header().Set("Content-Length", strconv.FormatInt(sendSize, 10)) - } - w.WriteHeader(http.StatusPartialContent) - _, e = io.CopyN(w, sendContent, sendSize) - return e + }) + return nil } From 11fceaf2f7b3378c0e81b607b2600b2d0537d7fb Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Sat, 7 Mar 2020 18:09:30 -0800 Subject: [PATCH 0206/2432] refactoring --- weed/server/filer_server_handlers_read.go | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/weed/server/filer_server_handlers_read.go b/weed/server/filer_server_handlers_read.go index 232609e6e..17842f4d3 100644 --- a/weed/server/filer_server_handlers_read.go +++ b/weed/server/filer_server_handlers_read.go @@ -183,7 +183,7 @@ func (fs *FilerServer) handleMultipleChunks(w http.ResponseWriter, r *http.Reque totalSize := int64(filer2.TotalSize(entry.Chunks)) processRangeRequst(r, w, totalSize, mimeType, func(writer io.Writer, offset int64, size int64) error { - return fs.writeContent(writer, entry, offset, int(size)) + return filer2.StreamContent(fs.filer.MasterClient, w, entry.Chunks, offset, int(size)) }) } @@ -279,9 +279,3 @@ func processRangeRequst(r *http.Request, w http.ResponseWriter, totalSize int64, return } } - -func (fs *FilerServer) writeContent(w io.Writer, entry *filer2.Entry, offset int64, size int) error { - - return filer2.StreamContent(fs.filer.MasterClient, w, entry.Chunks, offset, size) - -} From 9b3109a5d88dc4bc3353dae0dff95f3a7890c691 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Sun, 8 Mar 2020 15:42:44 -0700 Subject: [PATCH 0207/2432] filer: processing all response headers, no pass through to volume server * filer calculate MD5 etag * filer handle response headers, instread of pass it to volume servers --- .../templates/filer-statefulset.yaml | 3 - k8s/seaweedfs/values.yaml | 3 - weed/command/filer.go | 3 - weed/command/server.go | 5 - weed/s3api/s3api_object_handlers.go | 1 - weed/server/common.go | 106 +++++++++ weed/server/filer_server.go | 1 - weed/server/filer_server_handlers_read.go | 212 ++---------------- weed/server/filer_server_handlers_write.go | 13 +- .../filer_server_handlers_write_autochunk.go | 8 +- weed/server/volume_server_handlers_read.go | 26 +-- weed/storage/needle/crc.go | 14 +- 12 files changed, 141 insertions(+), 254 deletions(-) diff --git a/k8s/seaweedfs/templates/filer-statefulset.yaml b/k8s/seaweedfs/templates/filer-statefulset.yaml index 6ee57edf7..43da74c43 100644 --- a/k8s/seaweedfs/templates/filer-statefulset.yaml +++ b/k8s/seaweedfs/templates/filer-statefulset.yaml @@ -99,9 +99,6 @@ spec: {{- end }} filer \ -port={{ .Values.filer.port }} \ - {{- if .Values.filer.redirectOnRead }} - -redirectOnRead \ - {{- end }} {{- if .Values.filer.disableHttp }} -disableHttp \ {{- end }} diff --git a/k8s/seaweedfs/values.yaml b/k8s/seaweedfs/values.yaml index 7afc2dacf..b65f7376b 100644 --- a/k8s/seaweedfs/values.yaml +++ b/k8s/seaweedfs/values.yaml @@ -177,9 +177,6 @@ filer: grpcPort: 18888 loggingOverrideLevel: null - # Whether proxy or redirect to volume server during file GET request - redirectOnRead: false - # Limit sub dir listing size (default 100000) dirListLimit: 
100000 diff --git a/weed/command/filer.go b/weed/command/filer.go index 327ee8316..fb1ee2b0f 100644 --- a/weed/command/filer.go +++ b/weed/command/filer.go @@ -27,7 +27,6 @@ type FilerOptions struct { publicPort *int collection *string defaultReplicaPlacement *string - redirectOnRead *bool disableDirListing *bool maxMB *int dirListingLimit *int @@ -48,7 +47,6 @@ func init() { f.port = cmdFiler.Flag.Int("port", 8888, "filer server http listen port") f.publicPort = cmdFiler.Flag.Int("port.readonly", 0, "readonly port opened to public") f.defaultReplicaPlacement = cmdFiler.Flag.String("defaultReplicaPlacement", "000", "default replication type if not specified") - f.redirectOnRead = cmdFiler.Flag.Bool("redirectOnRead", false, "whether proxy or redirect to volume server during file GET request") f.disableDirListing = cmdFiler.Flag.Bool("disableDirListing", false, "turn off directory listing") f.maxMB = cmdFiler.Flag.Int("maxMB", 32, "split files larger than the limit") f.dirListingLimit = cmdFiler.Flag.Int("dirListLimit", 100000, "limit sub dir listing size") @@ -105,7 +103,6 @@ func (fo *FilerOptions) startFiler() { Masters: strings.Split(*fo.masters, ","), Collection: *fo.collection, DefaultReplication: *fo.defaultReplicaPlacement, - RedirectOnRead: *fo.redirectOnRead, DisableDirListing: *fo.disableDirListing, MaxMB: *fo.maxMB, DirListingLimit: *fo.dirListingLimit, diff --git a/weed/command/server.go b/weed/command/server.go index f45429193..560b90037 100644 --- a/weed/command/server.go +++ b/weed/command/server.go @@ -78,7 +78,6 @@ func init() { filerOptions.port = cmdServer.Flag.Int("filer.port", 8888, "filer server http listen port") filerOptions.publicPort = cmdServer.Flag.Int("filer.port.public", 0, "filer server public http listen port") filerOptions.defaultReplicaPlacement = cmdServer.Flag.String("filer.defaultReplicaPlacement", "", "Default replication type if not specified during runtime.") - filerOptions.redirectOnRead = cmdServer.Flag.Bool("filer.redirectOnRead", false, "whether proxy or redirect to volume server during file GET request") filerOptions.disableDirListing = cmdServer.Flag.Bool("filer.disableDirListing", false, "turn off directory listing") filerOptions.maxMB = cmdServer.Flag.Int("filer.maxMB", 32, "split files larger than the limit") filerOptions.dirListingLimit = cmdServer.Flag.Int("filer.dirListLimit", 1000, "limit sub dir listing size") @@ -115,10 +114,6 @@ func runServer(cmd *Command, args []string) bool { defer pprof.StopCPUProfile() } - if *filerOptions.redirectOnRead { - *isStartingFiler = true - } - if *isStartingS3 { *isStartingFiler = true } diff --git a/weed/s3api/s3api_object_handlers.go b/weed/s3api/s3api_object_handlers.go index c14bfa2cb..9d03cdbe3 100644 --- a/weed/s3api/s3api_object_handlers.go +++ b/weed/s3api/s3api_object_handlers.go @@ -223,7 +223,6 @@ func (s3a *S3ApiServer) proxyToFiler(w http.ResponseWriter, r *http.Request, des proxyReq.Header.Set("Host", s3a.option.Filer) proxyReq.Header.Set("X-Forwarded-For", r.RemoteAddr) - proxyReq.Header.Set("Etag-MD5", "True") for header, values := range r.Header { for _, value := range values { diff --git a/weed/server/common.go b/weed/server/common.go index f88533c24..5904e2c47 100644 --- a/weed/server/common.go +++ b/weed/server/common.go @@ -5,6 +5,8 @@ import ( "encoding/json" "errors" "fmt" + "io" + "mime/multipart" "net/http" "path/filepath" "strconv" @@ -210,3 +212,107 @@ func handleStaticResources2(r *mux.Router) { r.Handle("/favicon.ico", http.FileServer(statikFS)) 
r.PathPrefix("/seaweedfsstatic/").Handler(http.StripPrefix("/seaweedfsstatic", http.FileServer(statikFS))) } + +func adjustHeadersAfterHEAD(w http.ResponseWriter, r *http.Request, filename string) { + if filename != "" { + contentDisposition := "inline" + if r.FormValue("dl") != "" { + if dl, _ := strconv.ParseBool(r.FormValue("dl")); dl { + contentDisposition = "attachment" + } + } + w.Header().Set("Content-Disposition", contentDisposition+`; filename="`+fileNameEscaper.Replace(filename)+`"`) + } +} + +func processRangeRequst(r *http.Request, w http.ResponseWriter, totalSize int64, mimeType string, writeFn func(writer io.Writer, offset int64, size int64) error) { + rangeReq := r.Header.Get("Range") + + if rangeReq == "" { + w.Header().Set("Content-Length", strconv.FormatInt(totalSize, 10)) + if err := writeFn(w, 0, totalSize); err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + return + } + + //the rest is dealing with partial content request + //mostly copy from src/pkg/net/http/fs.go + ranges, err := parseRange(rangeReq, totalSize) + if err != nil { + http.Error(w, err.Error(), http.StatusRequestedRangeNotSatisfiable) + return + } + if sumRangesSize(ranges) > totalSize { + // The total number of bytes in all the ranges + // is larger than the size of the file by + // itself, so this is probably an attack, or a + // dumb client. Ignore the range request. + return + } + if len(ranges) == 0 { + return + } + if len(ranges) == 1 { + // RFC 2616, Section 14.16: + // "When an HTTP message includes the content of a single + // range (for example, a response to a request for a + // single range, or to a request for a set of ranges + // that overlap without any holes), this content is + // transmitted with a Content-Range header, and a + // Content-Length header showing the number of bytes + // actually transferred. + // ... + // A response to a request for a single range MUST NOT + // be sent using the multipart/byteranges media type." + ra := ranges[0] + w.Header().Set("Content-Length", strconv.FormatInt(ra.length, 10)) + w.Header().Set("Content-Range", ra.contentRange(totalSize)) + w.WriteHeader(http.StatusPartialContent) + + err = writeFn(w, ra.start, ra.length) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + return + } + + // process multiple ranges + for _, ra := range ranges { + if ra.start > totalSize { + http.Error(w, "Out of Range", http.StatusRequestedRangeNotSatisfiable) + return + } + } + sendSize := rangesMIMESize(ranges, mimeType, totalSize) + pr, pw := io.Pipe() + mw := multipart.NewWriter(pw) + w.Header().Set("Content-Type", "multipart/byteranges; boundary="+mw.Boundary()) + sendContent := pr + defer pr.Close() // cause writing goroutine to fail and exit if CopyN doesn't finish. 
+ go func() { + for _, ra := range ranges { + part, e := mw.CreatePart(ra.mimeHeader(mimeType, totalSize)) + if e != nil { + pw.CloseWithError(e) + return + } + if e = writeFn(part, ra.start, ra.length); e != nil { + pw.CloseWithError(e) + return + } + } + mw.Close() + pw.Close() + }() + if w.Header().Get("Content-Encoding") == "" { + w.Header().Set("Content-Length", strconv.FormatInt(sendSize, 10)) + } + w.WriteHeader(http.StatusPartialContent) + if _, err := io.CopyN(w, sendContent, sendSize); err != nil { + http.Error(w, "Internal Error", http.StatusInternalServerError) + return + } +} diff --git a/weed/server/filer_server.go b/weed/server/filer_server.go index 4c493b6b3..70da9094b 100644 --- a/weed/server/filer_server.go +++ b/weed/server/filer_server.go @@ -37,7 +37,6 @@ type FilerOption struct { Masters []string Collection string DefaultReplication string - RedirectOnRead bool DisableDirListing bool MaxMB int DirListingLimit int diff --git a/weed/server/filer_server_handlers_read.go b/weed/server/filer_server_handlers_read.go index 17842f4d3..5967535b8 100644 --- a/weed/server/filer_server_handlers_read.go +++ b/weed/server/filer_server_handlers_read.go @@ -2,14 +2,10 @@ package weed_server import ( "context" - "fmt" "io" - "io/ioutil" "mime" - "mime/multipart" "net/http" - "net/url" - "path" + "path/filepath" "strconv" "strings" @@ -17,7 +13,6 @@ import ( "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" "github.com/chrislusf/seaweedfs/weed/stats" - "github.com/chrislusf/seaweedfs/weed/util" ) func (fs *FilerServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request, isGetMethod bool) { @@ -68,118 +63,30 @@ func (fs *FilerServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request, } w.Header().Set("Accept-Ranges", "bytes") - if r.Method == "HEAD" { - w.Header().Set("Content-Length", strconv.FormatInt(int64(filer2.TotalSize(entry.Chunks)), 10)) - w.Header().Set("Last-Modified", entry.Attr.Mtime.Format(http.TimeFormat)) - if entry.Attr.Mime != "" { - w.Header().Set("Content-Type", entry.Attr.Mime) - } - setEtag(w, filer2.ETag(entry.Chunks)) - return - } - - if len(entry.Chunks) == 1 { - fs.handleSingleChunk(w, r, entry) - return - } - - fs.handleMultipleChunks(w, r, entry) - -} - -func (fs *FilerServer) handleSingleChunk(w http.ResponseWriter, r *http.Request, entry *filer2.Entry) { - - fileId := entry.Chunks[0].GetFileIdString() - - urlString, err := fs.filer.MasterClient.LookupFileId(fileId) - if err != nil { - glog.V(1).Infof("operation LookupFileId %s failed, err: %v", fileId, err) - w.WriteHeader(http.StatusNotFound) - return - } - - if fs.option.RedirectOnRead && entry.Chunks[0].CipherKey == nil { - stats.FilerRequestCounter.WithLabelValues("redirect").Inc() - http.Redirect(w, r, urlString, http.StatusFound) - return - } - - u, _ := url.Parse(urlString) - q := u.Query() - for key, values := range r.URL.Query() { - for _, value := range values { - q.Add(key, value) - } - } - u.RawQuery = q.Encode() - request := &http.Request{ - Method: r.Method, - URL: u, - Proto: r.Proto, - ProtoMajor: r.ProtoMajor, - ProtoMinor: r.ProtoMinor, - Header: r.Header, - Body: r.Body, - Host: r.Host, - ContentLength: r.ContentLength, - } - glog.V(3).Infoln("retrieving from", u) - resp, do_err := util.Do(request) - if do_err != nil { - glog.V(0).Infoln("failing to connect to volume server", do_err.Error()) - writeJsonError(w, r, http.StatusInternalServerError, do_err) - return - } - defer func() { - io.Copy(ioutil.Discard, resp.Body) - 
resp.Body.Close() - }() - for k, v := range resp.Header { - w.Header()[k] = v - } - if entry.Attr.Mime != "" { - w.Header().Set("Content-Type", entry.Attr.Mime) - } - if entry.Chunks[0].CipherKey == nil { - w.WriteHeader(resp.StatusCode) - io.Copy(w, resp.Body) - } else { - fs.writeEncryptedChunk(w, resp, entry) - } -} - -func (fs *FilerServer) writeEncryptedChunk(w http.ResponseWriter, resp *http.Response, entry *filer2.Entry) { - chunk := entry.Chunks[0] - encryptedData, err := ioutil.ReadAll(resp.Body) - if err != nil { - glog.V(1).Infof("read encrypted %s failed, err: %v", chunk.FileId, err) - w.WriteHeader(http.StatusNotFound) - return - } - decryptedData, err := util.Decrypt(encryptedData, util.CipherKey(chunk.CipherKey)) - if err != nil { - glog.V(1).Infof("decrypt %s failed, err: %v", chunk.FileId, err) - w.WriteHeader(http.StatusNotFound) - return - } - w.Header().Set("Content-Length", fmt.Sprintf("%d", chunk.Size)) - w.WriteHeader(resp.StatusCode) - w.Write(decryptedData) -} - -func (fs *FilerServer) handleMultipleChunks(w http.ResponseWriter, r *http.Request, entry *filer2.Entry) { + w.Header().Set("Last-Modified", entry.Attr.Mtime.Format(http.TimeFormat)) + // mime type mimeType := entry.Attr.Mime if mimeType == "" { - if ext := path.Ext(entry.Name()); ext != "" { + if ext := filepath.Ext(entry.Name()); ext != "" { mimeType = mime.TypeByExtension(ext) } } if mimeType != "" { w.Header().Set("Content-Type", mimeType) } + + // set etag setEtag(w, filer2.ETag(entry.Chunks)) + if r.Method == "HEAD" { + w.Header().Set("Content-Length", strconv.FormatInt(int64(filer2.TotalSize(entry.Chunks)), 10)) + return + } + + filename := entry.Name() + adjustHeadersAfterHEAD(w, r, filename) + totalSize := int64(filer2.TotalSize(entry.Chunks)) processRangeRequst(r, w, totalSize, mimeType, func(writer io.Writer, offset int64, size int64) error { @@ -188,94 +95,3 @@ func (fs *FilerServer) handleMultipleChunks(w http.ResponseWriter, r *http.Reque } -func processRangeRequst(r *http.Request, w http.ResponseWriter, totalSize int64, mimeType string, writeFn func(writer io.Writer, offset int64, size int64) error) { - rangeReq := r.Header.Get("Range") - - if rangeReq == "" { - w.Header().Set("Content-Length", strconv.FormatInt(totalSize, 10)) - if err := writeFn(w, 0, totalSize); err != nil { - http.Error(w, err.Error(), http.StatusInternalServerError) - return - } - return - } - - //the rest is dealing with partial content request - //mostly copy from src/pkg/net/http/fs.go - ranges, err := parseRange(rangeReq, totalSize) - if err != nil { - http.Error(w, err.Error(), http.StatusRequestedRangeNotSatisfiable) - return - } - if sumRangesSize(ranges) > totalSize { - // The total number of bytes in all the ranges - // is larger than the size of the file by - // itself, so this is probably an attack, or a - // dumb client. Ignore the range request. - return - } - if len(ranges) == 0 { - return - } - if len(ranges) == 1 { - // RFC 2616, Section 14.16: - // "When an HTTP message includes the content of a single - // range (for example, a response to a request for a - // single range, or to a request for a set of ranges - // that overlap without any holes), this content is - // transmitted with a Content-Range header, and a - // Content-Length header showing the number of bytes - // actually transferred. - // ... - // A response to a request for a single range MUST NOT - // be sent using the multipart/byteranges media type." 
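Besides centralizing the range logic, this patch has the filer compute the MD5-based ETag itself instead of passing the volume server's CRC ETag through: in the write handler further down, the request body is teed through an md5 hash while being forwarded, and the hex digest becomes the returned ETag. Below is a minimal sketch of that tee pattern, with a hypothetical teeMD5 helper standing in for the handler's inline code.

package main

import (
	"bytes"
	"crypto/md5"
	"fmt"
	"io"
	"io/ioutil"
)

// teeMD5 wraps an upload body so every byte forwarded downstream is also fed into an
// MD5 hash; once the copy is done, sum() yields the hex digest to use as the ETag.
func teeMD5(body io.Reader) (wrapped io.ReadCloser, sum func() string) {
	hash := md5.New()
	wrapped = ioutil.NopCloser(io.TeeReader(body, hash))
	sum = func() string { return fmt.Sprintf("%x", hash.Sum(nil)) }
	return
}

func main() {
	upstream := bytes.NewReader([]byte("example upload body"))

	body, etag := teeMD5(upstream)
	// Stand-in for forwarding the body to a volume server: just drain it here.
	if _, err := io.Copy(ioutil.Discard, body); err != nil {
		panic(err)
	}
	fmt.Println("ETag:", etag()) // hex MD5 of everything that was forwarded
}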
- ra := ranges[0] - w.Header().Set("Content-Length", strconv.FormatInt(ra.length, 10)) - w.Header().Set("Content-Range", ra.contentRange(totalSize)) - w.WriteHeader(http.StatusPartialContent) - - err = writeFn(w, ra.start, ra.length) - if err != nil { - http.Error(w, err.Error(), http.StatusInternalServerError) - return - } - return - } - - // process multiple ranges - for _, ra := range ranges { - if ra.start > totalSize { - http.Error(w, "Out of Range", http.StatusRequestedRangeNotSatisfiable) - return - } - } - sendSize := rangesMIMESize(ranges, mimeType, totalSize) - pr, pw := io.Pipe() - mw := multipart.NewWriter(pw) - w.Header().Set("Content-Type", "multipart/byteranges; boundary="+mw.Boundary()) - sendContent := pr - defer pr.Close() // cause writing goroutine to fail and exit if CopyN doesn't finish. - go func() { - for _, ra := range ranges { - part, e := mw.CreatePart(ra.mimeHeader(mimeType, totalSize)) - if e != nil { - pw.CloseWithError(e) - return - } - if e = writeFn(part, ra.start, ra.length); e != nil { - pw.CloseWithError(e) - return - } - } - mw.Close() - pw.Close() - }() - if w.Header().Get("Content-Encoding") == "" { - w.Header().Set("Content-Length", strconv.FormatInt(sendSize, 10)) - } - w.WriteHeader(http.StatusPartialContent) - if _, err := io.CopyN(w, sendContent, sendSize); err != nil { - http.Error(w, "Internal Error", http.StatusInternalServerError) - return - } -} diff --git a/weed/server/filer_server_handlers_write.go b/weed/server/filer_server_handlers_write.go index be6bb40f2..7fb48f838 100644 --- a/weed/server/filer_server_handlers_write.go +++ b/weed/server/filer_server_handlers_write.go @@ -2,6 +2,7 @@ package weed_server import ( "context" + "crypto/md5" "encoding/json" "errors" "fmt" @@ -220,6 +221,8 @@ func (fs *FilerServer) uploadToVolumeServer(r *http.Request, u *url.URL, auth se defer func() { stats.FilerRequestHistogram.WithLabelValues("postUpload").Observe(time.Since(start).Seconds()) }() ret = &operation.UploadResult{} + hash := md5.New() + var body = ioutil.NopCloser(io.TeeReader(r.Body, hash)) request := &http.Request{ Method: r.Method, @@ -228,7 +231,7 @@ func (fs *FilerServer) uploadToVolumeServer(r *http.Request, u *url.URL, auth se ProtoMajor: r.ProtoMajor, ProtoMinor: r.ProtoMinor, Header: r.Header, - Body: r.Body, + Body: body, Host: r.Host, ContentLength: r.ContentLength, } @@ -247,7 +250,7 @@ func (fs *FilerServer) uploadToVolumeServer(r *http.Request, u *url.URL, auth se io.Copy(ioutil.Discard, resp.Body) resp.Body.Close() }() - etag := resp.Header.Get("ETag") + respBody, raErr := ioutil.ReadAll(resp.Body) if raErr != nil { glog.V(0).Infoln("failing to upload to volume server", r.RequestURI, raErr.Error()) @@ -255,6 +258,7 @@ func (fs *FilerServer) uploadToVolumeServer(r *http.Request, u *url.URL, auth se err = raErr return } + glog.V(4).Infoln("post result", string(respBody)) unmarshalErr := json.Unmarshal(respBody, &ret) if unmarshalErr != nil { @@ -282,9 +286,8 @@ func (fs *FilerServer) uploadToVolumeServer(r *http.Request, u *url.URL, auth se return } } - if etag != "" { - ret.ETag = etag - } + // use filer calculated md5 ETag, instead of the volume server crc ETag + ret.ETag = fmt.Sprintf("%x", hash.Sum(nil)) return } diff --git a/weed/server/filer_server_handlers_write_autochunk.go b/weed/server/filer_server_handlers_write_autochunk.go index 1c7891353..641a367b1 100644 --- a/weed/server/filer_server_handlers_write_autochunk.go +++ b/weed/server/filer_server_handlers_write_autochunk.go @@ -102,8 +102,7 @@ func (fs *FilerServer) 
doAutoChunk(ctx context.Context, w http.ResponseWriter, r } // upload the chunk to the volume server - chunkName := fileName + "_chunk_" + strconv.FormatInt(int64(len(fileChunks)+1), 10) - uploadResult, uploadErr := fs.doUpload(urlLocation, w, r, limitedReader, chunkName, "", fileId, auth) + uploadResult, uploadErr := fs.doUpload(urlLocation, w, r, limitedReader, "", "", nil, auth) if uploadErr != nil { return nil, uploadErr } @@ -175,8 +174,7 @@ func (fs *FilerServer) doAutoChunk(ctx context.Context, w http.ResponseWriter, r return } -func (fs *FilerServer) doUpload(urlLocation string, w http.ResponseWriter, r *http.Request, - limitedReader io.Reader, fileName string, contentType string, fileId string, auth security.EncodedJwt) (*operation.UploadResult, error) { +func (fs *FilerServer) doUpload(urlLocation string, w http.ResponseWriter, r *http.Request, limitedReader io.Reader, fileName string, contentType string, pairMap map[string]string, auth security.EncodedJwt) (*operation.UploadResult, error) { stats.FilerRequestCounter.WithLabelValues("postAutoChunkUpload").Inc() start := time.Now() @@ -184,5 +182,5 @@ func (fs *FilerServer) doUpload(urlLocation string, w http.ResponseWriter, r *ht stats.FilerRequestHistogram.WithLabelValues("postAutoChunkUpload").Observe(time.Since(start).Seconds()) }() - return operation.Upload(urlLocation, fileName, fs.option.Cipher, limitedReader, false, contentType, nil, auth) + return operation.Upload(urlLocation, fileName, fs.option.Cipher, limitedReader, false, contentType, pairMap, auth) } diff --git a/weed/server/volume_server_handlers_read.go b/weed/server/volume_server_handlers_read.go index 99cb1e3da..6e603d158 100644 --- a/weed/server/volume_server_handlers_read.go +++ b/weed/server/volume_server_handlers_read.go @@ -9,7 +9,7 @@ import ( "mime" "net/http" "net/url" - "path" + "path/filepath" "strconv" "strings" "time" @@ -111,11 +111,7 @@ func (vs *VolumeServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request) w.WriteHeader(http.StatusNotModified) return } - if r.Header.Get("ETag-MD5") == "True" { - setEtag(w, n.MD5()) - } else { - setEtag(w, n.Etag()) - } + setEtag(w, n.Etag()) if n.HasPairs() { pairMap := make(map[string]string) @@ -135,7 +131,7 @@ func (vs *VolumeServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request) if n.NameSize > 0 && filename == "" { filename = string(n.Name) if ext == "" { - ext = path.Ext(filename) + ext = filepath.Ext(filename) } } mtype := "" @@ -179,7 +175,7 @@ func (vs *VolumeServer) tryHandleChunkedFile(n *needle.Needle, fileName string, fileName = chunkManifest.Name } - ext := path.Ext(fileName) + ext := filepath.Ext(fileName) mType := "" if chunkManifest.Mime != "" { @@ -226,28 +222,22 @@ func conditionallyResizeImages(originalDataReaderSeeker io.ReadSeeker, ext strin func writeResponseContent(filename, mimeType string, rs io.ReadSeeker, w http.ResponseWriter, r *http.Request) error { totalSize, e := rs.Seek(0, 2) if mimeType == "" { - if ext := path.Ext(filename); ext != "" { + if ext := filepath.Ext(filename); ext != "" { mimeType = mime.TypeByExtension(ext) } } if mimeType != "" { w.Header().Set("Content-Type", mimeType) } - if filename != "" { - contentDisposition := "inline" - if r.FormValue("dl") != "" { - if dl, _ := strconv.ParseBool(r.FormValue("dl")); dl { - contentDisposition = "attachment" - } - } - w.Header().Set("Content-Disposition", contentDisposition+`; filename="`+fileNameEscaper.Replace(filename)+`"`) - } w.Header().Set("Accept-Ranges", "bytes") + if r.Method == "HEAD" { 
w.Header().Set("Content-Length", strconv.FormatInt(totalSize, 10)) return nil } + adjustHeadersAfterHEAD(w, r, filename) + processRangeRequst(r, w, totalSize, mimeType, func(writer io.Writer, offset int64, size int64) error { if _, e = rs.Seek(offset, 0); e != nil { return e diff --git a/weed/storage/needle/crc.go b/weed/storage/needle/crc.go index 00ea1db69..6fd910bb7 100644 --- a/weed/storage/needle/crc.go +++ b/weed/storage/needle/crc.go @@ -1,11 +1,11 @@ package needle import ( - "crypto/md5" "fmt" - "github.com/chrislusf/seaweedfs/weed/util" "github.com/klauspost/crc32" + + "github.com/chrislusf/seaweedfs/weed/util" ) var table = crc32.MakeTable(crc32.Castagnoli) @@ -29,13 +29,3 @@ func (n *Needle) Etag() string { util.Uint32toBytes(bits, uint32(n.Checksum)) return fmt.Sprintf("%x", bits) } - -func (n *Needle) MD5() string { - - hash := md5.New() - - hash.Write(n.Data) - - return fmt.Sprintf("%x", hash.Sum(nil)) - -} From 1dc30214cbaca4a93285270a29ff77cd3d1f728f Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Sun, 8 Mar 2020 17:02:47 -0700 Subject: [PATCH 0208/2432] mark encryptVolumeData as not ready --- weed/command/filer.go | 2 +- weed/command/server.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/weed/command/filer.go b/weed/command/filer.go index fb1ee2b0f..b027686b5 100644 --- a/weed/command/filer.go +++ b/weed/command/filer.go @@ -52,7 +52,7 @@ func init() { f.dirListingLimit = cmdFiler.Flag.Int("dirListLimit", 100000, "limit sub dir listing size") f.dataCenter = cmdFiler.Flag.String("dataCenter", "", "prefer to write to volumes in this data center") f.disableHttp = cmdFiler.Flag.Bool("disableHttp", false, "disable http request, only gRpc operations are allowed") - f.cipher = cmdFiler.Flag.Bool("encryptVolumeData", false, "encrypt data on volume servers") + f.cipher = cmdFiler.Flag.Bool("work_in_progress_encryptVolumeData", false, " encrypt data on volume servers") } var cmdFiler = &Command{ diff --git a/weed/command/server.go b/weed/command/server.go index 560b90037..ea2844db9 100644 --- a/weed/command/server.go +++ b/weed/command/server.go @@ -81,7 +81,7 @@ func init() { filerOptions.disableDirListing = cmdServer.Flag.Bool("filer.disableDirListing", false, "turn off directory listing") filerOptions.maxMB = cmdServer.Flag.Int("filer.maxMB", 32, "split files larger than the limit") filerOptions.dirListingLimit = cmdServer.Flag.Int("filer.dirListLimit", 1000, "limit sub dir listing size") - filerOptions.cipher = cmdServer.Flag.Bool("filer.encryptVolumeData", false, "encrypt data on volume servers") + filerOptions.cipher = cmdServer.Flag.Bool("filer.work_in_progress_encryptVolumeData", false, " encrypt data on volume servers") serverOptions.v.port = cmdServer.Flag.Int("volume.port", 8080, "volume server http listen port") serverOptions.v.publicPort = cmdServer.Flag.Int("volume.port.public", 0, "volume server public port") From e04c1a13614ee0e4ef5e8ba6a8fe07b36b14827b Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Sun, 8 Mar 2020 17:03:27 -0700 Subject: [PATCH 0209/2432] filer: remove chunk manifest file support This is not needed for filer --- weed/server/filer_server_handlers_write.go | 11 ----------- 1 file changed, 11 deletions(-) diff --git a/weed/server/filer_server_handlers_write.go b/weed/server/filer_server_handlers_write.go index 7fb48f838..997e64d07 100644 --- a/weed/server/filer_server_handlers_write.go +++ b/weed/server/filer_server_handlers_write.go @@ -113,17 +113,6 @@ func (fs *FilerServer) PostHandler(w http.ResponseWriter, r *http.Request) { 
glog.V(4).Infof("write %s to %v", r.URL.Path, urlLocation) u, _ := url.Parse(urlLocation) - - // This allows a client to generate a chunk manifest and submit it to the filer -- it is a little off - // because they need to provide FIDs instead of file paths... - cm, _ := strconv.ParseBool(query.Get("cm")) - if cm { - q := u.Query() - q.Set("cm", "true") - u.RawQuery = q.Encode() - } - glog.V(4).Infoln("post to", u) - ret, err := fs.uploadToVolumeServer(r, u, auth, w, fileId) if err != nil { return From 5ac6297c685a3bd6c9b8a3d0f2328dde01f7013a Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Sun, 8 Mar 2020 17:03:37 -0700 Subject: [PATCH 0210/2432] adjust parameter names --- weed/operation/upload_content.go | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/weed/operation/upload_content.go b/weed/operation/upload_content.go index 0ea39e306..e27ff4da7 100644 --- a/weed/operation/upload_content.go +++ b/weed/operation/upload_content.go @@ -43,25 +43,25 @@ func init() { var fileNameEscaper = strings.NewReplacer("\\", "\\\\", "\"", "\\\"") // Upload sends a POST request to a volume server to upload the content with adjustable compression level -func UploadWithLocalCompressionLevel(uploadUrl string, filename string, cipher bool, reader io.Reader, isGzipped bool, mtype string, pairMap map[string]string, jwt security.EncodedJwt, compressionLevel int) (*UploadResult, error) { +func UploadWithLocalCompressionLevel(uploadUrl string, filename string, cipher bool, reader io.Reader, isInputGzipped bool, mtype string, pairMap map[string]string, jwt security.EncodedJwt, compressionLevel int) (*UploadResult, error) { if compressionLevel < 1 { compressionLevel = 1 } if compressionLevel > 9 { compressionLevel = 9 } - return doUpload(uploadUrl, filename, cipher, reader, isGzipped, mtype, pairMap, compressionLevel, jwt) + return doUpload(uploadUrl, filename, cipher, reader, isInputGzipped, mtype, pairMap, compressionLevel, jwt) } // Upload sends a POST request to a volume server to upload the content with fast compression -func Upload(uploadUrl string, filename string, cipher bool, reader io.Reader, isGzipped bool, mtype string, pairMap map[string]string, jwt security.EncodedJwt) (*UploadResult, error) { - return doUpload(uploadUrl, filename, cipher, reader, isGzipped, mtype, pairMap, flate.BestSpeed, jwt) +func Upload(uploadUrl string, filename string, cipher bool, reader io.Reader, isInputGzipped bool, mtype string, pairMap map[string]string, jwt security.EncodedJwt) (*UploadResult, error) { + return doUpload(uploadUrl, filename, cipher, reader, isInputGzipped, mtype, pairMap, flate.BestSpeed, jwt) } -func doUpload(uploadUrl string, filename string, cipher bool, reader io.Reader, isGzipped bool, mtype string, pairMap map[string]string, compression int, jwt security.EncodedJwt) (*UploadResult, error) { - contentIsGzipped := isGzipped +func doUpload(uploadUrl string, filename string, cipher bool, reader io.Reader, isInputGzipped bool, mtype string, pairMap map[string]string, compression int, jwt security.EncodedJwt) (*UploadResult, error) { + contentIsGzipped := isInputGzipped shouldGzipNow := false - if !isGzipped { + if !isInputGzipped { if shouldBeZipped, iAmSure := util.IsGzippableFileType(filepath.Base(filename), mtype); mtype == "" || iAmSure && shouldBeZipped { shouldGzipNow = true contentIsGzipped = true From 2e3f6ad3a97bc7fad349e63289695547f92c1f8b Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Sun, 8 Mar 2020 21:39:33 -0700 Subject: [PATCH 0211/2432] filer: remember content 
is gzipped or not --- other/java/client/src/main/proto/filer.proto | 1 + .../repeated_vacuum/repeated_vacuum.go | 4 +- weed/command/filer_copy.go | 12 +- weed/filer2/filechunks.go | 14 +- weed/filer2/filer_client_util.go | 2 +- weed/filer2/stream.go | 3 +- weed/filesys/dir.go | 2 - weed/filesys/dirty_page.go | 1 + weed/operation/submit.go | 9 +- weed/operation/upload_content.go | 166 ++++++++++--- weed/pb/filer.proto | 1 + weed/pb/filer_pb/filer.pb.go | 229 +++++++++--------- weed/pb/master_pb/master.pb.go | 20 +- weed/pb/volume_server_pb/volume_server.pb.go | 26 +- weed/replication/sink/azuresink/azure_sink.go | 2 +- weed/replication/sink/b2sink/b2_sink.go | 2 +- .../replication/sink/filersink/fetch_write.go | 1 + weed/replication/sink/gcssink/gcs_sink.go | 2 +- weed/replication/sink/s3sink/s3_write.go | 2 +- weed/server/common.go | 3 +- .../filer_server_handlers_write_autochunk.go | 8 +- .../filer_server_handlers_write_cipher.go | 10 +- weed/server/webdav_server.go | 5 +- weed/topology/store_replicate.go | 3 +- weed/util/http_util.go | 16 +- 25 files changed, 335 insertions(+), 209 deletions(-) diff --git a/other/java/client/src/main/proto/filer.proto b/other/java/client/src/main/proto/filer.proto index 5983c84d8..8df46e917 100644 --- a/other/java/client/src/main/proto/filer.proto +++ b/other/java/client/src/main/proto/filer.proto @@ -100,6 +100,7 @@ message FileChunk { FileId fid = 7; FileId source_fid = 8; bytes cipher_key = 9; + bool is_gzipped = 10; } message FileId { diff --git a/unmaintained/repeated_vacuum/repeated_vacuum.go b/unmaintained/repeated_vacuum/repeated_vacuum.go index ebe5d8225..c2f626f72 100644 --- a/unmaintained/repeated_vacuum/repeated_vacuum.go +++ b/unmaintained/repeated_vacuum/repeated_vacuum.go @@ -1,7 +1,6 @@ package main import ( - "bytes" "flag" "fmt" "log" @@ -31,11 +30,10 @@ func main() { data := make([]byte, 1024) rand.Read(data) - reader := bytes.NewReader(data) targetUrl := fmt.Sprintf("http://%s/%s", assignResult.Url, assignResult.Fid) - _, err = operation.Upload(targetUrl, fmt.Sprintf("test%d", i), false, reader, false, "bench/test", nil, assignResult.Auth) + _, err = operation.UploadData(targetUrl, fmt.Sprintf("test%d", i), false, data, false, "bench/test", nil, assignResult.Auth) if err != nil { log.Fatalf("upload: %v", err) } diff --git a/weed/command/filer_copy.go b/weed/command/filer_copy.go index 50a120875..1162bb204 100644 --- a/weed/command/filer_copy.go +++ b/weed/command/filer_copy.go @@ -38,7 +38,6 @@ type CopyOptions struct { masterClient *wdclient.MasterClient concurrenctFiles *int concurrenctChunks *int - compressionLevel *int grpcDialOption grpc.DialOption masters []string cipher bool @@ -54,7 +53,6 @@ func init() { copy.maxMB = cmdCopy.Flag.Int("maxMB", 32, "split files larger than the limit") copy.concurrenctFiles = cmdCopy.Flag.Int("c", 8, "concurrent file copy goroutines") copy.concurrenctChunks = cmdCopy.Flag.Int("concurrentChunks", 8, "concurrent chunk copy goroutines for each file") - copy.compressionLevel = cmdCopy.Flag.Int("compressionLevel", 9, "local file compression level 1 ~ 9") } var cmdCopy = &Command{ @@ -270,6 +268,10 @@ func (worker *FileCopyWorker) uploadFileAsOne(task FileCopyTask, f *os.File) err // upload the file content fileName := filepath.Base(f.Name()) mimeType := detectMimeType(f) + data, err := ioutil.ReadAll(f) + if err != nil { + return err + } var chunks []*filer_pb.FileChunk var assignResult *filer_pb.AssignVolumeResponse @@ -303,7 +305,7 @@ func (worker *FileCopyWorker) uploadFileAsOne(task FileCopyTask, 
f *os.File) err targetUrl := "http://" + assignResult.Url + "/" + assignResult.FileId - uploadResult, err := operation.UploadWithLocalCompressionLevel(targetUrl, fileName, worker.options.cipher, f, false, mimeType, nil, security.EncodedJwt(assignResult.Auth), *worker.options.compressionLevel) + uploadResult, err := operation.UploadData(targetUrl, fileName, worker.options.cipher, data, false, mimeType, nil, security.EncodedJwt(assignResult.Auth)) if err != nil { return fmt.Errorf("upload data %v to %s: %v\n", fileName, targetUrl, err) } @@ -317,8 +319,9 @@ func (worker *FileCopyWorker) uploadFileAsOne(task FileCopyTask, f *os.File) err Offset: 0, Size: uint64(uploadResult.Size), Mtime: time.Now().UnixNano(), - ETag: uploadResult.ETag, + ETag: uploadResult.Md5, CipherKey: uploadResult.CipherKey, + IsGzipped: uploadResult.Gzip > 0, }) fmt.Printf("copied %s => http://%s%s%s\n", fileName, worker.filerHost, task.destinationUrlPath, fileName) @@ -429,6 +432,7 @@ func (worker *FileCopyWorker) uploadFileInChunks(task FileCopyTask, f *os.File, Mtime: time.Now().UnixNano(), ETag: uploadResult.ETag, CipherKey: uploadResult.CipherKey, + IsGzipped: uploadResult.Gzip > 0, } fmt.Printf("uploaded %s-%d to %s [%d,%d)\n", fileName, i+1, targetUrl, i*chunkSize, i*chunkSize+int64(uploadResult.Size)) }(i) diff --git a/weed/filer2/filechunks.go b/weed/filer2/filechunks.go index 98a965337..711488df1 100644 --- a/weed/filer2/filechunks.go +++ b/weed/filer2/filechunks.go @@ -72,6 +72,7 @@ type ChunkView struct { LogicOffset int64 IsFullChunk bool CipherKey []byte + isGzipped bool } func ViewFromChunks(chunks []*filer_pb.FileChunk, offset int64, size int) (views []*ChunkView) { @@ -87,6 +88,7 @@ func ViewFromVisibleIntervals(visibles []VisibleInterval, offset int64, size int stop := offset + int64(size) for _, chunk := range visibles { + if chunk.start <= offset && offset < chunk.stop && offset < stop { isFullChunk := chunk.isFullChunk && chunk.start == offset && chunk.stop <= stop views = append(views, &ChunkView{ @@ -96,6 +98,7 @@ func ViewFromVisibleIntervals(visibles []VisibleInterval, offset int64, size int LogicOffset: offset, IsFullChunk: isFullChunk, CipherKey: chunk.cipherKey, + isGzipped: chunk.isGzipped, }) offset = min(chunk.stop, stop) } @@ -122,7 +125,7 @@ var bufPool = sync.Pool{ func MergeIntoVisibles(visibles, newVisibles []VisibleInterval, chunk *filer_pb.FileChunk) []VisibleInterval { - newV := newVisibleInterval(chunk.Offset, chunk.Offset+int64(chunk.Size), chunk.GetFileIdString(), chunk.Mtime, true, chunk.CipherKey) + newV := newVisibleInterval(chunk.Offset, chunk.Offset+int64(chunk.Size), chunk.GetFileIdString(), chunk.Mtime, true, chunk.CipherKey, chunk.IsGzipped) length := len(visibles) if length == 0 { @@ -136,11 +139,11 @@ func MergeIntoVisibles(visibles, newVisibles []VisibleInterval, chunk *filer_pb. 
logPrintf(" before", visibles) for _, v := range visibles { if v.start < chunk.Offset && chunk.Offset < v.stop { - newVisibles = append(newVisibles, newVisibleInterval(v.start, chunk.Offset, v.fileId, v.modifiedTime, false, v.cipherKey)) + newVisibles = append(newVisibles, newVisibleInterval(v.start, chunk.Offset, v.fileId, v.modifiedTime, false, v.cipherKey, v.isGzipped)) } chunkStop := chunk.Offset + int64(chunk.Size) if v.start < chunkStop && chunkStop < v.stop { - newVisibles = append(newVisibles, newVisibleInterval(chunkStop, v.stop, v.fileId, v.modifiedTime, false, v.cipherKey)) + newVisibles = append(newVisibles, newVisibleInterval(chunkStop, v.stop, v.fileId, v.modifiedTime, false, v.cipherKey, v.isGzipped)) } if chunkStop <= v.start || v.stop <= chunk.Offset { newVisibles = append(newVisibles, v) @@ -171,6 +174,7 @@ func NonOverlappingVisibleIntervals(chunks []*filer_pb.FileChunk) (visibles []Vi var newVisibles []VisibleInterval for _, chunk := range chunks { + newVisibles = MergeIntoVisibles(visibles, newVisibles, chunk) t := visibles[:0] visibles = newVisibles @@ -193,9 +197,10 @@ type VisibleInterval struct { fileId string isFullChunk bool cipherKey []byte + isGzipped bool } -func newVisibleInterval(start, stop int64, fileId string, modifiedTime int64, isFullChunk bool, cipherKey []byte) VisibleInterval { +func newVisibleInterval(start, stop int64, fileId string, modifiedTime int64, isFullChunk bool, cipherKey []byte, isGzipped bool) VisibleInterval { return VisibleInterval{ start: start, stop: stop, @@ -203,6 +208,7 @@ func newVisibleInterval(start, stop int64, fileId string, modifiedTime int64, is modifiedTime: modifiedTime, isFullChunk: isFullChunk, cipherKey: cipherKey, + isGzipped: isGzipped, } } diff --git a/weed/filer2/filer_client_util.go b/weed/filer2/filer_client_util.go index 98bb57779..1c1fa6a5b 100644 --- a/weed/filer2/filer_client_util.go +++ b/weed/filer2/filer_client_util.go @@ -70,7 +70,7 @@ func ReadIntoBuffer(filerClient FilerClient, fullFilePath FullPath, buff []byte, volumeServerAddress := filerClient.AdjustedUrl(locations.Locations[0].Url) var n int64 - n, err = util.ReadUrl(fmt.Sprintf("http://%s/%s", volumeServerAddress, chunkView.FileId), chunkView.CipherKey, chunkView.IsFullChunk, chunkView.Offset, int(chunkView.Size), buff[chunkView.LogicOffset-baseOffset:chunkView.LogicOffset-baseOffset+int64(chunkView.Size)]) + n, err = util.ReadUrl(fmt.Sprintf("http://%s/%s", volumeServerAddress, chunkView.FileId), chunkView.CipherKey, chunkView.isGzipped, chunkView.IsFullChunk, chunkView.Offset, int(chunkView.Size), buff[chunkView.LogicOffset-baseOffset:chunkView.LogicOffset-baseOffset+int64(chunkView.Size)]) if err != nil { diff --git a/weed/filer2/stream.go b/weed/filer2/stream.go index 0a1f943ea..381d99144 100644 --- a/weed/filer2/stream.go +++ b/weed/filer2/stream.go @@ -26,8 +26,9 @@ func StreamContent(masterClient *wdclient.MasterClient, w io.Writer, chunks []*f } for _, chunkView := range chunkViews { + urlString := fileId2Url[chunkView.FileId] - err := util.ReadUrlAsStream(urlString, chunkView.CipherKey, chunkView.IsFullChunk, chunkView.Offset, int(chunkView.Size), func(data []byte) { + err := util.ReadUrlAsStream(urlString, chunkView.CipherKey, chunkView.isGzipped, chunkView.IsFullChunk, chunkView.Offset, int(chunkView.Size), func(data []byte) { w.Write(data) }) if err != nil { diff --git a/weed/filesys/dir.go b/weed/filesys/dir.go index 1b11ddb9e..483229b3f 100644 --- a/weed/filesys/dir.go +++ b/weed/filesys/dir.go @@ -35,8 +35,6 @@ var _ = 
fs.NodeForgetter(&Dir{}) func (dir *Dir) Attr(ctx context.Context, attr *fuse.Attr) error { - glog.V(3).Infof("dir Attr %s, existing attr: %+v", dir.Path, attr) - // https://github.com/bazil/fuse/issues/196 attr.Valid = time.Second diff --git a/weed/filesys/dirty_page.go b/weed/filesys/dirty_page.go index ff3b8f885..7e33c97a7 100644 --- a/weed/filesys/dirty_page.go +++ b/weed/filesys/dirty_page.go @@ -191,6 +191,7 @@ func (pages *ContinuousDirtyPages) saveToStorage(reader io.Reader, offset int64, Mtime: time.Now().UnixNano(), ETag: uploadResult.ETag, CipherKey: uploadResult.CipherKey, + IsGzipped: uploadResult.Gzip > 0, }, nil } diff --git a/weed/operation/submit.go b/weed/operation/submit.go index 8c7ed5d7b..5e4dc4374 100644 --- a/weed/operation/submit.go +++ b/weed/operation/submit.go @@ -1,8 +1,6 @@ package operation import ( - "bytes" - "google.golang.org/grpc" "io" "mime" "net/url" @@ -11,6 +9,8 @@ import ( "strconv" "strings" + "google.golang.org/grpc" + "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/security" ) @@ -52,7 +52,7 @@ func SubmitFiles(master string, grpcDialOption grpc.DialOption, files []FilePart } ret, err := Assign(master, grpcDialOption, ar) if err != nil { - for index, _ := range files { + for index := range files { results[index].Error = err.Error() } return results, err @@ -214,12 +214,11 @@ func upload_chunked_file_manifest(fileUrl string, manifest *ChunkManifest, jwt s if e != nil { return e } - bufReader := bytes.NewReader(buf) glog.V(4).Info("Uploading chunks manifest ", manifest.Name, " to ", fileUrl, "...") u, _ := url.Parse(fileUrl) q := u.Query() q.Set("cm", "true") u.RawQuery = q.Encode() - _, e = Upload(u.String(), manifest.Name, false, bufReader, false, "application/json", nil, jwt) + _, e = UploadData(u.String(), manifest.Name, false, buf, false, "application/json", nil, jwt) return e } diff --git a/weed/operation/upload_content.go b/weed/operation/upload_content.go index e27ff4da7..a83317975 100644 --- a/weed/operation/upload_content.go +++ b/weed/operation/upload_content.go @@ -3,7 +3,7 @@ package operation import ( "bytes" "compress/flate" - "compress/gzip" + "crypto/md5" "encoding/json" "errors" "fmt" @@ -28,6 +28,8 @@ type UploadResult struct { ETag string `json:"eTag,omitempty"` CipherKey []byte `json:"cipherKey,omitempty"` Mime string `json:"mime,omitempty"` + Gzip uint32 `json:"gzip,omitempty"` + Md5 string `json:"md5,omitempty"` } var ( @@ -43,22 +45,28 @@ func init() { var fileNameEscaper = strings.NewReplacer("\\", "\\\\", "\"", "\\\"") // Upload sends a POST request to a volume server to upload the content with adjustable compression level -func UploadWithLocalCompressionLevel(uploadUrl string, filename string, cipher bool, reader io.Reader, isInputGzipped bool, mtype string, pairMap map[string]string, jwt security.EncodedJwt, compressionLevel int) (*UploadResult, error) { - if compressionLevel < 1 { - compressionLevel = 1 +func UploadData(uploadUrl string, filename string, cipher bool, data []byte, isInputGzipped bool, mtype string, pairMap map[string]string, jwt security.EncodedJwt) (uploadResult *UploadResult, err error) { + hash := md5.New() + hash.Write(data) + uploadResult, err = doUploadData(uploadUrl, filename, cipher, data, isInputGzipped, mtype, pairMap, jwt) + if uploadResult != nil { + uploadResult.Md5 = fmt.Sprintf("%x", hash.Sum(nil)) } - if compressionLevel > 9 { - compressionLevel = 9 - } - return doUpload(uploadUrl, filename, cipher, reader, isInputGzipped, mtype, pairMap, compressionLevel, 
jwt) + return } // Upload sends a POST request to a volume server to upload the content with fast compression -func Upload(uploadUrl string, filename string, cipher bool, reader io.Reader, isInputGzipped bool, mtype string, pairMap map[string]string, jwt security.EncodedJwt) (*UploadResult, error) { - return doUpload(uploadUrl, filename, cipher, reader, isInputGzipped, mtype, pairMap, flate.BestSpeed, jwt) +func Upload(uploadUrl string, filename string, cipher bool, reader io.Reader, isInputGzipped bool, mtype string, pairMap map[string]string, jwt security.EncodedJwt) (uploadResult *UploadResult, err error) { + hash := md5.New() + reader = io.TeeReader(reader, hash) + uploadResult, err = doUpload(uploadUrl, filename, cipher, reader, isInputGzipped, mtype, pairMap, flate.BestSpeed, jwt) + if uploadResult != nil { + uploadResult.Md5 = fmt.Sprintf("%x", hash.Sum(nil)) + } + return } -func doUpload(uploadUrl string, filename string, cipher bool, reader io.Reader, isInputGzipped bool, mtype string, pairMap map[string]string, compression int, jwt security.EncodedJwt) (*UploadResult, error) { +func doUploadData(uploadUrl string, filename string, cipher bool, data []byte, isInputGzipped bool, mtype string, pairMap map[string]string, jwt security.EncodedJwt) (uploadResult *UploadResult, err error) { contentIsGzipped := isInputGzipped shouldGzipNow := false if !isInputGzipped { @@ -67,33 +75,131 @@ func doUpload(uploadUrl string, filename string, cipher bool, reader io.Reader, contentIsGzipped = true } } - // encrypt data - var cipherKey util.CipherKey + var clearDataLen int - var err error + + // gzip if possible + // this could be double copying + clearDataLen = len(data) + if shouldGzipNow { + data, err = util.GzipData(data) + } else if isInputGzipped { + // just to get the clear data length + clearData, err := util.UnGzipData(data) + if err == nil { + clearDataLen = len(clearData) + } + } + if cipher { - cipherKey, reader, clearDataLen, _, err = util.EncryptReader(reader) - if err != nil { - return nil, err + // encrypt(gzip(data)) + + // encrypt + cipherKey := util.GenCipherKey() + encryptedData, encryptionErr := util.Encrypt(data, cipherKey) + if encryptionErr != nil { + err = fmt.Errorf("encrypt input: %v", encryptionErr) + return } + + // upload data + uploadResult, err = upload_content(uploadUrl, func(w io.Writer) (err error) { + _, err = w.Write(encryptedData) + return + }, "", false, "", nil, jwt) + if uploadResult != nil { + uploadResult.Name = filename + uploadResult.Mime = mtype + uploadResult.CipherKey = cipherKey + } + } else { + // upload data + uploadResult, err = upload_content(uploadUrl, func(w io.Writer) (err error) { + _, err = w.Write(data) + return + }, filename, contentIsGzipped, mtype, pairMap, jwt) + } + + uploadResult.Size = uint32(clearDataLen) + if contentIsGzipped { + uploadResult.Gzip = 1 } - // upload data - uploadResult, err := upload_content(uploadUrl, func(w io.Writer) (err error) { - if shouldGzipNow { - gzWriter, _ := gzip.NewWriterLevel(w, compression) - _, err = io.Copy(gzWriter, reader) - gzWriter.Close() - } else { - _, err = io.Copy(w, reader) + return uploadResult, err +} + +func doUpload(uploadUrl string, filename string, cipher bool, reader io.Reader, isInputGzipped bool, mtype string, pairMap map[string]string, compression int, jwt security.EncodedJwt) (uploadResult *UploadResult, err error) { + contentIsGzipped := isInputGzipped + shouldGzipNow := false + if !isInputGzipped { + if shouldBeZipped, iAmSure := 
util.IsGzippableFileType(filepath.Base(filename), mtype); mtype == "" || iAmSure && shouldBeZipped { + shouldGzipNow = true + contentIsGzipped = true + } + } + + var clearDataLen int + + // gzip if possible + // this could be double copying + data, readErr := ioutil.ReadAll(reader) + if readErr != nil { + err = fmt.Errorf("read input: %v", readErr) + return + } + clearDataLen = len(data) + if shouldGzipNow { + data, err = util.GzipData(data) + } else if isInputGzipped { + // just to get the clear data length + clearData, err := util.UnGzipData(data) + if err == nil { + clearDataLen = len(clearData) + } + } + + println("data size", clearDataLen) + + if cipher { + // encrypt(gzip(data)) + + // encrypt + cipherKey := util.GenCipherKey() + encryptedData, encryptionErr := util.Encrypt(data, cipherKey) + if encryptionErr != nil { + err = fmt.Errorf("encrypt input: %v", encryptionErr) + return } + println("encrypted data size", len(encryptedData)) + + // upload data + uploadResult, err = upload_content(uploadUrl, func(w io.Writer) (err error) { + n, err := w.Write(encryptedData) + println("writtern data size", n) + return + }, "", false, "", nil, jwt) + if uploadResult != nil { + uploadResult.Name = filename + uploadResult.Mime = mtype + uploadResult.CipherKey = cipherKey + uploadResult.Size = uint32(clearDataLen) + } + } else { + // upload data + uploadResult, err = upload_content(uploadUrl, func(w io.Writer) (err error) { + n, err := w.Write(data) + println("written data size", n) + return + }, filename, contentIsGzipped, mtype, pairMap, jwt) + } + + if uploadResult == nil { return - }, filename, contentIsGzipped, mtype, pairMap, jwt) + } - // remember cipher key - if uploadResult != nil && cipherKey != nil { - uploadResult.CipherKey = cipherKey - uploadResult.Size = uint32(clearDataLen) + uploadResult.Size = uint32(clearDataLen) + if contentIsGzipped { + uploadResult.Gzip = 1 } return uploadResult, err diff --git a/weed/pb/filer.proto b/weed/pb/filer.proto index 5983c84d8..8df46e917 100644 --- a/weed/pb/filer.proto +++ b/weed/pb/filer.proto @@ -100,6 +100,7 @@ message FileChunk { FileId fid = 7; FileId source_fid = 8; bytes cipher_key = 9; + bool is_gzipped = 10; } message FileId { diff --git a/weed/pb/filer_pb/filer.pb.go b/weed/pb/filer_pb/filer.pb.go index 92a12321c..9cf659ece 100644 --- a/weed/pb/filer_pb/filer.pb.go +++ b/weed/pb/filer_pb/filer.pb.go @@ -288,6 +288,7 @@ type FileChunk struct { Fid *FileId `protobuf:"bytes,7,opt,name=fid" json:"fid,omitempty"` SourceFid *FileId `protobuf:"bytes,8,opt,name=source_fid,json=sourceFid" json:"source_fid,omitempty"` CipherKey []byte `protobuf:"bytes,9,opt,name=cipher_key,json=cipherKey,proto3" json:"cipher_key,omitempty"` + IsGzipped bool `protobuf:"varint,10,opt,name=is_gzipped,json=isGzipped" json:"is_gzipped,omitempty"` } func (m *FileChunk) Reset() { *m = FileChunk{} } @@ -358,6 +359,13 @@ func (m *FileChunk) GetCipherKey() []byte { return nil } +func (m *FileChunk) GetIsGzipped() bool { + if m != nil { + return m.IsGzipped + } + return false +} + type FileId struct { VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` FileKey uint64 `protobuf:"varint,2,opt,name=file_key,json=fileKey" json:"file_key,omitempty"` @@ -1610,114 +1618,115 @@ var _SeaweedFiler_serviceDesc = grpc.ServiceDesc{ func init() { proto.RegisterFile("filer.proto", fileDescriptor0) } var fileDescriptor0 = []byte{ - // 1742 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xb4, 
[... gzipped FileDescriptorProto byte arrays omitted: the old 1742-byte descriptor is replaced by a regenerated 1759-byte descriptor; only these machine-generated bytes change in this hunk ...]
0xfc, 0x37, 0xee, + 0xe7, 0xff, 0x0b, 0x00, 0x00, 0xff, 0xff, 0x35, 0x0b, 0x9e, 0x2e, 0x9c, 0x13, 0x00, 0x00, } diff --git a/weed/pb/master_pb/master.pb.go b/weed/pb/master_pb/master.pb.go index 95c9533a1..c33e2b768 100644 --- a/weed/pb/master_pb/master.pb.go +++ b/weed/pb/master_pb/master.pb.go @@ -428,12 +428,10 @@ type VolumeEcShardInformationMessage struct { EcIndexBits uint32 `protobuf:"varint,3,opt,name=ec_index_bits,json=ecIndexBits" json:"ec_index_bits,omitempty"` } -func (m *VolumeEcShardInformationMessage) Reset() { *m = VolumeEcShardInformationMessage{} } -func (m *VolumeEcShardInformationMessage) String() string { return proto.CompactTextString(m) } -func (*VolumeEcShardInformationMessage) ProtoMessage() {} -func (*VolumeEcShardInformationMessage) Descriptor() ([]byte, []int) { - return fileDescriptor0, []int{4} -} +func (m *VolumeEcShardInformationMessage) Reset() { *m = VolumeEcShardInformationMessage{} } +func (m *VolumeEcShardInformationMessage) String() string { return proto.CompactTextString(m) } +func (*VolumeEcShardInformationMessage) ProtoMessage() {} +func (*VolumeEcShardInformationMessage) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } func (m *VolumeEcShardInformationMessage) GetId() uint32 { if m != nil { @@ -1424,12 +1422,10 @@ type GetMasterConfigurationResponse struct { MetricsIntervalSeconds uint32 `protobuf:"varint,2,opt,name=metrics_interval_seconds,json=metricsIntervalSeconds" json:"metrics_interval_seconds,omitempty"` } -func (m *GetMasterConfigurationResponse) Reset() { *m = GetMasterConfigurationResponse{} } -func (m *GetMasterConfigurationResponse) String() string { return proto.CompactTextString(m) } -func (*GetMasterConfigurationResponse) ProtoMessage() {} -func (*GetMasterConfigurationResponse) Descriptor() ([]byte, []int) { - return fileDescriptor0, []int{32} -} +func (m *GetMasterConfigurationResponse) Reset() { *m = GetMasterConfigurationResponse{} } +func (m *GetMasterConfigurationResponse) String() string { return proto.CompactTextString(m) } +func (*GetMasterConfigurationResponse) ProtoMessage() {} +func (*GetMasterConfigurationResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{32} } func (m *GetMasterConfigurationResponse) GetMetricsAddress() string { if m != nil { diff --git a/weed/pb/volume_server_pb/volume_server.pb.go b/weed/pb/volume_server_pb/volume_server.pb.go index 588b18f2e..56baa0cf7 100644 --- a/weed/pb/volume_server_pb/volume_server.pb.go +++ b/weed/pb/volume_server_pb/volume_server.pb.go @@ -1035,12 +1035,10 @@ func (m *VolumeEcShardsGenerateRequest) GetCollection() string { type VolumeEcShardsGenerateResponse struct { } -func (m *VolumeEcShardsGenerateResponse) Reset() { *m = VolumeEcShardsGenerateResponse{} } -func (m *VolumeEcShardsGenerateResponse) String() string { return proto.CompactTextString(m) } -func (*VolumeEcShardsGenerateResponse) ProtoMessage() {} -func (*VolumeEcShardsGenerateResponse) Descriptor() ([]byte, []int) { - return fileDescriptor0, []int{41} -} +func (m *VolumeEcShardsGenerateResponse) Reset() { *m = VolumeEcShardsGenerateResponse{} } +func (m *VolumeEcShardsGenerateResponse) String() string { return proto.CompactTextString(m) } +func (*VolumeEcShardsGenerateResponse) ProtoMessage() {} +func (*VolumeEcShardsGenerateResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{41} } type VolumeEcShardsRebuildRequest struct { VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` @@ -1413,12 +1411,10 @@ func (m 
*VolumeEcShardsToVolumeRequest) GetCollection() string { type VolumeEcShardsToVolumeResponse struct { } -func (m *VolumeEcShardsToVolumeResponse) Reset() { *m = VolumeEcShardsToVolumeResponse{} } -func (m *VolumeEcShardsToVolumeResponse) String() string { return proto.CompactTextString(m) } -func (*VolumeEcShardsToVolumeResponse) ProtoMessage() {} -func (*VolumeEcShardsToVolumeResponse) Descriptor() ([]byte, []int) { - return fileDescriptor0, []int{57} -} +func (m *VolumeEcShardsToVolumeResponse) Reset() { *m = VolumeEcShardsToVolumeResponse{} } +func (m *VolumeEcShardsToVolumeResponse) String() string { return proto.CompactTextString(m) } +func (*VolumeEcShardsToVolumeResponse) ProtoMessage() {} +func (*VolumeEcShardsToVolumeResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{57} } type ReadVolumeFileStatusRequest struct { VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` @@ -2085,10 +2081,8 @@ type QueryRequest_InputSerialization_JSONInput struct { func (m *QueryRequest_InputSerialization_JSONInput) Reset() { *m = QueryRequest_InputSerialization_JSONInput{} } -func (m *QueryRequest_InputSerialization_JSONInput) String() string { - return proto.CompactTextString(m) -} -func (*QueryRequest_InputSerialization_JSONInput) ProtoMessage() {} +func (m *QueryRequest_InputSerialization_JSONInput) String() string { return proto.CompactTextString(m) } +func (*QueryRequest_InputSerialization_JSONInput) ProtoMessage() {} func (*QueryRequest_InputSerialization_JSONInput) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{70, 1, 1} } diff --git a/weed/replication/sink/azuresink/azure_sink.go b/weed/replication/sink/azuresink/azure_sink.go index 66e7b9c3e..89e04922f 100644 --- a/weed/replication/sink/azuresink/azure_sink.go +++ b/weed/replication/sink/azuresink/azure_sink.go @@ -115,7 +115,7 @@ func (g *AzureSink) CreateEntry(key string, entry *filer_pb.Entry) error { } var writeErr error - readErr := util.ReadUrlAsStream(fileUrl, nil, chunk.IsFullChunk, chunk.Offset, int(chunk.Size), func(data []byte) { + readErr := util.ReadUrlAsStream(fileUrl, nil, false, chunk.IsFullChunk, chunk.Offset, int(chunk.Size), func(data []byte) { _, writeErr = appendBlobURL.AppendBlock(context.Background(), bytes.NewReader(data), azblob.AppendBlobAccessConditions{}, nil) }) diff --git a/weed/replication/sink/b2sink/b2_sink.go b/weed/replication/sink/b2sink/b2_sink.go index 0f8313638..df0653f73 100644 --- a/weed/replication/sink/b2sink/b2_sink.go +++ b/weed/replication/sink/b2sink/b2_sink.go @@ -103,7 +103,7 @@ func (g *B2Sink) CreateEntry(key string, entry *filer_pb.Entry) error { } var writeErr error - readErr := util.ReadUrlAsStream(fileUrl, nil, chunk.IsFullChunk, chunk.Offset, int(chunk.Size), func(data []byte) { + readErr := util.ReadUrlAsStream(fileUrl, nil, false, chunk.IsFullChunk, chunk.Offset, int(chunk.Size), func(data []byte) { _, err := writer.Write(data) if err != nil { writeErr = err diff --git a/weed/replication/sink/filersink/fetch_write.go b/weed/replication/sink/filersink/fetch_write.go index 512fbd46b..07218b9b3 100644 --- a/weed/replication/sink/filersink/fetch_write.go +++ b/weed/replication/sink/filersink/fetch_write.go @@ -51,6 +51,7 @@ func (fs *FilerSink) replicateOneChunk(sourceChunk *filer_pb.FileChunk, dir stri ETag: sourceChunk.ETag, SourceFileId: sourceChunk.GetFileIdString(), CipherKey: sourceChunk.CipherKey, + IsGzipped: sourceChunk.IsGzipped, }, nil } diff --git a/weed/replication/sink/gcssink/gcs_sink.go 
b/weed/replication/sink/gcssink/gcs_sink.go index dd8567e0e..694399274 100644 --- a/weed/replication/sink/gcssink/gcs_sink.go +++ b/weed/replication/sink/gcssink/gcs_sink.go @@ -101,7 +101,7 @@ func (g *GcsSink) CreateEntry(key string, entry *filer_pb.Entry) error { return err } - err = util.ReadUrlAsStream(fileUrl, nil, chunk.IsFullChunk, chunk.Offset, int(chunk.Size), func(data []byte) { + err = util.ReadUrlAsStream(fileUrl, nil, false, chunk.IsFullChunk, chunk.Offset, int(chunk.Size), func(data []byte) { wc.Write(data) }) diff --git a/weed/replication/sink/s3sink/s3_write.go b/weed/replication/sink/s3sink/s3_write.go index dcc041642..854688b1e 100644 --- a/weed/replication/sink/s3sink/s3_write.go +++ b/weed/replication/sink/s3sink/s3_write.go @@ -162,6 +162,6 @@ func (s3sink *S3Sink) buildReadSeeker(chunk *filer2.ChunkView) (io.ReadSeeker, e return nil, err } buf := make([]byte, chunk.Size) - util.ReadUrl(fileUrl, nil, false, chunk.Offset, int(chunk.Size), buf) + util.ReadUrl(fileUrl, nil, false,false, chunk.Offset, int(chunk.Size), buf) return bytes.NewReader(buf), nil } diff --git a/weed/server/common.go b/weed/server/common.go index 5904e2c47..e06142d7f 100644 --- a/weed/server/common.go +++ b/weed/server/common.go @@ -1,7 +1,6 @@ package weed_server import ( - "bytes" "encoding/json" "errors" "fmt" @@ -136,7 +135,7 @@ func submitForClientHandler(w http.ResponseWriter, r *http.Request, masterUrl st } debug("upload file to store", url) - uploadResult, err := operation.Upload(url, pu.FileName, false, bytes.NewReader(pu.Data), pu.IsGzipped, pu.MimeType, pu.PairMap, assignResult.Auth) + uploadResult, err := operation.UploadData(url, pu.FileName, false, pu.Data, pu.IsGzipped, pu.MimeType, pu.PairMap, assignResult.Auth) if err != nil { writeJsonError(w, r, http.StatusInternalServerError, err) return diff --git a/weed/server/filer_server_handlers_write_autochunk.go b/weed/server/filer_server_handlers_write_autochunk.go index 641a367b1..bce3c78bc 100644 --- a/weed/server/filer_server_handlers_write_autochunk.go +++ b/weed/server/filer_server_handlers_write_autochunk.go @@ -2,6 +2,7 @@ package weed_server import ( "context" + "fmt" "io" "net/http" "path" @@ -87,6 +88,9 @@ func (fs *FilerServer) doAutoChunk(ctx context.Context, w http.ResponseWriter, r if fileName != "" { fileName = path.Base(fileName) } + contentType := part1.Header.Get("Content-Type") + + fmt.Printf("autochunk part header: %+v\n", part1.Header) var fileChunks []*filer_pb.FileChunk @@ -102,7 +106,7 @@ func (fs *FilerServer) doAutoChunk(ctx context.Context, w http.ResponseWriter, r } // upload the chunk to the volume server - uploadResult, uploadErr := fs.doUpload(urlLocation, w, r, limitedReader, "", "", nil, auth) + uploadResult, uploadErr := fs.doUpload(urlLocation, w, r, limitedReader, fileName, contentType, nil, auth) if uploadErr != nil { return nil, uploadErr } @@ -121,6 +125,7 @@ func (fs *FilerServer) doAutoChunk(ctx context.Context, w http.ResponseWriter, r Mtime: time.Now().UnixNano(), ETag: uploadResult.ETag, CipherKey: uploadResult.CipherKey, + IsGzipped: uploadResult.Gzip > 0, }, ) @@ -154,6 +159,7 @@ func (fs *FilerServer) doAutoChunk(ctx context.Context, w http.ResponseWriter, r Replication: replication, Collection: collection, TtlSec: int32(util.ParseInt(r.URL.Query().Get("ttl"), 0)), + Mime: contentType, }, Chunks: fileChunks, } diff --git a/weed/server/filer_server_handlers_write_cipher.go b/weed/server/filer_server_handlers_write_cipher.go index bd2b52fb3..3aebc4b61 100644 --- 
a/weed/server/filer_server_handlers_write_cipher.go +++ b/weed/server/filer_server_handlers_write_cipher.go @@ -1,7 +1,6 @@ package weed_server import ( - "bytes" "context" "fmt" "net/http" @@ -28,7 +27,7 @@ func (fs *FilerServer) encrypt(ctx context.Context, w http.ResponseWriter, r *ht glog.V(4).Infof("write %s to %v", r.URL.Path, urlLocation) - // Note: gzip(cipher(data)), cipher data first, then gzip + // Note: encrypt(gzip(data)), encrypt data first, then gzip sizeLimit := int64(fs.option.MaxMB) * 1024 * 1024 @@ -41,7 +40,7 @@ func (fs *FilerServer) encrypt(ctx context.Context, w http.ResponseWriter, r *ht pu.MimeType = http.DetectContentType(uncompressedData) } - uploadResult, uploadError := operation.Upload(urlLocation, pu.FileName, true, bytes.NewReader(uncompressedData), false, pu.MimeType, pu.PairMap, auth) + uploadResult, uploadError := operation.UploadData(urlLocation, pu.FileName, true, uncompressedData, false, pu.MimeType, pu.PairMap, auth) if uploadError != nil { return nil, fmt.Errorf("upload to volume server: %v", uploadError) } @@ -53,11 +52,14 @@ func (fs *FilerServer) encrypt(ctx context.Context, w http.ResponseWriter, r *ht Offset: 0, Size: uint64(uploadResult.Size), Mtime: time.Now().UnixNano(), - ETag: uploadResult.ETag, + ETag: uploadResult.Md5, CipherKey: uploadResult.CipherKey, + IsGzipped: uploadResult.Gzip > 0, }, } + fmt.Printf("uploaded: %+v\n", uploadResult) + path := r.URL.Path if strings.HasSuffix(path, "/") { if pu.FileName != "" { diff --git a/weed/server/webdav_server.go b/weed/server/webdav_server.go index 9451fdf99..1fb0912c5 100644 --- a/weed/server/webdav_server.go +++ b/weed/server/webdav_server.go @@ -1,7 +1,6 @@ package weed_server import ( - "bytes" "context" "fmt" "io" @@ -418,8 +417,7 @@ func (f *WebDavFile) Write(buf []byte) (int, error) { } fileUrl := fmt.Sprintf("http://%s/%s", host, fileId) - bufReader := bytes.NewReader(buf) - uploadResult, err := operation.Upload(fileUrl, f.name, f.fs.option.Cipher, bufReader, false, "", nil, auth) + uploadResult, err := operation.UploadData(fileUrl, f.name, f.fs.option.Cipher, buf, false, "", nil, auth) if err != nil { glog.V(0).Infof("upload data %v to %s: %v", f.name, fileUrl, err) return 0, fmt.Errorf("upload data: %v", err) @@ -436,6 +434,7 @@ func (f *WebDavFile) Write(buf []byte) (int, error) { Mtime: time.Now().UnixNano(), ETag: uploadResult.ETag, CipherKey: uploadResult.CipherKey, + IsGzipped: uploadResult.Gzip > 0, } f.entry.Chunks = append(f.entry.Chunks, chunk) diff --git a/weed/topology/store_replicate.go b/weed/topology/store_replicate.go index 332ec4d65..4acc71be5 100644 --- a/weed/topology/store_replicate.go +++ b/weed/topology/store_replicate.go @@ -1,7 +1,6 @@ package topology import ( - "bytes" "encoding/json" "errors" "fmt" @@ -73,7 +72,7 @@ func ReplicatedWrite(masterNode string, s *storage.Store, } // volume server do not know about encryption - _, err := operation.Upload(u.String(), string(n.Name), false, bytes.NewReader(n.Data), n.IsGzipped(), string(n.Mime), pairMap, jwt) + _, err := operation.UploadData(u.String(), string(n.Name), false, n.Data, n.IsGzipped(), string(n.Mime), pairMap, jwt) return err }); err != nil { size = 0 diff --git a/weed/util/http_util.go b/weed/util/http_util.go index 833db910c..750516b92 100644 --- a/weed/util/http_util.go +++ b/weed/util/http_util.go @@ -189,11 +189,11 @@ func NormalizeUrl(url string) string { return "http://" + url } -func ReadUrl(fileUrl string, cipherKey []byte, isFullChunk bool, offset int64, size int, buf []byte) (int64, error) { 
+func ReadUrl(fileUrl string, cipherKey []byte, isGzipped bool, isFullChunk bool, offset int64, size int, buf []byte) (int64, error) { if cipherKey != nil { var n int - err := readEncryptedUrl(fileUrl, cipherKey, offset, size, func(data []byte) { + err := readEncryptedUrl(fileUrl, cipherKey, isGzipped, offset, size, func(data []byte) { n = copy(buf, data) }) return int64(n), err @@ -258,10 +258,10 @@ func ReadUrl(fileUrl string, cipherKey []byte, isFullChunk bool, offset int64, s return n, err } -func ReadUrlAsStream(fileUrl string, cipherKey []byte, isFullChunk bool, offset int64, size int, fn func(data []byte)) error { +func ReadUrlAsStream(fileUrl string, cipherKey []byte, isContentGzipped bool, isFullChunk bool, offset int64, size int, fn func(data []byte)) error { if cipherKey != nil { - return readEncryptedUrl(fileUrl, cipherKey, offset, size, fn) + return readEncryptedUrl(fileUrl, cipherKey, isContentGzipped, offset, size, fn) } req, err := http.NewRequest("GET", fileUrl, nil) @@ -300,7 +300,7 @@ func ReadUrlAsStream(fileUrl string, cipherKey []byte, isFullChunk bool, offset } -func readEncryptedUrl(fileUrl string, cipherKey []byte, offset int64, size int, fn func(data []byte)) error { +func readEncryptedUrl(fileUrl string, cipherKey []byte, isContentGzipped bool, offset int64, size int, fn func(data []byte)) error { encryptedData, err := Get(fileUrl) if err != nil { return fmt.Errorf("fetch %s: %v", fileUrl, err) @@ -309,6 +309,12 @@ func readEncryptedUrl(fileUrl string, cipherKey []byte, offset int64, size int, if err != nil { return fmt.Errorf("decrypt %s: %v", fileUrl, err) } + if isContentGzipped { + decryptedData, err = UnGzipData(decryptedData) + if err != nil { + return fmt.Errorf("unzip decrypt %s: %v", fileUrl, err) + } + } if len(decryptedData) < int(offset)+size { return fmt.Errorf("read decrypted %s size %d [%d, %d)", fileUrl, len(decryptedData), offset, int(offset)+size) } From 3934c69757757fd0067f4db0fb99735c1d95fb67 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Sun, 8 Mar 2020 21:54:47 -0700 Subject: [PATCH 0212/2432] clean up --- weed/operation/upload_content.go | 9 ++------- 1 file changed, 2 insertions(+), 7 deletions(-) diff --git a/weed/operation/upload_content.go b/weed/operation/upload_content.go index a83317975..52f8f9e2b 100644 --- a/weed/operation/upload_content.go +++ b/weed/operation/upload_content.go @@ -158,8 +158,6 @@ func doUpload(uploadUrl string, filename string, cipher bool, reader io.Reader, } } - println("data size", clearDataLen) - if cipher { // encrypt(gzip(data)) @@ -170,12 +168,10 @@ func doUpload(uploadUrl string, filename string, cipher bool, reader io.Reader, err = fmt.Errorf("encrypt input: %v", encryptionErr) return } - println("encrypted data size", len(encryptedData)) // upload data uploadResult, err = upload_content(uploadUrl, func(w io.Writer) (err error) { - n, err := w.Write(encryptedData) - println("writtern data size", n) + _, err = w.Write(encryptedData) return }, "", false, "", nil, jwt) if uploadResult != nil { @@ -187,8 +183,7 @@ func doUpload(uploadUrl string, filename string, cipher bool, reader io.Reader, } else { // upload data uploadResult, err = upload_content(uploadUrl, func(w io.Writer) (err error) { - n, err := w.Write(data) - println("written data size", n) + _, err = w.Write(data) return }, filename, contentIsGzipped, mtype, pairMap, jwt) } From c32f95c3802df38857c7f4acf99b51cdb267134d Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Sun, 8 Mar 2020 22:02:34 -0700 Subject: [PATCH 0213/2432] 1.59 --- 
k8s/seaweedfs/Chart.yaml | 2 +- k8s/seaweedfs/values.yaml | 2 +- weed/util/constants.go | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/k8s/seaweedfs/Chart.yaml b/k8s/seaweedfs/Chart.yaml index d0c650c7c..ccf621c96 100644 --- a/k8s/seaweedfs/Chart.yaml +++ b/k8s/seaweedfs/Chart.yaml @@ -1,4 +1,4 @@ apiVersion: v1 description: SeaweedFS name: seaweedfs -version: 1.58 +version: 1.59 diff --git a/k8s/seaweedfs/values.yaml b/k8s/seaweedfs/values.yaml index b65f7376b..841551496 100644 --- a/k8s/seaweedfs/values.yaml +++ b/k8s/seaweedfs/values.yaml @@ -4,7 +4,7 @@ global: registry: "" repository: "" imageName: chrislusf/seaweedfs - imageTag: "1.58" + imageTag: "1.59" imagePullPolicy: IfNotPresent imagePullSecrets: imagepullsecret restartPolicy: Always diff --git a/weed/util/constants.go b/weed/util/constants.go index 11b6e0382..05f2f0ec8 100644 --- a/weed/util/constants.go +++ b/weed/util/constants.go @@ -5,5 +5,5 @@ import ( ) var ( - VERSION = fmt.Sprintf("%s %d.%d", sizeLimit, 1, 58) + VERSION = fmt.Sprintf("%s %d.%d", sizeLimit, 1, 59) ) From 8a899992f2d3f1b248f91065f3d5c8db3e10f325 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Mon, 9 Mar 2020 00:16:10 -0700 Subject: [PATCH 0214/2432] filer: fix ttl parsing fix https://github.com/chrislusf/seaweedfs/issues/1225 --- weed/server/filer_server_handlers_write.go | 27 ++++++++++++------- .../filer_server_handlers_write_autochunk.go | 11 ++++---- .../filer_server_handlers_write_cipher.go | 7 +++-- 3 files changed, 26 insertions(+), 19 deletions(-) diff --git a/weed/server/filer_server_handlers_write.go b/weed/server/filer_server_handlers_write.go index 997e64d07..5cd174b17 100644 --- a/weed/server/filer_server_handlers_write.go +++ b/weed/server/filer_server_handlers_write.go @@ -23,6 +23,7 @@ import ( "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" "github.com/chrislusf/seaweedfs/weed/security" "github.com/chrislusf/seaweedfs/weed/stats" + "github.com/chrislusf/seaweedfs/weed/storage/needle" "github.com/chrislusf/seaweedfs/weed/util" ) @@ -39,7 +40,7 @@ type FilerPostResult struct { Url string `json:"url,omitempty"` } -func (fs *FilerServer) assignNewFileInfo(w http.ResponseWriter, r *http.Request, replication, collection string, dataCenter string) (fileId, urlLocation string, auth security.EncodedJwt, err error) { +func (fs *FilerServer) assignNewFileInfo(w http.ResponseWriter, r *http.Request, replication, collection, dataCenter, ttlString string) (fileId, urlLocation string, auth security.EncodedJwt, err error) { stats.FilerRequestCounter.WithLabelValues("assign").Inc() start := time.Now() @@ -49,7 +50,7 @@ func (fs *FilerServer) assignNewFileInfo(w http.ResponseWriter, r *http.Request, Count: 1, Replication: replication, Collection: collection, - Ttl: r.URL.Query().Get("ttl"), + Ttl: ttlString, DataCenter: dataCenter, } var altRequest *operation.VolumeAssignRequest @@ -58,7 +59,7 @@ func (fs *FilerServer) assignNewFileInfo(w http.ResponseWriter, r *http.Request, Count: 1, Replication: replication, Collection: collection, - Ttl: r.URL.Query().Get("ttl"), + Ttl: ttlString, DataCenter: "", } } @@ -86,13 +87,21 @@ func (fs *FilerServer) PostHandler(w http.ResponseWriter, r *http.Request) { if dataCenter == "" { dataCenter = fs.option.DataCenter } + ttlString := r.URL.Query().Get("ttl") - if autoChunked := fs.autoChunk(ctx, w, r, replication, collection, dataCenter); autoChunked { + // read ttl in seconds + ttl, err := needle.ReadTTL(ttlString) + ttlSeconds := int32(0) + if err == nil { + ttlSeconds = int32(ttl.Minutes()) * 
60 + } + + if autoChunked := fs.autoChunk(ctx, w, r, replication, collection, dataCenter, ttlSeconds, ttlString); autoChunked { return } if fs.option.Cipher { - reply, err := fs.encrypt(ctx, w, r, replication, collection, dataCenter) + reply, err := fs.encrypt(ctx, w, r, replication, collection, dataCenter, ttlSeconds, ttlString) if err != nil { writeJsonError(w, r, http.StatusInternalServerError, err) } else if reply != nil { @@ -102,7 +111,7 @@ func (fs *FilerServer) PostHandler(w http.ResponseWriter, r *http.Request) { return } - fileId, urlLocation, auth, err := fs.assignNewFileInfo(w, r, replication, collection, dataCenter) + fileId, urlLocation, auth, err := fs.assignNewFileInfo(w, r, replication, collection, dataCenter, ttlString) if err != nil || fileId == "" || urlLocation == "" { glog.V(0).Infof("fail to allocate volume for %s, collection:%s, datacenter:%s", r.URL.Path, collection, dataCenter) @@ -118,7 +127,7 @@ func (fs *FilerServer) PostHandler(w http.ResponseWriter, r *http.Request) { return } - if err = fs.updateFilerStore(ctx, r, w, replication, collection, ret, fileId); err != nil { + if err = fs.updateFilerStore(ctx, r, w, replication, collection, ret, fileId, ttlSeconds); err != nil { return } @@ -136,7 +145,7 @@ func (fs *FilerServer) PostHandler(w http.ResponseWriter, r *http.Request) { // update metadata in filer store func (fs *FilerServer) updateFilerStore(ctx context.Context, r *http.Request, w http.ResponseWriter, - replication string, collection string, ret *operation.UploadResult, fileId string) (err error) { + replication string, collection string, ret *operation.UploadResult, fileId string, ttlSeconds int32) (err error) { stats.FilerRequestCounter.WithLabelValues("postStoreWrite").Inc() start := time.Now() @@ -175,7 +184,7 @@ func (fs *FilerServer) updateFilerStore(ctx context.Context, r *http.Request, w Gid: OS_GID, Replication: replication, Collection: collection, - TtlSec: int32(util.ParseInt(r.URL.Query().Get("ttl"), 0)), + TtlSec: ttlSeconds, Mime: ret.Mime, }, Chunks: []*filer_pb.FileChunk{{ diff --git a/weed/server/filer_server_handlers_write_autochunk.go b/weed/server/filer_server_handlers_write_autochunk.go index bce3c78bc..a2672b836 100644 --- a/weed/server/filer_server_handlers_write_autochunk.go +++ b/weed/server/filer_server_handlers_write_autochunk.go @@ -16,11 +16,10 @@ import ( "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" "github.com/chrislusf/seaweedfs/weed/security" "github.com/chrislusf/seaweedfs/weed/stats" - "github.com/chrislusf/seaweedfs/weed/util" ) func (fs *FilerServer) autoChunk(ctx context.Context, w http.ResponseWriter, r *http.Request, - replication string, collection string, dataCenter string) bool { + replication string, collection string, dataCenter string, ttlSec int32, ttlString string) bool { if r.Method != "POST" { glog.V(4).Infoln("AutoChunking not supported for method", r.Method) return false @@ -56,7 +55,7 @@ func (fs *FilerServer) autoChunk(ctx context.Context, w http.ResponseWriter, r * return false } - reply, err := fs.doAutoChunk(ctx, w, r, contentLength, chunkSize, replication, collection, dataCenter) + reply, err := fs.doAutoChunk(ctx, w, r, contentLength, chunkSize, replication, collection, dataCenter, ttlSec, ttlString) if err != nil { writeJsonError(w, r, http.StatusInternalServerError, err) } else if reply != nil { @@ -66,7 +65,7 @@ func (fs *FilerServer) autoChunk(ctx context.Context, w http.ResponseWriter, r * } func (fs *FilerServer) doAutoChunk(ctx context.Context, w http.ResponseWriter, r 
*http.Request, - contentLength int64, chunkSize int32, replication string, collection string, dataCenter string) (filerResult *FilerPostResult, replyerr error) { + contentLength int64, chunkSize int32, replication string, collection string, dataCenter string, ttlSec int32, ttlString string) (filerResult *FilerPostResult, replyerr error) { stats.FilerRequestCounter.WithLabelValues("postAutoChunk").Inc() start := time.Now() @@ -100,7 +99,7 @@ func (fs *FilerServer) doAutoChunk(ctx context.Context, w http.ResponseWriter, r limitedReader := io.LimitReader(part1, int64(chunkSize)) // assign one file id for one chunk - fileId, urlLocation, auth, assignErr := fs.assignNewFileInfo(w, r, replication, collection, dataCenter) + fileId, urlLocation, auth, assignErr := fs.assignNewFileInfo(w, r, replication, collection, dataCenter, ttlString) if assignErr != nil { return nil, assignErr } @@ -158,7 +157,7 @@ func (fs *FilerServer) doAutoChunk(ctx context.Context, w http.ResponseWriter, r Gid: OS_GID, Replication: replication, Collection: collection, - TtlSec: int32(util.ParseInt(r.URL.Query().Get("ttl"), 0)), + TtlSec: ttlSec, Mime: contentType, }, Chunks: fileChunks, diff --git a/weed/server/filer_server_handlers_write_cipher.go b/weed/server/filer_server_handlers_write_cipher.go index 3aebc4b61..06670399c 100644 --- a/weed/server/filer_server_handlers_write_cipher.go +++ b/weed/server/filer_server_handlers_write_cipher.go @@ -12,14 +12,13 @@ import ( "github.com/chrislusf/seaweedfs/weed/operation" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" "github.com/chrislusf/seaweedfs/weed/storage/needle" - "github.com/chrislusf/seaweedfs/weed/util" ) // handling single chunk POST or PUT upload func (fs *FilerServer) encrypt(ctx context.Context, w http.ResponseWriter, r *http.Request, - replication string, collection string, dataCenter string) (filerResult *FilerPostResult, err error) { + replication string, collection string, dataCenter string, ttlSeconds int32, ttlString string) (filerResult *FilerPostResult, err error) { - fileId, urlLocation, auth, err := fs.assignNewFileInfo(w, r, replication, collection, dataCenter) + fileId, urlLocation, auth, err := fs.assignNewFileInfo(w, r, replication, collection, dataCenter, ttlString) if err != nil || fileId == "" || urlLocation == "" { return nil, fmt.Errorf("fail to allocate volume for %s, collection:%s, datacenter:%s", r.URL.Path, collection, dataCenter) @@ -77,7 +76,7 @@ func (fs *FilerServer) encrypt(ctx context.Context, w http.ResponseWriter, r *ht Gid: OS_GID, Replication: replication, Collection: collection, - TtlSec: int32(util.ParseInt(r.URL.Query().Get("ttl"), 0)), + TtlSec: ttlSeconds, Mime: pu.MimeType, }, Chunks: fileChunks, From 89eb05b50f10b6ca74a374e5435df2f72019f635 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Mon, 9 Mar 2020 01:02:01 -0700 Subject: [PATCH 0215/2432] filer: support TTL for all filer stores --- weed/command/filer_copy.go | 17 ++++++++--- weed/filer2/filer.go | 28 +++++++++++++++++-- weed/filer2/filer_buckets.go | 2 +- weed/filer2/filer_delete_entry.go | 2 +- weed/filer2/leveldb/leveldb_store_test.go | 6 ++-- weed/filer2/leveldb2/leveldb2_store_test.go | 6 ++-- weed/server/filer_grpc_server.go | 4 +-- weed/server/filer_grpc_server_rename.go | 2 +- weed/server/filer_server_handlers_read_dir.go | 4 +-- weed/storage/volume.go | 4 +-- 10 files changed, 53 insertions(+), 22 deletions(-) diff --git a/weed/command/filer_copy.go b/weed/command/filer_copy.go index 1162bb204..0aee8cd80 100644 --- a/weed/command/filer_copy.go +++ 
b/weed/command/filer_copy.go @@ -20,6 +20,7 @@ import ( "github.com/chrislusf/seaweedfs/weed/pb" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" "github.com/chrislusf/seaweedfs/weed/security" + "github.com/chrislusf/seaweedfs/weed/storage/needle" "github.com/chrislusf/seaweedfs/weed/util" "github.com/chrislusf/seaweedfs/weed/wdclient" ) @@ -41,6 +42,7 @@ type CopyOptions struct { grpcDialOption grpc.DialOption masters []string cipher bool + ttlSec int32 } func init() { @@ -124,6 +126,13 @@ func runCopy(cmd *Command, args []string) bool { copy.masters = masters copy.cipher = cipher + ttl, err := needle.ReadTTL(*copy.ttl) + if err != nil { + fmt.Printf("parsing ttl %s: %v\n", *copy.ttl, err) + return false + } + copy.ttlSec = int32(ttl.Minutes()) * 60 + if *cmdCopy.IsDebug { util.SetupProfiling("filer.copy.cpu.pprof", "filer.copy.mem.pprof") } @@ -286,7 +295,7 @@ func (worker *FileCopyWorker) uploadFileAsOne(task FileCopyTask, f *os.File) err Count: 1, Replication: *worker.options.replication, Collection: *worker.options.collection, - TtlSec: int32(util.ParseInt(*worker.options.ttl, 0)), + TtlSec: worker.options.ttlSec, ParentPath: task.destinationUrlPath, } @@ -342,7 +351,7 @@ func (worker *FileCopyWorker) uploadFileAsOne(task FileCopyTask, f *os.File) err Mime: mimeType, Replication: *worker.options.replication, Collection: *worker.options.collection, - TtlSec: int32(util.ParseInt(*worker.options.ttl, 0)), + TtlSec: worker.options.ttlSec, }, Chunks: chunks, }, @@ -388,7 +397,7 @@ func (worker *FileCopyWorker) uploadFileInChunks(task FileCopyTask, f *os.File, Count: 1, Replication: *worker.options.replication, Collection: *worker.options.collection, - TtlSec: int32(util.ParseInt(*worker.options.ttl, 0)), + TtlSec: worker.options.ttlSec, ParentPath: task.destinationUrlPath, } @@ -469,7 +478,7 @@ func (worker *FileCopyWorker) uploadFileInChunks(task FileCopyTask, f *os.File, Mime: mimeType, Replication: replication, Collection: collection, - TtlSec: int32(util.ParseInt(*worker.options.ttl, 0)), + TtlSec: worker.options.ttlSec, }, Chunks: chunks, }, diff --git a/weed/filer2/filer.go b/weed/filer2/filer.go index 0b6a5c96e..c3048b45d 100644 --- a/weed/filer2/filer.go +++ b/weed/filer2/filer.go @@ -223,14 +223,36 @@ func (f *Filer) FindEntry(ctx context.Context, p FullPath) (entry *Entry, err er }, }, nil } - return f.store.FindEntry(ctx, p) + entry, err = f.store.FindEntry(ctx, p) + if entry != nil && entry.TtlSec > 0 { + if entry.Crtime.Add(time.Duration(entry.TtlSec) * time.Second).Before(time.Now()) { + f.store.DeleteEntry(ctx, p.Child(entry.Name())) + return nil, filer_pb.ErrNotFound + } + } + return + } -func (f *Filer) ListDirectoryEntries(ctx context.Context, p FullPath, startFileName string, inclusive bool, limit int) ([]*Entry, error) { +func (f *Filer) ListDirectoryEntries(ctx context.Context, p FullPath, startFileName string, inclusive bool, limit int) (entries []*Entry, expiredCount int, err error) { if strings.HasSuffix(string(p), "/") && len(p) > 1 { p = p[0 : len(p)-1] } - return f.store.ListDirectoryEntries(ctx, p, startFileName, inclusive, limit) + listedEntries, listErr := f.store.ListDirectoryEntries(ctx, p, startFileName, inclusive, limit) + if listErr != nil { + return listedEntries, expiredCount, err + } + for _, entry := range listedEntries { + if entry.TtlSec > 0 { + if entry.Crtime.Add(time.Duration(entry.TtlSec) * time.Second).Before(time.Now()) { + f.store.DeleteEntry(ctx, p.Child(entry.Name())) + expiredCount++ + continue + } + } + entries = append(entries, entry) + 
} + return } func (f *Filer) cacheDelDirectory(dirpath string) { diff --git a/weed/filer2/filer_buckets.go b/weed/filer2/filer_buckets.go index 601b7dbf3..cb65fea14 100644 --- a/weed/filer2/filer_buckets.go +++ b/weed/filer2/filer_buckets.go @@ -28,7 +28,7 @@ func (f *Filer) LoadBuckets(dirBucketsPath string) { limit := math.MaxInt32 - entries, err := f.ListDirectoryEntries(context.Background(), FullPath(dirBucketsPath), "", false, limit) + entries, _, err := f.ListDirectoryEntries(context.Background(), FullPath(dirBucketsPath), "", false, limit) if err != nil { glog.V(1).Infof("no buckets found: %v", err) diff --git a/weed/filer2/filer_delete_entry.go b/weed/filer2/filer_delete_entry.go index d0792ac66..b7ec805c5 100644 --- a/weed/filer2/filer_delete_entry.go +++ b/weed/filer2/filer_delete_entry.go @@ -57,7 +57,7 @@ func (f *Filer) doBatchDeleteFolderMetaAndData(ctx context.Context, entry *Entry lastFileName := "" includeLastFile := false for { - entries, err := f.ListDirectoryEntries(ctx, entry.FullPath, lastFileName, includeLastFile, PaginationSize) + entries, _, err := f.ListDirectoryEntries(ctx, entry.FullPath, lastFileName, includeLastFile, PaginationSize) if err != nil { glog.Errorf("list folder %s: %v", entry.FullPath, err) return nil, fmt.Errorf("list folder %s: %v", entry.FullPath, err) diff --git a/weed/filer2/leveldb/leveldb_store_test.go b/weed/filer2/leveldb/leveldb_store_test.go index 497158420..dcb99a3bd 100644 --- a/weed/filer2/leveldb/leveldb_store_test.go +++ b/weed/filer2/leveldb/leveldb_store_test.go @@ -48,14 +48,14 @@ func TestCreateAndFind(t *testing.T) { } // checking one upper directory - entries, _ := filer.ListDirectoryEntries(ctx, filer2.FullPath("/home/chris/this/is/one"), "", false, 100) + entries, _, _ := filer.ListDirectoryEntries(ctx, filer2.FullPath("/home/chris/this/is/one"), "", false, 100) if len(entries) != 1 { t.Errorf("list entries count: %v", len(entries)) return } // checking one upper directory - entries, _ = filer.ListDirectoryEntries(ctx, filer2.FullPath("/"), "", false, 100) + entries, _, _ = filer.ListDirectoryEntries(ctx, filer2.FullPath("/"), "", false, 100) if len(entries) != 1 { t.Errorf("list entries count: %v", len(entries)) return @@ -75,7 +75,7 @@ func TestEmptyRoot(t *testing.T) { ctx := context.Background() // checking one upper directory - entries, err := filer.ListDirectoryEntries(ctx, filer2.FullPath("/"), "", false, 100) + entries, _, err := filer.ListDirectoryEntries(ctx, filer2.FullPath("/"), "", false, 100) if err != nil { t.Errorf("list entries: %v", err) return diff --git a/weed/filer2/leveldb2/leveldb2_store_test.go b/weed/filer2/leveldb2/leveldb2_store_test.go index dc94f2ac7..c1f2d6a0c 100644 --- a/weed/filer2/leveldb2/leveldb2_store_test.go +++ b/weed/filer2/leveldb2/leveldb2_store_test.go @@ -48,14 +48,14 @@ func TestCreateAndFind(t *testing.T) { } // checking one upper directory - entries, _ := filer.ListDirectoryEntries(ctx, filer2.FullPath("/home/chris/this/is/one"), "", false, 100) + entries, _, _ := filer.ListDirectoryEntries(ctx, filer2.FullPath("/home/chris/this/is/one"), "", false, 100) if len(entries) != 1 { t.Errorf("list entries count: %v", len(entries)) return } // checking one upper directory - entries, _ = filer.ListDirectoryEntries(ctx, filer2.FullPath("/"), "", false, 100) + entries, _, _ = filer.ListDirectoryEntries(ctx, filer2.FullPath("/"), "", false, 100) if len(entries) != 1 { t.Errorf("list entries count: %v", len(entries)) return @@ -75,7 +75,7 @@ func TestEmptyRoot(t *testing.T) { ctx := 
context.Background() // checking one upper directory - entries, err := filer.ListDirectoryEntries(ctx, filer2.FullPath("/"), "", false, 100) + entries, _, err := filer.ListDirectoryEntries(ctx, filer2.FullPath("/"), "", false, 100) if err != nil { t.Errorf("list entries: %v", err) return diff --git a/weed/server/filer_grpc_server.go b/weed/server/filer_grpc_server.go index b904c1393..488967ec2 100644 --- a/weed/server/filer_grpc_server.go +++ b/weed/server/filer_grpc_server.go @@ -53,7 +53,7 @@ func (fs *FilerServer) ListEntries(req *filer_pb.ListEntriesRequest, stream file lastFileName := req.StartFromFileName includeLastFile := req.InclusiveStartFrom for limit > 0 { - entries, err := fs.filer.ListDirectoryEntries(stream.Context(), filer2.FullPath(req.Directory), lastFileName, includeLastFile, paginationLimit) + entries, expiredCount, err := fs.filer.ListDirectoryEntries(stream.Context(), filer2.FullPath(req.Directory), lastFileName, includeLastFile, paginationLimit) if err != nil { return err @@ -92,7 +92,7 @@ func (fs *FilerServer) ListEntries(req *filer_pb.ListEntriesRequest, stream file } } - if len(entries) < paginationLimit { + if len(entries)+expiredCount < paginationLimit { break } diff --git a/weed/server/filer_grpc_server_rename.go b/weed/server/filer_grpc_server_rename.go index 0669a26f1..3b2655585 100644 --- a/weed/server/filer_grpc_server_rename.go +++ b/weed/server/filer_grpc_server_rename.go @@ -68,7 +68,7 @@ func (fs *FilerServer) moveFolderSubEntries(ctx context.Context, oldParent filer includeLastFile := false for { - entries, err := fs.filer.ListDirectoryEntries(ctx, currentDirPath, lastFileName, includeLastFile, 1024) + entries, _, err := fs.filer.ListDirectoryEntries(ctx, currentDirPath, lastFileName, includeLastFile, 1024) if err != nil { return err } diff --git a/weed/server/filer_server_handlers_read_dir.go b/weed/server/filer_server_handlers_read_dir.go index 87e864559..13f60eefe 100644 --- a/weed/server/filer_server_handlers_read_dir.go +++ b/weed/server/filer_server_handlers_read_dir.go @@ -32,7 +32,7 @@ func (fs *FilerServer) listDirectoryHandler(w http.ResponseWriter, r *http.Reque lastFileName := r.FormValue("lastFileName") - entries, err := fs.filer.ListDirectoryEntries(context.Background(), filer2.FullPath(path), lastFileName, false, limit) + entries, expiredCount, err := fs.filer.ListDirectoryEntries(context.Background(), filer2.FullPath(path), lastFileName, false, limit) if err != nil { glog.V(0).Infof("listDirectory %s %s %d: %s", path, lastFileName, limit, err) @@ -40,7 +40,7 @@ func (fs *FilerServer) listDirectoryHandler(w http.ResponseWriter, r *http.Reque return } - shouldDisplayLoadMore := len(entries) == limit + shouldDisplayLoadMore := len(entries)+expiredCount == limit if path == "/" { path = "" } diff --git a/weed/storage/volume.go b/weed/storage/volume.go index 88a5db4c5..7da83de7a 100644 --- a/weed/storage/volume.go +++ b/weed/storage/volume.go @@ -180,9 +180,9 @@ func (v *Volume) expired(volumeSizeLimit uint64) bool { if v.Ttl == nil || v.Ttl.Minutes() == 0 { return false } - glog.V(1).Infof("now:%v lastModified:%v", time.Now().Unix(), v.lastModifiedTsSeconds) + glog.V(2).Infof("now:%v lastModified:%v", time.Now().Unix(), v.lastModifiedTsSeconds) livedMinutes := (time.Now().Unix() - int64(v.lastModifiedTsSeconds)) / 60 - glog.V(1).Infof("ttl:%v lived:%v", v.Ttl, livedMinutes) + glog.V(2).Infof("ttl:%v lived:%v", v.Ttl, livedMinutes) if int64(v.Ttl.Minutes()) < livedMinutes { return true } From 14ae33d642e93b79a3f12d84f140deed8ebbf1b1 Mon Sep 
17 00:00:00 2001 From: Chris Lu Date: Mon, 9 Mar 2020 10:36:03 -0700 Subject: [PATCH 0216/2432] adjust logging --- weed/topology/store_replicate.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/weed/topology/store_replicate.go b/weed/topology/store_replicate.go index 4acc71be5..8c4996d45 100644 --- a/weed/topology/store_replicate.go +++ b/weed/topology/store_replicate.go @@ -158,8 +158,8 @@ func getWritableRemoteReplications(s *storage.Store, volumeId needle.VolumeId, m if copyCount > 1 { if lookupResult, lookupErr := operation.Lookup(masterNode, volumeId.String()); lookupErr == nil { if len(lookupResult.Locations) < copyCount { - err = fmt.Errorf("replicating opetations [%d] is less than volume's replication copy count [%d]", - len(lookupResult.Locations), copyCount) + err = fmt.Errorf("replicating opetations [%d] is less than volume %d replication copy count [%d]", + len(lookupResult.Locations), volumeId, copyCount) return } selfUrl := s.Ip + ":" + strconv.Itoa(s.Port) From 1f8fc4b5edfeb9838345de146beb1ec033231dd3 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Mon, 9 Mar 2020 20:45:28 -0700 Subject: [PATCH 0217/2432] vacuum benchmarking --- .../repeated_vacuum/repeated_vacuum.go | 43 ++++++++++++------- 1 file changed, 28 insertions(+), 15 deletions(-) diff --git a/unmaintained/repeated_vacuum/repeated_vacuum.go b/unmaintained/repeated_vacuum/repeated_vacuum.go index c2f626f72..96d4ccdf6 100644 --- a/unmaintained/repeated_vacuum/repeated_vacuum.go +++ b/unmaintained/repeated_vacuum/repeated_vacuum.go @@ -6,14 +6,17 @@ import ( "log" "math/rand" + "google.golang.org/grpc" + "github.com/chrislusf/seaweedfs/weed/operation" "github.com/chrislusf/seaweedfs/weed/security" "github.com/chrislusf/seaweedfs/weed/util" ) var ( - master = flag.String("master", "127.0.0.1:9333", "the master server") - repeat = flag.Int("n", 5, "repeat how many times") + master = flag.String("master", "127.0.0.1:9333", "the master server") + repeat = flag.Int("n", 5, "repeat how many times") + garbageThreshold = flag.Float64("garbageThreshold", 0.3, "garbageThreshold") ) func main() { @@ -22,26 +25,36 @@ func main() { util.LoadConfiguration("security", false) grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.client") + genFile(grpcDialOption, 0) + for i := 0; i < *repeat; i++ { - assignResult, err := operation.Assign(*master, grpcDialOption, &operation.VolumeAssignRequest{Count: 1}) - if err != nil { - log.Fatalf("assign: %v", err) - } + // create 2 files, and delete one of them - data := make([]byte, 1024) - rand.Read(data) + assignResult, targetUrl := genFile(grpcDialOption, i) - targetUrl := fmt.Sprintf("http://%s/%s", assignResult.Url, assignResult.Fid) + util.Delete(targetUrl, string(assignResult.Auth)) - _, err = operation.UploadData(targetUrl, fmt.Sprintf("test%d", i), false, data, false, "bench/test", nil, assignResult.Auth) - if err != nil { - log.Fatalf("upload: %v", err) - } + println("vacuum", i, "threshold", *garbageThreshold) + util.Get(fmt.Sprintf("http://%s/vol/vacuum?garbageThreshold=%f", *master, *garbageThreshold)) - util.Delete(targetUrl, string(assignResult.Auth)) + } - util.Get(fmt.Sprintf("http://%s/vol/vacuum", *master)) +} +func genFile(grpcDialOption grpc.DialOption, i int) (*operation.AssignResult, string) { + assignResult, err := operation.Assign(*master, grpcDialOption, &operation.VolumeAssignRequest{Count: 1}) + if err != nil { + log.Fatalf("assign: %v", err) } + data := make([]byte, 1024) + rand.Read(data) + + targetUrl := fmt.Sprintf("http://%s/%s", 
assignResult.Url, assignResult.Fid) + + _, err = operation.UploadData(targetUrl, fmt.Sprintf("test%d", i), false, data, false, "bench/test", nil, assignResult.Auth) + if err != nil { + log.Fatalf("upload: %v", err) + } + return assignResult, targetUrl } From 0871d2cff091e43190065bd17dba624cf6da1d6d Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Mon, 9 Mar 2020 22:29:02 -0700 Subject: [PATCH 0218/2432] volume: fix memory leak during compaction fix https://github.com/chrislusf/seaweedfs/issues/1222 --- weed/command/export.go | 2 ++ weed/command/fix.go | 1 + weed/server/master_server_handlers_admin.go | 2 +- weed/storage/erasure_coding/ec_encoder.go | 7 +++++-- weed/storage/erasure_coding/ec_test.go | 4 +++- weed/storage/needle_map/memdb.go | 4 ++++ weed/storage/needle_map/memdb_test.go | 23 +++++++++++++++++++++ 7 files changed, 39 insertions(+), 4 deletions(-) create mode 100644 weed/storage/needle_map/memdb_test.go diff --git a/weed/command/export.go b/weed/command/export.go index 8d664ad3b..8c32b3f4d 100644 --- a/weed/command/export.go +++ b/weed/command/export.go @@ -195,6 +195,8 @@ func runExport(cmd *Command, args []string) bool { vid := needle.VolumeId(*export.volumeId) needleMap := needle_map.NewMemDb() + defer needleMap.Close() + if err := needleMap.LoadFromIdx(path.Join(*export.dir, fileName+".idx")); err != nil { glog.Fatalf("cannot load needle map from %s.idx: %s", fileName, err) } diff --git a/weed/command/fix.go b/weed/command/fix.go index 8903595fa..90d1c4893 100644 --- a/weed/command/fix.go +++ b/weed/command/fix.go @@ -70,6 +70,7 @@ func runFix(cmd *Command, args []string) bool { indexFileName := path.Join(*fixVolumePath, baseFileName+".idx") nm := needle_map.NewMemDb() + defer nm.Close() vid := needle.VolumeId(*fixVolumeId) scanner := &VolumeFileScanner4Fix{ diff --git a/weed/server/master_server_handlers_admin.go b/weed/server/master_server_handlers_admin.go index 2965a4863..5d0986f97 100644 --- a/weed/server/master_server_handlers_admin.go +++ b/weed/server/master_server_handlers_admin.go @@ -61,7 +61,7 @@ func (ms *MasterServer) volumeVacuumHandler(w http.ResponseWriter, r *http.Reque return } } - glog.Infoln("garbageThreshold =", gcThreshold) + // glog.Infoln("garbageThreshold =", gcThreshold) ms.Topo.Vacuum(ms.grpcDialOption, gcThreshold, ms.preallocateSize) ms.dirStatusHandler(w, r) } diff --git a/weed/storage/erasure_coding/ec_encoder.go b/weed/storage/erasure_coding/ec_encoder.go index 9e2edf57d..97c3ccbd9 100644 --- a/weed/storage/erasure_coding/ec_encoder.go +++ b/weed/storage/erasure_coding/ec_encoder.go @@ -27,6 +27,9 @@ const ( func WriteSortedFileFromIdx(baseFileName string, ext string) (e error) { nm, err := readNeedleMap(baseFileName) + if nm != nil { + defer nm.Close() + } if err != nil { return fmt.Errorf("readNeedleMap: %v", err) } @@ -196,7 +199,7 @@ func encodeDatFile(remainingSize int64, err error, baseFileName string, bufferSi } buffers := make([][]byte, TotalShardsCount) - for i, _ := range buffers { + for i := range buffers { buffers[i] = make([]byte, bufferSize) } @@ -233,7 +236,7 @@ func rebuildEcFiles(shardHasData []bool, inputFiles []*os.File, outputFiles []*o } buffers := make([][]byte, TotalShardsCount) - for i, _ := range buffers { + for i := range buffers { if shardHasData[i] { buffers[i] = make([]byte, ErasureCodingSmallBlockSize) } diff --git a/weed/storage/erasure_coding/ec_test.go b/weed/storage/erasure_coding/ec_test.go index b2c94cfd7..92b83cdc8 100644 --- a/weed/storage/erasure_coding/ec_test.go +++ 
b/weed/storage/erasure_coding/ec_test.go @@ -7,9 +7,10 @@ import ( "os" "testing" + "github.com/klauspost/reedsolomon" + "github.com/chrislusf/seaweedfs/weed/storage/needle_map" "github.com/chrislusf/seaweedfs/weed/storage/types" - "github.com/klauspost/reedsolomon" ) const ( @@ -42,6 +43,7 @@ func TestEncodingDecoding(t *testing.T) { func validateFiles(baseFileName string) error { nm, err := readNeedleMap(baseFileName) + defer nm.Close() if err != nil { return fmt.Errorf("readNeedleMap: %v", err) } diff --git a/weed/storage/needle_map/memdb.go b/weed/storage/needle_map/memdb.go index 9eb4d9f56..a52d52a10 100644 --- a/weed/storage/needle_map/memdb.go +++ b/weed/storage/needle_map/memdb.go @@ -113,3 +113,7 @@ func (cm *MemDb) LoadFromIdx(idxName string) (ret error) { }) } + +func (cm *MemDb) Close() { + cm.db.Close() +} diff --git a/weed/storage/needle_map/memdb_test.go b/weed/storage/needle_map/memdb_test.go new file mode 100644 index 000000000..7b45d23f8 --- /dev/null +++ b/weed/storage/needle_map/memdb_test.go @@ -0,0 +1,23 @@ +package needle_map + +import ( + "testing" + + "github.com/chrislusf/seaweedfs/weed/storage/types" +) + +func BenchmarkMemDb(b *testing.B) { + b.ReportAllocs() + for i := 0; i < b.N; i++ { + nm := NewMemDb() + + nid := types.NeedleId(345) + offset := types.Offset{ + OffsetHigher: types.OffsetHigher{}, + OffsetLower: types.OffsetLower{}, + } + nm.Set(nid, offset, 324) + nm.Close() + } + +} From e73b0c24578f15900d88427c5c770056651c369d Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Mon, 9 Mar 2020 22:31:14 -0700 Subject: [PATCH 0219/2432] remove not ready status --- weed/command/filer.go | 2 +- weed/command/server.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/weed/command/filer.go b/weed/command/filer.go index b027686b5..fb1ee2b0f 100644 --- a/weed/command/filer.go +++ b/weed/command/filer.go @@ -52,7 +52,7 @@ func init() { f.dirListingLimit = cmdFiler.Flag.Int("dirListLimit", 100000, "limit sub dir listing size") f.dataCenter = cmdFiler.Flag.String("dataCenter", "", "prefer to write to volumes in this data center") f.disableHttp = cmdFiler.Flag.Bool("disableHttp", false, "disable http request, only gRpc operations are allowed") - f.cipher = cmdFiler.Flag.Bool("work_in_progress_encryptVolumeData", false, " encrypt data on volume servers") + f.cipher = cmdFiler.Flag.Bool("encryptVolumeData", false, "encrypt data on volume servers") } var cmdFiler = &Command{ diff --git a/weed/command/server.go b/weed/command/server.go index ea2844db9..560b90037 100644 --- a/weed/command/server.go +++ b/weed/command/server.go @@ -81,7 +81,7 @@ func init() { filerOptions.disableDirListing = cmdServer.Flag.Bool("filer.disableDirListing", false, "turn off directory listing") filerOptions.maxMB = cmdServer.Flag.Int("filer.maxMB", 32, "split files larger than the limit") filerOptions.dirListingLimit = cmdServer.Flag.Int("filer.dirListLimit", 1000, "limit sub dir listing size") - filerOptions.cipher = cmdServer.Flag.Bool("filer.work_in_progress_encryptVolumeData", false, " encrypt data on volume servers") + filerOptions.cipher = cmdServer.Flag.Bool("filer.encryptVolumeData", false, "encrypt data on volume servers") serverOptions.v.port = cmdServer.Flag.Int("volume.port", 8080, "volume server http listen port") serverOptions.v.publicPort = cmdServer.Flag.Int("volume.port.public", 0, "volume server public port") From e6de42f88806f8b845e6f295fc1cde9fb1a7f5fc Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Mon, 9 Mar 2020 22:32:40 -0700 Subject: [PATCH 0220/2432] 1.60 --- 
k8s/seaweedfs/Chart.yaml | 2 +- k8s/seaweedfs/values.yaml | 2 +- weed/util/constants.go | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/k8s/seaweedfs/Chart.yaml b/k8s/seaweedfs/Chart.yaml index ccf621c96..550d8bbf2 100644 --- a/k8s/seaweedfs/Chart.yaml +++ b/k8s/seaweedfs/Chart.yaml @@ -1,4 +1,4 @@ apiVersion: v1 description: SeaweedFS name: seaweedfs -version: 1.59 +version: 1.60 diff --git a/k8s/seaweedfs/values.yaml b/k8s/seaweedfs/values.yaml index 841551496..ddac43f0b 100644 --- a/k8s/seaweedfs/values.yaml +++ b/k8s/seaweedfs/values.yaml @@ -4,7 +4,7 @@ global: registry: "" repository: "" imageName: chrislusf/seaweedfs - imageTag: "1.59" + imageTag: "1.60" imagePullPolicy: IfNotPresent imagePullSecrets: imagepullsecret restartPolicy: Always diff --git a/weed/util/constants.go b/weed/util/constants.go index 05f2f0ec8..89b76f1e5 100644 --- a/weed/util/constants.go +++ b/weed/util/constants.go @@ -5,5 +5,5 @@ import ( ) var ( - VERSION = fmt.Sprintf("%s %d.%d", sizeLimit, 1, 59) + VERSION = fmt.Sprintf("%s %d.%d", sizeLimit, 1, 60) ) From d214cefc2e9ad7c6a4c60cbbd934f164aa91663e Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Mon, 9 Mar 2020 23:28:01 -0700 Subject: [PATCH 0221/2432] filer: list directory pagination makes up for the expired entries --- weed/filer2/filer.go | 19 +++++++++++++++++-- weed/filer2/filer_buckets.go | 2 +- weed/filer2/filer_delete_entry.go | 2 +- weed/filer2/leveldb/leveldb_store_test.go | 6 +++--- weed/filer2/leveldb2/leveldb2_store_test.go | 6 +++--- weed/server/filer_grpc_server.go | 4 ++-- weed/server/filer_grpc_server_rename.go | 2 +- weed/server/filer_server_handlers_read_dir.go | 4 ++-- 8 files changed, 30 insertions(+), 15 deletions(-) diff --git a/weed/filer2/filer.go b/weed/filer2/filer.go index c3048b45d..d3343f610 100644 --- a/weed/filer2/filer.go +++ b/weed/filer2/filer.go @@ -234,15 +234,30 @@ func (f *Filer) FindEntry(ctx context.Context, p FullPath) (entry *Entry, err er } -func (f *Filer) ListDirectoryEntries(ctx context.Context, p FullPath, startFileName string, inclusive bool, limit int) (entries []*Entry, expiredCount int, err error) { +func (f *Filer) ListDirectoryEntries(ctx context.Context, p FullPath, startFileName string, inclusive bool, limit int) ([]*Entry, error) { if strings.HasSuffix(string(p), "/") && len(p) > 1 { p = p[0 : len(p)-1] } + + var makeupEntries []*Entry + entries, expiredCount, lastFileName, err := f.doListDirectoryEntries(ctx, p, startFileName, inclusive, limit) + for expiredCount > 0 && err == nil { + makeupEntries, expiredCount, lastFileName, err = f.doListDirectoryEntries(ctx, p, lastFileName, false, expiredCount) + if err == nil { + entries = append(entries, makeupEntries...)
+ } + } + + return entries, err +} + +func (f *Filer) doListDirectoryEntries(ctx context.Context, p FullPath, startFileName string, inclusive bool, limit int) (entries []*Entry, expiredCount int, lastFileName string, err error) { listedEntries, listErr := f.store.ListDirectoryEntries(ctx, p, startFileName, inclusive, limit) if listErr != nil { - return listedEntries, expiredCount, err + return listedEntries, expiredCount, "", listErr } for _, entry := range listedEntries { + lastFileName = entry.Name() if entry.TtlSec > 0 { if entry.Crtime.Add(time.Duration(entry.TtlSec) * time.Second).Before(time.Now()) { f.store.DeleteEntry(ctx, p.Child(entry.Name())) diff --git a/weed/filer2/filer_buckets.go b/weed/filer2/filer_buckets.go index cb65fea14..601b7dbf3 100644 --- a/weed/filer2/filer_buckets.go +++ b/weed/filer2/filer_buckets.go @@ -28,7 +28,7 @@ func (f *Filer) LoadBuckets(dirBucketsPath string) { limit := math.MaxInt32 - entries, _, err := f.ListDirectoryEntries(context.Background(), FullPath(dirBucketsPath), "", false, limit) + entries, err := f.ListDirectoryEntries(context.Background(), FullPath(dirBucketsPath), "", false, limit) if err != nil { glog.V(1).Infof("no buckets found: %v", err) diff --git a/weed/filer2/filer_delete_entry.go b/weed/filer2/filer_delete_entry.go index b7ec805c5..d0792ac66 100644 --- a/weed/filer2/filer_delete_entry.go +++ b/weed/filer2/filer_delete_entry.go @@ -57,7 +57,7 @@ func (f *Filer) doBatchDeleteFolderMetaAndData(ctx context.Context, entry *Entry lastFileName := "" includeLastFile := false for { - entries, _, err := f.ListDirectoryEntries(ctx, entry.FullPath, lastFileName, includeLastFile, PaginationSize) + entries, err := f.ListDirectoryEntries(ctx, entry.FullPath, lastFileName, includeLastFile, PaginationSize) if err != nil { glog.Errorf("list folder %s: %v", entry.FullPath, err) return nil, fmt.Errorf("list folder %s: %v", entry.FullPath, err) diff --git a/weed/filer2/leveldb/leveldb_store_test.go b/weed/filer2/leveldb/leveldb_store_test.go index dcb99a3bd..497158420 100644 --- a/weed/filer2/leveldb/leveldb_store_test.go +++ b/weed/filer2/leveldb/leveldb_store_test.go @@ -48,14 +48,14 @@ func TestCreateAndFind(t *testing.T) { } // checking one upper directory - entries, _, _ := filer.ListDirectoryEntries(ctx, filer2.FullPath("/home/chris/this/is/one"), "", false, 100) + entries, _ := filer.ListDirectoryEntries(ctx, filer2.FullPath("/home/chris/this/is/one"), "", false, 100) if len(entries) != 1 { t.Errorf("list entries count: %v", len(entries)) return } // checking one upper directory - entries, _, _ = filer.ListDirectoryEntries(ctx, filer2.FullPath("/"), "", false, 100) + entries, _ = filer.ListDirectoryEntries(ctx, filer2.FullPath("/"), "", false, 100) if len(entries) != 1 { t.Errorf("list entries count: %v", len(entries)) return @@ -75,7 +75,7 @@ func TestEmptyRoot(t *testing.T) { ctx := context.Background() // checking one upper directory - entries, _, err := filer.ListDirectoryEntries(ctx, filer2.FullPath("/"), "", false, 100) + entries, err := filer.ListDirectoryEntries(ctx, filer2.FullPath("/"), "", false, 100) if err != nil { t.Errorf("list entries: %v", err) return diff --git a/weed/filer2/leveldb2/leveldb2_store_test.go b/weed/filer2/leveldb2/leveldb2_store_test.go index c1f2d6a0c..dc94f2ac7 100644 --- a/weed/filer2/leveldb2/leveldb2_store_test.go +++ b/weed/filer2/leveldb2/leveldb2_store_test.go @@ -48,14 +48,14 @@ func TestCreateAndFind(t *testing.T) { } // checking one upper directory - entries, _, _ := filer.ListDirectoryEntries(ctx, 
filer2.FullPath("/home/chris/this/is/one"), "", false, 100) + entries, _ := filer.ListDirectoryEntries(ctx, filer2.FullPath("/home/chris/this/is/one"), "", false, 100) if len(entries) != 1 { t.Errorf("list entries count: %v", len(entries)) return } // checking one upper directory - entries, _, _ = filer.ListDirectoryEntries(ctx, filer2.FullPath("/"), "", false, 100) + entries, _ = filer.ListDirectoryEntries(ctx, filer2.FullPath("/"), "", false, 100) if len(entries) != 1 { t.Errorf("list entries count: %v", len(entries)) return @@ -75,7 +75,7 @@ func TestEmptyRoot(t *testing.T) { ctx := context.Background() // checking one upper directory - entries, _, err := filer.ListDirectoryEntries(ctx, filer2.FullPath("/"), "", false, 100) + entries, err := filer.ListDirectoryEntries(ctx, filer2.FullPath("/"), "", false, 100) if err != nil { t.Errorf("list entries: %v", err) return diff --git a/weed/server/filer_grpc_server.go b/weed/server/filer_grpc_server.go index 488967ec2..b904c1393 100644 --- a/weed/server/filer_grpc_server.go +++ b/weed/server/filer_grpc_server.go @@ -53,7 +53,7 @@ func (fs *FilerServer) ListEntries(req *filer_pb.ListEntriesRequest, stream file lastFileName := req.StartFromFileName includeLastFile := req.InclusiveStartFrom for limit > 0 { - entries, expiredCount, err := fs.filer.ListDirectoryEntries(stream.Context(), filer2.FullPath(req.Directory), lastFileName, includeLastFile, paginationLimit) + entries, err := fs.filer.ListDirectoryEntries(stream.Context(), filer2.FullPath(req.Directory), lastFileName, includeLastFile, paginationLimit) if err != nil { return err @@ -92,7 +92,7 @@ func (fs *FilerServer) ListEntries(req *filer_pb.ListEntriesRequest, stream file } } - if len(entries)+expiredCount < paginationLimit { + if len(entries) < paginationLimit { break } diff --git a/weed/server/filer_grpc_server_rename.go b/weed/server/filer_grpc_server_rename.go index 3b2655585..0669a26f1 100644 --- a/weed/server/filer_grpc_server_rename.go +++ b/weed/server/filer_grpc_server_rename.go @@ -68,7 +68,7 @@ func (fs *FilerServer) moveFolderSubEntries(ctx context.Context, oldParent filer includeLastFile := false for { - entries, _, err := fs.filer.ListDirectoryEntries(ctx, currentDirPath, lastFileName, includeLastFile, 1024) + entries, err := fs.filer.ListDirectoryEntries(ctx, currentDirPath, lastFileName, includeLastFile, 1024) if err != nil { return err } diff --git a/weed/server/filer_server_handlers_read_dir.go b/weed/server/filer_server_handlers_read_dir.go index 13f60eefe..87e864559 100644 --- a/weed/server/filer_server_handlers_read_dir.go +++ b/weed/server/filer_server_handlers_read_dir.go @@ -32,7 +32,7 @@ func (fs *FilerServer) listDirectoryHandler(w http.ResponseWriter, r *http.Reque lastFileName := r.FormValue("lastFileName") - entries, expiredCount, err := fs.filer.ListDirectoryEntries(context.Background(), filer2.FullPath(path), lastFileName, false, limit) + entries, err := fs.filer.ListDirectoryEntries(context.Background(), filer2.FullPath(path), lastFileName, false, limit) if err != nil { glog.V(0).Infof("listDirectory %s %s %d: %s", path, lastFileName, limit, err) @@ -40,7 +40,7 @@ func (fs *FilerServer) listDirectoryHandler(w http.ResponseWriter, r *http.Reque return } - shouldDisplayLoadMore := len(entries)+expiredCount == limit + shouldDisplayLoadMore := len(entries) == limit if path == "/" { path = "" } From 43e626157956db8687ec5001171a5b2e1ffaae08 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Tue, 10 Mar 2020 13:31:23 -0700 Subject: [PATCH 0222/2432] volume: fix memory 
leak --- weed/storage/volume_vacuum.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/weed/storage/volume_vacuum.go b/weed/storage/volume_vacuum.go index 185484477..c8be1d9f5 100644 --- a/weed/storage/volume_vacuum.go +++ b/weed/storage/volume_vacuum.go @@ -342,6 +342,7 @@ func (v *Volume) copyDataAndGenerateIndexFile(dstName, idxName string, prealloca defer dst.Close() nm := needle_map.NewMemDb() + defer nm.Close() scanner := &VolumeFileScanner4Vacuum{ v: v, @@ -370,7 +371,9 @@ func copyDataBasedOnIndexFile(srcDatName, srcIdxName, dstDatName, datIdxName str defer dstDatBackend.Close() oldNm := needle_map.NewMemDb() + defer oldNm.Close() newNm := needle_map.NewMemDb() + defer newNm.Close() if err = oldNm.LoadFromIdx(srcIdxName); err != nil { return } From 88a110e67ed413913a22fbe4e4b965590b56e0b1 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Tue, 10 Mar 2020 13:32:22 -0700 Subject: [PATCH 0223/2432] release file handle --- weed/storage/volume_vacuum.go | 1 + 1 file changed, 1 insertion(+) diff --git a/weed/storage/volume_vacuum.go b/weed/storage/volume_vacuum.go index c8be1d9f5..5d712c7ff 100644 --- a/weed/storage/volume_vacuum.go +++ b/weed/storage/volume_vacuum.go @@ -381,6 +381,7 @@ func copyDataBasedOnIndexFile(srcDatName, srcIdxName, dstDatName, datIdxName str return err } srcDatBackend = backend.NewDiskFile(dataFile) + defer srcDatBackend.Close() now := uint64(time.Now().Unix()) From d3e1ef64b3db95ed435b9c19bca13378c46383b7 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Tue, 10 Mar 2020 13:33:26 -0700 Subject: [PATCH 0224/2432] ensure error checking on windows --- weed/storage/volume_vacuum.go | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/weed/storage/volume_vacuum.go b/weed/storage/volume_vacuum.go index 5d712c7ff..5d0d63877 100644 --- a/weed/storage/volume_vacuum.go +++ b/weed/storage/volume_vacuum.go @@ -113,8 +113,14 @@ func (v *Volume) CommitCompact() error { } } else { if runtime.GOOS == "windows" { - os.RemoveAll(v.FileName() + ".dat") - os.RemoveAll(v.FileName() + ".idx") + e = os.RemoveAll(v.FileName() + ".dat") + if e != nil { + return e + } + e = os.RemoveAll(v.FileName() + ".idx") + if e != nil { + return e + } } var e error if e = os.Rename(v.FileName()+".cpd", v.FileName()+".dat"); e != nil { @@ -414,7 +420,7 @@ func copyDataBasedOnIndexFile(srcDatName, srcIdxName, dstDatName, datIdxName str return fmt.Errorf("cannot append needle: %s", err) } newOffset += n.DiskSize(version) - glog.V(3).Infoln("saving key", n.Id, "volume offset", offset, "=>", newOffset, "data_size", n.Size) + glog.V(4).Infoln("saving key", n.Id, "volume offset", offset, "=>", newOffset, "data_size", n.Size) return nil }) From ead756981b0b318aa84124df3e7110f82dd6747e Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Tue, 10 Mar 2020 13:35:05 -0700 Subject: [PATCH 0225/2432] adjust go mod --- go.mod | 1 - 1 file changed, 1 deletion(-) diff --git a/go.mod b/go.mod index 48879fd8c..78803f70e 100644 --- a/go.mod +++ b/go.mod @@ -69,7 +69,6 @@ require ( github.com/spf13/jwalterweatherman v1.1.0 // indirect github.com/spf13/viper v1.4.0 github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271 // indirect - github.com/stretchr/testify v1.4.0 github.com/syndtr/goleveldb v1.0.0 github.com/tidwall/gjson v1.3.2 github.com/tidwall/match v1.0.1 From 7f3ce7b6d63a3bae0ac275c27a5cf959f80c8902 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Tue, 10 Mar 2020 13:36:45 -0700 Subject: [PATCH 0226/2432] adjust tikv build env --- weed/filer2/tikv/tikv_store.go | 1 + 
weed/filer2/tikv/tikv_store_unsupported.go | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/weed/filer2/tikv/tikv_store.go b/weed/filer2/tikv/tikv_store.go index accd4f169..2a9dd6648 100644 --- a/weed/filer2/tikv/tikv_store.go +++ b/weed/filer2/tikv/tikv_store.go @@ -1,5 +1,6 @@ // +build !386 // +build !arm +// +build !windows package tikv diff --git a/weed/filer2/tikv/tikv_store_unsupported.go b/weed/filer2/tikv/tikv_store_unsupported.go index daf29612e..713c84bf8 100644 --- a/weed/filer2/tikv/tikv_store_unsupported.go +++ b/weed/filer2/tikv/tikv_store_unsupported.go @@ -1,4 +1,4 @@ -// +build 386 arm +// +build 386 arm windows package tikv From 60f5f05c78a2918d5219c925cea5847759281a2c Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Tue, 10 Mar 2020 13:52:49 -0700 Subject: [PATCH 0227/2432] 1.61 --- k8s/seaweedfs/Chart.yaml | 2 +- k8s/seaweedfs/values.yaml | 2 +- weed/util/constants.go | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/k8s/seaweedfs/Chart.yaml b/k8s/seaweedfs/Chart.yaml index 550d8bbf2..136d91e20 100644 --- a/k8s/seaweedfs/Chart.yaml +++ b/k8s/seaweedfs/Chart.yaml @@ -1,4 +1,4 @@ apiVersion: v1 description: SeaweedFS name: seaweedfs -version: 1.60 +version: 1.61 diff --git a/k8s/seaweedfs/values.yaml b/k8s/seaweedfs/values.yaml index ddac43f0b..d3e030a08 100644 --- a/k8s/seaweedfs/values.yaml +++ b/k8s/seaweedfs/values.yaml @@ -4,7 +4,7 @@ global: registry: "" repository: "" imageName: chrislusf/seaweedfs - imageTag: "1.60" + imageTag: "1.61" imagePullPolicy: IfNotPresent imagePullSecrets: imagepullsecret restartPolicy: Always diff --git a/weed/util/constants.go b/weed/util/constants.go index 89b76f1e5..c23bc11f6 100644 --- a/weed/util/constants.go +++ b/weed/util/constants.go @@ -5,5 +5,5 @@ import ( ) var ( - VERSION = fmt.Sprintf("%s %d.%d", sizeLimit, 1, 60) + VERSION = fmt.Sprintf("%s %d.%d", sizeLimit, 1, 61) ) From 81610ed0067958537b67f23b0d086ce3c47ebc54 Mon Sep 17 00:00:00 2001 From: HongyanShen <763987993@qq.com> Date: Wed, 11 Mar 2020 14:37:14 +0800 Subject: [PATCH 0228/2432] fix: #1226 --- weed/replication/sink/filersink/fetch_write.go | 11 +++++++---- weed/replication/sink/s3sink/s3_sink.go | 10 +++++----- 2 files changed, 12 insertions(+), 9 deletions(-) diff --git a/weed/replication/sink/filersink/fetch_write.go b/weed/replication/sink/filersink/fetch_write.go index 07218b9b3..360a34620 100644 --- a/weed/replication/sink/filersink/fetch_write.go +++ b/weed/replication/sink/filersink/fetch_write.go @@ -19,17 +19,20 @@ func (fs *FilerSink) replicateChunks(sourceChunks []*filer_pb.FileChunk, dir str if len(sourceChunks) == 0 { return } + + replicatedChunks = make([]*filer_pb.FileChunk, len(sourceChunks)) + var wg sync.WaitGroup - for _, sourceChunk := range sourceChunks { + for chunkIndex, sourceChunk := range sourceChunks { wg.Add(1) - go func(chunk *filer_pb.FileChunk) { + go func(chunk *filer_pb.FileChunk, index int) { defer wg.Done() replicatedChunk, e := fs.replicateOneChunk(chunk, dir) if e != nil { err = e } - replicatedChunks = append(replicatedChunks, replicatedChunk) - }(sourceChunk) + replicatedChunks[index] = replicatedChunk + }(sourceChunk, chunkIndex) } wg.Wait() diff --git a/weed/replication/sink/s3sink/s3_sink.go b/weed/replication/sink/s3sink/s3_sink.go index 5f548559b..e0aee5ada 100644 --- a/weed/replication/sink/s3sink/s3_sink.go +++ b/weed/replication/sink/s3sink/s3_sink.go @@ -91,7 +91,6 @@ func (s3sink *S3Sink) DeleteEntry(key string, isDirectory, deleteIncludeChunks b } func (s3sink *S3Sink) 
CreateEntry(key string, entry *filer_pb.Entry) error { - key = cleanKey(key) if entry.IsDirectory { @@ -106,19 +105,20 @@ func (s3sink *S3Sink) CreateEntry(key string, entry *filer_pb.Entry) error { totalSize := filer2.TotalSize(entry.Chunks) chunkViews := filer2.ViewFromChunks(entry.Chunks, 0, int(totalSize)) - var parts []*s3.CompletedPart + parts := make([]*s3.CompletedPart, len(chunkViews)) + var wg sync.WaitGroup for chunkIndex, chunk := range chunkViews { partId := chunkIndex + 1 wg.Add(1) - go func(chunk *filer2.ChunkView) { + go func(chunk *filer2.ChunkView, index int) { defer wg.Done() if part, uploadErr := s3sink.uploadPart(key, uploadId, partId, chunk); uploadErr != nil { err = uploadErr } else { - parts = append(parts, part) + parts[index] = part } - }(chunk) + }(chunk, chunkIndex) } wg.Wait() From ebc739afb5b312557ab9e349ea18cae591aaab9f Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Wed, 11 Mar 2020 01:13:40 -0700 Subject: [PATCH 0229/2432] filer store postgres: skip empty parameters better fix https://github.com/chrislusf/seaweedfs/issues/1227 --- weed/filer2/postgres/postgres_store.go | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/weed/filer2/postgres/postgres_store.go b/weed/filer2/postgres/postgres_store.go index 27a0c2513..2e5f892f1 100644 --- a/weed/filer2/postgres/postgres_store.go +++ b/weed/filer2/postgres/postgres_store.go @@ -11,7 +11,7 @@ import ( ) const ( - CONNECTION_URL_PATTERN = "host=%s port=%d user=%s password=%s dbname=%s sslmode=%s connect_timeout=30" + CONNECTION_URL_PATTERN = "host=%s port=%d user=%s sslmode=%s connect_timeout=30" ) func init() { @@ -49,7 +49,13 @@ func (store *PostgresStore) initialize(user, password, hostname string, port int store.SqlListExclusive = "SELECT NAME, meta FROM filemeta WHERE dirhash=$1 AND name>$2 AND directory=$3 ORDER BY NAME ASC LIMIT $4" store.SqlListInclusive = "SELECT NAME, meta FROM filemeta WHERE dirhash=$1 AND name>=$2 AND directory=$3 ORDER BY NAME ASC LIMIT $4" - sqlUrl := fmt.Sprintf(CONNECTION_URL_PATTERN, hostname, port, user, password, database, sslmode) + sqlUrl := fmt.Sprintf(CONNECTION_URL_PATTERN, hostname, port, user, sslmode) + if password != "" { + sqlUrl += " password="+password + } + if database != "" { + sqlUrl += " dbname="+database + } var dbErr error store.DB, dbErr = sql.Open("postgres", sqlUrl) if dbErr != nil { From 4237a813ccc92011d740c31c22e5bc29b7d3316e Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Wed, 11 Mar 2020 01:13:50 -0700 Subject: [PATCH 0230/2432] skip printouts --- weed/server/filer_server_handlers_write_autochunk.go | 3 --- 1 file changed, 3 deletions(-) diff --git a/weed/server/filer_server_handlers_write_autochunk.go b/weed/server/filer_server_handlers_write_autochunk.go index a2672b836..666004c33 100644 --- a/weed/server/filer_server_handlers_write_autochunk.go +++ b/weed/server/filer_server_handlers_write_autochunk.go @@ -2,7 +2,6 @@ package weed_server import ( "context" - "fmt" "io" "net/http" "path" @@ -89,8 +88,6 @@ func (fs *FilerServer) doAutoChunk(ctx context.Context, w http.ResponseWriter, r } contentType := part1.Header.Get("Content-Type") - fmt.Printf("autochunk part header: %+v\n", part1.Header) - var fileChunks []*filer_pb.FileChunk chunkOffset := int64(0) From d439d837726f5fdb53e686275a29eaa02d66ad62 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Wed, 11 Mar 2020 10:32:17 -0700 Subject: [PATCH 0231/2432] volume: follow compactionBytePerSecond related to https://github.com/chrislusf/seaweedfs/issues/1108 --- weed/command/backup.go | 
2 +- weed/command/compact.go | 2 +- weed/storage/store_vacuum.go | 3 +-- weed/storage/volume_vacuum.go | 12 ++++++++---- weed/storage/volume_vacuum_test.go | 2 +- 5 files changed, 12 insertions(+), 9 deletions(-) diff --git a/weed/command/backup.go b/weed/command/backup.go index eb2b5ba4a..615be80cf 100644 --- a/weed/command/backup.go +++ b/weed/command/backup.go @@ -119,7 +119,7 @@ func runBackup(cmd *Command, args []string) bool { } if v.SuperBlock.CompactionRevision < uint16(stats.CompactRevision) { - if err = v.Compact2(30 * 1024 * 1024 * 1024); err != nil { + if err = v.Compact2(30*1024*1024*1024, 0); err != nil { fmt.Printf("Compact Volume before synchronizing %v\n", err) return true } diff --git a/weed/command/compact.go b/weed/command/compact.go index 85313b749..4e28aa725 100644 --- a/weed/command/compact.go +++ b/weed/command/compact.go @@ -50,7 +50,7 @@ func runCompact(cmd *Command, args []string) bool { glog.Fatalf("Compact Volume [ERROR] %s\n", err) } } else { - if err = v.Compact2(preallocate); err != nil { + if err = v.Compact2(preallocate, 0); err != nil { glog.Fatalf("Compact Volume [ERROR] %s\n", err) } } diff --git a/weed/storage/store_vacuum.go b/weed/storage/store_vacuum.go index e94d9b516..38159496e 100644 --- a/weed/storage/store_vacuum.go +++ b/weed/storage/store_vacuum.go @@ -16,8 +16,7 @@ func (s *Store) CheckCompactVolume(volumeId needle.VolumeId) (float64, error) { } func (s *Store) CompactVolume(vid needle.VolumeId, preallocate int64, compactionBytePerSecond int64) error { if v := s.findVolume(vid); v != nil { - return v.Compact2(preallocate) // compactionBytePerSecond - // return v.Compact(preallocate, compactionBytePerSecond) + return v.Compact2(preallocate, compactionBytePerSecond) } return fmt.Errorf("volume id %d is not found during compact", vid) } diff --git a/weed/storage/volume_vacuum.go b/weed/storage/volume_vacuum.go index 5d0d63877..669d5dd6c 100644 --- a/weed/storage/volume_vacuum.go +++ b/weed/storage/volume_vacuum.go @@ -57,7 +57,7 @@ func (v *Volume) Compact(preallocate int64, compactionBytePerSecond int64) error } // compact a volume based on deletions in .idx files -func (v *Volume) Compact2(preallocate int64) error { +func (v *Volume) Compact2(preallocate int64, compactionBytePerSecond int64) error { if v.MemoryMapMaxSizeMb != 0 { //it makes no sense to compact in memory return nil @@ -73,7 +73,7 @@ func (v *Volume) Compact2(preallocate int64) error { v.lastCompactIndexOffset = v.IndexFileSize() v.lastCompactRevision = v.SuperBlock.CompactionRevision glog.V(3).Infof("creating copies for volume %d ...", v.Id) - return copyDataBasedOnIndexFile(filePath+".dat", filePath+".idx", filePath+".cpd", filePath+".cpx", v.SuperBlock, v.Version(), preallocate) + return copyDataBasedOnIndexFile(filePath+".dat", filePath+".idx", filePath+".cpd", filePath+".cpx", v.SuperBlock, v.Version(), preallocate, compactionBytePerSecond) } func (v *Volume) CommitCompact() error { @@ -366,7 +366,7 @@ func (v *Volume) copyDataAndGenerateIndexFile(dstName, idxName string, prealloca return } -func copyDataBasedOnIndexFile(srcDatName, srcIdxName, dstDatName, datIdxName string, sb super_block.SuperBlock, version needle.Version, preallocate int64) (err error) { +func copyDataBasedOnIndexFile(srcDatName, srcIdxName, dstDatName, datIdxName string, sb super_block.SuperBlock, version needle.Version, preallocate int64, compactionBytePerSecond int64) (err error) { var ( srcDatBackend, dstDatBackend backend.BackendStorageFile dataFile *os.File @@ -395,6 +395,8 @@ func 
copyDataBasedOnIndexFile(srcDatName, srcIdxName, dstDatName, datIdxName str dstDatBackend.WriteAt(sb.Bytes(), 0) newOffset := int64(sb.BlockSize()) + writeThrottler := util.NewWriteThrottler(compactionBytePerSecond) + oldNm.AscendingVisit(func(value needle_map.NeedleValue) error { offset, size := value.Offset, value.Size @@ -419,7 +421,9 @@ func copyDataBasedOnIndexFile(srcDatName, srcIdxName, dstDatName, datIdxName str if _, _, _, err = n.Append(dstDatBackend, sb.Version); err != nil { return fmt.Errorf("cannot append needle: %s", err) } - newOffset += n.DiskSize(version) + delta := n.DiskSize(version) + newOffset += delta + writeThrottler.MaybeSlowdown(delta) glog.V(4).Infoln("saving key", n.Id, "volume offset", offset, "=>", newOffset, "data_size", n.Size) return nil diff --git a/weed/storage/volume_vacuum_test.go b/weed/storage/volume_vacuum_test.go index 95f43d6ec..51f04c8b1 100644 --- a/weed/storage/volume_vacuum_test.go +++ b/weed/storage/volume_vacuum_test.go @@ -84,7 +84,7 @@ func TestCompaction(t *testing.T) { } startTime := time.Now() - v.Compact2(0) + v.Compact2(0, 0) speed := float64(v.ContentSize()) / time.Now().Sub(startTime).Seconds() t.Logf("compaction speed: %.2f bytes/s", speed) From 9797420ac8c510f8ca70ad15d03113fba4f69baa Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Wed, 11 Mar 2020 20:34:04 -0700 Subject: [PATCH 0232/2432] go mod tidy --- go.mod | 3 +++ go.sum | 10 ++++++---- 2 files changed, 9 insertions(+), 4 deletions(-) diff --git a/go.mod b/go.mod index 78803f70e..cfdd6925d 100644 --- a/go.mod +++ b/go.mod @@ -18,7 +18,10 @@ require ( github.com/dustin/go-humanize v1.0.0 github.com/eapache/go-resiliency v1.2.0 // indirect github.com/facebookgo/clock v0.0.0-20150410010913-600d898af40a + github.com/facebookgo/ensure v0.0.0-20200202191622-63f1cf65ac4c // indirect + github.com/facebookgo/stack v0.0.0-20160209184415-751773369052 // indirect github.com/facebookgo/stats v0.0.0-20151006221625-1b76add642e4 + github.com/facebookgo/subset v0.0.0-20200203212716-c811ad88dec4 // indirect github.com/frankban/quicktest v1.7.2 // indirect github.com/gabriel-vasile/mimetype v1.0.0 github.com/go-redis/redis v6.15.2+incompatible diff --git a/go.sum b/go.sum index d16280568..1911f50b0 100644 --- a/go.sum +++ b/go.sum @@ -126,8 +126,14 @@ github.com/eknkc/amber v0.0.0-20171010120322-cdade1c07385 h1:clC1lXBpe2kTj2VHdaI github.com/eknkc/amber v0.0.0-20171010120322-cdade1c07385/go.mod h1:0vRUJqYpeSZifjYj7uP3BG/gKcuzL9xWVV/Y+cK33KM= github.com/facebookgo/clock v0.0.0-20150410010913-600d898af40a h1:yDWHCSQ40h88yih2JAcL6Ls/kVkSE8GFACTGVnMPruw= github.com/facebookgo/clock v0.0.0-20150410010913-600d898af40a/go.mod h1:7Ga40egUymuWXxAe151lTNnCv97MddSOVsjpPPkityA= +github.com/facebookgo/ensure v0.0.0-20200202191622-63f1cf65ac4c h1:8ISkoahWXwZR41ois5lSJBSVw4D0OV19Ht/JSTzvSv0= +github.com/facebookgo/ensure v0.0.0-20200202191622-63f1cf65ac4c/go.mod h1:Yg+htXGokKKdzcwhuNDwVvN+uBxDGXJ7G/VN1d8fa64= +github.com/facebookgo/stack v0.0.0-20160209184415-751773369052 h1:JWuenKqqX8nojtoVVWjGfOF9635RETekkoH6Cc9SX0A= +github.com/facebookgo/stack v0.0.0-20160209184415-751773369052/go.mod h1:UbMTZqLaRiH3MsBH8va0n7s1pQYcu3uTb8G4tygF4Zg= github.com/facebookgo/stats v0.0.0-20151006221625-1b76add642e4 h1:0YtRCqIZs2+Tz49QuH6cJVw/IFqzo39gEqZ0iYLxD2M= github.com/facebookgo/stats v0.0.0-20151006221625-1b76add642e4/go.mod h1:vsJz7uE339KUCpBXx3JAJzSRH7Uk4iGGyJzR529qDIA= +github.com/facebookgo/subset v0.0.0-20200203212716-c811ad88dec4 h1:7HZCaLC5+BZpmbhCOZJ293Lz68O7PYrF2EzeiFMwCLk= +github.com/facebookgo/subset 
v0.0.0-20200203212716-c811ad88dec4/go.mod h1:5tD+neXqOorC30/tWg0LCSkrqj/AR6gu8yY8/fpw1q0= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fortytw2/leaktest v1.2.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= @@ -135,8 +141,6 @@ github.com/frankban/quicktest v1.7.2 h1:2QxQoC1TS09S7fhCPsrvqYdvP1H5M1P1ih5ABm3B github.com/frankban/quicktest v1.7.2/go.mod h1:jaStnuzAqU1AJdCO0l53JDCJrVDKcS03DbaAcR7Ks/o= github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= -github.com/gabriel-vasile/mimetype v0.3.17 h1:NGWgggJJqTofUcTV1E7hkk2zVjZ54EfJa1z5O3z6By4= -github.com/gabriel-vasile/mimetype v0.3.17/go.mod h1:kMJbg3SlWZCsj4R73F1WDzbT9AyGCOVmUtIxxwO5pmI= github.com/gabriel-vasile/mimetype v1.0.0 h1:0QKnAQQhG6oOsb4GK7iPlet7RtjHi9us8RF/nXoTxhI= github.com/gabriel-vasile/mimetype v1.0.0/go.mod h1:6CDPel/o/3/s4+bp6kIbsWATq8pmgOisOPG40CJa6To= github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= @@ -510,8 +514,6 @@ github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoH github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= -github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/struCoder/pidusage v0.1.2/go.mod h1:pWBlW3YuSwRl6h7R5KbvA4N8oOqe9LjaKW5CwT1SPjI= github.com/syndtr/goleveldb v0.0.0-20180815032940-ae2bd5eed72d/go.mod h1:Z4AUp2Km+PwemOoO/VB5AOx9XSsIItzFjoJlOSiYmn0= github.com/syndtr/goleveldb v1.0.0 h1:fBdIW9lB4Iz0n9khmH8w27SJ3QEJ7+IgjPEwGSZiFdE= From e85da50ad4933da86b1098bb3e84d01f6c469e0b Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Wed, 11 Mar 2020 20:59:00 -0700 Subject: [PATCH 0233/2432] remove tikv, move its support to "tikv" branch --- go.mod | 16 +- go.sum | 162 ------------- weed/command/scaffold.go | 5 - weed/filer2/tikv/tikv_store.go | 253 --------------------- weed/filer2/tikv/tikv_store_unsupported.go | 65 ------ weed/server/filer_server.go | 1 - 6 files changed, 8 insertions(+), 494 deletions(-) delete mode 100644 weed/filer2/tikv/tikv_store.go delete mode 100644 weed/filer2/tikv/tikv_store_unsupported.go diff --git a/go.mod b/go.mod index cfdd6925d..d50ff1e3e 100644 --- a/go.mod +++ b/go.mod @@ -10,6 +10,7 @@ require ( github.com/Shopify/sarama v1.23.1 github.com/aws/aws-sdk-go v1.23.13 github.com/chrislusf/raft v0.0.0-20190225081310-10d6e2182d92 + github.com/coreos/bbolt v1.3.3 // indirect github.com/coreos/etcd v3.3.15+incompatible // indirect github.com/coreos/go-semver v0.3.0 // indirect github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f // indirect @@ -28,11 +29,13 @@ require ( github.com/go-sql-driver/mysql v1.4.1 github.com/gocql/gocql v0.0.0-20190829130954-e163eff7a8c6 github.com/gogo/protobuf v1.2.2-0.20190730201129-28a6bbf47e48 // indirect + github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6 // indirect github.com/golang/protobuf v1.3.2 github.com/google/btree v1.0.0 github.com/google/uuid v1.1.1 github.com/gorilla/mux v1.7.3 github.com/gorilla/websocket v1.4.1 // indirect + 
github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4 // indirect github.com/grpc-ecosystem/grpc-gateway v1.11.0 // indirect github.com/hashicorp/golang-lru v0.5.3 // indirect github.com/jacobsa/daemonize v0.0.0-20160101105449-e460293e890f @@ -51,19 +54,14 @@ require ( github.com/nats-io/nats-server/v2 v2.0.4 // indirect github.com/onsi/ginkgo v1.10.1 // indirect github.com/onsi/gomega v1.7.0 // indirect - github.com/opentracing/opentracing-go v1.1.0 // indirect github.com/pelletier/go-toml v1.4.0 // indirect github.com/peterh/liner v1.1.0 github.com/pierrec/lz4 v2.2.7+incompatible // indirect - github.com/pingcap/kvproto v0.0.0-20191022073741-81b2a2d9e986 // indirect - github.com/pingcap/tidb v1.1.0-beta.0.20191023070859-58fc7d44f73b - github.com/pingcap/tipb v0.0.0-20191022094114-a2e8c3fa634b // indirect github.com/prometheus/client_golang v1.1.0 github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4 // indirect github.com/prometheus/procfs v0.0.4 // indirect github.com/rakyll/statik v0.1.6 github.com/rcrowley/go-metrics v0.0.0-20190826022208-cac0b30c2563 // indirect - github.com/remyoudompheng/bigfft v0.0.0-20190728182440-6a916e37a237 // indirect github.com/rwcarlsen/goexif v0.0.0-20190401172101-9e8deecbddbd github.com/seaweedfs/fuse v0.0.0-20190510212405-310228904eff github.com/sirupsen/logrus v1.4.2 // indirect @@ -75,23 +73,25 @@ require ( github.com/syndtr/goleveldb v1.0.0 github.com/tidwall/gjson v1.3.2 github.com/tidwall/match v1.0.1 - github.com/uber-go/atomic v1.4.0 // indirect - github.com/uber/jaeger-client-go v2.17.0+incompatible // indirect - github.com/uber/jaeger-lib v2.0.0+incompatible // indirect github.com/willf/bitset v1.1.10 // indirect github.com/willf/bloom v2.0.3+incompatible github.com/wsxiaoys/terminal v0.0.0-20160513160801-0940f3fc43a0 // indirect + go.etcd.io/bbolt v1.3.3 // indirect go.etcd.io/etcd v3.3.15+incompatible + go.uber.org/multierr v1.2.0 // indirect gocloud.dev v0.16.0 gocloud.dev/pubsub/natspubsub v0.16.0 gocloud.dev/pubsub/rabbitpubsub v0.16.0 + golang.org/x/crypto v0.0.0-20190911031432-227b76d455e7 // indirect golang.org/x/image v0.0.0-20190829233526-b3c06291d021 // indirect golang.org/x/net v0.0.0-20190909003024-a7b16738d86b golang.org/x/sys v0.0.0-20190910064555-bbd175535a8b golang.org/x/tools v0.0.0-20190911022129-16c5e0f7d110 google.golang.org/api v0.9.0 google.golang.org/appengine v1.6.2 // indirect + google.golang.org/genproto v0.0.0-20190905072037-92dd089d5514 // indirect google.golang.org/grpc v1.23.0 + gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 // indirect gopkg.in/jcmturner/goidentity.v3 v3.0.0 // indirect gopkg.in/jcmturner/gokrb5.v7 v7.3.0 // indirect gopkg.in/karlseguin/expect.v1 v1.0.1 // indirect diff --git a/go.sum b/go.sum index 1911f50b0..d077ba67d 100644 --- a/go.sum +++ b/go.sum @@ -40,8 +40,6 @@ github.com/Shopify/sarama v1.23.1 h1:XxJBCZEoWJtoWjf/xRbmGUpAmTZGnuuF0ON0EvxxBrs github.com/Shopify/sarama v1.23.1/go.mod h1:XLH1GYJnLVE0XCr6KdJGVJRTwY30moWNJ4sERjXX6fs= github.com/Shopify/toxiproxy v2.1.4+incompatible h1:TKdv8HiTLgE5wdJuEML90aBgNWsokNbMijUGhmcoBJc= github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= -github.com/StackExchange/wmi v0.0.0-20180725035823-b12b22c5341f h1:5ZfJxyXo8KyX8DgGXC5B7ILL8y51fci/qYz2B4j8iLY= -github.com/StackExchange/wmi v0.0.0-20180725035823-b12b22c5341f/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod 
h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= @@ -55,10 +53,8 @@ github.com/beorn7/perks v1.0.0 h1:HWo1m869IqiPhD389kmkxeTalrjNbbJTC8LXupb+sl0= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= -github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= github.com/bitly/go-hostpool v0.0.0-20171023180738-a3a6125de932 h1:mXoPYz/Ul5HYEDvkta6I8/rnYM5gSdSV2tJ6XbZuEtY= github.com/bitly/go-hostpool v0.0.0-20171023180738-a3a6125de932/go.mod h1:NOuUCSz6Q9T7+igc/hlvDOUdtWKryOrtFyIVABv/p7k= -github.com/blacktear23/go-proxyprotocol v0.0.0-20180807104634-af7a81e8dd0d/go.mod h1:VKt7CNAQxpFpSDz3sXyj9hY/GbVsQCr0sB3w59nE7lU= github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869 h1:DDGfHa7BWjL4YnC6+E63dPcxHo2sUxDIu8g3QgEJdRY= github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4= github.com/census-instrumentation/opencensus-proto v0.2.0 h1:LzQXZOgg4CQfE6bFvXGM30YZL1WW/M337pXml+GrcZ4= @@ -66,52 +62,35 @@ github.com/census-instrumentation/opencensus-proto v0.2.0/go.mod h1:f6KPmirojxKA github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/chrislusf/raft v0.0.0-20190225081310-10d6e2182d92 h1:lM9SFsh0EPXkyJyrTJqLZPAIJBtNFP6LNkYXu2MnSZI= github.com/chrislusf/raft v0.0.0-20190225081310-10d6e2182d92/go.mod h1:4jyiUCD5y548+yKW+oiHtccBiMaLCCbFBpK2t7X4eUo= -github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= -github.com/chzyer/readline v0.0.0-20171208011716-f6d7a1f6fbf3/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= -github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd h1:qMd81Ts1T2OTKmB4acZcyKaMtRnY5Y44NuXGX2GFJ1w= -github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= github.com/coreos/bbolt v1.3.2 h1:wZwiHHUieZCquLkDL0B8UhzreNWsPHooDAG3q34zk0s= github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= github.com/coreos/bbolt v1.3.3 h1:n6AiVyVRKQFNb6mJlwESEvvLoDyiTzXX7ORAUlkeBdY= github.com/coreos/bbolt v1.3.3/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= github.com/coreos/etcd v3.3.10+incompatible h1:jFneRYjIvLMLhDLCzuTuU4rSJUjRplcJQ7pD7MnhC04= github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= -github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/etcd v3.3.15+incompatible h1:+9RjdC18gMxNQVvSiXvObLu29mOFmkgdsB4cRTlV+EE= github.com/coreos/etcd v3.3.15+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/go-semver v0.2.0 h1:3Jm3tLmsgAYcjC+4Up7hJrFBPr+n7rAqYeSw/SZazuY= github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM= 
github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= -github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/coreos/go-systemd v0.0.0-20181031085051-9002847aa142/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e h1:Wf6HqHfScWJN9/ZjdUKyjop4mf3Qdd+1TvvltAvM3m8= github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f h1:JOrtw2xFKzlg+cbHpyrpLDmnN1HqhBfnX7WDiW7eG2c= github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f h1:lBNOc5arjvs8E5mO2tbpBpLoyyu8B6e44T7hJy6potg= github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= -github.com/cznic/mathutil v0.0.0-20181122101859-297441e03548 h1:iwZdTE0PVqJCos1vaoKsclOGD3ADKpshg3SRtYBbwso= -github.com/cznic/mathutil v0.0.0-20181122101859-297441e03548/go.mod h1:e6NPNENfs9mPDVNRekM7lKScauxd5kXTr1Mfyig6TDM= -github.com/cznic/sortutil v0.0.0-20150617083342-4c7342852e65 h1:hxuZop6tSoOi0sxFzoGGYdRqNrPubyaIf9KoBG9tPiE= -github.com/cznic/sortutil v0.0.0-20150617083342-4c7342852e65/go.mod h1:q2w6Bg5jeox1B+QkJ6Wp/+Vn0G/bo3f1uY7Fn3vivIQ= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/devigned/tab v0.1.1/go.mod h1:XG9mPq0dFghrYvoBF3xdRrJzSTX1b7IQrvaL9mzjeJY= github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= -github.com/dgryski/go-farm v0.0.0-20190104051053-3adb47b1fb0f h1:dDxpBYafY/GYpcl+LS4Bn3ziLPuEdGRkRjYAbSlWxSA= -github.com/dgryski/go-farm v0.0.0-20190104051053-3adb47b1fb0f/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= github.com/dimchansky/utfbom v1.1.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQvIirEdv+8= github.com/disintegration/imaging v1.6.1 h1:JnBbK6ECIZb1NsWIikP9pd8gIlTIRx7fuDNpU9fsxOE= github.com/disintegration/imaging v1.6.1/go.mod h1:xuIt+sRxDFrHS0drzXUlCJthkJ8k7lkkUojDSR247MQ= -github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= -github.com/dustin/go-humanize v0.0.0-20180421182945-02af3965c54e/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo= github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/eapache/go-resiliency v1.1.0 h1:1NtRmCAqadE2FN4ZcN6g90TP3uk8cg9rn9eNK2197aU= @@ -122,8 +101,6 @@ github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21 h1:YEetp8 github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= github.com/eapache/queue v1.1.0 
h1:YOEu7KNc61ntiQlcEeUIoDTJ2o8mQznoNvUhiigpIqc= github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= -github.com/eknkc/amber v0.0.0-20171010120322-cdade1c07385 h1:clC1lXBpe2kTj2VHdaIu9ajZQe4kcEY9j0NsnDDBZ3o= -github.com/eknkc/amber v0.0.0-20171010120322-cdade1c07385/go.mod h1:0vRUJqYpeSZifjYj7uP3BG/gKcuzL9xWVV/Y+cK33KM= github.com/facebookgo/clock v0.0.0-20150410010913-600d898af40a h1:yDWHCSQ40h88yih2JAcL6Ls/kVkSE8GFACTGVnMPruw= github.com/facebookgo/clock v0.0.0-20150410010913-600d898af40a/go.mod h1:7Ga40egUymuWXxAe151lTNnCv97MddSOVsjpPPkityA= github.com/facebookgo/ensure v0.0.0-20200202191622-63f1cf65ac4c h1:8ISkoahWXwZR41ois5lSJBSVw4D0OV19Ht/JSTzvSv0= @@ -145,34 +122,24 @@ github.com/gabriel-vasile/mimetype v1.0.0 h1:0QKnAQQhG6oOsb4GK7iPlet7RtjHi9us8RF github.com/gabriel-vasile/mimetype v1.0.0/go.mod h1:6CDPel/o/3/s4+bp6kIbsWATq8pmgOisOPG40CJa6To= github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32/go.mod h1:GIjDIg/heH5DOkXY3YJ/wNhfHsQHoXGjl8G8amsYQ1I= github.com/go-ini/ini v1.25.4/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= -github.com/go-ole/go-ole v1.2.1 h1:2lOsA72HgjxAuMlKpFiCbHTvu44PIVkZ5hqm3RSdI/E= -github.com/go-ole/go-ole v1.2.1/go.mod h1:7FAglXiTm7HKlQRDeOQ6ZNUHidzCWXuZWq/1dTyBNF8= -github.com/go-playground/overalls v0.0.0-20180201144345-22ec1a223b7c/go.mod h1:UqxAgEOt89sCiXlrc/ycnx00LVvUO/eS8tMUkWX4R7w= github.com/go-redis/redis v6.15.2+incompatible h1:9SpNVG76gr6InJGxoZ6IuuxaCOQwDAhzyXg+Bs+0Sb4= github.com/go-redis/redis v6.15.2+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8wamY7mA7PouImQ2Jvg6kA= -github.com/go-sql-driver/mysql v0.0.0-20170715192408-3955978caca4/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= github.com/go-sql-driver/mysql v1.4.1 h1:g24URVg0OFbNUTx9qqY1IRZ9D9z3iPyi5zKhQZpNwpA= github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/gocql/gocql v0.0.0-20190829130954-e163eff7a8c6 h1:P66kRWyEoIx6URKgAC3ijx9jo9gEid7bEhLQ/Z0G65A= github.com/gocql/gocql v0.0.0-20190829130954-e163eff7a8c6/go.mod h1:Q7Sru5153KG8D9zwueuQJB3ccJf9/bIwF/x8b3oKgT8= -github.com/gogo/protobuf v0.0.0-20180717141946-636bf0302bc9/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= -github.com/gogo/protobuf v1.0.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= -github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.1 h1:/s5zKNz0uPFCZ5hddgPdo2TK2TVrUNMn0OOX8/aZMTE= github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= github.com/gogo/protobuf v1.2.2-0.20190730201129-28a6bbf47e48 h1:X+zN6RZXsvnrSJaAIQhZezPfAfvsqihKKR8oiLHid34= github.com/gogo/protobuf v1.2.2-0.20190730201129-28a6bbf47e48/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= github.com/golang/glog 
v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20181024230925-c65c006176ff/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef h1:veQD95Isof8w9/WXiA+pa3tz3fJXkt5B7QaRBrM62gk= github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6 h1:ZgQEtGgCBiWRM39fZuwSd1LwSqqSW0hOdXCYYDX0R3I= @@ -180,7 +147,6 @@ github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4er github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= -github.com/golang/protobuf v0.0.0-20180814211427-aa810b61a9c7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs= @@ -189,7 +155,6 @@ github.com/golang/snappy v0.0.0-20170215233205-553a64147049/go.mod h1:/XxbfmMg8l github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/google/btree v0.0.0-20180124185431-e89373fe6b4a/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0 h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= @@ -208,9 +173,7 @@ github.com/google/martian v2.1.1-0.20190517191504-25dcb96d9e51+incompatible h1:x github.com/google/martian v2.1.1-0.20190517191504-25dcb96d9e51+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/shlex v0.0.0-20181106134648-c34317bd91bf/go.mod h1:RpwtwJQFrIEPstU94h88MWPXP2ektJZ8cZ0YntAmXiE= github.com/google/subcommands v1.0.1/go.mod h1:ZjhPrFU+Olkh9WazFPsl27BQ4UPiG37m3yTrtFlrHVk= -github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/wire v0.3.0 h1:imGQZGEVEHpje5056+K+cgdO72p0LQv2xIIFXNGUf60= @@ -220,14 +183,8 @@ github.com/googleapis/gax-go v2.0.2+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5 h1:sjZBwGj9Jlw33ImPtvFviGYvseOtDM7hkSKB7+Tv3SM= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= 
-github.com/gorilla/context v0.0.0-20160226214623-1ea25387ff6f/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= -github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= -github.com/gorilla/mux v1.6.1/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= -github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= github.com/gorilla/mux v1.7.3 h1:gnP5JzjVOuiZD07fKKToCAOjS0yOpj/qPETTXCCS6hw= github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= -github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= -github.com/gorilla/websocket v1.2.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.4.0 h1:WDFjx/TMzVgy9VdMMQi2K2Emtwi2QcUQsztZ/zLaH/Q= github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.4.1 h1:q7AeDBpnBk8AogcD4DSag/Ukw/KV+YhzLj2bP5HvKCM= @@ -238,8 +195,6 @@ github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= -github.com/grpc-ecosystem/grpc-gateway v1.4.1/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw= -github.com/grpc-ecosystem/grpc-gateway v1.5.1/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw= github.com/grpc-ecosystem/grpc-gateway v1.8.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.9.0 h1:bM6ZAFZmc/wPFaRDi0d5L7hGEZEx/2u+Tmr2evNHDiI= github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= @@ -259,7 +214,6 @@ github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= -github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/jacobsa/daemonize v0.0.0-20160101105449-e460293e890f h1:X+tnaqoCcBgAwSTJtoYW6p0qKiuPyMfofEHEFUf2kdU= github.com/jacobsa/daemonize v0.0.0-20160101105449-e460293e890f/go.mod h1:Ip4fOwzCrnDVuluHBd7FXIMb7SHOKfkt9/UDrYSZvqI= github.com/jcmturner/gofork v0.0.0-20190328161633-dc7c13fece03 h1:FUwcHNlEqkqLjLBdCp5PRlCFijNjvcYANOZXzCfXwCM= @@ -276,8 +230,6 @@ github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCV github.com/json-iterator/go v1.1.7 h1:KfgG9LzI+pYjr4xvmz/5H4FXjokeP+rlHLhv3iH62Fo= github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= -github.com/juju/ratelimit v1.0.1 h1:+7AIFJVQ0EQgq/K9+0Krm7m530Du7tIz0METWzN0RgY= -github.com/juju/ratelimit v1.0.1/go.mod h1:qapgC/Gy+xNh9UxzV13HGGl/6UXNN+ct+vwSgWNm/qk= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/karlseguin/ccache v2.0.3+incompatible h1:j68C9tWOROiOLWTS/kCGg9IcJG+ACqn5+0+t8Oh83UU= github.com/karlseguin/ccache 
v2.0.3+incompatible/go.mod h1:CM9tNPzT6EdRh14+jiW8mEF9mkNZuuE51qmgGYUB93w= @@ -286,7 +238,6 @@ github.com/karlseguin/expect v1.0.1/go.mod h1:zNBxMY8P21owkeogJELCLeHIt+voOSduHY github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/cpuid v0.0.0-20170728055534-ae7887de9fa5/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= github.com/klauspost/cpuid v1.2.1 h1:vJi+O/nMdFt0vqm8NZBI6wzALWdA2X+egi0ogNyrC/w= github.com/klauspost/cpuid v1.2.1/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= github.com/klauspost/crc32 v1.2.0 h1:0VuyqOCruD33/lJ/ojXNvzVyl8Zr5zdTmj9l9qLZ86I= @@ -300,7 +251,6 @@ github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxv github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pty v1.0.0/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= @@ -313,22 +263,17 @@ github.com/magiconair/properties v1.8.0 h1:LLgXmsheXeRoUOBOjtwPQCWIYqM/LU1ayDtDe github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/magiconair/properties v1.8.1 h1:ZC2Vc7/ZFkGmsVC9KvOjumD+G5lXy2RtTKyzRKO2BQ4= github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= -github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= github.com/mattn/go-ieproxy v0.0.0-20190610004146-91bb50d98149 h1:HfxbT6/JcvIljmERptWhwa8XzP7H3T+Z2N26gTsaDaA= github.com/mattn/go-ieproxy v0.0.0-20190610004146-91bb50d98149/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc= github.com/mattn/go-ieproxy v0.0.0-20190702010315-6dee0af9227d/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc= github.com/mattn/go-ieproxy v0.0.0-20190805055040-f9202b1cfdeb h1:hXqqXzQtJbENrsb+rsIqkVqcg4FUJL0SQFGw08Dgivw= github.com/mattn/go-ieproxy v0.0.0-20190805055040-f9202b1cfdeb/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc= -github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= -github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/mattn/go-runewidth v0.0.3 h1:a+kO+98RDGEfo6asOGMmpodZq4FNtnGP54yps8BzLR4= github.com/mattn/go-runewidth v0.0.3/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/mattn/go-runewidth v0.0.4 h1:2BvfKmzob6Bmd4YsL0zygOqfdFnK7GR4QL06Do4/p7Y= github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= -github.com/mattn/go-shellwords v1.0.3/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o= -github.com/matttproud/golang_protobuf_extensions v1.0.0/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/matttproud/golang_protobuf_extensions v1.0.1 
h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= @@ -340,9 +285,6 @@ github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJ github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/montanaflynn/stats v0.0.0-20151014174947-eeaced052adb/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc= -github.com/montanaflynn/stats v0.0.0-20180911141734-db72e6cae808 h1:pmpDGKLw4n82EtrNiLqB+xSz/JQwFOaZuMALYUHwX5s= -github.com/montanaflynn/stats v0.0.0-20180911141734-db72e6cae808/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/nats-io/jwt v0.2.6/go.mod h1:mQxQ0uHQ9FhEVPIcTSKwx2lqZEpXWWcCgA7R6NrWvvY= github.com/nats-io/jwt v0.2.14 h1:wA50KvFz/JXGXMHRygTWsRGh/ixxgC5E3kHvmtGLNf4= @@ -358,31 +300,18 @@ github.com/nats-io/nkeys v0.1.0 h1:qMd4+pRHgdr1nAClu+2h/2a5F2TmKcCzjCDazVgRoX4= github.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= github.com/nats-io/nuid v1.0.1 h1:5iA8DT8V7q8WK2EScv2padNa/rTESc1KdnPw4TC2paw= github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= -github.com/ngaut/pools v0.0.0-20180318154953-b7bc8c42aac7 h1:7KAv7KMGTTqSmYZtNdcNTgsos+vFzULLwyElndwn+5c= -github.com/ngaut/pools v0.0.0-20180318154953-b7bc8c42aac7/go.mod h1:iWMfgwqYW+e8n5lC/jjNEhwcjbRDpl5NT7n2h+4UNcI= -github.com/ngaut/sync2 v0.0.0-20141008032647-7a24ed77b2ef h1:K0Fn+DoFqNqktdZtdV3bPQ/0cuYh2H4rkg0tytX/07k= -github.com/ngaut/sync2 v0.0.0-20141008032647-7a24ed77b2ef/go.mod h1:7WjlapSfwQyo6LNmIvEWzsW1hbBQfpUO4JWnuQRmva8= -github.com/nicksnyder/go-i18n v1.10.0/go.mod h1:HrK7VCrbOvQoUAQ7Vpy7i87N7JZZZ7R2xBGjv0j365Q= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= -github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.7.0 h1:WSHQ+IS43OoUrWtD1/bbclrwK8TTH5hzp+umCiuxHgs= github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.10.1 h1:q/mM8GF/n0shIN8SaAZ0V+jnLPzen6WIVZdiwrRlMlo= github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/gomega v1.4.2/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.4.3 h1:RE1xgDvH7imwFD45h+u2SgIfERHlS2yNG4DObb5BSKU= github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.0 h1:XPnZz8VVBHjVsy1vzJmRwIcSwiUO+JFfrv/xGiigmME= github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= -github.com/opentracing/basictracer-go v1.0.0 h1:YyUAhaEfjoWXclZVJ9sGoNct7j4TVk7lZWlQw5UXuoo= -github.com/opentracing/basictracer-go v1.0.0/go.mod h1:QfBfYuafItcjQuMwinw9GhYKwFXS9KnPs5lxoYwgW74= -github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= -github.com/opentracing/opentracing-go 
v1.1.0 h1:pWlfV3Bxv7k65HYwkikxat0+s3pV4bsqf19k25Ur8rU= -github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/pelletier/go-toml v1.2.0 h1:T5zMGML61Wp+FlcbWjRDT7yAxhJNAiPPLOFECq181zc= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= -github.com/pelletier/go-toml v1.3.0/go.mod h1:PN7xzY2wHTK0K9p34ErDQMlFxa51Fk0OUruD3k1mMwo= github.com/pelletier/go-toml v1.4.0 h1:u3Z1r+oOXJIkxqw34zVhyPgjBsm6X2wn21NWs/HfSeg= github.com/pelletier/go-toml v1.4.0/go.mod h1:PN7xzY2wHTK0K9p34ErDQMlFxa51Fk0OUruD3k1mMwo= github.com/peterh/liner v1.1.0 h1:f+aAedNJA6uk7+6rXsYBnhdo4Xux7ESLe+kcuVUF5os= @@ -390,65 +319,29 @@ github.com/peterh/liner v1.1.0/go.mod h1:CRroGNssyjTd/qIG2FyxByd2S8JEAZXBl4qUrZf github.com/pierrec/lz4 v0.0.0-20190327172049-315a67e90e41/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc= github.com/pierrec/lz4 v2.2.7+incompatible h1:Eerk9aiqeZo2QzsbWOAsELUf9ddvAxEdMY9LYze/DEc= github.com/pierrec/lz4 v2.2.7+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= -github.com/pingcap/check v0.0.0-20190102082844-67f458068fc8 h1:USx2/E1bX46VG32FIw034Au6seQ2fY9NEILmNh/UlQg= -github.com/pingcap/check v0.0.0-20190102082844-67f458068fc8/go.mod h1:B1+S9LNcuMyLH/4HMTViQOJevkGiik3wW2AN9zb2fNQ= -github.com/pingcap/errcode v0.0.0-20180921232412-a1a7271709d9 h1:KH4f4Si9XK6/IW50HtoaiLIFHGkapOM6w83za47UYik= -github.com/pingcap/errcode v0.0.0-20180921232412-a1a7271709d9/go.mod h1:4b2X8xSqxIroj/IZ9MX/VGZhAwc11wB9wRIzHvz6SeM= -github.com/pingcap/errors v0.11.0/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8= -github.com/pingcap/errors v0.11.4 h1:lFuQV/oaUMGcD2tqt+01ROSmJs75VG1ToEOkZIZ4nE4= -github.com/pingcap/errors v0.11.4/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8= -github.com/pingcap/failpoint v0.0.0-20190512135322-30cc7431d99c h1:hvQd3aOLKLF7xvRV6DzvPkKY4QXzfVbjU1BhW0d9yL8= -github.com/pingcap/failpoint v0.0.0-20190512135322-30cc7431d99c/go.mod h1:DNS3Qg7bEDhU6EXNHF+XSv/PGznQaMJ5FWvctpm6pQI= -github.com/pingcap/goleveldb v0.0.0-20171020122428-b9ff6c35079e h1:P73/4dPCL96rGrobssy1nVy2VaVpNCuLpCbr+FEaTA8= -github.com/pingcap/goleveldb v0.0.0-20171020122428-b9ff6c35079e/go.mod h1:O17XtbryoCJhkKGbT62+L2OlrniwqiGLSqrmdHCMzZw= -github.com/pingcap/kvproto v0.0.0-20190822090350-11ea838aedf7/go.mod h1:QMdbTAXCHzzygQzqcG9uVUgU2fKeSN1GmfMiykdSzzY= -github.com/pingcap/kvproto v0.0.0-20190910074005-0e61b6f435c1/go.mod h1:QMdbTAXCHzzygQzqcG9uVUgU2fKeSN1GmfMiykdSzzY= -github.com/pingcap/kvproto v0.0.0-20191022073741-81b2a2d9e986 h1:XFh7n8Cheo00pakfhpUofnlptHCuz9lkp4p/jXPb8lM= -github.com/pingcap/kvproto v0.0.0-20191022073741-81b2a2d9e986/go.mod h1:QMdbTAXCHzzygQzqcG9uVUgU2fKeSN1GmfMiykdSzzY= -github.com/pingcap/log v0.0.0-20190715063458-479153f07ebd/go.mod h1:WpHUKhNZ18v116SvGrmjkA9CBhYmuUTKL+p8JC9ANEw= -github.com/pingcap/log v0.0.0-20191012051959-b742a5d432e9 h1:AJD9pZYm72vMgPcQDww9rkZ1DnWfl0pXV3BOWlkYIjA= -github.com/pingcap/log v0.0.0-20191012051959-b742a5d432e9/go.mod h1:4rbK1p9ILyIfb6hU7OG2CiWSqMXnp3JMbiaVJ6mvoY8= -github.com/pingcap/parser v0.0.0-20191021083151-7c64f78a5100 h1:TRyps2d+2TsJv1Vk4S2D+5COMDVKClnAO5aNmGGVyj0= -github.com/pingcap/parser v0.0.0-20191021083151-7c64f78a5100/go.mod h1:1FNvfp9+J0wvc4kl8eGNh7Rqrxveg15jJoWo/a0uHwA= -github.com/pingcap/pd v1.1.0-beta.0.20190923032047-5c648dc365e0 h1:GIEq+wZfrl2bcJxpuSrEH4H7/nlf5YdmpS+dU9lNIt8= -github.com/pingcap/pd v1.1.0-beta.0.20190923032047-5c648dc365e0/go.mod h1:G/6rJpnYwM0LKMec2rI82/5Kg6GaZMvlfB+e6/tvYmI= -github.com/pingcap/tidb 
v1.1.0-beta.0.20191023070859-58fc7d44f73b h1:6GfcYOX9/CCxPnNOivVxiDYXbZrCHU1mRp691iw9EYs= -github.com/pingcap/tidb v1.1.0-beta.0.20191023070859-58fc7d44f73b/go.mod h1:YfrHdQ613A+E2FSugyXOdJmeZQbXNjpXX2doNe8MGj8= -github.com/pingcap/tidb-tools v2.1.3-0.20190321065848-1e8b48f5c168+incompatible h1:MkWCxgZpJBgY2f4HtwWMMFzSBb3+JPzeJgF3VrXE/bU= -github.com/pingcap/tidb-tools v2.1.3-0.20190321065848-1e8b48f5c168+incompatible/go.mod h1:XGdcy9+yqlDSEMTpOXnwf3hiTeqrV6MN/u1se9N8yIM= -github.com/pingcap/tipb v0.0.0-20191015023537-709b39e7f8bb/go.mod h1:RtkHW8WbcNxj8lsbzjaILci01CtYnYbIkQhjyZWrWVI= -github.com/pingcap/tipb v0.0.0-20191022094114-a2e8c3fa634b h1:DZ0cTsn4lGMNaRjkUFKBtHn4s2F8KFMm83lWvSo+x7c= -github.com/pingcap/tipb v0.0.0-20191022094114-a2e8c3fa634b/go.mod h1:RtkHW8WbcNxj8lsbzjaILci01CtYnYbIkQhjyZWrWVI= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= -github.com/prometheus/client_golang v0.9.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= github.com/prometheus/client_golang v1.0.0 h1:vrDKnkGzuGvhNAL56c7DBz29ZL+KxnoR0x7enabFceM= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_golang v1.1.0 h1:BQ53HtBmfOitExawJ6LokA4x8ov/z0SYYb0+HxJfRI8= github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g= -github.com/prometheus/client_model v0.0.0-20170216185247-6f3806018612/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= -github.com/prometheus/client_model v0.0.0-20171117100541-99fa1f4be8e5/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90 h1:S/YWwWx/RA8rT8tKFRuGUZhuA90OyIBpPCXkcbwU8DE= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4 h1:gQz4mCbXsO+nc9n1hCxHcGA3Zx3Eo+UHZoInFGUIXNM= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/common v0.0.0-20180518154759-7600349dcfe1/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= -github.com/prometheus/common v0.0.0-20181020173914-7e9e6cabbd39/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.1 h1:K0MGApIoQvMw27RTdJkPbr3JZ7DNbtxQNyi5STVM6Kw= github.com/prometheus/common v0.4.1/go.mod 
h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.6.0 h1:kRhiuYSXR3+uv2IbVbZhUxK5zVD/2pp3Gd2PpvPkpEo= github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc= -github.com/prometheus/procfs v0.0.0-20180612222113-7d6f385de8be/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.2 h1:6LJUbpNm42llc4HRCuvApCSWB/WfhuNo9K98Q9sNGfs= @@ -463,20 +356,11 @@ github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a h1:9ZKAASQSHhD github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/rcrowley/go-metrics v0.0.0-20190826022208-cac0b30c2563 h1:dY6ETXrvDG7Sa4vE8ZQG4yqWg6UnOcbqTAahkV813vQ= github.com/rcrowley/go-metrics v0.0.0-20190826022208-cac0b30c2563/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= -github.com/remyoudompheng/bigfft v0.0.0-20190512091148-babf20351dd7/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= -github.com/remyoudompheng/bigfft v0.0.0-20190728182440-6a916e37a237 h1:HQagqIiBmr8YXawX/le3+O26N+vPPC1PtjaF3mwnook= -github.com/remyoudompheng/bigfft v0.0.0-20190728182440-6a916e37a237/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= github.com/rwcarlsen/goexif v0.0.0-20190401172101-9e8deecbddbd h1:CmH9+J6ZSsIjUK3dcGsnCnO41eRBOnY12zwkn5qVwgc= github.com/rwcarlsen/goexif v0.0.0-20190401172101-9e8deecbddbd/go.mod h1:hPqNNc0+uJM6H+SuU8sEs5K5IQeKccPqeSjfgcKGgPk= github.com/seaweedfs/fuse v0.0.0-20190510212405-310228904eff h1:uLd5zBvf5OA67wcVRePHrFt60bR4LSskaVhgVwyk0Jg= github.com/seaweedfs/fuse v0.0.0-20190510212405-310228904eff/go.mod h1:cubdLmQFqEUZ9vNJrznhgc3m3VMAJi/nY2Ix2axXkG0= -github.com/sergi/go-diff v1.0.1-0.20180205163309-da645544ed44/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= -github.com/shirou/gopsutil v2.18.10+incompatible h1:cy84jW6EVRPa5g9HAHrlbxMSIjBhDSX0OFYyMYminYs= -github.com/shirou/gopsutil v2.18.10+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= -github.com/shurcooL/httpfs v0.0.0-20171119174359-809beceb2371/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg= -github.com/shurcooL/vfsgen v0.0.0-20181020040650-a97a25d856ca/go.mod h1:TrYk7fJVaAttu97ZZKrO9UbRa8izdowaMIZcxYMbVaw= -github.com/sirupsen/logrus v1.0.5/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= github.com/sirupsen/logrus v1.2.0 h1:juTguoYk5qI21pwyTXY3B3Y5cOTH3ZUyZCg1v/mihuo= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4= @@ -492,12 +376,10 @@ github.com/spf13/afero v1.2.2 h1:5jhuqJyZCZf2JRofRvN/nIFgIWNzPa3/Vz8mYylgbWc= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= github.com/spf13/cast v1.3.0 h1:oget//CVOEoFewqQxwr0Ej5yjygnqGkvggSE/gB35Q8= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/jwalterweatherman v1.0.0 h1:XHEdyB+EcvlqZamSM4ZOMGlc93t6AcsBEu9Gc1vn7yk= github.com/spf13/jwalterweatherman v1.0.0/go.mod 
h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk= github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= -github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg= github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/viper v1.4.0 h1:yXHLWeravcrgGyFSyCgdYpXQ9dR9c/WED3pg1RhxqEU= @@ -514,39 +396,18 @@ github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoH github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/struCoder/pidusage v0.1.2/go.mod h1:pWBlW3YuSwRl6h7R5KbvA4N8oOqe9LjaKW5CwT1SPjI= -github.com/syndtr/goleveldb v0.0.0-20180815032940-ae2bd5eed72d/go.mod h1:Z4AUp2Km+PwemOoO/VB5AOx9XSsIItzFjoJlOSiYmn0= github.com/syndtr/goleveldb v1.0.0 h1:fBdIW9lB4Iz0n9khmH8w27SJ3QEJ7+IgjPEwGSZiFdE= github.com/syndtr/goleveldb v1.0.0/go.mod h1:ZVVdQEZoIme9iO1Ch2Jdy24qqXrMMOU6lpPAyBWyWuQ= -github.com/tiancaiamao/appdash v0.0.0-20181126055449-889f96f722a2/go.mod h1:2PfKggNGDuadAa0LElHrByyrz4JPZ9fFx6Gs7nx7ZZU= github.com/tidwall/gjson v1.3.2 h1:+7p3qQFaH3fOMXAJSrdZwGKcOO/lYdGS0HqGhPqDdTI= github.com/tidwall/gjson v1.3.2/go.mod h1:P256ACg0Mn+j1RXIDXoss50DeIABTYK1PULOJHhxOls= github.com/tidwall/match v1.0.1 h1:PnKP62LPNxHKTwvHHZZzdOAOCtsJTjo6dZLCwpKm5xc= github.com/tidwall/match v1.0.1/go.mod h1:LujAq0jyVjBy028G1WhWfIzbpQfMO8bBZ6Tyb0+pL9E= github.com/tidwall/pretty v1.0.0 h1:HsD+QiTn7sK6flMKIvNmpqz1qrpP3Ps6jOKIKMooyg4= github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= -github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= -github.com/tmc/grpc-websocket-proxy v0.0.0-20171017195756-830351dc03c6/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5 h1:LnC5Kc/wtumK+WB441p7ynQJzVuNRJiqddSIE3IlSEQ= github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= -github.com/uber-go/atomic v1.3.2/go.mod h1:/Ct5t2lcmbJ4OSe/waGBoaVvVqtO0bmtfVNex1PFV8g= -github.com/uber-go/atomic v1.4.0 h1:yOuPqEq4ovnhEjpHmfFwsqBXDYbQeT6Nb0bwD6XnD5o= -github.com/uber-go/atomic v1.4.0/go.mod h1:/Ct5t2lcmbJ4OSe/waGBoaVvVqtO0bmtfVNex1PFV8g= -github.com/uber/jaeger-client-go v2.15.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= -github.com/uber/jaeger-client-go v2.17.0+incompatible h1:35tpDuT3k0oBiN/aGoSWuiFaqKgKZSciSMnWrazhSHE= -github.com/uber/jaeger-client-go v2.17.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= -github.com/uber/jaeger-lib v1.5.0/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U= -github.com/uber/jaeger-lib v2.0.0+incompatible h1:iMSCV0rmXEogjNWPh2D0xk9YVKvrtGoHJNe9ebLu/pw= -github.com/uber/jaeger-lib v2.0.0+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U= -github.com/ugorji/go v1.1.2/go.mod h1:hnLbHMwcvSihnDhEfx2/BzKp2xb0Y+ErdfYcrs9tkJQ= github.com/ugorji/go v1.1.4 h1:j4s+tAvLfL3bZyefP2SEWmhBzmuIlH/eqNuPdFPgngw= github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= 
-github.com/ugorji/go/codec v0.0.0-20190204201341-e444a5086c43/go.mod h1:iT03XoTwV7xq/+UGwKO3UbC1nNNlopQiY61beSdrtOA= -github.com/unrolled/render v0.0.0-20171102162132-65450fb6b2d3/go.mod h1:tu82oB5W2ykJRVioYsB+IQKcft7ryBr7w12qMBUPyXg= -github.com/unrolled/render v0.0.0-20180914162206-b9786414de4d h1:ggUgChAeyge4NZ4QUw6lhHsVymzwSDJOZcE0s2X8S20= -github.com/unrolled/render v0.0.0-20180914162206-b9786414de4d/go.mod h1:tu82oB5W2ykJRVioYsB+IQKcft7ryBr7w12qMBUPyXg= -github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= -github.com/urfave/negroni v0.3.0/go.mod h1:Meg73S6kFm/4PpbYdq35yYWoCZ9mS/YSx+lKnmiohz4= github.com/willf/bitset v1.1.10 h1:NotGKqX0KwQ72NUzqrjZq5ipPNDQex9lo3WpaS8L2sc= github.com/willf/bitset v1.1.10/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4= github.com/willf/bloom v2.0.3+incompatible h1:QDacWdqcAUI1MPOwIQZRy9kOR7yxfyEmxX8Wdm2/JPA= @@ -558,26 +419,22 @@ github.com/xdg/stringprep v1.0.0/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0 github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 h1:eY9dn8+vbi4tKz5Qo6v2eYzo7kUS51QINcR5jNpbZS8= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= -github.com/yookoala/realpath v1.0.0/go.mod h1:gJJMA9wuX7AcqLy1+ffPatSCySA1FQ2S8Ya9AIoYBpE= go.etcd.io/bbolt v1.3.2 h1:Z/90sZLPOeCy2PwprqkFa25PdkusRzaj9P8zm/KNyvk= go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.3 h1:MUGmc65QhB3pIlaQ5bB4LwqSj6GIonVJXpZiaKNyaKk= go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= -go.etcd.io/etcd v0.0.0-20190320044326-77d4b742cdbf/go.mod h1:KSGwdbiFchh5KIC9My2+ZVl5/3ANcwohw50dpPwa2cw= go.etcd.io/etcd v3.3.15+incompatible h1:0VpOVCF6EFnJptt8Jh0EWEHO4j2fepyV1fpu9xz/UoQ= go.etcd.io/etcd v3.3.15+incompatible/go.mod h1:yaeTdrJi5lOmYerz05bd8+V7KubZs8YSFZfzsF9A6aI= go.opencensus.io v0.15.0/go.mod h1:UffZAU+4sDEINUGP/B7UfBBkq4fqLu9zXAX7ke6CHW0= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0 h1:C9hSCOW830chIVkdja34wa6Ky+IzWllkUinR+BtRZd4= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= -go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.4.0 h1:cxzIVoETapQEqDhQu3QfnvXAV4AlzcvUCxkVUFw3+EU= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/multierr v1.1.0 h1:HoEmRHQPVSqub6w2z2d2EOVs2fjyFRGyofhKuyDq0QI= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/multierr v1.2.0 h1:6I+W7f5VwC5SV9dNrZ3qXrDB9mD0dyGOi/ZJmYw03T4= go.uber.org/multierr v1.2.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= -go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.10.0 h1:ORx85nbTijNz8ljznvCMR1ZBIPKFn3jQrag10X2AsuM= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= gocloud.dev v0.16.0 h1:hWeaQWxamGerwsU7B9xSWvUjx0p7TwG8fcHro2TzbbM= @@ -586,7 +443,6 @@ gocloud.dev/pubsub/natspubsub v0.16.0 h1:MoBGXULDzb1fVaZsGWO5cUCgr6yoI/DHhau8OPG gocloud.dev/pubsub/natspubsub v0.16.0/go.mod h1:0n7pT7PkLMClBUHDrOkHfOFVr/o/6kawNMwsyAbwadI= gocloud.dev/pubsub/rabbitpubsub v0.16.0 h1:Bkv2njMSl2tmT3tGbvbwpiIDAXBIpqzP9dmts+rhD4E= gocloud.dev/pubsub/rabbitpubsub v0.16.0/go.mod 
h1:JJVdUUIqwgaaMJg/1xHQza0g4sI/4KHHSNiGE+pn4JM= -golang.org/x/crypto v0.0.0-20180608092829-8ac0e0d97ce4/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= @@ -595,7 +451,6 @@ golang.org/x/crypto v0.0.0-20190530122614-20be4c3c3ed5/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5 h1:58fnuSXlxZmFdJyvtTFVmVhcMLU6v5fEb/ok4wyqtNU= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190909091759-094676da4a83/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190911031432-227b76d455e7 h1:0hQKqeLdqlt5iIwVOBErRisrHJAN57yOiPRQItI20fU= golang.org/x/crypto v0.0.0-20190911031432-227b76d455e7/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -614,7 +469,6 @@ golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181005035420-146acd28ed58/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -660,14 +514,12 @@ golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0 h1:HyfiK1WMnHj5FXFXatD+Qs1A/ golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190909082730-f460065e899a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190910064555-bbd175535a8b h1:3S2h5FadpNr0zUUCVZjlKIEYF+KaX/OBplTGo89CYHI= golang.org/x/sys v0.0.0-20190910064555-bbd175535a8b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= -golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 
h1:SvFZT6jyqRaOeXpc5h/JSfZenJ2O330aBsf7JfSUXmQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -705,9 +557,7 @@ google.golang.org/appengine v1.6.1 h1:QzqyMA1tlu6CgqCDUtU9V+ZKhLFT2dkJuANu5QaxI3 google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= google.golang.org/appengine v1.6.2 h1:j8RI1yW0SkI+paT6uGwMlrMI/6zwYA6/CFil8rxOzGI= google.golang.org/appengine v1.6.2/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= -google.golang.org/genproto v0.0.0-20180608181217-32ee49c4dd80/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20181004005441-af9cb2a35e7f/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= @@ -718,28 +568,21 @@ google.golang.org/genproto v0.0.0-20190620144150-6af8c5fc6601/go.mod h1:z3L6/3dT google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190905072037-92dd089d5514 h1:oFSK4421fpCKRrpzIpybyBVWyht05NegY9+L/3TLAZs= google.golang.org/genproto v0.0.0-20190905072037-92dd089d5514/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/grpc v0.0.0-20180607172857-7a6a684ca69e/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= -google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.23.0 h1:AzbTB6ux+okLTzP8Ru1Xs41C303zdcfEht7MQnYJt5A= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U= -gopkg.in/alecthomas/gometalinter.v2 v2.0.12/go.mod h1:NDRytsqEZyolNuAgTzJkZMkSQM7FIKyzVzGhjB/qfYo= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= -gopkg.in/alecthomas/kingpin.v3-unstable v3.0.0-20180810215634-df19058c872c/go.mod h1:3HH7i1SgMqlzxCcBmUHW657sD4Kvv9sC3HpL3YukzwA= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= gopkg.in/fsnotify.v1 v1.4.7 
h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= -gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/jcmturner/aescts.v1 v1.0.1 h1:cVVZBK2b1zY26haWB4vbBiZrfFQnfbTVrE3xZq6hrEw= @@ -756,8 +599,6 @@ gopkg.in/jcmturner/rpc.v1 v1.1.0 h1:QHIUxTX1ISuAv9dD2wJ9HWQVuWDX/Zc0PfeC2tjc4rU= gopkg.in/jcmturner/rpc.v1 v1.1.0/go.mod h1:YIdkC4XfD6GXbzje11McwsDuOlZQSb9W4vfLvuNnlv8= gopkg.in/karlseguin/expect.v1 v1.0.1 h1:9u0iUltnhFbJTHaSIH0EP+cuTU5rafIgmcsEsg2JQFw= gopkg.in/karlseguin/expect.v1 v1.0.1/go.mod h1:uB7QIJBcclvYbwlUDkSCsGjAOMis3fP280LyhuDEf2I= -gopkg.in/natefinch/lumberjack.v2 v2.0.0 h1:1Lc07Kr7qY4U2YPouBjpCLxpiyxIVoxqXgkXLknAOE8= -gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= @@ -773,6 +614,3 @@ pack.ag/amqp v0.11.2/go.mod h1:4/cbmt4EJXSKlG6LCfWHoqmN0uFdy5i/+YFz+fTfhV4= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= sigs.k8s.io/yaml v1.1.0 h1:4A07+ZFc2wgJwo8YNlQpr1rVlgUDlxXHhPJciaPY5gs= sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= -sourcegraph.com/sourcegraph/appdash v0.0.0-20180531100431-4c381bd170b4 h1:VO9oZbbkvTwqLimlQt15QNdOOBArT2dw/bvzsMZBiqQ= -sourcegraph.com/sourcegraph/appdash v0.0.0-20180531100431-4c381bd170b4/go.mod h1:hI742Nqp5OhwiqlzhgfbWU4mW4yO10fP+LoT9WOswdU= -sourcegraph.com/sourcegraph/appdash-data v0.0.0-20151005221446-73f23eafcf67/go.mod h1:L5q+DGLGOQFpo1snNEkLOJT2d1YTW66rWNzatr3He1k= diff --git a/weed/command/scaffold.go b/weed/command/scaffold.go index f4a08fb51..cb20adc72 100644 --- a/weed/command/scaffold.go +++ b/weed/command/scaffold.go @@ -166,11 +166,6 @@ enabled = false servers = "localhost:2379" timeout = "3s" -[tikv] -enabled = false -pdAddress = "192.168.199.113:2379" - - ` NOTIFICATION_TOML_EXAMPLE = ` diff --git a/weed/filer2/tikv/tikv_store.go b/weed/filer2/tikv/tikv_store.go deleted file mode 100644 index 2a9dd6648..000000000 --- a/weed/filer2/tikv/tikv_store.go +++ /dev/null @@ -1,253 +0,0 @@ -// +build !386 -// +build !arm -// +build !windows - -package tikv - -import ( - "bytes" - "context" - "crypto/md5" - "fmt" - "io" - - "github.com/chrislusf/seaweedfs/weed/filer2" - "github.com/chrislusf/seaweedfs/weed/glog" - "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" - weed_util "github.com/chrislusf/seaweedfs/weed/util" - - "github.com/pingcap/tidb/kv" - "github.com/pingcap/tidb/store/tikv" -) - -func init() { - filer2.Stores = append(filer2.Stores, &TikvStore{}) -} - -type TikvStore struct { - store kv.Storage -} - -func (store *TikvStore) GetName() string { - return "tikv" -} - -func (store *TikvStore) Initialize(configuration weed_util.Configuration, prefix string) (err error) { - pdAddr := configuration.GetString(prefix + "pdAddress") - return store.initialize(pdAddr) -} - -func (store *TikvStore) initialize(pdAddr string) (err error) { - glog.Infof("filer store tikv pd address: %s", pdAddr) - - driver := tikv.Driver{} - - store.store, err = 
driver.Open(fmt.Sprintf("tikv://%s", pdAddr)) - - if err != nil { - return fmt.Errorf("open tikv %s : %v", pdAddr, err) - } - - return -} - -func (store *TikvStore) BeginTransaction(ctx context.Context) (context.Context, error) { - tx, err := store.store.Begin() - if err != nil { - return ctx, err - } - return context.WithValue(ctx, "tx", tx), nil -} -func (store *TikvStore) CommitTransaction(ctx context.Context) error { - tx, ok := ctx.Value("tx").(kv.Transaction) - if ok { - return tx.Commit(ctx) - } - return nil -} -func (store *TikvStore) RollbackTransaction(ctx context.Context) error { - tx, ok := ctx.Value("tx").(kv.Transaction) - if ok { - return tx.Rollback() - } - return nil -} - -func (store *TikvStore) getTx(ctx context.Context) kv.Transaction { - if tx, ok := ctx.Value("tx").(kv.Transaction); ok { - return tx - } - return nil -} - -func (store *TikvStore) InsertEntry(ctx context.Context, entry *filer2.Entry) (err error) { - dir, name := entry.DirAndName() - key := genKey(dir, name) - - value, err := entry.EncodeAttributesAndChunks() - if err != nil { - return fmt.Errorf("encoding %s %+v: %v", entry.FullPath, entry.Attr, err) - } - - err = store.getTx(ctx).Set(key, value) - - if err != nil { - return fmt.Errorf("persisting %s : %v", entry.FullPath, err) - } - - // println("saved", entry.FullPath, "chunks", len(entry.Chunks)) - - return nil -} - -func (store *TikvStore) UpdateEntry(ctx context.Context, entry *filer2.Entry) (err error) { - - return store.InsertEntry(ctx, entry) -} - -func (store *TikvStore) FindEntry(ctx context.Context, fullpath filer2.FullPath) (entry *filer2.Entry, err error) { - dir, name := fullpath.DirAndName() - key := genKey(dir, name) - - data, err := store.getTx(ctx).Get(ctx, key) - - if err == kv.ErrNotExist { - return nil, filer_pb.ErrNotFound - } - if err != nil { - return nil, fmt.Errorf("get %s : %v", entry.FullPath, err) - } - - entry = &filer2.Entry{ - FullPath: fullpath, - } - err = entry.DecodeAttributesAndChunks(data) - if err != nil { - return entry, fmt.Errorf("decode %s : %v", entry.FullPath, err) - } - - // println("read", entry.FullPath, "chunks", len(entry.Chunks), "data", len(data), string(data)) - - return entry, nil -} - -func (store *TikvStore) DeleteEntry(ctx context.Context, fullpath filer2.FullPath) (err error) { - dir, name := fullpath.DirAndName() - key := genKey(dir, name) - - err = store.getTx(ctx).Delete(key) - if err != nil { - return fmt.Errorf("delete %s : %v", fullpath, err) - } - - return nil -} - -func (store *TikvStore) DeleteFolderChildren(ctx context.Context, fullpath filer2.FullPath) (err error) { - - directoryPrefix := genDirectoryKeyPrefix(fullpath, "") - - tx := store.getTx(ctx) - - iter, err := tx.Iter(directoryPrefix, nil) - if err != nil { - return fmt.Errorf("deleteFolderChildren %s: %v", fullpath, err) - } - defer iter.Close() - for iter.Valid() { - key := iter.Key() - if !bytes.HasPrefix(key, directoryPrefix) { - break - } - fileName := getNameFromKey(key) - if fileName == "" { - iter.Next() - continue - } - - if err = tx.Delete(genKey(string(fullpath), fileName)); err != nil { - return fmt.Errorf("delete %s : %v", fullpath, err) - } - - iter.Next() - } - - return nil -} - -func (store *TikvStore) ListDirectoryEntries(ctx context.Context, fullpath filer2.FullPath, startFileName string, inclusive bool, - limit int) (entries []*filer2.Entry, err error) { - - directoryPrefix := genDirectoryKeyPrefix(fullpath, "") - lastFileStart := genDirectoryKeyPrefix(fullpath, startFileName) - - iter, err := 
store.getTx(ctx).Iter(lastFileStart, nil) - if err != nil { - return nil, fmt.Errorf("list %s: %v", fullpath, err) - } - defer iter.Close() - for iter.Valid() { - key := iter.Key() - if !bytes.HasPrefix(key, directoryPrefix) { - break - } - fileName := getNameFromKey(key) - if fileName == "" { - iter.Next() - continue - } - if fileName == startFileName && !inclusive { - iter.Next() - continue - } - limit-- - if limit < 0 { - break - } - entry := &filer2.Entry{ - FullPath: filer2.NewFullPath(string(fullpath), fileName), - } - - // println("list", entry.FullPath, "chunks", len(entry.Chunks)) - - if decodeErr := entry.DecodeAttributesAndChunks(iter.Value()); decodeErr != nil { - err = decodeErr - glog.V(0).Infof("list %s : %v", entry.FullPath, err) - break - } - entries = append(entries, entry) - iter.Next() - } - - return entries, err -} - -func genKey(dirPath, fileName string) (key []byte) { - key = hashToBytes(dirPath) - key = append(key, []byte(fileName)...) - return key -} - -func genDirectoryKeyPrefix(fullpath filer2.FullPath, startFileName string) (keyPrefix []byte) { - keyPrefix = hashToBytes(string(fullpath)) - if len(startFileName) > 0 { - keyPrefix = append(keyPrefix, []byte(startFileName)...) - } - return keyPrefix -} - -func getNameFromKey(key []byte) string { - - return string(key[md5.Size:]) - -} - -// hash directory -func hashToBytes(dir string) []byte { - h := md5.New() - io.WriteString(h, dir) - - b := h.Sum(nil) - - return b -} diff --git a/weed/filer2/tikv/tikv_store_unsupported.go b/weed/filer2/tikv/tikv_store_unsupported.go deleted file mode 100644 index 713c84bf8..000000000 --- a/weed/filer2/tikv/tikv_store_unsupported.go +++ /dev/null @@ -1,65 +0,0 @@ -// +build 386 arm windows - -package tikv - -import ( - "context" - "fmt" - - "github.com/chrislusf/seaweedfs/weed/filer2" - weed_util "github.com/chrislusf/seaweedfs/weed/util" -) - -func init() { - filer2.Stores = append(filer2.Stores, &TikvStore{}) -} - -type TikvStore struct { -} - -func (store *TikvStore) GetName() string { - return "tikv" -} - -func (store *TikvStore) Initialize(configuration weed_util.Configuration, prefix string) (err error) { - return fmt.Errorf("not implemented for 32 bit computers") -} - -func (store *TikvStore) initialize(pdAddr string) (err error) { - return fmt.Errorf("not implemented for 32 bit computers") -} - -func (store *TikvStore) BeginTransaction(ctx context.Context) (context.Context, error) { - return nil, fmt.Errorf("not implemented for 32 bit computers") -} -func (store *TikvStore) CommitTransaction(ctx context.Context) error { - return fmt.Errorf("not implemented for 32 bit computers") -} -func (store *TikvStore) RollbackTransaction(ctx context.Context) error { - return fmt.Errorf("not implemented for 32 bit computers") -} - -func (store *TikvStore) InsertEntry(ctx context.Context, entry *filer2.Entry) (err error) { - return fmt.Errorf("not implemented for 32 bit computers") -} - -func (store *TikvStore) UpdateEntry(ctx context.Context, entry *filer2.Entry) (err error) { - return fmt.Errorf("not implemented for 32 bit computers") -} - -func (store *TikvStore) FindEntry(ctx context.Context, fullpath filer2.FullPath) (entry *filer2.Entry, err error) { - return nil, fmt.Errorf("not implemented for 32 bit computers") -} - -func (store *TikvStore) DeleteEntry(ctx context.Context, fullpath filer2.FullPath) (err error) { - return fmt.Errorf("not implemented for 32 bit computers") -} - -func (store *TikvStore) DeleteFolderChildren(ctx context.Context, fullpath filer2.FullPath) (err 
error) {
-	return fmt.Errorf("not implemented for 32 bit computers")
-}
-
-func (store *TikvStore) ListDirectoryEntries(ctx context.Context, fullpath filer2.FullPath, startFileName string, inclusive bool,
-	limit int) (entries []*filer2.Entry, err error) {
-	return nil, fmt.Errorf("not implemented for 32 bit computers")
-}
diff --git a/weed/server/filer_server.go b/weed/server/filer_server.go
index 70da9094b..656bb2ed8 100644
--- a/weed/server/filer_server.go
+++ b/weed/server/filer_server.go
@@ -22,7 +22,6 @@ import (
 	_ "github.com/chrislusf/seaweedfs/weed/filer2/mysql"
 	_ "github.com/chrislusf/seaweedfs/weed/filer2/postgres"
 	_ "github.com/chrislusf/seaweedfs/weed/filer2/redis"
-	_ "github.com/chrislusf/seaweedfs/weed/filer2/tikv"
 	"github.com/chrislusf/seaweedfs/weed/glog"
 	"github.com/chrislusf/seaweedfs/weed/notification"
 	_ "github.com/chrislusf/seaweedfs/weed/notification/aws_sqs"

From 41bbea03009065c494ae23d8f08431137d4e392d Mon Sep 17 00:00:00 2001
From: Chris Lu
Date: Thu, 12 Mar 2020 13:23:25 -0700
Subject: [PATCH 0234/2432] tweaking snap

---
 snap/README.md      | 49 +++++++++++++++++++++++++++++++++++++++++++++
 snap/snapcraft.yaml | 12 ++++++-----
 2 files changed, 56 insertions(+), 5 deletions(-)
 create mode 100644 snap/README.md

diff --git a/snap/README.md b/snap/README.md
new file mode 100644
index 000000000..5752bd4af
--- /dev/null
+++ b/snap/README.md
@@ -0,0 +1,49 @@
+Hi
+
+This PR adds support for building a snap package of seaweedfs. Snaps are cross distro Linux software packages. One snap can be installed on Ubuntu all supported LTS and non LTS releases from 14.04 onward. Additionally they can be installed on Debian, Manjaro, Fedora, OpenSUSE and others. Making a snap of seaweedfs enables you to provide automatic updates on your schedule to your users via the snap store.
+
+If accepted, you can use snapcraft locally, a CI system such as travis or circle-ci, or our free build system (build.snapcraft.io) to create snaps and upload to the store (snapcraft.io/store).
+
+To test this PR locally, I used an Ubuntu 16.04 VM, with the following steps.
+
+```
+snap install snapcraft --classic
+git clone https://github.com/popey/seaweedfs
+cd seaweedfs
+git checkout add-snapcraft
+snapcraft
+```
+
+This generated a .snap file from the tip of master (I could have checked out a stable release instead). It can be installed with:-
+
+    snap install seaweedfs_0.99+git30.79371c0-dirty_amd64.snap --dangerous
+
+(the --dangerous is necessary because we’re installing an app which hasn’t gone through the snap store review process)
+
+Once installed, the (namespaced) weed command can be executed. If you accept this and land the snap in the store, we can request an ‘alias’ so users can use the ‘weed’ command rather than the namespaced ‘seaweedfs.weed’.
+
+- Run the command
+- Create sample config. Snaps are securely confined so their home directory is in a different place
+  mkdir ~/snap/seaweedfs/current/.seaweedfs
+  seaweedfs.weed scaffold > ~/snap/seaweedfs/current/.seaweedfs/filer.toml
+- Run a server
+  seaweedfs.weed server
+- Run a benchmark
+  seaweedfs.weed benchmark
+
+Results from my test run: https://paste.ubuntu.com/p/95Xk8zFQ7w/
+
+If landed, you will need to:-
+
+- Register an account in the snap store https://snapcraft.io/account
+- Register the ‘seaweedfs’ name in the store
+  - snapcraft login
+  - snapcraft register seaweedfs
+- Upload a built snap to the store
+  - snapcraft push seaweedfs_0.99+git30.79371c0-dirty_amd64.snap --release edge
+- Test installing on a clean Ubuntu 16.04 machine
+  - snap install seaweedfs --edge
+
+The store supports multiple risk levels as “channels” with the edge channel typically used to host the latest build from git master. Stable is where stable releases are pushed. Optionally beta and candidate channels can also be used if needed.
+
+Once you are happy, you can push a stable release to the stable channel, update the store page, and promote the application online (we can help there).
diff --git a/snap/snapcraft.yaml b/snap/snapcraft.yaml
index 6198cfb3c..6449e9bfb 100644
--- a/snap/snapcraft.yaml
+++ b/snap/snapcraft.yaml
@@ -6,10 +6,12 @@ version: git
 summary: SeaweedFS
 # Longer multi-line description found in 'snap info $SNAPNAME'
 description: |
-  SeaweedFS is a simple and highly scalable distributed file system.
-  There are two objectives: to store billions of files! to serve the
-  files fast! SeaweedFS implements an object store with O(1) disk seek,
-  and an optional Filer with POSIX interface.
+  SeaweedFS is a simple and highly scalable distributed file system,
+  to store and serve billions of files fast!
+  SeaweedFS implements an object store with O(1) disk seek,
+  transparent cloud integration, and an optional Filer with POSIX interface,
+  supporting S3 API, Rack-Aware Erasure Coding for warm storage,
+  FUSE mount, Hadoop compatible, WebDAV.
 
 # Grade is stable for snaps expected to land in the stable channel
 grade: stable
@@ -31,7 +33,7 @@ parts:
     go-importpath: github.com/chrislusf/seaweedfs
     go:
       # Defines the version of golang which will be bootstrapped into the snap
-      source-tag: go1.10.4
+      source-tag: go1.14
 
 # Apps exposes the binaries inside the snap to the host system once installed
 apps:

From a7b027121a71b798a66fe387ee7316de2fb0ff7e Mon Sep 17 00:00:00 2001
From: Chris Lu
Date: Thu, 12 Mar 2020 21:37:08 -0700
Subject: [PATCH 0235/2432] Update README.md

---
 README.md | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/README.md b/README.md
index 0c8a5d9c7..4a064e96b 100644
--- a/README.md
+++ b/README.md
@@ -116,6 +116,7 @@ On top of the object store, optional [Filer] can support directories and POSIX a
 * [Hadoop Compatible File System][Hadoop] to access files from Hadoop/Spark/Flink/etc jobs.
 * [Async Backup To Cloud][BackupToCloud] has extremely fast local access and backups to Amazon S3, Google Cloud Storage, Azure, BackBlaze.
 * [WebDAV] access as a mapped drive on Mac and Windows, or from mobile devices.
+* [Encrypted Storage][FilerDataEncryption] safely stores the encrypted data.
[Filer]: https://github.com/chrislusf/seaweedfs/wiki/Directories-and-Files [Mount]: https://github.com/chrislusf/seaweedfs/wiki/Mount @@ -125,6 +126,7 @@ On top of the object store, optional [Filer] can support directories and POSIX a [WebDAV]: https://github.com/chrislusf/seaweedfs/wiki/WebDAV [ErasureCoding]: https://github.com/chrislusf/seaweedfs/wiki/Erasure-coding-for-warm-storage [CloudTier]: https://github.com/chrislusf/seaweedfs/wiki/Cloud-Tier +[FilerDataEncryption]: https://github.com/chrislusf/seaweedfs/wiki/Filer-Data-Encryption [Back to TOC](#table-of-contents) From 595dd4fc9a2b040084b37c453528d272f23841e2 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Thu, 12 Mar 2020 21:53:43 -0700 Subject: [PATCH 0236/2432] Update README.md --- README.md | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index 4a064e96b..1691b8c76 100644 --- a/README.md +++ b/README.md @@ -98,6 +98,7 @@ On top of the object store, optional [Filer] can support directories and POSIX a * Automatic master servers failover - no single point of failure (SPOF). * Automatic Gzip compression depending on file mime type. * Automatic compaction to reclaim disk space after deletion or update. +* [Automatic TTL expires entries][VolumeServerTTL]. * Servers in the same cluster can have different disk spaces, file systems, OS etc. * Adding/Removing servers does **not** cause any data re-balancing. * Optionally fix the orientation for jpeg pictures. @@ -117,6 +118,7 @@ On top of the object store, optional [Filer] can support directories and POSIX a * [Async Backup To Cloud][BackupToCloud] has extremely fast local access and backups to Amazon S3, Google Cloud Storage, Azure, BackBlaze. * [WebDAV] access as a mapped drive on Mac and Windows, or from mobile devices. * [Encrypted Storage][FilerDataEncryption] safely stores the encrypted data. +* [File TTL][FilerTTL] automatically purge file metadata and actual file data. [Filer]: https://github.com/chrislusf/seaweedfs/wiki/Directories-and-Files [Mount]: https://github.com/chrislusf/seaweedfs/wiki/Mount @@ -127,6 +129,8 @@ On top of the object store, optional [Filer] can support directories and POSIX a [ErasureCoding]: https://github.com/chrislusf/seaweedfs/wiki/Erasure-coding-for-warm-storage [CloudTier]: https://github.com/chrislusf/seaweedfs/wiki/Cloud-Tier [FilerDataEncryption]: https://github.com/chrislusf/seaweedfs/wiki/Filer-Data-Encryption +[FilerTTL]: https://github.com/chrislusf/seaweedfs/wiki/Filer-Stores +[VolumeServerTTL]: https://github.com/chrislusf/seaweedfs/wiki/Store-file-with-a-Time-To-Live [Back to TOC](#table-of-contents) @@ -388,7 +392,7 @@ Ceph uses CRUSH hashing to automatically manage the data placement. SeaweedFS pl SeaweedFS is optimized for small files. Small files are stored as one continuous block of content, with at most 8 unused bytes between files. Small file access is O(1) disk read. -SeaweedFS Filer uses off-the-shelf stores, such as MySql, Postgres, Redis, Etcd, Cassandra, MemSql, TiDB, CockroachCB, to manage file directories. There are proven, scalable, and easier to manage. +SeaweedFS Filer uses off-the-shelf stores, such as MySql, Postgres, Redis, Etcd, Cassandra, MemSql, TiDB, CockroachCB, to manage file directories. These stores are proven, scalable, and easier to manage. 
| SeaweedFS | comparable to Ceph | advantage | | ------------- | ------------- | ---------------- | From 2cc8e2ea3a29716eca6dae646038242980b98ff2 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Thu, 12 Mar 2020 21:54:48 -0700 Subject: [PATCH 0237/2432] Update README.md --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 1691b8c76..d4ac9177d 100644 --- a/README.md +++ b/README.md @@ -98,7 +98,7 @@ On top of the object store, optional [Filer] can support directories and POSIX a * Automatic master servers failover - no single point of failure (SPOF). * Automatic Gzip compression depending on file mime type. * Automatic compaction to reclaim disk space after deletion or update. -* [Automatic TTL expires entries][VolumeServerTTL]. +* [Automatic entry TTL expiration][VolumeServerTTL]. * Servers in the same cluster can have different disk spaces, file systems, OS etc. * Adding/Removing servers does **not** cause any data re-balancing. * Optionally fix the orientation for jpeg pictures. From 025e586c9137da376b8a759075ce951cb195acf3 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Fri, 13 Mar 2020 14:25:56 -0700 Subject: [PATCH 0238/2432] testing vacuum --- .../repeated_vacuum/repeated_vacuum.go | 21 +++++++++++++++---- 1 file changed, 17 insertions(+), 4 deletions(-) diff --git a/unmaintained/repeated_vacuum/repeated_vacuum.go b/unmaintained/repeated_vacuum/repeated_vacuum.go index 96d4ccdf6..12ac42dbe 100644 --- a/unmaintained/repeated_vacuum/repeated_vacuum.go +++ b/unmaintained/repeated_vacuum/repeated_vacuum.go @@ -5,6 +5,7 @@ import ( "fmt" "log" "math/rand" + "time" "google.golang.org/grpc" @@ -17,6 +18,7 @@ var ( master = flag.String("master", "127.0.0.1:9333", "the master server") repeat = flag.Int("n", 5, "repeat how many times") garbageThreshold = flag.Float64("garbageThreshold", 0.3, "garbageThreshold") + replication = flag.String("replication", "", "replication 000, 001, 002, etc") ) func main() { @@ -27,6 +29,17 @@ func main() { genFile(grpcDialOption, 0) + go func() { + for { + println("vacuum threshold", *garbageThreshold) + _, err := util.Get(fmt.Sprintf("http://%s/vol/vacuum?garbageThreshold=%f", *master, *garbageThreshold)) + if err != nil { + log.Fatalf("vacuum: %v", err) + } + time.Sleep(time.Second) + } + }() + for i := 0; i < *repeat; i++ { // create 2 files, and delete one of them @@ -34,15 +47,15 @@ func main() { util.Delete(targetUrl, string(assignResult.Auth)) - println("vacuum", i, "threshold", *garbageThreshold) - util.Get(fmt.Sprintf("http://%s/vol/vacuum?garbageThreshold=%f", *master, *garbageThreshold)) - } } func genFile(grpcDialOption grpc.DialOption, i int) (*operation.AssignResult, string) { - assignResult, err := operation.Assign(*master, grpcDialOption, &operation.VolumeAssignRequest{Count: 1}) + assignResult, err := operation.Assign(*master, grpcDialOption, &operation.VolumeAssignRequest{ + Count: 1, + Replication: *replication, + }) if err != nil { log.Fatalf("assign: %v", err) } From c90eb0da1f4cc479b85d9adec975c58f954e9dab Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Fri, 13 Mar 2020 15:41:24 -0700 Subject: [PATCH 0239/2432] volume: handling readonly volumes after compaction ensure readonly volumes are not added as writable --- weed/topology/topology_vacuum.go | 7 +++++-- weed/topology/volume_layout.go | 27 ++++++++++++++------------- 2 files changed, 19 insertions(+), 15 deletions(-) diff --git a/weed/topology/topology_vacuum.go b/weed/topology/topology_vacuum.go index ca626e973..9d964489d 100644 --- 
a/weed/topology/topology_vacuum.go +++ b/weed/topology/topology_vacuum.go @@ -5,9 +5,10 @@ import ( "sync/atomic" "time" - "github.com/chrislusf/seaweedfs/weed/storage/needle" "google.golang.org/grpc" + "github.com/chrislusf/seaweedfs/weed/storage/needle" + "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/operation" "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb" @@ -105,7 +106,9 @@ func batchVacuumVolumeCommit(grpcDialOption grpc.DialOption, vl *VolumeLayout, v } else { glog.V(0).Infof("Complete Committing vacuum %d on %s", vid, dn.Url()) } - if isCommitSuccess { + } + if isCommitSuccess { + for _, dn := range locationlist.list { vl.SetVolumeAvailable(dn, vid) } } diff --git a/weed/topology/volume_layout.go b/weed/topology/volume_layout.go index 7633b28be..a0c66516d 100644 --- a/weed/topology/volume_layout.go +++ b/weed/topology/volume_layout.go @@ -51,6 +51,9 @@ func (vl *VolumeLayout) RegisterVolume(v *storage.VolumeInfo, dn *DataNode) { vl.accessLock.Lock() defer vl.accessLock.Unlock() + defer vl.ensureCorrectWritables(v) + defer vl.rememberOversizedVolume(v) + if _, ok := vl.vid2location[v.Id]; !ok { vl.vid2location[v.Id] = NewVolumeLocationList() } @@ -74,9 +77,6 @@ func (vl *VolumeLayout) RegisterVolume(v *storage.VolumeInfo, dn *DataNode) { } } - vl.rememberOversizedVolume(v) - vl.ensureCorrectWritables(v) - } func (vl *VolumeLayout) rememberOversizedVolume(v *storage.VolumeInfo) { @@ -109,22 +109,13 @@ func (vl *VolumeLayout) UnRegisterVolume(v *storage.VolumeInfo, dn *DataNode) { func (vl *VolumeLayout) ensureCorrectWritables(v *storage.VolumeInfo) { if vl.vid2location[v.Id].Length() == vl.rp.GetCopyCount() && vl.isWritable(v) { if _, ok := vl.oversizedVolumes[v.Id]; !ok { - vl.addToWritable(v.Id) + vl.setVolumeWritable(v.Id) } } else { vl.removeFromWritable(v.Id) } } -func (vl *VolumeLayout) addToWritable(vid needle.VolumeId) { - for _, id := range vl.writables { - if vid == id { - return - } - } - vl.writables = append(vl.writables, vid) -} - func (vl *VolumeLayout) isOversized(v *storage.VolumeInfo) bool { return uint64(v.Size) >= vl.volumeSizeLimit } @@ -270,7 +261,17 @@ func (vl *VolumeLayout) SetVolumeAvailable(dn *DataNode, vid needle.VolumeId) bo vl.accessLock.Lock() defer vl.accessLock.Unlock() + vInfo, err := dn.GetVolumesById(v.Id) + if err != nil { + return false + } + vl.vid2location[vid].Set(dn) + + if vInfo.ReadOnly { + return false + } + if vl.vid2location[vid].Length() == vl.rp.GetCopyCount() { return vl.setVolumeWritable(vid) } From 4ce23e88099c5b5a89351d1d6757f587c7ad36fe Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Fri, 13 Mar 2020 15:51:38 -0700 Subject: [PATCH 0240/2432] fix compilation error --- weed/topology/volume_layout.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/weed/topology/volume_layout.go b/weed/topology/volume_layout.go index a0c66516d..144e24713 100644 --- a/weed/topology/volume_layout.go +++ b/weed/topology/volume_layout.go @@ -261,7 +261,7 @@ func (vl *VolumeLayout) SetVolumeAvailable(dn *DataNode, vid needle.VolumeId) bo vl.accessLock.Lock() defer vl.accessLock.Unlock() - vInfo, err := dn.GetVolumesById(v.Id) + vInfo, err := dn.GetVolumesById(vid) if err != nil { return false } From 3cc9e858954c1c9d91af2054b60eb9c04f4a49c4 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Fri, 13 Mar 2020 16:17:44 -0700 Subject: [PATCH 0241/2432] volume: vacuum pass preallocate variable --- weed/topology/topology_vacuum.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git 
a/weed/topology/topology_vacuum.go b/weed/topology/topology_vacuum.go index 9d964489d..532029ac3 100644 --- a/weed/topology/topology_vacuum.go +++ b/weed/topology/topology_vacuum.go @@ -66,7 +66,8 @@ func batchVacuumVolumeCompact(grpcDialOption grpc.DialOption, vl *VolumeLayout, glog.V(0).Infoln(index, "Start vacuuming", vid, "on", url) err := operation.WithVolumeServerClient(url, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error { _, err := volumeServerClient.VacuumVolumeCompact(context.Background(), &volume_server_pb.VacuumVolumeCompactRequest{ - VolumeId: uint32(vid), + VolumeId: uint32(vid), + Preallocate: preallocate, }) return err }) From 7213f446db011334d57c49b9b96a8ba58929228b Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Fri, 13 Mar 2020 16:31:40 -0700 Subject: [PATCH 0242/2432] update grpc --- go.mod | 2 +- go.sum | 6 ++++++ 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/go.mod b/go.mod index d50ff1e3e..61716fe36 100644 --- a/go.mod +++ b/go.mod @@ -90,7 +90,7 @@ require ( google.golang.org/api v0.9.0 google.golang.org/appengine v1.6.2 // indirect google.golang.org/genproto v0.0.0-20190905072037-92dd089d5514 // indirect - google.golang.org/grpc v1.23.0 + google.golang.org/grpc v1.26.0 gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 // indirect gopkg.in/jcmturner/goidentity.v3 v3.0.0 // indirect gopkg.in/jcmturner/gokrb5.v7 v7.3.0 // indirect diff --git a/go.sum b/go.sum index d077ba67d..5c1d52afd 100644 --- a/go.sum +++ b/go.sum @@ -59,6 +59,7 @@ github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869 h1:DDGfHa7BWjL4Yn github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4= github.com/census-instrumentation/opencensus-proto v0.2.0 h1:LzQXZOgg4CQfE6bFvXGM30YZL1WW/M337pXml+GrcZ4= github.com/census-instrumentation/opencensus-proto v0.2.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/chrislusf/raft v0.0.0-20190225081310-10d6e2182d92 h1:lM9SFsh0EPXkyJyrTJqLZPAIJBtNFP6LNkYXu2MnSZI= github.com/chrislusf/raft v0.0.0-20190225081310-10d6e2182d92/go.mod h1:4jyiUCD5y548+yKW+oiHtccBiMaLCCbFBpK2t7X4eUo= @@ -101,6 +102,8 @@ github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21 h1:YEetp8 github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= github.com/eapache/queue v1.1.0 h1:YOEu7KNc61ntiQlcEeUIoDTJ2o8mQznoNvUhiigpIqc= github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/facebookgo/clock v0.0.0-20150410010913-600d898af40a h1:yDWHCSQ40h88yih2JAcL6Ls/kVkSE8GFACTGVnMPruw= github.com/facebookgo/clock v0.0.0-20150410010913-600d898af40a/go.mod h1:7Ga40egUymuWXxAe151lTNnCv97MddSOVsjpPPkityA= github.com/facebookgo/ensure v0.0.0-20200202191622-63f1cf65ac4c h1:8ISkoahWXwZR41ois5lSJBSVw4D0OV19Ht/JSTzvSv0= @@ -566,6 +569,7 @@ google.golang.org/genproto v0.0.0-20190508193815-b515fa19cec8/go.mod h1:VzzqZJRn google.golang.org/genproto v0.0.0-20190530194941-fb225487d101/go.mod 
h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s= google.golang.org/genproto v0.0.0-20190620144150-6af8c5fc6601/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s= google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190905072037-92dd089d5514 h1:oFSK4421fpCKRrpzIpybyBVWyht05NegY9+L/3TLAZs= google.golang.org/genproto v0.0.0-20190905072037-92dd089d5514/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= @@ -574,6 +578,8 @@ google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ij google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.23.0 h1:AzbTB6ux+okLTzP8Ru1Xs41C303zdcfEht7MQnYJt5A= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.26.0 h1:2dTRdpdFEEhJYQD8EMLB61nnrzSCTbG38PhqdhvOltg= +google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= From e2e691d9c23d2b18b032f4d464ca0756c134ed87 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Fri, 13 Mar 2020 23:53:15 -0700 Subject: [PATCH 0243/2432] clean up, add test --- weed/util/cipher.go | 21 --------------------- weed/util/cipher_test.go | 17 +++++++++++++++++ 2 files changed, 17 insertions(+), 21 deletions(-) create mode 100644 weed/util/cipher_test.go diff --git a/weed/util/cipher.go b/weed/util/cipher.go index 7bcb6559a..f044c2ca3 100644 --- a/weed/util/cipher.go +++ b/weed/util/cipher.go @@ -1,14 +1,11 @@ package util import ( - "bytes" "crypto/aes" "crypto/cipher" "crypto/rand" "errors" - "fmt" "io" - "io/ioutil" "github.com/chrislusf/seaweedfs/weed/glog" ) @@ -61,21 +58,3 @@ func Decrypt(ciphertext []byte, key CipherKey) ([]byte, error) { nonce, ciphertext := ciphertext[:nonceSize], ciphertext[nonceSize:] return gcm.Open(nil, nonce, ciphertext, nil) } - -func EncryptReader(clearReader io.Reader) (cipherKey CipherKey, encryptedReader io.ReadCloser, clearDataLen, encryptedDataLen int, err error) { - clearData, err := ioutil.ReadAll(clearReader) - if err != nil { - err = fmt.Errorf("read raw input: %v", err) - return - } - clearDataLen = len(clearData) - cipherKey = GenCipherKey() - encryptedData, err := Encrypt(clearData, cipherKey) - if err != nil { - err = fmt.Errorf("encrypt input: %v", err) - return - } - encryptedDataLen = len(encryptedData) - encryptedReader = ioutil.NopCloser(bytes.NewReader(encryptedData)) - return -} diff --git a/weed/util/cipher_test.go b/weed/util/cipher_test.go new file mode 100644 index 000000000..026c96ea3 --- /dev/null +++ b/weed/util/cipher_test.go @@ -0,0 +1,17 @@ +package util + +import ( + "encoding/base64" + "testing" +) + +func TestSameAsJavaImplementation(t *testing.T) { + str := "QVVhmqg112NMT7F+G/7QPynqSln3xPIhKdFGmTVKZD6IS0noyr2Z5kXFF6fPjZ/7Hq8kRhlmLeeqZUccxyaZHezOdgkjS6d4NTdHf5IjXzk7" + cipherText, _ := base64.StdEncoding.DecodeString(str) + secretKey := []byte("256-bit key for AES 256 GCM encr") + plantext, err := 
Decrypt(cipherText, CipherKey(secretKey)) + if err != nil { + println(err.Error()) + } + println(string(plantext)) +} From de1ba85346bd2af41ebb98f152bb769e2b630139 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Sat, 14 Mar 2020 00:27:57 -0700 Subject: [PATCH 0244/2432] HDFS support encrypted data storage --- other/java/client/pom.xml | 15 +- .../seaweedfs/client/FilerGrpcClient.java | 33 ++++- .../src/main/java/seaweedfs/client/Gzip.java | 37 +++++ .../java/seaweedfs/client/SeaweedCipher.java | 55 +++++++ .../java/seaweedfs/client/SeaweedRead.java | 137 +++++++++++++----- .../java/seaweedfs/client/SeaweedWrite.java | 40 ++++- .../seaweedfs/client/SeaweedCipherTest.java | 42 ++++++ other/java/hdfs2/dependency-reduced-pom.xml | 2 +- other/java/hdfs2/pom.xml | 2 +- other/java/hdfs3/dependency-reduced-pom.xml | 2 +- other/java/hdfs3/pom.xml | 2 +- 11 files changed, 308 insertions(+), 59 deletions(-) create mode 100644 other/java/client/src/main/java/seaweedfs/client/Gzip.java create mode 100644 other/java/client/src/main/java/seaweedfs/client/SeaweedCipher.java create mode 100644 other/java/client/src/test/java/seaweedfs/client/SeaweedCipherTest.java diff --git a/other/java/client/pom.xml b/other/java/client/pom.xml index 0c585a941..945071336 100644 --- a/other/java/client/pom.xml +++ b/other/java/client/pom.xml @@ -1,10 +1,11 @@ - + 4.0.0 com.github.chrislusf seaweedfs-client - 1.2.4 + 1.2.5 org.sonatype.oss @@ -88,8 +89,8 @@ org.apache.maven.plugins maven-compiler-plugin - 7 - 7 + 8 + 8 @@ -97,9 +98,11 @@ protobuf-maven-plugin 0.6.1 - com.google.protobuf:protoc:${protobuf.version}:exe:${os.detected.classifier} + com.google.protobuf:protoc:${protobuf.version}:exe:${os.detected.classifier} + grpc-java - io.grpc:protoc-gen-grpc-java:${grpc.version}:exe:${os.detected.classifier} + io.grpc:protoc-gen-grpc-java:${grpc.version}:exe:${os.detected.classifier} + diff --git a/other/java/client/src/main/java/seaweedfs/client/FilerGrpcClient.java b/other/java/client/src/main/java/seaweedfs/client/FilerGrpcClient.java index 3626c76de..3f5d1e8e9 100644 --- a/other/java/client/src/main/java/seaweedfs/client/FilerGrpcClient.java +++ b/other/java/client/src/main/java/seaweedfs/client/FilerGrpcClient.java @@ -14,12 +14,6 @@ import java.util.concurrent.TimeUnit; public class FilerGrpcClient { private static final Logger logger = LoggerFactory.getLogger(FilerGrpcClient.class); - - private final ManagedChannel channel; - private final SeaweedFilerGrpc.SeaweedFilerBlockingStub blockingStub; - private final SeaweedFilerGrpc.SeaweedFilerStub asyncStub; - private final SeaweedFilerGrpc.SeaweedFilerFutureStub futureStub; - static SslContext sslContext; static { @@ -30,6 +24,14 @@ public class FilerGrpcClient { } } + private final ManagedChannel channel; + private final SeaweedFilerGrpc.SeaweedFilerBlockingStub blockingStub; + private final SeaweedFilerGrpc.SeaweedFilerStub asyncStub; + private final SeaweedFilerGrpc.SeaweedFilerFutureStub futureStub; + private boolean cipher = false; + private String collection = ""; + private String replication = ""; + public FilerGrpcClient(String host, int grpcPort) { this(host, grpcPort, sslContext); } @@ -42,6 +44,13 @@ public class FilerGrpcClient { .negotiationType(NegotiationType.TLS) .sslContext(sslContext)); + FilerProto.GetFilerConfigurationResponse filerConfigurationResponse = + this.getBlockingStub().getFilerConfiguration( + FilerProto.GetFilerConfigurationRequest.newBuilder().build()); + cipher = filerConfigurationResponse.getCipher(); + collection = 
filerConfigurationResponse.getCollection(); + replication = filerConfigurationResponse.getReplication(); + } public FilerGrpcClient(ManagedChannelBuilder channelBuilder) { @@ -51,6 +60,18 @@ public class FilerGrpcClient { futureStub = SeaweedFilerGrpc.newFutureStub(channel); } + public boolean isCipher() { + return cipher; + } + + public String getCollection() { + return collection; + } + + public String getReplication() { + return replication; + } + public void shutdown() throws InterruptedException { channel.shutdown().awaitTermination(5, TimeUnit.SECONDS); } diff --git a/other/java/client/src/main/java/seaweedfs/client/Gzip.java b/other/java/client/src/main/java/seaweedfs/client/Gzip.java new file mode 100644 index 000000000..248285dd3 --- /dev/null +++ b/other/java/client/src/main/java/seaweedfs/client/Gzip.java @@ -0,0 +1,37 @@ +package seaweedfs.client; + +import java.io.ByteArrayInputStream; +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.io.InputStream; +import java.util.zip.GZIPInputStream; +import java.util.zip.GZIPOutputStream; + +public class Gzip { + public static byte[] compress(byte[] data) throws IOException { + ByteArrayOutputStream bos = new ByteArrayOutputStream(data.length); + GZIPOutputStream gzip = new GZIPOutputStream(bos); + gzip.write(data); + gzip.close(); + byte[] compressed = bos.toByteArray(); + bos.close(); + return compressed; + } + + public static byte[] decompress(byte[] compressed) throws IOException { + ByteArrayInputStream bis = new ByteArrayInputStream(compressed); + GZIPInputStream gis = new GZIPInputStream(bis); + return readAll(gis); + } + + private static byte[] readAll(InputStream input) throws IOException { + try( ByteArrayOutputStream output = new ByteArrayOutputStream()){ + byte[] buffer = new byte[4096]; + int n; + while (-1 != (n = input.read(buffer))) { + output.write(buffer, 0, n); + } + return output.toByteArray(); + } + } +} diff --git a/other/java/client/src/main/java/seaweedfs/client/SeaweedCipher.java b/other/java/client/src/main/java/seaweedfs/client/SeaweedCipher.java new file mode 100644 index 000000000..8d0ebd755 --- /dev/null +++ b/other/java/client/src/main/java/seaweedfs/client/SeaweedCipher.java @@ -0,0 +1,55 @@ +package seaweedfs.client; + +import javax.crypto.Cipher; +import javax.crypto.spec.GCMParameterSpec; +import javax.crypto.spec.SecretKeySpec; +import java.security.SecureRandom; + +public class SeaweedCipher { + // AES-GCM parameters + public static final int AES_KEY_SIZE = 256; // in bits + public static final int GCM_NONCE_LENGTH = 12; // in bytes + public static final int GCM_TAG_LENGTH = 16; // in bytes + + private static SecureRandom random = new SecureRandom(); + + public static byte[] genCipherKey() throws Exception { + byte[] key = new byte[AES_KEY_SIZE / 8]; + random.nextBytes(key); + return key; + } + + public static byte[] encrypt(byte[] clearTextbytes, byte[] cipherKey) throws Exception { + return encrypt(clearTextbytes, 0, clearTextbytes.length, cipherKey); + } + + public static byte[] encrypt(byte[] clearTextbytes, int offset, int length, byte[] cipherKey) throws Exception { + + final byte[] nonce = new byte[GCM_NONCE_LENGTH]; + random.nextBytes(nonce); + GCMParameterSpec spec = new GCMParameterSpec(GCM_TAG_LENGTH * 8, nonce); + SecretKeySpec keySpec = new SecretKeySpec(cipherKey, "AES"); + + Cipher AES_cipherInstance = Cipher.getInstance("AES/GCM/NoPadding"); + AES_cipherInstance.init(Cipher.ENCRYPT_MODE, keySpec, spec); + + byte[] encryptedText = 
AES_cipherInstance.doFinal(clearTextbytes, offset, length); + + byte[] iv = AES_cipherInstance.getIV(); + byte[] message = new byte[GCM_NONCE_LENGTH + clearTextbytes.length + GCM_TAG_LENGTH]; + System.arraycopy(iv, 0, message, 0, GCM_NONCE_LENGTH); + System.arraycopy(encryptedText, 0, message, GCM_NONCE_LENGTH, encryptedText.length); + + return message; + } + + public static byte[] decrypt(byte[] encryptedText, byte[] cipherKey) throws Exception { + final Cipher AES_cipherInstance = Cipher.getInstance("AES/GCM/NoPadding"); + GCMParameterSpec params = new GCMParameterSpec(GCM_TAG_LENGTH * 8, encryptedText, 0, GCM_NONCE_LENGTH); + SecretKeySpec keySpec = new SecretKeySpec(cipherKey, "AES"); + AES_cipherInstance.init(Cipher.DECRYPT_MODE, keySpec, params); + byte[] decryptedText = AES_cipherInstance.doFinal(encryptedText, GCM_NONCE_LENGTH, encryptedText.length - GCM_NONCE_LENGTH); + return decryptedText; + } + +} diff --git a/other/java/client/src/main/java/seaweedfs/client/SeaweedRead.java b/other/java/client/src/main/java/seaweedfs/client/SeaweedRead.java index b08c14467..d2717056f 100644 --- a/other/java/client/src/main/java/seaweedfs/client/SeaweedRead.java +++ b/other/java/client/src/main/java/seaweedfs/client/SeaweedRead.java @@ -6,6 +6,7 @@ import org.apache.http.HttpResponse; import org.apache.http.client.HttpClient; import org.apache.http.client.methods.HttpGet; import org.apache.http.impl.client.DefaultHttpClient; +import org.apache.http.util.EntityUtils; import java.io.Closeable; import java.io.IOException; @@ -31,7 +32,7 @@ public class SeaweedRead { } FilerProto.LookupVolumeResponse lookupResponse = filerGrpcClient - .getBlockingStub().lookupVolume(lookupRequest.build()); + .getBlockingStub().lookupVolume(lookupRequest.build()); Map vid2Locations = lookupResponse.getLocationsMapMap(); @@ -56,14 +57,18 @@ public class SeaweedRead { } private static int readChunkView(long position, byte[] buffer, int startOffset, ChunkView chunkView, FilerProto.Locations locations) throws IOException { + if (chunkView.cipherKey != null) { + return readEncryptedChunkView(position, buffer, startOffset, chunkView, locations); + } + HttpClient client = new DefaultHttpClient(); HttpGet request = new HttpGet( - String.format("http://%s/%s", locations.getLocations(0).getUrl(), chunkView.fileId)); + String.format("http://%s/%s", locations.getLocations(0).getUrl(), chunkView.fileId)); if (!chunkView.isFullChunk) { request.setHeader(HttpHeaders.ACCEPT_ENCODING, ""); request.setHeader(HttpHeaders.RANGE, - String.format("bytes=%d-%d", chunkView.offset, chunkView.offset + chunkView.size - 1)); + String.format("bytes=%d-%d", chunkView.offset, chunkView.offset + chunkView.size - 1)); } try { @@ -85,6 +90,44 @@ public class SeaweedRead { } } + private static int readEncryptedChunkView(long position, byte[] buffer, int startOffset, ChunkView chunkView, FilerProto.Locations locations) throws IOException { + HttpClient client = new DefaultHttpClient(); + HttpGet request = new HttpGet( + String.format("http://%s/%s", locations.getLocations(0).getUrl(), chunkView.fileId)); + + request.setHeader(HttpHeaders.ACCEPT_ENCODING, ""); + + byte[] data = null; + + try { + HttpResponse response = client.execute(request); + HttpEntity entity = response.getEntity(); + + data = EntityUtils.toByteArray(entity); + + } finally { + if (client instanceof Closeable) { + Closeable t = (Closeable) client; + t.close(); + } + } + + if (chunkView.isGzipped) { + data = Gzip.decompress(data); + } + + try { + data = SeaweedCipher.decrypt(data, 
chunkView.cipherKey); + } catch (Exception e) { + throw new IOException("fail to decrypt", e); + } + + int len = (int) (chunkView.logicOffset - position + chunkView.size); + System.arraycopy(data, (int) chunkView.offset, buffer, startOffset, len); + return len; + + } + protected static List viewFromVisibles(List visibleIntervals, long offset, long size) { List views = new ArrayList<>(); @@ -93,11 +136,13 @@ public class SeaweedRead { if (chunk.start <= offset && offset < chunk.stop && offset < stop) { boolean isFullChunk = chunk.isFullChunk && chunk.start == offset && chunk.stop <= stop; views.add(new ChunkView( - chunk.fileId, - offset - chunk.start, - Math.min(chunk.stop, stop) - offset, - offset, - isFullChunk + chunk.fileId, + offset - chunk.start, + Math.min(chunk.stop, stop) - offset, + offset, + isFullChunk, + chunk.cipherKey, + chunk.isGzipped )); offset = Math.min(chunk.stop, stop); } @@ -127,11 +172,13 @@ public class SeaweedRead { List newVisibles, FilerProto.FileChunk chunk) { VisibleInterval newV = new VisibleInterval( - chunk.getOffset(), - chunk.getOffset() + chunk.getSize(), - chunk.getFileId(), - chunk.getMtime(), - true + chunk.getOffset(), + chunk.getOffset() + chunk.getSize(), + chunk.getFileId(), + chunk.getMtime(), + true, + chunk.getCipherKey().toByteArray(), + chunk.getIsGzipped() ); // easy cases to speed up @@ -147,21 +194,25 @@ public class SeaweedRead { for (VisibleInterval v : visibles) { if (v.start < chunk.getOffset() && chunk.getOffset() < v.stop) { newVisibles.add(new VisibleInterval( - v.start, - chunk.getOffset(), - v.fileId, - v.modifiedTime, - false + v.start, + chunk.getOffset(), + v.fileId, + v.modifiedTime, + false, + v.cipherKey, + v.isGzipped )); } long chunkStop = chunk.getOffset() + chunk.getSize(); if (v.start < chunkStop && chunkStop < v.stop) { newVisibles.add(new VisibleInterval( - chunkStop, - v.stop, - v.fileId, - v.modifiedTime, - false + chunkStop, + v.stop, + v.fileId, + v.modifiedTime, + false, + v.cipherKey, + v.isGzipped )); } if (chunkStop <= v.start || v.stop <= chunk.getOffset()) { @@ -208,24 +259,30 @@ public class SeaweedRead { public final long modifiedTime; public final String fileId; public final boolean isFullChunk; + public final byte[] cipherKey; + public final boolean isGzipped; - public VisibleInterval(long start, long stop, String fileId, long modifiedTime, boolean isFullChunk) { + public VisibleInterval(long start, long stop, String fileId, long modifiedTime, boolean isFullChunk, byte[] cipherKey, boolean isGzipped) { this.start = start; this.stop = stop; this.modifiedTime = modifiedTime; this.fileId = fileId; this.isFullChunk = isFullChunk; + this.cipherKey = cipherKey; + this.isGzipped = isGzipped; } @Override public String toString() { return "VisibleInterval{" + - "start=" + start + - ", stop=" + stop + - ", modifiedTime=" + modifiedTime + - ", fileId='" + fileId + '\'' + - ", isFullChunk=" + isFullChunk + - '}'; + "start=" + start + + ", stop=" + stop + + ", modifiedTime=" + modifiedTime + + ", fileId='" + fileId + '\'' + + ", isFullChunk=" + isFullChunk + + ", cipherKey=" + Arrays.toString(cipherKey) + + ", isGzipped=" + isGzipped + + '}'; } } @@ -235,24 +292,30 @@ public class SeaweedRead { public final long size; public final long logicOffset; public final boolean isFullChunk; + public final byte[] cipherKey; + public final boolean isGzipped; - public ChunkView(String fileId, long offset, long size, long logicOffset, boolean isFullChunk) { + public ChunkView(String fileId, long offset, long size, long 
logicOffset, boolean isFullChunk, byte[] cipherKey, boolean isGzipped) { this.fileId = fileId; this.offset = offset; this.size = size; this.logicOffset = logicOffset; this.isFullChunk = isFullChunk; + this.cipherKey = cipherKey; + this.isGzipped = isGzipped; } @Override public String toString() { return "ChunkView{" + - "fileId='" + fileId + '\'' + - ", offset=" + offset + - ", size=" + size + - ", logicOffset=" + logicOffset + - ", isFullChunk=" + isFullChunk + - '}'; + "fileId='" + fileId + '\'' + + ", offset=" + offset + + ", size=" + size + + ", logicOffset=" + logicOffset + + ", isFullChunk=" + isFullChunk + + ", cipherKey=" + Arrays.toString(cipherKey) + + ", isGzipped=" + isGzipped + + '}'; } } diff --git a/other/java/client/src/main/java/seaweedfs/client/SeaweedWrite.java b/other/java/client/src/main/java/seaweedfs/client/SeaweedWrite.java index 0663e8d98..06c1bdd9f 100644 --- a/other/java/client/src/main/java/seaweedfs/client/SeaweedWrite.java +++ b/other/java/client/src/main/java/seaweedfs/client/SeaweedWrite.java @@ -1,5 +1,6 @@ package seaweedfs.client; +import com.google.protobuf.ByteString; import org.apache.http.HttpResponse; import org.apache.http.client.HttpClient; import org.apache.http.client.methods.HttpPost; @@ -11,9 +12,12 @@ import java.io.ByteArrayInputStream; import java.io.Closeable; import java.io.IOException; import java.io.InputStream; +import java.security.SecureRandom; public class SeaweedWrite { + private static SecureRandom random = new SecureRandom(); + public static void writeData(FilerProto.Entry.Builder entry, final String replication, final FilerGrpcClient filerGrpcClient, @@ -22,10 +26,9 @@ public class SeaweedWrite { final long bytesOffset, final long bytesLength) throws IOException { FilerProto.AssignVolumeResponse response = filerGrpcClient.getBlockingStub().assignVolume( FilerProto.AssignVolumeRequest.newBuilder() - .setCollection("") - .setReplication(replication) + .setCollection(filerGrpcClient.getCollection()) + .setReplication(replication == null ? 
filerGrpcClient.getReplication() : replication) .setDataCenter("") - .setReplication("") .setTtlSec(0) .build()); String fileId = response.getFileId(); @@ -33,7 +36,14 @@ public class SeaweedWrite { String auth = response.getAuth(); String targetUrl = String.format("http://%s/%s", url, fileId); - String etag = multipartUpload(targetUrl, auth, bytes, bytesOffset, bytesLength); + ByteString cipherKeyString = null; + byte[] cipherKey = null; + if (filerGrpcClient.isCipher()) { + cipherKey = genCipherKey(); + cipherKeyString = ByteString.copyFrom(cipherKey); + } + + String etag = multipartUpload(targetUrl, auth, bytes, bytesOffset, bytesLength, cipherKey); entry.addChunks(FilerProto.FileChunk.newBuilder() .setFileId(fileId) @@ -41,6 +51,7 @@ public class SeaweedWrite { .setSize(bytesLength) .setMtime(System.currentTimeMillis() / 10000L) .setETag(etag) + .setCipherKey(cipherKeyString) ); } @@ -58,11 +69,22 @@ public class SeaweedWrite { private static String multipartUpload(String targetUrl, String auth, final byte[] bytes, - final long bytesOffset, final long bytesLength) throws IOException { + final long bytesOffset, final long bytesLength, + byte[] cipherKey) throws IOException { HttpClient client = new DefaultHttpClient(); - InputStream inputStream = new ByteArrayInputStream(bytes, (int) bytesOffset, (int) bytesLength); + InputStream inputStream = null; + if (cipherKey == null) { + inputStream = new ByteArrayInputStream(bytes, (int) bytesOffset, (int) bytesLength); + } else { + try { + byte[] encryptedBytes = SeaweedCipher.encrypt(bytes, (int) bytesOffset, (int) bytesLength, cipherKey); + inputStream = new ByteArrayInputStream(encryptedBytes, 0, encryptedBytes.length); + } catch (Exception e) { + throw new IOException("fail to encrypt data", e); + } + } HttpPost post = new HttpPost(targetUrl); if (auth != null && auth.length() != 0) { @@ -92,4 +114,10 @@ public class SeaweedWrite { } } + + private static byte[] genCipherKey() { + byte[] b = new byte[32]; + random.nextBytes(b); + return b; + } } diff --git a/other/java/client/src/test/java/seaweedfs/client/SeaweedCipherTest.java b/other/java/client/src/test/java/seaweedfs/client/SeaweedCipherTest.java new file mode 100644 index 000000000..7b5e53e19 --- /dev/null +++ b/other/java/client/src/test/java/seaweedfs/client/SeaweedCipherTest.java @@ -0,0 +1,42 @@ +package seaweedfs.client; + +import org.junit.Test; + +import java.util.Base64; + +import static seaweedfs.client.SeaweedCipher.decrypt; +import static seaweedfs.client.SeaweedCipher.encrypt; + +public class SeaweedCipherTest { + + @Test + public void testSameAsGoImplemnetation() throws Exception { + byte[] secretKey = "256-bit key for AES 256 GCM encr".getBytes(); + + String plainText = "Now we need to generate a 256-bit key for AES 256 GCM"; + + System.out.println("Original Text : " + plainText); + + byte[] cipherText = encrypt(plainText.getBytes(), secretKey); + System.out.println("Encrypted Text : " + Base64.getEncoder().encodeToString(cipherText)); + + byte[] decryptedText = decrypt(cipherText, secretKey); + System.out.println("DeCrypted Text : " + new String(decryptedText)); + } + + @Test + public void testEncryptDecrypt() throws Exception { + byte[] secretKey = SeaweedCipher.genCipherKey(); + + String plainText = "Now we need to generate a 256-bit key for AES 256 GCM"; + + System.out.println("Original Text : " + plainText); + + byte[] cipherText = encrypt(plainText.getBytes(), secretKey); + System.out.println("Encrypted Text : " + Base64.getEncoder().encodeToString(cipherText)); + + 
byte[] decryptedText = decrypt(cipherText, secretKey); + System.out.println("DeCrypted Text : " + new String(decryptedText)); + } + +} diff --git a/other/java/hdfs2/dependency-reduced-pom.xml b/other/java/hdfs2/dependency-reduced-pom.xml index d818bc878..939f8a1e0 100644 --- a/other/java/hdfs2/dependency-reduced-pom.xml +++ b/other/java/hdfs2/dependency-reduced-pom.xml @@ -127,7 +127,7 @@ - 1.2.4 + 1.2.5 2.9.2 diff --git a/other/java/hdfs2/pom.xml b/other/java/hdfs2/pom.xml index b8c8cb891..be8751abe 100644 --- a/other/java/hdfs2/pom.xml +++ b/other/java/hdfs2/pom.xml @@ -5,7 +5,7 @@ 4.0.0 - 1.2.4 + 1.2.5 2.9.2 diff --git a/other/java/hdfs3/dependency-reduced-pom.xml b/other/java/hdfs3/dependency-reduced-pom.xml index ca53ffd22..cf9948945 100644 --- a/other/java/hdfs3/dependency-reduced-pom.xml +++ b/other/java/hdfs3/dependency-reduced-pom.xml @@ -127,7 +127,7 @@ - 1.2.4 + 1.2.5 3.1.1 diff --git a/other/java/hdfs3/pom.xml b/other/java/hdfs3/pom.xml index f5207213c..32415b4c1 100644 --- a/other/java/hdfs3/pom.xml +++ b/other/java/hdfs3/pom.xml @@ -5,7 +5,7 @@ 4.0.0 - 1.2.4 + 1.2.5 3.1.1 From 91e0a987e451424d1136a0e543b90b3e68dc30f9 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Sat, 14 Mar 2020 01:03:59 -0700 Subject: [PATCH 0245/2432] 1.62 --- k8s/seaweedfs/Chart.yaml | 2 +- k8s/seaweedfs/values.yaml | 2 +- weed/util/constants.go | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/k8s/seaweedfs/Chart.yaml b/k8s/seaweedfs/Chart.yaml index 136d91e20..fb59c41f5 100644 --- a/k8s/seaweedfs/Chart.yaml +++ b/k8s/seaweedfs/Chart.yaml @@ -1,4 +1,4 @@ apiVersion: v1 description: SeaweedFS name: seaweedfs -version: 1.61 +version: 1.62 diff --git a/k8s/seaweedfs/values.yaml b/k8s/seaweedfs/values.yaml index d3e030a08..f88ab438e 100644 --- a/k8s/seaweedfs/values.yaml +++ b/k8s/seaweedfs/values.yaml @@ -4,7 +4,7 @@ global: registry: "" repository: "" imageName: chrislusf/seaweedfs - imageTag: "1.61" + imageTag: "1.62" imagePullPolicy: IfNotPresent imagePullSecrets: imagepullsecret restartPolicy: Always diff --git a/weed/util/constants.go b/weed/util/constants.go index c23bc11f6..1f31ed9ca 100644 --- a/weed/util/constants.go +++ b/weed/util/constants.go @@ -5,5 +5,5 @@ import ( ) var ( - VERSION = fmt.Sprintf("%s %d.%d", sizeLimit, 1, 61) + VERSION = fmt.Sprintf("%s %d.%d", sizeLimit, 1, 62) ) From 17efa361f9aa0aeed4ec751fa084ea556f748523 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Sat, 14 Mar 2020 13:53:03 -0700 Subject: [PATCH 0246/2432] fix possible nil --- weed/operation/upload_content.go | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/weed/operation/upload_content.go b/weed/operation/upload_content.go index 52f8f9e2b..3101dc191 100644 --- a/weed/operation/upload_content.go +++ b/weed/operation/upload_content.go @@ -120,9 +120,11 @@ func doUploadData(uploadUrl string, filename string, cipher bool, data []byte, i }, filename, contentIsGzipped, mtype, pairMap, jwt) } - uploadResult.Size = uint32(clearDataLen) - if contentIsGzipped { - uploadResult.Gzip = 1 + if uploadResult != nil { + uploadResult.Size = uint32(clearDataLen) + if contentIsGzipped { + uploadResult.Gzip = 1 + } } return uploadResult, err From 18cda6adbb67e9963ffe21856cd152ce0333a0df Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Sat, 14 Mar 2020 13:55:32 -0700 Subject: [PATCH 0247/2432] cleaner fix --- weed/operation/upload_content.go | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/weed/operation/upload_content.go b/weed/operation/upload_content.go index 
3101dc191..6806f7cf8 100644 --- a/weed/operation/upload_content.go +++ b/weed/operation/upload_content.go @@ -120,11 +120,13 @@ func doUploadData(uploadUrl string, filename string, cipher bool, data []byte, i }, filename, contentIsGzipped, mtype, pairMap, jwt) } - if uploadResult != nil { - uploadResult.Size = uint32(clearDataLen) - if contentIsGzipped { - uploadResult.Gzip = 1 - } + if uploadResult == nil { + return + } + + uploadResult.Size = uint32(clearDataLen) + if contentIsGzipped { + uploadResult.Gzip = 1 } return uploadResult, err From ee635dcc3600ffe2fad8e36f9b79bbc4c3576b3c Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Sat, 14 Mar 2020 14:08:00 -0700 Subject: [PATCH 0248/2432] refactoring --- weed/operation/upload_content.go | 78 +++----------------------------- 1 file changed, 7 insertions(+), 71 deletions(-) diff --git a/weed/operation/upload_content.go b/weed/operation/upload_content.go index 6806f7cf8..0fc3c37bf 100644 --- a/weed/operation/upload_content.go +++ b/weed/operation/upload_content.go @@ -2,7 +2,6 @@ package operation import ( "bytes" - "compress/flate" "crypto/md5" "encoding/json" "errors" @@ -59,80 +58,23 @@ func UploadData(uploadUrl string, filename string, cipher bool, data []byte, isI func Upload(uploadUrl string, filename string, cipher bool, reader io.Reader, isInputGzipped bool, mtype string, pairMap map[string]string, jwt security.EncodedJwt) (uploadResult *UploadResult, err error) { hash := md5.New() reader = io.TeeReader(reader, hash) - uploadResult, err = doUpload(uploadUrl, filename, cipher, reader, isInputGzipped, mtype, pairMap, flate.BestSpeed, jwt) + uploadResult, err = doUpload(uploadUrl, filename, cipher, reader, isInputGzipped, mtype, pairMap, jwt) if uploadResult != nil { uploadResult.Md5 = fmt.Sprintf("%x", hash.Sum(nil)) } return } -func doUploadData(uploadUrl string, filename string, cipher bool, data []byte, isInputGzipped bool, mtype string, pairMap map[string]string, jwt security.EncodedJwt) (uploadResult *UploadResult, err error) { - contentIsGzipped := isInputGzipped - shouldGzipNow := false - if !isInputGzipped { - if shouldBeZipped, iAmSure := util.IsGzippableFileType(filepath.Base(filename), mtype); mtype == "" || iAmSure && shouldBeZipped { - shouldGzipNow = true - contentIsGzipped = true - } - } - - var clearDataLen int - - // gzip if possible - // this could be double copying - clearDataLen = len(data) - if shouldGzipNow { - data, err = util.GzipData(data) - } else if isInputGzipped { - // just to get the clear data length - clearData, err := util.UnGzipData(data) - if err == nil { - clearDataLen = len(clearData) - } - } - - if cipher { - // encrypt(gzip(data)) - - // encrypt - cipherKey := util.GenCipherKey() - encryptedData, encryptionErr := util.Encrypt(data, cipherKey) - if encryptionErr != nil { - err = fmt.Errorf("encrypt input: %v", encryptionErr) - return - } - - // upload data - uploadResult, err = upload_content(uploadUrl, func(w io.Writer) (err error) { - _, err = w.Write(encryptedData) - return - }, "", false, "", nil, jwt) - if uploadResult != nil { - uploadResult.Name = filename - uploadResult.Mime = mtype - uploadResult.CipherKey = cipherKey - } - } else { - // upload data - uploadResult, err = upload_content(uploadUrl, func(w io.Writer) (err error) { - _, err = w.Write(data) - return - }, filename, contentIsGzipped, mtype, pairMap, jwt) - } - - if uploadResult == nil { +func doUpload(uploadUrl string, filename string, cipher bool, reader io.Reader, isInputGzipped bool, mtype string, pairMap map[string]string, jwt 
security.EncodedJwt) (uploadResult *UploadResult, err error) { + data, readErr := ioutil.ReadAll(reader) + if readErr != nil { + err = fmt.Errorf("read input: %v", readErr) return } - - uploadResult.Size = uint32(clearDataLen) - if contentIsGzipped { - uploadResult.Gzip = 1 - } - - return uploadResult, err + return doUploadData(uploadUrl, filename, cipher, data, isInputGzipped, mtype, pairMap, jwt) } -func doUpload(uploadUrl string, filename string, cipher bool, reader io.Reader, isInputGzipped bool, mtype string, pairMap map[string]string, compression int, jwt security.EncodedJwt) (uploadResult *UploadResult, err error) { +func doUploadData(uploadUrl string, filename string, cipher bool, data []byte, isInputGzipped bool, mtype string, pairMap map[string]string, jwt security.EncodedJwt) (uploadResult *UploadResult, err error) { contentIsGzipped := isInputGzipped shouldGzipNow := false if !isInputGzipped { @@ -146,11 +88,6 @@ func doUpload(uploadUrl string, filename string, cipher bool, reader io.Reader, // gzip if possible // this could be double copying - data, readErr := ioutil.ReadAll(reader) - if readErr != nil { - err = fmt.Errorf("read input: %v", readErr) - return - } clearDataLen = len(data) if shouldGzipNow { data, err = util.GzipData(data) @@ -182,7 +119,6 @@ func doUpload(uploadUrl string, filename string, cipher bool, reader io.Reader, uploadResult.Name = filename uploadResult.Mime = mtype uploadResult.CipherKey = cipherKey - uploadResult.Size = uint32(clearDataLen) } } else { // upload data From cc52e8ca870bf9c51a54cf7adbab4975e09d3cba Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Sat, 14 Mar 2020 14:25:07 -0700 Subject: [PATCH 0249/2432] 1.63 --- k8s/seaweedfs/Chart.yaml | 2 +- k8s/seaweedfs/values.yaml | 2 +- weed/util/constants.go | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/k8s/seaweedfs/Chart.yaml b/k8s/seaweedfs/Chart.yaml index fb59c41f5..95420c730 100644 --- a/k8s/seaweedfs/Chart.yaml +++ b/k8s/seaweedfs/Chart.yaml @@ -1,4 +1,4 @@ apiVersion: v1 description: SeaweedFS name: seaweedfs -version: 1.62 +version: c1.63 diff --git a/k8s/seaweedfs/values.yaml b/k8s/seaweedfs/values.yaml index f88ab438e..7ff7b6049 100644 --- a/k8s/seaweedfs/values.yaml +++ b/k8s/seaweedfs/values.yaml @@ -4,7 +4,7 @@ global: registry: "" repository: "" imageName: chrislusf/seaweedfs - imageTag: "1.62" + imageTag: "c1.63" imagePullPolicy: IfNotPresent imagePullSecrets: imagepullsecret restartPolicy: Always diff --git a/weed/util/constants.go b/weed/util/constants.go index 1f31ed9ca..cbf1c50ba 100644 --- a/weed/util/constants.go +++ b/weed/util/constants.go @@ -5,5 +5,5 @@ import ( ) var ( - VERSION = fmt.Sprintf("%s %d.%d", sizeLimit, 1, 62) + VERSION = fmt.Sprintf("%s %d.%d", sizeLimit, 1, 63) ) From 43ed730e90f04d9b58b912a59670bda8d8de735b Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Sat, 14 Mar 2020 15:05:21 -0700 Subject: [PATCH 0250/2432] Update README.md --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index d4ac9177d..9b76a8acf 100644 --- a/README.md +++ b/README.md @@ -117,7 +117,7 @@ On top of the object store, optional [Filer] can support directories and POSIX a * [Hadoop Compatible File System][Hadoop] to access files from Hadoop/Spark/Flink/etc jobs. * [Async Backup To Cloud][BackupToCloud] has extremely fast local access and backups to Amazon S3, Google Cloud Storage, Azure, BackBlaze. * [WebDAV] access as a mapped drive on Mac and Windows, or from mobile devices. 
-* [Encrypted Storage][FilerDataEncryption] safely stores the encrypted data. +* [256bit AES Encrypted Storage][FilerDataEncryption] safely stores the encrypted data. * [File TTL][FilerTTL] automatically purge file metadata and actual file data. [Filer]: https://github.com/chrislusf/seaweedfs/wiki/Directories-and-Files From 0576a27f44a2264f39a2cc6bcfe13dcb58dda55a Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Sat, 14 Mar 2020 16:06:03 -0700 Subject: [PATCH 0251/2432] protect against possible nil which is unlikely to happen though --- weed/topology/store_replicate.go | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/weed/topology/store_replicate.go b/weed/topology/store_replicate.go index 8c4996d45..24c32ef62 100644 --- a/weed/topology/store_replicate.go +++ b/weed/topology/store_replicate.go @@ -154,7 +154,11 @@ func distributedOperation(locations []operation.Location, store *storage.Store, func getWritableRemoteReplications(s *storage.Store, volumeId needle.VolumeId, masterNode string) ( remoteLocations []operation.Location, err error) { - copyCount := s.GetVolume(volumeId).ReplicaPlacement.GetCopyCount() + volume := s.GetVolume(volumeId) + if volume == nil { + return nil, fmt.Errorf("fail to find volume %d", volumeId) + } + copyCount := v.ReplicaPlacement.GetCopyCount() if copyCount > 1 { if lookupResult, lookupErr := operation.Lookup(masterNode, volumeId.String()); lookupErr == nil { if len(lookupResult.Locations) < copyCount { From d022b6bc0ec908b58f4a073382b28eac4bba03ef Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Sat, 14 Mar 2020 16:32:16 -0700 Subject: [PATCH 0252/2432] fix compilation --- weed/topology/store_replicate.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/weed/topology/store_replicate.go b/weed/topology/store_replicate.go index 24c32ef62..c7738311c 100644 --- a/weed/topology/store_replicate.go +++ b/weed/topology/store_replicate.go @@ -154,8 +154,8 @@ func distributedOperation(locations []operation.Location, store *storage.Store, func getWritableRemoteReplications(s *storage.Store, volumeId needle.VolumeId, masterNode string) ( remoteLocations []operation.Location, err error) { - volume := s.GetVolume(volumeId) - if volume == nil { + v := s.GetVolume(volumeId) + if v == nil { return nil, fmt.Errorf("fail to find volume %d", volumeId) } copyCount := v.ReplicaPlacement.GetCopyCount() From 7b37178716c9d25667d31b73ef4d2d34758b9289 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Sat, 14 Mar 2020 20:30:26 -0700 Subject: [PATCH 0253/2432] filer: close stores if interrupted --- weed/filer2/abstract_sql/abstract_sql_store.go | 4 ++++ weed/filer2/cassandra/cassandra_store.go | 4 ++++ weed/filer2/etcd/etcd_store.go | 4 ++++ weed/filer2/filer.go | 4 ++++ weed/filer2/filerstore.go | 6 ++++++ weed/filer2/leveldb/leveldb_store.go | 4 ++++ weed/filer2/leveldb2/leveldb2_store.go | 6 ++++++ weed/filer2/redis/universal_redis_store.go | 5 +++++ weed/server/filer_server.go | 4 ++++ 9 files changed, 41 insertions(+) diff --git a/weed/filer2/abstract_sql/abstract_sql_store.go b/weed/filer2/abstract_sql/abstract_sql_store.go index 864c858d3..ff041d0a3 100644 --- a/weed/filer2/abstract_sql/abstract_sql_store.go +++ b/weed/filer2/abstract_sql/abstract_sql_store.go @@ -184,3 +184,7 @@ func (store *AbstractSqlStore) ListDirectoryEntries(ctx context.Context, fullpat return entries, nil } + +func (store *AbstractSqlStore) Shutdown() { + store.DB.Close() +} diff --git a/weed/filer2/cassandra/cassandra_store.go b/weed/filer2/cassandra/cassandra_store.go 
index 6f25fffec..d57df23eb 100644 --- a/weed/filer2/cassandra/cassandra_store.go +++ b/weed/filer2/cassandra/cassandra_store.go @@ -154,3 +154,7 @@ func (store *CassandraStore) ListDirectoryEntries(ctx context.Context, fullpath return entries, err } + +func (store *CassandraStore) Shutdown() { + store.session.Close() +} diff --git a/weed/filer2/etcd/etcd_store.go b/weed/filer2/etcd/etcd_store.go index 83a6ddc5d..6c352c8d0 100644 --- a/weed/filer2/etcd/etcd_store.go +++ b/weed/filer2/etcd/etcd_store.go @@ -196,3 +196,7 @@ func getNameFromKey(key []byte) string { return string(key[sepIndex+1:]) } + +func (store *EtcdStore) Shutdown() { + store.client.Close() +} diff --git a/weed/filer2/filer.go b/weed/filer2/filer.go index d3343f610..e226552ad 100644 --- a/weed/filer2/filer.go +++ b/weed/filer2/filer.go @@ -308,3 +308,7 @@ func (f *Filer) cacheSetDirectory(dirpath string, dirEntry *Entry, level int) { f.directoryCache.Set(dirpath, dirEntry, time.Duration(minutes)*time.Minute) } + +func (f *Filer) Shutdown() { + f.store.Shutdown() +} diff --git a/weed/filer2/filerstore.go b/weed/filer2/filerstore.go index f724f79c2..e3476aa96 100644 --- a/weed/filer2/filerstore.go +++ b/weed/filer2/filerstore.go @@ -25,6 +25,8 @@ type FilerStore interface { BeginTransaction(ctx context.Context) (context.Context, error) CommitTransaction(ctx context.Context) error RollbackTransaction(ctx context.Context) error + + Shutdown() } type FilerStoreWrapper struct { @@ -133,3 +135,7 @@ func (fsw *FilerStoreWrapper) CommitTransaction(ctx context.Context) error { func (fsw *FilerStoreWrapper) RollbackTransaction(ctx context.Context) error { return fsw.actualStore.RollbackTransaction(ctx) } + +func (fsw *FilerStoreWrapper) Shutdown() { + fsw.actualStore.Shutdown() +} diff --git a/weed/filer2/leveldb/leveldb_store.go b/weed/filer2/leveldb/leveldb_store.go index 807fcb56f..9ddb9bacb 100644 --- a/weed/filer2/leveldb/leveldb_store.go +++ b/weed/filer2/leveldb/leveldb_store.go @@ -216,3 +216,7 @@ func getNameFromKey(key []byte) string { return string(key[sepIndex+1:]) } + +func (store *LevelDBStore) Shutdown() { + store.db.Close() +} diff --git a/weed/filer2/leveldb2/leveldb2_store.go b/weed/filer2/leveldb2/leveldb2_store.go index 0b07c6833..1e6827356 100644 --- a/weed/filer2/leveldb2/leveldb2_store.go +++ b/weed/filer2/leveldb2/leveldb2_store.go @@ -236,3 +236,9 @@ func hashToBytes(dir string, dbCount int) ([]byte, int) { return b, int(x) % dbCount } + +func (store *LevelDB2Store) Shutdown() { + for d := 0; d < store.dbCount; d++ { + store.dbs[d].Close() + } +} diff --git a/weed/filer2/redis/universal_redis_store.go b/weed/filer2/redis/universal_redis_store.go index c5b9d9416..1f8a0413a 100644 --- a/weed/filer2/redis/universal_redis_store.go +++ b/weed/filer2/redis/universal_redis_store.go @@ -180,3 +180,8 @@ func (store *UniversalRedisStore) ListDirectoryEntries(ctx context.Context, full func genDirectoryListKey(dir string) (dirList string) { return dir + DIR_LIST_MARKER } + + +func (store *UniversalRedisStore) Shutdown() { + store.Client.Close() +} diff --git a/weed/server/filer_server.go b/weed/server/filer_server.go index 656bb2ed8..c3b959c7c 100644 --- a/weed/server/filer_server.go +++ b/weed/server/filer_server.go @@ -102,6 +102,10 @@ func NewFilerServer(defaultMux, readonlyMux *http.ServeMux, option *FilerOption) maybeStartMetrics(fs, option) + util.OnInterrupt(func() { + fs.filer.Shutdown() + }) + return fs, nil } From 7edbee6f57f9e3ba90df46255d0b836fbf0f6976 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Sun, 15 Mar 
2020 02:50:42 -0700 Subject: [PATCH 0254/2432] volume: proxy writes to remote volume server, with replication or not

The panic is triggered by uploading a file to a volume server not holding the designated replica.

2020-03-15 10:20:14.365488 I | http: panic serving 127.0.0.1:57124: runtime error: invalid memory address or nil pointer dereference
goroutine 119 [running]:
net/http.(*conn).serve.func1(0xc0001a8000)
    /home/travis/.gimme/versions/go1.14.linux.amd64/src/net/http/server.go:1772 +0x139
panic(0x2316fe0, 0x3662900)
    /home/travis/.gimme/versions/go1.14.linux.amd64/src/runtime/panic.go:973 +0x396
github.com/chrislusf/seaweedfs/weed/topology.getWritableRemoteReplications(0xc00009c000, 0x2, 0x7ffeefbffbd2, 0xe, 0x0, 0xa, 0x0, 0x0, 0xbb4bf1f7)
    /home/travis/gopath/src/github.com/chrislusf/seaweedfs/weed/topology/store_replicate.go:157 +0x53
github.com/chrislusf/seaweedfs/weed/topology.ReplicatedWrite(0x7ffeefbffbd2, 0xe, 0xc00009c000, 0xc000000002, 0xc000472750, 0xc0001b2200, 0x0, 0x1, 0x0)
    /home/travis/gopath/src/github.com/chrislusf/seaweedfs/weed/topology/store_replicate.go:29 +0xc7
github.com/chrislusf/seaweedfs/weed/server.(*VolumeServer).PostHandler(0xc0001513f0, 0x292bde0, 0xc0001fe2a0, 0xc0001b2200)
    /home/travis/gopath/src/github.com/chrislusf/seaweedfs/weed/server/volume_server_handlers_write.go:52 +0x56f
github.com/chrislusf/seaweedfs/weed/server.(*VolumeServer).privateStoreHandler(0xc0001513f0, 0x292bde0, 0xc0001fe2a0, 0xc0001b2200)
    /home/travis/gopath/src/github.com/chrislusf/seaweedfs/weed/server/volume_server_handlers.go:37 +0x21f
net/http.HandlerFunc.ServeHTTP(0xc0004420e0, 0x292bde0, 0xc0001fe2a0, 0xc0001b2200)
    /home/travis/.gimme/versions/go1.14.linux.amd64/src/net/http/server.go:2012 +0x44
net/http.(*ServeMux).ServeHTTP(0xc0001fc800, 0x292bde0, 0xc0001fe2a0, 0xc0001b2200)
    /home/travis/.gimme/versions/go1.14.linux.amd64/src/net/http/server.go:2387 +0x1a5
net/http.serverHandler.ServeHTTP(0xc0001781c0, 0x292bde0, 0xc0001fe2a0, 0xc0001b2200)
    /home/travis/.gimme/versions/go1.14.linux.amd64/src/net/http/server.go:2807 +0xa3
net/http.(*conn).serve(0xc0001a8000, 0x2934420, 0xc000212400)
    /home/travis/.gimme/versions/go1.14.linux.amd64/src/net/http/server.go:1895 +0x86c
created by net/http.(*Server).Serve
    /home/travis/.gimme/versions/go1.14.linux.amd64/src/net/http/server.go:2933 +0x35c

E.g.: server A (datacenter 1) and server B (datacenter 2) hold replica (100) for volume 1. Uploading a file with a key 1,xxxxx to server C (datacenter 3) will trigger the panic on server C. Server C should either proxy the upload to the correct volume server or return an HTTP error code, not panic.
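The guard this commit calls for — never dereference a volume that the local store may not hold, and fall back to a master lookup so the write can be forwarded or rejected cleanly — can be sketched roughly as below. This is a simplified illustration, not the actual patch (which is in the store_replicate.go hunk that follows); it reuses the Store, Lookup, and Location types visible in that hunk, but the function name writableRemoteLocations is hypothetical and the replica-count consistency check is omitted for brevity.

package topology

import (
	"fmt"
	"strconv"

	"github.com/chrislusf/seaweedfs/weed/operation"
	"github.com/chrislusf/seaweedfs/weed/storage"
	"github.com/chrislusf/seaweedfs/weed/storage/needle"
)

// writableRemoteLocations resolves replica locations without assuming the
// volume is present on the local store, so a server that does not hold the
// designated replica can forward the write instead of panicking.
func writableRemoteLocations(s *storage.Store, volumeId needle.VolumeId, masterNode string) ([]operation.Location, error) {
	v := s.GetVolume(volumeId) // may be nil: this server might not hold the volume at all
	if v != nil && v.ReplicaPlacement.GetCopyCount() == 1 {
		return nil, nil // single copy held locally: nothing to replicate or proxy
	}

	// Volume is remote or replicated: ask the master where the copies live.
	lookupResult, lookupErr := operation.Lookup(masterNode, volumeId.String())
	if lookupErr != nil {
		return nil, fmt.Errorf("failed to lookup volume %d: %v", volumeId, lookupErr)
	}

	// Keep every location except this server itself.
	selfUrl := s.Ip + ":" + strconv.Itoa(s.Port)
	var remoteLocations []operation.Location
	for _, location := range lookupResult.Locations {
		if location.Url != selfUrl {
			remoteLocations = append(remoteLocations, location)
		}
	}
	return remoteLocations, nil
}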
--- weed/topology/store_replicate.go | 41 ++++++++++++++++++-------------- 1 file changed, 23 insertions(+), 18 deletions(-) diff --git a/weed/topology/store_replicate.go b/weed/topology/store_replicate.go index c7738311c..495c38cfa 100644 --- a/weed/topology/store_replicate.go +++ b/weed/topology/store_replicate.go @@ -154,27 +154,32 @@ func distributedOperation(locations []operation.Location, store *storage.Store, func getWritableRemoteReplications(s *storage.Store, volumeId needle.VolumeId, masterNode string) ( remoteLocations []operation.Location, err error) { + v := s.GetVolume(volumeId) - if v == nil { - return nil, fmt.Errorf("fail to find volume %d", volumeId) + if v != nil && v.ReplicaPlacement.GetCopyCount() == 1 { + return } - copyCount := v.ReplicaPlacement.GetCopyCount() - if copyCount > 1 { - if lookupResult, lookupErr := operation.Lookup(masterNode, volumeId.String()); lookupErr == nil { - if len(lookupResult.Locations) < copyCount { - err = fmt.Errorf("replicating opetations [%d] is less than volume %d replication copy count [%d]", - len(lookupResult.Locations), volumeId, copyCount) - return - } - selfUrl := s.Ip + ":" + strconv.Itoa(s.Port) - for _, location := range lookupResult.Locations { - if location.Url != selfUrl { - remoteLocations = append(remoteLocations, location) - } + + // not on local store, or has replications + lookupResult, lookupErr := operation.Lookup(masterNode, volumeId.String()) + if lookupErr == nil { + selfUrl := s.Ip + ":" + strconv.Itoa(s.Port) + for _, location := range lookupResult.Locations { + if location.Url != selfUrl { + remoteLocations = append(remoteLocations, location) } - } else { - err = fmt.Errorf("failed to lookup for %d: %v", volumeId, lookupErr) - return + } + } else { + err = fmt.Errorf("failed to lookup for %d: %v", volumeId, lookupErr) + return + } + + if v != nil { + // has one local and has remote replications + copyCount := v.ReplicaPlacement.GetCopyCount() + if len(lookupResult.Locations) < copyCount { + err = fmt.Errorf("replicating opetations [%d] is less than volume %d replication copy count [%d]", + len(lookupResult.Locations), volumeId, copyCount) } } From 560df51def0713522f311e971b74877c6a9fdcd7 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Sun, 15 Mar 2020 03:11:26 -0700 Subject: [PATCH 0255/2432] refactoring --- weed/server/volume_grpc_tail.go | 2 +- weed/server/volume_server_handlers_write.go | 2 +- weed/storage/store.go | 7 ++++--- weed/topology/store_replicate.go | 17 ++++++++--------- 4 files changed, 14 insertions(+), 14 deletions(-) diff --git a/weed/server/volume_grpc_tail.go b/weed/server/volume_grpc_tail.go index c26d6ed8f..c3b66c5e7 100644 --- a/weed/server/volume_grpc_tail.go +++ b/weed/server/volume_grpc_tail.go @@ -90,7 +90,7 @@ func (vs *VolumeServer) VolumeTailReceiver(ctx context.Context, req *volume_serv defer glog.V(1).Infof("receive tailing volume %d finished", v.Id) return resp, operation.TailVolumeFromSource(req.SourceVolumeServer, vs.grpcDialOption, v.Id, req.SinceNs, int(req.IdleTimeoutSeconds), func(n *needle.Needle) error { - _, _, err := vs.store.WriteVolumeNeedle(v.Id, n) + _, err := vs.store.WriteVolumeNeedle(v.Id, n) return err }) diff --git a/weed/server/volume_server_handlers_write.go b/weed/server/volume_server_handlers_write.go index 101be4c43..56cebf50f 100644 --- a/weed/server/volume_server_handlers_write.go +++ b/weed/server/volume_server_handlers_write.go @@ -49,7 +49,7 @@ func (vs *VolumeServer) PostHandler(w http.ResponseWriter, r *http.Request) { } ret := 
operation.UploadResult{} - _, isUnchanged, writeError := topology.ReplicatedWrite(vs.GetMaster(), vs.store, volumeId, needle, r) + isUnchanged, writeError := topology.ReplicatedWrite(vs.GetMaster(), vs.store, volumeId, needle, r) // http 204 status code does not allow body if writeError == nil && isUnchanged { diff --git a/weed/storage/store.go b/weed/storage/store.go index e29680f6f..19dbcb70e 100644 --- a/weed/storage/store.go +++ b/weed/storage/store.go @@ -227,14 +227,15 @@ func (s *Store) Close() { } } -func (s *Store) WriteVolumeNeedle(i needle.VolumeId, n *needle.Needle) (size uint32, isUnchanged bool, err error) { +func (s *Store) WriteVolumeNeedle(i needle.VolumeId, n *needle.Needle) (isUnchanged bool, err error) { if v := s.findVolume(i); v != nil { if v.noWriteOrDelete || v.noWriteCanDelete { err = fmt.Errorf("volume %d is read only", i) return } - if MaxPossibleVolumeSize >= v.ContentSize()+uint64(needle.GetActualSize(size, v.Version())) { - _, size, isUnchanged, err = v.writeNeedle(n) + // using len(n.Data) here instead of n.Size before n.Size is populated in v.writeNeedle(n) + if MaxPossibleVolumeSize >= v.ContentSize()+uint64(needle.GetActualSize(uint32(len(n.Data)), v.Version())) { + _, _, isUnchanged, err = v.writeNeedle(n) } else { err = fmt.Errorf("volume size limit %d exceeded! current size is %d", s.GetVolumeSizeLimit(), v.ContentSize()) } diff --git a/weed/topology/store_replicate.go b/weed/topology/store_replicate.go index 495c38cfa..6f043a601 100644 --- a/weed/topology/store_replicate.go +++ b/weed/topology/store_replicate.go @@ -17,9 +17,7 @@ import ( "github.com/chrislusf/seaweedfs/weed/util" ) -func ReplicatedWrite(masterNode string, s *storage.Store, - volumeId needle.VolumeId, n *needle.Needle, - r *http.Request) (size uint32, isUnchanged bool, err error) { +func ReplicatedWrite(masterNode string, s *storage.Store, volumeId needle.VolumeId, n *needle.Needle, r *http.Request) (isUnchanged bool, err error) { //check JWT jwt := security.GetJwt(r) @@ -33,11 +31,13 @@ func ReplicatedWrite(masterNode string, s *storage.Store, } } - size, isUnchanged, err = s.WriteVolumeNeedle(volumeId, n) - if err != nil { - err = fmt.Errorf("failed to write to local disk: %v", err) - glog.V(0).Infoln(err) - return + if s.GetVolume(volumeId) != nil { + isUnchanged, err = s.WriteVolumeNeedle(volumeId, n) + if err != nil { + err = fmt.Errorf("failed to write to local disk: %v", err) + glog.V(0).Infoln(err) + return + } } if len(remoteLocations) > 0 { //send to other replica locations @@ -75,7 +75,6 @@ func ReplicatedWrite(masterNode string, s *storage.Store, _, err := operation.UploadData(u.String(), string(n.Name), false, n.Data, n.IsGzipped(), string(n.Mime), pairMap, jwt) return err }); err != nil { - size = 0 err = fmt.Errorf("failed to write to replicas for volume %d: %v", volumeId, err) glog.V(0).Infoln(err) } From 22400c66331e90fd5aa430c243af2eadfdfe9b12 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Sun, 15 Mar 2020 04:15:40 -0700 Subject: [PATCH 0256/2432] consistent gzip logic local store gzip same as replicated writes --- weed/storage/needle/needle_parse_upload.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/weed/storage/needle/needle_parse_upload.go b/weed/storage/needle/needle_parse_upload.go index 85526aaa8..0babeda20 100644 --- a/weed/storage/needle/needle_parse_upload.go +++ b/weed/storage/needle/needle_parse_upload.go @@ -55,7 +55,7 @@ func ParseUpload(r *http.Request, sizeLimit int64) (pu *ParsedUpload, e error) { pu.OriginalDataSize = 
len(unzipped) pu.UncompressedData = unzipped } - } else if shouldGzip, _ := util.IsGzippableFileType("", pu.MimeType); shouldGzip { + } else if shouldGzip, _ := util.IsGzippableFileType("", pu.MimeType); pu.MimeType == "" || shouldGzip { if compressedData, err := util.GzipData(pu.Data); err == nil { pu.Data = compressedData pu.IsGzipped = true From f9b3750ad16e0907ad488a0daf249828fc6011f6 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Sun, 15 Mar 2020 04:16:00 -0700 Subject: [PATCH 0257/2432] volume: handle repeated writes response --- weed/operation/upload_content.go | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/weed/operation/upload_content.go b/weed/operation/upload_content.go index 0fc3c37bf..75775d7d0 100644 --- a/weed/operation/upload_content.go +++ b/weed/operation/upload_content.go @@ -188,12 +188,17 @@ func upload_content(uploadUrl string, fillBufferFunction func(w io.Writer) error return nil, post_err } defer resp.Body.Close() + + var ret UploadResult etag := getEtag(resp) + if resp.StatusCode == http.StatusNoContent { + ret.ETag = etag + return &ret, nil + } resp_body, ra_err := ioutil.ReadAll(resp.Body) if ra_err != nil { return nil, ra_err } - var ret UploadResult unmarshal_err := json.Unmarshal(resp_body, &ret) if unmarshal_err != nil { glog.V(0).Infoln("failing to read upload response", uploadUrl, string(resp_body)) From 08bea09841343c185587f103c9a99c32d65b1fb5 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Mon, 16 Mar 2020 00:03:56 -0700 Subject: [PATCH 0258/2432] Update README.md --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 9b76a8acf..194c0625d 100644 --- a/README.md +++ b/README.md @@ -117,7 +117,7 @@ On top of the object store, optional [Filer] can support directories and POSIX a * [Hadoop Compatible File System][Hadoop] to access files from Hadoop/Spark/Flink/etc jobs. * [Async Backup To Cloud][BackupToCloud] has extremely fast local access and backups to Amazon S3, Google Cloud Storage, Azure, BackBlaze. * [WebDAV] access as a mapped drive on Mac and Windows, or from mobile devices. -* [256bit AES Encrypted Storage][FilerDataEncryption] safely stores the encrypted data. +* [AES256-GCM Encrypted Storage][FilerDataEncryption] safely stores the encrypted data. * [File TTL][FilerTTL] automatically purge file metadata and actual file data. [Filer]: https://github.com/chrislusf/seaweedfs/wiki/Directories-and-Files From 1e251b8cdff5f3f0e562ceae70d0ec74631c1f05 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Mon, 16 Mar 2020 15:34:23 -0700 Subject: [PATCH 0259/2432] Update README.md --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index 194c0625d..9b313569b 100644 --- a/README.md +++ b/README.md @@ -50,6 +50,7 @@ Your support will be really appreciated by me and other supporters! 
- [Download Binaries for different platforms](https://github.com/chrislusf/seaweedfs/releases/latest) - [SeaweedFS on Slack](https://join.slack.com/t/seaweedfs/shared_invite/enQtMzI4MTMwMjU2MzA3LTEyYzZmZWYzOGQ3MDJlZWMzYmI0OTE4OTJiZjJjODBmMzUxNmYwODg0YjY3MTNlMjBmZDQ1NzQ5NDJhZWI2ZmY) +- [SeaweedFS on Twitter] (https://twitter.com/SeaweedFS) - [SeaweedFS Mailing List](https://groups.google.com/d/forum/seaweedfs) - [Wiki Documentation](https://github.com/chrislusf/seaweedfs/wiki) - [SeaweedFS Introduction Slides](https://www.slideshare.net/chrislusf/seaweedfs-introduction) From 12df236defbc537ad12bc43e8c437f888b4eb5e6 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Mon, 16 Mar 2020 15:41:15 -0700 Subject: [PATCH 0260/2432] Update README.md --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 9b313569b..5e6ad16a1 100644 --- a/README.md +++ b/README.md @@ -50,7 +50,7 @@ Your support will be really appreciated by me and other supporters! - [Download Binaries for different platforms](https://github.com/chrislusf/seaweedfs/releases/latest) - [SeaweedFS on Slack](https://join.slack.com/t/seaweedfs/shared_invite/enQtMzI4MTMwMjU2MzA3LTEyYzZmZWYzOGQ3MDJlZWMzYmI0OTE4OTJiZjJjODBmMzUxNmYwODg0YjY3MTNlMjBmZDQ1NzQ5NDJhZWI2ZmY) -- [SeaweedFS on Twitter] (https://twitter.com/SeaweedFS) +- [SeaweedFS on Twitter](https://twitter.com/SeaweedFS) - [SeaweedFS Mailing List](https://groups.google.com/d/forum/seaweedfs) - [Wiki Documentation](https://github.com/chrislusf/seaweedfs/wiki) - [SeaweedFS Introduction Slides](https://www.slideshare.net/chrislusf/seaweedfs-introduction) From c3cb6fa1d75916dc463c258ccdd91ba7a0dbd5da Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Tue, 17 Mar 2020 09:43:57 -0700 Subject: [PATCH 0261/2432] volume: compaction can cause readonly volumes address https://github.com/chrislusf/seaweedfs/issues/1233 --- weed/pb/volume_server.proto | 1 + weed/pb/volume_server_pb/volume_server.pb.go | 422 ++++++++++--------- weed/server/volume_grpc_vacuum.go | 5 + weed/storage/store.go | 4 +- weed/storage/volume.go | 6 +- weed/storage/volume_checking.go | 2 +- weed/storage/volume_loading.go | 2 +- weed/topology/topology_vacuum.go | 8 +- weed/topology/volume_layout.go | 4 +- 9 files changed, 239 insertions(+), 215 deletions(-) diff --git a/weed/pb/volume_server.proto b/weed/pb/volume_server.proto index ce53fdc96..683181e4f 100644 --- a/weed/pb/volume_server.proto +++ b/weed/pb/volume_server.proto @@ -144,6 +144,7 @@ message VacuumVolumeCommitRequest { uint32 volume_id = 1; } message VacuumVolumeCommitResponse { + bool is_read_only = 1; } message VacuumVolumeCleanupRequest { diff --git a/weed/pb/volume_server_pb/volume_server.pb.go b/weed/pb/volume_server_pb/volume_server.pb.go index 56baa0cf7..bcc31de16 100644 --- a/weed/pb/volume_server_pb/volume_server.pb.go +++ b/weed/pb/volume_server_pb/volume_server.pb.go @@ -377,6 +377,7 @@ func (m *VacuumVolumeCommitRequest) GetVolumeId() uint32 { } type VacuumVolumeCommitResponse struct { + IsReadOnly bool `protobuf:"varint,1,opt,name=is_read_only,json=isReadOnly" json:"is_read_only,omitempty"` } func (m *VacuumVolumeCommitResponse) Reset() { *m = VacuumVolumeCommitResponse{} } @@ -384,6 +385,13 @@ func (m *VacuumVolumeCommitResponse) String() string { return proto.C func (*VacuumVolumeCommitResponse) ProtoMessage() {} func (*VacuumVolumeCommitResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11} } +func (m *VacuumVolumeCommitResponse) GetIsReadOnly() bool { + if m != nil { + return 
m.IsReadOnly + } + return false +} + type VacuumVolumeCleanupRequest struct { VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` } @@ -3667,210 +3675,212 @@ var _VolumeServer_serviceDesc = grpc.ServiceDesc{ func init() { proto.RegisterFile("volume_server.proto", fileDescriptor0) } var fileDescriptor0 = []byte{ - // 3280 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xd4, 0x3b, 0x4d, 0x6f, 0x1c, 0xc7, - 0xb1, 0x1c, 0x2e, 0x3f, 0x76, 0x6b, 0x77, 0x45, 0xaa, 0x49, 0x53, 0xeb, 0x21, 0x29, 0x51, 0x23, - 0x7f, 0x90, 0xb2, 0x45, 0xca, 0xb4, 0xfd, 0xac, 0x27, 0x3f, 0xfb, 0x59, 0xa2, 0x44, 0x59, 0xb6, - 0x48, 0xd9, 0x43, 0x59, 0x7e, 0x2f, 0x36, 0x32, 0x18, 0xce, 0xf4, 0x92, 0x63, 0xce, 0xce, 0x8c, - 0x66, 0x7a, 0x69, 0xad, 0xe0, 0x9c, 0x1c, 0x20, 0x01, 0x82, 0xe4, 0x10, 0xe4, 0x92, 0x4b, 0x80, - 0x20, 0xf7, 0x5c, 0xf3, 0x17, 0xfc, 0x07, 0x02, 0xe4, 0x94, 0x4b, 0xce, 0x39, 0xe4, 0x10, 0x20, - 0x40, 0x2e, 0x41, 0x7f, 0xcd, 0xce, 0x27, 0x77, 0x18, 0x31, 0x08, 0x72, 0x9b, 0xae, 0xae, 0xae, - 0xea, 0xaa, 0xae, 0xaa, 0xae, 0xae, 0xda, 0x85, 0xb9, 0x63, 0xdf, 0xed, 0xf7, 0xb0, 0x11, 0xe1, - 0xf0, 0x18, 0x87, 0xeb, 0x41, 0xe8, 0x13, 0x1f, 0xcd, 0xa6, 0x80, 0x46, 0xb0, 0xaf, 0x6d, 0x00, - 0xba, 0x6d, 0x12, 0xeb, 0xf0, 0x0e, 0x76, 0x31, 0xc1, 0x3a, 0x7e, 0xd2, 0xc7, 0x11, 0x41, 0x2f, - 0x42, 0xbd, 0xeb, 0xb8, 0xd8, 0x70, 0xec, 0xa8, 0xa3, 0xac, 0xd4, 0x56, 0x1b, 0xfa, 0x34, 0x1d, - 0xdf, 0xb7, 0x23, 0xed, 0x21, 0xcc, 0xa5, 0x16, 0x44, 0x81, 0xef, 0x45, 0x18, 0xdd, 0x80, 0xe9, - 0x10, 0x47, 0x7d, 0x97, 0xf0, 0x05, 0xcd, 0xcd, 0x8b, 0xeb, 0x59, 0x5e, 0xeb, 0xf1, 0x92, 0xbe, - 0x4b, 0x74, 0x89, 0xae, 0x7d, 0xab, 0x40, 0x2b, 0x39, 0x83, 0x2e, 0xc0, 0xb4, 0x60, 0xde, 0x51, - 0x56, 0x94, 0xd5, 0x86, 0x3e, 0xc5, 0x79, 0xa3, 0x05, 0x98, 0x8a, 0x88, 0x49, 0xfa, 0x51, 0x67, - 0x7c, 0x45, 0x59, 0x9d, 0xd4, 0xc5, 0x08, 0xcd, 0xc3, 0x24, 0x0e, 0x43, 0x3f, 0xec, 0xd4, 0x18, - 0x3a, 0x1f, 0x20, 0x04, 0x13, 0x91, 0xf3, 0x0c, 0x77, 0x26, 0x56, 0x94, 0xd5, 0xb6, 0xce, 0xbe, - 0x51, 0x07, 0xa6, 0x8f, 0x71, 0x18, 0x39, 0xbe, 0xd7, 0x99, 0x64, 0x60, 0x39, 0xd4, 0x3e, 0x82, - 0x73, 0xdb, 0x8e, 0x8b, 0xef, 0x61, 0x22, 0x75, 0x50, 0xba, 0x8d, 0x4b, 0xd0, 0x34, 0x2d, 0x0b, - 0x07, 0xc4, 0x38, 0x78, 0xe6, 0x04, 0x6c, 0x2f, 0x75, 0x1d, 0x38, 0xe8, 0xde, 0x33, 0x27, 0xd0, - 0x7e, 0x54, 0x83, 0x99, 0x98, 0x98, 0xd0, 0x0f, 0x82, 0x09, 0xdb, 0x24, 0x26, 0x23, 0xd5, 0xd2, - 0xd9, 0x37, 0x7a, 0x19, 0xce, 0x59, 0xbe, 0x47, 0xb0, 0x47, 0x0c, 0x17, 0x7b, 0x07, 0xe4, 0x90, - 0xd1, 0x6a, 0xeb, 0x6d, 0x01, 0x7d, 0xc0, 0x80, 0xe8, 0x32, 0xb4, 0x24, 0x1a, 0x19, 0x04, 0x58, - 0x48, 0xd9, 0x14, 0xb0, 0x47, 0x83, 0x00, 0xa3, 0x2b, 0xd0, 0x76, 0xcd, 0x88, 0x18, 0x3d, 0xdf, - 0x76, 0xba, 0x0e, 0xb6, 0x99, 0xd0, 0x13, 0x7a, 0x8b, 0x02, 0x77, 0x04, 0x0c, 0xa9, 0xfc, 0x50, - 0x3d, 0xb3, 0x87, 0x99, 0xf4, 0x0d, 0x3d, 0x1e, 0xd3, 0xed, 0x61, 0x62, 0x1e, 0x74, 0xa6, 0x18, - 0x9c, 0x7d, 0xa3, 0x65, 0x00, 0x27, 0x62, 0x32, 0x06, 0xd8, 0xee, 0x4c, 0x33, 0x31, 0x1b, 0x4e, - 0x74, 0x8f, 0x03, 0xd0, 0x87, 0x30, 0x7d, 0x88, 0x4d, 0x1b, 0x87, 0x51, 0xa7, 0xce, 0x4e, 0x7c, - 0x3d, 0x7f, 0xe2, 0x19, 0x2d, 0xac, 0x7f, 0xc8, 0x17, 0xdc, 0xf5, 0x48, 0x38, 0xd0, 0xe5, 0x72, - 0xb4, 0x04, 0x0d, 0x76, 0x64, 0x5b, 0xbe, 0x8d, 0x3b, 0x0d, 0x76, 0xb4, 0x43, 0x80, 0x7a, 0x13, - 0x5a, 0xc9, 0x65, 0x68, 0x16, 0x6a, 0x47, 0x78, 0x20, 0xce, 0x84, 0x7e, 0xd2, 0xf3, 0x3f, 0x36, - 0xdd, 0x3e, 0x66, 0xea, 0x6b, 0xe8, 0x7c, 0x70, 0x73, 0xfc, 0x86, 0xa2, 0x4d, 0xc3, 0xe4, 0xdd, - 0x5e, 
0x40, 0x06, 0xda, 0x3b, 0xd0, 0x79, 0x6c, 0x5a, 0xfd, 0x7e, 0xef, 0x31, 0xdb, 0xe2, 0xd6, - 0x21, 0xb6, 0x8e, 0xe4, 0x41, 0x2f, 0x42, 0x43, 0x6c, 0x5c, 0x1c, 0x75, 0x5b, 0xaf, 0x73, 0xc0, - 0x7d, 0x5b, 0xfb, 0x00, 0x5e, 0x2c, 0x58, 0x28, 0x0e, 0xf5, 0x0a, 0xb4, 0x0f, 0xcc, 0x70, 0xdf, - 0x3c, 0xc0, 0x46, 0x68, 0x12, 0xc7, 0x67, 0xab, 0x15, 0xbd, 0x25, 0x80, 0x3a, 0x85, 0x69, 0x5f, - 0x80, 0x9a, 0xa2, 0xe0, 0xf7, 0x02, 0xd3, 0x22, 0x55, 0x98, 0xa3, 0x15, 0x68, 0x06, 0x21, 0x36, - 0x5d, 0xd7, 0xb7, 0x4c, 0xc2, 0xc5, 0xab, 0xe9, 0x49, 0x90, 0xb6, 0x0c, 0x8b, 0x85, 0xc4, 0xf9, - 0x06, 0xb5, 0x1b, 0x99, 0xdd, 0xfb, 0xbd, 0x9e, 0x53, 0x89, 0xb5, 0xb6, 0x94, 0xdb, 0x35, 0x5b, - 0x29, 0xe8, 0xfe, 0x77, 0x66, 0xd6, 0xc5, 0xa6, 0xd7, 0x0f, 0x2a, 0x11, 0xce, 0xee, 0x58, 0x2e, - 0x8d, 0x29, 0x5f, 0xe0, 0xc1, 0x60, 0xcb, 0x77, 0x5d, 0x6c, 0x11, 0xc7, 0xf7, 0x24, 0xd9, 0x8b, - 0x00, 0x56, 0x0c, 0x14, 0xe7, 0x9f, 0x80, 0x68, 0x2a, 0x74, 0xf2, 0x4b, 0x05, 0xd9, 0x3f, 0x2a, - 0xf0, 0xc2, 0x2d, 0xa1, 0x34, 0xce, 0xb8, 0xd2, 0x01, 0xa4, 0x59, 0x8e, 0x67, 0x59, 0x66, 0x0f, - 0xa8, 0x96, 0x3b, 0x20, 0x8a, 0x11, 0xe2, 0xc0, 0x75, 0x2c, 0x93, 0x91, 0x98, 0xe0, 0xbe, 0x9b, - 0x00, 0x51, 0x7b, 0x26, 0xc4, 0x15, 0x1e, 0x49, 0x3f, 0xd1, 0x26, 0x2c, 0xf4, 0x70, 0xcf, 0x0f, - 0x07, 0x46, 0xcf, 0x0c, 0x8c, 0x9e, 0xf9, 0xd4, 0xa0, 0xc1, 0xcb, 0xe8, 0xed, 0x33, 0xf7, 0x6c, - 0xeb, 0x88, 0xcf, 0xee, 0x98, 0xc1, 0x8e, 0xf9, 0x74, 0xcf, 0x79, 0x86, 0x77, 0xf6, 0xb5, 0x0e, - 0x2c, 0x64, 0xe5, 0x13, 0xa2, 0xff, 0x17, 0x5c, 0xe0, 0x90, 0xbd, 0x81, 0x67, 0xed, 0xb1, 0x88, - 0x59, 0xe9, 0xa0, 0xfe, 0xae, 0x40, 0x27, 0xbf, 0x50, 0x58, 0xfe, 0xf3, 0x6a, 0xed, 0xd4, 0x3a, - 0xb9, 0x04, 0x4d, 0x62, 0x3a, 0xae, 0xe1, 0x77, 0xbb, 0x11, 0x26, 0x4c, 0x11, 0x13, 0x3a, 0x50, - 0xd0, 0x43, 0x06, 0x41, 0x6b, 0x30, 0x6b, 0x71, 0xeb, 0x37, 0x42, 0x7c, 0xec, 0xb0, 0x18, 0x3f, - 0xcd, 0x36, 0x36, 0x63, 0x49, 0xaf, 0xe0, 0x60, 0xa4, 0x41, 0xdb, 0xb1, 0x9f, 0x1a, 0x2c, 0xba, - 0xb3, 0x2b, 0xa2, 0xce, 0xa8, 0x35, 0x1d, 0xfb, 0x29, 0x0d, 0x58, 0x54, 0xa3, 0xda, 0x63, 0x58, - 0xe2, 0xc2, 0xdf, 0xf7, 0xac, 0x10, 0xf7, 0xb0, 0x47, 0x4c, 0x77, 0xcb, 0x0f, 0x06, 0x95, 0xcc, - 0xe6, 0x45, 0xa8, 0x47, 0x8e, 0x67, 0x61, 0xc3, 0xe3, 0x57, 0xd5, 0x84, 0x3e, 0xcd, 0xc6, 0xbb, - 0x91, 0x76, 0x1b, 0x96, 0x4b, 0xe8, 0x0a, 0xcd, 0x5e, 0x86, 0x16, 0xdb, 0x98, 0x08, 0xef, 0xe2, - 0xc2, 0x68, 0x52, 0xd8, 0x16, 0x07, 0x69, 0x6f, 0x00, 0xe2, 0x34, 0x76, 0xfc, 0xbe, 0x57, 0xcd, - 0x9d, 0x5f, 0x80, 0xb9, 0xd4, 0x12, 0x61, 0x1b, 0x6f, 0xc2, 0x3c, 0x07, 0x7f, 0xe6, 0xf5, 0x2a, - 0xd3, 0xba, 0x00, 0x2f, 0x64, 0x16, 0x09, 0x6a, 0x9b, 0x92, 0x49, 0x3a, 0x99, 0x38, 0x91, 0xd8, - 0x82, 0xdc, 0x41, 0x3a, 0x9f, 0x60, 0x91, 0x8b, 0x6f, 0xd8, 0x0c, 0x8f, 0x74, 0x6c, 0xda, 0xbe, - 0xe7, 0x0e, 0x2a, 0x47, 0xae, 0x82, 0x95, 0x82, 0xee, 0xe7, 0xb0, 0x20, 0x23, 0x9a, 0xd7, 0x75, - 0x0e, 0xfa, 0x21, 0xae, 0x1a, 0x89, 0x93, 0x26, 0x3b, 0x9e, 0x33, 0x59, 0x6d, 0x43, 0xba, 0x59, - 0x82, 0xb0, 0x38, 0xd2, 0x38, 0x3f, 0x51, 0x12, 0xf9, 0x89, 0xf6, 0x5b, 0x05, 0xce, 0xcb, 0x15, - 0x15, 0xed, 0xea, 0x94, 0x8e, 0x55, 0x2b, 0x75, 0xac, 0x89, 0xa1, 0x63, 0xad, 0xc2, 0x6c, 0xe4, - 0xf7, 0x43, 0x0b, 0x1b, 0x34, 0x27, 0x31, 0x3c, 0x7a, 0x07, 0x73, 0xbf, 0x3b, 0xc7, 0xe1, 0x77, - 0x4c, 0x62, 0xee, 0xfa, 0x36, 0xd6, 0xfe, 0x57, 0x9a, 0x5d, 0xca, 0x5e, 0xd7, 0xe0, 0x3c, 0x4b, - 0x3d, 0xcc, 0x20, 0xc0, 0x9e, 0x6d, 0x98, 0x84, 0x1a, 0xbd, 0xc2, 0x8c, 0xfe, 0x1c, 0x9d, 0xb8, - 0xc5, 0xe0, 0xb7, 0xc8, 0x6e, 0xa4, 0xfd, 0x62, 0x1c, 0x66, 0xe8, 0x5a, 0xea, 0x64, 0x95, 0xe4, - 0x9d, 0x85, 0x1a, 0x7e, 0x4a, 
0x84, 0xa0, 0xf4, 0x13, 0x6d, 0xc0, 0x9c, 0xf0, 0x66, 0xc7, 0xf7, - 0x86, 0x8e, 0x5e, 0xe3, 0x71, 0x71, 0x38, 0x15, 0xfb, 0xfa, 0x25, 0x68, 0x46, 0xc4, 0x0f, 0x64, - 0xdc, 0xe0, 0x79, 0x11, 0x50, 0x90, 0x88, 0x1b, 0x69, 0x9d, 0x4e, 0x16, 0xe8, 0xb4, 0xe5, 0x44, - 0x06, 0xb6, 0x0c, 0xbe, 0x2b, 0x16, 0x79, 0xea, 0x3a, 0x38, 0xd1, 0x5d, 0x8b, 0x6b, 0x03, 0xbd, - 0x0f, 0x4b, 0xce, 0x81, 0xe7, 0x87, 0xd8, 0x10, 0x8a, 0x64, 0xfe, 0xeb, 0xf9, 0xc4, 0xe8, 0xfa, - 0x7d, 0x4f, 0x66, 0x4e, 0x1d, 0x8e, 0xb3, 0xc7, 0x50, 0xa8, 0x06, 0x76, 0x7d, 0xb2, 0x4d, 0xe7, - 0xb5, 0xb7, 0x61, 0x76, 0xa8, 0x95, 0xea, 0x51, 0xe0, 0x5b, 0x45, 0x5a, 0xdc, 0x23, 0xd3, 0x71, - 0xf7, 0xb0, 0x67, 0xe3, 0xf0, 0x39, 0xa3, 0x13, 0xba, 0x0e, 0xf3, 0x8e, 0xed, 0x62, 0x83, 0x38, - 0x3d, 0xec, 0xf7, 0x89, 0x11, 0x61, 0xcb, 0xf7, 0xec, 0x48, 0xea, 0x97, 0xce, 0x3d, 0xe2, 0x53, - 0x7b, 0x7c, 0x46, 0xfb, 0x61, 0x7c, 0x4b, 0x24, 0x77, 0x31, 0xcc, 0x8f, 0x3c, 0x8c, 0x29, 0x41, - 0x9e, 0xea, 0x09, 0x31, 0x5a, 0x1c, 0xc8, 0xb3, 0x3a, 0x7a, 0x42, 0x02, 0x69, 0xdf, 0xb7, 0x07, - 0x6c, 0x47, 0x2d, 0x1d, 0x38, 0xe8, 0xb6, 0x6f, 0x0f, 0x58, 0xb8, 0x8e, 0x0c, 0x66, 0x64, 0xd6, - 0x61, 0xdf, 0x3b, 0x62, 0xbb, 0xa9, 0xeb, 0x4d, 0x27, 0x7a, 0x60, 0x46, 0x64, 0x8b, 0x82, 0xb4, - 0xdf, 0x29, 0x32, 0x5e, 0xd0, 0x6d, 0xe8, 0xd8, 0xc2, 0xce, 0xf1, 0xbf, 0x41, 0x1d, 0x74, 0x85, - 0x30, 0x82, 0x54, 0x2e, 0x2c, 0x1c, 0x0e, 0xf1, 0x39, 0x71, 0xab, 0xb2, 0x99, 0x61, 0xb8, 0x4a, - 0x6f, 0x5c, 0x84, 0xab, 0x2f, 0xe5, 0x75, 0x71, 0xd7, 0xda, 0x3b, 0x34, 0x43, 0x3b, 0xba, 0x87, - 0x3d, 0x1c, 0x9a, 0xe4, 0x4c, 0xd2, 0x17, 0x6d, 0x05, 0x2e, 0x96, 0x51, 0x17, 0xfc, 0xbf, 0x90, - 0xd7, 0xa0, 0xc4, 0xd0, 0xf1, 0x7e, 0xdf, 0x71, 0xed, 0x33, 0x61, 0xff, 0x71, 0x56, 0xb8, 0x98, - 0xb8, 0xb0, 0x9f, 0xab, 0x70, 0x3e, 0x64, 0x20, 0x62, 0x44, 0x14, 0x21, 0x7e, 0x8f, 0xb6, 0xf5, - 0x19, 0x31, 0xc1, 0x16, 0xd2, 0x77, 0xe9, 0x4f, 0xc6, 0xa5, 0x05, 0x48, 0x6a, 0x67, 0x16, 0x56, - 0x17, 0xa1, 0x31, 0x64, 0x5f, 0x63, 0xec, 0xeb, 0x91, 0xe0, 0x4b, 0xad, 0xd3, 0xf2, 0x83, 0x81, - 0x81, 0x2d, 0x9e, 0x51, 0xb0, 0xa3, 0xae, 0xd3, 0xe7, 0x59, 0x30, 0xb8, 0x6b, 0xb1, 0x84, 0xa2, - 0x7a, 0x8c, 0x4d, 0x50, 0xfb, 0x8a, 0x53, 0x9b, 0x4a, 0x52, 0xfb, 0x8a, 0x51, 0x93, 0x38, 0xc7, - 0x4e, 0x97, 0xe3, 0x4c, 0x0f, 0x71, 0x1e, 0x3b, 0x5d, 0x8a, 0x33, 0xb4, 0xaa, 0xb4, 0x32, 0xc4, - 0xa9, 0x7e, 0x0d, 0x8b, 0xe9, 0xd9, 0xea, 0x17, 0xf6, 0x73, 0x29, 0x4b, 0xbb, 0x98, 0x35, 0xa7, - 0xcc, 0xad, 0x7f, 0x9c, 0xdd, 0x76, 0xe5, 0x0c, 0xe7, 0xf9, 0xf6, 0xb5, 0x9c, 0x55, 0x48, 0x3a, - 0x4d, 0xfa, 0xbf, 0xec, 0xb6, 0x4f, 0x91, 0x2e, 0x9d, 0xcc, 0xf8, 0x52, 0xd6, 0x05, 0xb2, 0x39, - 0xd5, 0x2f, 0xe3, 0xf8, 0x2a, 0x30, 0x68, 0x46, 0x53, 0x39, 0xae, 0x09, 0xbe, 0xa2, 0xae, 0x30, - 0x2d, 0xd8, 0xa2, 0x05, 0x98, 0x12, 0xf7, 0x21, 0x7f, 0xb1, 0x88, 0x51, 0xaa, 0x64, 0x52, 0x13, - 0x25, 0x13, 0x59, 0x0a, 0xa2, 0x6f, 0xee, 0x49, 0x1e, 0x1e, 0xe9, 0xf8, 0x63, 0x3c, 0xd0, 0x76, - 0x33, 0x1e, 0xc7, 0xb7, 0x76, 0x42, 0xc1, 0x83, 0x57, 0x14, 0x6c, 0x76, 0xe6, 0xb6, 0x28, 0x9c, - 0x34, 0x1c, 0x61, 0x04, 0xb6, 0xf6, 0x53, 0x65, 0x48, 0xf0, 0xb6, 0xeb, 0xef, 0x9f, 0xa1, 0x55, - 0x26, 0xa5, 0xa8, 0xa5, 0xa4, 0x48, 0xd6, 0x84, 0x26, 0xd2, 0x35, 0xa1, 0x84, 0x13, 0x25, 0xb7, - 0x53, 0x16, 0x9a, 0x1f, 0xf9, 0x67, 0xf7, 0xb2, 0xcc, 0x87, 0xe6, 0x21, 0x75, 0xc1, 0xff, 0x26, - 0x2c, 0x52, 0x85, 0x73, 0x28, 0x7b, 0xb7, 0x54, 0x7f, 0xdb, 0xfd, 0x79, 0x1c, 0x96, 0x8a, 0x17, - 0x57, 0x79, 0xdf, 0xbd, 0x0b, 0x6a, 0xfc, 0x7e, 0xa2, 0x57, 0x63, 0x44, 0xcc, 0x5e, 0x10, 0x5f, - 0x8e, 0xfc, 0x0e, 0xbd, 0x20, 0x1e, 0x53, 0x8f, 0xe4, 
0xbc, 0xbc, 0x21, 0x73, 0x8f, 0xaf, 0x5a, - 0xee, 0xf1, 0x45, 0x19, 0xd8, 0x26, 0x29, 0x63, 0xc0, 0x73, 0xb8, 0x0b, 0xb6, 0x49, 0xca, 0x18, - 0xc4, 0x8b, 0x19, 0x03, 0x6e, 0xb5, 0x4d, 0x81, 0xcf, 0x18, 0x2c, 0x03, 0x88, 0xf4, 0xaa, 0xef, - 0xc9, 0xc7, 0x64, 0x83, 0x27, 0x57, 0x7d, 0xaf, 0x34, 0xcb, 0x9c, 0x2e, 0xcd, 0x32, 0xd3, 0xa7, - 0x59, 0xcf, 0x9d, 0xe6, 0xaf, 0x14, 0x80, 0x3b, 0x4e, 0x74, 0xc4, 0xb5, 0x4c, 0xf3, 0x5a, 0xdb, - 0x91, 0xcf, 0x01, 0xfa, 0x49, 0x21, 0xa6, 0xeb, 0x0a, 0xdd, 0xd1, 0x4f, 0xea, 0x3f, 0xfd, 0x08, - 0xdb, 0x42, 0x3d, 0xec, 0x9b, 0xc2, 0xba, 0x21, 0xc6, 0x42, 0x03, 0xec, 0x9b, 0x66, 0x8a, 0x01, - 0x0e, 0x2d, 0xec, 0x11, 0x83, 0xcd, 0x51, 0x69, 0xc7, 0xf5, 0xa6, 0x80, 0x6d, 0x67, 0x50, 0x18, - 0xc9, 0xa9, 0x14, 0xca, 0x67, 0x11, 0xb6, 0xb5, 0xdf, 0x28, 0xd0, 0xd8, 0xc1, 0x3d, 0xb1, 0xbf, - 0x8b, 0x00, 0x07, 0x7e, 0xe8, 0xf7, 0x89, 0xe3, 0x61, 0x9e, 0xcc, 0x4f, 0xea, 0x09, 0xc8, 0x73, - 0xec, 0x96, 0x46, 0x18, 0xec, 0x76, 0xc5, 0x99, 0xb0, 0x6f, 0x0a, 0x3b, 0xc4, 0x66, 0x20, 0x8e, - 0x81, 0x7d, 0xd3, 0x27, 0x53, 0x44, 0x4c, 0xeb, 0x88, 0xe9, 0x7c, 0x42, 0xe7, 0x03, 0xed, 0x0f, - 0x0a, 0x80, 0x8e, 0x7b, 0x3e, 0x61, 0x26, 0x4b, 0xe5, 0xda, 0x37, 0xad, 0x23, 0xfa, 0xec, 0x60, - 0x85, 0x51, 0xae, 0xcf, 0xa6, 0x80, 0xb1, 0xc2, 0xe8, 0x32, 0x80, 0x44, 0x11, 0x61, 0xb0, 0xa1, - 0x37, 0x04, 0x84, 0x3f, 0x30, 0x64, 0x44, 0x10, 0xb5, 0xc4, 0x61, 0x68, 0xe4, 0xdb, 0x96, 0xa1, - 0x71, 0x11, 0x1a, 0x59, 0x8b, 0x62, 0x11, 0x85, 0x99, 0xd3, 0x15, 0x68, 0xcb, 0xca, 0x2b, 0xb3, - 0x57, 0x21, 0x4a, 0x4b, 0x02, 0xa9, 0x8d, 0xb2, 0x2a, 0xe7, 0x53, 0x82, 0xbd, 0xd8, 0x94, 0x1a, - 0xfa, 0x10, 0xa0, 0x7d, 0x03, 0x20, 0xeb, 0x02, 0x5d, 0x1f, 0x6d, 0xc2, 0x24, 0x25, 0x2e, 0x6b, - 0xe9, 0x4b, 0xf9, 0xca, 0xea, 0x50, 0x0d, 0x3a, 0x47, 0x4d, 0xc6, 0xb1, 0xf1, 0x54, 0x1c, 0x1b, - 0xfd, 0x2c, 0xd4, 0xbe, 0x53, 0x60, 0x45, 0x64, 0xa1, 0x0e, 0x0e, 0x77, 0xfc, 0x63, 0x9a, 0x91, - 0x3c, 0xf2, 0x39, 0x93, 0x33, 0x09, 0xc0, 0x37, 0xa0, 0x63, 0xe3, 0x88, 0x38, 0x1e, 0x63, 0x68, - 0xc8, 0x43, 0x61, 0xc5, 0x68, 0xbe, 0xa1, 0x85, 0xc4, 0xfc, 0x6d, 0x3e, 0xbd, 0x6b, 0xf6, 0x30, - 0xba, 0x06, 0x73, 0x47, 0x18, 0x07, 0x86, 0xeb, 0x5b, 0xa6, 0x6b, 0x48, 0xd7, 0x16, 0x69, 0xd6, - 0x2c, 0x9d, 0x7a, 0x40, 0x67, 0xee, 0x70, 0xf7, 0xd6, 0x22, 0xb8, 0x7c, 0x82, 0x24, 0x22, 0xbc, - 0x2d, 0x41, 0x23, 0x08, 0x7d, 0x0b, 0x47, 0xd4, 0x66, 0x15, 0x76, 0xdb, 0x0d, 0x01, 0xe8, 0x3a, - 0xcc, 0xc5, 0x83, 0x4f, 0xb8, 0x93, 0x98, 0x07, 0xbc, 0xfc, 0x3a, 0xae, 0x17, 0x4d, 0x69, 0x3f, - 0x57, 0x40, 0xcb, 0x71, 0xdd, 0x0e, 0xfd, 0xde, 0x19, 0x6a, 0x70, 0x03, 0xe6, 0x99, 0x1e, 0x42, - 0x46, 0x72, 0xa8, 0x08, 0xfe, 0x1a, 0x3a, 0x4f, 0xe7, 0x38, 0x37, 0xa9, 0x89, 0x3e, 0x5c, 0x39, - 0x71, 0x4f, 0xff, 0x22, 0x5d, 0x2c, 0xca, 0x4b, 0x9c, 0x3f, 0x70, 0x52, 0xb7, 0x92, 0xf6, 0x6b, - 0x45, 0xde, 0xa9, 0xe9, 0x59, 0xb1, 0x97, 0x5b, 0xd0, 0xb6, 0x9d, 0xe8, 0xc8, 0xe0, 0x8d, 0x9d, - 0x93, 0xec, 0x7f, 0x18, 0x4d, 0xf5, 0x96, 0x1d, 0x7f, 0xe3, 0x08, 0x7d, 0x00, 0x6d, 0x51, 0x3c, - 0x4d, 0xf4, 0x8a, 0x9a, 0x9b, 0x8b, 0x79, 0x12, 0x71, 0xbc, 0xd3, 0x5b, 0x7c, 0x05, 0x1f, 0x69, - 0x7f, 0x6b, 0x41, 0xeb, 0xd3, 0x3e, 0x0e, 0x07, 0x89, 0xc2, 0x73, 0x84, 0xc5, 0x31, 0xc8, 0x7e, - 0x58, 0x02, 0x42, 0x6f, 0x9c, 0x6e, 0xe8, 0xf7, 0x8c, 0xb8, 0x65, 0x36, 0xce, 0x50, 0x9a, 0x14, - 0xb8, 0xcd, 0xdb, 0x66, 0xe8, 0x3d, 0x98, 0xea, 0x3a, 0x2e, 0xc1, 0xbc, 0x49, 0xd5, 0xdc, 0x7c, - 0x39, 0xbf, 0x9f, 0x24, 0xcf, 0xf5, 0x6d, 0x86, 0xac, 0x8b, 0x45, 0x68, 0x1f, 0xe6, 0x1c, 0x2f, - 0x60, 0x4f, 0xd0, 0xd0, 0x31, 0x5d, 0xe7, 0xd9, 0xb0, 0x74, 0xda, 0xdc, 0x7c, 
0x63, 0x04, 0xad, - 0xfb, 0x74, 0xe5, 0x5e, 0x72, 0xa1, 0x8e, 0x9c, 0x1c, 0x0c, 0x61, 0x98, 0xf7, 0xfb, 0x24, 0xcf, - 0x64, 0x92, 0x31, 0xd9, 0x1c, 0xc1, 0xe4, 0x21, 0x5b, 0x9a, 0xe6, 0x32, 0xe7, 0xe7, 0x81, 0xea, - 0x2e, 0x4c, 0x71, 0xe1, 0x68, 0x90, 0xef, 0x3a, 0xd8, 0x95, 0xfd, 0x35, 0x3e, 0xa0, 0x71, 0xcc, - 0x0f, 0x70, 0x68, 0x7a, 0x32, 0x5e, 0xcb, 0xe1, 0xb0, 0xcf, 0x53, 0x4b, 0xf4, 0x79, 0xd4, 0xdf, - 0x4f, 0x02, 0xca, 0x4b, 0x28, 0xeb, 0xc1, 0x21, 0x8e, 0x68, 0x0c, 0x4c, 0x5e, 0x10, 0x33, 0x09, - 0x38, 0xbb, 0x24, 0x3e, 0x87, 0x86, 0x15, 0x1d, 0x1b, 0x4c, 0x25, 0xc2, 0x5c, 0x6e, 0x9e, 0x5a, - 0xa5, 0xeb, 0x5b, 0x7b, 0x8f, 0x19, 0x54, 0xaf, 0x5b, 0xd1, 0x31, 0xfb, 0x42, 0xdf, 0x03, 0xf8, - 0x2a, 0xf2, 0x3d, 0x41, 0x99, 0x1f, 0xfc, 0xbb, 0xa7, 0xa7, 0xfc, 0xd1, 0xde, 0xc3, 0x5d, 0x4e, - 0xba, 0x41, 0xc9, 0x71, 0xda, 0x16, 0xb4, 0x03, 0x33, 0x7c, 0xd2, 0xc7, 0x44, 0x90, 0xe7, 0xb6, - 0xf0, 0xfe, 0xe9, 0xc9, 0x7f, 0xc2, 0xc9, 0x70, 0x0e, 0xad, 0x20, 0x31, 0x52, 0xbf, 0x1b, 0x87, - 0xba, 0x94, 0x8b, 0xbe, 0x62, 0x99, 0x85, 0xf3, 0x5a, 0x8e, 0xe1, 0x78, 0x5d, 0x5f, 0x68, 0xf4, - 0x1c, 0x85, 0xf3, 0x72, 0x0e, 0xbb, 0xbe, 0xd6, 0x60, 0x36, 0xc4, 0x96, 0x1f, 0xda, 0x34, 0xd7, - 0x77, 0x7a, 0x0e, 0x35, 0x7b, 0x7e, 0x96, 0x33, 0x1c, 0x7e, 0x47, 0x82, 0xd1, 0xab, 0x30, 0xc3, - 0x8e, 0x3d, 0x81, 0x59, 0x93, 0x34, 0xb1, 0x9b, 0x40, 0x5c, 0x83, 0xd9, 0x27, 0x7d, 0x1a, 0xf8, - 0xac, 0x43, 0x33, 0x34, 0x2d, 0xe2, 0xc7, 0x55, 0x95, 0x19, 0x06, 0xdf, 0x8a, 0xc1, 0xe8, 0x2d, - 0x58, 0xe0, 0xa8, 0x38, 0xb2, 0xcc, 0x20, 0x5e, 0x81, 0x43, 0xf1, 0xe8, 0x9e, 0x67, 0xb3, 0x77, - 0xd9, 0xe4, 0x96, 0x9c, 0x43, 0x2a, 0xd4, 0x2d, 0xbf, 0xd7, 0xc3, 0x1e, 0x89, 0x44, 0x1b, 0x34, - 0x1e, 0xa3, 0x5b, 0xb0, 0x6c, 0xba, 0xae, 0xff, 0xb5, 0xc1, 0x56, 0xda, 0x46, 0x4e, 0x3a, 0xfe, - 0x04, 0x57, 0x19, 0xd2, 0xa7, 0x0c, 0x47, 0x4f, 0x0b, 0xaa, 0x5e, 0x82, 0x46, 0x7c, 0x8e, 0x34, - 0xe5, 0x49, 0x18, 0x24, 0xfb, 0x56, 0xcf, 0x41, 0x2b, 0x79, 0x12, 0xea, 0x5f, 0x6a, 0x30, 0x57, - 0xe0, 0x54, 0xe8, 0x0b, 0x00, 0x6a, 0xad, 0xdc, 0xb5, 0x84, 0xb9, 0xfe, 0xcf, 0xe9, 0x9d, 0x93, - 0xda, 0x2b, 0x07, 0xeb, 0xd4, 0xfa, 0xf9, 0x27, 0xfa, 0x3e, 0x34, 0x99, 0xc5, 0x0a, 0xea, 0xdc, - 0x64, 0xdf, 0xfb, 0x27, 0xa8, 0x53, 0x59, 0x05, 0x79, 0xe6, 0x03, 0xfc, 0x5b, 0xfd, 0x93, 0x02, - 0x8d, 0x98, 0x31, 0x4d, 0xe0, 0xf8, 0x41, 0xb1, 0xb3, 0x8e, 0x64, 0x02, 0xc7, 0x60, 0xdb, 0x0c, - 0xf4, 0x1f, 0x69, 0x4a, 0xea, 0x3b, 0x00, 0x43, 0xf9, 0x0b, 0x45, 0x50, 0x0a, 0x45, 0xd0, 0xd6, - 0xa0, 0x4d, 0x35, 0xeb, 0x60, 0x7b, 0x8f, 0x84, 0x4e, 0xc0, 0x7e, 0xb0, 0xc0, 0x71, 0x22, 0xf1, - 0x90, 0x96, 0xc3, 0xcd, 0xbf, 0x2e, 0x41, 0x2b, 0x79, 0x93, 0xa2, 0x2f, 0xa1, 0x99, 0xf8, 0x61, - 0x06, 0x7a, 0x29, 0x7f, 0x68, 0xf9, 0x1f, 0x7a, 0xa8, 0x2f, 0x8f, 0xc0, 0x12, 0x6f, 0xcd, 0x31, - 0xa4, 0xc3, 0xb4, 0x68, 0xe6, 0xa3, 0x95, 0x13, 0xfa, 0xfc, 0x9c, 0xea, 0xe5, 0x91, 0xbf, 0x04, - 0xd0, 0xc6, 0xae, 0x2b, 0xc8, 0x83, 0xf3, 0xb9, 0xde, 0x3a, 0xba, 0x9a, 0x5f, 0x5b, 0xd6, 0xb9, - 0x57, 0x5f, 0xab, 0x84, 0x1b, 0xcb, 0x40, 0x60, 0xae, 0xa0, 0x59, 0x8e, 0x5e, 0x1f, 0x41, 0x25, - 0xd5, 0xb0, 0x57, 0xaf, 0x55, 0xc4, 0x8e, 0xb9, 0x3e, 0x01, 0x94, 0xef, 0xa4, 0xa3, 0xd7, 0x46, - 0x92, 0x19, 0x76, 0xea, 0xd5, 0xd7, 0xab, 0x21, 0x97, 0x0a, 0xca, 0x7b, 0xec, 0x23, 0x05, 0x4d, - 0x75, 0xf1, 0x47, 0x0a, 0x9a, 0x69, 0xdc, 0x8f, 0xa1, 0x23, 0x98, 0xcd, 0xf6, 0xdf, 0xd1, 0x5a, - 0xd9, 0xaf, 0x80, 0x72, 0xed, 0x7d, 0xf5, 0x6a, 0x15, 0xd4, 0x98, 0x19, 0x86, 0x73, 0xe9, 0x7e, - 0x37, 0x7a, 0x35, 0xbf, 0xbe, 0xb0, 0xe3, 0xaf, 0xae, 0x8e, 0x46, 0x4c, 0xca, 0x94, 0xed, 0x81, - 0x17, 
0xc9, 0x54, 0xd2, 0x60, 0x2f, 0x92, 0xa9, 0xac, 0xa5, 0xae, 0x8d, 0xa1, 0x6f, 0x64, 0x63, - 0x35, 0xd3, 0x1b, 0x46, 0xeb, 0x65, 0x64, 0x8a, 0x9b, 0xd3, 0xea, 0x46, 0x65, 0xfc, 0x84, 0x37, - 0x7e, 0x09, 0xcd, 0x44, 0x8b, 0xb8, 0x28, 0x7e, 0xe4, 0x9b, 0xce, 0x45, 0xf1, 0xa3, 0xa8, 0xcf, - 0x3c, 0x86, 0xf6, 0xa1, 0x9d, 0x6a, 0x1a, 0xa3, 0x57, 0xca, 0x56, 0xa6, 0x6b, 0xab, 0xea, 0xab, - 0x23, 0xf1, 0x62, 0x1e, 0x86, 0x8c, 0x88, 0x22, 0x04, 0x96, 0x6e, 0x2e, 0x1d, 0x03, 0x5f, 0x19, - 0x85, 0x96, 0x72, 0xe5, 0x5c, 0x6b, 0xb9, 0xd0, 0x95, 0xcb, 0x5a, 0xd7, 0x85, 0xae, 0x5c, 0xde, - 0xad, 0x1e, 0x43, 0x87, 0x30, 0x93, 0x69, 0x2b, 0xa3, 0xd5, 0x32, 0x12, 0xd9, 0x96, 0xb6, 0xba, - 0x56, 0x01, 0x33, 0xe6, 0xf4, 0xff, 0xb2, 0x02, 0xc1, 0x4c, 0xee, 0x4a, 0xf9, 0xd2, 0xa1, 0x9d, - 0xbd, 0x74, 0x32, 0x52, 0x4c, 0xfa, 0x6b, 0x98, 0x2f, 0xaa, 0x36, 0xa2, 0x6b, 0x45, 0x75, 0x8d, - 0xd2, 0x92, 0xa6, 0xba, 0x5e, 0x15, 0x3d, 0x66, 0xfc, 0x19, 0xd4, 0x65, 0x6b, 0x15, 0x15, 0x5c, - 0x4a, 0x99, 0x66, 0xb4, 0xaa, 0x9d, 0x84, 0x92, 0x70, 0x95, 0x9e, 0x8c, 0x0a, 0xc3, 0x9e, 0x67, - 0x79, 0x54, 0xc8, 0x75, 0x67, 0xcb, 0xa3, 0x42, 0xbe, 0x85, 0xca, 0xd8, 0xc5, 0x66, 0x97, 0x6c, - 0x11, 0x96, 0x9b, 0x5d, 0x41, 0x07, 0xb4, 0xdc, 0xec, 0x0a, 0xbb, 0x8e, 0x63, 0xe8, 0x07, 0xf2, - 0x67, 0x12, 0xd9, 0xce, 0x20, 0x2a, 0x8d, 0x2d, 0x25, 0x1d, 0x4a, 0xf5, 0x7a, 0xf5, 0x05, 0x31, - 0xfb, 0x67, 0x32, 0x12, 0x66, 0x3a, 0x83, 0xe5, 0x91, 0xb0, 0xb8, 0x3f, 0xa9, 0x6e, 0x54, 0xc6, - 0xcf, 0x3b, 0x79, 0xb2, 0x75, 0x56, 0xae, 0xed, 0x82, 0x6e, 0x63, 0xb9, 0xb6, 0x0b, 0xbb, 0x71, - 0xcc, 0x3f, 0x8a, 0xda, 0x62, 0x45, 0xfe, 0x71, 0x42, 0xdf, 0x4e, 0x5d, 0xaf, 0x8a, 0x9e, 0x4a, - 0x14, 0xf2, 0x7d, 0x2f, 0x34, 0x72, 0xff, 0xa9, 0x3b, 0xe0, 0x5a, 0x45, 0xec, 0xf2, 0xd3, 0x95, - 0x77, 0xc2, 0x48, 0x01, 0x32, 0x77, 0xc3, 0x46, 0x65, 0xfc, 0x98, 0x77, 0x20, 0x7f, 0x74, 0x93, - 0xe8, 0x59, 0xa1, 0xab, 0x23, 0xe8, 0x24, 0x7a, 0x6e, 0xea, 0x6b, 0x95, 0x70, 0x8b, 0xbc, 0x37, - 0xd9, 0x45, 0x3a, 0xc9, 0x9e, 0x72, 0xad, 0xaf, 0x93, 0xec, 0xa9, 0xa0, 0x31, 0x55, 0xe0, 0xbd, - 0xb2, 0x79, 0x34, 0xda, 0x7b, 0x33, 0x4d, 0xac, 0xd1, 0xde, 0x9b, 0xeb, 0x4b, 0x8d, 0xa1, 0x1f, - 0x0f, 0x7f, 0x8c, 0x91, 0xaf, 0xc1, 0xa2, 0xcd, 0xd2, 0x50, 0x54, 0x5a, 0x7a, 0x56, 0xdf, 0x3c, - 0xd5, 0x9a, 0x84, 0xf2, 0x7f, 0xa6, 0xc8, 0xce, 0x6e, 0x61, 0x11, 0x14, 0xbd, 0x55, 0x81, 0x70, - 0xae, 0x8e, 0xab, 0xbe, 0x7d, 0xca, 0x55, 0x45, 0xd6, 0x90, 0xac, 0x7f, 0x96, 0x5b, 0x43, 0x41, - 0x0d, 0xb5, 0xdc, 0x1a, 0x8a, 0x4a, 0xaa, 0xda, 0x18, 0x7a, 0x00, 0x93, 0xec, 0xb9, 0x8e, 0x2e, - 0x9e, 0xfc, 0x8e, 0x57, 0x2f, 0x15, 0xcf, 0xc7, 0xaf, 0x51, 0x2a, 0xc0, 0xfe, 0x14, 0xfb, 0x27, - 0xc1, 0x9b, 0xff, 0x08, 0x00, 0x00, 0xff, 0xff, 0xc6, 0x4b, 0x03, 0xaa, 0x60, 0x30, 0x00, 0x00, + // 3302 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xd4, 0x1b, 0x4d, 0x6f, 0xdc, 0xd6, + 0x51, 0xd4, 0xea, 0x63, 0x77, 0x76, 0xd7, 0x92, 0x9f, 0x1c, 0x79, 0x43, 0x49, 0xb6, 0x4c, 0xe7, + 0x43, 0x72, 0x62, 0xc9, 0x51, 0x92, 0xc6, 0x71, 0x9a, 0x34, 0xb6, 0x6c, 0x39, 0x4e, 0x2c, 0x39, + 0xa1, 0x1c, 0xa7, 0x6d, 0x82, 0x12, 0x14, 0xf9, 0x56, 0x62, 0xc4, 0x25, 0x69, 0xf2, 0xad, 0xe2, + 0x35, 0xd2, 0x53, 0x0a, 0xb4, 0x40, 0xd1, 0x1e, 0x8a, 0x5e, 0x7a, 0x29, 0x50, 0xf4, 0xde, 0x6b, + 0xff, 0x42, 0xfe, 0x40, 0x81, 0x9e, 0x7a, 0xe9, 0xb9, 0x87, 0x1e, 0x0a, 0x14, 0xe8, 0xa5, 0x78, + 0x5f, 0x5c, 0x7e, 0x6a, 0xa9, 0x5a, 0x45, 0xd1, 0x1b, 0xdf, 0xbc, 0x79, 0x33, 0x6f, 0xe6, 0xcd, + 0xcc, 0x9b, 0x37, 0xb3, 0x0b, 0x73, 0x47, 0xbe, 0xdb, 0xef, 0x61, 0x23, 0xc2, 
0xe1, 0x11, 0x0e, + 0xd7, 0x82, 0xd0, 0x27, 0x3e, 0x9a, 0x4d, 0x01, 0x8d, 0x60, 0x4f, 0x5b, 0x07, 0x74, 0xcb, 0x24, + 0xd6, 0xc1, 0x6d, 0xec, 0x62, 0x82, 0x75, 0xfc, 0xb8, 0x8f, 0x23, 0x82, 0x9e, 0x87, 0x7a, 0xd7, + 0x71, 0xb1, 0xe1, 0xd8, 0x51, 0x47, 0x59, 0xae, 0xad, 0x34, 0xf4, 0x69, 0x3a, 0xbe, 0x67, 0x47, + 0xda, 0x03, 0x98, 0x4b, 0x2d, 0x88, 0x02, 0xdf, 0x8b, 0x30, 0xba, 0x0e, 0xd3, 0x21, 0x8e, 0xfa, + 0x2e, 0xe1, 0x0b, 0x9a, 0x1b, 0x17, 0xd6, 0xb2, 0xbc, 0xd6, 0xe2, 0x25, 0x7d, 0x97, 0xe8, 0x12, + 0x5d, 0xfb, 0x46, 0x81, 0x56, 0x72, 0x06, 0x9d, 0x87, 0x69, 0xc1, 0xbc, 0xa3, 0x2c, 0x2b, 0x2b, + 0x0d, 0x7d, 0x8a, 0xf3, 0x46, 0xf3, 0x30, 0x15, 0x11, 0x93, 0xf4, 0xa3, 0xce, 0xf8, 0xb2, 0xb2, + 0x32, 0xa9, 0x8b, 0x11, 0x3a, 0x07, 0x93, 0x38, 0x0c, 0xfd, 0xb0, 0x53, 0x63, 0xe8, 0x7c, 0x80, + 0x10, 0x4c, 0x44, 0xce, 0x53, 0xdc, 0x99, 0x58, 0x56, 0x56, 0xda, 0x3a, 0xfb, 0x46, 0x1d, 0x98, + 0x3e, 0xc2, 0x61, 0xe4, 0xf8, 0x5e, 0x67, 0x92, 0x81, 0xe5, 0x50, 0xfb, 0x10, 0xce, 0x6c, 0x39, + 0x2e, 0xbe, 0x8b, 0x89, 0xd4, 0x41, 0xe9, 0x36, 0x2e, 0x42, 0xd3, 0xb4, 0x2c, 0x1c, 0x10, 0x63, + 0xff, 0xa9, 0x13, 0xb0, 0xbd, 0xd4, 0x75, 0xe0, 0xa0, 0xbb, 0x4f, 0x9d, 0x40, 0xfb, 0x69, 0x0d, + 0x66, 0x62, 0x62, 0x42, 0x3f, 0x08, 0x26, 0x6c, 0x93, 0x98, 0x8c, 0x54, 0x4b, 0x67, 0xdf, 0xe8, + 0x45, 0x38, 0x63, 0xf9, 0x1e, 0xc1, 0x1e, 0x31, 0x5c, 0xec, 0xed, 0x93, 0x03, 0x46, 0xab, 0xad, + 0xb7, 0x05, 0xf4, 0x3e, 0x03, 0xa2, 0x4b, 0xd0, 0x92, 0x68, 0x64, 0x10, 0x60, 0x21, 0x65, 0x53, + 0xc0, 0x1e, 0x0e, 0x02, 0x8c, 0x2e, 0x43, 0xdb, 0x35, 0x23, 0x62, 0xf4, 0x7c, 0xdb, 0xe9, 0x3a, + 0xd8, 0x66, 0x42, 0x4f, 0xe8, 0x2d, 0x0a, 0xdc, 0x16, 0x30, 0xa4, 0xf2, 0x43, 0xf5, 0xcc, 0x1e, + 0x66, 0xd2, 0x37, 0xf4, 0x78, 0x4c, 0xb7, 0x87, 0x89, 0xb9, 0xdf, 0x99, 0x62, 0x70, 0xf6, 0x8d, + 0x96, 0x00, 0x9c, 0x88, 0xc9, 0x18, 0x60, 0xbb, 0x33, 0xcd, 0xc4, 0x6c, 0x38, 0xd1, 0x5d, 0x0e, + 0x40, 0x1f, 0xc0, 0xf4, 0x01, 0x36, 0x6d, 0x1c, 0x46, 0x9d, 0x3a, 0x3b, 0xf1, 0xb5, 0xfc, 0x89, + 0x67, 0xb4, 0xb0, 0xf6, 0x01, 0x5f, 0x70, 0xc7, 0x23, 0xe1, 0x40, 0x97, 0xcb, 0xd1, 0x22, 0x34, + 0xd8, 0x91, 0x6d, 0xfa, 0x36, 0xee, 0x34, 0xd8, 0xd1, 0x0e, 0x01, 0xea, 0x0d, 0x68, 0x25, 0x97, + 0xa1, 0x59, 0xa8, 0x1d, 0xe2, 0x81, 0x38, 0x13, 0xfa, 0x49, 0xcf, 0xff, 0xc8, 0x74, 0xfb, 0x98, + 0xa9, 0xaf, 0xa1, 0xf3, 0xc1, 0x8d, 0xf1, 0xeb, 0x8a, 0x36, 0x0d, 0x93, 0x77, 0x7a, 0x01, 0x19, + 0x68, 0x6f, 0x41, 0xe7, 0x91, 0x69, 0xf5, 0xfb, 0xbd, 0x47, 0x6c, 0x8b, 0x9b, 0x07, 0xd8, 0x3a, + 0x94, 0x07, 0xbd, 0x00, 0x0d, 0xb1, 0x71, 0x71, 0xd4, 0x6d, 0xbd, 0xce, 0x01, 0xf7, 0x6c, 0xed, + 0x7d, 0x78, 0xbe, 0x60, 0xa1, 0x38, 0xd4, 0xcb, 0xd0, 0xde, 0x37, 0xc3, 0x3d, 0x73, 0x1f, 0x1b, + 0xa1, 0x49, 0x1c, 0x9f, 0xad, 0x56, 0xf4, 0x96, 0x00, 0xea, 0x14, 0xa6, 0x7d, 0x0e, 0x6a, 0x8a, + 0x82, 0xdf, 0x0b, 0x4c, 0x8b, 0x54, 0x61, 0x8e, 0x96, 0xa1, 0x19, 0x84, 0xd8, 0x74, 0x5d, 0xdf, + 0x32, 0x09, 0x17, 0xaf, 0xa6, 0x27, 0x41, 0xda, 0x12, 0x2c, 0x14, 0x12, 0xe7, 0x1b, 0xd4, 0xae, + 0x67, 0x76, 0xef, 0xf7, 0x7a, 0x4e, 0x25, 0xd6, 0xda, 0x7b, 0xb9, 0x5d, 0xb3, 0x95, 0x42, 0xf0, + 0x65, 0x68, 0x39, 0x91, 0x11, 0x62, 0xd3, 0x36, 0x7c, 0xcf, 0xe5, 0x87, 0x51, 0xd7, 0xc1, 0x89, + 0x74, 0x6c, 0xda, 0x0f, 0x3c, 0x77, 0xa0, 0xbd, 0x9d, 0x59, 0xef, 0x62, 0xd3, 0xeb, 0x07, 0x95, + 0x58, 0x67, 0x65, 0x92, 0x4b, 0x85, 0x4c, 0x6f, 0xc3, 0x79, 0x1e, 0x2e, 0x36, 0x7d, 0xd7, 0xc5, + 0x16, 0x71, 0x7c, 0x4f, 0x92, 0xbd, 0x00, 0x60, 0xc5, 0x40, 0x61, 0x21, 0x09, 0x88, 0xa6, 0x42, + 0x27, 0xbf, 0x54, 0x90, 0xfd, 0x8b, 0x02, 0xcf, 0xdd, 0x14, 0x6a, 0xe5, 0x8c, 0x2b, 0x1d, 0x51, + 0x9a, 
0xe5, 0x78, 0x96, 0x65, 0xf6, 0x08, 0x6b, 0xb9, 0x23, 0xa4, 0x18, 0x21, 0x0e, 0x5c, 0xc7, + 0x32, 0x19, 0x89, 0x09, 0xee, 0xdd, 0x09, 0x10, 0xb5, 0x78, 0x42, 0x5c, 0xe1, 0xb3, 0xf4, 0x13, + 0x6d, 0xc0, 0x7c, 0x0f, 0xf7, 0xfc, 0x70, 0x60, 0xf4, 0xcc, 0xc0, 0xe8, 0x99, 0x4f, 0x0c, 0x1a, + 0xde, 0x8c, 0xde, 0x1e, 0x73, 0xe0, 0xb6, 0x8e, 0xf8, 0xec, 0xb6, 0x19, 0x6c, 0x9b, 0x4f, 0x76, + 0x9d, 0xa7, 0x78, 0x7b, 0x4f, 0xeb, 0xc0, 0x7c, 0x56, 0x3e, 0x21, 0xfa, 0x77, 0xe0, 0x3c, 0x87, + 0xec, 0x0e, 0x3c, 0x6b, 0x97, 0xc5, 0xd4, 0x4a, 0x07, 0xf5, 0x2f, 0x05, 0x3a, 0xf9, 0x85, 0xc2, + 0x44, 0x9e, 0x55, 0x6b, 0x27, 0xd6, 0xc9, 0x45, 0x68, 0x12, 0xd3, 0x71, 0x0d, 0xbf, 0xdb, 0x8d, + 0x30, 0x61, 0x8a, 0x98, 0xd0, 0x81, 0x82, 0x1e, 0x30, 0x08, 0x5a, 0x85, 0x59, 0x8b, 0xfb, 0x87, + 0x11, 0xe2, 0x23, 0x87, 0xdd, 0x02, 0xd3, 0x6c, 0x63, 0x33, 0x96, 0xf4, 0x1b, 0x0e, 0x46, 0x1a, + 0xb4, 0x1d, 0xfb, 0x89, 0xc1, 0xe2, 0x3f, 0xbb, 0x44, 0xea, 0x8c, 0x5a, 0xd3, 0xb1, 0x9f, 0xd0, + 0x90, 0x46, 0x35, 0xaa, 0x3d, 0x82, 0x45, 0x2e, 0xfc, 0x3d, 0xcf, 0x0a, 0x71, 0x0f, 0x7b, 0xc4, + 0x74, 0x37, 0xfd, 0x60, 0x50, 0xc9, 0x6c, 0x9e, 0x87, 0x7a, 0xe4, 0x78, 0x16, 0x36, 0x3c, 0x7e, + 0x99, 0x4d, 0xe8, 0xd3, 0x6c, 0xbc, 0x13, 0x69, 0xb7, 0x60, 0xa9, 0x84, 0xae, 0xd0, 0xec, 0x25, + 0x68, 0xb1, 0x8d, 0x89, 0x0b, 0x40, 0x5c, 0x29, 0x4d, 0x0a, 0xdb, 0xe4, 0x20, 0xed, 0x35, 0x40, + 0x9c, 0xc6, 0xb6, 0xdf, 0xf7, 0xaa, 0x39, 0xfc, 0x73, 0x30, 0x97, 0x5a, 0x22, 0x6c, 0xe3, 0x75, + 0x38, 0xc7, 0xc1, 0x9f, 0x7a, 0xbd, 0xca, 0xb4, 0xce, 0xc3, 0x73, 0x99, 0x45, 0x82, 0xda, 0x86, + 0x64, 0x92, 0x4e, 0x37, 0x8e, 0x25, 0x36, 0x2f, 0x77, 0x90, 0xce, 0x38, 0x58, 0x6c, 0xe3, 0x1b, + 0x36, 0xc3, 0x43, 0x1a, 0x77, 0x68, 0x24, 0xaa, 0x44, 0x71, 0x11, 0xd4, 0xa2, 0x95, 0x82, 0xee, + 0x67, 0x30, 0x2f, 0x63, 0x9e, 0xd7, 0x75, 0xf6, 0xfb, 0x21, 0xae, 0x1a, 0xab, 0x93, 0x26, 0x3b, + 0x9e, 0x33, 0x59, 0x6d, 0x5d, 0xba, 0x59, 0x82, 0xb0, 0x38, 0xd2, 0x38, 0x83, 0x51, 0x12, 0x19, + 0x8c, 0xf6, 0x07, 0x05, 0xce, 0xca, 0x15, 0x15, 0xed, 0xea, 0x84, 0x8e, 0x55, 0x2b, 0x75, 0xac, + 0x89, 0xa1, 0x63, 0xad, 0xc0, 0x6c, 0xe4, 0xf7, 0x43, 0x0b, 0x1b, 0x34, 0x6b, 0x31, 0x3c, 0x7a, + 0x4b, 0x73, 0xbf, 0x3b, 0xc3, 0xe1, 0xb7, 0x4d, 0x62, 0xee, 0xf8, 0x36, 0xd6, 0xbe, 0x27, 0xcd, + 0x2e, 0x65, 0xaf, 0xab, 0x70, 0x96, 0x25, 0x27, 0x66, 0x10, 0x60, 0xcf, 0x36, 0x4c, 0x42, 0x8d, + 0x5e, 0x61, 0x46, 0x7f, 0x86, 0x4e, 0xdc, 0x64, 0xf0, 0x9b, 0x64, 0x27, 0xd2, 0x7e, 0x3d, 0x0e, + 0x33, 0x74, 0x2d, 0x75, 0xb2, 0x4a, 0xf2, 0xce, 0x42, 0x0d, 0x3f, 0x21, 0x42, 0x50, 0xfa, 0x89, + 0xd6, 0x61, 0x4e, 0x78, 0xb3, 0xe3, 0x7b, 0x43, 0x47, 0xaf, 0xf1, 0xb8, 0x38, 0x9c, 0x8a, 0x7d, + 0xfd, 0x22, 0x34, 0x23, 0xe2, 0x07, 0x32, 0x6e, 0xf0, 0xcc, 0x09, 0x28, 0x48, 0xc4, 0x8d, 0xb4, + 0x4e, 0x27, 0x0b, 0x74, 0x4a, 0x2f, 0x43, 0x6c, 0x19, 0x7c, 0x57, 0x2c, 0xf2, 0xb0, 0xcb, 0xf0, + 0x8e, 0xc5, 0xb5, 0x81, 0xde, 0x83, 0x45, 0x67, 0xdf, 0xf3, 0x43, 0x6c, 0x08, 0x45, 0x32, 0xff, + 0xf5, 0x7c, 0x62, 0x74, 0xfd, 0xbe, 0x27, 0x73, 0xab, 0x0e, 0xc7, 0xd9, 0x65, 0x28, 0x54, 0x03, + 0x3b, 0x3e, 0xd9, 0xa2, 0xf3, 0xda, 0x9b, 0x30, 0x3b, 0xd4, 0x4a, 0xf5, 0x28, 0xf0, 0x8d, 0x22, + 0x2d, 0xee, 0xa1, 0xe9, 0xb8, 0xbb, 0xd8, 0xb3, 0x71, 0xf8, 0x8c, 0xd1, 0x09, 0x5d, 0x83, 0x73, + 0x8e, 0xed, 0x62, 0x83, 0x38, 0x3d, 0xec, 0xf7, 0x89, 0x11, 0x61, 0xcb, 0xf7, 0xec, 0x48, 0xea, + 0x97, 0xce, 0x3d, 0xe4, 0x53, 0xbb, 0x7c, 0x46, 0xfb, 0x49, 0x7c, 0x4b, 0x24, 0x77, 0x31, 0xcc, + 0xa0, 0x3c, 0x8c, 0x29, 0x41, 0x9e, 0x0c, 0x0a, 0x31, 0x5a, 0x1c, 0xc8, 0xf3, 0x3e, 0x7a, 0x42, + 0x02, 0x69, 0xcf, 0xb7, 0x07, 
0x6c, 0x47, 0x2d, 0x1d, 0x38, 0xe8, 0x96, 0x6f, 0x0f, 0x58, 0xb8, + 0x8e, 0x0c, 0x66, 0x64, 0xd6, 0x41, 0xdf, 0x3b, 0x64, 0xbb, 0xa9, 0xeb, 0x4d, 0x27, 0xba, 0x6f, + 0x46, 0x64, 0x93, 0x82, 0xb4, 0x3f, 0x2a, 0x32, 0x5e, 0xd0, 0x6d, 0xe8, 0xd8, 0xc2, 0xce, 0xd1, + 0xff, 0x40, 0x1d, 0x74, 0x85, 0x30, 0x82, 0x54, 0xb6, 0x2c, 0x1c, 0x0e, 0xf1, 0x39, 0x71, 0xab, + 0xb2, 0x99, 0x61, 0xb8, 0x4a, 0x6f, 0x5c, 0x84, 0xab, 0x2f, 0xe4, 0x75, 0x71, 0xc7, 0xda, 0x3d, + 0x30, 0x43, 0x3b, 0xba, 0x8b, 0x3d, 0x1c, 0x9a, 0xe4, 0x54, 0xd2, 0x17, 0x6d, 0x19, 0x2e, 0x94, + 0x51, 0x17, 0xfc, 0x3f, 0x97, 0xd7, 0xa0, 0xc4, 0xd0, 0xf1, 0x5e, 0xdf, 0x71, 0xed, 0x53, 0x61, + 0xff, 0x51, 0x56, 0xb8, 0x98, 0xb8, 0xb0, 0x9f, 0x2b, 0x70, 0x36, 0x64, 0x20, 0x62, 0x44, 0x14, + 0x21, 0x7e, 0xb1, 0xb6, 0xf5, 0x19, 0x31, 0xc1, 0x16, 0xd2, 0x97, 0xeb, 0xcf, 0xc7, 0xa5, 0x05, + 0x48, 0x6a, 0xa7, 0x16, 0x56, 0x17, 0xa0, 0x31, 0x64, 0x5f, 0x63, 0xec, 0xeb, 0x91, 0xe0, 0x4b, + 0xad, 0xd3, 0xf2, 0x83, 0x81, 0x81, 0x2d, 0x9e, 0x51, 0xb0, 0xa3, 0xae, 0xd3, 0x07, 0x5c, 0x30, + 0xb8, 0x63, 0xb1, 0x84, 0xa2, 0x7a, 0x8c, 0x4d, 0x50, 0xfb, 0x92, 0x53, 0x9b, 0x4a, 0x52, 0xfb, + 0x92, 0x51, 0x93, 0x38, 0x47, 0x4e, 0x97, 0xe3, 0x4c, 0x0f, 0x71, 0x1e, 0x39, 0x5d, 0x8a, 0x33, + 0xb4, 0xaa, 0xb4, 0x32, 0xc4, 0xa9, 0x7e, 0x05, 0x0b, 0xe9, 0xd9, 0xea, 0x17, 0xf6, 0x33, 0x29, + 0x4b, 0xbb, 0x90, 0x35, 0xa7, 0xcc, 0xad, 0x7f, 0x94, 0xdd, 0x76, 0xe5, 0x0c, 0xe7, 0xd9, 0xf6, + 0xb5, 0x94, 0x55, 0x48, 0x3a, 0x4d, 0xfa, 0x7e, 0x76, 0xdb, 0x27, 0x48, 0x97, 0x8e, 0x67, 0x7c, + 0x31, 0xeb, 0x02, 0xd9, 0x9c, 0xea, 0x37, 0x71, 0x7c, 0x15, 0x18, 0x34, 0xa3, 0xa9, 0x1c, 0xd7, + 0x04, 0x5f, 0x51, 0x79, 0x98, 0x16, 0x6c, 0xd1, 0x3c, 0x4c, 0x89, 0xfb, 0x90, 0xbf, 0x58, 0xc4, + 0x28, 0x55, 0x54, 0xa9, 0x89, 0xa2, 0x8a, 0x2c, 0x16, 0xd1, 0x57, 0xf9, 0x24, 0x0f, 0x8f, 0x74, + 0xfc, 0x11, 0x1e, 0x68, 0x3b, 0x19, 0x8f, 0xe3, 0x5b, 0x3b, 0xa6, 0x24, 0xc2, 0x6b, 0x0e, 0x36, + 0x3b, 0x73, 0x5b, 0x94, 0x56, 0x1a, 0x8e, 0x30, 0x02, 0x5b, 0xfb, 0x85, 0x32, 0x24, 0x78, 0xcb, + 0xf5, 0xf7, 0x4e, 0xd1, 0x2a, 0x93, 0x52, 0xd4, 0x52, 0x52, 0x24, 0xab, 0x46, 0x13, 0xe9, 0xaa, + 0x51, 0xc2, 0x89, 0x92, 0xdb, 0x29, 0x0b, 0xcd, 0x0f, 0xfd, 0xd3, 0x7b, 0x59, 0xe6, 0x43, 0xf3, + 0x90, 0xba, 0xe0, 0x7f, 0x03, 0x16, 0xa8, 0xc2, 0x39, 0x94, 0xbd, 0x5b, 0xaa, 0xbf, 0xed, 0xfe, + 0x36, 0x0e, 0x8b, 0xc5, 0x8b, 0xab, 0xbc, 0xef, 0xde, 0x01, 0x35, 0x7e, 0x3f, 0xd1, 0xab, 0x31, + 0x22, 0x66, 0x2f, 0x88, 0x2f, 0x47, 0x7e, 0x87, 0x9e, 0x17, 0x8f, 0xa9, 0x87, 0x72, 0x5e, 0xde, + 0x90, 0xb9, 0xc7, 0x57, 0x2d, 0xf7, 0xf8, 0xa2, 0x0c, 0x6c, 0x93, 0x94, 0x31, 0xe0, 0x39, 0xdc, + 0x79, 0xdb, 0x24, 0x65, 0x0c, 0xe2, 0xc5, 0x8c, 0x01, 0xb7, 0xda, 0xa6, 0xc0, 0x67, 0x0c, 0x96, + 0x00, 0x44, 0x7a, 0xd5, 0xf7, 0xe4, 0x63, 0xb2, 0xc1, 0x93, 0xab, 0xbe, 0x57, 0x9a, 0x65, 0x4e, + 0x97, 0x66, 0x99, 0xe9, 0xd3, 0xac, 0xe7, 0x4e, 0xf3, 0xb7, 0x0a, 0xc0, 0x6d, 0x27, 0x3a, 0xe4, + 0x5a, 0xa6, 0x79, 0xad, 0xed, 0xc8, 0xe7, 0x00, 0xfd, 0xa4, 0x10, 0xd3, 0x75, 0x85, 0xee, 0xe8, + 0x27, 0xf5, 0x9f, 0x7e, 0x84, 0x6d, 0xa1, 0x1e, 0xf6, 0x4d, 0x61, 0xdd, 0x10, 0x63, 0xa1, 0x01, + 0xf6, 0x4d, 0x33, 0xc5, 0x00, 0x87, 0x16, 0xf6, 0x88, 0xc1, 0xe6, 0xa8, 0xb4, 0xe3, 0x7a, 0x53, + 0xc0, 0xb6, 0x32, 0x28, 0x8c, 0xe4, 0x54, 0x0a, 0xe5, 0xd3, 0x08, 0xdb, 0xda, 0xef, 0x15, 0x68, + 0x6c, 0xe3, 0x9e, 0xd8, 0xdf, 0x05, 0x80, 0x7d, 0x3f, 0xf4, 0xfb, 0xc4, 0xf1, 0x30, 0x4f, 0xe6, + 0x27, 0xf5, 0x04, 0xe4, 0x19, 0x76, 0x4b, 0x23, 0x0c, 0x76, 0xbb, 0xe2, 0x4c, 0xd8, 0x37, 0x85, + 0x1d, 0x60, 0x33, 0x10, 0xc7, 0xc0, 0xbe, 0xe9, 0x93, 
0x29, 0x22, 0xa6, 0x75, 0xc8, 0x74, 0x3e, + 0xa1, 0xf3, 0x81, 0xf6, 0x67, 0x05, 0x40, 0xc7, 0x3d, 0x9f, 0x30, 0x93, 0xa5, 0x72, 0xed, 0x99, + 0xd6, 0x21, 0x7d, 0x76, 0xb0, 0xd2, 0x29, 0xd7, 0x67, 0x53, 0xc0, 0x58, 0xe9, 0x74, 0x09, 0x40, + 0xa2, 0x88, 0x30, 0xd8, 0xd0, 0x1b, 0x02, 0xc2, 0x1f, 0x18, 0x32, 0x22, 0x88, 0x6a, 0xe3, 0x30, + 0x34, 0xf2, 0x6d, 0xcb, 0xd0, 0xb8, 0x00, 0x8d, 0xac, 0x45, 0xb1, 0x88, 0xc2, 0xcc, 0xe9, 0x32, + 0xb4, 0x65, 0x6d, 0x96, 0xd9, 0xab, 0x10, 0xa5, 0x25, 0x81, 0xd4, 0x46, 0x59, 0x1d, 0xf4, 0x09, + 0xc1, 0x5e, 0x6c, 0x4a, 0x0d, 0x7d, 0x08, 0xd0, 0xbe, 0x06, 0x90, 0x75, 0x81, 0xae, 0x8f, 0x36, + 0x60, 0x92, 0x12, 0x97, 0xd5, 0xf6, 0xc5, 0x7c, 0xed, 0x75, 0xa8, 0x06, 0x9d, 0xa3, 0x26, 0xe3, + 0xd8, 0x78, 0x2a, 0x8e, 0x8d, 0x7e, 0x16, 0x6a, 0xdf, 0x2a, 0xb0, 0x2c, 0xb2, 0x50, 0x07, 0x87, + 0xdb, 0xfe, 0x11, 0xcd, 0x48, 0x1e, 0xfa, 0x9c, 0xc9, 0xa9, 0x04, 0xe0, 0xeb, 0xd0, 0xb1, 0x71, + 0x44, 0x1c, 0x8f, 0x31, 0x34, 0xe4, 0xa1, 0xb0, 0x72, 0x35, 0xdf, 0xd0, 0x7c, 0x62, 0xfe, 0x16, + 0x9f, 0xde, 0x31, 0x7b, 0x18, 0x5d, 0x85, 0xb9, 0x43, 0x8c, 0x03, 0xc3, 0xf5, 0x2d, 0xd3, 0x35, + 0xa4, 0x6b, 0x8b, 0x34, 0x6b, 0x96, 0x4e, 0xdd, 0xa7, 0x33, 0xb7, 0xb9, 0x7b, 0x6b, 0x11, 0x5c, + 0x3a, 0x46, 0x12, 0x11, 0xde, 0x16, 0xa1, 0x11, 0x84, 0xbe, 0x85, 0x23, 0x6a, 0xb3, 0x0a, 0xbb, + 0xed, 0x86, 0x00, 0x74, 0x0d, 0xe6, 0xe2, 0xc1, 0xc7, 0xdc, 0x49, 0xcc, 0x7d, 0x5e, 0xa0, 0x1d, + 0xd7, 0x8b, 0xa6, 0xb4, 0x5f, 0x29, 0xa0, 0xe5, 0xb8, 0x6e, 0x85, 0x7e, 0xef, 0x14, 0x35, 0xb8, + 0x0e, 0xe7, 0x98, 0x1e, 0x42, 0x46, 0x72, 0xa8, 0x08, 0xfe, 0x1a, 0x3a, 0x4b, 0xe7, 0x38, 0x37, + 0xa9, 0x89, 0x3e, 0x5c, 0x3e, 0x76, 0x4f, 0xff, 0x25, 0x5d, 0x2c, 0xc8, 0x4b, 0x9c, 0x3f, 0x70, + 0x52, 0xb7, 0x92, 0xf6, 0x3b, 0x45, 0xde, 0xa9, 0xe9, 0x59, 0xb1, 0x97, 0x9b, 0xd0, 0xb6, 0x9d, + 0xe8, 0xd0, 0xe0, 0xad, 0x9f, 0xe3, 0xec, 0x7f, 0x18, 0x4d, 0xf5, 0x96, 0x1d, 0x7f, 0xe3, 0x08, + 0xbd, 0x0f, 0x6d, 0x51, 0x3c, 0x4d, 0x74, 0x93, 0x9a, 0x1b, 0x0b, 0x79, 0x12, 0x71, 0xbc, 0xd3, + 0x5b, 0x7c, 0x05, 0x1f, 0x69, 0xff, 0x6c, 0x41, 0xeb, 0x93, 0x3e, 0x0e, 0x07, 0x89, 0xc2, 0x73, + 0x84, 0xc5, 0x31, 0xc8, 0x8e, 0x59, 0x02, 0x42, 0x6f, 0x9c, 0x6e, 0xe8, 0xf7, 0x8c, 0xb8, 0xa9, + 0x36, 0xce, 0x50, 0x9a, 0x14, 0xb8, 0xc5, 0x1b, 0x6b, 0xe8, 0x5d, 0x98, 0xea, 0x3a, 0x2e, 0xc1, + 0xbc, 0x8d, 0xd5, 0xdc, 0x78, 0x31, 0xbf, 0x9f, 0x24, 0xcf, 0xb5, 0x2d, 0x86, 0xac, 0x8b, 0x45, + 0x68, 0x0f, 0xe6, 0x1c, 0x2f, 0x60, 0x4f, 0xd0, 0xd0, 0x31, 0x5d, 0xe7, 0xe9, 0xb0, 0x74, 0xda, + 0xdc, 0x78, 0x6d, 0x04, 0xad, 0x7b, 0x74, 0xe5, 0x6e, 0x72, 0xa1, 0x8e, 0x9c, 0x1c, 0x0c, 0x61, + 0x38, 0xe7, 0xf7, 0x49, 0x9e, 0xc9, 0x24, 0x63, 0xb2, 0x31, 0x82, 0xc9, 0x03, 0xb6, 0x34, 0xcd, + 0x65, 0xce, 0xcf, 0x03, 0xd5, 0x1d, 0x98, 0xe2, 0xc2, 0xd1, 0x20, 0xdf, 0x75, 0xb0, 0x2b, 0x3b, + 0x70, 0x7c, 0x40, 0xe3, 0x98, 0x1f, 0xe0, 0xd0, 0xf4, 0x64, 0xbc, 0x96, 0xc3, 0x61, 0x27, 0xa8, + 0x96, 0xe8, 0x04, 0xa9, 0x7f, 0x9a, 0x04, 0x94, 0x97, 0x50, 0xd6, 0x83, 0x43, 0x1c, 0xd1, 0x18, + 0x98, 0xbc, 0x20, 0x66, 0x12, 0x70, 0x76, 0x49, 0x7c, 0x06, 0x0d, 0x2b, 0x3a, 0x32, 0x98, 0x4a, + 0x84, 0xb9, 0xdc, 0x38, 0xb1, 0x4a, 0xd7, 0x36, 0x77, 0x1f, 0x31, 0xa8, 0x5e, 0xb7, 0xa2, 0x23, + 0xf6, 0x85, 0x7e, 0x08, 0xf0, 0x65, 0xe4, 0x7b, 0x82, 0x32, 0x3f, 0xf8, 0x77, 0x4e, 0x4e, 0xf9, + 0xc3, 0xdd, 0x07, 0x3b, 0x9c, 0x74, 0x83, 0x92, 0xe3, 0xb4, 0x2d, 0x68, 0x07, 0x66, 0xf8, 0xb8, + 0x8f, 0x89, 0x20, 0xcf, 0x6d, 0xe1, 0xbd, 0x93, 0x93, 0xff, 0x98, 0x93, 0xe1, 0x1c, 0x5a, 0x41, + 0x62, 0xa4, 0x7e, 0x3b, 0x0e, 0x75, 0x29, 0x17, 0x7d, 0xc5, 0x32, 0x0b, 0xe7, 
0xb5, 0x1c, 0xc3, + 0xf1, 0xba, 0xbe, 0xd0, 0xe8, 0x19, 0x0a, 0xe7, 0xe5, 0x1c, 0x76, 0x7d, 0xad, 0xc2, 0x6c, 0x88, + 0x2d, 0x3f, 0xb4, 0x69, 0xae, 0xef, 0xf4, 0x1c, 0x6a, 0xf6, 0xfc, 0x2c, 0x67, 0x38, 0xfc, 0xb6, + 0x04, 0xa3, 0x97, 0x61, 0x86, 0x1d, 0x7b, 0x02, 0xb3, 0x26, 0x69, 0x62, 0x37, 0x81, 0xb8, 0x0a, + 0xb3, 0x8f, 0xfb, 0x34, 0xf0, 0x59, 0x07, 0x66, 0x68, 0x5a, 0xc4, 0x8f, 0xab, 0x2a, 0x33, 0x0c, + 0xbe, 0x19, 0x83, 0xd1, 0x1b, 0x30, 0xcf, 0x51, 0x71, 0x64, 0x99, 0x41, 0xbc, 0x02, 0x87, 0xe2, + 0xd1, 0x7d, 0x8e, 0xcd, 0xde, 0x61, 0x93, 0x9b, 0x72, 0x0e, 0xa9, 0x50, 0xb7, 0xfc, 0x5e, 0x0f, + 0x7b, 0x24, 0x12, 0x8d, 0xd2, 0x78, 0x8c, 0x6e, 0xc2, 0x92, 0xe9, 0xba, 0xfe, 0x57, 0x06, 0x5b, + 0x69, 0x1b, 0x39, 0xe9, 0xf8, 0x13, 0x5c, 0x65, 0x48, 0x9f, 0x30, 0x1c, 0x3d, 0x2d, 0xa8, 0x7a, + 0x11, 0x1a, 0xf1, 0x39, 0xd2, 0x94, 0x27, 0x61, 0x90, 0xec, 0x5b, 0x3d, 0x03, 0xad, 0xe4, 0x49, + 0xa8, 0x7f, 0xaf, 0xc1, 0x5c, 0x81, 0x53, 0xa1, 0xcf, 0x01, 0xa8, 0xb5, 0x72, 0xd7, 0x12, 0xe6, + 0xfa, 0xdd, 0x93, 0x3b, 0x27, 0xb5, 0x57, 0x0e, 0xd6, 0xa9, 0xf5, 0xf3, 0x4f, 0xf4, 0x23, 0x68, + 0x32, 0x8b, 0x15, 0xd4, 0xb9, 0xc9, 0xbe, 0xfb, 0x1f, 0x50, 0xa7, 0xb2, 0x0a, 0xf2, 0xcc, 0x07, + 0xf8, 0xb7, 0xfa, 0x57, 0x05, 0x1a, 0x31, 0x63, 0x9a, 0xc0, 0xf1, 0x83, 0x62, 0x67, 0x1d, 0xc9, + 0x04, 0x8e, 0xc1, 0xb6, 0x18, 0xe8, 0xff, 0xd2, 0x94, 0xd4, 0xb7, 0x00, 0x86, 0xf2, 0x17, 0x8a, + 0xa0, 0x14, 0x8a, 0xa0, 0xad, 0x42, 0x9b, 0x6a, 0xd6, 0xc1, 0xf6, 0x2e, 0x09, 0x9d, 0x80, 0xfd, + 0xa4, 0x81, 0xe3, 0x44, 0xe2, 0x21, 0x2d, 0x87, 0x1b, 0xff, 0x58, 0x84, 0x56, 0xf2, 0x26, 0x45, + 0x5f, 0x40, 0x33, 0xf1, 0xd3, 0x0d, 0xf4, 0x42, 0xfe, 0xd0, 0xf2, 0x3f, 0x05, 0x51, 0x5f, 0x1c, + 0x81, 0x25, 0xde, 0x9a, 0x63, 0x48, 0x87, 0x69, 0xd1, 0xee, 0x47, 0xcb, 0xc7, 0xfc, 0x12, 0x80, + 0x53, 0xbd, 0x34, 0xf2, 0xb7, 0x02, 0xda, 0xd8, 0x35, 0x05, 0x79, 0x70, 0x36, 0xd7, 0x7d, 0x47, + 0x57, 0xf2, 0x6b, 0xcb, 0x7a, 0xfb, 0xea, 0x2b, 0x95, 0x70, 0x63, 0x19, 0x08, 0xcc, 0x15, 0xb4, + 0xd3, 0xd1, 0xab, 0x23, 0xa8, 0xa4, 0x5a, 0xfa, 0xea, 0xd5, 0x8a, 0xd8, 0x31, 0xd7, 0xc7, 0x80, + 0xf2, 0xbd, 0x76, 0xf4, 0xca, 0x48, 0x32, 0xc3, 0x5e, 0xbe, 0xfa, 0x6a, 0x35, 0xe4, 0x52, 0x41, + 0x79, 0x8f, 0x7d, 0xa4, 0xa0, 0xa9, 0x2e, 0xfe, 0x48, 0x41, 0x33, 0x8d, 0xfb, 0x31, 0x74, 0x08, + 0xb3, 0xd9, 0xfe, 0x3b, 0x5a, 0x2d, 0xfb, 0x9d, 0x50, 0xae, 0xbd, 0xaf, 0x5e, 0xa9, 0x82, 0x1a, + 0x33, 0xc3, 0x70, 0x26, 0xdd, 0xef, 0x46, 0x2f, 0xe7, 0xd7, 0x17, 0x76, 0xfc, 0xd5, 0x95, 0xd1, + 0x88, 0x49, 0x99, 0xb2, 0x3d, 0xf0, 0x22, 0x99, 0x4a, 0x1a, 0xec, 0x45, 0x32, 0x95, 0xb5, 0xd4, + 0xb5, 0x31, 0xf4, 0xb5, 0x6c, 0xac, 0x66, 0x7a, 0xc3, 0x68, 0xad, 0x8c, 0x4c, 0x71, 0x73, 0x5a, + 0x5d, 0xaf, 0x8c, 0x9f, 0xf0, 0xc6, 0x2f, 0xa0, 0x99, 0x68, 0x11, 0x17, 0xc5, 0x8f, 0x7c, 0xd3, + 0xb9, 0x28, 0x7e, 0x14, 0xf5, 0x99, 0xc7, 0xd0, 0x1e, 0xb4, 0x53, 0x4d, 0x63, 0xf4, 0x52, 0xd9, + 0xca, 0x74, 0x6d, 0x55, 0x7d, 0x79, 0x24, 0x5e, 0xcc, 0xc3, 0x90, 0x11, 0x51, 0x84, 0xc0, 0xd2, + 0xcd, 0xa5, 0x63, 0xe0, 0x4b, 0xa3, 0xd0, 0x52, 0xae, 0x9c, 0x6b, 0x2d, 0x17, 0xba, 0x72, 0x59, + 0xeb, 0xba, 0xd0, 0x95, 0xcb, 0xbb, 0xd5, 0x63, 0xe8, 0x00, 0x66, 0x32, 0x6d, 0x65, 0xb4, 0x52, + 0x46, 0x22, 0xdb, 0xd2, 0x56, 0x57, 0x2b, 0x60, 0xc6, 0x9c, 0x7e, 0x20, 0x2b, 0x10, 0xcc, 0xe4, + 0x2e, 0x97, 0x2f, 0x1d, 0xda, 0xd9, 0x0b, 0xc7, 0x23, 0xc5, 0xa4, 0xbf, 0x82, 0x73, 0x45, 0xd5, + 0x46, 0x74, 0xb5, 0xa8, 0xae, 0x51, 0x5a, 0xd2, 0x54, 0xd7, 0xaa, 0xa2, 0xc7, 0x8c, 0x3f, 0x85, + 0xba, 0x6c, 0xad, 0xa2, 0x82, 0x4b, 0x29, 0xd3, 0x8c, 0x56, 0xb5, 0xe3, 0x50, 0x12, 0xae, 0xd2, + 0x93, 
0x51, 0x61, 0xd8, 0xf3, 0x2c, 0x8f, 0x0a, 0xb9, 0xee, 0x6c, 0x79, 0x54, 0xc8, 0xb7, 0x50, + 0x19, 0xbb, 0xd8, 0xec, 0x92, 0x2d, 0xc2, 0x72, 0xb3, 0x2b, 0xe8, 0x80, 0x96, 0x9b, 0x5d, 0x61, + 0xd7, 0x71, 0x0c, 0xfd, 0x58, 0xfe, 0x4c, 0x22, 0xdb, 0x19, 0x44, 0xa5, 0xb1, 0xa5, 0xa4, 0x43, + 0xa9, 0x5e, 0xab, 0xbe, 0x20, 0x66, 0xff, 0x54, 0x46, 0xc2, 0x4c, 0x67, 0xb0, 0x3c, 0x12, 0x16, + 0xf7, 0x27, 0xd5, 0xf5, 0xca, 0xf8, 0x79, 0x27, 0x4f, 0xb6, 0xce, 0xca, 0xb5, 0x5d, 0xd0, 0x6d, + 0x2c, 0xd7, 0x76, 0x61, 0x37, 0x8e, 0xf9, 0x47, 0x51, 0x5b, 0xac, 0xc8, 0x3f, 0x8e, 0xe9, 0xdb, + 0xa9, 0x6b, 0x55, 0xd1, 0x53, 0x89, 0x42, 0xbe, 0xef, 0x85, 0x46, 0xee, 0x3f, 0x75, 0x07, 0x5c, + 0xad, 0x88, 0x5d, 0x7e, 0xba, 0xf2, 0x4e, 0x18, 0x29, 0x40, 0xe6, 0x6e, 0x58, 0xaf, 0x8c, 0x1f, + 0xf3, 0x0e, 0xe4, 0x8f, 0x6e, 0x12, 0x3d, 0x2b, 0x74, 0x65, 0x04, 0x9d, 0x44, 0xcf, 0x4d, 0x7d, + 0xa5, 0x12, 0x6e, 0x91, 0xf7, 0x26, 0xbb, 0x48, 0xc7, 0xd9, 0x53, 0xae, 0xf5, 0x75, 0x9c, 0x3d, + 0x15, 0x34, 0xa6, 0x0a, 0xbc, 0x57, 0x36, 0x8f, 0x46, 0x7b, 0x6f, 0xa6, 0x89, 0x35, 0xda, 0x7b, + 0x73, 0x7d, 0xa9, 0x31, 0xf4, 0xb3, 0xe1, 0x8f, 0x31, 0xf2, 0x35, 0x58, 0xb4, 0x51, 0x1a, 0x8a, + 0x4a, 0x4b, 0xcf, 0xea, 0xeb, 0x27, 0x5a, 0x93, 0x50, 0xfe, 0x2f, 0x15, 0xd9, 0xd9, 0x2d, 0x2c, + 0x82, 0xa2, 0x37, 0x2a, 0x10, 0xce, 0xd5, 0x71, 0xd5, 0x37, 0x4f, 0xb8, 0xaa, 0xc8, 0x1a, 0x92, + 0xf5, 0xcf, 0x72, 0x6b, 0x28, 0xa8, 0xa1, 0x96, 0x5b, 0x43, 0x51, 0x49, 0x55, 0x1b, 0x43, 0xf7, + 0x61, 0x92, 0x3d, 0xd7, 0xd1, 0x85, 0xe3, 0xdf, 0xf1, 0xea, 0xc5, 0xe2, 0xf9, 0xf8, 0x35, 0x4a, + 0x05, 0xd8, 0x9b, 0x62, 0xff, 0x35, 0x78, 0xfd, 0xdf, 0x01, 0x00, 0x00, 0xff, 0xff, 0x46, 0xc8, + 0xe1, 0xbc, 0x82, 0x30, 0x00, 0x00, } diff --git a/weed/server/volume_grpc_vacuum.go b/weed/server/volume_grpc_vacuum.go index 24f982241..b87de4b5b 100644 --- a/weed/server/volume_grpc_vacuum.go +++ b/weed/server/volume_grpc_vacuum.go @@ -51,6 +51,11 @@ func (vs *VolumeServer) VacuumVolumeCommit(ctx context.Context, req *volume_serv } else { glog.V(1).Infof("commit volume %d", req.VolumeId) } + if err == nil { + if vs.store.GetVolume(needle.VolumeId(req.VolumeId)).IsReadOnly() { + resp.IsReadOnly = true + } + } return resp, err diff --git a/weed/storage/store.go b/weed/storage/store.go index 19dbcb70e..f0dbbdf18 100644 --- a/weed/storage/store.go +++ b/weed/storage/store.go @@ -145,7 +145,7 @@ func (s *Store) VolumeInfos() []*VolumeInfo { FileCount: int(v.FileCount()), DeleteCount: int(v.DeletedCount()), DeletedByteCount: v.DeletedSize(), - ReadOnly: v.noWriteOrDelete || v.noWriteCanDelete, + ReadOnly: v.IsReadOnly(), Ttl: v.Ttl, CompactRevision: uint32(v.CompactionRevision), } @@ -229,7 +229,7 @@ func (s *Store) Close() { func (s *Store) WriteVolumeNeedle(i needle.VolumeId, n *needle.Needle) (isUnchanged bool, err error) { if v := s.findVolume(i); v != nil { - if v.noWriteOrDelete || v.noWriteCanDelete { + if v.v.IsReadOnly() { err = fmt.Errorf("volume %d is read only", i) return } diff --git a/weed/storage/volume.go b/weed/storage/volume.go index 7da83de7a..755b98b79 100644 --- a/weed/storage/volume.go +++ b/weed/storage/volume.go @@ -215,7 +215,7 @@ func (v *Volume) ToVolumeInformationMessage() *master_pb.VolumeInformationMessag FileCount: v.FileCount(), DeleteCount: v.DeletedCount(), DeletedByteCount: v.DeletedSize(), - ReadOnly: v.noWriteOrDelete || v.noWriteCanDelete, + ReadOnly: v.IsReadOnly(), ReplicaPlacement: uint32(v.ReplicaPlacement.Byte()), Version: uint32(v.Version()), Ttl: v.Ttl.ToUint32(), @@ -237,3 +237,7 @@ func (v *Volume) 
RemoteStorageNameKey() (storageName, storageKey string) { } return v.volumeInfo.GetFiles()[0].BackendName(), v.volumeInfo.GetFiles()[0].GetKey() } + +func (v *Volume) IsReadOnly() bool { + return v.noWriteOrDelete || v.noWriteCanDelete +} diff --git a/weed/storage/volume_checking.go b/weed/storage/volume_checking.go index a65c2a3ff..c33f0049a 100644 --- a/weed/storage/volume_checking.go +++ b/weed/storage/volume_checking.go @@ -58,7 +58,7 @@ func readIndexEntryAtOffset(indexFile *os.File, offset int64) (bytes []byte, err func verifyNeedleIntegrity(datFile backend.BackendStorageFile, v needle.Version, offset int64, key NeedleId, size uint32) (lastAppendAtNs uint64, err error) { n := new(needle.Needle) if err = n.ReadData(datFile, offset, size, v); err != nil { - return n.AppendAtNs, err + return n.AppendAtNs, fmt.Errorf("read data [%d,%d) : %v", offset, offset+int64(size), err) } if n.Id != key { return n.AppendAtNs, fmt.Errorf("index key %#x does not match needle's Id %#x", key, n.Id) diff --git a/weed/storage/volume_loading.go b/weed/storage/volume_loading.go index 6b42fc452..3b0897bca 100644 --- a/weed/storage/volume_loading.go +++ b/weed/storage/volume_loading.go @@ -94,7 +94,7 @@ func (v *Volume) load(alsoLoadIndex bool, createDatIfMissing bool, needleMapKind glog.V(0).Infof("volumeDataIntegrityChecking failed %v", err) } - if v.noWriteOrDelete || v.noWriteCanDelete { + if v.IsReadOnly() { if v.nm, err = NewSortedFileNeedleMap(fileName, indexFile); err != nil { glog.V(0).Infof("loading sorted db %s error: %v", fileName+".sdx", err) } diff --git a/weed/topology/topology_vacuum.go b/weed/topology/topology_vacuum.go index 532029ac3..789a01330 100644 --- a/weed/topology/topology_vacuum.go +++ b/weed/topology/topology_vacuum.go @@ -93,12 +93,16 @@ func batchVacuumVolumeCompact(grpcDialOption grpc.DialOption, vl *VolumeLayout, } func batchVacuumVolumeCommit(grpcDialOption grpc.DialOption, vl *VolumeLayout, vid needle.VolumeId, locationlist *VolumeLocationList) bool { isCommitSuccess := true + isReadOnly := false for _, dn := range locationlist.list { glog.V(0).Infoln("Start Committing vacuum", vid, "on", dn.Url()) err := operation.WithVolumeServerClient(dn.Url(), grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error { - _, err := volumeServerClient.VacuumVolumeCommit(context.Background(), &volume_server_pb.VacuumVolumeCommitRequest{ + resp, err := volumeServerClient.VacuumVolumeCommit(context.Background(), &volume_server_pb.VacuumVolumeCommitRequest{ VolumeId: uint32(vid), }) + if resp.IsReadOnly { + isReadOnly = true + } return err }) if err != nil { @@ -110,7 +114,7 @@ func batchVacuumVolumeCommit(grpcDialOption grpc.DialOption, vl *VolumeLayout, v } if isCommitSuccess { for _, dn := range locationlist.list { - vl.SetVolumeAvailable(dn, vid) + vl.SetVolumeAvailable(dn, vid, isReadOnly) } } return isCommitSuccess diff --git a/weed/topology/volume_layout.go b/weed/topology/volume_layout.go index 144e24713..908bbb9e9 100644 --- a/weed/topology/volume_layout.go +++ b/weed/topology/volume_layout.go @@ -257,7 +257,7 @@ func (vl *VolumeLayout) SetVolumeUnavailable(dn *DataNode, vid needle.VolumeId) } return false } -func (vl *VolumeLayout) SetVolumeAvailable(dn *DataNode, vid needle.VolumeId) bool { +func (vl *VolumeLayout) SetVolumeAvailable(dn *DataNode, vid needle.VolumeId, isReadOnly bool) bool { vl.accessLock.Lock() defer vl.accessLock.Unlock() @@ -268,7 +268,7 @@ func (vl *VolumeLayout) SetVolumeAvailable(dn *DataNode, vid needle.VolumeId) bo 
vl.vid2location[vid].Set(dn) - if vInfo.ReadOnly { + if vInfo.ReadOnly || isReadOnly { return false } From b964bbab3db8f160fe3aa240a808d92aa5f80078 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Tue, 17 Mar 2020 10:01:24 -0700 Subject: [PATCH 0262/2432] fix compilation --- weed/storage/store.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/weed/storage/store.go b/weed/storage/store.go index f0dbbdf18..76fe4de27 100644 --- a/weed/storage/store.go +++ b/weed/storage/store.go @@ -229,7 +229,7 @@ func (s *Store) Close() { func (s *Store) WriteVolumeNeedle(i needle.VolumeId, n *needle.Needle) (isUnchanged bool, err error) { if v := s.findVolume(i); v != nil { - if v.v.IsReadOnly() { + if v.IsReadOnly() { err = fmt.Errorf("volume %d is read only", i) return } From bec6ec7db68fe3858ec6ecd683ef32f5236d174a Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Tue, 17 Mar 2020 10:01:55 -0700 Subject: [PATCH 0263/2432] go fmt --- weed/filer2/postgres/postgres_store.go | 4 +-- weed/filer2/redis/universal_redis_store.go | 1 - weed/pb/master_pb/master.pb.go | 20 ++++++++------ weed/pb/volume_server_pb/volume_server.pb.go | 26 ++++++++++++------- .../replication/sink/filersink/fetch_write.go | 2 +- weed/replication/sink/s3sink/s3_write.go | 2 +- weed/server/filer_server_handlers_read.go | 1 - 7 files changed, 32 insertions(+), 24 deletions(-) diff --git a/weed/filer2/postgres/postgres_store.go b/weed/filer2/postgres/postgres_store.go index 2e5f892f1..51c069aae 100644 --- a/weed/filer2/postgres/postgres_store.go +++ b/weed/filer2/postgres/postgres_store.go @@ -51,10 +51,10 @@ func (store *PostgresStore) initialize(user, password, hostname string, port int sqlUrl := fmt.Sprintf(CONNECTION_URL_PATTERN, hostname, port, user, sslmode) if password != "" { - sqlUrl += " password="+password + sqlUrl += " password=" + password } if database != "" { - sqlUrl += " dbname="+database + sqlUrl += " dbname=" + database } var dbErr error store.DB, dbErr = sql.Open("postgres", sqlUrl) diff --git a/weed/filer2/redis/universal_redis_store.go b/weed/filer2/redis/universal_redis_store.go index 1f8a0413a..c9f59d37b 100644 --- a/weed/filer2/redis/universal_redis_store.go +++ b/weed/filer2/redis/universal_redis_store.go @@ -181,7 +181,6 @@ func genDirectoryListKey(dir string) (dirList string) { return dir + DIR_LIST_MARKER } - func (store *UniversalRedisStore) Shutdown() { store.Client.Close() } diff --git a/weed/pb/master_pb/master.pb.go b/weed/pb/master_pb/master.pb.go index c33e2b768..95c9533a1 100644 --- a/weed/pb/master_pb/master.pb.go +++ b/weed/pb/master_pb/master.pb.go @@ -428,10 +428,12 @@ type VolumeEcShardInformationMessage struct { EcIndexBits uint32 `protobuf:"varint,3,opt,name=ec_index_bits,json=ecIndexBits" json:"ec_index_bits,omitempty"` } -func (m *VolumeEcShardInformationMessage) Reset() { *m = VolumeEcShardInformationMessage{} } -func (m *VolumeEcShardInformationMessage) String() string { return proto.CompactTextString(m) } -func (*VolumeEcShardInformationMessage) ProtoMessage() {} -func (*VolumeEcShardInformationMessage) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } +func (m *VolumeEcShardInformationMessage) Reset() { *m = VolumeEcShardInformationMessage{} } +func (m *VolumeEcShardInformationMessage) String() string { return proto.CompactTextString(m) } +func (*VolumeEcShardInformationMessage) ProtoMessage() {} +func (*VolumeEcShardInformationMessage) Descriptor() ([]byte, []int) { + return fileDescriptor0, []int{4} +} func (m *VolumeEcShardInformationMessage) GetId() uint32 { 
if m != nil { @@ -1422,10 +1424,12 @@ type GetMasterConfigurationResponse struct { MetricsIntervalSeconds uint32 `protobuf:"varint,2,opt,name=metrics_interval_seconds,json=metricsIntervalSeconds" json:"metrics_interval_seconds,omitempty"` } -func (m *GetMasterConfigurationResponse) Reset() { *m = GetMasterConfigurationResponse{} } -func (m *GetMasterConfigurationResponse) String() string { return proto.CompactTextString(m) } -func (*GetMasterConfigurationResponse) ProtoMessage() {} -func (*GetMasterConfigurationResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{32} } +func (m *GetMasterConfigurationResponse) Reset() { *m = GetMasterConfigurationResponse{} } +func (m *GetMasterConfigurationResponse) String() string { return proto.CompactTextString(m) } +func (*GetMasterConfigurationResponse) ProtoMessage() {} +func (*GetMasterConfigurationResponse) Descriptor() ([]byte, []int) { + return fileDescriptor0, []int{32} +} func (m *GetMasterConfigurationResponse) GetMetricsAddress() string { if m != nil { diff --git a/weed/pb/volume_server_pb/volume_server.pb.go b/weed/pb/volume_server_pb/volume_server.pb.go index bcc31de16..293b894e2 100644 --- a/weed/pb/volume_server_pb/volume_server.pb.go +++ b/weed/pb/volume_server_pb/volume_server.pb.go @@ -1043,10 +1043,12 @@ func (m *VolumeEcShardsGenerateRequest) GetCollection() string { type VolumeEcShardsGenerateResponse struct { } -func (m *VolumeEcShardsGenerateResponse) Reset() { *m = VolumeEcShardsGenerateResponse{} } -func (m *VolumeEcShardsGenerateResponse) String() string { return proto.CompactTextString(m) } -func (*VolumeEcShardsGenerateResponse) ProtoMessage() {} -func (*VolumeEcShardsGenerateResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{41} } +func (m *VolumeEcShardsGenerateResponse) Reset() { *m = VolumeEcShardsGenerateResponse{} } +func (m *VolumeEcShardsGenerateResponse) String() string { return proto.CompactTextString(m) } +func (*VolumeEcShardsGenerateResponse) ProtoMessage() {} +func (*VolumeEcShardsGenerateResponse) Descriptor() ([]byte, []int) { + return fileDescriptor0, []int{41} +} type VolumeEcShardsRebuildRequest struct { VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` @@ -1419,10 +1421,12 @@ func (m *VolumeEcShardsToVolumeRequest) GetCollection() string { type VolumeEcShardsToVolumeResponse struct { } -func (m *VolumeEcShardsToVolumeResponse) Reset() { *m = VolumeEcShardsToVolumeResponse{} } -func (m *VolumeEcShardsToVolumeResponse) String() string { return proto.CompactTextString(m) } -func (*VolumeEcShardsToVolumeResponse) ProtoMessage() {} -func (*VolumeEcShardsToVolumeResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{57} } +func (m *VolumeEcShardsToVolumeResponse) Reset() { *m = VolumeEcShardsToVolumeResponse{} } +func (m *VolumeEcShardsToVolumeResponse) String() string { return proto.CompactTextString(m) } +func (*VolumeEcShardsToVolumeResponse) ProtoMessage() {} +func (*VolumeEcShardsToVolumeResponse) Descriptor() ([]byte, []int) { + return fileDescriptor0, []int{57} +} type ReadVolumeFileStatusRequest struct { VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` @@ -2089,8 +2093,10 @@ type QueryRequest_InputSerialization_JSONInput struct { func (m *QueryRequest_InputSerialization_JSONInput) Reset() { *m = QueryRequest_InputSerialization_JSONInput{} } -func (m *QueryRequest_InputSerialization_JSONInput) String() string { return proto.CompactTextString(m) } -func 
(*QueryRequest_InputSerialization_JSONInput) ProtoMessage() {} +func (m *QueryRequest_InputSerialization_JSONInput) String() string { + return proto.CompactTextString(m) +} +func (*QueryRequest_InputSerialization_JSONInput) ProtoMessage() {} func (*QueryRequest_InputSerialization_JSONInput) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{70, 1, 1} } diff --git a/weed/replication/sink/filersink/fetch_write.go b/weed/replication/sink/filersink/fetch_write.go index 360a34620..74f3a72bb 100644 --- a/weed/replication/sink/filersink/fetch_write.go +++ b/weed/replication/sink/filersink/fetch_write.go @@ -20,7 +20,7 @@ func (fs *FilerSink) replicateChunks(sourceChunks []*filer_pb.FileChunk, dir str return } - replicatedChunks = make([]*filer_pb.FileChunk, len(sourceChunks)) + replicatedChunks = make([]*filer_pb.FileChunk, len(sourceChunks)) var wg sync.WaitGroup for chunkIndex, sourceChunk := range sourceChunks { diff --git a/weed/replication/sink/s3sink/s3_write.go b/weed/replication/sink/s3sink/s3_write.go index 854688b1e..c5c65ed5c 100644 --- a/weed/replication/sink/s3sink/s3_write.go +++ b/weed/replication/sink/s3sink/s3_write.go @@ -162,6 +162,6 @@ func (s3sink *S3Sink) buildReadSeeker(chunk *filer2.ChunkView) (io.ReadSeeker, e return nil, err } buf := make([]byte, chunk.Size) - util.ReadUrl(fileUrl, nil, false,false, chunk.Offset, int(chunk.Size), buf) + util.ReadUrl(fileUrl, nil, false, false, chunk.Offset, int(chunk.Size), buf) return bytes.NewReader(buf), nil } diff --git a/weed/server/filer_server_handlers_read.go b/weed/server/filer_server_handlers_read.go index 5967535b8..878b52ffa 100644 --- a/weed/server/filer_server_handlers_read.go +++ b/weed/server/filer_server_handlers_read.go @@ -94,4 +94,3 @@ func (fs *FilerServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request, }) } - From db9854fa0a6b26eb1ce9826a018a58addbf807ca Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Tue, 17 Mar 2020 10:58:35 -0700 Subject: [PATCH 0264/2432] 1.64 --- k8s/seaweedfs/Chart.yaml | 2 +- k8s/seaweedfs/values.yaml | 2 +- weed/util/constants.go | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/k8s/seaweedfs/Chart.yaml b/k8s/seaweedfs/Chart.yaml index 95420c730..1944f4956 100644 --- a/k8s/seaweedfs/Chart.yaml +++ b/k8s/seaweedfs/Chart.yaml @@ -1,4 +1,4 @@ apiVersion: v1 description: SeaweedFS name: seaweedfs -version: c1.63 +version: 1.64 diff --git a/k8s/seaweedfs/values.yaml b/k8s/seaweedfs/values.yaml index 7ff7b6049..2192013c8 100644 --- a/k8s/seaweedfs/values.yaml +++ b/k8s/seaweedfs/values.yaml @@ -4,7 +4,7 @@ global: registry: "" repository: "" imageName: chrislusf/seaweedfs - imageTag: "c1.63" + imageTag: "1.64" imagePullPolicy: IfNotPresent imagePullSecrets: imagepullsecret restartPolicy: Always diff --git a/weed/util/constants.go b/weed/util/constants.go index cbf1c50ba..bf654f447 100644 --- a/weed/util/constants.go +++ b/weed/util/constants.go @@ -5,5 +5,5 @@ import ( ) var ( - VERSION = fmt.Sprintf("%s %d.%d", sizeLimit, 1, 63) + VERSION = fmt.Sprintf("%s %d.%d", sizeLimit, 1, 64) ) From 6566c8e114f123a0b8b4a8aac8aae62a8dbc1f97 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Wed, 18 Mar 2020 10:50:53 -0700 Subject: [PATCH 0265/2432] weed upload: add usePublicUrl option fix https://github.com/chrislusf/seaweedfs/issues/1236 --- weed/command/upload.go | 26 ++++++++++++-------------- weed/operation/submit.go | 6 ++++-- 2 files changed, 16 insertions(+), 16 deletions(-) diff --git a/weed/command/upload.go b/weed/command/upload.go index d71046131..358897aee 100644 
--- a/weed/command/upload.go +++ b/weed/command/upload.go @@ -16,14 +16,15 @@ var ( ) type UploadOptions struct { - master *string - dir *string - include *string - replication *string - collection *string - dataCenter *string - ttl *string - maxMB *int + master *string + dir *string + include *string + replication *string + collection *string + dataCenter *string + ttl *string + maxMB *int + usePublicUrl *bool } func init() { @@ -37,6 +38,7 @@ func init() { upload.dataCenter = cmdUpload.Flag.String("dataCenter", "", "optional data center name") upload.ttl = cmdUpload.Flag.String("ttl", "", "time to live, e.g.: 1m, 1h, 1d, 1M, 1y") upload.maxMB = cmdUpload.Flag.Int("maxMB", 32, "split files larger than the limit") + upload.usePublicUrl = cmdUpload.Flag.Bool("usePublicUrl", false, "upload to public url from volume server") } var cmdUpload = &Command{ @@ -79,9 +81,7 @@ func runUpload(cmd *Command, args []string) bool { if e != nil { return e } - results, e := operation.SubmitFiles(*upload.master, grpcDialOption, parts, - *upload.replication, *upload.collection, *upload.dataCenter, - *upload.ttl, *upload.maxMB) + results, e := operation.SubmitFiles(*upload.master, grpcDialOption, parts, *upload.replication, *upload.collection, *upload.dataCenter, *upload.ttl, *upload.maxMB, *upload.usePublicUrl) bytes, _ := json.Marshal(results) fmt.Println(string(bytes)) if e != nil { @@ -98,9 +98,7 @@ func runUpload(cmd *Command, args []string) bool { if e != nil { fmt.Println(e.Error()) } - results, _ := operation.SubmitFiles(*upload.master, grpcDialOption, parts, - *upload.replication, *upload.collection, *upload.dataCenter, - *upload.ttl, *upload.maxMB) + results, _ := operation.SubmitFiles(*upload.master, grpcDialOption, parts, *upload.replication, *upload.collection, *upload.dataCenter, *upload.ttl, *upload.maxMB, *upload.usePublicUrl) bytes, _ := json.Marshal(results) fmt.Println(string(bytes)) } diff --git a/weed/operation/submit.go b/weed/operation/submit.go index 5e4dc4374..4ed4815ec 100644 --- a/weed/operation/submit.go +++ b/weed/operation/submit.go @@ -37,8 +37,7 @@ type SubmitResult struct { Error string `json:"error,omitempty"` } -func SubmitFiles(master string, grpcDialOption grpc.DialOption, files []FilePart, - replication string, collection string, dataCenter string, ttl string, maxMB int) ([]SubmitResult, error) { +func SubmitFiles(master string, grpcDialOption grpc.DialOption, files []FilePart, replication string, collection string, dataCenter string, ttl string, maxMB int, usePublicUrl bool) ([]SubmitResult, error) { results := make([]SubmitResult, len(files)) for index, file := range files { results[index].FileName = file.FileName @@ -63,6 +62,9 @@ func SubmitFiles(master string, grpcDialOption grpc.DialOption, files []FilePart file.Fid = file.Fid + "_" + strconv.Itoa(index) } file.Server = ret.Url + if usePublicUrl { + file.Server = ret.PublicUrl + } file.Replication = replication file.Collection = collection file.DataCenter = dataCenter From d848d089448372326edd9f8a20dbc9fc3c46f6dd Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Wed, 18 Mar 2020 11:16:45 -0700 Subject: [PATCH 0266/2432] use public url also for deletions --- weed/command/benchmark.go | 2 +- weed/command/filer_copy.go | 2 +- weed/operation/chunked_file.go | 4 ++-- weed/operation/delete_content.go | 16 ++++++++++++---- weed/operation/submit.go | 10 +++++----- weed/server/volume_server_handlers_write.go | 2 +- 6 files changed, 22 insertions(+), 14 deletions(-) diff --git a/weed/command/benchmark.go 
b/weed/command/benchmark.go index e85ab1b9b..6f156d6ed 100644 --- a/weed/command/benchmark.go +++ b/weed/command/benchmark.go @@ -241,7 +241,7 @@ func writeFiles(idChan chan int, fileIdLineChan chan string, s *stat) { if !isSecure && assignResult.Auth != "" { isSecure = true } - if _, err := fp.Upload(0, b.masterClient.GetMaster(), assignResult.Auth, b.grpcDialOption); err == nil { + if _, err := fp.Upload(0, b.masterClient.GetMaster(), false, assignResult.Auth, b.grpcDialOption); err == nil { if random.Intn(100) < *b.deletePercentage { s.total++ delayedDeleteChan <- &delayedFile{time.Now().Add(time.Second), fp} diff --git a/weed/command/filer_copy.go b/weed/command/filer_copy.go index 0aee8cd80..a8aea36bc 100644 --- a/weed/command/filer_copy.go +++ b/weed/command/filer_copy.go @@ -459,7 +459,7 @@ func (worker *FileCopyWorker) uploadFileInChunks(task FileCopyTask, f *os.File, for _, chunk := range chunks { fileIds = append(fileIds, chunk.FileId) } - operation.DeleteFiles(copy.masters[0], worker.options.grpcDialOption, fileIds) + operation.DeleteFiles(copy.masters[0], false, worker.options.grpcDialOption, fileIds) return uploadError } diff --git a/weed/operation/chunked_file.go b/weed/operation/chunked_file.go index 295204dd8..4983245cc 100644 --- a/weed/operation/chunked_file.go +++ b/weed/operation/chunked_file.go @@ -72,12 +72,12 @@ func (cm *ChunkManifest) Marshal() ([]byte, error) { return json.Marshal(cm) } -func (cm *ChunkManifest) DeleteChunks(master string, grpcDialOption grpc.DialOption) error { +func (cm *ChunkManifest) DeleteChunks(master string, usePublicUrl bool, grpcDialOption grpc.DialOption) error { var fileIds []string for _, ci := range cm.Chunks { fileIds = append(fileIds, ci.Fid) } - results, err := DeleteFiles(master, grpcDialOption, fileIds) + results, err := DeleteFiles(master, usePublicUrl, grpcDialOption, fileIds) if err != nil { glog.V(0).Infof("delete %+v: %v", fileIds, err) return fmt.Errorf("chunk delete: %v", err) diff --git a/weed/operation/delete_content.go b/weed/operation/delete_content.go index 361c09e7e..d4a72f375 100644 --- a/weed/operation/delete_content.go +++ b/weed/operation/delete_content.go @@ -29,10 +29,18 @@ func ParseFileId(fid string) (vid string, key_cookie string, err error) { } // DeleteFiles batch deletes a list of fileIds -func DeleteFiles(master string, grpcDialOption grpc.DialOption, fileIds []string) ([]*volume_server_pb.DeleteResult, error) { - - lookupFunc := func(vids []string) (map[string]LookupResult, error) { - return LookupVolumeIds(master, grpcDialOption, vids) +func DeleteFiles(master string, usePublicUrl bool, grpcDialOption grpc.DialOption, fileIds []string) ([]*volume_server_pb.DeleteResult, error) { + + lookupFunc := func(vids []string) (results map[string]LookupResult, err error) { + results, err = LookupVolumeIds(master, grpcDialOption, vids) + if err == nil && usePublicUrl { + for _, result := range results { + for _, loc := range result.Locations { + loc.Url = loc.PublicUrl + } + } + } + return } return DeleteFilesWithLookupVolumeId(grpcDialOption, fileIds, lookupFunc) diff --git a/weed/operation/submit.go b/weed/operation/submit.go index 4ed4815ec..0552ab9de 100644 --- a/weed/operation/submit.go +++ b/weed/operation/submit.go @@ -68,7 +68,7 @@ func SubmitFiles(master string, grpcDialOption grpc.DialOption, files []FilePart file.Replication = replication file.Collection = collection file.DataCenter = dataCenter - results[index].Size, err = file.Upload(maxMB, master, ret.Auth, grpcDialOption) + results[index].Size, err 
= file.Upload(maxMB, master, usePublicUrl, ret.Auth, grpcDialOption) if err != nil { results[index].Error = err.Error() } @@ -111,7 +111,7 @@ func newFilePart(fullPathFilename string) (ret FilePart, err error) { return ret, nil } -func (fi FilePart) Upload(maxMB int, master string, jwt security.EncodedJwt, grpcDialOption grpc.DialOption) (retSize uint32, err error) { +func (fi FilePart) Upload(maxMB int, master string, usePublicUrl bool, jwt security.EncodedJwt, grpcDialOption grpc.DialOption) (retSize uint32, err error) { fileUrl := "http://" + fi.Server + "/" + fi.Fid if fi.ModTime != 0 { fileUrl += "?ts=" + strconv.Itoa(int(fi.ModTime)) @@ -155,7 +155,7 @@ func (fi FilePart) Upload(maxMB int, master string, jwt security.EncodedJwt, grp ret, err = Assign(master, grpcDialOption, ar) if err != nil { // delete all uploaded chunks - cm.DeleteChunks(master, grpcDialOption) + cm.DeleteChunks(master, usePublicUrl, grpcDialOption) return } id = ret.Fid @@ -173,7 +173,7 @@ func (fi FilePart) Upload(maxMB int, master string, jwt security.EncodedJwt, grp ret.Auth) if e != nil { // delete all uploaded chunks - cm.DeleteChunks(master, grpcDialOption) + cm.DeleteChunks(master, usePublicUrl, grpcDialOption) return 0, e } cm.Chunks = append(cm.Chunks, @@ -188,7 +188,7 @@ func (fi FilePart) Upload(maxMB int, master string, jwt security.EncodedJwt, grp err = upload_chunked_file_manifest(fileUrl, &cm, jwt) if err != nil { // delete all uploaded chunks - cm.DeleteChunks(master, grpcDialOption) + cm.DeleteChunks(master, usePublicUrl, grpcDialOption) } } else { ret, e := Upload(fileUrl, baseName, false, fi.Reader, false, fi.MimeType, nil, jwt) diff --git a/weed/server/volume_server_handlers_write.go b/weed/server/volume_server_handlers_write.go index 56cebf50f..3a71a1332 100644 --- a/weed/server/volume_server_handlers_write.go +++ b/weed/server/volume_server_handlers_write.go @@ -126,7 +126,7 @@ func (vs *VolumeServer) DeleteHandler(w http.ResponseWriter, r *http.Request) { return } // make sure all chunks had deleted before delete manifest - if e := chunkManifest.DeleteChunks(vs.GetMaster(), vs.grpcDialOption); e != nil { + if e := chunkManifest.DeleteChunks(vs.GetMaster(), false, vs.grpcDialOption); e != nil { writeJsonError(w, r, http.StatusInternalServerError, fmt.Errorf("Delete chunks error: %v", e)) return } From 709f231e23d2ad40e2094a906d50fefad31423be Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Thu, 19 Mar 2020 21:13:56 -0700 Subject: [PATCH 0267/2432] tiered storage: add s3 endpoint for private s3 implementation fix https://github.com/chrislusf/seaweedfs/issues/1238 --- weed/command/scaffold.go | 1 + weed/storage/backend/s3_backend/s3_backend.go | 6 +++++- weed/storage/backend/s3_backend/s3_sessions.go | 5 +++-- 3 files changed, 9 insertions(+), 3 deletions(-) diff --git a/weed/command/scaffold.go b/weed/command/scaffold.go index cb20adc72..bcb4fc31e 100644 --- a/weed/command/scaffold.go +++ b/weed/command/scaffold.go @@ -380,6 +380,7 @@ sequencer_etcd_urls = "http://127.0.0.1:2379" aws_secret_access_key = "" # if empty, loads from the shared credentials file (~/.aws/credentials). region = "us-east-2" bucket = "your_bucket_name" # an existing bucket + endpoint = "" # create this number of logical volumes if no more writable volumes # count_x means how many copies of data. 
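The endpoint option added in the scaffold.go hunk above lets the S3 storage tier point at a private, S3-compatible object store (for example MinIO or Ceph RGW) rather than the public AWS endpoint. The following is a minimal illustrative sketch of a filled-in configuration, not part of the patch itself; it assumes the enclosing section is named [storage.backend.s3.default] (the section header lies outside this hunk) and uses a purely hypothetical endpoint URL:

    [storage.backend.s3.default]            # section name assumed; the header is not visible in the hunk above
    aws_access_key_id = ""
    aws_secret_access_key = ""              # if empty, loads from the shared credentials file (~/.aws/credentials)
    region = "us-east-2"
    bucket = "your_bucket_name"             # an existing bucket
    # new in this patch: target a private S3-compatible service instead of AWS
    endpoint = "http://minio.example.internal:9000"   # hypothetical endpoint

The value is wired straight into the aws.Config constructed in the s3_sessions.go change that follows, and leaving it empty should preserve the default AWS endpoint resolution.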
diff --git a/weed/storage/backend/s3_backend/s3_backend.go b/weed/storage/backend/s3_backend/s3_backend.go index 8d71861c2..e08fef8a4 100644 --- a/weed/storage/backend/s3_backend/s3_backend.go +++ b/weed/storage/backend/s3_backend/s3_backend.go @@ -36,6 +36,7 @@ type S3BackendStorage struct { aws_secret_access_key string region string bucket string + endpoint string conn s3iface.S3API } @@ -46,7 +47,9 @@ func newS3BackendStorage(configuration backend.StringProperties, configPrefix st s.aws_secret_access_key = configuration.GetString(configPrefix + "aws_secret_access_key") s.region = configuration.GetString(configPrefix + "region") s.bucket = configuration.GetString(configPrefix + "bucket") - s.conn, err = createSession(s.aws_access_key_id, s.aws_secret_access_key, s.region) + s.endpoint = configuration.GetString(configPrefix + "endpoint") + + s.conn, err = createSession(s.aws_access_key_id, s.aws_secret_access_key, s.region, s.endpoint) glog.V(0).Infof("created backend storage s3.%s for region %s bucket %s", s.id, s.region, s.bucket) return @@ -58,6 +61,7 @@ func (s *S3BackendStorage) ToProperties() map[string]string { m["aws_secret_access_key"] = s.aws_secret_access_key m["region"] = s.region m["bucket"] = s.bucket + m["endpoint"] = s.endpoint return m } diff --git a/weed/storage/backend/s3_backend/s3_sessions.go b/weed/storage/backend/s3_backend/s3_sessions.go index 5fdbcb66b..e2fdf1eb6 100644 --- a/weed/storage/backend/s3_backend/s3_sessions.go +++ b/weed/storage/backend/s3_backend/s3_sessions.go @@ -24,7 +24,7 @@ func getSession(region string) (s3iface.S3API, bool) { return sess, found } -func createSession(awsAccessKeyId, awsSecretAccessKey, region string) (s3iface.S3API, error) { +func createSession(awsAccessKeyId, awsSecretAccessKey, region, endpoint string) (s3iface.S3API, error) { sessionsLock.Lock() defer sessionsLock.Unlock() @@ -34,7 +34,8 @@ func createSession(awsAccessKeyId, awsSecretAccessKey, region string) (s3iface.S } config := &aws.Config{ - Region: aws.String(region), + Region: aws.String(region), + Endpoint: aws.String(endpoint), } if awsAccessKeyId != "" && awsSecretAccessKey != "" { config.Credentials = credentials.NewStaticCredentials(awsAccessKeyId, awsSecretAccessKey, "") From 81797a059acc5c601281a06ccc3c041815c7df74 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Thu, 19 Mar 2020 23:54:52 -0700 Subject: [PATCH 0268/2432] volume: sync volume file right before compaction fix https://github.com/chrislusf/seaweedfs/issues/1237 --- weed/storage/backend/backend.go | 1 + weed/storage/backend/disk_file.go | 4 ++++ weed/storage/backend/memory_map/memory_map_backend.go | 4 ++++ weed/storage/backend/s3_backend/s3_backend.go | 4 ++++ weed/storage/volume_vacuum.go | 9 +++++++++ 5 files changed, 22 insertions(+) diff --git a/weed/storage/backend/backend.go b/weed/storage/backend/backend.go index 6941ca5a1..daab29621 100644 --- a/weed/storage/backend/backend.go +++ b/weed/storage/backend/backend.go @@ -19,6 +19,7 @@ type BackendStorageFile interface { io.Closer GetStat() (datSize int64, modTime time.Time, err error) Name() string + Sync() error } type BackendStorage interface { diff --git a/weed/storage/backend/disk_file.go b/weed/storage/backend/disk_file.go index c4b3caffb..2b04c8df2 100644 --- a/weed/storage/backend/disk_file.go +++ b/weed/storage/backend/disk_file.go @@ -48,3 +48,7 @@ func (df *DiskFile) GetStat() (datSize int64, modTime time.Time, err error) { func (df *DiskFile) Name() string { return df.fullFilePath } + +func (df *DiskFile) Sync() error { + return 
df.File.Sync() +} diff --git a/weed/storage/backend/memory_map/memory_map_backend.go b/weed/storage/backend/memory_map/memory_map_backend.go index 03e7308d0..44ef4d3e1 100644 --- a/weed/storage/backend/memory_map/memory_map_backend.go +++ b/weed/storage/backend/memory_map/memory_map_backend.go @@ -58,3 +58,7 @@ func (mmf *MemoryMappedFile) GetStat() (datSize int64, modTime time.Time, err er func (mmf *MemoryMappedFile) Name() string { return mmf.mm.File.Name() } + +func (mm *MemoryMappedFile) Sync() error { + return nil +} diff --git a/weed/storage/backend/s3_backend/s3_backend.go b/weed/storage/backend/s3_backend/s3_backend.go index e08fef8a4..4706c9334 100644 --- a/weed/storage/backend/s3_backend/s3_backend.go +++ b/weed/storage/backend/s3_backend/s3_backend.go @@ -179,3 +179,7 @@ func (s3backendStorageFile S3BackendStorageFile) GetStat() (datSize int64, modTi func (s3backendStorageFile S3BackendStorageFile) Name() string { return s3backendStorageFile.key } + +func (s3backendStorageFile S3BackendStorageFile) Sync() error { + return nil +} diff --git a/weed/storage/volume_vacuum.go b/weed/storage/volume_vacuum.go index 669d5dd6c..cec7badec 100644 --- a/weed/storage/volume_vacuum.go +++ b/weed/storage/volume_vacuum.go @@ -53,6 +53,9 @@ func (v *Volume) Compact(preallocate int64, compactionBytePerSecond int64) error v.lastCompactIndexOffset = v.IndexFileSize() v.lastCompactRevision = v.SuperBlock.CompactionRevision glog.V(3).Infof("creating copies for volume %d ,last offset %d...", v.Id, v.lastCompactIndexOffset) + if err := v.DataBackend.Sync(); err != nil { + glog.V(0).Infof("compact fail to sync volume %d", v.Id) + } return v.copyDataAndGenerateIndexFile(filePath+".cpd", filePath+".cpx", preallocate, compactionBytePerSecond) } @@ -73,6 +76,9 @@ func (v *Volume) Compact2(preallocate int64, compactionBytePerSecond int64) erro v.lastCompactIndexOffset = v.IndexFileSize() v.lastCompactRevision = v.SuperBlock.CompactionRevision glog.V(3).Infof("creating copies for volume %d ...", v.Id) + if err := v.DataBackend.Sync(); err != nil { + glog.V(0).Infof("compact2 fail to sync volume %d", v.Id) + } return copyDataBasedOnIndexFile(filePath+".dat", filePath+".idx", filePath+".cpd", filePath+".cpx", v.SuperBlock, v.Version(), preallocate, compactionBytePerSecond) } @@ -93,6 +99,9 @@ func (v *Volume) CommitCompact() error { glog.V(3).Infof("Got volume %d committing lock...", v.Id) v.nm.Close() if v.DataBackend != nil { + if err := v.DataBackend.Sync(); err != nil { + glog.V(0).Infof("fail to sync volume %d", v.Id) + } if err := v.DataBackend.Close(); err != nil { glog.V(0).Infof("fail to close volume %d", v.Id) } From 165b0d22a43daa8680611cc366f4032196956725 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Fri, 20 Mar 2020 00:05:42 -0700 Subject: [PATCH 0269/2432] 1.65 --- k8s/seaweedfs/Chart.yaml | 2 +- k8s/seaweedfs/values.yaml | 2 +- weed/util/constants.go | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/k8s/seaweedfs/Chart.yaml b/k8s/seaweedfs/Chart.yaml index 1944f4956..9ee7ed242 100644 --- a/k8s/seaweedfs/Chart.yaml +++ b/k8s/seaweedfs/Chart.yaml @@ -1,4 +1,4 @@ apiVersion: v1 description: SeaweedFS name: seaweedfs -version: 1.64 +version: 1.65 diff --git a/k8s/seaweedfs/values.yaml b/k8s/seaweedfs/values.yaml index 2192013c8..073afe65f 100644 --- a/k8s/seaweedfs/values.yaml +++ b/k8s/seaweedfs/values.yaml @@ -4,7 +4,7 @@ global: registry: "" repository: "" imageName: chrislusf/seaweedfs - imageTag: "1.64" + imageTag: "1.65" imagePullPolicy: IfNotPresent imagePullSecrets: 
imagepullsecret restartPolicy: Always diff --git a/weed/util/constants.go b/weed/util/constants.go index bf654f447..3a0eb4148 100644 --- a/weed/util/constants.go +++ b/weed/util/constants.go @@ -5,5 +5,5 @@ import ( ) var ( - VERSION = fmt.Sprintf("%s %d.%d", sizeLimit, 1, 64) + VERSION = fmt.Sprintf("%s %d.%d", sizeLimit, 1, 65) ) From c4bea45099a3768dae7ea683afa16f2154b01ffb Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Fri, 20 Mar 2020 14:17:31 -0700 Subject: [PATCH 0270/2432] S3 API: fix DeleteMultipleObjectsHandler fix https://github.com/chrislusf/seaweedfs/issues/1241 --- other/java/client/src/main/proto/filer.proto | 3 - weed/pb/filer.proto | 3 - weed/pb/filer_pb/filer.pb.go | 286 +++++++------------ weed/pb/master_pb/master.pb.go | 20 +- weed/pb/volume_server_pb/volume_server.pb.go | 26 +- weed/s3api/filer_multipart.go | 4 +- weed/s3api/filer_util.go | 81 ++---- weed/s3api/s3api_bucket_handlers.go | 2 +- weed/s3api/s3api_object_handlers.go | 51 ++-- weed/server/filer_grpc_server.go | 19 -- 10 files changed, 174 insertions(+), 321 deletions(-) diff --git a/other/java/client/src/main/proto/filer.proto b/other/java/client/src/main/proto/filer.proto index 8df46e917..b998c330c 100644 --- a/other/java/client/src/main/proto/filer.proto +++ b/other/java/client/src/main/proto/filer.proto @@ -24,9 +24,6 @@ service SeaweedFiler { rpc DeleteEntry (DeleteEntryRequest) returns (DeleteEntryResponse) { } - rpc StreamDeleteEntries (stream DeleteEntryRequest) returns (stream DeleteEntryResponse) { - } - rpc AtomicRenameEntry (AtomicRenameEntryRequest) returns (AtomicRenameEntryResponse) { } diff --git a/weed/pb/filer.proto b/weed/pb/filer.proto index 8df46e917..b998c330c 100644 --- a/weed/pb/filer.proto +++ b/weed/pb/filer.proto @@ -24,9 +24,6 @@ service SeaweedFiler { rpc DeleteEntry (DeleteEntryRequest) returns (DeleteEntryResponse) { } - rpc StreamDeleteEntries (stream DeleteEntryRequest) returns (stream DeleteEntryResponse) { - } - rpc AtomicRenameEntry (AtomicRenameEntryRequest) returns (AtomicRenameEntryResponse) { } diff --git a/weed/pb/filer_pb/filer.pb.go b/weed/pb/filer_pb/filer.pb.go index 9cf659ece..37373cb71 100644 --- a/weed/pb/filer_pb/filer.pb.go +++ b/weed/pb/filer_pb/filer.pb.go @@ -1136,7 +1136,6 @@ type SeaweedFilerClient interface { CreateEntry(ctx context.Context, in *CreateEntryRequest, opts ...grpc.CallOption) (*CreateEntryResponse, error) UpdateEntry(ctx context.Context, in *UpdateEntryRequest, opts ...grpc.CallOption) (*UpdateEntryResponse, error) DeleteEntry(ctx context.Context, in *DeleteEntryRequest, opts ...grpc.CallOption) (*DeleteEntryResponse, error) - StreamDeleteEntries(ctx context.Context, opts ...grpc.CallOption) (SeaweedFiler_StreamDeleteEntriesClient, error) AtomicRenameEntry(ctx context.Context, in *AtomicRenameEntryRequest, opts ...grpc.CallOption) (*AtomicRenameEntryResponse, error) AssignVolume(ctx context.Context, in *AssignVolumeRequest, opts ...grpc.CallOption) (*AssignVolumeResponse, error) LookupVolume(ctx context.Context, in *LookupVolumeRequest, opts ...grpc.CallOption) (*LookupVolumeResponse, error) @@ -1221,37 +1220,6 @@ func (c *seaweedFilerClient) DeleteEntry(ctx context.Context, in *DeleteEntryReq return out, nil } -func (c *seaweedFilerClient) StreamDeleteEntries(ctx context.Context, opts ...grpc.CallOption) (SeaweedFiler_StreamDeleteEntriesClient, error) { - stream, err := grpc.NewClientStream(ctx, &_SeaweedFiler_serviceDesc.Streams[1], c.cc, "/filer_pb.SeaweedFiler/StreamDeleteEntries", opts...) 
- if err != nil { - return nil, err - } - x := &seaweedFilerStreamDeleteEntriesClient{stream} - return x, nil -} - -type SeaweedFiler_StreamDeleteEntriesClient interface { - Send(*DeleteEntryRequest) error - Recv() (*DeleteEntryResponse, error) - grpc.ClientStream -} - -type seaweedFilerStreamDeleteEntriesClient struct { - grpc.ClientStream -} - -func (x *seaweedFilerStreamDeleteEntriesClient) Send(m *DeleteEntryRequest) error { - return x.ClientStream.SendMsg(m) -} - -func (x *seaweedFilerStreamDeleteEntriesClient) Recv() (*DeleteEntryResponse, error) { - m := new(DeleteEntryResponse) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - func (c *seaweedFilerClient) AtomicRenameEntry(ctx context.Context, in *AtomicRenameEntryRequest, opts ...grpc.CallOption) (*AtomicRenameEntryResponse, error) { out := new(AtomicRenameEntryResponse) err := grpc.Invoke(ctx, "/filer_pb.SeaweedFiler/AtomicRenameEntry", in, out, c.cc, opts...) @@ -1314,7 +1282,6 @@ type SeaweedFilerServer interface { CreateEntry(context.Context, *CreateEntryRequest) (*CreateEntryResponse, error) UpdateEntry(context.Context, *UpdateEntryRequest) (*UpdateEntryResponse, error) DeleteEntry(context.Context, *DeleteEntryRequest) (*DeleteEntryResponse, error) - StreamDeleteEntries(SeaweedFiler_StreamDeleteEntriesServer) error AtomicRenameEntry(context.Context, *AtomicRenameEntryRequest) (*AtomicRenameEntryResponse, error) AssignVolume(context.Context, *AssignVolumeRequest) (*AssignVolumeResponse, error) LookupVolume(context.Context, *LookupVolumeRequest) (*LookupVolumeResponse, error) @@ -1420,32 +1387,6 @@ func _SeaweedFiler_DeleteEntry_Handler(srv interface{}, ctx context.Context, dec return interceptor(ctx, in, info, handler) } -func _SeaweedFiler_StreamDeleteEntries_Handler(srv interface{}, stream grpc.ServerStream) error { - return srv.(SeaweedFilerServer).StreamDeleteEntries(&seaweedFilerStreamDeleteEntriesServer{stream}) -} - -type SeaweedFiler_StreamDeleteEntriesServer interface { - Send(*DeleteEntryResponse) error - Recv() (*DeleteEntryRequest, error) - grpc.ServerStream -} - -type seaweedFilerStreamDeleteEntriesServer struct { - grpc.ServerStream -} - -func (x *seaweedFilerStreamDeleteEntriesServer) Send(m *DeleteEntryResponse) error { - return x.ServerStream.SendMsg(m) -} - -func (x *seaweedFilerStreamDeleteEntriesServer) Recv() (*DeleteEntryRequest, error) { - m := new(DeleteEntryRequest) - if err := x.ServerStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - func _SeaweedFiler_AtomicRenameEntry_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(AtomicRenameEntryRequest) if err := dec(in); err != nil { @@ -1605,12 +1546,6 @@ var _SeaweedFiler_serviceDesc = grpc.ServiceDesc{ Handler: _SeaweedFiler_ListEntries_Handler, ServerStreams: true, }, - { - StreamName: "StreamDeleteEntries", - Handler: _SeaweedFiler_StreamDeleteEntries_Handler, - ServerStreams: true, - ClientStreams: true, - }, }, Metadata: "filer.proto", } @@ -1618,115 +1553,114 @@ var _SeaweedFiler_serviceDesc = grpc.ServiceDesc{ func init() { proto.RegisterFile("filer.proto", fileDescriptor0) } var fileDescriptor0 = []byte{ - // 1759 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xb4, 0x58, 0x4f, 0x6f, 0xdb, 0xc8, - 0x15, 0x0f, 0x25, 0x4b, 0x16, 0x9f, 0xa4, 0xac, 0x3d, 0x72, 0xb2, 0x8a, 0x62, 0xa7, 0x5e, 0xa6, - 0xd9, 0xba, 0x48, 0xe0, 0x06, 0xee, 
0x1e, 0x76, 0xbb, 0xed, 0x21, 0x71, 0x9c, 0x45, 0xd0, 0x24, - 0x9b, 0xd2, 0x49, 0xb1, 0x45, 0x81, 0x12, 0x34, 0x39, 0x96, 0xa7, 0xa6, 0x38, 0xdc, 0x99, 0x61, - 0x6c, 0xef, 0x47, 0x29, 0xd0, 0x43, 0xbf, 0x43, 0x8f, 0x45, 0x2f, 0x45, 0x81, 0x7e, 0x8e, 0x1e, - 0x7b, 0xe8, 0x67, 0x28, 0xe6, 0x0d, 0x49, 0x0d, 0x45, 0xd9, 0xde, 0xed, 0x62, 0x6f, 0x9c, 0xf7, - 0x6f, 0xde, 0xfc, 0xde, 0x5f, 0x09, 0xfa, 0xc7, 0x2c, 0xa1, 0x62, 0x37, 0x13, 0x5c, 0x71, 0xd2, - 0xc3, 0x43, 0x90, 0x1d, 0x79, 0x5f, 0xc2, 0xdd, 0x97, 0x9c, 0x9f, 0xe6, 0xd9, 0x33, 0x26, 0x68, - 0xa4, 0xb8, 0xb8, 0x38, 0x48, 0x95, 0xb8, 0xf0, 0xe9, 0xd7, 0x39, 0x95, 0x8a, 0x6c, 0x82, 0x1b, - 0x97, 0x8c, 0xb1, 0xb3, 0xed, 0xec, 0xb8, 0xfe, 0x9c, 0x40, 0x08, 0xac, 0xa4, 0xe1, 0x8c, 0x8e, - 0x5b, 0xc8, 0xc0, 0x6f, 0xef, 0x00, 0x36, 0x97, 0x1b, 0x94, 0x19, 0x4f, 0x25, 0x25, 0x0f, 0xa0, - 0x43, 0x35, 0x01, 0xad, 0xf5, 0xf7, 0x3e, 0xd8, 0x2d, 0x5d, 0xd9, 0x35, 0x72, 0x86, 0xeb, 0xfd, - 0xdd, 0x01, 0xf2, 0x92, 0x49, 0xa5, 0x89, 0x8c, 0xca, 0x6f, 0xe7, 0xcf, 0x6d, 0xe8, 0x66, 0x82, - 0x1e, 0xb3, 0xf3, 0xc2, 0xa3, 0xe2, 0x44, 0x1e, 0xc1, 0xba, 0x54, 0xa1, 0x50, 0xcf, 0x05, 0x9f, - 0x3d, 0x67, 0x09, 0x7d, 0xad, 0x9d, 0x6e, 0xa3, 0x48, 0x93, 0x41, 0x76, 0x81, 0xb0, 0x34, 0x4a, - 0x72, 0xc9, 0xde, 0xd3, 0xc3, 0x92, 0x3b, 0x5e, 0xd9, 0x76, 0x76, 0x7a, 0xfe, 0x12, 0x0e, 0xd9, - 0x80, 0x4e, 0xc2, 0x66, 0x4c, 0x8d, 0x3b, 0xdb, 0xce, 0xce, 0xd0, 0x37, 0x07, 0xef, 0x97, 0x30, - 0xaa, 0xf9, 0xff, 0xdd, 0x9e, 0xff, 0xe7, 0x16, 0x74, 0x90, 0x50, 0x61, 0xec, 0xcc, 0x31, 0x26, - 0x1f, 0xc1, 0x80, 0xc9, 0x60, 0x0e, 0x44, 0x0b, 0x7d, 0xeb, 0x33, 0x59, 0x61, 0x4e, 0x1e, 0x42, - 0x37, 0x3a, 0xc9, 0xd3, 0x53, 0x39, 0x6e, 0x6f, 0xb7, 0x77, 0xfa, 0x7b, 0xa3, 0xf9, 0x45, 0xfa, - 0xa1, 0xfb, 0x9a, 0xe7, 0x17, 0x22, 0xe4, 0x53, 0x80, 0x50, 0x29, 0xc1, 0x8e, 0x72, 0x45, 0x25, - 0xbe, 0xb4, 0xbf, 0x37, 0xb6, 0x14, 0x72, 0x49, 0x9f, 0x54, 0x7c, 0xdf, 0x92, 0x25, 0x9f, 0x41, - 0x8f, 0x9e, 0x2b, 0x9a, 0xc6, 0x34, 0x1e, 0x77, 0xf0, 0xa2, 0xad, 0x85, 0x17, 0xed, 0x1e, 0x14, - 0x7c, 0xf3, 0xbe, 0x4a, 0x7c, 0xf2, 0x39, 0x0c, 0x6b, 0x2c, 0xb2, 0x06, 0xed, 0x53, 0x5a, 0x46, - 0x55, 0x7f, 0x6a, 0x64, 0xdf, 0x87, 0x49, 0x6e, 0x12, 0x6c, 0xe0, 0x9b, 0xc3, 0x2f, 0x5a, 0x9f, - 0x3a, 0xde, 0x33, 0x70, 0x9f, 0xe7, 0x49, 0x52, 0x29, 0xc6, 0x4c, 0x94, 0x8a, 0x31, 0x13, 0x73, - 0x94, 0x5b, 0x57, 0xa2, 0xfc, 0x37, 0x07, 0xd6, 0x0f, 0xde, 0xd3, 0x54, 0xbd, 0xe6, 0x8a, 0x1d, - 0xb3, 0x28, 0x54, 0x8c, 0xa7, 0xe4, 0x11, 0xb8, 0x3c, 0x89, 0x83, 0x2b, 0xc3, 0xd4, 0xe3, 0x49, - 0xe1, 0xf5, 0x23, 0x70, 0x53, 0x7a, 0x16, 0x5c, 0x79, 0x5d, 0x2f, 0xa5, 0x67, 0x46, 0xfa, 0x3e, - 0x0c, 0x63, 0x9a, 0x50, 0x45, 0x83, 0x2a, 0x3a, 0x3a, 0x74, 0x03, 0x43, 0xdc, 0x37, 0xe1, 0xf8, - 0x18, 0x3e, 0xd0, 0x26, 0xb3, 0x50, 0xd0, 0x54, 0x05, 0x59, 0xa8, 0x4e, 0x30, 0x26, 0xae, 0x3f, - 0x4c, 0xe9, 0xd9, 0x1b, 0xa4, 0xbe, 0x09, 0xd5, 0x89, 0xf7, 0xd7, 0x16, 0xb8, 0x55, 0x30, 0xc9, - 0x87, 0xb0, 0xaa, 0xaf, 0x0d, 0x58, 0x5c, 0x20, 0xd1, 0xd5, 0xc7, 0x17, 0xb1, 0xae, 0x0a, 0x7e, - 0x7c, 0x2c, 0xa9, 0x42, 0xf7, 0xda, 0x7e, 0x71, 0xd2, 0x99, 0x25, 0xd9, 0x37, 0xa6, 0x10, 0x56, - 0x7c, 0xfc, 0xd6, 0x88, 0xcf, 0x14, 0x9b, 0x51, 0xbc, 0xb0, 0xed, 0x9b, 0x03, 0x19, 0x41, 0x87, - 0x06, 0x2a, 0x9c, 0x62, 0x86, 0xbb, 0xfe, 0x0a, 0x7d, 0x1b, 0x4e, 0xc9, 0x8f, 0xe1, 0xa6, 0xe4, - 0xb9, 0x88, 0x68, 0x50, 0x5e, 0xdb, 0x45, 0xee, 0xc0, 0x50, 0x9f, 0x9b, 0xcb, 0x3d, 0x68, 0x1f, - 0xb3, 0x78, 0xbc, 0x8a, 0xc0, 0xac, 0xd5, 0x93, 0xf0, 0x45, 0xec, 0x6b, 0x26, 0xf9, 0x19, 0x40, - 0x65, 0x29, 0x1e, 0xf7, 0x2e, 0x11, 0x75, 0x4b, 0xbb, 0x31, 
0xd9, 0x02, 0x88, 0x58, 0x76, 0x42, - 0x45, 0xa0, 0x13, 0xc6, 0xc5, 0xe4, 0x70, 0x0d, 0xe5, 0xd7, 0xf4, 0x42, 0xb3, 0x99, 0x0c, 0xa6, - 0xdf, 0xb0, 0x2c, 0xa3, 0xf1, 0x18, 0x10, 0x61, 0x97, 0xc9, 0x2f, 0x0c, 0xc1, 0xfb, 0x0a, 0xba, - 0x85, 0x73, 0x77, 0xc1, 0x7d, 0xcf, 0x93, 0x7c, 0x56, 0x81, 0x36, 0xf4, 0x7b, 0x86, 0xf0, 0x22, - 0x26, 0x77, 0x00, 0xbb, 0x24, 0x5e, 0xd1, 0x42, 0x88, 0x10, 0x5f, 0x7d, 0xc1, 0x6d, 0xe8, 0x46, - 0x9c, 0x9f, 0x32, 0x83, 0xdd, 0xaa, 0x5f, 0x9c, 0xbc, 0xff, 0xb6, 0xe0, 0x66, 0xbd, 0x58, 0xf4, - 0x15, 0x68, 0x05, 0x91, 0x76, 0xd0, 0x0c, 0x9a, 0x3d, 0xac, 0xa1, 0xdd, 0xb2, 0xd1, 0x2e, 0x55, - 0x66, 0x3c, 0x36, 0x17, 0x0c, 0x8d, 0xca, 0x2b, 0x1e, 0x53, 0x9d, 0xeb, 0x39, 0x8b, 0x31, 0x3c, - 0x43, 0x5f, 0x7f, 0x6a, 0xca, 0x94, 0xc5, 0x45, 0xf3, 0xd1, 0x9f, 0xe8, 0x9e, 0x40, 0xbb, 0x5d, - 0x13, 0x70, 0x73, 0xd2, 0x01, 0x9f, 0x69, 0xea, 0xaa, 0x89, 0xa2, 0xfe, 0x26, 0xdb, 0xd0, 0x17, - 0x34, 0x4b, 0x8a, 0xdc, 0x47, 0xf0, 0x5d, 0xdf, 0x26, 0x91, 0x7b, 0x00, 0x11, 0x4f, 0x12, 0x1a, - 0xa1, 0x80, 0x8b, 0x02, 0x16, 0x45, 0xe7, 0x9d, 0x52, 0x49, 0x20, 0x69, 0x84, 0x50, 0x77, 0xfc, - 0xae, 0x52, 0xc9, 0x21, 0x8d, 0xf4, 0x3b, 0x72, 0x49, 0x45, 0x80, 0xed, 0xab, 0x8f, 0x7a, 0x3d, - 0x4d, 0xc0, 0x26, 0xbb, 0x05, 0x30, 0x15, 0x3c, 0xcf, 0x0c, 0x77, 0xb0, 0xdd, 0xd6, 0x9d, 0x1c, - 0x29, 0xc8, 0x7e, 0x00, 0x37, 0xe5, 0xc5, 0x2c, 0x61, 0xe9, 0x69, 0xa0, 0x42, 0x31, 0xa5, 0x6a, - 0x3c, 0x34, 0x15, 0x50, 0x50, 0xdf, 0x22, 0xd1, 0xcb, 0x80, 0xec, 0x0b, 0x1a, 0x2a, 0xfa, 0x1d, - 0x86, 0xd6, 0xb7, 0xeb, 0x0d, 0xe4, 0x16, 0x74, 0x79, 0x40, 0xcf, 0xa3, 0xa4, 0x28, 0xd1, 0x0e, - 0x3f, 0x38, 0x8f, 0x12, 0xef, 0x21, 0x8c, 0x6a, 0x37, 0x16, 0x6d, 0x7d, 0x03, 0x3a, 0x54, 0x08, - 0x5e, 0x36, 0x21, 0x73, 0xf0, 0x7e, 0x07, 0xe4, 0x5d, 0x16, 0xff, 0x10, 0xee, 0x79, 0xb7, 0x60, - 0x54, 0x33, 0x6d, 0xfc, 0xf0, 0xfe, 0xe9, 0x00, 0x79, 0x86, 0xbd, 0xe4, 0xfb, 0x8d, 0x71, 0x5d, - 0xdd, 0x7a, 0xc4, 0x98, 0x5e, 0x15, 0x87, 0x2a, 0x2c, 0x06, 0xe0, 0x80, 0x49, 0x63, 0xff, 0x59, - 0xa8, 0xc2, 0x62, 0x10, 0x09, 0x1a, 0xe5, 0x42, 0xcf, 0x44, 0x4c, 0x42, 0x1c, 0x44, 0x7e, 0x49, - 0x22, 0x9f, 0xc0, 0x6d, 0x36, 0x4d, 0xb9, 0xa0, 0x73, 0xb1, 0xc0, 0x40, 0xd5, 0x45, 0xe1, 0x0d, - 0xc3, 0xad, 0x14, 0x0e, 0x10, 0xb9, 0x87, 0x30, 0xaa, 0x3d, 0xe3, 0x4a, 0x98, 0xff, 0xe4, 0xc0, - 0xf8, 0x89, 0xe2, 0x33, 0x16, 0xf9, 0x54, 0x3b, 0x5f, 0x7b, 0xfa, 0x7d, 0x18, 0xea, 0x6e, 0xbe, - 0xf8, 0xfc, 0x01, 0x4f, 0xe2, 0xf9, 0xb4, 0xbc, 0x03, 0xba, 0xa1, 0x07, 0x16, 0x0a, 0xab, 0x3c, - 0x89, 0x31, 0x13, 0xef, 0x83, 0xee, 0xba, 0x96, 0xbe, 0xd9, 0x1b, 0x06, 0x29, 0x3d, 0xab, 0xe9, - 0x6b, 0x21, 0xd4, 0x37, 0xad, 0x7a, 0x35, 0xa5, 0x67, 0x5a, 0xdf, 0xbb, 0x0b, 0x77, 0x96, 0xf8, - 0x56, 0x84, 0xeb, 0x5f, 0x0e, 0x8c, 0x9e, 0x48, 0xc9, 0xa6, 0xe9, 0x6f, 0xb1, 0xed, 0x94, 0x4e, - 0x6f, 0x40, 0x27, 0xe2, 0x79, 0xaa, 0xd0, 0xd9, 0x8e, 0x6f, 0x0e, 0x0b, 0x95, 0xd8, 0x6a, 0x54, - 0xe2, 0x42, 0x2d, 0xb7, 0x9b, 0xb5, 0x6c, 0xd5, 0xea, 0x4a, 0xad, 0x56, 0x7f, 0x04, 0x7d, 0x1d, - 0xe4, 0x20, 0xa2, 0xa9, 0xa2, 0xa2, 0xe8, 0xf3, 0xa0, 0x49, 0xfb, 0x48, 0xd1, 0x02, 0xf6, 0x3c, - 0x32, 0xad, 0x1e, 0xb2, 0xf9, 0x30, 0xfa, 0xb7, 0x03, 0x1b, 0xf5, 0xa7, 0x14, 0x31, 0xbb, 0x74, - 0x2e, 0xe9, 0x56, 0x26, 0x92, 0xe2, 0x1d, 0xfa, 0x53, 0x37, 0x85, 0x2c, 0x3f, 0x4a, 0x58, 0x14, - 0x68, 0x86, 0xf1, 0xdf, 0x35, 0x94, 0x77, 0x22, 0x99, 0xa3, 0xb2, 0x62, 0xa3, 0x42, 0x60, 0x25, - 0xcc, 0xd5, 0x49, 0x39, 0x9b, 0xf4, 0xf7, 0x02, 0x52, 0xdd, 0xeb, 0x90, 0x5a, 0x6d, 0x22, 0x55, - 0x65, 0x5a, 0xcf, 0xce, 0xb4, 0x4f, 0x60, 0x64, 0x96, 0xdb, 0x7a, 0xb8, 0xb6, 0x00, 
0xaa, 0x39, - 0x22, 0xc7, 0x8e, 0x69, 0x66, 0xe5, 0x20, 0x91, 0xde, 0xaf, 0xc0, 0x7d, 0xc9, 0x8d, 0x5d, 0x49, - 0x1e, 0x83, 0x9b, 0x94, 0x07, 0x14, 0xed, 0xef, 0x91, 0x79, 0x8d, 0x97, 0x72, 0xfe, 0x5c, 0xc8, - 0xfb, 0x1c, 0x7a, 0x25, 0xb9, 0xc4, 0xcc, 0xb9, 0x0c, 0xb3, 0xd6, 0x02, 0x66, 0xde, 0x3f, 0x1c, - 0xd8, 0xa8, 0xbb, 0x5c, 0x84, 0xe5, 0x1d, 0x0c, 0xab, 0x2b, 0x82, 0x59, 0x98, 0x15, 0xbe, 0x3c, - 0xb6, 0x7d, 0x69, 0xaa, 0x55, 0x0e, 0xca, 0x57, 0x61, 0x66, 0x72, 0x79, 0x90, 0x58, 0xa4, 0xc9, - 0x5b, 0x58, 0x6f, 0x88, 0x2c, 0xd9, 0xec, 0x7e, 0x6a, 0x6f, 0x76, 0xb5, 0xed, 0xb4, 0xd2, 0xb6, - 0xd7, 0xbd, 0xcf, 0xe0, 0x43, 0xd3, 0x0e, 0xf6, 0xab, 0x18, 0x96, 0xd8, 0xd7, 0x43, 0xed, 0x2c, - 0x86, 0xda, 0x9b, 0xc0, 0xb8, 0xa9, 0x5a, 0x94, 0xdf, 0x14, 0xd6, 0x0f, 0x55, 0xa8, 0x98, 0x54, - 0x2c, 0xaa, 0x7e, 0x62, 0x2c, 0xe4, 0x86, 0x73, 0xdd, 0x44, 0x6c, 0xd6, 0xe1, 0x1a, 0xb4, 0x95, - 0x2a, 0xf3, 0x57, 0x7f, 0xea, 0x28, 0x10, 0xfb, 0xa6, 0x22, 0x06, 0x3f, 0xc0, 0x55, 0x3a, 0x1f, - 0x14, 0x57, 0x61, 0x62, 0x36, 0x8e, 0x15, 0xdc, 0x38, 0x5c, 0xa4, 0xe0, 0xca, 0x61, 0x86, 0x72, - 0x6c, 0xb8, 0x1d, 0xb3, 0x8f, 0x68, 0x02, 0x32, 0xb7, 0x00, 0xb0, 0x54, 0x4d, 0x95, 0x75, 0x8d, - 0xae, 0xa6, 0xec, 0x6b, 0x82, 0x77, 0x0f, 0x36, 0xbf, 0xa0, 0x4a, 0xef, 0x4e, 0x62, 0x9f, 0xa7, - 0xc7, 0x6c, 0x9a, 0x8b, 0xd0, 0x0a, 0x85, 0xf7, 0x1f, 0x07, 0xb6, 0x2e, 0x11, 0x28, 0x1e, 0x3c, - 0x86, 0xd5, 0x59, 0x28, 0x15, 0x15, 0x65, 0x95, 0x94, 0xc7, 0x45, 0x28, 0x5a, 0xd7, 0x41, 0xd1, - 0x6e, 0x40, 0x71, 0x0b, 0xba, 0xb3, 0xf0, 0x3c, 0x98, 0x1d, 0x15, 0xcb, 0x51, 0x67, 0x16, 0x9e, - 0xbf, 0x3a, 0xc2, 0xce, 0xc6, 0x44, 0x70, 0x94, 0x47, 0xa7, 0x54, 0xc9, 0xaa, 0xb3, 0x31, 0xf1, - 0xd4, 0x50, 0xf4, 0xa3, 0xb5, 0xc0, 0xd7, 0x39, 0xcd, 0xa9, 0x2c, 0x7a, 0x85, 0x1e, 0x8e, 0xbf, - 0x41, 0x02, 0x2e, 0x53, 0xb8, 0x59, 0x62, 0x97, 0xe8, 0xf9, 0xc5, 0x69, 0xef, 0x2f, 0x3d, 0x18, - 0x1c, 0xd2, 0xf0, 0x8c, 0xd2, 0x18, 0x1f, 0x4c, 0xa6, 0x65, 0xa1, 0xd5, 0x7f, 0xf8, 0x92, 0x07, - 0x8b, 0x15, 0xb5, 0xf4, 0x97, 0xf6, 0xe4, 0xe3, 0xeb, 0xc4, 0x8a, 0x9c, 0xbd, 0x41, 0x5e, 0x43, - 0xdf, 0xfa, 0x65, 0x49, 0x36, 0x2d, 0xc5, 0xc6, 0x0f, 0xe6, 0xc9, 0xd6, 0x25, 0xdc, 0xd2, 0xda, - 0x63, 0x87, 0xbc, 0x84, 0xbe, 0xb5, 0xd2, 0xd8, 0xf6, 0x9a, 0xbb, 0x95, 0x6d, 0x6f, 0xc9, 0x1e, - 0xe4, 0xdd, 0xd0, 0xd6, 0xac, 0xc5, 0xc4, 0xb6, 0xd6, 0x5c, 0x85, 0x6c, 0x6b, 0xcb, 0xb6, 0x19, - 0xb4, 0x66, 0xed, 0x01, 0xb6, 0xb5, 0xe6, 0x96, 0x63, 0x5b, 0x5b, 0xb2, 0x3c, 0x78, 0x37, 0xc8, - 0x57, 0x30, 0x3a, 0x54, 0x82, 0x86, 0xb3, 0x39, 0x7b, 0x01, 0xc1, 0xff, 0xc3, 0xea, 0x8e, 0xf3, - 0xd8, 0x21, 0x7f, 0x80, 0xf5, 0xc6, 0x94, 0x27, 0xde, 0x5c, 0xf3, 0xb2, 0xf5, 0x64, 0x72, 0xff, - 0x4a, 0x99, 0xca, 0xf3, 0x2f, 0x61, 0x60, 0x0f, 0x57, 0x62, 0x39, 0xb5, 0x64, 0x7f, 0x98, 0xdc, - 0xbb, 0x8c, 0x6d, 0x1b, 0xb4, 0xfb, 0xbb, 0x6d, 0x70, 0xc9, 0x84, 0xb3, 0x0d, 0x2e, 0x1b, 0x0b, - 0xde, 0x0d, 0xf2, 0x7b, 0x58, 0x5b, 0xec, 0xb3, 0xe4, 0xa3, 0x45, 0xe8, 0x1a, 0xed, 0x7b, 0xe2, - 0x5d, 0x25, 0x52, 0x19, 0x7f, 0x01, 0x30, 0x6f, 0x9f, 0xe4, 0xee, 0x5c, 0xa7, 0xd1, 0xbe, 0x27, - 0x9b, 0xcb, 0x99, 0x95, 0xa9, 0x3f, 0xc2, 0xad, 0xa5, 0x3d, 0x8a, 0x58, 0x05, 0x78, 0x55, 0x97, - 0x9b, 0xfc, 0xe4, 0x5a, 0xb9, 0xf2, 0xae, 0xa7, 0xf7, 0x60, 0x4d, 0x9a, 0x16, 0x71, 0x2c, 0x77, - 0xa3, 0x84, 0xd1, 0x54, 0x3d, 0x05, 0xd4, 0x78, 0x23, 0xb8, 0xe2, 0x47, 0x5d, 0xfc, 0x37, 0xee, - 0xe7, 0xff, 0x0b, 0x00, 0x00, 0xff, 0xff, 0x35, 0x0b, 0x9e, 0x2e, 0x9c, 0x13, 0x00, 0x00, + // 1739 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xb4, 
0x58, 0xdd, 0x6e, 0xdb, 0xc8, + 0x15, 0x0e, 0x25, 0xeb, 0x87, 0x47, 0x52, 0xd6, 0x1e, 0x3b, 0x59, 0x45, 0xb1, 0x53, 0x2f, 0xd3, + 0x6c, 0x53, 0x24, 0x70, 0x03, 0x77, 0x2f, 0x76, 0xbb, 0xed, 0x45, 0xe2, 0x38, 0x8b, 0xa0, 0x49, + 0x36, 0xa5, 0x93, 0xa2, 0x45, 0x81, 0x12, 0x34, 0x39, 0x92, 0xa7, 0xa6, 0x38, 0xdc, 0x99, 0x61, + 0xec, 0xec, 0xa3, 0x14, 0xe8, 0x5b, 0xf4, 0xb2, 0xe8, 0x4d, 0x51, 0xb4, 0xcf, 0xd1, 0xcb, 0x5e, + 0xf4, 0x19, 0x8a, 0x39, 0x43, 0x52, 0x43, 0x51, 0xb6, 0x77, 0xb1, 0xc8, 0x1d, 0xe7, 0xfc, 0xcd, + 0x99, 0xef, 0xfc, 0x4a, 0x30, 0x98, 0xb2, 0x84, 0x8a, 0xbd, 0x4c, 0x70, 0xc5, 0x49, 0x1f, 0x0f, + 0x41, 0x76, 0xec, 0x7d, 0x0d, 0xb7, 0x5f, 0x70, 0x7e, 0x9a, 0x67, 0x4f, 0x99, 0xa0, 0x91, 0xe2, + 0xe2, 0xfd, 0x61, 0xaa, 0xc4, 0x7b, 0x9f, 0x7e, 0x93, 0x53, 0xa9, 0xc8, 0x36, 0xb8, 0x71, 0xc9, + 0x18, 0x3b, 0xbb, 0xce, 0x7d, 0xd7, 0x5f, 0x10, 0x08, 0x81, 0xb5, 0x34, 0x9c, 0xd3, 0x71, 0x0b, + 0x19, 0xf8, 0xed, 0x1d, 0xc2, 0xf6, 0x6a, 0x83, 0x32, 0xe3, 0xa9, 0xa4, 0xe4, 0x1e, 0x74, 0xa8, + 0x26, 0xa0, 0xb5, 0xc1, 0xfe, 0x47, 0x7b, 0xa5, 0x2b, 0x7b, 0x46, 0xce, 0x70, 0xbd, 0xbf, 0x3b, + 0x40, 0x5e, 0x30, 0xa9, 0x34, 0x91, 0x51, 0xf9, 0xdd, 0xfc, 0xb9, 0x09, 0xdd, 0x4c, 0xd0, 0x29, + 0x3b, 0x2f, 0x3c, 0x2a, 0x4e, 0xe4, 0x21, 0x6c, 0x48, 0x15, 0x0a, 0xf5, 0x4c, 0xf0, 0xf9, 0x33, + 0x96, 0xd0, 0x57, 0xda, 0xe9, 0x36, 0x8a, 0x34, 0x19, 0x64, 0x0f, 0x08, 0x4b, 0xa3, 0x24, 0x97, + 0xec, 0x1d, 0x3d, 0x2a, 0xb9, 0xe3, 0xb5, 0x5d, 0xe7, 0x7e, 0xdf, 0x5f, 0xc1, 0x21, 0x5b, 0xd0, + 0x49, 0xd8, 0x9c, 0xa9, 0x71, 0x67, 0xd7, 0xb9, 0x3f, 0xf2, 0xcd, 0xc1, 0xfb, 0x25, 0x6c, 0xd6, + 0xfc, 0xff, 0x7e, 0xcf, 0xff, 0x4b, 0x0b, 0x3a, 0x48, 0xa8, 0x30, 0x76, 0x16, 0x18, 0x93, 0x4f, + 0x60, 0xc8, 0x64, 0xb0, 0x00, 0xa2, 0x85, 0xbe, 0x0d, 0x98, 0xac, 0x30, 0x27, 0x0f, 0xa0, 0x1b, + 0x9d, 0xe4, 0xe9, 0xa9, 0x1c, 0xb7, 0x77, 0xdb, 0xf7, 0x07, 0xfb, 0x9b, 0x8b, 0x8b, 0xf4, 0x43, + 0x0f, 0x34, 0xcf, 0x2f, 0x44, 0xc8, 0xe7, 0x00, 0xa1, 0x52, 0x82, 0x1d, 0xe7, 0x8a, 0x4a, 0x7c, + 0xe9, 0x60, 0x7f, 0x6c, 0x29, 0xe4, 0x92, 0x3e, 0xae, 0xf8, 0xbe, 0x25, 0x4b, 0xbe, 0x80, 0x3e, + 0x3d, 0x57, 0x34, 0x8d, 0x69, 0x3c, 0xee, 0xe0, 0x45, 0x3b, 0x4b, 0x2f, 0xda, 0x3b, 0x2c, 0xf8, + 0xe6, 0x7d, 0x95, 0xf8, 0xe4, 0x4b, 0x18, 0xd5, 0x58, 0x64, 0x1d, 0xda, 0xa7, 0xb4, 0x8c, 0xaa, + 0xfe, 0xd4, 0xc8, 0xbe, 0x0b, 0x93, 0xdc, 0x24, 0xd8, 0xd0, 0x37, 0x87, 0x5f, 0xb4, 0x3e, 0x77, + 0xbc, 0xa7, 0xe0, 0x3e, 0xcb, 0x93, 0xa4, 0x52, 0x8c, 0x99, 0x28, 0x15, 0x63, 0x26, 0x16, 0x28, + 0xb7, 0x2e, 0x45, 0xf9, 0x6f, 0x0e, 0x6c, 0x1c, 0xbe, 0xa3, 0xa9, 0x7a, 0xc5, 0x15, 0x9b, 0xb2, + 0x28, 0x54, 0x8c, 0xa7, 0xe4, 0x21, 0xb8, 0x3c, 0x89, 0x83, 0x4b, 0xc3, 0xd4, 0xe7, 0x49, 0xe1, + 0xf5, 0x43, 0x70, 0x53, 0x7a, 0x16, 0x5c, 0x7a, 0x5d, 0x3f, 0xa5, 0x67, 0x46, 0xfa, 0x2e, 0x8c, + 0x62, 0x9a, 0x50, 0x45, 0x83, 0x2a, 0x3a, 0x3a, 0x74, 0x43, 0x43, 0x3c, 0x30, 0xe1, 0xf8, 0x14, + 0x3e, 0xd2, 0x26, 0xb3, 0x50, 0xd0, 0x54, 0x05, 0x59, 0xa8, 0x4e, 0x30, 0x26, 0xae, 0x3f, 0x4a, + 0xe9, 0xd9, 0x6b, 0xa4, 0xbe, 0x0e, 0xd5, 0x89, 0xf7, 0xd7, 0x16, 0xb8, 0x55, 0x30, 0xc9, 0xc7, + 0xd0, 0xd3, 0xd7, 0x06, 0x2c, 0x2e, 0x90, 0xe8, 0xea, 0xe3, 0xf3, 0x58, 0x57, 0x05, 0x9f, 0x4e, + 0x25, 0x55, 0xe8, 0x5e, 0xdb, 0x2f, 0x4e, 0x3a, 0xb3, 0x24, 0xfb, 0xd6, 0x14, 0xc2, 0x9a, 0x8f, + 0xdf, 0x1a, 0xf1, 0xb9, 0x62, 0x73, 0x8a, 0x17, 0xb6, 0x7d, 0x73, 0x20, 0x9b, 0xd0, 0xa1, 0x81, + 0x0a, 0x67, 0x98, 0xe1, 0xae, 0xbf, 0x46, 0xdf, 0x84, 0x33, 0xf2, 0x63, 0xb8, 0x2e, 0x79, 0x2e, + 0x22, 0x1a, 0x94, 0xd7, 0x76, 0x91, 0x3b, 0x34, 0xd4, 0x67, 0xe6, 0x72, 0x0f, 0xda, 0x53, 
0x16, + 0x8f, 0x7b, 0x08, 0xcc, 0x7a, 0x3d, 0x09, 0x9f, 0xc7, 0xbe, 0x66, 0x92, 0x9f, 0x01, 0x54, 0x96, + 0xe2, 0x71, 0xff, 0x02, 0x51, 0xb7, 0xb4, 0x1b, 0x93, 0x1d, 0x80, 0x88, 0x65, 0x27, 0x54, 0x04, + 0x3a, 0x61, 0x5c, 0x4c, 0x0e, 0xd7, 0x50, 0x7e, 0x4d, 0xdf, 0x6b, 0x36, 0x93, 0xc1, 0xec, 0x5b, + 0x96, 0x65, 0x34, 0x1e, 0x03, 0x22, 0xec, 0x32, 0xf9, 0x95, 0x21, 0x78, 0xbf, 0x83, 0x6e, 0xe1, + 0xdc, 0x6d, 0x70, 0xdf, 0xf1, 0x24, 0x9f, 0x57, 0xa0, 0x8d, 0xfc, 0xbe, 0x21, 0x3c, 0x8f, 0xc9, + 0x2d, 0xc0, 0x2e, 0x89, 0x57, 0xb4, 0x10, 0x22, 0xc4, 0x57, 0x5f, 0x70, 0x13, 0xba, 0x11, 0xe7, + 0xa7, 0xcc, 0x60, 0xd7, 0xf3, 0x8b, 0x93, 0xf7, 0xbf, 0x16, 0x5c, 0xaf, 0x17, 0x8b, 0xbe, 0x02, + 0xad, 0x20, 0xd2, 0x0e, 0x9a, 0x41, 0xb3, 0x47, 0x35, 0xb4, 0x5b, 0x36, 0xda, 0xa5, 0xca, 0x9c, + 0xc7, 0xe6, 0x82, 0x91, 0x51, 0x79, 0xc9, 0x63, 0xaa, 0x73, 0x3d, 0x67, 0x31, 0x86, 0x67, 0xe4, + 0xeb, 0x4f, 0x4d, 0x99, 0xb1, 0xb8, 0x68, 0x3e, 0xfa, 0x13, 0xdd, 0x13, 0x68, 0xb7, 0x6b, 0x02, + 0x6e, 0x4e, 0x3a, 0xe0, 0x73, 0x4d, 0xed, 0x99, 0x28, 0xea, 0x6f, 0xb2, 0x0b, 0x03, 0x41, 0xb3, + 0xa4, 0xc8, 0x7d, 0x04, 0xdf, 0xf5, 0x6d, 0x12, 0xb9, 0x03, 0x10, 0xf1, 0x24, 0xa1, 0x11, 0x0a, + 0xb8, 0x28, 0x60, 0x51, 0x74, 0xde, 0x29, 0x95, 0x04, 0x92, 0x46, 0x08, 0x75, 0xc7, 0xef, 0x2a, + 0x95, 0x1c, 0xd1, 0x48, 0xbf, 0x23, 0x97, 0x54, 0x04, 0xd8, 0xbe, 0x06, 0xa8, 0xd7, 0xd7, 0x04, + 0x6c, 0xb2, 0x3b, 0x00, 0x33, 0xc1, 0xf3, 0xcc, 0x70, 0x87, 0xbb, 0x6d, 0xdd, 0xc9, 0x91, 0x82, + 0xec, 0x7b, 0x70, 0x5d, 0xbe, 0x9f, 0x27, 0x2c, 0x3d, 0x0d, 0x54, 0x28, 0x66, 0x54, 0x8d, 0x47, + 0xa6, 0x02, 0x0a, 0xea, 0x1b, 0x24, 0x7a, 0x19, 0x90, 0x03, 0x41, 0x43, 0x45, 0xbf, 0xc7, 0xd0, + 0xfa, 0x6e, 0xbd, 0x81, 0xdc, 0x80, 0x2e, 0x0f, 0xe8, 0x79, 0x94, 0x14, 0x25, 0xda, 0xe1, 0x87, + 0xe7, 0x51, 0xe2, 0x3d, 0x80, 0xcd, 0xda, 0x8d, 0x45, 0x5b, 0xdf, 0x82, 0x0e, 0x15, 0x82, 0x97, + 0x4d, 0xc8, 0x1c, 0xbc, 0xdf, 0x03, 0x79, 0x9b, 0xc5, 0x1f, 0xc2, 0x3d, 0xef, 0x06, 0x6c, 0xd6, + 0x4c, 0x1b, 0x3f, 0xbc, 0x7f, 0x3a, 0x40, 0x9e, 0x62, 0x2f, 0xf9, 0x61, 0x63, 0x5c, 0x57, 0xb7, + 0x1e, 0x31, 0xa6, 0x57, 0xc5, 0xa1, 0x0a, 0x8b, 0x01, 0x38, 0x64, 0xd2, 0xd8, 0x7f, 0x1a, 0xaa, + 0xb0, 0x18, 0x44, 0x82, 0x46, 0xb9, 0xd0, 0x33, 0x11, 0x93, 0x10, 0x07, 0x91, 0x5f, 0x92, 0xc8, + 0x67, 0x70, 0x93, 0xcd, 0x52, 0x2e, 0xe8, 0x42, 0x2c, 0x30, 0x50, 0x75, 0x51, 0x78, 0xcb, 0x70, + 0x2b, 0x85, 0x43, 0x44, 0xee, 0x01, 0x6c, 0xd6, 0x9e, 0x71, 0x29, 0xcc, 0x7f, 0x76, 0x60, 0xfc, + 0x58, 0xf1, 0x39, 0x8b, 0x7c, 0xaa, 0x9d, 0xaf, 0x3d, 0xfd, 0x2e, 0x8c, 0x74, 0x37, 0x5f, 0x7e, + 0xfe, 0x90, 0x27, 0xf1, 0x62, 0x5a, 0xde, 0x02, 0xdd, 0xd0, 0x03, 0x0b, 0x85, 0x1e, 0x4f, 0x62, + 0xcc, 0xc4, 0xbb, 0xa0, 0xbb, 0xae, 0xa5, 0x6f, 0xf6, 0x86, 0x61, 0x4a, 0xcf, 0x6a, 0xfa, 0x5a, + 0x08, 0xf5, 0x4d, 0xab, 0xee, 0xa5, 0xf4, 0x4c, 0xeb, 0x7b, 0xb7, 0xe1, 0xd6, 0x0a, 0xdf, 0x8a, + 0x70, 0xfd, 0xdb, 0x81, 0xcd, 0xc7, 0x52, 0xb2, 0x59, 0xfa, 0x5b, 0x6c, 0x3b, 0xa5, 0xd3, 0x5b, + 0xd0, 0x89, 0x78, 0x9e, 0x2a, 0x74, 0xb6, 0xe3, 0x9b, 0xc3, 0x52, 0x25, 0xb6, 0x1a, 0x95, 0xb8, + 0x54, 0xcb, 0xed, 0x66, 0x2d, 0x5b, 0xb5, 0xba, 0x56, 0xab, 0xd5, 0x1f, 0xc1, 0x40, 0x07, 0x39, + 0x88, 0x68, 0xaa, 0xa8, 0x28, 0xfa, 0x3c, 0x68, 0xd2, 0x01, 0x52, 0xb4, 0x80, 0x3d, 0x8f, 0x4c, + 0xab, 0x87, 0x6c, 0x31, 0x8c, 0xfe, 0xe3, 0xc0, 0x56, 0xfd, 0x29, 0x45, 0xcc, 0x2e, 0x9c, 0x4b, + 0xba, 0x95, 0x89, 0xa4, 0x78, 0x87, 0xfe, 0xd4, 0x4d, 0x21, 0xcb, 0x8f, 0x13, 0x16, 0x05, 0x9a, + 0x61, 0xfc, 0x77, 0x0d, 0xe5, 0xad, 0x48, 0x16, 0xa8, 0xac, 0xd9, 0xa8, 0x10, 0x58, 0x0b, 0x73, + 0x75, 0x52, 0xce, 
0x26, 0xfd, 0xbd, 0x84, 0x54, 0xf7, 0x2a, 0xa4, 0x7a, 0x4d, 0xa4, 0xaa, 0x4c, + 0xeb, 0xdb, 0x99, 0xf6, 0x19, 0x6c, 0x9a, 0xe5, 0xb6, 0x1e, 0xae, 0x1d, 0x80, 0x6a, 0x8e, 0xc8, + 0xb1, 0x63, 0x9a, 0x59, 0x39, 0x48, 0xa4, 0xf7, 0x2b, 0x70, 0x5f, 0x70, 0x63, 0x57, 0x92, 0x47, + 0xe0, 0x26, 0xe5, 0x01, 0x45, 0x07, 0xfb, 0x64, 0x51, 0xe3, 0xa5, 0x9c, 0xbf, 0x10, 0xf2, 0xbe, + 0x84, 0x7e, 0x49, 0x2e, 0x31, 0x73, 0x2e, 0xc2, 0xac, 0xb5, 0x84, 0x99, 0xf7, 0x0f, 0x07, 0xb6, + 0xea, 0x2e, 0x17, 0x61, 0x79, 0x0b, 0xa3, 0xea, 0x8a, 0x60, 0x1e, 0x66, 0x85, 0x2f, 0x8f, 0x6c, + 0x5f, 0x9a, 0x6a, 0x95, 0x83, 0xf2, 0x65, 0x98, 0x99, 0x5c, 0x1e, 0x26, 0x16, 0x69, 0xf2, 0x06, + 0x36, 0x1a, 0x22, 0x2b, 0x36, 0xbb, 0x9f, 0xda, 0x9b, 0x5d, 0x6d, 0x3b, 0xad, 0xb4, 0xed, 0x75, + 0xef, 0x0b, 0xf8, 0xd8, 0xb4, 0x83, 0x83, 0x2a, 0x86, 0x25, 0xf6, 0xf5, 0x50, 0x3b, 0xcb, 0xa1, + 0xf6, 0x26, 0x30, 0x6e, 0xaa, 0x16, 0xe5, 0x37, 0x83, 0x8d, 0x23, 0x15, 0x2a, 0x26, 0x15, 0x8b, + 0xaa, 0x9f, 0x18, 0x4b, 0xb9, 0xe1, 0x5c, 0x35, 0x11, 0x9b, 0x75, 0xb8, 0x0e, 0x6d, 0xa5, 0xca, + 0xfc, 0xd5, 0x9f, 0x3a, 0x0a, 0xc4, 0xbe, 0xa9, 0x88, 0xc1, 0x07, 0xb8, 0x4a, 0xe7, 0x83, 0xe2, + 0x2a, 0x4c, 0xcc, 0xc6, 0xb1, 0x86, 0x1b, 0x87, 0x8b, 0x14, 0x5c, 0x39, 0xcc, 0x50, 0x8e, 0x0d, + 0xb7, 0x63, 0xf6, 0x11, 0x4d, 0x40, 0xe6, 0x0e, 0x00, 0x96, 0xaa, 0xa9, 0xb2, 0xae, 0xd1, 0xd5, + 0x94, 0x03, 0x4d, 0xf0, 0xee, 0xc0, 0xf6, 0x57, 0x54, 0xe9, 0xdd, 0x49, 0x1c, 0xf0, 0x74, 0xca, + 0x66, 0xb9, 0x08, 0xad, 0x50, 0x78, 0xff, 0x75, 0x60, 0xe7, 0x02, 0x81, 0xe2, 0xc1, 0x63, 0xe8, + 0xcd, 0x43, 0xa9, 0xa8, 0x28, 0xab, 0xa4, 0x3c, 0x2e, 0x43, 0xd1, 0xba, 0x0a, 0x8a, 0x76, 0x03, + 0x8a, 0x1b, 0xd0, 0x9d, 0x87, 0xe7, 0xc1, 0xfc, 0xb8, 0x58, 0x8e, 0x3a, 0xf3, 0xf0, 0xfc, 0xe5, + 0x31, 0x76, 0x36, 0x26, 0x82, 0xe3, 0x3c, 0x3a, 0xa5, 0x4a, 0x56, 0x9d, 0x8d, 0x89, 0x27, 0x86, + 0xa2, 0x1f, 0xad, 0x05, 0xbe, 0xc9, 0x69, 0x4e, 0x65, 0xd1, 0x2b, 0xf4, 0x70, 0xfc, 0x0d, 0x12, + 0x70, 0x99, 0xc2, 0xcd, 0x12, 0xbb, 0x44, 0xdf, 0x2f, 0x4e, 0xfb, 0xff, 0xea, 0xc1, 0xf0, 0x88, + 0x86, 0x67, 0x94, 0xc6, 0xf8, 0x60, 0x32, 0x2b, 0x0b, 0xad, 0xfe, 0xc3, 0x97, 0xdc, 0x5b, 0xae, + 0xa8, 0x95, 0xbf, 0xb4, 0x27, 0x9f, 0x5e, 0x25, 0x56, 0xe4, 0xec, 0x35, 0xf2, 0x0a, 0x06, 0xd6, + 0x2f, 0x4b, 0xb2, 0x6d, 0x29, 0x36, 0x7e, 0x30, 0x4f, 0x76, 0x2e, 0xe0, 0x96, 0xd6, 0x1e, 0x39, + 0xe4, 0x05, 0x0c, 0xac, 0x95, 0xc6, 0xb6, 0xd7, 0xdc, 0xad, 0x6c, 0x7b, 0x2b, 0xf6, 0x20, 0xef, + 0x9a, 0xb6, 0x66, 0x2d, 0x26, 0xb6, 0xb5, 0xe6, 0x2a, 0x64, 0x5b, 0x5b, 0xb5, 0xcd, 0xa0, 0x35, + 0x6b, 0x0f, 0xb0, 0xad, 0x35, 0xb7, 0x1c, 0xdb, 0xda, 0x8a, 0xe5, 0xc1, 0xbb, 0x46, 0xfe, 0x08, + 0x1b, 0x8d, 0x59, 0x4c, 0xbc, 0x85, 0xd6, 0x45, 0x4b, 0xc4, 0xe4, 0xee, 0xa5, 0x32, 0x95, 0xfd, + 0xaf, 0x61, 0x68, 0x8f, 0x40, 0x62, 0x39, 0xb4, 0x62, 0xca, 0x4f, 0xee, 0x5c, 0xc4, 0xb6, 0x0d, + 0xda, 0x5d, 0xd8, 0x36, 0xb8, 0x62, 0x0e, 0xd9, 0x06, 0x57, 0x35, 0x6f, 0xef, 0x1a, 0xf9, 0x03, + 0xac, 0x2f, 0x77, 0x43, 0xf2, 0xc9, 0x32, 0x6c, 0x8d, 0x26, 0x3b, 0xf1, 0x2e, 0x13, 0xa9, 0x8c, + 0x3f, 0x07, 0x58, 0x34, 0x39, 0x72, 0x7b, 0xa1, 0xd3, 0x68, 0xb2, 0x93, 0xed, 0xd5, 0xcc, 0xca, + 0xd4, 0x9f, 0xe0, 0xc6, 0xca, 0x4e, 0x42, 0xac, 0x32, 0xb9, 0xac, 0x17, 0x4d, 0x7e, 0x72, 0xa5, + 0x5c, 0x79, 0xd7, 0x93, 0x3b, 0xb0, 0x2e, 0x4d, 0x21, 0x4f, 0xe5, 0x5e, 0x94, 0x30, 0x9a, 0xaa, + 0x27, 0x80, 0x1a, 0xaf, 0x05, 0x57, 0xfc, 0xb8, 0x8b, 0xff, 0x99, 0xfd, 0xfc, 0xff, 0x01, 0x00, + 0x00, 0xff, 0xff, 0x84, 0x9e, 0xb9, 0x33, 0x42, 0x13, 0x00, 0x00, } diff --git a/weed/pb/master_pb/master.pb.go 
b/weed/pb/master_pb/master.pb.go index 95c9533a1..c33e2b768 100644 --- a/weed/pb/master_pb/master.pb.go +++ b/weed/pb/master_pb/master.pb.go @@ -428,12 +428,10 @@ type VolumeEcShardInformationMessage struct { EcIndexBits uint32 `protobuf:"varint,3,opt,name=ec_index_bits,json=ecIndexBits" json:"ec_index_bits,omitempty"` } -func (m *VolumeEcShardInformationMessage) Reset() { *m = VolumeEcShardInformationMessage{} } -func (m *VolumeEcShardInformationMessage) String() string { return proto.CompactTextString(m) } -func (*VolumeEcShardInformationMessage) ProtoMessage() {} -func (*VolumeEcShardInformationMessage) Descriptor() ([]byte, []int) { - return fileDescriptor0, []int{4} -} +func (m *VolumeEcShardInformationMessage) Reset() { *m = VolumeEcShardInformationMessage{} } +func (m *VolumeEcShardInformationMessage) String() string { return proto.CompactTextString(m) } +func (*VolumeEcShardInformationMessage) ProtoMessage() {} +func (*VolumeEcShardInformationMessage) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } func (m *VolumeEcShardInformationMessage) GetId() uint32 { if m != nil { @@ -1424,12 +1422,10 @@ type GetMasterConfigurationResponse struct { MetricsIntervalSeconds uint32 `protobuf:"varint,2,opt,name=metrics_interval_seconds,json=metricsIntervalSeconds" json:"metrics_interval_seconds,omitempty"` } -func (m *GetMasterConfigurationResponse) Reset() { *m = GetMasterConfigurationResponse{} } -func (m *GetMasterConfigurationResponse) String() string { return proto.CompactTextString(m) } -func (*GetMasterConfigurationResponse) ProtoMessage() {} -func (*GetMasterConfigurationResponse) Descriptor() ([]byte, []int) { - return fileDescriptor0, []int{32} -} +func (m *GetMasterConfigurationResponse) Reset() { *m = GetMasterConfigurationResponse{} } +func (m *GetMasterConfigurationResponse) String() string { return proto.CompactTextString(m) } +func (*GetMasterConfigurationResponse) ProtoMessage() {} +func (*GetMasterConfigurationResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{32} } func (m *GetMasterConfigurationResponse) GetMetricsAddress() string { if m != nil { diff --git a/weed/pb/volume_server_pb/volume_server.pb.go b/weed/pb/volume_server_pb/volume_server.pb.go index 293b894e2..bcc31de16 100644 --- a/weed/pb/volume_server_pb/volume_server.pb.go +++ b/weed/pb/volume_server_pb/volume_server.pb.go @@ -1043,12 +1043,10 @@ func (m *VolumeEcShardsGenerateRequest) GetCollection() string { type VolumeEcShardsGenerateResponse struct { } -func (m *VolumeEcShardsGenerateResponse) Reset() { *m = VolumeEcShardsGenerateResponse{} } -func (m *VolumeEcShardsGenerateResponse) String() string { return proto.CompactTextString(m) } -func (*VolumeEcShardsGenerateResponse) ProtoMessage() {} -func (*VolumeEcShardsGenerateResponse) Descriptor() ([]byte, []int) { - return fileDescriptor0, []int{41} -} +func (m *VolumeEcShardsGenerateResponse) Reset() { *m = VolumeEcShardsGenerateResponse{} } +func (m *VolumeEcShardsGenerateResponse) String() string { return proto.CompactTextString(m) } +func (*VolumeEcShardsGenerateResponse) ProtoMessage() {} +func (*VolumeEcShardsGenerateResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{41} } type VolumeEcShardsRebuildRequest struct { VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` @@ -1421,12 +1419,10 @@ func (m *VolumeEcShardsToVolumeRequest) GetCollection() string { type VolumeEcShardsToVolumeResponse struct { } -func (m *VolumeEcShardsToVolumeResponse) Reset() { *m = 
VolumeEcShardsToVolumeResponse{} } -func (m *VolumeEcShardsToVolumeResponse) String() string { return proto.CompactTextString(m) } -func (*VolumeEcShardsToVolumeResponse) ProtoMessage() {} -func (*VolumeEcShardsToVolumeResponse) Descriptor() ([]byte, []int) { - return fileDescriptor0, []int{57} -} +func (m *VolumeEcShardsToVolumeResponse) Reset() { *m = VolumeEcShardsToVolumeResponse{} } +func (m *VolumeEcShardsToVolumeResponse) String() string { return proto.CompactTextString(m) } +func (*VolumeEcShardsToVolumeResponse) ProtoMessage() {} +func (*VolumeEcShardsToVolumeResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{57} } type ReadVolumeFileStatusRequest struct { VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` @@ -2093,10 +2089,8 @@ type QueryRequest_InputSerialization_JSONInput struct { func (m *QueryRequest_InputSerialization_JSONInput) Reset() { *m = QueryRequest_InputSerialization_JSONInput{} } -func (m *QueryRequest_InputSerialization_JSONInput) String() string { - return proto.CompactTextString(m) -} -func (*QueryRequest_InputSerialization_JSONInput) ProtoMessage() {} +func (m *QueryRequest_InputSerialization_JSONInput) String() string { return proto.CompactTextString(m) } +func (*QueryRequest_InputSerialization_JSONInput) ProtoMessage() {} func (*QueryRequest_InputSerialization_JSONInput) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{70, 1, 1} } diff --git a/weed/s3api/filer_multipart.go b/weed/s3api/filer_multipart.go index 792127771..1350fb18e 100644 --- a/weed/s3api/filer_multipart.go +++ b/weed/s3api/filer_multipart.go @@ -112,7 +112,7 @@ func (s3a *S3ApiServer) completeMultipartUpload(input *s3.CompleteMultipartUploa }, } - if err = s3a.rm(s3a.genUploadsFolder(*input.Bucket), *input.UploadId, true, false, true); err != nil { + if err = s3a.rm(s3a.genUploadsFolder(*input.Bucket), *input.UploadId, false, true); err != nil { glog.V(1).Infof("completeMultipartUpload cleanup %s upload %s: %v", *input.Bucket, *input.UploadId, err) } @@ -127,7 +127,7 @@ func (s3a *S3ApiServer) abortMultipartUpload(input *s3.AbortMultipartUploadInput return nil, ErrNoSuchUpload } if exists { - err = s3a.rm(s3a.genUploadsFolder(*input.Bucket), *input.UploadId, true, true, true) + err = s3a.rm(s3a.genUploadsFolder(*input.Bucket), *input.UploadId, true, true) } if err != nil { glog.V(1).Infof("bucket %s remove upload %s: %v", *input.Bucket, *input.UploadId, err) diff --git a/weed/s3api/filer_util.go b/weed/s3api/filer_util.go index ec1eedcb4..2793ee71d 100644 --- a/weed/s3api/filer_util.go +++ b/weed/s3api/filer_util.go @@ -117,21 +117,13 @@ func (s3a *S3ApiServer) list(parentDirectoryPath, prefix, startFrom string, incl } -func (s3a *S3ApiServer) rm(parentDirectoryPath, entryName string, isDirectory, isDeleteData, isRecursive bool) error { +func (s3a *S3ApiServer) rm(parentDirectoryPath, entryName string, isDeleteData, isRecursive bool) error { return s3a.withFilerClient(func(client filer_pb.SeaweedFilerClient) error { - request := &filer_pb.DeleteEntryRequest{ - Directory: parentDirectoryPath, - Name: entryName, - IsDeleteData: isDeleteData, - IsRecursive: isRecursive, - } - - glog.V(1).Infof("delete entry %v/%v: %v", parentDirectoryPath, entryName, request) - if _, err := client.DeleteEntry(context.Background(), request); err != nil { - glog.V(0).Infof("delete entry %v: %v", request, err) - return fmt.Errorf("delete entry %s/%s: %v", parentDirectoryPath, entryName, err) + err := doDeleteEntry(client, 
parentDirectoryPath, entryName, isDeleteData, isRecursive) + if err != nil { + return err } return nil @@ -139,57 +131,24 @@ func (s3a *S3ApiServer) rm(parentDirectoryPath, entryName string, isDirectory, i } -func (s3a *S3ApiServer) streamRemove(quiet bool, fn func() (finished bool, parentDirectoryPath string, entryName string, isDeleteData, isRecursive bool), respFn func(err string)) error { - - return s3a.withFilerClient(func(client filer_pb.SeaweedFilerClient) error { - - stream, err := client.StreamDeleteEntries(context.Background()) - if err != nil { - glog.V(0).Infof("stream delete entry: %v", err) - return fmt.Errorf("stream delete entry: %v", err) - } - - waitc := make(chan struct{}) - go func() { - for { - resp, err := stream.Recv() - if err == io.EOF { - // read done. - close(waitc) - return - } - if err != nil { - glog.V(0).Infof("streamRemove: %v", err) - return - } - respFn(resp.Error) - } - }() - - for { - finished, parentDirectoryPath, entryName, isDeleteData, isRecursive := fn() - if finished { - break - } - err = stream.Send(&filer_pb.DeleteEntryRequest{ - Directory: parentDirectoryPath, - Name: entryName, - IsDeleteData: isDeleteData, - IsRecursive: isRecursive, - IgnoreRecursiveError: quiet, - }) - if err != nil { - glog.V(0).Infof("streamRemove: %v", err) - break - } +func doDeleteEntry(client filer_pb.SeaweedFilerClient, parentDirectoryPath string, entryName string, isDeleteData bool, isRecursive bool) error { + request := &filer_pb.DeleteEntryRequest{ + Directory: parentDirectoryPath, + Name: entryName, + IsDeleteData: isDeleteData, + IsRecursive: isRecursive, + } + glog.V(1).Infof("delete entry %v/%v: %v", parentDirectoryPath, entryName, request) + if resp, err := client.DeleteEntry(context.Background(), request); err != nil { + glog.V(0).Infof("delete entry %v: %v", request, err) + return fmt.Errorf("delete entry %s/%s: %v", parentDirectoryPath, entryName, err) + } else { + if resp.Error != "" { + return fmt.Errorf("delete entry %s/%s: %v", parentDirectoryPath, entryName, resp.Error) } - stream.CloseSend() - <-waitc - return err - - }) - + } + return nil } func (s3a *S3ApiServer) exists(parentDirectoryPath string, entryName string, isDirectory bool) (exists bool, err error) { diff --git a/weed/s3api/s3api_bucket_handlers.go b/weed/s3api/s3api_bucket_handlers.go index 3e5089bed..8efb46014 100644 --- a/weed/s3api/s3api_bucket_handlers.go +++ b/weed/s3api/s3api_bucket_handlers.go @@ -94,7 +94,7 @@ func (s3a *S3ApiServer) DeleteBucketHandler(w http.ResponseWriter, r *http.Reque return nil }) - err = s3a.rm(s3a.option.BucketsPath, bucket, true, false, true) + err = s3a.rm(s3a.option.BucketsPath, bucket, false, true) if err != nil { writeErrorResponse(w, ErrInternalError, r.URL) diff --git a/weed/s3api/s3api_object_handlers.go b/weed/s3api/s3api_object_handlers.go index 9d03cdbe3..bb3ead6f2 100644 --- a/weed/s3api/s3api_object_handlers.go +++ b/weed/s3api/s3api_object_handlers.go @@ -13,6 +13,7 @@ import ( "github.com/gorilla/mux" "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" "github.com/chrislusf/seaweedfs/weed/server" "github.com/chrislusf/seaweedfs/weed/util" ) @@ -165,38 +166,32 @@ func (s3a *S3ApiServer) DeleteMultipleObjectsHandler(w http.ResponseWriter, r *h return } - var index int - var deletedObjects []ObjectIdentifier var deleteErrors []DeleteError - s3a.streamRemove(deleteObjects.Quiet, func() (finished bool, parentDirectoryPath string, entryName string, isDeleteData, isRecursive bool) { - if index >= 
len(deleteObjects.Objects) { - finished = true - return - } - - object := deleteObjects.Objects[index] - lastSeparator := strings.LastIndex(object.ObjectName, "/") - parentDirectoryPath, entryName, isDeleteData, isRecursive = "/", object.ObjectName, true, false - if lastSeparator > 0 && lastSeparator+1 < len(object.ObjectName) { - entryName = object.ObjectName[lastSeparator+1:] - parentDirectoryPath = "/" + object.ObjectName[:lastSeparator] - } - parentDirectoryPath = fmt.Sprintf("%s/%s%s", s3a.option.BucketsPath, bucket, parentDirectoryPath) - return - }, func(err string) { - object := deleteObjects.Objects[index] - if err == "" { - deletedObjects = append(deletedObjects, object) - } else { - deleteErrors = append(deleteErrors, DeleteError{ - Code: "", - Message: err, - Key: object.ObjectName, - }) + s3a.withFilerClient(func(client filer_pb.SeaweedFilerClient) error { + + for _, object := range deleteObjects.Objects { + lastSeparator := strings.LastIndex(object.ObjectName, "/") + parentDirectoryPath, entryName, isDeleteData, isRecursive := "/", object.ObjectName, true, false + if lastSeparator > 0 && lastSeparator+1 < len(object.ObjectName) { + entryName = object.ObjectName[lastSeparator+1:] + parentDirectoryPath = "/" + object.ObjectName[:lastSeparator] + } + parentDirectoryPath = fmt.Sprintf("%s/%s%s", s3a.option.BucketsPath, bucket, parentDirectoryPath) + + err := doDeleteEntry(client, parentDirectoryPath, entryName, isDeleteData, isRecursive) + if err == nil { + deletedObjects = append(deletedObjects, object) + } else { + deleteErrors = append(deleteErrors, DeleteError{ + Code: "", + Message: err.Error(), + Key: object.ObjectName, + }) + } } - index++ + return nil }) deleteResp := DeleteObjectsResponse{} diff --git a/weed/server/filer_grpc_server.go b/weed/server/filer_grpc_server.go index b904c1393..2ba2db132 100644 --- a/weed/server/filer_grpc_server.go +++ b/weed/server/filer_grpc_server.go @@ -226,25 +226,6 @@ func (fs *FilerServer) DeleteEntry(ctx context.Context, req *filer_pb.DeleteEntr return resp, nil } -func (fs *FilerServer) StreamDeleteEntries(stream filer_pb.SeaweedFiler_StreamDeleteEntriesServer) error { - for { - req, err := stream.Recv() - if err != nil { - return fmt.Errorf("receive delete entry request: %v", err) - } - fullpath := filer2.FullPath(filepath.ToSlash(filepath.Join(req.Directory, req.Name))) - err = fs.filer.DeleteEntryMetaAndData(context.Background(), fullpath, req.IsRecursive, req.IgnoreRecursiveError, req.IsDeleteData) - resp := &filer_pb.DeleteEntryResponse{} - if err != nil { - resp.Error = err.Error() - } - if err := stream.Send(resp); err != nil { - return err - } - } - return nil -} - func (fs *FilerServer) AssignVolume(ctx context.Context, req *filer_pb.AssignVolumeRequest) (resp *filer_pb.AssignVolumeResponse, err error) { ttlStr := "" From 11372dd54800fc8345e1bb872b01cf05d6382cc6 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Fri, 20 Mar 2020 15:38:04 -0700 Subject: [PATCH 0271/2432] refactoring --- weed/operation/chunked_file.go | 10 ++++++++-- weed/server/volume_server_handlers_read.go | 5 +---- 2 files changed, 9 insertions(+), 6 deletions(-) diff --git a/weed/operation/chunked_file.go b/weed/operation/chunked_file.go index 4983245cc..b20c2551e 100644 --- a/weed/operation/chunked_file.go +++ b/weed/operation/chunked_file.go @@ -8,11 +8,10 @@ import ( "io/ioutil" "net/http" "sort" + "sync" "google.golang.org/grpc" - "sync" - "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/util" ) @@ -126,6 +125,13 @@ func 
readChunkNeedle(fileUrl string, w io.Writer, offset int64) (written int64, return io.Copy(w, resp.Body) } +func NewChunkedFileReader(chunkManifest *ChunkManifest, master string) *ChunkedFileReader { + return &ChunkedFileReader{ + Manifest: chunkManifest, + Master: master, + } +} + func (cf *ChunkedFileReader) Seek(offset int64, whence int) (int64, error) { var err error switch whence { diff --git a/weed/server/volume_server_handlers_read.go b/weed/server/volume_server_handlers_read.go index 6e603d158..c412062a6 100644 --- a/weed/server/volume_server_handlers_read.go +++ b/weed/server/volume_server_handlers_read.go @@ -187,10 +187,7 @@ func (vs *VolumeServer) tryHandleChunkedFile(n *needle.Needle, fileName string, w.Header().Set("X-File-Store", "chunked") - chunkedFileReader := &operation.ChunkedFileReader{ - Manifest: chunkManifest, - Master: vs.GetMaster(), - } + chunkedFileReader := operation.NewChunkedFileReader(chunkManifest, vs.GetMaster()) defer chunkedFileReader.Close() rs := conditionallyResizeImages(chunkedFileReader, ext, r) From f251d036735a12b0b6312c8c8fd388e912665014 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Fri, 20 Mar 2020 15:46:16 -0700 Subject: [PATCH 0272/2432] refactoring --- weed/operation/chunked_file.go | 37 ++++++++++++---------- weed/server/volume_server_handlers_read.go | 2 +- 2 files changed, 22 insertions(+), 17 deletions(-) diff --git a/weed/operation/chunked_file.go b/weed/operation/chunked_file.go index b20c2551e..d874de4ee 100644 --- a/weed/operation/chunked_file.go +++ b/weed/operation/chunked_file.go @@ -40,12 +40,13 @@ type ChunkManifest struct { // seekable chunked file reader type ChunkedFileReader struct { - Manifest *ChunkManifest - Master string - pos int64 - pr *io.PipeReader - pw *io.PipeWriter - mutex sync.Mutex + totalSize int64 + chunkList []*ChunkInfo + master string + pos int64 + pr *io.PipeReader + pw *io.PipeWriter + mutex sync.Mutex } func (s ChunkList) Len() int { return len(s) } @@ -125,10 +126,15 @@ func readChunkNeedle(fileUrl string, w io.Writer, offset int64) (written int64, return io.Copy(w, resp.Body) } -func NewChunkedFileReader(chunkManifest *ChunkManifest, master string) *ChunkedFileReader { +func NewChunkedFileReader(chunkList []*ChunkInfo, master string) *ChunkedFileReader { + var totalSize int64 + for _, chunk := range chunkList { + totalSize += chunk.Size + } return &ChunkedFileReader{ - Manifest: chunkManifest, - Master: master, + totalSize: totalSize, + chunkList: chunkList, + master: master, } } @@ -139,9 +145,9 @@ func (cf *ChunkedFileReader) Seek(offset int64, whence int) (int64, error) { case 1: offset += cf.pos case 2: - offset = cf.Manifest.Size - offset + offset = cf.totalSize - offset } - if offset > cf.Manifest.Size { + if offset > cf.totalSize { err = ErrInvalidRange } if cf.pos != offset { @@ -152,10 +158,9 @@ func (cf *ChunkedFileReader) Seek(offset int64, whence int) (int64, error) { } func (cf *ChunkedFileReader) WriteTo(w io.Writer) (n int64, err error) { - cm := cf.Manifest chunkIndex := -1 chunkStartOffset := int64(0) - for i, ci := range cm.Chunks { + for i, ci := range cf.chunkList { if cf.pos >= ci.Offset && cf.pos < ci.Offset+ci.Size { chunkIndex = i chunkStartOffset = cf.pos - ci.Offset @@ -165,10 +170,10 @@ func (cf *ChunkedFileReader) WriteTo(w io.Writer) (n int64, err error) { if chunkIndex < 0 { return n, ErrInvalidRange } - for ; chunkIndex < cm.Chunks.Len(); chunkIndex++ { - ci := cm.Chunks[chunkIndex] + for ; chunkIndex < len(cf.chunkList); chunkIndex++ { + ci := cf.chunkList[chunkIndex] // 
if we need read date from local volume server first? - fileUrl, lookupError := LookupFileId(cf.Master, ci.Fid) + fileUrl, lookupError := LookupFileId(cf.master, ci.Fid) if lookupError != nil { return n, lookupError } diff --git a/weed/server/volume_server_handlers_read.go b/weed/server/volume_server_handlers_read.go index c412062a6..996eb3a2f 100644 --- a/weed/server/volume_server_handlers_read.go +++ b/weed/server/volume_server_handlers_read.go @@ -187,7 +187,7 @@ func (vs *VolumeServer) tryHandleChunkedFile(n *needle.Needle, fileName string, w.Header().Set("X-File-Store", "chunked") - chunkedFileReader := operation.NewChunkedFileReader(chunkManifest, vs.GetMaster()) + chunkedFileReader := operation.NewChunkedFileReader(chunkManifest.Chunks, vs.GetMaster()) defer chunkedFileReader.Close() rs := conditionallyResizeImages(chunkedFileReader, ext, r) From 308688c8d0b29caa06739338748cb3882c3a8559 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Fri, 20 Mar 2020 20:31:11 -0700 Subject: [PATCH 0273/2432] filer: add back image resizing capability --- weed/filer2/stream.go | 58 ++++++++++++++++++++++ weed/operation/chunked_file.go | 1 + weed/server/common.go | 2 +- weed/server/filer_server_handlers_read.go | 16 +++++- weed/server/volume_server_handlers_read.go | 17 +++++-- 5 files changed, 87 insertions(+), 7 deletions(-) diff --git a/weed/filer2/stream.go b/weed/filer2/stream.go index 381d99144..bf985f8bd 100644 --- a/weed/filer2/stream.go +++ b/weed/filer2/stream.go @@ -1,7 +1,10 @@ package filer2 import ( + "bytes" + "fmt" "io" + "math" "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" @@ -40,3 +43,58 @@ func StreamContent(masterClient *wdclient.MasterClient, w io.Writer, chunks []*f return nil } + +type ChunkStreamReader struct { + masterClient *wdclient.MasterClient + chunkViews []*ChunkView + logicOffset int64 + buffer bytes.Buffer + bufferOffset int64 + chunkIndex int +} + +var _ = io.ReadSeeker(&ChunkStreamReader{}) + +func NewChunkStreamReader(masterClient *wdclient.MasterClient, chunks []*filer_pb.FileChunk) *ChunkStreamReader { + + chunkViews := ViewFromChunks(chunks, 0, math.MaxInt32) + + return &ChunkStreamReader{ + masterClient: masterClient, + chunkViews: chunkViews, + bufferOffset: -1, + } +} + +func (c *ChunkStreamReader) Read(p []byte) (n int, err error) { + if c.buffer.Len() == 0 { + if c.chunkIndex >= len(c.chunkViews) { + return 0, io.EOF + } + chunkView := c.chunkViews[c.chunkIndex] + c.fetchChunkToBuffer(chunkView) + c.chunkIndex++ + } + return c.buffer.Read(p) +} + +func (c *ChunkStreamReader) Seek(offset int64, whence int) (int64, error) { + return 0, fmt.Errorf("ChunkStreamReader: seek not supported") +} + +func (c *ChunkStreamReader) fetchChunkToBuffer(chunkView *ChunkView) error { + urlString, err := c.masterClient.LookupFileId(chunkView.FileId) + if err != nil { + glog.V(1).Infof("operation LookupFileId %s failed, err: %v", chunkView.FileId, err) + return err + } + c.buffer.Reset() + err = util.ReadUrlAsStream(urlString, chunkView.CipherKey, chunkView.isGzipped, chunkView.IsFullChunk, chunkView.Offset, int(chunkView.Size), func(data []byte) { + c.buffer.Write(data) + }) + if err != nil { + glog.V(1).Infof("read %s failed, err: %v", chunkView.FileId, err) + return err + } + return nil +} diff --git a/weed/operation/chunked_file.go b/weed/operation/chunked_file.go index d874de4ee..1ab3c59ed 100644 --- a/weed/operation/chunked_file.go +++ b/weed/operation/chunked_file.go @@ -131,6 +131,7 @@ func NewChunkedFileReader(chunkList 
[]*ChunkInfo, master string) *ChunkedFileRea for _, chunk := range chunkList { totalSize += chunk.Size } + sort.Sort(ChunkList(chunkList)) return &ChunkedFileReader{ totalSize: totalSize, chunkList: chunkList, diff --git a/weed/server/common.go b/weed/server/common.go index e06142d7f..f064ca707 100644 --- a/weed/server/common.go +++ b/weed/server/common.go @@ -224,7 +224,7 @@ func adjustHeadersAfterHEAD(w http.ResponseWriter, r *http.Request, filename str } } -func processRangeRequst(r *http.Request, w http.ResponseWriter, totalSize int64, mimeType string, writeFn func(writer io.Writer, offset int64, size int64) error) { +func processRangeRequest(r *http.Request, w http.ResponseWriter, totalSize int64, mimeType string, writeFn func(writer io.Writer, offset int64, size int64) error) { rangeReq := r.Header.Get("Range") if rangeReq == "" { diff --git a/weed/server/filer_server_handlers_read.go b/weed/server/filer_server_handlers_read.go index 878b52ffa..5322492dc 100644 --- a/weed/server/filer_server_handlers_read.go +++ b/weed/server/filer_server_handlers_read.go @@ -11,6 +11,7 @@ import ( "github.com/chrislusf/seaweedfs/weed/filer2" "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/images" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" "github.com/chrislusf/seaweedfs/weed/stats" ) @@ -89,8 +90,19 @@ func (fs *FilerServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request, totalSize := int64(filer2.TotalSize(entry.Chunks)) - processRangeRequst(r, w, totalSize, mimeType, func(writer io.Writer, offset int64, size int64) error { - return filer2.StreamContent(fs.filer.MasterClient, w, entry.Chunks, offset, int(size)) + if rangeReq := r.Header.Get("Range"); rangeReq == "" { + ext := filepath.Ext(filename) + width, height, mode, shouldResize := shouldResizeImages(ext, r) + if shouldResize { + chunkedFileReader := filer2.NewChunkStreamReader(fs.filer.MasterClient, entry.Chunks) + rs, _, _ := images.Resized(ext, chunkedFileReader, width, height, mode) + io.Copy(w, rs) + return + } + } + + processRangeRequest(r, w, totalSize, mimeType, func(writer io.Writer, offset int64, size int64) error { + return filer2.StreamContent(fs.filer.MasterClient, writer, entry.Chunks, offset, int(size)) }) } diff --git a/weed/server/volume_server_handlers_read.go b/weed/server/volume_server_handlers_read.go index 996eb3a2f..a222f198d 100644 --- a/weed/server/volume_server_handlers_read.go +++ b/weed/server/volume_server_handlers_read.go @@ -200,20 +200,29 @@ func (vs *VolumeServer) tryHandleChunkedFile(n *needle.Needle, fileName string, func conditionallyResizeImages(originalDataReaderSeeker io.ReadSeeker, ext string, r *http.Request) io.ReadSeeker { rs := originalDataReaderSeeker + + width, height, mode, shouldResize := shouldResizeImages(ext, r) + if shouldResize { + rs, _, _ = images.Resized(ext, originalDataReaderSeeker, width, height, mode) + } + return rs +} + +func shouldResizeImages(ext string, r *http.Request) (width, height int, mode string, shouldResize bool) { if len(ext) > 0 { ext = strings.ToLower(ext) } if ext == ".png" || ext == ".jpg" || ext == ".jpeg" || ext == ".gif" { - width, height := 0, 0 if r.FormValue("width") != "" { width, _ = strconv.Atoi(r.FormValue("width")) } if r.FormValue("height") != "" { height, _ = strconv.Atoi(r.FormValue("height")) } - rs, _, _ = images.Resized(ext, originalDataReaderSeeker, width, height, r.FormValue("mode")) } - return rs + mode = r.FormValue("mode") + shouldResize = width > 0 || height > 0 + return } func 
writeResponseContent(filename, mimeType string, rs io.ReadSeeker, w http.ResponseWriter, r *http.Request) error { @@ -235,7 +244,7 @@ func writeResponseContent(filename, mimeType string, rs io.ReadSeeker, w http.Re adjustHeadersAfterHEAD(w, r, filename) - processRangeRequst(r, w, totalSize, mimeType, func(writer io.Writer, offset int64, size int64) error { + processRangeRequest(r, w, totalSize, mimeType, func(writer io.Writer, offset int64, size int64) error { if _, e = rs.Seek(offset, 0); e != nil { return e } From e1b5ac5c042ab7e88506fde234810400a555d261 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Fri, 20 Mar 2020 20:39:48 -0700 Subject: [PATCH 0274/2432] 1.66 --- k8s/seaweedfs/Chart.yaml | 2 +- k8s/seaweedfs/values.yaml | 2 +- weed/util/constants.go | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/k8s/seaweedfs/Chart.yaml b/k8s/seaweedfs/Chart.yaml index 9ee7ed242..4fe2c6272 100644 --- a/k8s/seaweedfs/Chart.yaml +++ b/k8s/seaweedfs/Chart.yaml @@ -1,4 +1,4 @@ apiVersion: v1 description: SeaweedFS name: seaweedfs -version: 1.65 +version: 1.66 diff --git a/k8s/seaweedfs/values.yaml b/k8s/seaweedfs/values.yaml index 073afe65f..61459ea9d 100644 --- a/k8s/seaweedfs/values.yaml +++ b/k8s/seaweedfs/values.yaml @@ -4,7 +4,7 @@ global: registry: "" repository: "" imageName: chrislusf/seaweedfs - imageTag: "1.65" + imageTag: "1.66" imagePullPolicy: IfNotPresent imagePullSecrets: imagepullsecret restartPolicy: Always diff --git a/weed/util/constants.go b/weed/util/constants.go index 3a0eb4148..29dfdb97a 100644 --- a/weed/util/constants.go +++ b/weed/util/constants.go @@ -5,5 +5,5 @@ import ( ) var ( - VERSION = fmt.Sprintf("%s %d.%d", sizeLimit, 1, 65) + VERSION = fmt.Sprintf("%s %d.%d", sizeLimit, 1, 66) ) From 4bc19f1e523b50080fca894a469662bd704b2639 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Fri, 20 Mar 2020 22:41:37 -0700 Subject: [PATCH 0275/2432] master: fix connections count reporting --- weed/util/net_timeout.go | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/weed/util/net_timeout.go b/weed/util/net_timeout.go index 8acd50d42..edbb1a3b7 100644 --- a/weed/util/net_timeout.go +++ b/weed/util/net_timeout.go @@ -35,6 +35,7 @@ type Conn struct { net.Conn ReadTimeout time.Duration WriteTimeout time.Duration + isClosed bool } func (c *Conn) Read(b []byte) (count int, e error) { @@ -66,7 +67,10 @@ func (c *Conn) Write(b []byte) (count int, e error) { } func (c *Conn) Close() error { - stats.ConnectionClose() + if !c.isClosed { + stats.ConnectionClose() + c.isClosed = true + } return c.Conn.Close() } From 3505b06023fa82322a614bb303d4aaeae08d228c Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Fri, 20 Mar 2020 23:38:24 -0700 Subject: [PATCH 0276/2432] report deletion error in the log --- weed/filer2/filer_deletion.go | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/weed/filer2/filer_deletion.go b/weed/filer2/filer_deletion.go index 3a64f636e..8c0e159b5 100644 --- a/weed/filer2/filer_deletion.go +++ b/weed/filer2/filer_deletion.go @@ -34,12 +34,15 @@ func (f *Filer) loopProcessingDeletion() { deletionCount = 0 f.fileIdDeletionQueue.Consume(func(fileIds []string) { deletionCount = len(fileIds) - _, err := operation.DeleteFilesWithLookupVolumeId(f.GrpcDialOption, fileIds, lookupFunc) + deleteResults, err := operation.DeleteFilesWithLookupVolumeId(f.GrpcDialOption, fileIds, lookupFunc) if err != nil { glog.V(0).Infof("deleting fileIds len=%d error: %v", deletionCount, err) } else { glog.V(1).Infof("deleting fileIds len=%d", deletionCount) 
} + if len(deleteResults) != deletionCount { + glog.V(0).Infof("delete %d fileIds actual %d", deletionCount, len(deleteResults)) + } }) if deletionCount == 0 { From cbfe31a9a8e94b9601e59497e4a272374bba50f3 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Fri, 20 Mar 2020 23:38:46 -0700 Subject: [PATCH 0277/2432] idx file sync before compaction --- weed/storage/needle_map.go | 5 +++++ weed/storage/volume_vacuum.go | 11 +++++++---- 2 files changed, 12 insertions(+), 4 deletions(-) diff --git a/weed/storage/needle_map.go b/weed/storage/needle_map.go index 77d081ea7..8962e78cb 100644 --- a/weed/storage/needle_map.go +++ b/weed/storage/needle_map.go @@ -30,6 +30,7 @@ type NeedleMapper interface { DeletedCount() int MaxFileKey() NeedleId IndexFileSize() uint64 + Sync() error } type baseNeedleMapper struct { @@ -59,3 +60,7 @@ func (nm *baseNeedleMapper) appendToIndexFile(key NeedleId, offset Offset, size _, err := nm.indexFile.Write(bytes) return err } + +func (nm *baseNeedleMapper) Sync() error { + return nm.indexFile.Sync() +} diff --git a/weed/storage/volume_vacuum.go b/weed/storage/volume_vacuum.go index cec7badec..67c3957de 100644 --- a/weed/storage/volume_vacuum.go +++ b/weed/storage/volume_vacuum.go @@ -56,6 +56,9 @@ func (v *Volume) Compact(preallocate int64, compactionBytePerSecond int64) error if err := v.DataBackend.Sync(); err != nil { glog.V(0).Infof("compact fail to sync volume %d", v.Id) } + if err := v.nm.Sync(); err != nil { + glog.V(0).Infof("compact fail to sync volume idx %d", v.Id) + } return v.copyDataAndGenerateIndexFile(filePath+".cpd", filePath+".cpx", preallocate, compactionBytePerSecond) } @@ -77,7 +80,10 @@ func (v *Volume) Compact2(preallocate int64, compactionBytePerSecond int64) erro v.lastCompactRevision = v.SuperBlock.CompactionRevision glog.V(3).Infof("creating copies for volume %d ...", v.Id) if err := v.DataBackend.Sync(); err != nil { - glog.V(0).Infof("compact2 fail to sync volume %d", v.Id) + glog.V(0).Infof("compact2 fail to sync volume dat %d", v.Id) + } + if err := v.nm.Sync(); err != nil { + glog.V(0).Infof("compact2 fail to sync volume idx %d", v.Id) } return copyDataBasedOnIndexFile(filePath+".dat", filePath+".idx", filePath+".cpd", filePath+".cpx", v.SuperBlock, v.Version(), preallocate, compactionBytePerSecond) } @@ -99,9 +105,6 @@ func (v *Volume) CommitCompact() error { glog.V(3).Infof("Got volume %d committing lock...", v.Id) v.nm.Close() if v.DataBackend != nil { - if err := v.DataBackend.Sync(); err != nil { - glog.V(0).Infof("fail to sync volume %d", v.Id) - } if err := v.DataBackend.Close(); err != nil { glog.V(0).Infof("fail to close volume %d", v.Id) } From d3e4a31058344698b3365cafcad031406b73b1bf Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Fri, 20 Mar 2020 23:39:32 -0700 Subject: [PATCH 0278/2432] filer: fix where deletion can miss under high concurrency --- weed/util/queue_unbounded.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/weed/util/queue_unbounded.go b/weed/util/queue_unbounded.go index 664cd965e..496b9f844 100644 --- a/weed/util/queue_unbounded.go +++ b/weed/util/queue_unbounded.go @@ -18,7 +18,7 @@ func (q *UnboundedQueue) EnQueue(items ...string) { q.inboundLock.Lock() defer q.inboundLock.Unlock() - q.outbound = append(q.outbound, items...) + q.inbound = append(q.inbound, items...) 
} From 3b3e063f25ee3ff58f3beab4d4197fb2d2d19eec Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Fri, 20 Mar 2020 23:42:49 -0700 Subject: [PATCH 0279/2432] 1.67 --- k8s/seaweedfs/Chart.yaml | 2 +- k8s/seaweedfs/values.yaml | 2 +- weed/util/constants.go | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/k8s/seaweedfs/Chart.yaml b/k8s/seaweedfs/Chart.yaml index 4fe2c6272..028893df7 100644 --- a/k8s/seaweedfs/Chart.yaml +++ b/k8s/seaweedfs/Chart.yaml @@ -1,4 +1,4 @@ apiVersion: v1 description: SeaweedFS name: seaweedfs -version: 1.66 +version: 1.67 diff --git a/k8s/seaweedfs/values.yaml b/k8s/seaweedfs/values.yaml index 61459ea9d..025e16c2d 100644 --- a/k8s/seaweedfs/values.yaml +++ b/k8s/seaweedfs/values.yaml @@ -4,7 +4,7 @@ global: registry: "" repository: "" imageName: chrislusf/seaweedfs - imageTag: "1.66" + imageTag: "1.67" imagePullPolicy: IfNotPresent imagePullSecrets: imagepullsecret restartPolicy: Always diff --git a/weed/util/constants.go b/weed/util/constants.go index 29dfdb97a..fcb8b4bb0 100644 --- a/weed/util/constants.go +++ b/weed/util/constants.go @@ -5,5 +5,5 @@ import ( ) var ( - VERSION = fmt.Sprintf("%s %d.%d", sizeLimit, 1, 66) + VERSION = fmt.Sprintf("%s %d.%d", sizeLimit, 1, 67) ) From 7c111f7b75ba2b0a95766897503997d65d05b42c Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Sat, 21 Mar 2020 19:14:25 -0700 Subject: [PATCH 0280/2432] FUSE mount: make "nonempty" optional https://github.com/chrislusf/seaweedfs/issues/1094 --- weed/command/mount.go | 2 ++ weed/command/mount_linux.go | 4 +--- weed/command/mount_std.go | 41 +++++++++++++++---------------------- 3 files changed, 20 insertions(+), 27 deletions(-) diff --git a/weed/command/mount.go b/weed/command/mount.go index f1448c6cc..d4a4ba746 100644 --- a/weed/command/mount.go +++ b/weed/command/mount.go @@ -12,6 +12,7 @@ type MountOptions struct { dataCenter *string allowOthers *bool umaskString *string + nonempty *bool outsideContainerClusterMode *bool } @@ -34,6 +35,7 @@ func init() { mountOptions.dataCenter = cmdMount.Flag.String("dataCenter", "", "prefer to write to the data center") mountOptions.allowOthers = cmdMount.Flag.Bool("allowOthers", true, "allows other users to access the file system") mountOptions.umaskString = cmdMount.Flag.String("umask", "022", "octal umask, e.g., 022, 0111") + mountOptions.nonempty = cmdMount.Flag.Bool("nonempty", false, "allows the mounting over a non-empty directory") mountCpuProfile = cmdMount.Flag.String("cpuprofile", "", "cpu profile output file") mountMemProfile = cmdMount.Flag.String("memprofile", "", "memory profile output file") mountOptions.outsideContainerClusterMode = cmdMount.Flag.Bool("outsideContainerClusterMode", false, "allows other users to access the file system") diff --git a/weed/command/mount_linux.go b/weed/command/mount_linux.go index 80a5f9da4..25c4f72cf 100644 --- a/weed/command/mount_linux.go +++ b/weed/command/mount_linux.go @@ -138,9 +138,7 @@ func parseInfoFile(r io.Reader) ([]*Info, error) { } func osSpecificMountOptions() []fuse.MountOption { - return []fuse.MountOption{ - fuse.AllowNonEmptyMount(), - } + return []fuse.MountOption{} } func checkMountPointAvailable(dir string) bool { diff --git a/weed/command/mount_std.go b/weed/command/mount_std.go index 9177091a5..22ddd1f07 100644 --- a/weed/command/mount_std.go +++ b/weed/command/mount_std.go @@ -35,24 +35,15 @@ func runMount(cmd *Command, args []string) bool { return false } - return RunMount( - *mountOptions.filer, - *mountOptions.filerMountRootPath, - *mountOptions.dir, - 
*mountOptions.collection, - *mountOptions.replication, - *mountOptions.dataCenter, - *mountOptions.chunkSizeLimitMB, - *mountOptions.allowOthers, - *mountOptions.ttlSec, - *mountOptions.dirListCacheLimit, - os.FileMode(umask), - *mountOptions.outsideContainerClusterMode, - ) + return RunMount(&mountOptions, os.FileMode(umask)) } -func RunMount(filer, filerMountRootPath, dir, collection, replication, dataCenter string, chunkSizeLimitMB int, - allowOthers bool, ttlSec int, dirListCacheLimit int64, umask os.FileMode, outsideContainerClusterMode bool) bool { +func RunMount(option *MountOptions, umask os.FileMode) bool { + + filer := *option.filer + filerMountRootPath := *option.filerMountRootPath + dir := *option.dir + chunkSizeLimitMB := *mountOptions.chunkSizeLimitMB util.LoadConfiguration("security", false) @@ -114,14 +105,16 @@ func RunMount(filer, filerMountRootPath, dir, collection, replication, dataCente fuse.MaxReadahead(1024 * 128), fuse.AsyncRead(), fuse.WritebackCache(), - fuse.AllowNonEmptyMount(), } options = append(options, osSpecificMountOptions()...) - if allowOthers { + if *option.allowOthers { options = append(options, fuse.AllowOther()) } + if *option.nonempty { + options = append(options, fuse.AllowNonEmptyMount()) + } c, err := fuse.Mount(dir, options...) if err != nil { @@ -171,12 +164,12 @@ func RunMount(filer, filerMountRootPath, dir, collection, replication, dataCente FilerGrpcAddress: filerGrpcAddress, GrpcDialOption: grpcDialOption, FilerMountRootPath: mountRoot, - Collection: collection, - Replication: replication, - TtlSec: int32(ttlSec), + Collection: *option.collection, + Replication: *option.replication, + TtlSec: int32(*option.ttlSec), ChunkSizeLimit: int64(chunkSizeLimitMB) * 1024 * 1024, - DataCenter: dataCenter, - DirListCacheLimit: dirListCacheLimit, + DataCenter: *option.dataCenter, + DirListCacheLimit: *option.dirListCacheLimit, EntryCacheTtl: 3 * time.Second, MountUid: uid, MountGid: gid, @@ -184,7 +177,7 @@ func RunMount(filer, filerMountRootPath, dir, collection, replication, dataCente MountCtime: fileInfo.ModTime(), MountMtime: time.Now(), Umask: umask, - OutsideContainerClusterMode: outsideContainerClusterMode, + OutsideContainerClusterMode: *mountOptions.outsideContainerClusterMode, Cipher: cipher, })) if err != nil { From 6ff9e2835e822d7f005ea9fd02966ff069d9efc1 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Sat, 21 Mar 2020 22:16:00 -0700 Subject: [PATCH 0281/2432] filer: skip resizing image if width or height larger than original image fix https://github.com/chrislusf/seaweedfs/issues/1239#issuecomment-602140779 --- weed/filer2/stream.go | 55 +++++++++++++++++++++++++++++----- weed/images/resizing.go | 6 ++-- weed/operation/chunked_file.go | 8 ++--- 3 files changed, 56 insertions(+), 13 deletions(-) diff --git a/weed/filer2/stream.go b/weed/filer2/stream.go index bf985f8bd..6c5c84905 100644 --- a/weed/filer2/stream.go +++ b/weed/filer2/stream.go @@ -2,7 +2,6 @@ package filer2 import ( "bytes" - "fmt" "io" "math" @@ -48,9 +47,11 @@ type ChunkStreamReader struct { masterClient *wdclient.MasterClient chunkViews []*ChunkView logicOffset int64 - buffer bytes.Buffer + buffer []byte bufferOffset int64 + bufferPos int chunkIndex int + totalSize int64 } var _ = io.ReadSeeker(&ChunkStreamReader{}) @@ -58,16 +59,21 @@ var _ = io.ReadSeeker(&ChunkStreamReader{}) func NewChunkStreamReader(masterClient *wdclient.MasterClient, chunks []*filer_pb.FileChunk) *ChunkStreamReader { chunkViews := ViewFromChunks(chunks, 0, math.MaxInt32) + var totalSize uint64 + for _, 
chunk := range chunkViews { + totalSize += chunk.Size + } return &ChunkStreamReader{ masterClient: masterClient, chunkViews: chunkViews, bufferOffset: -1, + totalSize: int64(totalSize), } } func (c *ChunkStreamReader) Read(p []byte) (n int, err error) { - if c.buffer.Len() == 0 { + if c.isBufferEmpty() { if c.chunkIndex >= len(c.chunkViews) { return 0, io.EOF } @@ -75,11 +81,42 @@ func (c *ChunkStreamReader) Read(p []byte) (n int, err error) { c.fetchChunkToBuffer(chunkView) c.chunkIndex++ } - return c.buffer.Read(p) + n = copy(p, c.buffer[c.bufferPos:]) + c.bufferPos += n + return +} + +func (c *ChunkStreamReader) isBufferEmpty() bool { + return len(c.buffer) <= c.bufferPos } func (c *ChunkStreamReader) Seek(offset int64, whence int) (int64, error) { - return 0, fmt.Errorf("ChunkStreamReader: seek not supported") + + var err error + switch whence { + case io.SeekStart: + case io.SeekCurrent: + offset += c.bufferOffset + int64(c.bufferPos) + case io.SeekEnd: + offset = c.totalSize + offset + } + if offset > c.totalSize { + err = io.ErrUnexpectedEOF + } + + for i, chunk := range c.chunkViews { + if chunk.LogicOffset <= offset && offset < chunk.LogicOffset+int64(chunk.Size) { + if c.isBufferEmpty() || c.bufferOffset != offset { + c.fetchChunkToBuffer(chunk) + c.chunkIndex = i + 1 + break + } + } + } + c.bufferPos = int(offset - c.bufferOffset) + + return offset, err + } func (c *ChunkStreamReader) fetchChunkToBuffer(chunkView *ChunkView) error { @@ -88,13 +125,17 @@ func (c *ChunkStreamReader) fetchChunkToBuffer(chunkView *ChunkView) error { glog.V(1).Infof("operation LookupFileId %s failed, err: %v", chunkView.FileId, err) return err } - c.buffer.Reset() + var buffer bytes.Buffer err = util.ReadUrlAsStream(urlString, chunkView.CipherKey, chunkView.isGzipped, chunkView.IsFullChunk, chunkView.Offset, int(chunkView.Size), func(data []byte) { - c.buffer.Write(data) + buffer.Write(data) }) if err != nil { glog.V(1).Infof("read %s failed, err: %v", chunkView.FileId, err) return err } + c.buffer = buffer.Bytes() + c.bufferPos = 0 + c.bufferOffset = chunkView.LogicOffset + return nil } diff --git a/weed/images/resizing.go b/weed/images/resizing.go index ff0eff5e1..b048daa1c 100644 --- a/weed/images/resizing.go +++ b/weed/images/resizing.go @@ -6,10 +6,11 @@ import ( "image/gif" "image/jpeg" "image/png" + "io" - "github.com/chrislusf/seaweedfs/weed/glog" "github.com/disintegration/imaging" - "io" + + "github.com/chrislusf/seaweedfs/weed/glog" ) func Resized(ext string, read io.ReadSeeker, width, height int, mode string) (resized io.ReadSeeker, w int, h int) { @@ -35,6 +36,7 @@ func Resized(ext string, read io.ReadSeeker, width, height int, mode string) (re } } } else { + read.Seek(0, 0) return read, bounds.Dx(), bounds.Dy() } var buf bytes.Buffer diff --git a/weed/operation/chunked_file.go b/weed/operation/chunked_file.go index 1ab3c59ed..baa0038c4 100644 --- a/weed/operation/chunked_file.go +++ b/weed/operation/chunked_file.go @@ -142,11 +142,11 @@ func NewChunkedFileReader(chunkList []*ChunkInfo, master string) *ChunkedFileRea func (cf *ChunkedFileReader) Seek(offset int64, whence int) (int64, error) { var err error switch whence { - case 0: - case 1: + case io.SeekStart: + case io.SeekCurrent: offset += cf.pos - case 2: - offset = cf.totalSize - offset + case io.SeekEnd: + offset = cf.totalSize + offset } if offset > cf.totalSize { err = ErrInvalidRange From 82bfad5b8615d9c2cd21efc059514b8899232a0f Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Sat, 21 Mar 2020 23:48:11 -0700 Subject: [PATCH 
0282/2432] refactoring --- weed/filer2/stream.go | 15 +++++++-------- 1 file changed, 7 insertions(+), 8 deletions(-) diff --git a/weed/filer2/stream.go b/weed/filer2/stream.go index 6c5c84905..9c7a68b8e 100644 --- a/weed/filer2/stream.go +++ b/weed/filer2/stream.go @@ -51,7 +51,6 @@ type ChunkStreamReader struct { bufferOffset int64 bufferPos int chunkIndex int - totalSize int64 } var _ = io.ReadSeeker(&ChunkStreamReader{}) @@ -59,16 +58,11 @@ var _ = io.ReadSeeker(&ChunkStreamReader{}) func NewChunkStreamReader(masterClient *wdclient.MasterClient, chunks []*filer_pb.FileChunk) *ChunkStreamReader { chunkViews := ViewFromChunks(chunks, 0, math.MaxInt32) - var totalSize uint64 - for _, chunk := range chunkViews { - totalSize += chunk.Size - } return &ChunkStreamReader{ masterClient: masterClient, chunkViews: chunkViews, bufferOffset: -1, - totalSize: int64(totalSize), } } @@ -92,15 +86,20 @@ func (c *ChunkStreamReader) isBufferEmpty() bool { func (c *ChunkStreamReader) Seek(offset int64, whence int) (int64, error) { + var totalSize int64 + for _, chunk := range c.chunkViews { + totalSize += int64(chunk.Size) + } + var err error switch whence { case io.SeekStart: case io.SeekCurrent: offset += c.bufferOffset + int64(c.bufferPos) case io.SeekEnd: - offset = c.totalSize + offset + offset = totalSize + offset } - if offset > c.totalSize { + if offset > totalSize { err = io.ErrUnexpectedEOF } From 65d2ea9fb00757320f348835d9761a357264ea98 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Sun, 22 Mar 2020 01:00:36 -0700 Subject: [PATCH 0283/2432] FUSE mount: stream read data with buffer fix https://github.com/chrislusf/seaweedfs/issues/1244 --- weed/filer2/stream.go | 59 ++++++++++++++++++++--- weed/filesys/file.go | 5 ++ weed/filesys/filehandle.go | 16 ++++-- weed/server/filer_server_handlers_read.go | 2 +- 4 files changed, 71 insertions(+), 11 deletions(-) diff --git a/weed/filer2/stream.go b/weed/filer2/stream.go index 9c7a68b8e..0f7c3c176 100644 --- a/weed/filer2/stream.go +++ b/weed/filer2/stream.go @@ -2,8 +2,11 @@ package filer2 import ( "bytes" + "context" + "fmt" "io" "math" + "strings" "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" @@ -51,18 +54,51 @@ type ChunkStreamReader struct { bufferOffset int64 bufferPos int chunkIndex int + lookupFileId func(fileId string) (targetUrl string, err error) } var _ = io.ReadSeeker(&ChunkStreamReader{}) -func NewChunkStreamReader(masterClient *wdclient.MasterClient, chunks []*filer_pb.FileChunk) *ChunkStreamReader { +func NewChunkStreamReaderFromFiler(masterClient *wdclient.MasterClient, chunks []*filer_pb.FileChunk) *ChunkStreamReader { chunkViews := ViewFromChunks(chunks, 0, math.MaxInt32) return &ChunkStreamReader{ - masterClient: masterClient, - chunkViews: chunkViews, - bufferOffset: -1, + chunkViews: chunkViews, + lookupFileId: func(fileId string) (targetUrl string, err error) { + return masterClient.LookupFileId(fileId) + }, + } +} + +func NewChunkStreamReaderFromClient(filerClient FilerClient, chunkViews []*ChunkView) *ChunkStreamReader { + + return &ChunkStreamReader{ + chunkViews: chunkViews, + lookupFileId: func(fileId string) (targetUrl string, err error) { + err = filerClient.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { + vid := fileIdToVolumeId(fileId) + resp, err := client.LookupVolume(context.Background(), &filer_pb.LookupVolumeRequest{ + VolumeIds: []string{vid}, + }) + if err != nil { + return err + } + + locations := resp.LocationsMap[vid] + if locations == nil || 
len(locations.Locations) == 0 { + glog.V(0).Infof("failed to locate %s", fileId) + return fmt.Errorf("failed to locate %s", fileId) + } + + volumeServerAddress := filerClient.AdjustedUrl(locations.Locations[0].Url) + + targetUrl = fmt.Sprintf("http://%s/%s", volumeServerAddress, fileId) + + return nil + }) + return + }, } } @@ -72,6 +108,7 @@ func (c *ChunkStreamReader) Read(p []byte) (n int, err error) { return 0, io.EOF } chunkView := c.chunkViews[c.chunkIndex] + println("fetch1") c.fetchChunkToBuffer(chunkView) c.chunkIndex++ } @@ -105,7 +142,7 @@ func (c *ChunkStreamReader) Seek(offset int64, whence int) (int64, error) { for i, chunk := range c.chunkViews { if chunk.LogicOffset <= offset && offset < chunk.LogicOffset+int64(chunk.Size) { - if c.isBufferEmpty() || c.bufferOffset != offset { + if c.isBufferEmpty() || c.bufferOffset != chunk.LogicOffset { c.fetchChunkToBuffer(chunk) c.chunkIndex = i + 1 break @@ -119,7 +156,7 @@ func (c *ChunkStreamReader) Seek(offset int64, whence int) (int64, error) { } func (c *ChunkStreamReader) fetchChunkToBuffer(chunkView *ChunkView) error { - urlString, err := c.masterClient.LookupFileId(chunkView.FileId) + urlString, err := c.lookupFileId(chunkView.FileId) if err != nil { glog.V(1).Infof("operation LookupFileId %s failed, err: %v", chunkView.FileId, err) return err @@ -136,5 +173,15 @@ func (c *ChunkStreamReader) fetchChunkToBuffer(chunkView *ChunkView) error { c.bufferPos = 0 c.bufferOffset = chunkView.LogicOffset + // glog.V(0).Infof("read %s [%d,%d)", chunkView.FileId, chunkView.LogicOffset, chunkView.LogicOffset+int64(chunkView.Size)) + return nil } + +func fileIdToVolumeId(fileId string) (volumeId string) { + parts := strings.Split(fileId, ",") + if len(parts) != 2 { + return fileId + } + return parts[0] +} diff --git a/weed/filesys/file.go b/weed/filesys/file.go index 69d440a73..14b9cb208 100644 --- a/weed/filesys/file.go +++ b/weed/filesys/file.go @@ -2,6 +2,7 @@ package filesys import ( "context" + "io" "os" "sort" "time" @@ -32,6 +33,7 @@ type File struct { entry *filer_pb.Entry entryViewCache []filer2.VisibleInterval isOpen int + reader io.ReadSeeker } func (file *File) fullpath() filer2.FullPath { @@ -119,6 +121,7 @@ func (file *File) Setattr(ctx context.Context, req *fuse.SetattrRequest, resp *f } file.entry.Chunks = chunks file.entryViewCache = nil + file.reader = nil } file.entry.Attributes.FileSize = req.Size } @@ -245,6 +248,7 @@ func (file *File) addChunks(chunks []*filer_pb.FileChunk) { file.entryViewCache = newVisibles newVisibles = t } + file.reader = nil glog.V(3).Infof("%s existing %d chunks adds %d more", file.fullpath(), len(file.entry.Chunks), len(chunks)) @@ -254,6 +258,7 @@ func (file *File) addChunks(chunks []*filer_pb.FileChunk) { func (file *File) setEntry(entry *filer_pb.Entry) { file.entry = entry file.entryViewCache = filer2.NonOverlappingVisibleIntervals(file.entry.Chunks) + file.reader = nil } func (file *File) saveEntry() error { diff --git a/weed/filesys/filehandle.go b/weed/filesys/filehandle.go index 100c9eba0..bfdafd580 100644 --- a/weed/filesys/filehandle.go +++ b/weed/filesys/filehandle.go @@ -3,6 +3,8 @@ package filesys import ( "context" "fmt" + "io" + "math" "mime" "path" "time" @@ -85,17 +87,23 @@ func (fh *FileHandle) readFromChunks(buff []byte, offset int64) (int64, error) { if fh.f.entryViewCache == nil { fh.f.entryViewCache = filer2.NonOverlappingVisibleIntervals(fh.f.entry.Chunks) + fh.f.reader = nil + } + if fh.f.reader == nil { + chunkViews := filer2.ViewFromVisibleIntervals(fh.f.entryViewCache, 
0, math.MaxInt32) + fh.f.reader = filer2.NewChunkStreamReaderFromClient(fh.f.wfs, chunkViews) } - chunkViews := filer2.ViewFromVisibleIntervals(fh.f.entryViewCache, offset, len(buff)) - - totalRead, err := filer2.ReadIntoBuffer(fh.f.wfs, fh.f.fullpath(), buff, chunkViews, offset) + fh.f.reader.Seek(offset, io.SeekStart) + totalRead, err := fh.f.reader.Read(buff) if err != nil { glog.Errorf("file handle read %s: %v", fh.f.fullpath(), err) } - return totalRead, err + // glog.V(0).Infof("file handle read %s [%d,%d] %d : %v", fh.f.fullpath(), offset, offset+int64(totalRead), totalRead, err) + + return int64(totalRead), err } // Write to the file handle diff --git a/weed/server/filer_server_handlers_read.go b/weed/server/filer_server_handlers_read.go index 5322492dc..14414de65 100644 --- a/weed/server/filer_server_handlers_read.go +++ b/weed/server/filer_server_handlers_read.go @@ -94,7 +94,7 @@ func (fs *FilerServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request, ext := filepath.Ext(filename) width, height, mode, shouldResize := shouldResizeImages(ext, r) if shouldResize { - chunkedFileReader := filer2.NewChunkStreamReader(fs.filer.MasterClient, entry.Chunks) + chunkedFileReader := filer2.NewChunkStreamReaderFromFiler(fs.filer.MasterClient, entry.Chunks) rs, _, _ := images.Resized(ext, chunkedFileReader, width, height, mode) io.Copy(w, rs) return From e93e986727d81450b25b8ee9218905e47eb2a282 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Sun, 22 Mar 2020 01:30:27 -0700 Subject: [PATCH 0284/2432] webdav: stream read --- weed/server/webdav_server.go | 24 ++++++++++++++---------- 1 file changed, 14 insertions(+), 10 deletions(-) diff --git a/weed/server/webdav_server.go b/weed/server/webdav_server.go index 1fb0912c5..5a35e05d8 100644 --- a/weed/server/webdav_server.go +++ b/weed/server/webdav_server.go @@ -4,6 +4,7 @@ import ( "context" "fmt" "io" + "math" "os" "path" "strings" @@ -89,6 +90,7 @@ type WebDavFile struct { off int64 entry *filer_pb.Entry entryViewCache []filer2.VisibleInterval + reader io.ReadSeeker } func NewWebDavFileSystem(option *WebDavOption) (webdav.FileSystem, error) { @@ -494,23 +496,25 @@ func (f *WebDavFile) Read(p []byte) (readSize int, err error) { } if f.entryViewCache == nil { f.entryViewCache = filer2.NonOverlappingVisibleIntervals(f.entry.Chunks) + f.reader = nil } - chunkViews := filer2.ViewFromVisibleIntervals(f.entryViewCache, f.off, len(p)) - - totalRead, err := filer2.ReadIntoBuffer(f.fs, filer2.FullPath(f.name), p, chunkViews, f.off) - if err != nil { - return 0, err + if f.reader == nil { + chunkViews := filer2.ViewFromVisibleIntervals(f.entryViewCache, 0, math.MaxInt32) + f.reader = filer2.NewChunkStreamReaderFromClient(f.fs, chunkViews) } - readSize = int(totalRead) - glog.V(3).Infof("WebDavFileSystem.Read %v: [%d,%d)", f.name, f.off, f.off+totalRead) + f.reader.Seek(f.off, io.SeekStart) + readSize, err = f.reader.Read(p) - f.off += totalRead - if readSize == 0 { - return 0, io.EOF + glog.V(0).Infof("WebDavFileSystem.Read %v: [%d,%d)", f.name, f.off, f.off+int64(readSize)) + f.off += int64(readSize) + + if err != nil { + glog.Errorf("file read %s: %v", f.name, err) } return + } func (f *WebDavFile) Readdir(count int) (ret []os.FileInfo, err error) { From 2bdd936fb6adbea465d28b0d6cb9009e1c5d4ab7 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Sun, 22 Mar 2020 01:33:40 -0700 Subject: [PATCH 0285/2432] purge code --- weed/filer2/filer_client_util.go | 66 -------------------------------- 1 file changed, 66 deletions(-) diff --git 
a/weed/filer2/filer_client_util.go b/weed/filer2/filer_client_util.go index 1c1fa6a5b..60b4dec18 100644 --- a/weed/filer2/filer_client_util.go +++ b/weed/filer2/filer_client_util.go @@ -6,11 +6,9 @@ import ( "io" "math" "strings" - "sync" "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" - "github.com/chrislusf/seaweedfs/weed/util" ) func VolumeId(fileId string) string { @@ -26,70 +24,6 @@ type FilerClient interface { AdjustedUrl(hostAndPort string) string } -func ReadIntoBuffer(filerClient FilerClient, fullFilePath FullPath, buff []byte, chunkViews []*ChunkView, baseOffset int64) (totalRead int64, err error) { - var vids []string - for _, chunkView := range chunkViews { - vids = append(vids, VolumeId(chunkView.FileId)) - } - - vid2Locations := make(map[string]*filer_pb.Locations) - - err = filerClient.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { - - glog.V(4).Infof("read fh lookup volume id locations: %v", vids) - resp, err := client.LookupVolume(context.Background(), &filer_pb.LookupVolumeRequest{ - VolumeIds: vids, - }) - if err != nil { - return err - } - - vid2Locations = resp.LocationsMap - - return nil - }) - - if err != nil { - return 0, fmt.Errorf("failed to lookup volume ids %v: %v", vids, err) - } - - var wg sync.WaitGroup - for _, chunkView := range chunkViews { - wg.Add(1) - go func(chunkView *ChunkView) { - defer wg.Done() - - glog.V(4).Infof("read fh reading chunk: %+v", chunkView) - - locations := vid2Locations[VolumeId(chunkView.FileId)] - if locations == nil || len(locations.Locations) == 0 { - glog.V(0).Infof("failed to locate %s", chunkView.FileId) - err = fmt.Errorf("failed to locate %s", chunkView.FileId) - return - } - - volumeServerAddress := filerClient.AdjustedUrl(locations.Locations[0].Url) - var n int64 - n, err = util.ReadUrl(fmt.Sprintf("http://%s/%s", volumeServerAddress, chunkView.FileId), chunkView.CipherKey, chunkView.isGzipped, chunkView.IsFullChunk, chunkView.Offset, int(chunkView.Size), buff[chunkView.LogicOffset-baseOffset:chunkView.LogicOffset-baseOffset+int64(chunkView.Size)]) - - if err != nil { - - glog.V(0).Infof("%v read http://%s/%v %v bytes: %v", fullFilePath, volumeServerAddress, chunkView.FileId, n, err) - - err = fmt.Errorf("failed to read http://%s/%s: %v", - volumeServerAddress, chunkView.FileId, err) - return - } - - glog.V(4).Infof("read fh read %d bytes: %+v", n, chunkView) - totalRead += n - - }(chunkView) - } - wg.Wait() - return -} - func GetEntry(filerClient FilerClient, fullFilePath FullPath) (entry *filer_pb.Entry, err error) { dir, name := fullFilePath.DirAndName() From ae2ee379c065f97f4661db926e1e14808940b607 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Sun, 22 Mar 2020 01:37:46 -0700 Subject: [PATCH 0286/2432] consistent 64bit size --- weed/filer2/filechunks.go | 6 +++--- weed/filer2/filechunks_test.go | 2 +- weed/filer2/stream.go | 4 ++-- weed/filesys/filehandle.go | 2 +- weed/replication/sink/azuresink/azure_sink.go | 2 +- weed/replication/sink/b2sink/b2_sink.go | 2 +- weed/replication/sink/gcssink/gcs_sink.go | 2 +- weed/replication/sink/s3sink/s3_sink.go | 2 +- weed/server/filer_server_handlers_read.go | 2 +- weed/server/webdav_server.go | 2 +- weed/shell/command_fs_cat.go | 2 +- 11 files changed, 14 insertions(+), 14 deletions(-) diff --git a/weed/filer2/filechunks.go b/weed/filer2/filechunks.go index 711488df1..fe7841fa7 100644 --- a/weed/filer2/filechunks.go +++ b/weed/filer2/filechunks.go @@ -75,7 +75,7 @@ type ChunkView struct { isGzipped bool } -func 
ViewFromChunks(chunks []*filer_pb.FileChunk, offset int64, size int) (views []*ChunkView) { +func ViewFromChunks(chunks []*filer_pb.FileChunk, offset int64, size int64) (views []*ChunkView) { visibles := NonOverlappingVisibleIntervals(chunks) @@ -83,9 +83,9 @@ func ViewFromChunks(chunks []*filer_pb.FileChunk, offset int64, size int) (views } -func ViewFromVisibleIntervals(visibles []VisibleInterval, offset int64, size int) (views []*ChunkView) { +func ViewFromVisibleIntervals(visibles []VisibleInterval, offset int64, size int64) (views []*ChunkView) { - stop := offset + int64(size) + stop := offset + size for _, chunk := range visibles { diff --git a/weed/filer2/filechunks_test.go b/weed/filer2/filechunks_test.go index bb4a6c74d..7b1133b85 100644 --- a/weed/filer2/filechunks_test.go +++ b/weed/filer2/filechunks_test.go @@ -218,7 +218,7 @@ func TestChunksReading(t *testing.T) { testcases := []struct { Chunks []*filer_pb.FileChunk Offset int64 - Size int + Size int64 Expected []*ChunkView }{ // case 0: normal diff --git a/weed/filer2/stream.go b/weed/filer2/stream.go index 0f7c3c176..8819070ff 100644 --- a/weed/filer2/stream.go +++ b/weed/filer2/stream.go @@ -14,7 +14,7 @@ import ( "github.com/chrislusf/seaweedfs/weed/wdclient" ) -func StreamContent(masterClient *wdclient.MasterClient, w io.Writer, chunks []*filer_pb.FileChunk, offset int64, size int) error { +func StreamContent(masterClient *wdclient.MasterClient, w io.Writer, chunks []*filer_pb.FileChunk, offset int64, size int64) error { chunkViews := ViewFromChunks(chunks, offset, size) @@ -61,7 +61,7 @@ var _ = io.ReadSeeker(&ChunkStreamReader{}) func NewChunkStreamReaderFromFiler(masterClient *wdclient.MasterClient, chunks []*filer_pb.FileChunk) *ChunkStreamReader { - chunkViews := ViewFromChunks(chunks, 0, math.MaxInt32) + chunkViews := ViewFromChunks(chunks, 0, math.MaxInt64) return &ChunkStreamReader{ chunkViews: chunkViews, diff --git a/weed/filesys/filehandle.go b/weed/filesys/filehandle.go index bfdafd580..1dfaf5944 100644 --- a/weed/filesys/filehandle.go +++ b/weed/filesys/filehandle.go @@ -90,7 +90,7 @@ func (fh *FileHandle) readFromChunks(buff []byte, offset int64) (int64, error) { fh.f.reader = nil } if fh.f.reader == nil { - chunkViews := filer2.ViewFromVisibleIntervals(fh.f.entryViewCache, 0, math.MaxInt32) + chunkViews := filer2.ViewFromVisibleIntervals(fh.f.entryViewCache, 0, math.MaxInt64) fh.f.reader = filer2.NewChunkStreamReaderFromClient(fh.f.wfs, chunkViews) } diff --git a/weed/replication/sink/azuresink/azure_sink.go b/weed/replication/sink/azuresink/azure_sink.go index 89e04922f..d75dbe9af 100644 --- a/weed/replication/sink/azuresink/azure_sink.go +++ b/weed/replication/sink/azuresink/azure_sink.go @@ -96,7 +96,7 @@ func (g *AzureSink) CreateEntry(key string, entry *filer_pb.Entry) error { } totalSize := filer2.TotalSize(entry.Chunks) - chunkViews := filer2.ViewFromChunks(entry.Chunks, 0, int(totalSize)) + chunkViews := filer2.ViewFromChunks(entry.Chunks, 0, int64(totalSize)) // Create a URL that references a to-be-created blob in your // Azure Storage account's container. 
diff --git a/weed/replication/sink/b2sink/b2_sink.go b/weed/replication/sink/b2sink/b2_sink.go index df0653f73..b5d410a75 100644 --- a/weed/replication/sink/b2sink/b2_sink.go +++ b/weed/replication/sink/b2sink/b2_sink.go @@ -85,7 +85,7 @@ func (g *B2Sink) CreateEntry(key string, entry *filer_pb.Entry) error { } totalSize := filer2.TotalSize(entry.Chunks) - chunkViews := filer2.ViewFromChunks(entry.Chunks, 0, int(totalSize)) + chunkViews := filer2.ViewFromChunks(entry.Chunks, 0, int64(totalSize)) bucket, err := g.client.Bucket(context.Background(), g.bucket) if err != nil { diff --git a/weed/replication/sink/gcssink/gcs_sink.go b/weed/replication/sink/gcssink/gcs_sink.go index 694399274..b1a8d7753 100644 --- a/weed/replication/sink/gcssink/gcs_sink.go +++ b/weed/replication/sink/gcssink/gcs_sink.go @@ -90,7 +90,7 @@ func (g *GcsSink) CreateEntry(key string, entry *filer_pb.Entry) error { } totalSize := filer2.TotalSize(entry.Chunks) - chunkViews := filer2.ViewFromChunks(entry.Chunks, 0, int(totalSize)) + chunkViews := filer2.ViewFromChunks(entry.Chunks, 0, int64(totalSize)) wc := g.client.Bucket(g.bucket).Object(key).NewWriter(context.Background()) diff --git a/weed/replication/sink/s3sink/s3_sink.go b/weed/replication/sink/s3sink/s3_sink.go index e0aee5ada..5dbc3fdb7 100644 --- a/weed/replication/sink/s3sink/s3_sink.go +++ b/weed/replication/sink/s3sink/s3_sink.go @@ -103,7 +103,7 @@ func (s3sink *S3Sink) CreateEntry(key string, entry *filer_pb.Entry) error { } totalSize := filer2.TotalSize(entry.Chunks) - chunkViews := filer2.ViewFromChunks(entry.Chunks, 0, int(totalSize)) + chunkViews := filer2.ViewFromChunks(entry.Chunks, 0, int64(totalSize)) parts := make([]*s3.CompletedPart, len(chunkViews)) diff --git a/weed/server/filer_server_handlers_read.go b/weed/server/filer_server_handlers_read.go index 14414de65..310fbcec4 100644 --- a/weed/server/filer_server_handlers_read.go +++ b/weed/server/filer_server_handlers_read.go @@ -102,7 +102,7 @@ func (fs *FilerServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request, } processRangeRequest(r, w, totalSize, mimeType, func(writer io.Writer, offset int64, size int64) error { - return filer2.StreamContent(fs.filer.MasterClient, writer, entry.Chunks, offset, int(size)) + return filer2.StreamContent(fs.filer.MasterClient, writer, entry.Chunks, offset, size) }) } diff --git a/weed/server/webdav_server.go b/weed/server/webdav_server.go index 5a35e05d8..a7232d8a8 100644 --- a/weed/server/webdav_server.go +++ b/weed/server/webdav_server.go @@ -499,7 +499,7 @@ func (f *WebDavFile) Read(p []byte) (readSize int, err error) { f.reader = nil } if f.reader == nil { - chunkViews := filer2.ViewFromVisibleIntervals(f.entryViewCache, 0, math.MaxInt32) + chunkViews := filer2.ViewFromVisibleIntervals(f.entryViewCache, 0, math.MaxInt64) f.reader = filer2.NewChunkStreamReaderFromClient(f.fs, chunkViews) } diff --git a/weed/shell/command_fs_cat.go b/weed/shell/command_fs_cat.go index 3db487979..7d2ac8989 100644 --- a/weed/shell/command_fs_cat.go +++ b/weed/shell/command_fs_cat.go @@ -54,7 +54,7 @@ func (c *commandFsCat) Do(args []string, commandEnv *CommandEnv, writer io.Write return err } - return filer2.StreamContent(commandEnv.MasterClient, writer, respLookupEntry.Entry.Chunks, 0, math.MaxInt32) + return filer2.StreamContent(commandEnv.MasterClient, writer, respLookupEntry.Entry.Chunks, 0, math.MaxInt64) }) From 9f2c9b666b3e06272a9c498b7974dcb2f2c7ba51 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Sun, 22 Mar 2020 01:39:08 -0700 Subject: [PATCH 0287/2432] 
adjust log level --- weed/server/webdav_server.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/weed/server/webdav_server.go b/weed/server/webdav_server.go index a7232d8a8..f617e4a98 100644 --- a/weed/server/webdav_server.go +++ b/weed/server/webdav_server.go @@ -506,7 +506,7 @@ func (f *WebDavFile) Read(p []byte) (readSize int, err error) { f.reader.Seek(f.off, io.SeekStart) readSize, err = f.reader.Read(p) - glog.V(0).Infof("WebDavFileSystem.Read %v: [%d,%d)", f.name, f.off, f.off+int64(readSize)) + glog.V(3).Infof("WebDavFileSystem.Read %v: [%d,%d)", f.name, f.off, f.off+int64(readSize)) f.off += int64(readSize) if err != nil { From 64000f5c451f0a94acd526f680dcac6ac3276201 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Sun, 22 Mar 2020 13:09:16 -0700 Subject: [PATCH 0288/2432] FUSE mount: rename under root fix https://github.com/chrislusf/seaweedfs/issues/1242 --- weed/filesys/wfs.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/weed/filesys/wfs.go b/weed/filesys/wfs.go index 77438b58e..382617384 100644 --- a/weed/filesys/wfs.go +++ b/weed/filesys/wfs.go @@ -84,6 +84,9 @@ func NewSeaweedFileSystem(option *Option) *WFS { } wfs.root = &Dir{Path: wfs.option.FilerMountRootPath, wfs: wfs} + wfs.getNode(filer2.FullPath(wfs.option.FilerMountRootPath), func() fs.Node { + return wfs.root + }) return wfs } From 74e23e8d64732c5221507db43399a16d43528794 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Sun, 22 Mar 2020 13:09:33 -0700 Subject: [PATCH 0289/2432] FUSE mount: rename correctly across folders --- weed/filesys/dir_rename.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/weed/filesys/dir_rename.go b/weed/filesys/dir_rename.go index 9b0c0fe6e..b590250f5 100644 --- a/weed/filesys/dir_rename.go +++ b/weed/filesys/dir_rename.go @@ -43,7 +43,7 @@ func (dir *Dir) Rename(ctx context.Context, req *fuse.RenameRequest, newDirector oldFileNode := dir.wfs.getNode(oldPath, func() fs.Node { return nil }) - newDirNode := dir.wfs.getNode(filer2.FullPath(dir.Path), func() fs.Node { + newDirNode := dir.wfs.getNode(filer2.FullPath(newDir.Path), func() fs.Node { return nil }) dir.wfs.forgetNode(newPath) From bda13ed593385b05396e9f6c3edbc5c2aea01156 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Sun, 22 Mar 2020 13:09:51 -0700 Subject: [PATCH 0290/2432] commented prints --- weed/filesys/dir_rename.go | 1 + 1 file changed, 1 insertion(+) diff --git a/weed/filesys/dir_rename.go b/weed/filesys/dir_rename.go index b590250f5..1a220a063 100644 --- a/weed/filesys/dir_rename.go +++ b/weed/filesys/dir_rename.go @@ -46,6 +46,7 @@ func (dir *Dir) Rename(ctx context.Context, req *fuse.RenameRequest, newDirector newDirNode := dir.wfs.getNode(filer2.FullPath(newDir.Path), func() fs.Node { return nil }) + // fmt.Printf("new path: %v dir: %v node:%+v\n", newPath, newDir.Path, newDirNode) dir.wfs.forgetNode(newPath) dir.wfs.forgetNode(oldPath) if oldFileNode != nil && newDirNode != nil { From 0bf148f49d63a834000f7bce63df2c0f4d78fa19 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Sun, 22 Mar 2020 16:19:00 -0700 Subject: [PATCH 0291/2432] logging --- weed/filesys/file.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/weed/filesys/file.go b/weed/filesys/file.go index 14b9cb208..adafc54d7 100644 --- a/weed/filesys/file.go +++ b/weed/filesys/file.go @@ -71,7 +71,7 @@ func (file *File) Attr(ctx context.Context, attr *fuse.Attr) error { func (file *File) Getxattr(ctx context.Context, req *fuse.GetxattrRequest, resp *fuse.GetxattrResponse) error { - // glog.V(4).Infof("file 
Getxattr %s", file.fullpath()) + glog.V(4).Infof("file Getxattr %s", file.fullpath()) if err := file.maybeLoadEntry(ctx); err != nil { return err From 3137777d8395111f6c1eb4b3653e13f4961b8510 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Sun, 22 Mar 2020 16:21:42 -0700 Subject: [PATCH 0292/2432] volume: automatically detect max volume count --- weed/server/master_grpc_server.go | 5 +++++ weed/server/volume_grpc_client_to_master.go | 7 +++++- weed/storage/disk_location.go | 16 +++++++++++++ weed/storage/disk_location_ec.go | 7 ++++++ weed/storage/erasure_coding/ec_volume.go | 7 ++++++ weed/storage/store.go | 25 +++++++++++++++++++++ 6 files changed, 66 insertions(+), 1 deletion(-) diff --git a/weed/server/master_grpc_server.go b/weed/server/master_grpc_server.go index 84087df8b..cfe5fd9c0 100644 --- a/weed/server/master_grpc_server.go +++ b/weed/server/master_grpc_server.go @@ -81,6 +81,11 @@ func (ms *MasterServer) SendHeartbeat(stream master_pb.Seaweed_SendHeartbeatServ } } + if dn.GetMaxVolumeCount() != int64(heartbeat.MaxVolumeCount) { + delta := int64(heartbeat.MaxVolumeCount) - dn.GetMaxVolumeCount() + dn.UpAdjustMaxVolumeCountDelta(delta) + } + glog.V(4).Infof("master received heartbeat %s", heartbeat.String()) message := &master_pb.VolumeLocation{ Url: dn.Url(), diff --git a/weed/server/volume_grpc_client_to_master.go b/weed/server/volume_grpc_client_to_master.go index 1f4d9df10..517eb4bc0 100644 --- a/weed/server/volume_grpc_client_to_master.go +++ b/weed/server/volume_grpc_client_to_master.go @@ -80,8 +80,13 @@ func (vs *VolumeServer) doHeartbeat(masterNode, masterGrpcAddress string, grpcDi doneChan <- err return } - if in.GetVolumeSizeLimit() != 0 { + if in.GetVolumeSizeLimit() != 0 && vs.store.GetVolumeSizeLimit() != in.GetVolumeSizeLimit() { vs.store.SetVolumeSizeLimit(in.GetVolumeSizeLimit()) + if vs.store.MaybeAdjustVolumeMax() { + if err = stream.Send(vs.store.CollectHeartbeat()); err != nil { + glog.V(0).Infof("Volume Server Failed to talk with master %s: %v", masterNode, err) + } + } } if in.GetLeader() != "" && masterNode != in.GetLeader() && !isSameIP(in.GetLeader(), masterNode) { glog.V(0).Infof("Volume Server found a new master newLeader: %v instead of %v", in.GetLeader(), masterNode) diff --git a/weed/storage/disk_location.go b/weed/storage/disk_location.go index f15303282..3c8a7b864 100644 --- a/weed/storage/disk_location.go +++ b/weed/storage/disk_location.go @@ -275,3 +275,19 @@ func (l *DiskLocation) LocateVolume(vid needle.VolumeId) (os.FileInfo, bool) { return nil, false } + +func (l *DiskLocation) UnUsedSpace(volumeSizeLimit uint64) (unUsedSpace uint64) { + + l.volumesLock.RLock() + defer l.volumesLock.RUnlock() + + for _, vol := range l.volumes { + if vol.IsReadOnly() { + continue + } + datSize, idxSize, _ := vol.FileStat() + unUsedSpace += volumeSizeLimit - (datSize + idxSize) + } + + return +} diff --git a/weed/storage/disk_location_ec.go b/weed/storage/disk_location_ec.go index f6c44e966..72d3e2b3e 100644 --- a/weed/storage/disk_location_ec.go +++ b/weed/storage/disk_location_ec.go @@ -183,3 +183,10 @@ func (l *DiskLocation) unmountEcVolumeByCollection(collectionName string) map[ne } return deltaVols } + +func (l *DiskLocation) EcVolumesLen() int { + l.ecVolumesLock.RLock() + defer l.ecVolumesLock.RUnlock() + + return len(l.ecVolumes) +} diff --git a/weed/storage/erasure_coding/ec_volume.go b/weed/storage/erasure_coding/ec_volume.go index 3d9aa2cff..eef53765f 100644 --- a/weed/storage/erasure_coding/ec_volume.go +++ 
b/weed/storage/erasure_coding/ec_volume.go @@ -152,6 +152,13 @@ func (ev *EcVolume) ShardSize() int64 { return 0 } +func (ev *EcVolume) Size() (size int64) { + for _, shard := range ev.Shards { + size += shard.Size() + } + return +} + func (ev *EcVolume) CreatedAt() time.Time { return ev.ecxCreatedAt } diff --git a/weed/storage/store.go b/weed/storage/store.go index 76fe4de27..4ef3682d8 100644 --- a/weed/storage/store.go +++ b/weed/storage/store.go @@ -12,6 +12,7 @@ import ( "github.com/chrislusf/seaweedfs/weed/pb" "github.com/chrislusf/seaweedfs/weed/pb/master_pb" "github.com/chrislusf/seaweedfs/weed/stats" + "github.com/chrislusf/seaweedfs/weed/storage/erasure_coding" "github.com/chrislusf/seaweedfs/weed/storage/needle" "github.com/chrislusf/seaweedfs/weed/storage/super_block" . "github.com/chrislusf/seaweedfs/weed/storage/types" @@ -99,6 +100,9 @@ func (s *Store) FindFreeLocation() (ret *DiskLocation) { max := 0 for _, location := range s.Locations { currentFreeCount := location.MaxVolumeCount - location.VolumesLen() + currentFreeCount *= erasure_coding.DataShardsCount + currentFreeCount -= location.EcVolumesLen() + currentFreeCount /= erasure_coding.DataShardsCount if currentFreeCount > max { max = currentFreeCount ret = location @@ -382,3 +386,24 @@ func (s *Store) SetVolumeSizeLimit(x uint64) { func (s *Store) GetVolumeSizeLimit() uint64 { return atomic.LoadUint64(&s.volumeSizeLimit) } + +func (s *Store) MaybeAdjustVolumeMax() (hasChanges bool) { + volumeSizeLimit := s.GetVolumeSizeLimit() + for _, diskLocation := range s.Locations { + if diskLocation.MaxVolumeCount == 0 { + diskStatus := stats.NewDiskStatus(diskLocation.Directory) + unusedSpace := diskLocation.UnUsedSpace(volumeSizeLimit) + unclaimedSpaces := int64(diskStatus.Free) - int64(unusedSpace) + volCount := diskLocation.VolumesLen() + maxVolumeCount := volCount + if unclaimedSpaces > int64(volumeSizeLimit) { + maxVolumeCount += int(uint64(unclaimedSpaces)/volumeSizeLimit) - 1 + } + diskLocation.MaxVolumeCount = maxVolumeCount + glog.V(0).Infof("disk %s max %d unclaimedSpace:%dMB, unused:%dMB volumeSizeLimit:%d/MB", + diskLocation.Directory, maxVolumeCount, unclaimedSpaces/1024/1024, unusedSpace/1024/1024, volumeSizeLimit/1024/1024) + hasChanges = true + } + } + return +} From e32999108abbbc3973d7f423498899435313d58b Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Sun, 22 Mar 2020 16:27:15 -0700 Subject: [PATCH 0293/2432] add auto configured volume max count help messge --- weed/command/server.go | 2 +- weed/command/volume.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/weed/command/server.go b/weed/command/server.go index 560b90037..8e0d41277 100644 --- a/weed/command/server.go +++ b/weed/command/server.go @@ -53,7 +53,7 @@ var ( serverWhiteListOption = cmdServer.Flag.String("whiteList", "", "comma separated Ip addresses having write permission. No limit if empty.") serverDisableHttp = cmdServer.Flag.Bool("disableHttp", false, "disable http requests, only gRPC operations are allowed.") volumeDataFolders = cmdServer.Flag.String("dir", os.TempDir(), "directories to store data files. dir[,dir]...") - volumeMaxDataVolumeCounts = cmdServer.Flag.String("volume.max", "7", "maximum numbers of volumes, count[,count]...") + volumeMaxDataVolumeCounts = cmdServer.Flag.String("volume.max", "7", "maximum numbers of volumes, count[,count]... 
If set to zero on non-windows OS, the limit will be auto configured.") pulseSeconds = cmdServer.Flag.Int("pulseSeconds", 5, "number of seconds between heartbeats") isStartingFiler = cmdServer.Flag.Bool("filer", false, "whether to start filer") isStartingS3 = cmdServer.Flag.Bool("s3", false, "whether to start S3 gateway") diff --git a/weed/command/volume.go b/weed/command/volume.go index 4773d8a55..68a0ce223 100644 --- a/weed/command/volume.go +++ b/weed/command/volume.go @@ -84,7 +84,7 @@ var cmdVolume = &Command{ var ( volumeFolders = cmdVolume.Flag.String("dir", os.TempDir(), "directories to store data files. dir[,dir]...") - maxVolumeCounts = cmdVolume.Flag.String("max", "7", "maximum numbers of volumes, count[,count]...") + maxVolumeCounts = cmdVolume.Flag.String("max", "7", "maximum numbers of volumes, count[,count]... If set to zero on non-windows OS, the limit will be auto configured.") volumeWhiteListOption = cmdVolume.Flag.String("whiteList", "", "comma separated Ip addresses having write permission. No limit if empty.") ) From 35208711e5c16656e0ebe9ae570eee81f35fb493 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Sun, 22 Mar 2020 18:32:49 -0700 Subject: [PATCH 0294/2432] logging --- weed/topology/node.go | 18 +++++++++++++++++- weed/topology/volume_growth.go | 1 + 2 files changed, 18 insertions(+), 1 deletion(-) diff --git a/weed/topology/node.go b/weed/topology/node.go index ceeb96d60..32d9d5e60 100644 --- a/weed/topology/node.go +++ b/weed/topology/node.go @@ -79,7 +79,7 @@ func (n *NodeImpl) PickNodesByWeight(numberOfNodes int, filterFirstNodeFn func(d } n.RUnlock() if len(candidates) < numberOfNodes { - glog.V(2).Infoln(n.Id(), "failed to pick", numberOfNodes, "from ", len(candidates), "node candidates") + glog.V(0).Infoln(n.Id(), "failed to pick", numberOfNodes, "from ", len(candidates), "node candidates") return nil, nil, errors.New("No enough data node found!") } @@ -192,30 +192,46 @@ func (n *NodeImpl) ReserveOneVolume(r int64) (assignedNode *DataNode, err error) } func (n *NodeImpl) UpAdjustMaxVolumeCountDelta(maxVolumeCountDelta int64) { //can be negative + if maxVolumeCountDelta == 0 { + return + } atomic.AddInt64(&n.maxVolumeCount, maxVolumeCountDelta) + println("node", n.Id(), "new max", n.maxVolumeCount, "delta", maxVolumeCountDelta) if n.parent != nil { n.parent.UpAdjustMaxVolumeCountDelta(maxVolumeCountDelta) } } func (n *NodeImpl) UpAdjustVolumeCountDelta(volumeCountDelta int64) { //can be negative + if volumeCountDelta == 0 { + return + } atomic.AddInt64(&n.volumeCount, volumeCountDelta) if n.parent != nil { n.parent.UpAdjustVolumeCountDelta(volumeCountDelta) } } func (n *NodeImpl) UpAdjustRemoteVolumeCountDelta(remoteVolumeCountDelta int64) { //can be negative + if remoteVolumeCountDelta == 0 { + return + } atomic.AddInt64(&n.remoteVolumeCount, remoteVolumeCountDelta) if n.parent != nil { n.parent.UpAdjustRemoteVolumeCountDelta(remoteVolumeCountDelta) } } func (n *NodeImpl) UpAdjustEcShardCountDelta(ecShardCountDelta int64) { //can be negative + if ecShardCountDelta == 0 { + return + } atomic.AddInt64(&n.ecShardCount, ecShardCountDelta) if n.parent != nil { n.parent.UpAdjustEcShardCountDelta(ecShardCountDelta) } } func (n *NodeImpl) UpAdjustActiveVolumeCountDelta(activeVolumeCountDelta int64) { //can be negative + if activeVolumeCountDelta == 0 { + return + } atomic.AddInt64(&n.activeVolumeCount, activeVolumeCountDelta) if n.parent != nil { n.parent.UpAdjustActiveVolumeCountDelta(activeVolumeCountDelta) diff --git a/weed/topology/volume_growth.go 
b/weed/topology/volume_growth.go index 446c88f60..58b5702bf 100644 --- a/weed/topology/volume_growth.go +++ b/weed/topology/volume_growth.go @@ -85,6 +85,7 @@ func (vg *VolumeGrowth) GrowByCountAndType(grpcDialOption grpc.DialOption, targe if c, e := vg.findAndGrow(grpcDialOption, topo, option); e == nil { counter += c } else { + glog.V(0).Infof("create %d volume, created %d: %v", targetCount, counter, e) return counter, e } } From d6412f27a064cf84ba6c9180342c2ffdf33d8772 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Sun, 22 Mar 2020 18:32:56 -0700 Subject: [PATCH 0295/2432] logging --- weed/util/config.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/weed/util/config.go b/weed/util/config.go index dfbfdbd82..33809d44d 100644 --- a/weed/util/config.go +++ b/weed/util/config.go @@ -27,7 +27,7 @@ func LoadConfiguration(configFileName string, required bool) (loaded bool) { glog.V(1).Infof("Reading %s.toml from %s", configFileName, viper.ConfigFileUsed()) if err := viper.MergeInConfig(); err != nil { // Handle errors reading the config file - glog.V(0).Infof("Reading %s: %v", viper.ConfigFileUsed(), err) + glog.V(1).Infof("Reading %s: %v", viper.ConfigFileUsed(), err) if required { glog.Fatalf("Failed to load %s.toml file from current directory, or $HOME/.seaweedfs/, or /etc/seaweedfs/"+ "\n\nPlease use this command to generate the default %s.toml file\n"+ From c16dc57a58acafd183116e287ce3322379f38d69 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Sun, 22 Mar 2020 18:33:19 -0700 Subject: [PATCH 0296/2432] fix max volume count reporting --- weed/server/master_grpc_server.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/weed/server/master_grpc_server.go b/weed/server/master_grpc_server.go index cfe5fd9c0..e5fcacc0e 100644 --- a/weed/server/master_grpc_server.go +++ b/weed/server/master_grpc_server.go @@ -81,7 +81,7 @@ func (ms *MasterServer) SendHeartbeat(stream master_pb.Seaweed_SendHeartbeatServ } } - if dn.GetMaxVolumeCount() != int64(heartbeat.MaxVolumeCount) { + if heartbeat.MaxVolumeCount != 0 && dn.GetMaxVolumeCount() != int64(heartbeat.MaxVolumeCount) { delta := int64(heartbeat.MaxVolumeCount) - dn.GetMaxVolumeCount() dn.UpAdjustMaxVolumeCountDelta(delta) } From 3775211962f9595e9c00600d23ac841646a7b22a Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Sun, 22 Mar 2020 18:35:45 -0700 Subject: [PATCH 0297/2432] optimize entrypoint for docker --- docker/entrypoint.sh | 23 +++-------------------- docker/local-cluster-compose.yml | 6 +++--- docker/local-dev-compose.yml | 6 +++--- docker/seaweedfs-compose.yml | 2 +- docker/seaweedfs-dev-compose.yml | 6 +++--- 5 files changed, 13 insertions(+), 30 deletions(-) diff --git a/docker/entrypoint.sh b/docker/entrypoint.sh index 791527d3a..7ce0fde8d 100755 --- a/docker/entrypoint.sh +++ b/docker/entrypoint.sh @@ -3,44 +3,27 @@ case "$1" in 'master') - ARGS="-mdir /data" - # Is this instance linked with an other master? (Docker commandline "--link master1:master") - if [ -n "$MASTER_PORT_9333_TCP_ADDR" ] ; then - ARGS="$ARGS -peers=$MASTER_PORT_9333_TCP_ADDR:$MASTER_PORT_9333_TCP_PORT" - fi + ARGS="-mdir=/data -volumePreallocate -volumeSizeLimitMB=1024" exec /usr/bin/weed $@ $ARGS ;; 'volume') - ARGS="-ip `hostname -i` -dir /data" - # Is this instance linked with a master? 
(Docker commandline "--link master1:master") - if [ -n "$MASTER_PORT_9333_TCP_ADDR" ] ; then - ARGS="$ARGS -mserver=$MASTER_PORT_9333_TCP_ADDR:$MASTER_PORT_9333_TCP_PORT" - fi + ARGS="-dir=/data -max=0" exec /usr/bin/weed $@ $ARGS ;; 'server') - ARGS="-ip `hostname -i` -dir /data" - if [ -n "$MASTER_PORT_9333_TCP_ADDR" ] ; then - ARGS="$ARGS -master.peers=$MASTER_PORT_9333_TCP_ADDR:$MASTER_PORT_9333_TCP_PORT" - fi + ARGS="-ip=`hostname -i` -dir=/data -volume.max=0 -master.volumePreallocate -master.volumeSizeLimitMB=1024" exec /usr/bin/weed $@ $ARGS ;; 'filer') ARGS="" - if [ -n "$MASTER_PORT_9333_TCP_ADDR" ] ; then - ARGS="$ARGS -master=$MASTER_PORT_9333_TCP_ADDR:$MASTER_PORT_9333_TCP_PORT" - fi exec /usr/bin/weed $@ $ARGS ;; 's3') ARGS="-domainName=$S3_DOMAIN_NAME -key.file=$S3_KEY_FILE -cert.file=$S3_CERT_FILE" - if [ -n "$FILER_PORT_8888_TCP_ADDR" ] ; then - ARGS="$ARGS -filer=$FILER_PORT_8888_TCP_ADDR:$FILER_PORT_8888_TCP_PORT" - fi exec /usr/bin/weed $@ $ARGS ;; diff --git a/docker/local-cluster-compose.yml b/docker/local-cluster-compose.yml index 0b6860fa1..a1ac824e7 100644 --- a/docker/local-cluster-compose.yml +++ b/docker/local-cluster-compose.yml @@ -24,7 +24,7 @@ services: ports: - 8080:8080 - 18080:18080 - command: '-v=2 volume -max=5 -mserver="master0:9333,master1:9334,master2:9335" -port=8080 -ip=volume' + command: 'volume -mserver="master0:9333,master1:9334,master2:9335" -port=8080 -ip=volume' depends_on: - master0 - master1 @@ -34,7 +34,7 @@ services: ports: - 8888:8888 - 18888:18888 - command: '-v=4 filer -master="master0:9333,master1:9334,master2:9335"' + command: 'filer -master="master0:9333,master1:9334,master2:9335"' depends_on: - master0 - master1 @@ -44,7 +44,7 @@ services: image: chrislusf/seaweedfs:local ports: - 8333:8333 - command: '-v=4 s3 -filer="filer:8888"' + command: 's3 -filer="filer:8888"' depends_on: - master0 - master1 diff --git a/docker/local-dev-compose.yml b/docker/local-dev-compose.yml index 5ff42ed28..f6fd0f4ce 100644 --- a/docker/local-dev-compose.yml +++ b/docker/local-dev-compose.yml @@ -12,7 +12,7 @@ services: ports: - 8080:8080 - 18080:18080 - command: '-v=2 volume -max=5 -mserver="master:9333" -port=8080 -ip=volume' + command: "volume -mserver=master:9333 -port=8080 -ip=volume" depends_on: - master filer: @@ -20,7 +20,7 @@ services: ports: - 8888:8888 - 18888:18888 - command: '-v=4 filer -master="master:9333"' + command: 'filer -master="master:9333"' depends_on: - master - volume @@ -28,7 +28,7 @@ services: image: chrislusf/seaweedfs:local ports: - 8333:8333 - command: '-v=4 s3 -filer="filer:8888"' + command: 's3 -filer="filer:8888"' depends_on: - master - volume diff --git a/docker/seaweedfs-compose.yml b/docker/seaweedfs-compose.yml index 35509c541..70d005017 100644 --- a/docker/seaweedfs-compose.yml +++ b/docker/seaweedfs-compose.yml @@ -12,7 +12,7 @@ services: ports: - 8080:8080 - 18080:18080 - command: 'volume -max=15 -mserver="master:9333" -port=8080' + command: 'volume -mserver="master:9333" -port=8080' depends_on: - master filer: diff --git a/docker/seaweedfs-dev-compose.yml b/docker/seaweedfs-dev-compose.yml index 197510a9f..75801102e 100644 --- a/docker/seaweedfs-dev-compose.yml +++ b/docker/seaweedfs-dev-compose.yml @@ -12,7 +12,7 @@ services: ports: - 8080:8080 - 18080:18080 - command: '-v=2 volume -max=5 -mserver="master:9333" -port=8080 -ip=volume' + command: 'volume -mserver="master:9333" -port=8080 -ip=volume' depends_on: - master filer: @@ -20,7 +20,7 @@ services: ports: - 8888:8888 - 18888:18888 - command: '-v=4 filer 
-master="master:9333"' + command: 'filer -master="master:9333"' depends_on: - master - volume @@ -28,7 +28,7 @@ services: image: chrislusf/seaweedfs:dev # use a remote dev image ports: - 8333:8333 - command: '-v=4 s3 -filer="filer:8888"' + command: 's3 -filer="filer:8888"' depends_on: - master - volume From e39e78ea8d1fd2bd4c7544ef6b158b9d59aa3ff7 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Sun, 22 Mar 2020 18:37:12 -0700 Subject: [PATCH 0298/2432] remove println --- weed/topology/node.go | 1 - 1 file changed, 1 deletion(-) diff --git a/weed/topology/node.go b/weed/topology/node.go index 32d9d5e60..114417edf 100644 --- a/weed/topology/node.go +++ b/weed/topology/node.go @@ -196,7 +196,6 @@ func (n *NodeImpl) UpAdjustMaxVolumeCountDelta(maxVolumeCountDelta int64) { //ca return } atomic.AddInt64(&n.maxVolumeCount, maxVolumeCountDelta) - println("node", n.Id(), "new max", n.maxVolumeCount, "delta", maxVolumeCountDelta) if n.parent != nil { n.parent.UpAdjustMaxVolumeCountDelta(maxVolumeCountDelta) } From b77b4cc3a295b9a37cd051fd9d61bbe02b4c35bb Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Sun, 22 Mar 2020 18:52:21 -0700 Subject: [PATCH 0299/2432] optimize -max settings for containers --- k8s/seaweedfs/values.yaml | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/k8s/seaweedfs/values.yaml b/k8s/seaweedfs/values.yaml index 025e16c2d..32fc35dab 100644 --- a/k8s/seaweedfs/values.yaml +++ b/k8s/seaweedfs/values.yaml @@ -107,8 +107,9 @@ volume: # Directories to store data files. dir[,dir]... (default "/tmp") dir: "/data" - # Maximum numbers of volumes, count[,count]... (default "7") - maxVolumes: "10000" + # Maximum numbers of volumes, count[,count]... + # If set to zero on non-windows OS, the limit will be auto configured. (default "7") + maxVolumes: "0" # Volume server's rack name rack: null From fbca6b29bd48eeed54511a9b53b937597eee5d19 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Sun, 22 Mar 2020 23:52:55 -0700 Subject: [PATCH 0300/2432] refactoring --- weed/s3api/filer_util.go | 10 +++++----- weed/s3api/s3api_bucket_handlers.go | 4 ++-- weed/s3api/s3api_handlers.go | 2 +- weed/s3api/s3api_object_handlers.go | 2 +- weed/s3api/s3api_objects_list_handlers.go | 2 +- 5 files changed, 10 insertions(+), 10 deletions(-) diff --git a/weed/s3api/filer_util.go b/weed/s3api/filer_util.go index 2793ee71d..be985c893 100644 --- a/weed/s3api/filer_util.go +++ b/weed/s3api/filer_util.go @@ -13,7 +13,7 @@ import ( ) func (s3a *S3ApiServer) mkdir(parentDirectoryPath string, dirName string, fn func(entry *filer_pb.Entry)) error { - return s3a.withFilerClient(func(client filer_pb.SeaweedFilerClient) error { + return s3a.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { entry := &filer_pb.Entry{ Name: dirName, @@ -47,7 +47,7 @@ func (s3a *S3ApiServer) mkdir(parentDirectoryPath string, dirName string, fn fun } func (s3a *S3ApiServer) mkFile(parentDirectoryPath string, fileName string, chunks []*filer_pb.FileChunk) error { - return s3a.withFilerClient(func(client filer_pb.SeaweedFilerClient) error { + return s3a.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { entry := &filer_pb.Entry{ Name: fileName, @@ -79,7 +79,7 @@ func (s3a *S3ApiServer) mkFile(parentDirectoryPath string, fileName string, chun func (s3a *S3ApiServer) list(parentDirectoryPath, prefix, startFrom string, inclusive bool, limit int) (entries []*filer_pb.Entry, err error) { - err = s3a.withFilerClient(func(client filer_pb.SeaweedFilerClient) error { + err = s3a.WithFilerClient(func(client 
filer_pb.SeaweedFilerClient) error { request := &filer_pb.ListEntriesRequest{ Directory: parentDirectoryPath, @@ -119,7 +119,7 @@ func (s3a *S3ApiServer) list(parentDirectoryPath, prefix, startFrom string, incl func (s3a *S3ApiServer) rm(parentDirectoryPath, entryName string, isDeleteData, isRecursive bool) error { - return s3a.withFilerClient(func(client filer_pb.SeaweedFilerClient) error { + return s3a.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { err := doDeleteEntry(client, parentDirectoryPath, entryName, isDeleteData, isRecursive) if err != nil { @@ -153,7 +153,7 @@ func doDeleteEntry(client filer_pb.SeaweedFilerClient, parentDirectoryPath strin func (s3a *S3ApiServer) exists(parentDirectoryPath string, entryName string, isDirectory bool) (exists bool, err error) { - err = s3a.withFilerClient(func(client filer_pb.SeaweedFilerClient) error { + err = s3a.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { request := &filer_pb.LookupDirectoryEntryRequest{ Directory: parentDirectoryPath, diff --git a/weed/s3api/s3api_bucket_handlers.go b/weed/s3api/s3api_bucket_handlers.go index 8efb46014..02a01e74f 100644 --- a/weed/s3api/s3api_bucket_handlers.go +++ b/weed/s3api/s3api_bucket_handlers.go @@ -79,7 +79,7 @@ func (s3a *S3ApiServer) DeleteBucketHandler(w http.ResponseWriter, r *http.Reque vars := mux.Vars(r) bucket := vars["bucket"] - err := s3a.withFilerClient(func(client filer_pb.SeaweedFilerClient) error { + err := s3a.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { // delete collection deleteCollectionRequest := &filer_pb.DeleteCollectionRequest{ @@ -109,7 +109,7 @@ func (s3a *S3ApiServer) HeadBucketHandler(w http.ResponseWriter, r *http.Request vars := mux.Vars(r) bucket := vars["bucket"] - err := s3a.withFilerClient(func(client filer_pb.SeaweedFilerClient) error { + err := s3a.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { request := &filer_pb.LookupDirectoryEntryRequest{ Directory: s3a.option.BucketsPath, diff --git a/weed/s3api/s3api_handlers.go b/weed/s3api/s3api_handlers.go index d7212d5e3..d850cb088 100644 --- a/weed/s3api/s3api_handlers.go +++ b/weed/s3api/s3api_handlers.go @@ -38,7 +38,7 @@ func encodeResponse(response interface{}) []byte { return bytesBuffer.Bytes() } -func (s3a *S3ApiServer) withFilerClient(fn func(filer_pb.SeaweedFilerClient) error) error { +func (s3a *S3ApiServer) WithFilerClient(fn func(filer_pb.SeaweedFilerClient) error) error { return pb.WithCachedGrpcClient(func(grpcConnection *grpc.ClientConn) error { client := filer_pb.NewSeaweedFilerClient(grpcConnection) diff --git a/weed/s3api/s3api_object_handlers.go b/weed/s3api/s3api_object_handlers.go index bb3ead6f2..6a1b67d27 100644 --- a/weed/s3api/s3api_object_handlers.go +++ b/weed/s3api/s3api_object_handlers.go @@ -169,7 +169,7 @@ func (s3a *S3ApiServer) DeleteMultipleObjectsHandler(w http.ResponseWriter, r *h var deletedObjects []ObjectIdentifier var deleteErrors []DeleteError - s3a.withFilerClient(func(client filer_pb.SeaweedFilerClient) error { + s3a.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { for _, object := range deleteObjects.Objects { lastSeparator := strings.LastIndex(object.ObjectName, "/") diff --git a/weed/s3api/s3api_objects_list_handlers.go b/weed/s3api/s3api_objects_list_handlers.go index 5006df6a0..4469c03a6 100644 --- a/weed/s3api/s3api_objects_list_handlers.go +++ b/weed/s3api/s3api_objects_list_handlers.go @@ -91,7 +91,7 @@ func (s3a *S3ApiServer) listFilerEntries(bucket, originalPrefix string, 
maxKeys } // check filer - err = s3a.withFilerClient(func(client filer_pb.SeaweedFilerClient) error { + err = s3a.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { request := &filer_pb.ListEntriesRequest{ Directory: fmt.Sprintf("%s/%s/%s", s3a.option.BucketsPath, bucket, dir), From c0f0fdb3baeb6e9852c6876b23c1404b2c5e833d Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Mon, 23 Mar 2020 00:01:34 -0700 Subject: [PATCH 0301/2432] refactoring --- unmaintained/see_meta/see_meta.go | 6 +-- .../filer2/abstract_sql/abstract_sql_store.go | 10 ++--- weed/filer2/cassandra/cassandra_store.go | 10 ++--- weed/filer2/entry.go | 3 +- weed/filer2/etcd/etcd_store.go | 12 +++--- weed/filer2/filer.go | 12 +++--- weed/filer2/filer_buckets.go | 3 +- weed/filer2/filer_delete_entry.go | 3 +- weed/filer2/filer_notify_test.go | 4 +- weed/filer2/filerstore.go | 16 ++++---- weed/filer2/leveldb/leveldb_store.go | 12 +++--- weed/filer2/leveldb/leveldb_store_test.go | 12 +++--- weed/filer2/leveldb2/leveldb2_store.go | 12 +++--- weed/filer2/leveldb2/leveldb2_store_test.go | 12 +++--- weed/filer2/redis/universal_redis_store.go | 13 ++++--- weed/filer2/stream.go | 15 ++++---- weed/filesys/dir.go | 38 +++++++++---------- weed/filesys/dir_rename.go | 8 ++-- weed/filesys/file.go | 7 ++-- weed/filesys/wfs.go | 20 +++++----- weed/filesys/xattr.go | 4 +- .../filer_pb/filer_client.pb.go} | 33 ++++++---------- weed/replication/sink/filersink/filer_sink.go | 6 +-- weed/server/filer_grpc_server.go | 13 ++++--- weed/server/filer_grpc_server_rename.go | 14 ++++--- weed/server/filer_server_handlers_read.go | 3 +- weed/server/filer_server_handlers_read_dir.go | 4 +- weed/server/filer_server_handlers_write.go | 6 +-- .../filer_server_handlers_write_autochunk.go | 3 +- .../filer_server_handlers_write_cipher.go | 3 +- weed/server/webdav_server.go | 24 ++++++------ weed/shell/command_fs_cat.go | 3 +- weed/shell/command_fs_du.go | 7 ++-- weed/shell/command_fs_ls.go | 5 ++- weed/shell/command_fs_meta_cat.go | 4 +- weed/shell/command_fs_meta_load.go | 3 +- weed/shell/command_fs_meta_notify.go | 3 +- weed/shell/command_fs_meta_save.go | 13 +++---- weed/shell/command_fs_mv.go | 8 ++-- weed/shell/command_fs_tree.go | 10 ++--- weed/shell/commands.go | 4 +- weed/{filer2 => util}/fullpath.go | 6 +-- 42 files changed, 207 insertions(+), 200 deletions(-) rename weed/{filer2/filer_client_util.go => pb/filer_pb/filer_client.pb.go} (57%) rename weed/{filer2 => util}/fullpath.go (85%) diff --git a/unmaintained/see_meta/see_meta.go b/unmaintained/see_meta/see_meta.go index 0d2ac8de1..452badfd6 100644 --- a/unmaintained/see_meta/see_meta.go +++ b/unmaintained/see_meta/see_meta.go @@ -7,10 +7,10 @@ import ( "log" "os" - "github.com/chrislusf/seaweedfs/weed/filer2" + "github.com/golang/protobuf/proto" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" "github.com/chrislusf/seaweedfs/weed/util" - "github.com/golang/protobuf/proto" ) var ( @@ -58,7 +58,7 @@ func walkMetaFile(dst *os.File) error { return err } - fmt.Fprintf(os.Stdout, "file %s %v\n", filer2.FullPath(fullEntry.Dir).Child(fullEntry.Entry.Name), fullEntry.Entry.Attributes.String()) + fmt.Fprintf(os.Stdout, "file %s %v\n", util.FullPath(fullEntry.Dir).Child(fullEntry.Entry.Name), fullEntry.Entry.Attributes.String()) for i, chunk := range fullEntry.Entry.Chunks { fmt.Fprintf(os.Stdout, " chunk %d %v\n", i+1, chunk.String()) } diff --git a/weed/filer2/abstract_sql/abstract_sql_store.go b/weed/filer2/abstract_sql/abstract_sql_store.go index ff041d0a3..5ade18960 100644 --- 
a/weed/filer2/abstract_sql/abstract_sql_store.go +++ b/weed/filer2/abstract_sql/abstract_sql_store.go @@ -99,7 +99,7 @@ func (store *AbstractSqlStore) UpdateEntry(ctx context.Context, entry *filer2.En return nil } -func (store *AbstractSqlStore) FindEntry(ctx context.Context, fullpath filer2.FullPath) (*filer2.Entry, error) { +func (store *AbstractSqlStore) FindEntry(ctx context.Context, fullpath util.FullPath) (*filer2.Entry, error) { dir, name := fullpath.DirAndName() row := store.getTxOrDB(ctx).QueryRowContext(ctx, store.SqlFind, util.HashStringToLong(dir), name, dir) @@ -118,7 +118,7 @@ func (store *AbstractSqlStore) FindEntry(ctx context.Context, fullpath filer2.Fu return entry, nil } -func (store *AbstractSqlStore) DeleteEntry(ctx context.Context, fullpath filer2.FullPath) error { +func (store *AbstractSqlStore) DeleteEntry(ctx context.Context, fullpath util.FullPath) error { dir, name := fullpath.DirAndName() @@ -135,7 +135,7 @@ func (store *AbstractSqlStore) DeleteEntry(ctx context.Context, fullpath filer2. return nil } -func (store *AbstractSqlStore) DeleteFolderChildren(ctx context.Context, fullpath filer2.FullPath) error { +func (store *AbstractSqlStore) DeleteFolderChildren(ctx context.Context, fullpath util.FullPath) error { res, err := store.getTxOrDB(ctx).ExecContext(ctx, store.SqlDeleteFolderChildren, util.HashStringToLong(string(fullpath)), fullpath) if err != nil { @@ -150,7 +150,7 @@ func (store *AbstractSqlStore) DeleteFolderChildren(ctx context.Context, fullpat return nil } -func (store *AbstractSqlStore) ListDirectoryEntries(ctx context.Context, fullpath filer2.FullPath, startFileName string, inclusive bool, limit int) (entries []*filer2.Entry, err error) { +func (store *AbstractSqlStore) ListDirectoryEntries(ctx context.Context, fullpath util.FullPath, startFileName string, inclusive bool, limit int) (entries []*filer2.Entry, err error) { sqlText := store.SqlListExclusive if inclusive { @@ -172,7 +172,7 @@ func (store *AbstractSqlStore) ListDirectoryEntries(ctx context.Context, fullpat } entry := &filer2.Entry{ - FullPath: filer2.NewFullPath(string(fullpath), name), + FullPath: util.NewFullPath(string(fullpath), name), } if err = entry.DecodeAttributesAndChunks(data); err != nil { glog.V(0).Infof("scan decode %s : %v", entry.FullPath, err) diff --git a/weed/filer2/cassandra/cassandra_store.go b/weed/filer2/cassandra/cassandra_store.go index d57df23eb..5dd7d8036 100644 --- a/weed/filer2/cassandra/cassandra_store.go +++ b/weed/filer2/cassandra/cassandra_store.go @@ -75,7 +75,7 @@ func (store *CassandraStore) UpdateEntry(ctx context.Context, entry *filer2.Entr return store.InsertEntry(ctx, entry) } -func (store *CassandraStore) FindEntry(ctx context.Context, fullpath filer2.FullPath) (entry *filer2.Entry, err error) { +func (store *CassandraStore) FindEntry(ctx context.Context, fullpath util.FullPath) (entry *filer2.Entry, err error) { dir, name := fullpath.DirAndName() var data []byte @@ -102,7 +102,7 @@ func (store *CassandraStore) FindEntry(ctx context.Context, fullpath filer2.Full return entry, nil } -func (store *CassandraStore) DeleteEntry(ctx context.Context, fullpath filer2.FullPath) error { +func (store *CassandraStore) DeleteEntry(ctx context.Context, fullpath util.FullPath) error { dir, name := fullpath.DirAndName() @@ -115,7 +115,7 @@ func (store *CassandraStore) DeleteEntry(ctx context.Context, fullpath filer2.Fu return nil } -func (store *CassandraStore) DeleteFolderChildren(ctx context.Context, fullpath filer2.FullPath) error { +func (store 
*CassandraStore) DeleteFolderChildren(ctx context.Context, fullpath util.FullPath) error { if err := store.session.Query( "DELETE FROM filemeta WHERE directory=?", @@ -126,7 +126,7 @@ func (store *CassandraStore) DeleteFolderChildren(ctx context.Context, fullpath return nil } -func (store *CassandraStore) ListDirectoryEntries(ctx context.Context, fullpath filer2.FullPath, startFileName string, inclusive bool, +func (store *CassandraStore) ListDirectoryEntries(ctx context.Context, fullpath util.FullPath, startFileName string, inclusive bool, limit int) (entries []*filer2.Entry, err error) { cqlStr := "SELECT NAME, meta FROM filemeta WHERE directory=? AND name>? ORDER BY NAME ASC LIMIT ?" @@ -139,7 +139,7 @@ func (store *CassandraStore) ListDirectoryEntries(ctx context.Context, fullpath iter := store.session.Query(cqlStr, string(fullpath), startFileName, limit).Iter() for iter.Scan(&name, &data) { entry := &filer2.Entry{ - FullPath: filer2.NewFullPath(string(fullpath), name), + FullPath: util.NewFullPath(string(fullpath), name), } if decodeErr := entry.DecodeAttributesAndChunks(data); decodeErr != nil { err = decodeErr diff --git a/weed/filer2/entry.go b/weed/filer2/entry.go index c901927bb..ef6c8f9a6 100644 --- a/weed/filer2/entry.go +++ b/weed/filer2/entry.go @@ -5,6 +5,7 @@ import ( "time" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/util" ) type Attr struct { @@ -27,7 +28,7 @@ func (attr Attr) IsDirectory() bool { } type Entry struct { - FullPath + util.FullPath Attr Extended map[string][]byte diff --git a/weed/filer2/etcd/etcd_store.go b/weed/filer2/etcd/etcd_store.go index 6c352c8d0..2ef65b4a0 100644 --- a/weed/filer2/etcd/etcd_store.go +++ b/weed/filer2/etcd/etcd_store.go @@ -92,7 +92,7 @@ func (store *EtcdStore) UpdateEntry(ctx context.Context, entry *filer2.Entry) (e return store.InsertEntry(ctx, entry) } -func (store *EtcdStore) FindEntry(ctx context.Context, fullpath filer2.FullPath) (entry *filer2.Entry, err error) { +func (store *EtcdStore) FindEntry(ctx context.Context, fullpath weed_util.FullPath) (entry *filer2.Entry, err error) { key := genKey(fullpath.DirAndName()) resp, err := store.client.Get(ctx, string(key)) @@ -115,7 +115,7 @@ func (store *EtcdStore) FindEntry(ctx context.Context, fullpath filer2.FullPath) return entry, nil } -func (store *EtcdStore) DeleteEntry(ctx context.Context, fullpath filer2.FullPath) (err error) { +func (store *EtcdStore) DeleteEntry(ctx context.Context, fullpath weed_util.FullPath) (err error) { key := genKey(fullpath.DirAndName()) if _, err := store.client.Delete(ctx, string(key)); err != nil { @@ -125,7 +125,7 @@ func (store *EtcdStore) DeleteEntry(ctx context.Context, fullpath filer2.FullPat return nil } -func (store *EtcdStore) DeleteFolderChildren(ctx context.Context, fullpath filer2.FullPath) (err error) { +func (store *EtcdStore) DeleteFolderChildren(ctx context.Context, fullpath weed_util.FullPath) (err error) { directoryPrefix := genDirectoryKeyPrefix(fullpath, "") if _, err := store.client.Delete(ctx, string(directoryPrefix), clientv3.WithPrefix()); err != nil { @@ -136,7 +136,7 @@ func (store *EtcdStore) DeleteFolderChildren(ctx context.Context, fullpath filer } func (store *EtcdStore) ListDirectoryEntries( - ctx context.Context, fullpath filer2.FullPath, startFileName string, inclusive bool, limit int, + ctx context.Context, fullpath weed_util.FullPath, startFileName string, inclusive bool, limit int, ) (entries []*filer2.Entry, err error) { directoryPrefix := 
genDirectoryKeyPrefix(fullpath, "") @@ -159,7 +159,7 @@ func (store *EtcdStore) ListDirectoryEntries( break } entry := &filer2.Entry{ - FullPath: filer2.NewFullPath(string(fullpath), fileName), + FullPath: weed_util.NewFullPath(string(fullpath), fileName), } if decodeErr := entry.DecodeAttributesAndChunks(kv.Value); decodeErr != nil { err = decodeErr @@ -179,7 +179,7 @@ func genKey(dirPath, fileName string) (key []byte) { return key } -func genDirectoryKeyPrefix(fullpath filer2.FullPath, startFileName string) (keyPrefix []byte) { +func genDirectoryKeyPrefix(fullpath weed_util.FullPath, startFileName string) (keyPrefix []byte) { keyPrefix = []byte(string(fullpath)) keyPrefix = append(keyPrefix, DIR_FILE_SEPARATOR) if len(startFileName) > 0 { diff --git a/weed/filer2/filer.go b/weed/filer2/filer.go index e226552ad..0fdd4cf32 100644 --- a/weed/filer2/filer.go +++ b/weed/filer2/filer.go @@ -100,7 +100,7 @@ func (f *Filer) CreateEntry(ctx context.Context, entry *Entry, o_excl bool) erro // not found, check the store directly if dirEntry == nil { glog.V(4).Infof("find uncached directory: %s", dirPath) - dirEntry, _ = f.FindEntry(ctx, FullPath(dirPath)) + dirEntry, _ = f.FindEntry(ctx, util.FullPath(dirPath)) } else { // glog.V(4).Infof("found cached directory: %s", dirPath) } @@ -112,7 +112,7 @@ func (f *Filer) CreateEntry(ctx context.Context, entry *Entry, o_excl bool) erro now := time.Now() dirEntry = &Entry{ - FullPath: FullPath(dirPath), + FullPath: util.FullPath(dirPath), Attr: Attr{ Mtime: now, Crtime: now, @@ -127,7 +127,7 @@ func (f *Filer) CreateEntry(ctx context.Context, entry *Entry, o_excl bool) erro glog.V(2).Infof("create directory: %s %v", dirPath, dirEntry.Mode) mkdirErr := f.store.InsertEntry(ctx, dirEntry) if mkdirErr != nil { - if _, err := f.FindEntry(ctx, FullPath(dirPath)); err == filer_pb.ErrNotFound { + if _, err := f.FindEntry(ctx, util.FullPath(dirPath)); err == filer_pb.ErrNotFound { glog.V(3).Infof("mkdir %s: %v", dirPath, mkdirErr) return fmt.Errorf("mkdir %s: %v", dirPath, mkdirErr) } @@ -207,7 +207,7 @@ func (f *Filer) UpdateEntry(ctx context.Context, oldEntry, entry *Entry) (err er return f.store.UpdateEntry(ctx, entry) } -func (f *Filer) FindEntry(ctx context.Context, p FullPath) (entry *Entry, err error) { +func (f *Filer) FindEntry(ctx context.Context, p util.FullPath) (entry *Entry, err error) { now := time.Now() @@ -234,7 +234,7 @@ func (f *Filer) FindEntry(ctx context.Context, p FullPath) (entry *Entry, err er } -func (f *Filer) ListDirectoryEntries(ctx context.Context, p FullPath, startFileName string, inclusive bool, limit int) ([]*Entry, error) { +func (f *Filer) ListDirectoryEntries(ctx context.Context, p util.FullPath, startFileName string, inclusive bool, limit int) ([]*Entry, error) { if strings.HasSuffix(string(p), "/") && len(p) > 1 { p = p[0 : len(p)-1] } @@ -251,7 +251,7 @@ func (f *Filer) ListDirectoryEntries(ctx context.Context, p FullPath, startFileN return entries, err } -func (f *Filer) doListDirectoryEntries(ctx context.Context, p FullPath, startFileName string, inclusive bool, limit int) (entries []*Entry, expiredCount int, lastFileName string, err error) { +func (f *Filer) doListDirectoryEntries(ctx context.Context, p util.FullPath, startFileName string, inclusive bool, limit int) (entries []*Entry, expiredCount int, lastFileName string, err error) { listedEntries, listErr := f.store.ListDirectoryEntries(ctx, p, startFileName, inclusive, limit) if listErr != nil { return listedEntries, expiredCount, "", listErr diff --git 
a/weed/filer2/filer_buckets.go b/weed/filer2/filer_buckets.go index 601b7dbf3..3fc4afdab 100644 --- a/weed/filer2/filer_buckets.go +++ b/weed/filer2/filer_buckets.go @@ -6,6 +6,7 @@ import ( "sync" "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/util" ) type BucketName string @@ -28,7 +29,7 @@ func (f *Filer) LoadBuckets(dirBucketsPath string) { limit := math.MaxInt32 - entries, err := f.ListDirectoryEntries(context.Background(), FullPath(dirBucketsPath), "", false, limit) + entries, err := f.ListDirectoryEntries(context.Background(), util.FullPath(dirBucketsPath), "", false, limit) if err != nil { glog.V(1).Infof("no buckets found: %v", err) diff --git a/weed/filer2/filer_delete_entry.go b/weed/filer2/filer_delete_entry.go index d0792ac66..e90c97c12 100644 --- a/weed/filer2/filer_delete_entry.go +++ b/weed/filer2/filer_delete_entry.go @@ -7,9 +7,10 @@ import ( "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" "github.com/chrislusf/seaweedfs/weed/pb/master_pb" + "github.com/chrislusf/seaweedfs/weed/util" ) -func (f *Filer) DeleteEntryMetaAndData(ctx context.Context, p FullPath, isRecursive bool, ignoreRecursiveError, shouldDeleteChunks bool) (err error) { +func (f *Filer) DeleteEntryMetaAndData(ctx context.Context, p util.FullPath, isRecursive bool, ignoreRecursiveError, shouldDeleteChunks bool) (err error) { if p == "/" { return nil } diff --git a/weed/filer2/filer_notify_test.go b/weed/filer2/filer_notify_test.go index b74e2ad35..29170bfdf 100644 --- a/weed/filer2/filer_notify_test.go +++ b/weed/filer2/filer_notify_test.go @@ -5,13 +5,15 @@ import ( "time" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/util" + "github.com/golang/protobuf/proto" ) func TestProtoMarshalText(t *testing.T) { oldEntry := &Entry{ - FullPath: FullPath("/this/path/to"), + FullPath: util.FullPath("/this/path/to"), Attr: Attr{ Mtime: time.Now(), Mode: 0644, diff --git a/weed/filer2/filerstore.go b/weed/filer2/filerstore.go index e3476aa96..f36c74f14 100644 --- a/weed/filer2/filerstore.go +++ b/weed/filer2/filerstore.go @@ -17,10 +17,10 @@ type FilerStore interface { InsertEntry(context.Context, *Entry) error UpdateEntry(context.Context, *Entry) (err error) // err == filer2.ErrNotFound if not found - FindEntry(context.Context, FullPath) (entry *Entry, err error) - DeleteEntry(context.Context, FullPath) (err error) - DeleteFolderChildren(context.Context, FullPath) (err error) - ListDirectoryEntries(ctx context.Context, dirPath FullPath, startFileName string, includeStartFile bool, limit int) ([]*Entry, error) + FindEntry(context.Context, util.FullPath) (entry *Entry, err error) + DeleteEntry(context.Context, util.FullPath) (err error) + DeleteFolderChildren(context.Context, util.FullPath) (err error) + ListDirectoryEntries(ctx context.Context, dirPath util.FullPath, startFileName string, includeStartFile bool, limit int) ([]*Entry, error) BeginTransaction(ctx context.Context) (context.Context, error) CommitTransaction(ctx context.Context) error @@ -72,7 +72,7 @@ func (fsw *FilerStoreWrapper) UpdateEntry(ctx context.Context, entry *Entry) err return fsw.actualStore.UpdateEntry(ctx, entry) } -func (fsw *FilerStoreWrapper) FindEntry(ctx context.Context, fp FullPath) (entry *Entry, err error) { +func (fsw *FilerStoreWrapper) FindEntry(ctx context.Context, fp util.FullPath) (entry *Entry, err error) { stats.FilerStoreCounter.WithLabelValues(fsw.actualStore.GetName(), "find").Inc() start := time.Now() 
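// Note (not part of the original patch): every FilerStoreWrapper method in
// this hunk follows the same pattern — bump a per-operation stats counter for
// the wrapped store, record start := time.Now(), and defer a closure
// (presumably observing the elapsed time) before delegating to
// fsw.actualStore. The refactoring only changes the path type in the
// signatures; the instrumentation itself is untouched.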
defer func() { @@ -87,7 +87,7 @@ func (fsw *FilerStoreWrapper) FindEntry(ctx context.Context, fp FullPath) (entry return } -func (fsw *FilerStoreWrapper) DeleteEntry(ctx context.Context, fp FullPath) (err error) { +func (fsw *FilerStoreWrapper) DeleteEntry(ctx context.Context, fp util.FullPath) (err error) { stats.FilerStoreCounter.WithLabelValues(fsw.actualStore.GetName(), "delete").Inc() start := time.Now() defer func() { @@ -97,7 +97,7 @@ func (fsw *FilerStoreWrapper) DeleteEntry(ctx context.Context, fp FullPath) (err return fsw.actualStore.DeleteEntry(ctx, fp) } -func (fsw *FilerStoreWrapper) DeleteFolderChildren(ctx context.Context, fp FullPath) (err error) { +func (fsw *FilerStoreWrapper) DeleteFolderChildren(ctx context.Context, fp util.FullPath) (err error) { stats.FilerStoreCounter.WithLabelValues(fsw.actualStore.GetName(), "deleteFolderChildren").Inc() start := time.Now() defer func() { @@ -107,7 +107,7 @@ func (fsw *FilerStoreWrapper) DeleteFolderChildren(ctx context.Context, fp FullP return fsw.actualStore.DeleteFolderChildren(ctx, fp) } -func (fsw *FilerStoreWrapper) ListDirectoryEntries(ctx context.Context, dirPath FullPath, startFileName string, includeStartFile bool, limit int) ([]*Entry, error) { +func (fsw *FilerStoreWrapper) ListDirectoryEntries(ctx context.Context, dirPath util.FullPath, startFileName string, includeStartFile bool, limit int) ([]*Entry, error) { stats.FilerStoreCounter.WithLabelValues(fsw.actualStore.GetName(), "list").Inc() start := time.Now() defer func() { diff --git a/weed/filer2/leveldb/leveldb_store.go b/weed/filer2/leveldb/leveldb_store.go index 9ddb9bacb..f8e56d93c 100644 --- a/weed/filer2/leveldb/leveldb_store.go +++ b/weed/filer2/leveldb/leveldb_store.go @@ -89,7 +89,7 @@ func (store *LevelDBStore) UpdateEntry(ctx context.Context, entry *filer2.Entry) return store.InsertEntry(ctx, entry) } -func (store *LevelDBStore) FindEntry(ctx context.Context, fullpath filer2.FullPath) (entry *filer2.Entry, err error) { +func (store *LevelDBStore) FindEntry(ctx context.Context, fullpath weed_util.FullPath) (entry *filer2.Entry, err error) { key := genKey(fullpath.DirAndName()) data, err := store.db.Get(key, nil) @@ -114,7 +114,7 @@ func (store *LevelDBStore) FindEntry(ctx context.Context, fullpath filer2.FullPa return entry, nil } -func (store *LevelDBStore) DeleteEntry(ctx context.Context, fullpath filer2.FullPath) (err error) { +func (store *LevelDBStore) DeleteEntry(ctx context.Context, fullpath weed_util.FullPath) (err error) { key := genKey(fullpath.DirAndName()) err = store.db.Delete(key, nil) @@ -125,7 +125,7 @@ func (store *LevelDBStore) DeleteEntry(ctx context.Context, fullpath filer2.Full return nil } -func (store *LevelDBStore) DeleteFolderChildren(ctx context.Context, fullpath filer2.FullPath) (err error) { +func (store *LevelDBStore) DeleteFolderChildren(ctx context.Context, fullpath weed_util.FullPath) (err error) { batch := new(leveldb.Batch) @@ -153,7 +153,7 @@ func (store *LevelDBStore) DeleteFolderChildren(ctx context.Context, fullpath fi return nil } -func (store *LevelDBStore) ListDirectoryEntries(ctx context.Context, fullpath filer2.FullPath, startFileName string, inclusive bool, +func (store *LevelDBStore) ListDirectoryEntries(ctx context.Context, fullpath weed_util.FullPath, startFileName string, inclusive bool, limit int) (entries []*filer2.Entry, err error) { directoryPrefix := genDirectoryKeyPrefix(fullpath, "") @@ -176,7 +176,7 @@ func (store *LevelDBStore) ListDirectoryEntries(ctx context.Context, fullpath fi break } entry := 
&filer2.Entry{ - FullPath: filer2.NewFullPath(string(fullpath), fileName), + FullPath: weed_util.NewFullPath(string(fullpath), fileName), } if decodeErr := entry.DecodeAttributesAndChunks(iter.Value()); decodeErr != nil { err = decodeErr @@ -197,7 +197,7 @@ func genKey(dirPath, fileName string) (key []byte) { return key } -func genDirectoryKeyPrefix(fullpath filer2.FullPath, startFileName string) (keyPrefix []byte) { +func genDirectoryKeyPrefix(fullpath weed_util.FullPath, startFileName string) (keyPrefix []byte) { keyPrefix = []byte(string(fullpath)) keyPrefix = append(keyPrefix, DIR_FILE_SEPARATOR) if len(startFileName) > 0 { diff --git a/weed/filer2/leveldb/leveldb_store_test.go b/weed/filer2/leveldb/leveldb_store_test.go index 497158420..db291a8dc 100644 --- a/weed/filer2/leveldb/leveldb_store_test.go +++ b/weed/filer2/leveldb/leveldb_store_test.go @@ -2,10 +2,12 @@ package leveldb import ( "context" - "github.com/chrislusf/seaweedfs/weed/filer2" "io/ioutil" "os" "testing" + + "github.com/chrislusf/seaweedfs/weed/filer2" + "github.com/chrislusf/seaweedfs/weed/util" ) func TestCreateAndFind(t *testing.T) { @@ -17,7 +19,7 @@ func TestCreateAndFind(t *testing.T) { filer.SetStore(store) filer.DisableDirectoryCache() - fullpath := filer2.FullPath("/home/chris/this/is/one/file1.jpg") + fullpath := util.FullPath("/home/chris/this/is/one/file1.jpg") ctx := context.Background() @@ -48,14 +50,14 @@ func TestCreateAndFind(t *testing.T) { } // checking one upper directory - entries, _ := filer.ListDirectoryEntries(ctx, filer2.FullPath("/home/chris/this/is/one"), "", false, 100) + entries, _ := filer.ListDirectoryEntries(ctx, util.FullPath("/home/chris/this/is/one"), "", false, 100) if len(entries) != 1 { t.Errorf("list entries count: %v", len(entries)) return } // checking one upper directory - entries, _ = filer.ListDirectoryEntries(ctx, filer2.FullPath("/"), "", false, 100) + entries, _ = filer.ListDirectoryEntries(ctx, util.FullPath("/"), "", false, 100) if len(entries) != 1 { t.Errorf("list entries count: %v", len(entries)) return @@ -75,7 +77,7 @@ func TestEmptyRoot(t *testing.T) { ctx := context.Background() // checking one upper directory - entries, err := filer.ListDirectoryEntries(ctx, filer2.FullPath("/"), "", false, 100) + entries, err := filer.ListDirectoryEntries(ctx, util.FullPath("/"), "", false, 100) if err != nil { t.Errorf("list entries: %v", err) return diff --git a/weed/filer2/leveldb2/leveldb2_store.go b/weed/filer2/leveldb2/leveldb2_store.go index 1e6827356..61fd2e9e6 100644 --- a/weed/filer2/leveldb2/leveldb2_store.go +++ b/weed/filer2/leveldb2/leveldb2_store.go @@ -98,7 +98,7 @@ func (store *LevelDB2Store) UpdateEntry(ctx context.Context, entry *filer2.Entry return store.InsertEntry(ctx, entry) } -func (store *LevelDB2Store) FindEntry(ctx context.Context, fullpath filer2.FullPath) (entry *filer2.Entry, err error) { +func (store *LevelDB2Store) FindEntry(ctx context.Context, fullpath weed_util.FullPath) (entry *filer2.Entry, err error) { dir, name := fullpath.DirAndName() key, partitionId := genKey(dir, name, store.dbCount) @@ -124,7 +124,7 @@ func (store *LevelDB2Store) FindEntry(ctx context.Context, fullpath filer2.FullP return entry, nil } -func (store *LevelDB2Store) DeleteEntry(ctx context.Context, fullpath filer2.FullPath) (err error) { +func (store *LevelDB2Store) DeleteEntry(ctx context.Context, fullpath weed_util.FullPath) (err error) { dir, name := fullpath.DirAndName() key, partitionId := genKey(dir, name, store.dbCount) @@ -136,7 +136,7 @@ func (store 
*LevelDB2Store) DeleteEntry(ctx context.Context, fullpath filer2.Ful return nil } -func (store *LevelDB2Store) DeleteFolderChildren(ctx context.Context, fullpath filer2.FullPath) (err error) { +func (store *LevelDB2Store) DeleteFolderChildren(ctx context.Context, fullpath weed_util.FullPath) (err error) { directoryPrefix, partitionId := genDirectoryKeyPrefix(fullpath, "", store.dbCount) batch := new(leveldb.Batch) @@ -164,7 +164,7 @@ func (store *LevelDB2Store) DeleteFolderChildren(ctx context.Context, fullpath f return nil } -func (store *LevelDB2Store) ListDirectoryEntries(ctx context.Context, fullpath filer2.FullPath, startFileName string, inclusive bool, +func (store *LevelDB2Store) ListDirectoryEntries(ctx context.Context, fullpath weed_util.FullPath, startFileName string, inclusive bool, limit int) (entries []*filer2.Entry, err error) { directoryPrefix, partitionId := genDirectoryKeyPrefix(fullpath, "", store.dbCount) @@ -188,7 +188,7 @@ func (store *LevelDB2Store) ListDirectoryEntries(ctx context.Context, fullpath f break } entry := &filer2.Entry{ - FullPath: filer2.NewFullPath(string(fullpath), fileName), + FullPath: weed_util.NewFullPath(string(fullpath), fileName), } // println("list", entry.FullPath, "chunks", len(entry.Chunks)) @@ -211,7 +211,7 @@ func genKey(dirPath, fileName string, dbCount int) (key []byte, partitionId int) return key, partitionId } -func genDirectoryKeyPrefix(fullpath filer2.FullPath, startFileName string, dbCount int) (keyPrefix []byte, partitionId int) { +func genDirectoryKeyPrefix(fullpath weed_util.FullPath, startFileName string, dbCount int) (keyPrefix []byte, partitionId int) { keyPrefix, partitionId = hashToBytes(string(fullpath), dbCount) if len(startFileName) > 0 { keyPrefix = append(keyPrefix, []byte(startFileName)...) 
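The long series of filer2.FullPath to util.FullPath edits in this patch is a package move, not a behavior change: the path type now lives in weed/util, where the filer stores, the FUSE mount, the shell commands, and the relocated filer_pb client helpers can all share it. For orientation, here is a minimal sketch of the operations the touched call sites rely on (NewFullPath, Child, DirAndName); the names come straight from the diffs, but the bodies are illustrative and the real implementation in weed/util may differ in detail.

package util

import "strings"

// FullPath is a slash-separated absolute path such as "/home/chris/file1.jpg".
type FullPath string

// NewFullPath joins a parent directory and a name into one FullPath.
func NewFullPath(dir, name string) FullPath {
	return FullPath(dir).Child(name)
}

// Child appends one path element, keeping exactly one separator.
func (fp FullPath) Child(name string) FullPath {
	dir := string(fp)
	if strings.HasSuffix(dir, "/") {
		return FullPath(dir + name)
	}
	return FullPath(dir + "/" + name)
}

// DirAndName splits the path into its parent directory and base name,
// which is how the stores derive their keys (see genKey above).
func (fp FullPath) DirAndName() (string, string) {
	p := string(fp)
	idx := strings.LastIndex(p, "/")
	if idx <= 0 {
		return "/", p[idx+1:]
	}
	return p[:idx], p[idx+1:]
}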
diff --git a/weed/filer2/leveldb2/leveldb2_store_test.go b/weed/filer2/leveldb2/leveldb2_store_test.go index dc94f2ac7..1fe76f8ee 100644 --- a/weed/filer2/leveldb2/leveldb2_store_test.go +++ b/weed/filer2/leveldb2/leveldb2_store_test.go @@ -2,10 +2,12 @@ package leveldb import ( "context" - "github.com/chrislusf/seaweedfs/weed/filer2" "io/ioutil" "os" "testing" + + "github.com/chrislusf/seaweedfs/weed/filer2" + "github.com/chrislusf/seaweedfs/weed/util" ) func TestCreateAndFind(t *testing.T) { @@ -17,7 +19,7 @@ func TestCreateAndFind(t *testing.T) { filer.SetStore(store) filer.DisableDirectoryCache() - fullpath := filer2.FullPath("/home/chris/this/is/one/file1.jpg") + fullpath := util.FullPath("/home/chris/this/is/one/file1.jpg") ctx := context.Background() @@ -48,14 +50,14 @@ func TestCreateAndFind(t *testing.T) { } // checking one upper directory - entries, _ := filer.ListDirectoryEntries(ctx, filer2.FullPath("/home/chris/this/is/one"), "", false, 100) + entries, _ := filer.ListDirectoryEntries(ctx, util.FullPath("/home/chris/this/is/one"), "", false, 100) if len(entries) != 1 { t.Errorf("list entries count: %v", len(entries)) return } // checking one upper directory - entries, _ = filer.ListDirectoryEntries(ctx, filer2.FullPath("/"), "", false, 100) + entries, _ = filer.ListDirectoryEntries(ctx, util.FullPath("/"), "", false, 100) if len(entries) != 1 { t.Errorf("list entries count: %v", len(entries)) return @@ -75,7 +77,7 @@ func TestEmptyRoot(t *testing.T) { ctx := context.Background() // checking one upper directory - entries, err := filer.ListDirectoryEntries(ctx, filer2.FullPath("/"), "", false, 100) + entries, err := filer.ListDirectoryEntries(ctx, util.FullPath("/"), "", false, 100) if err != nil { t.Errorf("list entries: %v", err) return diff --git a/weed/filer2/redis/universal_redis_store.go b/weed/filer2/redis/universal_redis_store.go index c9f59d37b..e5b9e8840 100644 --- a/weed/filer2/redis/universal_redis_store.go +++ b/weed/filer2/redis/universal_redis_store.go @@ -12,6 +12,7 @@ import ( "github.com/chrislusf/seaweedfs/weed/filer2" "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/util" ) const ( @@ -61,7 +62,7 @@ func (store *UniversalRedisStore) UpdateEntry(ctx context.Context, entry *filer2 return store.InsertEntry(ctx, entry) } -func (store *UniversalRedisStore) FindEntry(ctx context.Context, fullpath filer2.FullPath) (entry *filer2.Entry, err error) { +func (store *UniversalRedisStore) FindEntry(ctx context.Context, fullpath util.FullPath) (entry *filer2.Entry, err error) { data, err := store.Client.Get(string(fullpath)).Result() if err == redis.Nil { @@ -83,7 +84,7 @@ func (store *UniversalRedisStore) FindEntry(ctx context.Context, fullpath filer2 return entry, nil } -func (store *UniversalRedisStore) DeleteEntry(ctx context.Context, fullpath filer2.FullPath) (err error) { +func (store *UniversalRedisStore) DeleteEntry(ctx context.Context, fullpath util.FullPath) (err error) { _, err = store.Client.Del(string(fullpath)).Result() @@ -102,7 +103,7 @@ func (store *UniversalRedisStore) DeleteEntry(ctx context.Context, fullpath file return nil } -func (store *UniversalRedisStore) DeleteFolderChildren(ctx context.Context, fullpath filer2.FullPath) (err error) { +func (store *UniversalRedisStore) DeleteFolderChildren(ctx context.Context, fullpath util.FullPath) (err error) { members, err := store.Client.SMembers(genDirectoryListKey(string(fullpath))).Result() if err != nil { @@ -110,7 +111,7 @@ 
func (store *UniversalRedisStore) DeleteFolderChildren(ctx context.Context, full } for _, fileName := range members { - path := filer2.NewFullPath(string(fullpath), fileName) + path := util.NewFullPath(string(fullpath), fileName) _, err = store.Client.Del(string(path)).Result() if err != nil { return fmt.Errorf("delete %s in parent dir: %v", fullpath, err) @@ -120,7 +121,7 @@ func (store *UniversalRedisStore) DeleteFolderChildren(ctx context.Context, full return nil } -func (store *UniversalRedisStore) ListDirectoryEntries(ctx context.Context, fullpath filer2.FullPath, startFileName string, inclusive bool, +func (store *UniversalRedisStore) ListDirectoryEntries(ctx context.Context, fullpath util.FullPath, startFileName string, inclusive bool, limit int) (entries []*filer2.Entry, err error) { dirListKey := genDirectoryListKey(string(fullpath)) @@ -158,7 +159,7 @@ func (store *UniversalRedisStore) ListDirectoryEntries(ctx context.Context, full // fetch entry meta for _, fileName := range members { - path := filer2.NewFullPath(string(fullpath), fileName) + path := util.NewFullPath(string(fullpath), fileName) entry, err := store.FindEntry(ctx, path) if err != nil { glog.V(0).Infof("list %s : %v", path, err) diff --git a/weed/filer2/stream.go b/weed/filer2/stream.go index 8819070ff..bb24312fd 100644 --- a/weed/filer2/stream.go +++ b/weed/filer2/stream.go @@ -71,13 +71,13 @@ func NewChunkStreamReaderFromFiler(masterClient *wdclient.MasterClient, chunks [ } } -func NewChunkStreamReaderFromClient(filerClient FilerClient, chunkViews []*ChunkView) *ChunkStreamReader { +func NewChunkStreamReaderFromClient(filerClient filer_pb.FilerClient, chunkViews []*ChunkView) *ChunkStreamReader { return &ChunkStreamReader{ chunkViews: chunkViews, lookupFileId: func(fileId string) (targetUrl string, err error) { err = filerClient.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { - vid := fileIdToVolumeId(fileId) + vid := VolumeId(fileId) resp, err := client.LookupVolume(context.Background(), &filer_pb.LookupVolumeRequest{ VolumeIds: []string{vid}, }) @@ -178,10 +178,11 @@ func (c *ChunkStreamReader) fetchChunkToBuffer(chunkView *ChunkView) error { return nil } -func fileIdToVolumeId(fileId string) (volumeId string) { - parts := strings.Split(fileId, ",") - if len(parts) != 2 { - return fileId +func VolumeId(fileId string) string { + lastCommaIndex := strings.LastIndex(fileId, ",") + if lastCommaIndex > 0 { + return fileId[:lastCommaIndex] } - return parts[0] + return fileId } + diff --git a/weed/filesys/dir.go b/weed/filesys/dir.go index 483229b3f..7781533c8 100644 --- a/weed/filesys/dir.go +++ b/weed/filesys/dir.go @@ -6,9 +6,9 @@ import ( "strings" "time" - "github.com/chrislusf/seaweedfs/weed/filer2" "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/util" "github.com/seaweedfs/fuse" "github.com/seaweedfs/fuse/fs" ) @@ -49,7 +49,7 @@ func (dir *Dir) Attr(ctx context.Context, attr *fuse.Attr) error { return err } - attr.Inode = filer2.FullPath(dir.Path).AsInode() + attr.Inode = util.FullPath(dir.Path).AsInode() attr.Mode = os.FileMode(dir.entry.Attributes.FileMode) | os.ModeDir attr.Mtime = time.Unix(dir.entry.Attributes.Mtime, 0) attr.Crtime = time.Unix(dir.entry.Attributes.Crtime, 0) @@ -86,7 +86,7 @@ func (dir *Dir) setRootDirAttributes(attr *fuse.Attr) { } func (dir *Dir) newFile(name string, entry *filer_pb.Entry) fs.Node { - return dir.wfs.getNode(filer2.NewFullPath(dir.Path, name), func() fs.Node { + return 
dir.wfs.getNode(util.NewFullPath(dir.Path, name), func() fs.Node { return &File{ Name: name, dir: dir, @@ -97,7 +97,7 @@ func (dir *Dir) newFile(name string, entry *filer_pb.Entry) fs.Node { }) } -func (dir *Dir) newDirectory(fullpath filer2.FullPath, entry *filer_pb.Entry) fs.Node { +func (dir *Dir) newDirectory(fullpath util.FullPath, entry *filer_pb.Entry) fs.Node { return dir.wfs.getNode(fullpath, func() fs.Node { return &Dir{Path: string(fullpath), wfs: dir.wfs, entry: entry} }) @@ -139,7 +139,7 @@ func (dir *Dir) Create(ctx context.Context, req *fuse.CreateRequest, } var node fs.Node if request.Entry.IsDirectory { - node = dir.newDirectory(filer2.NewFullPath(dir.Path, req.Name), request.Entry) + node = dir.newDirectory(util.NewFullPath(dir.Path, req.Name), request.Entry) return node, nil, nil } @@ -182,7 +182,7 @@ func (dir *Dir) Mkdir(ctx context.Context, req *fuse.MkdirRequest) (fs.Node, err }) if err == nil { - node := dir.newDirectory(filer2.NewFullPath(dir.Path, req.Name), newEntry) + node := dir.newDirectory(util.NewFullPath(dir.Path, req.Name), newEntry) return node, nil } @@ -193,12 +193,12 @@ func (dir *Dir) Lookup(ctx context.Context, req *fuse.LookupRequest, resp *fuse. glog.V(4).Infof("dir Lookup %s: %s", dir.Path, req.Name) - fullFilePath := filer2.NewFullPath(dir.Path, req.Name) + fullFilePath := util.NewFullPath(dir.Path, req.Name) entry := dir.wfs.cacheGet(fullFilePath) if entry == nil { // glog.V(3).Infof("dir Lookup cache miss %s", fullFilePath) - entry, err = filer2.GetEntry(dir.wfs, fullFilePath) + entry, err = filer_pb.GetEntry(dir.wfs, fullFilePath) if err != nil { glog.V(1).Infof("dir GetEntry %s: %v", fullFilePath, err) return nil, fuse.ENOENT @@ -237,8 +237,8 @@ func (dir *Dir) ReadDirAll(ctx context.Context) (ret []fuse.Dirent, err error) { cacheTtl := 5 * time.Minute - readErr := filer2.ReadDirAllEntries(dir.wfs, filer2.FullPath(dir.Path), "", func(entry *filer_pb.Entry, isLast bool) { - fullpath := filer2.NewFullPath(dir.Path, entry.Name) + readErr := filer_pb.ReadDirAllEntries(dir.wfs, util.FullPath(dir.Path), "", func(entry *filer_pb.Entry, isLast bool) { + fullpath := util.NewFullPath(dir.Path, entry.Name) inode := fullpath.AsInode() if entry.IsDirectory { dirent := fuse.Dirent{Inode: inode, Name: entry.Name, Type: fuse.DT_Dir} @@ -269,8 +269,8 @@ func (dir *Dir) Remove(ctx context.Context, req *fuse.RemoveRequest) error { func (dir *Dir) removeOneFile(req *fuse.RemoveRequest) error { - filePath := filer2.NewFullPath(dir.Path, req.Name) - entry, err := filer2.GetEntry(dir.wfs, filePath) + filePath := util.NewFullPath(dir.Path, req.Name) + entry, err := filer_pb.GetEntry(dir.wfs, filePath) if err != nil { return err } @@ -304,7 +304,7 @@ func (dir *Dir) removeOneFile(req *fuse.RemoveRequest) error { func (dir *Dir) removeFolder(req *fuse.RemoveRequest) error { - dir.wfs.cacheDelete(filer2.NewFullPath(dir.Path, req.Name)) + dir.wfs.cacheDelete(util.NewFullPath(dir.Path, req.Name)) return dir.wfs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { @@ -350,7 +350,7 @@ func (dir *Dir) Setattr(ctx context.Context, req *fuse.SetattrRequest, resp *fus dir.entry.Attributes.Mtime = req.Mtime.Unix() } - dir.wfs.cacheDelete(filer2.FullPath(dir.Path)) + dir.wfs.cacheDelete(util.FullPath(dir.Path)) return dir.saveEntry() @@ -368,7 +368,7 @@ func (dir *Dir) Setxattr(ctx context.Context, req *fuse.SetxattrRequest) error { return err } - dir.wfs.cacheDelete(filer2.FullPath(dir.Path)) + dir.wfs.cacheDelete(util.FullPath(dir.Path)) return dir.saveEntry() @@ 
-386,7 +386,7 @@ func (dir *Dir) Removexattr(ctx context.Context, req *fuse.RemovexattrRequest) e return err } - dir.wfs.cacheDelete(filer2.FullPath(dir.Path)) + dir.wfs.cacheDelete(util.FullPath(dir.Path)) return dir.saveEntry() @@ -411,12 +411,12 @@ func (dir *Dir) Listxattr(ctx context.Context, req *fuse.ListxattrRequest, resp func (dir *Dir) Forget() { glog.V(3).Infof("Forget dir %s", dir.Path) - dir.wfs.forgetNode(filer2.FullPath(dir.Path)) + dir.wfs.forgetNode(util.FullPath(dir.Path)) } func (dir *Dir) maybeLoadEntry() error { if dir.entry == nil { - parentDirPath, name := filer2.FullPath(dir.Path).DirAndName() + parentDirPath, name := util.FullPath(dir.Path).DirAndName() entry, err := dir.wfs.maybeLoadEntry(parentDirPath, name) if err != nil { return err @@ -428,7 +428,7 @@ func (dir *Dir) maybeLoadEntry() error { func (dir *Dir) saveEntry() error { - parentDir, name := filer2.FullPath(dir.Path).DirAndName() + parentDir, name := util.FullPath(dir.Path).DirAndName() return dir.wfs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { diff --git a/weed/filesys/dir_rename.go b/weed/filesys/dir_rename.go index 1a220a063..9595ea955 100644 --- a/weed/filesys/dir_rename.go +++ b/weed/filesys/dir_rename.go @@ -3,9 +3,9 @@ package filesys import ( "context" - "github.com/chrislusf/seaweedfs/weed/filer2" "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/util" "github.com/seaweedfs/fuse" "github.com/seaweedfs/fuse/fs" ) @@ -35,15 +35,15 @@ func (dir *Dir) Rename(ctx context.Context, req *fuse.RenameRequest, newDirector }) if err == nil { - newPath := filer2.NewFullPath(newDir.Path, req.NewName) - oldPath := filer2.NewFullPath(dir.Path, req.OldName) + newPath := util.NewFullPath(newDir.Path, req.NewName) + oldPath := util.NewFullPath(dir.Path, req.OldName) dir.wfs.cacheDelete(newPath) dir.wfs.cacheDelete(oldPath) oldFileNode := dir.wfs.getNode(oldPath, func() fs.Node { return nil }) - newDirNode := dir.wfs.getNode(filer2.FullPath(newDir.Path), func() fs.Node { + newDirNode := dir.wfs.getNode(util.FullPath(newDir.Path), func() fs.Node { return nil }) // fmt.Printf("new path: %v dir: %v node:%+v\n", newPath, newDir.Path, newDirNode) diff --git a/weed/filesys/file.go b/weed/filesys/file.go index adafc54d7..e740f2747 100644 --- a/weed/filesys/file.go +++ b/weed/filesys/file.go @@ -10,6 +10,7 @@ import ( "github.com/chrislusf/seaweedfs/weed/filer2" "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/util" "github.com/seaweedfs/fuse" "github.com/seaweedfs/fuse/fs" ) @@ -36,8 +37,8 @@ type File struct { reader io.ReadSeeker } -func (file *File) fullpath() filer2.FullPath { - return filer2.NewFullPath(file.dir.Path, file.Name) +func (file *File) fullpath() util.FullPath { + return util.NewFullPath(file.dir.Path, file.Name) } func (file *File) Attr(ctx context.Context, attr *fuse.Attr) error { @@ -218,7 +219,7 @@ func (file *File) Fsync(ctx context.Context, req *fuse.FsyncRequest) error { func (file *File) Forget() { glog.V(3).Infof("Forget file %s/%s", file.dir.Path, file.Name) - file.wfs.forgetNode(filer2.NewFullPath(file.dir.Path, file.Name)) + file.wfs.forgetNode(util.NewFullPath(file.dir.Path, file.Name)) } diff --git a/weed/filesys/wfs.go b/weed/filesys/wfs.go index 382617384..a7475da56 100644 --- a/weed/filesys/wfs.go +++ b/weed/filesys/wfs.go @@ -12,10 +12,10 @@ import ( "github.com/karlseguin/ccache" "google.golang.org/grpc" 
- "github.com/chrislusf/seaweedfs/weed/filer2" "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/pb" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/util" "github.com/seaweedfs/fuse" "github.com/seaweedfs/fuse/fs" ) @@ -54,7 +54,7 @@ type WFS struct { // contains all open handles, protected by handlesLock handlesLock sync.Mutex handles []*FileHandle - pathToHandleIndex map[filer2.FullPath]int + pathToHandleIndex map[util.FullPath]int bufPool sync.Pool @@ -74,7 +74,7 @@ func NewSeaweedFileSystem(option *Option) *WFS { wfs := &WFS{ option: option, listDirectoryEntriesCache: ccache.New(ccache.Configure().MaxSize(option.DirListCacheLimit * 3).ItemsToPrune(100)), - pathToHandleIndex: make(map[filer2.FullPath]int), + pathToHandleIndex: make(map[util.FullPath]int), bufPool: sync.Pool{ New: func() interface{} { return make([]byte, option.ChunkSizeLimit) @@ -84,7 +84,7 @@ func NewSeaweedFileSystem(option *Option) *WFS { } wfs.root = &Dir{Path: wfs.option.FilerMountRootPath, wfs: wfs} - wfs.getNode(filer2.FullPath(wfs.option.FilerMountRootPath), func() fs.Node { + wfs.getNode(util.FullPath(wfs.option.FilerMountRootPath), func() fs.Node { return wfs.root }) @@ -142,7 +142,7 @@ func (wfs *WFS) AcquireHandle(file *File, uid, gid uint32) (fileHandle *FileHand return } -func (wfs *WFS) ReleaseHandle(fullpath filer2.FullPath, handleId fuse.HandleID) { +func (wfs *WFS) ReleaseHandle(fullpath util.FullPath, handleId fuse.HandleID) { wfs.handlesLock.Lock() defer wfs.handlesLock.Unlock() @@ -217,25 +217,25 @@ func (wfs *WFS) Statfs(ctx context.Context, req *fuse.StatfsRequest, resp *fuse. return nil } -func (wfs *WFS) cacheGet(path filer2.FullPath) *filer_pb.Entry { +func (wfs *WFS) cacheGet(path util.FullPath) *filer_pb.Entry { item := wfs.listDirectoryEntriesCache.Get(string(path)) if item != nil && !item.Expired() { return item.Value().(*filer_pb.Entry) } return nil } -func (wfs *WFS) cacheSet(path filer2.FullPath, entry *filer_pb.Entry, ttl time.Duration) { +func (wfs *WFS) cacheSet(path util.FullPath, entry *filer_pb.Entry, ttl time.Duration) { if entry == nil { wfs.listDirectoryEntriesCache.Delete(string(path)) } else { wfs.listDirectoryEntriesCache.Set(string(path), entry, ttl) } } -func (wfs *WFS) cacheDelete(path filer2.FullPath) { +func (wfs *WFS) cacheDelete(path util.FullPath) { wfs.listDirectoryEntriesCache.Delete(string(path)) } -func (wfs *WFS) getNode(fullpath filer2.FullPath, fn func() fs.Node) fs.Node { +func (wfs *WFS) getNode(fullpath util.FullPath, fn func() fs.Node) fs.Node { wfs.nodesLock.Lock() defer wfs.nodesLock.Unlock() @@ -250,7 +250,7 @@ func (wfs *WFS) getNode(fullpath filer2.FullPath, fn func() fs.Node) fs.Node { return node } -func (wfs *WFS) forgetNode(fullpath filer2.FullPath) { +func (wfs *WFS) forgetNode(fullpath util.FullPath) { wfs.nodesLock.Lock() defer wfs.nodesLock.Unlock() diff --git a/weed/filesys/xattr.go b/weed/filesys/xattr.go index af154a7ee..a9848fbe7 100644 --- a/weed/filesys/xattr.go +++ b/weed/filesys/xattr.go @@ -1,9 +1,9 @@ package filesys import ( - "github.com/chrislusf/seaweedfs/weed/filer2" "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/util" "github.com/seaweedfs/fuse" ) @@ -107,7 +107,7 @@ func listxattr(entry *filer_pb.Entry, req *fuse.ListxattrRequest, resp *fuse.Lis func (wfs *WFS) maybeLoadEntry(dir, name string) (entry *filer_pb.Entry, err error) { - fullpath := filer2.NewFullPath(dir, name) + 
fullpath := util.NewFullPath(dir, name) entry = wfs.cacheGet(fullpath) if entry != nil { return diff --git a/weed/filer2/filer_client_util.go b/weed/pb/filer_pb/filer_client.pb.go similarity index 57% rename from weed/filer2/filer_client_util.go rename to weed/pb/filer_pb/filer_client.pb.go index 60b4dec18..b2be614af 100644 --- a/weed/filer2/filer_client_util.go +++ b/weed/pb/filer_pb/filer_client.pb.go @@ -1,44 +1,35 @@ -package filer2 +package filer_pb import ( "context" "fmt" "io" "math" - "strings" "github.com/chrislusf/seaweedfs/weed/glog" - "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/util" ) -func VolumeId(fileId string) string { - lastCommaIndex := strings.LastIndex(fileId, ",") - if lastCommaIndex > 0 { - return fileId[:lastCommaIndex] - } - return fileId -} - type FilerClient interface { - WithFilerClient(fn func(filer_pb.SeaweedFilerClient) error) error + WithFilerClient(fn func(SeaweedFilerClient) error) error AdjustedUrl(hostAndPort string) string } -func GetEntry(filerClient FilerClient, fullFilePath FullPath) (entry *filer_pb.Entry, err error) { +func GetEntry(filerClient FilerClient, fullFilePath util.FullPath) (entry *Entry, err error) { dir, name := fullFilePath.DirAndName() - err = filerClient.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { + err = filerClient.WithFilerClient(func(client SeaweedFilerClient) error { - request := &filer_pb.LookupDirectoryEntryRequest{ + request := &LookupDirectoryEntryRequest{ Directory: dir, Name: name, } // glog.V(3).Infof("read %s request: %v", fullFilePath, request) - resp, err := filer_pb.LookupEntry(client, request) + resp, err := LookupEntry(client, request) if err != nil { - if err == filer_pb.ErrNotFound { + if err == ErrNotFound { return nil } glog.V(3).Infof("read %s %v: %v", fullFilePath, resp, err) @@ -57,13 +48,13 @@ func GetEntry(filerClient FilerClient, fullFilePath FullPath) (entry *filer_pb.E return } -func ReadDirAllEntries(filerClient FilerClient, fullDirPath FullPath, prefix string, fn func(entry *filer_pb.Entry, isLast bool)) (err error) { +func ReadDirAllEntries(filerClient FilerClient, fullDirPath util.FullPath, prefix string, fn func(entry *Entry, isLast bool)) (err error) { - err = filerClient.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { + err = filerClient.WithFilerClient(func(client SeaweedFilerClient) error { lastEntryName := "" - request := &filer_pb.ListEntriesRequest{ + request := &ListEntriesRequest{ Directory: string(fullDirPath), Prefix: prefix, StartFromFileName: lastEntryName, @@ -76,7 +67,7 @@ func ReadDirAllEntries(filerClient FilerClient, fullDirPath FullPath, prefix str return fmt.Errorf("list %s: %v", fullDirPath, err) } - var prevEntry *filer_pb.Entry + var prevEntry *Entry for { resp, recvErr := stream.Recv() if recvErr != nil { diff --git a/weed/replication/sink/filersink/filer_sink.go b/weed/replication/sink/filersink/filer_sink.go index 838c2c441..ffce853b8 100644 --- a/weed/replication/sink/filersink/filer_sink.go +++ b/weed/replication/sink/filersink/filer_sink.go @@ -67,7 +67,7 @@ func (fs *FilerSink) initialize(grpcAddress string, dir string, func (fs *FilerSink) DeleteEntry(key string, isDirectory, deleteIncludeChunks bool) error { return fs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error { - dir, name := filer2.FullPath(key).DirAndName() + dir, name := util.FullPath(key).DirAndName() request := &filer_pb.DeleteEntryRequest{ Directory: dir, @@ -90,7 +90,7 @@ func (fs *FilerSink) CreateEntry(key 
string, entry *filer_pb.Entry) error { return fs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error { - dir, name := filer2.FullPath(key).DirAndName() + dir, name := util.FullPath(key).DirAndName() // look up existing entry lookupRequest := &filer_pb.LookupDirectoryEntryRequest{ @@ -136,7 +136,7 @@ func (fs *FilerSink) CreateEntry(key string, entry *filer_pb.Entry) error { func (fs *FilerSink) UpdateEntry(key string, oldEntry *filer_pb.Entry, newParentPath string, newEntry *filer_pb.Entry, deleteIncludeChunks bool) (foundExistingEntry bool, err error) { - dir, name := filer2.FullPath(key).DirAndName() + dir, name := util.FullPath(key).DirAndName() // read existing entry var existingEntry *filer_pb.Entry diff --git a/weed/server/filer_grpc_server.go b/weed/server/filer_grpc_server.go index 2ba2db132..abdfb1e46 100644 --- a/weed/server/filer_grpc_server.go +++ b/weed/server/filer_grpc_server.go @@ -14,11 +14,12 @@ import ( "github.com/chrislusf/seaweedfs/weed/operation" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" "github.com/chrislusf/seaweedfs/weed/pb/master_pb" + "github.com/chrislusf/seaweedfs/weed/util" ) func (fs *FilerServer) LookupDirectoryEntry(ctx context.Context, req *filer_pb.LookupDirectoryEntryRequest) (*filer_pb.LookupDirectoryEntryResponse, error) { - entry, err := fs.filer.FindEntry(ctx, filer2.FullPath(filepath.ToSlash(filepath.Join(req.Directory, req.Name)))) + entry, err := fs.filer.FindEntry(ctx, util.FullPath(filepath.ToSlash(filepath.Join(req.Directory, req.Name)))) if err == filer_pb.ErrNotFound { return &filer_pb.LookupDirectoryEntryResponse{}, nil } @@ -53,7 +54,7 @@ func (fs *FilerServer) ListEntries(req *filer_pb.ListEntriesRequest, stream file lastFileName := req.StartFromFileName includeLastFile := req.InclusiveStartFrom for limit > 0 { - entries, err := fs.filer.ListDirectoryEntries(stream.Context(), filer2.FullPath(req.Directory), lastFileName, includeLastFile, paginationLimit) + entries, err := fs.filer.ListDirectoryEntries(stream.Context(), util.FullPath(req.Directory), lastFileName, includeLastFile, paginationLimit) if err != nil { return err @@ -136,7 +137,7 @@ func (fs *FilerServer) CreateEntry(ctx context.Context, req *filer_pb.CreateEntr resp = &filer_pb.CreateEntryResponse{} - fullpath := filer2.FullPath(filepath.ToSlash(filepath.Join(req.Directory, req.Entry.Name))) + fullpath := util.FullPath(filepath.ToSlash(filepath.Join(req.Directory, req.Entry.Name))) chunks, garbages := filer2.CompactFileChunks(req.Entry.Chunks) if req.Entry.Attributes == nil { @@ -164,7 +165,7 @@ func (fs *FilerServer) CreateEntry(ctx context.Context, req *filer_pb.CreateEntr func (fs *FilerServer) UpdateEntry(ctx context.Context, req *filer_pb.UpdateEntryRequest) (*filer_pb.UpdateEntryResponse, error) { fullpath := filepath.ToSlash(filepath.Join(req.Directory, req.Entry.Name)) - entry, err := fs.filer.FindEntry(ctx, filer2.FullPath(fullpath)) + entry, err := fs.filer.FindEntry(ctx, util.FullPath(fullpath)) if err != nil { return &filer_pb.UpdateEntryResponse{}, fmt.Errorf("not found %s: %v", fullpath, err) } @@ -175,7 +176,7 @@ func (fs *FilerServer) UpdateEntry(ctx context.Context, req *filer_pb.UpdateEntr chunks, garbages := filer2.CompactFileChunks(req.Entry.Chunks) newEntry := &filer2.Entry{ - FullPath: filer2.FullPath(filepath.ToSlash(filepath.Join(req.Directory, req.Entry.Name))), + FullPath: util.FullPath(filepath.ToSlash(filepath.Join(req.Directory, req.Entry.Name))), Attr: entry.Attr, Extended: req.Entry.Extended, Chunks: chunks, @@ -218,7 +219,7 @@ 
func (fs *FilerServer) UpdateEntry(ctx context.Context, req *filer_pb.UpdateEntr } func (fs *FilerServer) DeleteEntry(ctx context.Context, req *filer_pb.DeleteEntryRequest) (resp *filer_pb.DeleteEntryResponse, err error) { - err = fs.filer.DeleteEntryMetaAndData(ctx, filer2.FullPath(filepath.ToSlash(filepath.Join(req.Directory, req.Name))), req.IsRecursive, req.IgnoreRecursiveError, req.IsDeleteData) + err = fs.filer.DeleteEntryMetaAndData(ctx, util.FullPath(filepath.ToSlash(filepath.Join(req.Directory, req.Name))), req.IsRecursive, req.IgnoreRecursiveError, req.IsDeleteData) resp = &filer_pb.DeleteEntryResponse{} if err != nil { resp.Error = err.Error() diff --git a/weed/server/filer_grpc_server_rename.go b/weed/server/filer_grpc_server_rename.go index 0669a26f1..d09885d95 100644 --- a/weed/server/filer_grpc_server_rename.go +++ b/weed/server/filer_grpc_server_rename.go @@ -3,10 +3,12 @@ package weed_server import ( "context" "fmt" + "path/filepath" + "github.com/chrislusf/seaweedfs/weed/filer2" "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" - "path/filepath" + "github.com/chrislusf/seaweedfs/weed/util" ) func (fs *FilerServer) AtomicRenameEntry(ctx context.Context, req *filer_pb.AtomicRenameEntryRequest) (*filer_pb.AtomicRenameEntryResponse, error) { @@ -18,7 +20,7 @@ func (fs *FilerServer) AtomicRenameEntry(ctx context.Context, req *filer_pb.Atom return nil, err } - oldParent := filer2.FullPath(filepath.ToSlash(req.OldDirectory)) + oldParent := util.FullPath(filepath.ToSlash(req.OldDirectory)) oldEntry, err := fs.filer.FindEntry(ctx, oldParent.Child(req.OldName)) if err != nil { @@ -27,7 +29,7 @@ func (fs *FilerServer) AtomicRenameEntry(ctx context.Context, req *filer_pb.Atom } var events MoveEvents - moveErr := fs.moveEntry(ctx, oldParent, oldEntry, filer2.FullPath(filepath.ToSlash(req.NewDirectory)), req.NewName, &events) + moveErr := fs.moveEntry(ctx, oldParent, oldEntry, util.FullPath(filepath.ToSlash(req.NewDirectory)), req.NewName, &events) if moveErr != nil { fs.filer.RollbackTransaction(ctx) return nil, fmt.Errorf("%s/%s move error: %v", req.OldDirectory, req.OldName, err) @@ -48,7 +50,7 @@ func (fs *FilerServer) AtomicRenameEntry(ctx context.Context, req *filer_pb.Atom return &filer_pb.AtomicRenameEntryResponse{}, nil } -func (fs *FilerServer) moveEntry(ctx context.Context, oldParent filer2.FullPath, entry *filer2.Entry, newParent filer2.FullPath, newName string, events *MoveEvents) error { +func (fs *FilerServer) moveEntry(ctx context.Context, oldParent util.FullPath, entry *filer2.Entry, newParent util.FullPath, newName string, events *MoveEvents) error { if entry.IsDirectory() { if err := fs.moveFolderSubEntries(ctx, oldParent, entry, newParent, newName, events); err != nil { return err @@ -57,7 +59,7 @@ func (fs *FilerServer) moveEntry(ctx context.Context, oldParent filer2.FullPath, return fs.moveSelfEntry(ctx, oldParent, entry, newParent, newName, events) } -func (fs *FilerServer) moveFolderSubEntries(ctx context.Context, oldParent filer2.FullPath, entry *filer2.Entry, newParent filer2.FullPath, newName string, events *MoveEvents) error { +func (fs *FilerServer) moveFolderSubEntries(ctx context.Context, oldParent util.FullPath, entry *filer2.Entry, newParent util.FullPath, newName string, events *MoveEvents) error { currentDirPath := oldParent.Child(entry.Name()) newDirPath := newParent.Child(newName) @@ -90,7 +92,7 @@ func (fs *FilerServer) moveFolderSubEntries(ctx context.Context, oldParent filer return nil } -func (fs 
*FilerServer) moveSelfEntry(ctx context.Context, oldParent filer2.FullPath, entry *filer2.Entry, newParent filer2.FullPath, newName string, events *MoveEvents) error { +func (fs *FilerServer) moveSelfEntry(ctx context.Context, oldParent util.FullPath, entry *filer2.Entry, newParent util.FullPath, newName string, events *MoveEvents) error { oldPath, newPath := oldParent.Child(entry.Name()), newParent.Child(newName) diff --git a/weed/server/filer_server_handlers_read.go b/weed/server/filer_server_handlers_read.go index 310fbcec4..b59780632 100644 --- a/weed/server/filer_server_handlers_read.go +++ b/weed/server/filer_server_handlers_read.go @@ -14,6 +14,7 @@ import ( "github.com/chrislusf/seaweedfs/weed/images" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" "github.com/chrislusf/seaweedfs/weed/stats" + "github.com/chrislusf/seaweedfs/weed/util" ) func (fs *FilerServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request, isGetMethod bool) { @@ -24,7 +25,7 @@ func (fs *FilerServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request, path = path[:len(path)-1] } - entry, err := fs.filer.FindEntry(context.Background(), filer2.FullPath(path)) + entry, err := fs.filer.FindEntry(context.Background(), util.FullPath(path)) if err != nil { if path == "/" { fs.listDirectoryHandler(w, r) diff --git a/weed/server/filer_server_handlers_read_dir.go b/weed/server/filer_server_handlers_read_dir.go index 87e864559..ae28fc1db 100644 --- a/weed/server/filer_server_handlers_read_dir.go +++ b/weed/server/filer_server_handlers_read_dir.go @@ -6,10 +6,10 @@ import ( "strconv" "strings" - "github.com/chrislusf/seaweedfs/weed/filer2" "github.com/chrislusf/seaweedfs/weed/glog" ui "github.com/chrislusf/seaweedfs/weed/server/filer_ui" "github.com/chrislusf/seaweedfs/weed/stats" + "github.com/chrislusf/seaweedfs/weed/util" ) // listDirectoryHandler lists directories and folers under a directory @@ -32,7 +32,7 @@ func (fs *FilerServer) listDirectoryHandler(w http.ResponseWriter, r *http.Reque lastFileName := r.FormValue("lastFileName") - entries, err := fs.filer.ListDirectoryEntries(context.Background(), filer2.FullPath(path), lastFileName, false, limit) + entries, err := fs.filer.ListDirectoryEntries(context.Background(), util.FullPath(path), lastFileName, false, limit) if err != nil { glog.V(0).Infof("listDirectory %s %s %d: %s", path, lastFileName, limit, err) diff --git a/weed/server/filer_server_handlers_write.go b/weed/server/filer_server_handlers_write.go index 5cd174b17..c3cb48ce7 100644 --- a/weed/server/filer_server_handlers_write.go +++ b/weed/server/filer_server_handlers_write.go @@ -169,13 +169,13 @@ func (fs *FilerServer) updateFilerStore(ctx context.Context, r *http.Request, w path += ret.Name } } - existingEntry, err := fs.filer.FindEntry(ctx, filer2.FullPath(path)) + existingEntry, err := fs.filer.FindEntry(ctx, util.FullPath(path)) crTime := time.Now() if err == nil && existingEntry != nil { crTime = existingEntry.Crtime } entry := &filer2.Entry{ - FullPath: filer2.FullPath(path), + FullPath: util.FullPath(path), Attr: filer2.Attr{ Mtime: time.Now(), Crtime: crTime, @@ -304,7 +304,7 @@ func (fs *FilerServer) DeleteHandler(w http.ResponseWriter, r *http.Request) { ignoreRecursiveError := r.FormValue("ignoreRecursiveError") == "true" skipChunkDeletion := r.FormValue("skipChunkDeletion") == "true" - err := fs.filer.DeleteEntryMetaAndData(context.Background(), filer2.FullPath(r.URL.Path), isRecursive, ignoreRecursiveError, !skipChunkDeletion) + err := 
fs.filer.DeleteEntryMetaAndData(context.Background(), util.FullPath(r.URL.Path), isRecursive, ignoreRecursiveError, !skipChunkDeletion) if err != nil { glog.V(1).Infoln("deleting", r.URL.Path, ":", err.Error()) httpStatus := http.StatusInternalServerError diff --git a/weed/server/filer_server_handlers_write_autochunk.go b/weed/server/filer_server_handlers_write_autochunk.go index 666004c33..c78efcc52 100644 --- a/weed/server/filer_server_handlers_write_autochunk.go +++ b/weed/server/filer_server_handlers_write_autochunk.go @@ -15,6 +15,7 @@ import ( "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" "github.com/chrislusf/seaweedfs/weed/security" "github.com/chrislusf/seaweedfs/weed/stats" + "github.com/chrislusf/seaweedfs/weed/util" ) func (fs *FilerServer) autoChunk(ctx context.Context, w http.ResponseWriter, r *http.Request, @@ -145,7 +146,7 @@ func (fs *FilerServer) doAutoChunk(ctx context.Context, w http.ResponseWriter, r glog.V(4).Infoln("saving", path) entry := &filer2.Entry{ - FullPath: filer2.FullPath(path), + FullPath: util.FullPath(path), Attr: filer2.Attr{ Mtime: time.Now(), Crtime: time.Now(), diff --git a/weed/server/filer_server_handlers_write_cipher.go b/weed/server/filer_server_handlers_write_cipher.go index 06670399c..cbcf8a05c 100644 --- a/weed/server/filer_server_handlers_write_cipher.go +++ b/weed/server/filer_server_handlers_write_cipher.go @@ -12,6 +12,7 @@ import ( "github.com/chrislusf/seaweedfs/weed/operation" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" "github.com/chrislusf/seaweedfs/weed/storage/needle" + "github.com/chrislusf/seaweedfs/weed/util" ) // handling single chunk POST or PUT upload @@ -67,7 +68,7 @@ func (fs *FilerServer) encrypt(ctx context.Context, w http.ResponseWriter, r *ht } entry := &filer2.Entry{ - FullPath: filer2.FullPath(path), + FullPath: util.FullPath(path), Attr: filer2.Attr{ Mtime: time.Now(), Crtime: time.Now(), diff --git a/weed/server/webdav_server.go b/weed/server/webdav_server.go index f617e4a98..9ee49f4ee 100644 --- a/weed/server/webdav_server.go +++ b/weed/server/webdav_server.go @@ -142,7 +142,7 @@ func (fs *WebDavFileSystem) Mkdir(ctx context.Context, fullDirPath string, perm } return fs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { - dir, name := filer2.FullPath(fullDirPath).DirAndName() + dir, name := util.FullPath(fullDirPath).DirAndName() request := &filer_pb.CreateEntryRequest{ Directory: dir, Entry: &filer_pb.Entry{ @@ -189,7 +189,7 @@ func (fs *WebDavFileSystem) OpenFile(ctx context.Context, fullFilePath string, f fs.removeAll(ctx, fullFilePath) } - dir, name := filer2.FullPath(fullFilePath).DirAndName() + dir, name := util.FullPath(fullFilePath).DirAndName() err = fs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { if err := filer_pb.CreateEntry(client, &filer_pb.CreateEntryRequest{ Directory: dir, @@ -254,7 +254,7 @@ func (fs *WebDavFileSystem) removeAll(ctx context.Context, fullFilePath string) } else { //_, err = fs.db.Exec(`delete from filesystem where fullFilePath = ?`, fullFilePath) } - dir, name := filer2.FullPath(fullFilePath).DirAndName() + dir, name := util.FullPath(fullFilePath).DirAndName() err = fs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { request := &filer_pb.DeleteEntryRequest{ @@ -311,8 +311,8 @@ func (fs *WebDavFileSystem) Rename(ctx context.Context, oldName, newName string) return os.ErrExist } - oldDir, oldBaseName := filer2.FullPath(oldName).DirAndName() - newDir, newBaseName := filer2.FullPath(newName).DirAndName() + oldDir, oldBaseName 
:= util.FullPath(oldName).DirAndName() + newDir, newBaseName := util.FullPath(newName).DirAndName() return fs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { @@ -339,10 +339,10 @@ func (fs *WebDavFileSystem) stat(ctx context.Context, fullFilePath string) (os.F return nil, err } - fullpath := filer2.FullPath(fullFilePath) + fullpath := util.FullPath(fullFilePath) var fi FileInfo - entry, err := filer2.GetEntry(fs, fullpath) + entry, err := filer_pb.GetEntry(fs, fullpath) if entry == nil { return nil, os.ErrNotExist } @@ -373,12 +373,12 @@ func (f *WebDavFile) Write(buf []byte) (int, error) { glog.V(2).Infof("WebDavFileSystem.Write %v", f.name) - dir, _ := filer2.FullPath(f.name).DirAndName() + dir, _ := util.FullPath(f.name).DirAndName() var err error ctx := context.Background() if f.entry == nil { - f.entry, err = filer2.GetEntry(f.fs, filer2.FullPath(f.name)) + f.entry, err = filer_pb.GetEntry(f.fs, util.FullPath(f.name)) } if f.entry == nil { @@ -483,7 +483,7 @@ func (f *WebDavFile) Read(p []byte) (readSize int, err error) { glog.V(2).Infof("WebDavFileSystem.Read %v", f.name) if f.entry == nil { - f.entry, err = filer2.GetEntry(f.fs, filer2.FullPath(f.name)) + f.entry, err = filer_pb.GetEntry(f.fs, util.FullPath(f.name)) } if f.entry == nil { return 0, err @@ -521,9 +521,9 @@ func (f *WebDavFile) Readdir(count int) (ret []os.FileInfo, err error) { glog.V(2).Infof("WebDavFileSystem.Readdir %v count %d", f.name, count) - dir, _ := filer2.FullPath(f.name).DirAndName() + dir, _ := util.FullPath(f.name).DirAndName() - err = filer2.ReadDirAllEntries(f.fs, filer2.FullPath(dir), "", func(entry *filer_pb.Entry, isLast bool) { + err = filer_pb.ReadDirAllEntries(f.fs, util.FullPath(dir), "", func(entry *filer_pb.Entry, isLast bool) { fi := FileInfo{ size: int64(filer2.TotalSize(entry.GetChunks())), name: entry.Name, diff --git a/weed/shell/command_fs_cat.go b/weed/shell/command_fs_cat.go index 7d2ac8989..1479aed95 100644 --- a/weed/shell/command_fs_cat.go +++ b/weed/shell/command_fs_cat.go @@ -7,6 +7,7 @@ import ( "github.com/chrislusf/seaweedfs/weed/filer2" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/util" ) func init() { @@ -41,7 +42,7 @@ func (c *commandFsCat) Do(args []string, commandEnv *CommandEnv, writer io.Write return fmt.Errorf("%s is a directory", path) } - dir, name := filer2.FullPath(path).DirAndName() + dir, name := util.FullPath(path).DirAndName() return commandEnv.withFilerClient(filerServer, filerPort, func(client filer_pb.SeaweedFilerClient) error { diff --git a/weed/shell/command_fs_du.go b/weed/shell/command_fs_du.go index ca2f22b57..b7313bebe 100644 --- a/weed/shell/command_fs_du.go +++ b/weed/shell/command_fs_du.go @@ -7,6 +7,7 @@ import ( "github.com/chrislusf/seaweedfs/weed/filer2" "github.com/chrislusf/seaweedfs/weed/pb" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/util" ) func init() { @@ -41,7 +42,7 @@ func (c *commandFsDu) Do(args []string, commandEnv *CommandEnv, writer io.Writer } var blockCount, byteCount uint64 - dir, name := filer2.FullPath(path).DirAndName() + dir, name := util.FullPath(path).DirAndName() blockCount, byteCount, err = duTraverseDirectory(writer, commandEnv.getFilerClient(filerServer, filerPort), dir, name) if name == "" && err == nil { @@ -52,9 +53,9 @@ func (c *commandFsDu) Do(args []string, commandEnv *CommandEnv, writer io.Writer } -func duTraverseDirectory(writer io.Writer, filerClient filer2.FilerClient, dir, name string) (blockCount, 
byteCount uint64, err error) { +func duTraverseDirectory(writer io.Writer, filerClient filer_pb.FilerClient, dir, name string) (blockCount, byteCount uint64, err error) { - err = filer2.ReadDirAllEntries(filerClient, filer2.FullPath(dir), name, func(entry *filer_pb.Entry, isLast bool) { + err = filer_pb.ReadDirAllEntries(filerClient, util.FullPath(dir), name, func(entry *filer_pb.Entry, isLast bool) { if entry.IsDirectory { subDir := fmt.Sprintf("%s/%s", dir, entry.Name) if dir == "/" { diff --git a/weed/shell/command_fs_ls.go b/weed/shell/command_fs_ls.go index 69ebe1b30..4185d67a8 100644 --- a/weed/shell/command_fs_ls.go +++ b/weed/shell/command_fs_ls.go @@ -10,6 +10,7 @@ import ( "github.com/chrislusf/seaweedfs/weed/filer2" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/util" ) func init() { @@ -63,10 +64,10 @@ func (c *commandFsLs) Do(args []string, commandEnv *CommandEnv, writer io.Writer path = path + "/" } - dir, name := filer2.FullPath(path).DirAndName() + dir, name := util.FullPath(path).DirAndName() entryCount := 0 - err = filer2.ReadDirAllEntries(commandEnv.getFilerClient(filerServer, filerPort), filer2.FullPath(dir), name, func(entry *filer_pb.Entry, isLast bool) { + err = filer_pb.ReadDirAllEntries(commandEnv.getFilerClient(filerServer, filerPort), util.FullPath(dir), name, func(entry *filer_pb.Entry, isLast bool) { if !showHidden && strings.HasPrefix(entry.Name, ".") { return diff --git a/weed/shell/command_fs_meta_cat.go b/weed/shell/command_fs_meta_cat.go index cd1ffb6fd..9cbe852c0 100644 --- a/weed/shell/command_fs_meta_cat.go +++ b/weed/shell/command_fs_meta_cat.go @@ -6,8 +6,8 @@ import ( "github.com/golang/protobuf/jsonpb" - "github.com/chrislusf/seaweedfs/weed/filer2" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/util" ) func init() { @@ -40,7 +40,7 @@ func (c *commandFsMetaCat) Do(args []string, commandEnv *CommandEnv, writer io.W return err } - dir, name := filer2.FullPath(path).DirAndName() + dir, name := util.FullPath(path).DirAndName() return commandEnv.withFilerClient(filerServer, filerPort, func(client filer_pb.SeaweedFilerClient) error { diff --git a/weed/shell/command_fs_meta_load.go b/weed/shell/command_fs_meta_load.go index ed92d8011..a19e9d3ce 100644 --- a/weed/shell/command_fs_meta_load.go +++ b/weed/shell/command_fs_meta_load.go @@ -7,7 +7,6 @@ import ( "github.com/golang/protobuf/proto" - "github.com/chrislusf/seaweedfs/weed/filer2" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" "github.com/chrislusf/seaweedfs/weed/util" ) @@ -85,7 +84,7 @@ func (c *commandFsMetaLoad) Do(args []string, commandEnv *CommandEnv, writer io. 
return err } - fmt.Fprintf(writer, "load %s\n", filer2.FullPath(fullEntry.Dir).Child(fullEntry.Entry.Name)) + fmt.Fprintf(writer, "load %s\n", util.FullPath(fullEntry.Dir).Child(fullEntry.Entry.Name)) if fullEntry.Entry.IsDirectory { dirCount++ diff --git a/weed/shell/command_fs_meta_notify.go b/weed/shell/command_fs_meta_notify.go index 099e04506..995ea16a2 100644 --- a/weed/shell/command_fs_meta_notify.go +++ b/weed/shell/command_fs_meta_notify.go @@ -4,7 +4,6 @@ import ( "fmt" "io" - "github.com/chrislusf/seaweedfs/weed/filer2" "github.com/chrislusf/seaweedfs/weed/notification" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" "github.com/chrislusf/seaweedfs/weed/util" @@ -44,7 +43,7 @@ func (c *commandFsMetaNotify) Do(args []string, commandEnv *CommandEnv, writer i var dirCount, fileCount uint64 - err = doTraverseBFS(writer, commandEnv.getFilerClient(filerServer, filerPort), filer2.FullPath(path), func(parentPath filer2.FullPath, entry *filer_pb.Entry) { + err = doTraverseBFS(writer, commandEnv.getFilerClient(filerServer, filerPort), util.FullPath(path), func(parentPath util.FullPath, entry *filer_pb.Entry) { if entry.IsDirectory { dirCount++ diff --git a/weed/shell/command_fs_meta_save.go b/weed/shell/command_fs_meta_save.go index b51fdd0f6..4314542bd 100644 --- a/weed/shell/command_fs_meta_save.go +++ b/weed/shell/command_fs_meta_save.go @@ -11,7 +11,6 @@ import ( "github.com/golang/protobuf/proto" - "github.com/chrislusf/seaweedfs/weed/filer2" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" "github.com/chrislusf/seaweedfs/weed/util" ) @@ -86,7 +85,7 @@ func (c *commandFsMetaSave) Do(args []string, commandEnv *CommandEnv, writer io. var dirCount, fileCount uint64 - err = doTraverseBFS(writer, commandEnv.getFilerClient(filerServer, filerPort), filer2.FullPath(path), func(parentPath filer2.FullPath, entry *filer_pb.Entry) { + err = doTraverseBFS(writer, commandEnv.getFilerClient(filerServer, filerPort), util.FullPath(path), func(parentPath util.FullPath, entry *filer_pb.Entry) { protoMessage := &filer_pb.FullEntry{ Dir: string(parentPath), @@ -125,7 +124,7 @@ func (c *commandFsMetaSave) Do(args []string, commandEnv *CommandEnv, writer io. 
return err } -func doTraverseBFS(writer io.Writer, filerClient filer2.FilerClient, parentPath filer2.FullPath, fn func(parentPath filer2.FullPath, entry *filer_pb.Entry)) (err error) { +func doTraverseBFS(writer io.Writer, filerClient filer_pb.FilerClient, parentPath util.FullPath, fn func(parentPath util.FullPath, entry *filer_pb.Entry)) (err error) { K := 5 @@ -146,7 +145,7 @@ func doTraverseBFS(writer io.Writer, filerClient filer2.FilerClient, parentPath time.Sleep(329 * time.Millisecond) continue } - dir := t.(filer2.FullPath) + dir := t.(util.FullPath) processErr := processOneDirectory(writer, filerClient, dir, queue, &jobQueueWg, fn) if processErr != nil { err = processErr @@ -160,9 +159,9 @@ func doTraverseBFS(writer io.Writer, filerClient filer2.FilerClient, parentPath return } -func processOneDirectory(writer io.Writer, filerClient filer2.FilerClient, parentPath filer2.FullPath, queue *util.Queue, jobQueueWg *sync.WaitGroup, fn func(parentPath filer2.FullPath, entry *filer_pb.Entry)) (err error) { +func processOneDirectory(writer io.Writer, filerClient filer_pb.FilerClient, parentPath util.FullPath, queue *util.Queue, jobQueueWg *sync.WaitGroup, fn func(parentPath util.FullPath, entry *filer_pb.Entry)) (err error) { - return filer2.ReadDirAllEntries(filerClient, parentPath, "", func(entry *filer_pb.Entry, isLast bool) { + return filer_pb.ReadDirAllEntries(filerClient, parentPath, "", func(entry *filer_pb.Entry, isLast bool) { fn(parentPath, entry) @@ -172,7 +171,7 @@ func processOneDirectory(writer io.Writer, filerClient filer2.FilerClient, paren subDir = "/" + entry.Name } jobQueueWg.Add(1) - queue.Enqueue(filer2.FullPath(subDir)) + queue.Enqueue(util.FullPath(subDir)) } }) diff --git a/weed/shell/command_fs_mv.go b/weed/shell/command_fs_mv.go index 85275058e..148ac6e2f 100644 --- a/weed/shell/command_fs_mv.go +++ b/weed/shell/command_fs_mv.go @@ -6,8 +6,8 @@ import ( "io" "path/filepath" - "github.com/chrislusf/seaweedfs/weed/filer2" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/util" ) func init() { @@ -47,9 +47,9 @@ func (c *commandFsMv) Do(args []string, commandEnv *CommandEnv, writer io.Writer return err } - sourceDir, sourceName := filer2.FullPath(sourcePath).DirAndName() + sourceDir, sourceName := util.FullPath(sourcePath).DirAndName() - destinationDir, destinationName := filer2.FullPath(destinationPath).DirAndName() + destinationDir, destinationName := util.FullPath(destinationPath).DirAndName() return commandEnv.withFilerClient(filerServer, filerPort, func(client filer_pb.SeaweedFilerClient) error { @@ -82,7 +82,7 @@ func (c *commandFsMv) Do(args []string, commandEnv *CommandEnv, writer io.Writer _, err = client.AtomicRenameEntry(context.Background(), request) - fmt.Fprintf(writer, "move: %s => %s\n", sourcePath, filer2.NewFullPath(targetDir, targetName)) + fmt.Fprintf(writer, "move: %s => %s\n", sourcePath, util.NewFullPath(targetDir, targetName)) return err diff --git a/weed/shell/command_fs_tree.go b/weed/shell/command_fs_tree.go index 04530571c..d1f639cff 100644 --- a/weed/shell/command_fs_tree.go +++ b/weed/shell/command_fs_tree.go @@ -5,8 +5,8 @@ import ( "io" "strings" - "github.com/chrislusf/seaweedfs/weed/filer2" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/util" ) func init() { @@ -34,9 +34,9 @@ func (c *commandFsTree) Do(args []string, commandEnv *CommandEnv, writer io.Writ return err } - dir, name := filer2.FullPath(path).DirAndName() + dir, name := 
util.FullPath(path).DirAndName() - dirCount, fCount, terr := treeTraverseDirectory(writer, commandEnv.getFilerClient(filerServer, filerPort), filer2.FullPath(dir), name, newPrefix(), -1) + dirCount, fCount, terr := treeTraverseDirectory(writer, commandEnv.getFilerClient(filerServer, filerPort), util.FullPath(dir), name, newPrefix(), -1) if terr == nil { fmt.Fprintf(writer, "%d directories, %d files\n", dirCount, fCount) @@ -46,11 +46,11 @@ func (c *commandFsTree) Do(args []string, commandEnv *CommandEnv, writer io.Writ } -func treeTraverseDirectory(writer io.Writer, filerClient filer2.FilerClient, dir filer2.FullPath, name string, prefix *Prefix, level int) (directoryCount, fileCount int64, err error) { +func treeTraverseDirectory(writer io.Writer, filerClient filer_pb.FilerClient, dir util.FullPath, name string, prefix *Prefix, level int) (directoryCount, fileCount int64, err error) { prefix.addMarker(level) - err = filer2.ReadDirAllEntries(filerClient, dir, name, func(entry *filer_pb.Entry, isLast bool) { + err = filer_pb.ReadDirAllEntries(filerClient, dir, name, func(entry *filer_pb.Entry, isLast bool) { if level < 0 && name != "" { if entry.Name != name { return diff --git a/weed/shell/commands.go b/weed/shell/commands.go index b8832ad93..7ca631ab3 100644 --- a/weed/shell/commands.go +++ b/weed/shell/commands.go @@ -10,8 +10,8 @@ import ( "google.golang.org/grpc" - "github.com/chrislusf/seaweedfs/weed/filer2" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/util" "github.com/chrislusf/seaweedfs/weed/wdclient" ) @@ -66,7 +66,7 @@ func (ce *CommandEnv) isDirectory(filerServer string, filerPort int64, path stri func (ce *CommandEnv) checkDirectory(filerServer string, filerPort int64, path string) error { - dir, name := filer2.FullPath(path).DirAndName() + dir, name := util.FullPath(path).DirAndName() return ce.withFilerClient(filerServer, filerPort, func(client filer_pb.SeaweedFilerClient) error { diff --git a/weed/filer2/fullpath.go b/weed/util/fullpath.go similarity index 85% rename from weed/filer2/fullpath.go rename to weed/util/fullpath.go index 133069f93..4a2e2854f 100644 --- a/weed/filer2/fullpath.go +++ b/weed/util/fullpath.go @@ -1,10 +1,8 @@ -package filer2 +package util import ( "path/filepath" "strings" - - "github.com/chrislusf/seaweedfs/weed/util" ) type FullPath string @@ -38,5 +36,5 @@ func (fp FullPath) Child(name string) FullPath { } func (fp FullPath) AsInode() uint64 { - return uint64(util.HashStringToLong(string(fp))) + return uint64(HashStringToLong(string(fp))) } From 654a69ff52a0625db174d7851463e3cc464ffe5a Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Mon, 23 Mar 2020 00:06:24 -0700 Subject: [PATCH 0302/2432] refactoring --- .../{filer_client.pb.go => filer_client.go} | 28 +++++++++++++++++++ weed/s3api/filer_util.go | 24 +--------------- weed/s3api/s3api_handlers.go | 4 +++ 3 files changed, 33 insertions(+), 23 deletions(-) rename weed/pb/filer_pb/{filer_client.pb.go => filer_client.go} (72%) diff --git a/weed/pb/filer_pb/filer_client.pb.go b/weed/pb/filer_pb/filer_client.go similarity index 72% rename from weed/pb/filer_pb/filer_client.pb.go rename to weed/pb/filer_pb/filer_client.go index b2be614af..100e997b2 100644 --- a/weed/pb/filer_pb/filer_client.pb.go +++ b/weed/pb/filer_pb/filer_client.go @@ -92,3 +92,31 @@ func ReadDirAllEntries(filerClient FilerClient, fullDirPath util.FullPath, prefi return } + +func Exists(filerClient FilerClient, parentDirectoryPath string, entryName string, isDirectory bool) (exists bool, err 
error) { + + err = filerClient.WithFilerClient(func(client SeaweedFilerClient) error { + + request := &LookupDirectoryEntryRequest{ + Directory: parentDirectoryPath, + Name: entryName, + } + + glog.V(4).Infof("exists entry %v/%v: %v", parentDirectoryPath, entryName, request) + resp, err := LookupEntry(client, request) + if err != nil { + if err == ErrNotFound { + exists = false + return nil + } + glog.V(0).Infof("exists entry %v: %v", request, err) + return fmt.Errorf("exists entry %s/%s: %v", parentDirectoryPath, entryName, err) + } + + exists = resp.Entry.IsDirectory == isDirectory + + return nil + }) + + return +} diff --git a/weed/s3api/filer_util.go b/weed/s3api/filer_util.go index be985c893..2e738af50 100644 --- a/weed/s3api/filer_util.go +++ b/weed/s3api/filer_util.go @@ -153,30 +153,8 @@ func doDeleteEntry(client filer_pb.SeaweedFilerClient, parentDirectoryPath strin func (s3a *S3ApiServer) exists(parentDirectoryPath string, entryName string, isDirectory bool) (exists bool, err error) { - err = s3a.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { - - request := &filer_pb.LookupDirectoryEntryRequest{ - Directory: parentDirectoryPath, - Name: entryName, - } + return filer_pb.Exists(s3a, parentDirectoryPath, entryName, isDirectory) - glog.V(4).Infof("exists entry %v/%v: %v", parentDirectoryPath, entryName, request) - resp, err := filer_pb.LookupEntry(client, request) - if err != nil { - if err == filer_pb.ErrNotFound { - exists = false - return nil - } - glog.V(0).Infof("exists entry %v: %v", request, err) - return fmt.Errorf("exists entry %s/%s: %v", parentDirectoryPath, entryName, err) - } - - exists = resp.Entry.IsDirectory == isDirectory - - return nil - }) - - return } func objectKey(key *string) *string { diff --git a/weed/s3api/s3api_handlers.go b/weed/s3api/s3api_handlers.go index d850cb088..05dd4b823 100644 --- a/weed/s3api/s3api_handlers.go +++ b/weed/s3api/s3api_handlers.go @@ -46,6 +46,10 @@ func (s3a *S3ApiServer) WithFilerClient(fn func(filer_pb.SeaweedFilerClient) err }, s3a.option.FilerGrpcAddress, s3a.option.GrpcDialOption) } +func (s3a *S3ApiServer) AdjustedUrl(hostAndPort string) string { + return hostAndPort +} + // If none of the http routes match respond with MethodNotAllowed func notFoundHandler(w http.ResponseWriter, r *http.Request) { From c07bcd5065391441cdc97c05975e447999bab4b1 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Mon, 23 Mar 2020 00:30:02 -0700 Subject: [PATCH 0303/2432] refactoring --- weed/pb/filer_pb/filer_client.go | 95 ++++++++++++++++++++++++++-- weed/s3api/filer_multipart.go | 4 +- weed/s3api/filer_util.go | 98 ++--------------------------- weed/s3api/s3api_bucket_handlers.go | 6 -- 4 files changed, 97 insertions(+), 106 deletions(-) diff --git a/weed/pb/filer_pb/filer_client.go b/weed/pb/filer_pb/filer_client.go index 100e997b2..1a92b452d 100644 --- a/weed/pb/filer_pb/filer_client.go +++ b/weed/pb/filer_pb/filer_client.go @@ -5,11 +5,18 @@ import ( "fmt" "io" "math" + "os" + "time" "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/util" ) +var ( + OS_UID = uint32(os.Getuid()) + OS_GID = uint32(os.Getgid()) +) + type FilerClient interface { WithFilerClient(fn func(SeaweedFilerClient) error) error AdjustedUrl(hostAndPort string) string @@ -50,15 +57,26 @@ func GetEntry(filerClient FilerClient, fullFilePath util.FullPath) (entry *Entry func ReadDirAllEntries(filerClient FilerClient, fullDirPath util.FullPath, prefix string, fn func(entry *Entry, isLast bool)) (err error) { - err = 
filerClient.WithFilerClient(func(client SeaweedFilerClient) error { + return doList(filerClient, fullDirPath, prefix, fn, "", false, math.MaxUint32) + +} + +func List(filerClient FilerClient, parentDirectoryPath, prefix string, fn func(entry *Entry, isLast bool), startFrom string, inclusive bool, limit uint32) (err error) { + + return doList(filerClient, util.FullPath(parentDirectoryPath), prefix, fn, startFrom, inclusive, limit) + +} - lastEntryName := "" +func doList(filerClient FilerClient, fullDirPath util.FullPath, prefix string, fn func(entry *Entry, isLast bool), startFrom string, inclusive bool, limit uint32) (err error) { + + err = filerClient.WithFilerClient(func(client SeaweedFilerClient) error { request := &ListEntriesRequest{ - Directory: string(fullDirPath), - Prefix: prefix, - StartFromFileName: lastEntryName, - Limit: math.MaxUint32, + Directory: string(fullDirPath), + Prefix: prefix, + StartFromFileName: startFrom, + Limit: limit, + InclusiveStartFrom: inclusive, } glog.V(3).Infof("read directory: %v", request) @@ -120,3 +138,68 @@ func Exists(filerClient FilerClient, parentDirectoryPath string, entryName strin return } + +func Mkdir(filerClient FilerClient, parentDirectoryPath string, dirName string, fn func(entry *Entry)) error { + return filerClient.WithFilerClient(func(client SeaweedFilerClient) error { + + entry := &Entry{ + Name: dirName, + IsDirectory: true, + Attributes: &FuseAttributes{ + Mtime: time.Now().Unix(), + Crtime: time.Now().Unix(), + FileMode: uint32(0777 | os.ModeDir), + Uid: OS_UID, + Gid: OS_GID, + }, + } + + if fn != nil { + fn(entry) + } + + request := &CreateEntryRequest{ + Directory: parentDirectoryPath, + Entry: entry, + } + + glog.V(1).Infof("mkdir: %v", request) + if err := CreateEntry(client, request); err != nil { + glog.V(0).Infof("mkdir %v: %v", request, err) + return fmt.Errorf("mkdir %s/%s: %v", parentDirectoryPath, dirName, err) + } + + return nil + }) +} + +func MkFile(filerClient FilerClient, parentDirectoryPath string, fileName string, chunks []*FileChunk) error { + return filerClient.WithFilerClient(func(client SeaweedFilerClient) error { + + entry := &Entry{ + Name: fileName, + IsDirectory: false, + Attributes: &FuseAttributes{ + Mtime: time.Now().Unix(), + Crtime: time.Now().Unix(), + FileMode: uint32(0770), + Uid: OS_UID, + Gid: OS_GID, + }, + Chunks: chunks, + } + + request := &CreateEntryRequest{ + Directory: parentDirectoryPath, + Entry: entry, + } + + glog.V(1).Infof("create file: %s/%s", parentDirectoryPath, fileName) + if err := CreateEntry(client, request); err != nil { + glog.V(0).Infof("create file %v:%v", request, err) + return fmt.Errorf("create file %s/%s: %v", parentDirectoryPath, fileName, err) + } + + return nil + }) +} diff --git a/weed/s3api/filer_multipart.go b/weed/s3api/filer_multipart.go index 1350fb18e..e81461dd2 100644 --- a/weed/s3api/filer_multipart.go +++ b/weed/s3api/filer_multipart.go @@ -155,7 +155,7 @@ func (s3a *S3ApiServer) listMultipartUploads(input *s3.ListMultipartUploadsInput }, } - entries, err := s3a.list(s3a.genUploadsFolder(*input.Bucket), *input.Prefix, *input.KeyMarker, true, int(*input.MaxUploads)) + entries, err := s3a.list(s3a.genUploadsFolder(*input.Bucket), *input.Prefix, *input.KeyMarker, true, uint32(*input.MaxUploads)) if err != nil { glog.Errorf("listMultipartUploads %s error: %v", *input.Bucket, err) return @@ -190,7 +190,7 @@ func (s3a *S3ApiServer) listObjectParts(input *s3.ListPartsInput) (output *ListP }, } - entries, err := 
s3a.list(s3a.genUploadsFolder(*input.Bucket)+"/"+*input.UploadId, "", fmt.Sprintf("%04d.part", *input.PartNumberMarker), false, int(*input.MaxParts)) + entries, err := s3a.list(s3a.genUploadsFolder(*input.Bucket)+"/"+*input.UploadId, "", fmt.Sprintf("%04d.part", *input.PartNumberMarker), false, uint32(*input.MaxParts)) if err != nil { glog.Errorf("listObjectParts %s %s error: %v", *input.Bucket, *input.UploadId, err) return nil, ErrNoSuchUpload diff --git a/weed/s3api/filer_util.go b/weed/s3api/filer_util.go index 2e738af50..51249cf39 100644 --- a/weed/s3api/filer_util.go +++ b/weed/s3api/filer_util.go @@ -3,115 +3,29 @@ package s3api import ( "context" "fmt" - "io" - "os" "strings" - "time" "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" ) func (s3a *S3ApiServer) mkdir(parentDirectoryPath string, dirName string, fn func(entry *filer_pb.Entry)) error { - return s3a.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { - - entry := &filer_pb.Entry{ - Name: dirName, - IsDirectory: true, - Attributes: &filer_pb.FuseAttributes{ - Mtime: time.Now().Unix(), - Crtime: time.Now().Unix(), - FileMode: uint32(0777 | os.ModeDir), - Uid: OS_UID, - Gid: OS_GID, - }, - } - - if fn != nil { - fn(entry) - } - - request := &filer_pb.CreateEntryRequest{ - Directory: parentDirectoryPath, - Entry: entry, - } - glog.V(1).Infof("mkdir: %v", request) - if err := filer_pb.CreateEntry(client, request); err != nil { - glog.V(0).Infof("mkdir %v: %v", request, err) - return fmt.Errorf("mkdir %s/%s: %v", parentDirectoryPath, dirName, err) - } + return filer_pb.Mkdir(s3a, parentDirectoryPath, dirName, fn) - return nil - }) } func (s3a *S3ApiServer) mkFile(parentDirectoryPath string, fileName string, chunks []*filer_pb.FileChunk) error { - return s3a.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { - - entry := &filer_pb.Entry{ - Name: fileName, - IsDirectory: false, - Attributes: &filer_pb.FuseAttributes{ - Mtime: time.Now().Unix(), - Crtime: time.Now().Unix(), - FileMode: uint32(0770), - Uid: OS_UID, - Gid: OS_GID, - }, - Chunks: chunks, - } - - request := &filer_pb.CreateEntryRequest{ - Directory: parentDirectoryPath, - Entry: entry, - } - glog.V(1).Infof("create file: %s/%s", parentDirectoryPath, fileName) - if err := filer_pb.CreateEntry(client, request); err != nil { - glog.V(0).Infof("create file %v:%v", request, err) - return fmt.Errorf("create file %s/%s: %v", parentDirectoryPath, fileName, err) - } + return filer_pb.MkFile(s3a, parentDirectoryPath, fileName, chunks) - return nil - }) } -func (s3a *S3ApiServer) list(parentDirectoryPath, prefix, startFrom string, inclusive bool, limit int) (entries []*filer_pb.Entry, err error) { - - err = s3a.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { +func (s3a *S3ApiServer) list(parentDirectoryPath, prefix, startFrom string, inclusive bool, limit uint32) (entries []*filer_pb.Entry, err error) { - request := &filer_pb.ListEntriesRequest{ - Directory: parentDirectoryPath, - Prefix: prefix, - StartFromFileName: startFrom, - InclusiveStartFrom: inclusive, - Limit: uint32(limit), - } - - glog.V(4).Infof("read directory: %v", request) - stream, err := client.ListEntries(context.Background(), request) - if err != nil { - glog.V(0).Infof("read directory %v: %v", request, err) - return fmt.Errorf("list dir %v: %v", parentDirectoryPath, err) - } - - for { - resp, recvErr := stream.Recv() - if recvErr != nil { - if recvErr == io.EOF { - break - } else { - return recvErr - } - } - - entries = 
append(entries, resp.Entry) - - } - - return nil - }) + err = filer_pb.List(s3a, parentDirectoryPath, prefix, func(entry *filer_pb.Entry, isLast bool) { + entries = append(entries, entry) + }, startFrom, inclusive, limit) return diff --git a/weed/s3api/s3api_bucket_handlers.go b/weed/s3api/s3api_bucket_handlers.go index 02a01e74f..f1bfb2156 100644 --- a/weed/s3api/s3api_bucket_handlers.go +++ b/weed/s3api/s3api_bucket_handlers.go @@ -6,7 +6,6 @@ import ( "fmt" "math" "net/http" - "os" "time" "github.com/aws/aws-sdk-go/aws" @@ -17,11 +16,6 @@ import ( "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" ) -var ( - OS_UID = uint32(os.Getuid()) - OS_GID = uint32(os.Getgid()) -) - type ListAllMyBucketsResult struct { XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ListAllMyBucketsResult"` Owner *s3.Owner From 7f1e3c843ddc8d6880985f0a32f37cfb19b27c3c Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Mon, 23 Mar 2020 01:14:21 -0700 Subject: [PATCH 0304/2432] refactoring --- weed/pb/filer_pb/filer_client.go | 18 ++++++++++ weed/shell/command_bucket_delete.go | 29 ++++----------- weed/shell/command_bucket_list.go | 56 ++++++++++++++--------------- 3 files changed, 52 insertions(+), 51 deletions(-) diff --git a/weed/pb/filer_pb/filer_client.go b/weed/pb/filer_pb/filer_client.go index 1a92b452d..fd88d3d77 100644 --- a/weed/pb/filer_pb/filer_client.go +++ b/weed/pb/filer_pb/filer_client.go @@ -203,3 +203,21 @@ func MkFile(filerClient FilerClient, parentDirectoryPath string, fileName string return nil }) } + +func Remove(filerClient FilerClient, parentDirectoryPath string, name string, isDeleteData, isRecursive, ignoreRecursiveErr bool) error { + return filerClient.WithFilerClient(func(client SeaweedFilerClient) error { + + if _, err := client.DeleteEntry(context.Background(), &DeleteEntryRequest{ + Directory: parentDirectoryPath, + Name: name, + IsDeleteData: isDeleteData, + IsRecursive: isRecursive, + IgnoreRecursiveError: ignoreRecursiveErr, + }); err != nil { + return err + } + + return nil + + }) +} diff --git a/weed/shell/command_bucket_delete.go b/weed/shell/command_bucket_delete.go index c57ce7221..509b3c1de 100644 --- a/weed/shell/command_bucket_delete.go +++ b/weed/shell/command_bucket_delete.go @@ -1,7 +1,6 @@ package shell import ( - "context" "flag" "fmt" "io" @@ -44,28 +43,14 @@ func (c *commandBucketDelete) Do(args []string, commandEnv *CommandEnv, writer i return parseErr } - err = commandEnv.withFilerClient(filerServer, filerPort, func(client filer_pb.SeaweedFilerClient) error { + filerClient := commandEnv.getFilerClient(filerServer, filerPort) - resp, err := client.GetFilerConfiguration(context.Background(), &filer_pb.GetFilerConfigurationRequest{}) - if err != nil { - return fmt.Errorf("get filer %s:%d configuration: %v", filerServer, filerPort, err) - } - filerBucketsPath := resp.DirBuckets - - if _, err := client.DeleteEntry(context.Background(), &filer_pb.DeleteEntryRequest{ - Directory: filerBucketsPath, - Name: *bucketName, - IsDeleteData: false, - IsRecursive: true, - IgnoreRecursiveError: true, - }); err != nil { - return err - } - - return nil - - }) + var filerBucketsPath string + filerBucketsPath, err = readFilerBucketsPath(filerClient) + if err != nil { + return fmt.Errorf("read buckets: %v", err) + } - return err + return filer_pb.Remove(filerClient, filerBucketsPath, *bucketName, false, true, true) } diff --git a/weed/shell/command_bucket_list.go b/weed/shell/command_bucket_list.go index 5eb5972ce..486d40fba 100644 --- a/weed/shell/command_bucket_list.go +++ 
b/weed/shell/command_bucket_list.go @@ -39,43 +39,41 @@ func (c *commandBucketList) Do(args []string, commandEnv *CommandEnv, writer io. return parseErr } - err = commandEnv.withFilerClient(filerServer, filerPort, func(client filer_pb.SeaweedFilerClient) error { + filerClient := commandEnv.getFilerClient(filerServer, filerPort) - resp, err := client.GetFilerConfiguration(context.Background(), &filer_pb.GetFilerConfigurationRequest{}) - if err != nil { - return fmt.Errorf("get filer %s:%d configuration: %v", filerServer, filerPort, err) - } - filerBucketsPath := resp.DirBuckets + var filerBucketsPath string + filerBucketsPath, err = readFilerBucketsPath(filerClient) + if err != nil { + return fmt.Errorf("read buckets: %v", err) + } - stream, err := client.ListEntries(context.Background(), &filer_pb.ListEntriesRequest{ - Directory: filerBucketsPath, - Limit: math.MaxUint32, - }) - if err != nil { - return fmt.Errorf("list buckets under %v: %v", filerBucketsPath, err) + err = filer_pb.List(filerClient, filerBucketsPath, "", func(entry *filer_pb.Entry, isLast bool) { + if entry.Attributes.Replication == "" || entry.Attributes.Replication == "000" { + fmt.Fprintf(writer, " %s\n", entry.Name) + } else { + fmt.Fprintf(writer, " %s\t\t\treplication: %s\n", entry.Name, entry.Attributes.Replication) } + }, "", false, math.MaxUint32) + if err != nil { + return fmt.Errorf("list buckets under %v: %v", filerBucketsPath, err) + } + + return err - for { - resp, recvErr := stream.Recv() - if recvErr != nil { - if recvErr == io.EOF { - break - } else { - return recvErr - } - } - - if resp.Entry.Attributes.Replication == "" || resp.Entry.Attributes.Replication == "000" { - fmt.Fprintf(writer, " %s\n", resp.Entry.Name) - } else { - fmt.Fprintf(writer, " %s\t\t\treplication: %s\n", resp.Entry.Name, resp.Entry.Attributes.Replication) - } +} + +func readFilerBucketsPath(filerClient filer_pb.FilerClient) (filerBucketsPath string, err error) { + err = filerClient.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { + + resp, err := client.GetFilerConfiguration(context.Background(), &filer_pb.GetFilerConfigurationRequest{}) + if err != nil { + return fmt.Errorf("get filer configuration: %v", err) } + filerBucketsPath = resp.DirBuckets return nil }) - return err - + return filerBucketsPath, err } From a5aa8be19bbf9d0ef9e6668334a0521618739f4a Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Mon, 23 Mar 2020 01:14:42 -0700 Subject: [PATCH 0305/2432] Update webdav_server.go --- weed/server/webdav_server.go | 16 +--------------- 1 file changed, 1 insertion(+), 15 deletions(-) diff --git a/weed/server/webdav_server.go b/weed/server/webdav_server.go index 9ee49f4ee..4acff454b 100644 --- a/weed/server/webdav_server.go +++ b/weed/server/webdav_server.go @@ -255,23 +255,9 @@ func (fs *WebDavFileSystem) removeAll(ctx context.Context, fullFilePath string) //_, err = fs.db.Exec(`delete from filesystem where fullFilePath = ?`, fullFilePath) } dir, name := util.FullPath(fullFilePath).DirAndName() - err = fs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { - request := &filer_pb.DeleteEntryRequest{ - Directory: dir, - Name: name, - IsDeleteData: true, - } - - glog.V(3).Infof("removing entry: %v", request) - _, err := client.DeleteEntry(ctx, request) - if err != nil { - return fmt.Errorf("remove %s: %v", fullFilePath, err) - } + return filer_pb.Remove(fs, dir, name, true, true, true) - return nil - }) - return err } func (fs *WebDavFileSystem) RemoveAll(ctx context.Context, name string) error { From 
f84c288852ba43db56729bda33ada39c7b996d05 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Mon, 23 Mar 2020 01:14:55 -0700 Subject: [PATCH 0306/2432] purge --- weed/server/webdav_server.go | 5 ----- 1 file changed, 5 deletions(-) diff --git a/weed/server/webdav_server.go b/weed/server/webdav_server.go index 4acff454b..fb0526b97 100644 --- a/weed/server/webdav_server.go +++ b/weed/server/webdav_server.go @@ -249,11 +249,6 @@ func (fs *WebDavFileSystem) removeAll(ctx context.Context, fullFilePath string) return err } - if fi.IsDir() { - //_, err = fs.db.Exec(`delete from filesystem where fullFilePath like $1 escape '\'`, strings.Replace(fullFilePath, `%`, `\%`, -1)+`%`) - } else { - //_, err = fs.db.Exec(`delete from filesystem where fullFilePath = ?`, fullFilePath) - } dir, name := util.FullPath(fullFilePath).DirAndName() return filer_pb.Remove(fs, dir, name, true, true, true) From cddcb5cafea14e7d5e7105606d88211de82d36a1 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Mon, 23 Mar 2020 01:25:23 -0700 Subject: [PATCH 0307/2432] same booleans as before --- weed/server/webdav_server.go | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/weed/server/webdav_server.go b/weed/server/webdav_server.go index fb0526b97..350d61721 100644 --- a/weed/server/webdav_server.go +++ b/weed/server/webdav_server.go @@ -244,14 +244,9 @@ func (fs *WebDavFileSystem) removeAll(ctx context.Context, fullFilePath string) return err } - fi, err := fs.stat(ctx, fullFilePath) - if err != nil { - return err - } - dir, name := util.FullPath(fullFilePath).DirAndName() - return filer_pb.Remove(fs, dir, name, true, true, true) + return filer_pb.Remove(fs, dir, name, true, false, false) } From 45ee3736aa10448a8a0d108f582e6064f42f0f59 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Mon, 23 Mar 2020 01:25:38 -0700 Subject: [PATCH 0308/2432] refactoring --- weed/filesys/dir.go | 47 ++++++++++++++------------------------------- 1 file changed, 14 insertions(+), 33 deletions(-) diff --git a/weed/filesys/dir.go b/weed/filesys/dir.go index 7781533c8..8dae41324 100644 --- a/weed/filesys/dir.go +++ b/weed/filesys/dir.go @@ -282,23 +282,14 @@ func (dir *Dir) removeOneFile(req *fuse.RemoveRequest) error { dir.wfs.cacheDelete(filePath) - return dir.wfs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { - - request := &filer_pb.DeleteEntryRequest{ - Directory: dir.Path, - Name: req.Name, - IsDeleteData: false, - } - - glog.V(3).Infof("remove file: %v", request) - _, err := client.DeleteEntry(context.Background(), request) - if err != nil { - glog.V(3).Infof("not found remove file %s/%s: %v", dir.Path, req.Name, err) - return fuse.ENOENT - } + glog.V(3).Infof("remove file: %v", req) + err = filer_pb.Remove(dir.wfs, dir.Path, req.Name, false, false, false) + if err != nil { + glog.V(3).Infof("not found remove file %s/%s: %v", dir.Path, req.Name, err) + return fuse.ENOENT + } - return nil - }) + return nil } @@ -306,23 +297,13 @@ func (dir *Dir) removeFolder(req *fuse.RemoveRequest) error { dir.wfs.cacheDelete(util.NewFullPath(dir.Path, req.Name)) - return dir.wfs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { - - request := &filer_pb.DeleteEntryRequest{ - Directory: dir.Path, - Name: req.Name, - IsDeleteData: true, - } - - glog.V(3).Infof("remove directory entry: %v", request) - _, err := client.DeleteEntry(context.Background(), request) - if err != nil { - glog.V(3).Infof("not found remove %s/%s: %v", dir.Path, req.Name, err) - return fuse.ENOENT - } - - return nil - }) + glog.V(3).Infof("remove 
directory entry: %v", req) + err := filer_pb.Remove(dir.wfs, dir.Path, req.Name, true, false, false) + if err != nil { + glog.V(3).Infof("not found remove %s/%s: %v", dir.Path, req.Name, err) + return fuse.ENOENT + } + return nil } From b97768c51ccfa39aafa0d82cf71a93d6146c4b75 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Mon, 23 Mar 2020 01:30:22 -0700 Subject: [PATCH 0309/2432] refactoring --- .../replication/sink/filersink/fetch_write.go | 14 +++------ weed/replication/sink/filersink/filer_sink.go | 31 +++++++------------ 2 files changed, 15 insertions(+), 30 deletions(-) diff --git a/weed/replication/sink/filersink/fetch_write.go b/weed/replication/sink/filersink/fetch_write.go index 74f3a72bb..07b091073 100644 --- a/weed/replication/sink/filersink/fetch_write.go +++ b/weed/replication/sink/filersink/fetch_write.go @@ -3,7 +3,6 @@ package filersink import ( "context" "fmt" - "strings" "sync" "google.golang.org/grpc" @@ -69,7 +68,7 @@ func (fs *FilerSink) fetchAndWrite(sourceChunk *filer_pb.FileChunk, dir string) var host string var auth security.EncodedJwt - if err := fs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error { + if err := fs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { request := &filer_pb.AssignVolumeRequest{ Count: 1, @@ -114,7 +113,7 @@ func (fs *FilerSink) fetchAndWrite(sourceChunk *filer_pb.FileChunk, dir string) return } -func (fs *FilerSink) withFilerClient(fn func(filer_pb.SeaweedFilerClient) error) error { +func (fs *FilerSink) WithFilerClient(fn func(filer_pb.SeaweedFilerClient) error) error { return pb.WithCachedGrpcClient(func(grpcConnection *grpc.ClientConn) error { client := filer_pb.NewSeaweedFilerClient(grpcConnection) @@ -122,11 +121,6 @@ func (fs *FilerSink) withFilerClient(fn func(filer_pb.SeaweedFilerClient) error) }, fs.grpcAddress, fs.grpcDialOption) } - -func volumeId(fileId string) string { - lastCommaIndex := strings.LastIndex(fileId, ",") - if lastCommaIndex > 0 { - return fileId[:lastCommaIndex] - } - return fileId +func (fs *FilerSink) AdjustedUrl(hostAndPort string) string { + return hostAndPort } diff --git a/weed/replication/sink/filersink/filer_sink.go b/weed/replication/sink/filersink/filer_sink.go index ffce853b8..5f055f9d1 100644 --- a/weed/replication/sink/filersink/filer_sink.go +++ b/weed/replication/sink/filersink/filer_sink.go @@ -65,30 +65,21 @@ func (fs *FilerSink) initialize(grpcAddress string, dir string, } func (fs *FilerSink) DeleteEntry(key string, isDirectory, deleteIncludeChunks bool) error { - return fs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error { - dir, name := util.FullPath(key).DirAndName() - - request := &filer_pb.DeleteEntryRequest{ - Directory: dir, - Name: name, - IsDeleteData: deleteIncludeChunks, - } - - glog.V(1).Infof("delete entry: %v", request) - _, err := client.DeleteEntry(context.Background(), request) - if err != nil { - glog.V(0).Infof("delete entry %s: %v", key, err) - return fmt.Errorf("delete entry %s: %v", key, err) - } + dir, name := util.FullPath(key).DirAndName() - return nil - }) + glog.V(1).Infof("delete entry: %v", key) + err := filer_pb.Remove(fs, dir, name, deleteIncludeChunks, false, false) + if err != nil { + glog.V(0).Infof("delete entry %s: %v", key, err) + return fmt.Errorf("delete entry %s: %v", key, err) + } + return nil } func (fs *FilerSink) CreateEntry(key string, entry *filer_pb.Entry) error { - return fs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error { + return fs.WithFilerClient(func(client 
filer_pb.SeaweedFilerClient) error { dir, name := util.FullPath(key).DirAndName() @@ -140,7 +131,7 @@ func (fs *FilerSink) UpdateEntry(key string, oldEntry *filer_pb.Entry, newParent // read existing entry var existingEntry *filer_pb.Entry - err = fs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error { + err = fs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { request := &filer_pb.LookupDirectoryEntryRequest{ Directory: dir, @@ -192,7 +183,7 @@ func (fs *FilerSink) UpdateEntry(key string, oldEntry *filer_pb.Entry, newParent } // save updated meta data - return true, fs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error { + return true, fs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { request := &filer_pb.UpdateEntryRequest{ Directory: newParentPath, From daeb18be3060848214da34250c11062a6588e250 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Mon, 23 Mar 2020 01:45:43 -0700 Subject: [PATCH 0310/2432] scaffold: fix master.toml typo fix https://github.com/chrislusf/seaweedfs/issues/1247 --- weed/command/scaffold.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/weed/command/scaffold.go b/weed/command/scaffold.go index bcb4fc31e..b26ef758a 100644 --- a/weed/command/scaffold.go +++ b/weed/command/scaffold.go @@ -389,10 +389,10 @@ sequencer_etcd_urls = "http://127.0.0.1:2379" # 010 and 001 has two copies, count_2 # 011 has only 3 copies, count_3 [master.volume_growth] -count_1 = 7 # create 1 x 7 = 7 actual volumes -count_2 = 6 # create 2 x 6 = 12 actual volumes -count_3 = 3 # create 3 x 3 = 9 actual volumes -count_other = 1 # create n x 1 = n actual volumes +copy_1 = 7 # create 1 x 7 = 7 actual volumes +copy_2 = 6 # create 2 x 6 = 12 actual volumes +copy_3 = 3 # create 3 x 3 = 9 actual volumes +copy_other = 1 # create n x 1 = n actual volumes ` ) From 40601953bfb6cfc0d27aee6d5ce07c69eccc48ca Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Mon, 23 Mar 2020 02:54:49 -0700 Subject: [PATCH 0311/2432] comments --- weed/command/scaffold.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/weed/command/scaffold.go b/weed/command/scaffold.go index b26ef758a..9f119a638 100644 --- a/weed/command/scaffold.go +++ b/weed/command/scaffold.go @@ -385,9 +385,9 @@ sequencer_etcd_urls = "http://127.0.0.1:2379" # create this number of logical volumes if no more writable volumes # count_x means how many copies of data. 
# e.g.: -# 000 has only one copy, count_1 -# 010 and 001 has two copies, count_2 -# 011 has only 3 copies, count_3 +# 000 has only one copy, copy_1 +# 010 and 001 has two copies, copy_2 +# 011 has only 3 copies, copy_3 [master.volume_growth] copy_1 = 7 # create 1 x 7 = 7 actual volumes copy_2 = 6 # create 2 x 6 = 12 actual volumes From d151185b7e5c7912afc38a36c163ef87eedcde3d Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Mon, 23 Mar 2020 20:46:17 -0700 Subject: [PATCH 0312/2432] shell: desupport filer url in the arguments --- weed/shell/command_fs_cat.go | 1 - weed/shell/command_fs_cd.go | 6 +++--- weed/shell/command_fs_du.go | 6 +++--- weed/shell/command_fs_ls.go | 3 --- weed/shell/command_fs_meta_cat.go | 2 -- weed/shell/command_fs_tree.go | 3 ++- weed/shell/commands.go | 5 +++-- 7 files changed, 11 insertions(+), 15 deletions(-) diff --git a/weed/shell/command_fs_cat.go b/weed/shell/command_fs_cat.go index 1479aed95..b1d4eea14 100644 --- a/weed/shell/command_fs_cat.go +++ b/weed/shell/command_fs_cat.go @@ -25,7 +25,6 @@ func (c *commandFsCat) Help() string { return `stream the file content on to the screen fs.cat /dir/file_name - fs.cat http://:/dir/file_name ` } diff --git a/weed/shell/command_fs_cd.go b/weed/shell/command_fs_cd.go index df42cd516..377fd40f7 100644 --- a/weed/shell/command_fs_cd.go +++ b/weed/shell/command_fs_cd.go @@ -16,14 +16,14 @@ func (c *commandFsCd) Name() string { } func (c *commandFsCd) Help() string { - return `change directory to http://:/dir/ + return `change directory to a directory /path/to/dir The full path can be too long to type. For example, - fs.ls http://:/some/path/to/file_name + fs.ls /some/path/to/file_name can be simplified as - fs.cd http://:/some/path + fs.cd /some/path fs.ls to/file_name ` } diff --git a/weed/shell/command_fs_du.go b/weed/shell/command_fs_du.go index b7313bebe..0372ba95f 100644 --- a/weed/shell/command_fs_du.go +++ b/weed/shell/command_fs_du.go @@ -24,9 +24,9 @@ func (c *commandFsDu) Name() string { func (c *commandFsDu) Help() string { return `show disk usage - fs.du http://:/dir - fs.du http://:/dir/file_name - fs.du http://:/dir/file_prefix + fs.du /dir + fs.du /dir/file_name + fs.du /dir/file_prefix ` } diff --git a/weed/shell/command_fs_ls.go b/weed/shell/command_fs_ls.go index 4185d67a8..66adf057e 100644 --- a/weed/shell/command_fs_ls.go +++ b/weed/shell/command_fs_ls.go @@ -30,9 +30,6 @@ func (c *commandFsLs) Help() string { fs.ls [-l] [-a] /dir/ fs.ls [-l] [-a] /dir/file_name fs.ls [-l] [-a] /dir/file_prefix - fs.ls [-l] [-a] http://:/dir/ - fs.ls [-l] [-a] http://:/dir/file_name - fs.ls [-l] [-a] http://:/dir/file_prefix ` } diff --git a/weed/shell/command_fs_meta_cat.go b/weed/shell/command_fs_meta_cat.go index 9cbe852c0..cbbca746c 100644 --- a/weed/shell/command_fs_meta_cat.go +++ b/weed/shell/command_fs_meta_cat.go @@ -26,8 +26,6 @@ func (c *commandFsMetaCat) Help() string { fs.meta.cat /dir/ fs.meta.cat /dir/file_name - fs.meta.cat http://:/dir/ - fs.meta.cat http://:/dir/file_name ` } diff --git a/weed/shell/command_fs_tree.go b/weed/shell/command_fs_tree.go index d1f639cff..0982082db 100644 --- a/weed/shell/command_fs_tree.go +++ b/weed/shell/command_fs_tree.go @@ -23,7 +23,8 @@ func (c *commandFsTree) Name() string { func (c *commandFsTree) Help() string { return `recursively list all files under a directory - fs.tree http://:/dir/ + fs.tree /some/dir + ` } diff --git a/weed/shell/commands.go b/weed/shell/commands.go index 7ca631ab3..660929ec7 100644 --- a/weed/shell/commands.go +++ b/weed/shell/commands.go @@ 
-50,7 +50,8 @@ func NewCommandEnv(options ShellOptions) *CommandEnv { func (ce *CommandEnv) parseUrl(input string) (filerServer string, filerPort int64, path string, err error) { if strings.HasPrefix(input, "http") { - return parseFilerUrl(input) + err = fmt.Errorf("http://: prefix is not supported any more") + return } if !strings.HasPrefix(input, "/") { input = filepath.ToSlash(filepath.Join(ce.option.Directory, input)) @@ -101,7 +102,7 @@ func parseFilerUrl(entryPath string) (filerServer string, filerPort int64, path } path = u.Path } else { - err = fmt.Errorf("path should have full url http://:/path/to/dirOrFile : %s", entryPath) + err = fmt.Errorf("path should have full url /path/to/dirOrFile : %s", entryPath) } return } From e666aeece2778812a4a9d3fc4daaaac86fe4a412 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Mon, 23 Mar 2020 21:26:15 -0700 Subject: [PATCH 0313/2432] simplify parsing filer host and port --- weed/shell/command_bucket_create.go | 9 ++------- weed/shell/command_bucket_delete.go | 8 +++----- weed/shell/command_bucket_list.go | 8 +++----- weed/shell/command_fs_cat.go | 8 +++----- weed/shell/command_fs_cd.go | 10 ++-------- weed/shell/command_fs_du.go | 28 ++++++---------------------- weed/shell/command_fs_ls.go | 8 +++----- weed/shell/command_fs_meta_cat.go | 6 ++---- weed/shell/command_fs_meta_load.go | 9 ++------- weed/shell/command_fs_meta_notify.go | 4 ++-- weed/shell/command_fs_meta_save.go | 8 ++++---- weed/shell/command_fs_mv.go | 6 +++--- weed/shell/command_fs_tree.go | 4 ++-- weed/shell/commands.go | 27 +++++++-------------------- 14 files changed, 44 insertions(+), 99 deletions(-) diff --git a/weed/shell/command_bucket_create.go b/weed/shell/command_bucket_create.go index 3546528aa..52d96e4c3 100644 --- a/weed/shell/command_bucket_create.go +++ b/weed/shell/command_bucket_create.go @@ -43,16 +43,11 @@ func (c *commandBucketCreate) Do(args []string, commandEnv *CommandEnv, writer i return fmt.Errorf("empty bucket name") } - filerServer, filerPort, _, parseErr := commandEnv.parseUrl(findInputDirectory(bucketCommand.Args())) - if parseErr != nil { - return parseErr - } - - err = commandEnv.withFilerClient(filerServer, filerPort, func(client filer_pb.SeaweedFilerClient) error { + err = commandEnv.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { resp, err := client.GetFilerConfiguration(context.Background(), &filer_pb.GetFilerConfigurationRequest{}) if err != nil { - return fmt.Errorf("get filer %s:%d configuration: %v", filerServer, filerPort, err) + return fmt.Errorf("get filer configuration: %v", err) } filerBucketsPath := resp.DirBuckets diff --git a/weed/shell/command_bucket_delete.go b/weed/shell/command_bucket_delete.go index 509b3c1de..07c2e74ac 100644 --- a/weed/shell/command_bucket_delete.go +++ b/weed/shell/command_bucket_delete.go @@ -38,19 +38,17 @@ func (c *commandBucketDelete) Do(args []string, commandEnv *CommandEnv, writer i return fmt.Errorf("empty bucket name") } - filerServer, filerPort, _, parseErr := commandEnv.parseUrl(findInputDirectory(bucketCommand.Args())) + _, parseErr := commandEnv.parseUrl(findInputDirectory(bucketCommand.Args())) if parseErr != nil { return parseErr } - filerClient := commandEnv.getFilerClient(filerServer, filerPort) - var filerBucketsPath string - filerBucketsPath, err = readFilerBucketsPath(filerClient) + filerBucketsPath, err = readFilerBucketsPath(commandEnv) if err != nil { return fmt.Errorf("read buckets: %v", err) } - return filer_pb.Remove(filerClient, filerBucketsPath, *bucketName, false, true, 
true) + return filer_pb.Remove(commandEnv, filerBucketsPath, *bucketName, false, true, true) } diff --git a/weed/shell/command_bucket_list.go b/weed/shell/command_bucket_list.go index 486d40fba..b982ff646 100644 --- a/weed/shell/command_bucket_list.go +++ b/weed/shell/command_bucket_list.go @@ -34,20 +34,18 @@ func (c *commandBucketList) Do(args []string, commandEnv *CommandEnv, writer io. return nil } - filerServer, filerPort, _, parseErr := commandEnv.parseUrl(findInputDirectory(bucketCommand.Args())) + _, parseErr := commandEnv.parseUrl(findInputDirectory(bucketCommand.Args())) if parseErr != nil { return parseErr } - filerClient := commandEnv.getFilerClient(filerServer, filerPort) - var filerBucketsPath string - filerBucketsPath, err = readFilerBucketsPath(filerClient) + filerBucketsPath, err = readFilerBucketsPath(commandEnv) if err != nil { return fmt.Errorf("read buckets: %v", err) } - err = filer_pb.List(filerClient, filerBucketsPath, "", func(entry *filer_pb.Entry, isLast bool) { + err = filer_pb.List(commandEnv, filerBucketsPath, "", func(entry *filer_pb.Entry, isLast bool) { if entry.Attributes.Replication == "" || entry.Attributes.Replication == "000" { fmt.Fprintf(writer, " %s\n", entry.Name) } else { diff --git a/weed/shell/command_fs_cat.go b/weed/shell/command_fs_cat.go index b1d4eea14..7177d8ac3 100644 --- a/weed/shell/command_fs_cat.go +++ b/weed/shell/command_fs_cat.go @@ -30,20 +30,18 @@ func (c *commandFsCat) Help() string { func (c *commandFsCat) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) { - input := findInputDirectory(args) - - filerServer, filerPort, path, err := commandEnv.parseUrl(input) + path, err := commandEnv.parseUrl(findInputDirectory(args)) if err != nil { return err } - if commandEnv.isDirectory(filerServer, filerPort, path) { + if commandEnv.isDirectory(path) { return fmt.Errorf("%s is a directory", path) } dir, name := util.FullPath(path).DirAndName() - return commandEnv.withFilerClient(filerServer, filerPort, func(client filer_pb.SeaweedFilerClient) error { + return commandEnv.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { request := &filer_pb.LookupDirectoryEntryRequest{ Name: name, diff --git a/weed/shell/command_fs_cd.go b/weed/shell/command_fs_cd.go index 377fd40f7..2cc28f7a2 100644 --- a/weed/shell/command_fs_cd.go +++ b/weed/shell/command_fs_cd.go @@ -30,25 +30,19 @@ func (c *commandFsCd) Help() string { func (c *commandFsCd) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) { - input := findInputDirectory(args) - - filerServer, filerPort, path, err := commandEnv.parseUrl(input) + path, err := commandEnv.parseUrl(findInputDirectory(args)) if err != nil { return err } if path == "/" { - commandEnv.option.FilerHost = filerServer - commandEnv.option.FilerPort = filerPort commandEnv.option.Directory = "/" return nil } - err = commandEnv.checkDirectory(filerServer, filerPort, path) + err = commandEnv.checkDirectory(path) if err == nil { - commandEnv.option.FilerHost = filerServer - commandEnv.option.FilerPort = filerPort commandEnv.option.Directory = path } diff --git a/weed/shell/command_fs_du.go b/weed/shell/command_fs_du.go index 0372ba95f..f3d479614 100644 --- a/weed/shell/command_fs_du.go +++ b/weed/shell/command_fs_du.go @@ -32,18 +32,18 @@ func (c *commandFsDu) Help() string { func (c *commandFsDu) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) { - filerServer, filerPort, path, err := commandEnv.parseUrl(findInputDirectory(args)) + path, err := 
commandEnv.parseUrl(findInputDirectory(args)) if err != nil { return err } - if commandEnv.isDirectory(filerServer, filerPort, path) { + if commandEnv.isDirectory(path) { path = path + "/" } var blockCount, byteCount uint64 dir, name := util.FullPath(path).DirAndName() - blockCount, byteCount, err = duTraverseDirectory(writer, commandEnv.getFilerClient(filerServer, filerPort), dir, name) + blockCount, byteCount, err = duTraverseDirectory(writer, commandEnv, dir, name) if name == "" && err == nil { fmt.Fprintf(writer, "block:%4d\tbyte:%10d\t%s\n", blockCount, byteCount, dir) @@ -78,29 +78,13 @@ func duTraverseDirectory(writer io.Writer, filerClient filer_pb.FilerClient, dir return } -func (env *CommandEnv) withFilerClient(filerServer string, filerPort int64, fn func(filer_pb.SeaweedFilerClient) error) error { +func (env *CommandEnv) WithFilerClient(fn func(filer_pb.SeaweedFilerClient) error) error { - filerGrpcAddress := fmt.Sprintf("%s:%d", filerServer, filerPort+10000) + filerGrpcAddress := fmt.Sprintf("%s:%d", env.option.FilerHost, env.option.FilerPort+10000) return pb.WithGrpcFilerClient(filerGrpcAddress, env.option.GrpcDialOption, fn) } -type commandFilerClient struct { - env *CommandEnv - filerServer string - filerPort int64 -} - -func (env *CommandEnv) getFilerClient(filerServer string, filerPort int64) *commandFilerClient { - return &commandFilerClient{ - env: env, - filerServer: filerServer, - filerPort: filerPort, - } -} -func (c *commandFilerClient) WithFilerClient(fn func(filer_pb.SeaweedFilerClient) error) error { - return c.env.withFilerClient(c.filerServer, c.filerPort, fn) -} -func (c *commandFilerClient) AdjustedUrl(hostAndPort string) string { +func (env *CommandEnv) AdjustedUrl(hostAndPort string) string { return hostAndPort } diff --git a/weed/shell/command_fs_ls.go b/weed/shell/command_fs_ls.go index 66adf057e..be531e980 100644 --- a/weed/shell/command_fs_ls.go +++ b/weed/shell/command_fs_ls.go @@ -50,21 +50,19 @@ func (c *commandFsLs) Do(args []string, commandEnv *CommandEnv, writer io.Writer } } - input := findInputDirectory(args) - - filerServer, filerPort, path, err := commandEnv.parseUrl(input) + path, err := commandEnv.parseUrl(findInputDirectory(args)) if err != nil { return err } - if commandEnv.isDirectory(filerServer, filerPort, path) { + if commandEnv.isDirectory(path) { path = path + "/" } dir, name := util.FullPath(path).DirAndName() entryCount := 0 - err = filer_pb.ReadDirAllEntries(commandEnv.getFilerClient(filerServer, filerPort), util.FullPath(dir), name, func(entry *filer_pb.Entry, isLast bool) { + err = filer_pb.ReadDirAllEntries(commandEnv, util.FullPath(dir), name, func(entry *filer_pb.Entry, isLast bool) { if !showHidden && strings.HasPrefix(entry.Name, ".") { return diff --git a/weed/shell/command_fs_meta_cat.go b/weed/shell/command_fs_meta_cat.go index cbbca746c..0679ec075 100644 --- a/weed/shell/command_fs_meta_cat.go +++ b/weed/shell/command_fs_meta_cat.go @@ -31,16 +31,14 @@ func (c *commandFsMetaCat) Help() string { func (c *commandFsMetaCat) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) { - input := findInputDirectory(args) - - filerServer, filerPort, path, err := commandEnv.parseUrl(input) + path, err := commandEnv.parseUrl(findInputDirectory(args)) if err != nil { return err } dir, name := util.FullPath(path).DirAndName() - return commandEnv.withFilerClient(filerServer, filerPort, func(client filer_pb.SeaweedFilerClient) error { + return commandEnv.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { 
request := &filer_pb.LookupDirectoryEntryRequest{ Name: name, diff --git a/weed/shell/command_fs_meta_load.go b/weed/shell/command_fs_meta_load.go index a19e9d3ce..69ae9454c 100644 --- a/weed/shell/command_fs_meta_load.go +++ b/weed/shell/command_fs_meta_load.go @@ -37,11 +37,6 @@ func (c *commandFsMetaLoad) Do(args []string, commandEnv *CommandEnv, writer io. return nil } - filerServer, filerPort, path, err := commandEnv.parseUrl(findInputDirectory(nil)) - if err != nil { - return err - } - fileName := args[len(args)-1] dst, err := os.OpenFile(fileName, os.O_RDONLY, 0644) @@ -52,7 +47,7 @@ func (c *commandFsMetaLoad) Do(args []string, commandEnv *CommandEnv, writer io. var dirCount, fileCount uint64 - err = commandEnv.withFilerClient(filerServer, filerPort, func(client filer_pb.SeaweedFilerClient) error { + err = commandEnv.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { sizeBuf := make([]byte, 4) @@ -98,7 +93,7 @@ func (c *commandFsMetaLoad) Do(args []string, commandEnv *CommandEnv, writer io. if err == nil { fmt.Fprintf(writer, "\ntotal %d directories, %d files", dirCount, fileCount) - fmt.Fprintf(writer, "\n%s is loaded to http://%s:%d%s\n", fileName, filerServer, filerPort, path) + fmt.Fprintf(writer, "\n%s is loaded.\n", fileName) } return err diff --git a/weed/shell/command_fs_meta_notify.go b/weed/shell/command_fs_meta_notify.go index 995ea16a2..b361b61a6 100644 --- a/weed/shell/command_fs_meta_notify.go +++ b/weed/shell/command_fs_meta_notify.go @@ -32,7 +32,7 @@ func (c *commandFsMetaNotify) Help() string { func (c *commandFsMetaNotify) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) { - filerServer, filerPort, path, err := commandEnv.parseUrl(findInputDirectory(args)) + path, err := commandEnv.parseUrl(findInputDirectory(args)) if err != nil { return err } @@ -43,7 +43,7 @@ func (c *commandFsMetaNotify) Do(args []string, commandEnv *CommandEnv, writer i var dirCount, fileCount uint64 - err = doTraverseBFS(writer, commandEnv.getFilerClient(filerServer, filerPort), util.FullPath(path), func(parentPath util.FullPath, entry *filer_pb.Entry) { + err = doTraverseBFS(writer, commandEnv, util.FullPath(path), func(parentPath util.FullPath, entry *filer_pb.Entry) { if entry.IsDirectory { dirCount++ diff --git a/weed/shell/command_fs_meta_save.go b/weed/shell/command_fs_meta_save.go index 4314542bd..a07a94ccb 100644 --- a/weed/shell/command_fs_meta_save.go +++ b/weed/shell/command_fs_meta_save.go @@ -52,7 +52,7 @@ func (c *commandFsMetaSave) Do(args []string, commandEnv *CommandEnv, writer io. return nil } - filerServer, filerPort, path, parseErr := commandEnv.parseUrl(findInputDirectory(fsMetaSaveCommand.Args())) + path, parseErr := commandEnv.parseUrl(findInputDirectory(fsMetaSaveCommand.Args())) if parseErr != nil { return parseErr } @@ -61,7 +61,7 @@ func (c *commandFsMetaSave) Do(args []string, commandEnv *CommandEnv, writer io. fileName := *outputFileName if fileName == "" { fileName = fmt.Sprintf("%s-%d-%4d%02d%02d-%02d%02d%02d.meta", - filerServer, filerPort, t.Year(), t.Month(), t.Day(), t.Hour(), t.Minute(), t.Second()) + commandEnv.option.FilerHost, commandEnv.option.FilerPort, t.Year(), t.Month(), t.Day(), t.Hour(), t.Minute(), t.Second()) } dst, openErr := os.OpenFile(fileName, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644) @@ -85,7 +85,7 @@ func (c *commandFsMetaSave) Do(args []string, commandEnv *CommandEnv, writer io. 
var dirCount, fileCount uint64 - err = doTraverseBFS(writer, commandEnv.getFilerClient(filerServer, filerPort), util.FullPath(path), func(parentPath util.FullPath, entry *filer_pb.Entry) { + err = doTraverseBFS(writer, commandEnv, util.FullPath(path), func(parentPath util.FullPath, entry *filer_pb.Entry) { protoMessage := &filer_pb.FullEntry{ Dir: string(parentPath), @@ -118,7 +118,7 @@ func (c *commandFsMetaSave) Do(args []string, commandEnv *CommandEnv, writer io. if err == nil { fmt.Fprintf(writer, "total %d directories, %d files\n", dirCount, fileCount) - fmt.Fprintf(writer, "meta data for http://%s:%d%s is saved to %s\n", filerServer, filerPort, path, fileName) + fmt.Fprintf(writer, "meta data for %s is saved to %s\n", path, fileName) } return err diff --git a/weed/shell/command_fs_mv.go b/weed/shell/command_fs_mv.go index 148ac6e2f..c7c0984fc 100644 --- a/weed/shell/command_fs_mv.go +++ b/weed/shell/command_fs_mv.go @@ -37,12 +37,12 @@ func (c *commandFsMv) Help() string { func (c *commandFsMv) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) { - filerServer, filerPort, sourcePath, err := commandEnv.parseUrl(args[0]) + sourcePath, err := commandEnv.parseUrl(args[0]) if err != nil { return err } - _, _, destinationPath, err := commandEnv.parseUrl(args[1]) + destinationPath, err := commandEnv.parseUrl(args[1]) if err != nil { return err } @@ -51,7 +51,7 @@ func (c *commandFsMv) Do(args []string, commandEnv *CommandEnv, writer io.Writer destinationDir, destinationName := util.FullPath(destinationPath).DirAndName() - return commandEnv.withFilerClient(filerServer, filerPort, func(client filer_pb.SeaweedFilerClient) error { + return commandEnv.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { // collect destination entry info destinationRequest := &filer_pb.LookupDirectoryEntryRequest{ diff --git a/weed/shell/command_fs_tree.go b/weed/shell/command_fs_tree.go index 0982082db..b0752ea03 100644 --- a/weed/shell/command_fs_tree.go +++ b/weed/shell/command_fs_tree.go @@ -30,14 +30,14 @@ func (c *commandFsTree) Help() string { func (c *commandFsTree) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) { - filerServer, filerPort, path, err := commandEnv.parseUrl(findInputDirectory(args)) + path, err := commandEnv.parseUrl(findInputDirectory(args)) if err != nil { return err } dir, name := util.FullPath(path).DirAndName() - dirCount, fCount, terr := treeTraverseDirectory(writer, commandEnv.getFilerClient(filerServer, filerPort), util.FullPath(dir), name, newPrefix(), -1) + dirCount, fCount, terr := treeTraverseDirectory(writer, commandEnv, util.FullPath(dir), name, newPrefix(), -1) if terr == nil { fmt.Fprintf(writer, "%d directories, %d files\n", dirCount, fCount) diff --git a/weed/shell/commands.go b/weed/shell/commands.go index 660929ec7..7e240de5f 100644 --- a/weed/shell/commands.go +++ b/weed/shell/commands.go @@ -48,7 +48,7 @@ func NewCommandEnv(options ShellOptions) *CommandEnv { } } -func (ce *CommandEnv) parseUrl(input string) (filerServer string, filerPort int64, path string, err error) { +func (ce *CommandEnv) parseUrl(input string) (path string, err error) { if strings.HasPrefix(input, "http") { err = fmt.Errorf("http://: prefix is not supported any more") return @@ -56,35 +56,22 @@ func (ce *CommandEnv) parseUrl(input string) (filerServer string, filerPort int6 if !strings.HasPrefix(input, "/") { input = filepath.ToSlash(filepath.Join(ce.option.Directory, input)) } - return ce.option.FilerHost, ce.option.FilerPort, input, err 
+ return input, err } -func (ce *CommandEnv) isDirectory(filerServer string, filerPort int64, path string) bool { +func (ce *CommandEnv) isDirectory(path string) bool { - return ce.checkDirectory(filerServer, filerPort, path) == nil + return ce.checkDirectory(path) == nil } -func (ce *CommandEnv) checkDirectory(filerServer string, filerPort int64, path string) error { +func (ce *CommandEnv) checkDirectory(path string) error { dir, name := util.FullPath(path).DirAndName() - return ce.withFilerClient(filerServer, filerPort, func(client filer_pb.SeaweedFilerClient) error { + _, err := filer_pb.Exists(ce, dir, name, true) - resp, lookupErr := filer_pb.LookupEntry(client, &filer_pb.LookupDirectoryEntryRequest{ - Directory: dir, - Name: name, - }) - if lookupErr != nil { - return lookupErr - } - - if !resp.Entry.IsDirectory { - return fmt.Errorf("not a directory") - } - - return nil - }) + return err } From b51fa81f0e7cd7ab21f3683f356b0380f9dce75b Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Mon, 23 Mar 2020 21:36:39 -0700 Subject: [PATCH 0314/2432] fix directory checking --- weed/shell/commands.go | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/weed/shell/commands.go b/weed/shell/commands.go index 7e240de5f..6e40380e0 100644 --- a/weed/shell/commands.go +++ b/weed/shell/commands.go @@ -69,7 +69,11 @@ func (ce *CommandEnv) checkDirectory(path string) error { dir, name := util.FullPath(path).DirAndName() - _, err := filer_pb.Exists(ce, dir, name, true) + exists, err := filer_pb.Exists(ce, dir, name, true) + + if !exists { + return fmt.Errorf("%s is not a directory", path) + } return err From 38e73463f1c8c7d7dc226ea41679407cb66101d4 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Mon, 23 Mar 2020 21:37:04 -0700 Subject: [PATCH 0315/2432] fix du block and byte couting --- weed/shell/command_fs_du.go | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/weed/shell/command_fs_du.go b/weed/shell/command_fs_du.go index f3d479614..dc5c3dec0 100644 --- a/weed/shell/command_fs_du.go +++ b/weed/shell/command_fs_du.go @@ -56,6 +56,9 @@ func (c *commandFsDu) Do(args []string, commandEnv *CommandEnv, writer io.Writer func duTraverseDirectory(writer io.Writer, filerClient filer_pb.FilerClient, dir, name string) (blockCount, byteCount uint64, err error) { err = filer_pb.ReadDirAllEntries(filerClient, util.FullPath(dir), name, func(entry *filer_pb.Entry, isLast bool) { + + var fileBlockCount, fileByteCount uint64 + if entry.IsDirectory { subDir := fmt.Sprintf("%s/%s", dir, entry.Name) if dir == "/" { @@ -67,12 +70,14 @@ func duTraverseDirectory(writer io.Writer, filerClient filer_pb.FilerClient, dir byteCount += numByte } } else { + fileBlockCount = uint64(len(entry.Chunks)) + fileByteCount = filer2.TotalSize(entry.Chunks) blockCount += uint64(len(entry.Chunks)) byteCount += filer2.TotalSize(entry.Chunks) } if name != "" && !entry.IsDirectory { - fmt.Fprintf(writer, "block:%4d\tbyte:%10d\t%s/%s\n", blockCount, byteCount, dir, name) + fmt.Fprintf(writer, "block:%4d\tbyte:%10d\t%s/%s\n", fileBlockCount, fileByteCount, dir, entry.Name) } }) return From 782d776d2a63c8c403f7fa52e97154f146467610 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Mon, 23 Mar 2020 22:54:02 -0700 Subject: [PATCH 0316/2432] refactoring --- weed/shell/command_fs_du.go | 12 ------------ weed/shell/commands.go | 13 +++++++++++++ 2 files changed, 13 insertions(+), 12 deletions(-) diff --git a/weed/shell/command_fs_du.go b/weed/shell/command_fs_du.go index dc5c3dec0..08c553e7c 100644 --- 
a/weed/shell/command_fs_du.go +++ b/weed/shell/command_fs_du.go @@ -5,7 +5,6 @@ import ( "io" "github.com/chrislusf/seaweedfs/weed/filer2" - "github.com/chrislusf/seaweedfs/weed/pb" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" "github.com/chrislusf/seaweedfs/weed/util" ) @@ -82,14 +81,3 @@ func duTraverseDirectory(writer io.Writer, filerClient filer_pb.FilerClient, dir }) return } - -func (env *CommandEnv) WithFilerClient(fn func(filer_pb.SeaweedFilerClient) error) error { - - filerGrpcAddress := fmt.Sprintf("%s:%d", env.option.FilerHost, env.option.FilerPort+10000) - return pb.WithGrpcFilerClient(filerGrpcAddress, env.option.GrpcDialOption, fn) - -} - -func (env *CommandEnv) AdjustedUrl(hostAndPort string) string { - return hostAndPort -} diff --git a/weed/shell/commands.go b/weed/shell/commands.go index 6e40380e0..31136951e 100644 --- a/weed/shell/commands.go +++ b/weed/shell/commands.go @@ -10,6 +10,7 @@ import ( "google.golang.org/grpc" + "github.com/chrislusf/seaweedfs/weed/pb" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" "github.com/chrislusf/seaweedfs/weed/util" "github.com/chrislusf/seaweedfs/weed/wdclient" @@ -79,6 +80,18 @@ func (ce *CommandEnv) checkDirectory(path string) error { } +func (ce *CommandEnv) WithFilerClient(fn func(filer_pb.SeaweedFilerClient) error) error { + + filerGrpcAddress := fmt.Sprintf("%s:%d", ce.option.FilerHost, ce.option.FilerPort+10000) + return pb.WithGrpcFilerClient(filerGrpcAddress, ce.option.GrpcDialOption, fn) + +} + +func (ce *CommandEnv) AdjustedUrl(hostAndPort string) string { + return hostAndPort +} + + func parseFilerUrl(entryPath string) (filerServer string, filerPort int64, path string, err error) { if strings.HasPrefix(entryPath, "http") { var u *url.URL From 8047ec2f5103696982acd6c6407ce77c1b02b89e Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Mon, 23 Mar 2020 22:54:46 -0700 Subject: [PATCH 0317/2432] shell: fs.meta.save add option to export all fileIds for all files --- weed/shell/command_fs_meta_notify.go | 2 +- weed/shell/command_fs_meta_save.go | 78 ++++++++++++++++++++-------- 2 files changed, 58 insertions(+), 22 deletions(-) diff --git a/weed/shell/command_fs_meta_notify.go b/weed/shell/command_fs_meta_notify.go index b361b61a6..56e63e98f 100644 --- a/weed/shell/command_fs_meta_notify.go +++ b/weed/shell/command_fs_meta_notify.go @@ -43,7 +43,7 @@ func (c *commandFsMetaNotify) Do(args []string, commandEnv *CommandEnv, writer i var dirCount, fileCount uint64 - err = doTraverseBFS(writer, commandEnv, util.FullPath(path), func(parentPath util.FullPath, entry *filer_pb.Entry) { + err = doTraverseBfs(writer, commandEnv, util.FullPath(path), func(parentPath util.FullPath, entry *filer_pb.Entry) { if entry.IsDirectory { dirCount++ diff --git a/weed/shell/command_fs_meta_save.go b/weed/shell/command_fs_meta_save.go index a07a94ccb..5ea69026f 100644 --- a/weed/shell/command_fs_meta_save.go +++ b/weed/shell/command_fs_meta_save.go @@ -48,6 +48,7 @@ func (c *commandFsMetaSave) Do(args []string, commandEnv *CommandEnv, writer io. fsMetaSaveCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError) verbose := fsMetaSaveCommand.Bool("v", false, "print out each processed files") outputFileName := fsMetaSaveCommand.String("o", "", "output the meta data to this file") + chunksFileName := fsMetaSaveCommand.String("chunks", "", "output all the chunks to this file") if err = fsMetaSaveCommand.Parse(args); err != nil { return nil } @@ -57,13 +58,58 @@ func (c *commandFsMetaSave) Do(args []string, commandEnv *CommandEnv, writer io. 
return parseErr } - t := time.Now() - fileName := *outputFileName - if fileName == "" { - fileName = fmt.Sprintf("%s-%d-%4d%02d%02d-%02d%02d%02d.meta", - commandEnv.option.FilerHost, commandEnv.option.FilerPort, t.Year(), t.Month(), t.Day(), t.Hour(), t.Minute(), t.Second()) + if *outputFileName != "" { + fileName := *outputFileName + if fileName == "" { + t := time.Now() + fileName = fmt.Sprintf("%s-%d-%4d%02d%02d-%02d%02d%02d.meta", + commandEnv.option.FilerHost, commandEnv.option.FilerPort, t.Year(), t.Month(), t.Day(), t.Hour(), t.Minute(), t.Second()) + } + return doTraverseBfsAndSaving(fileName, commandEnv, writer, path, *verbose, func(dst io.Writer, outputChan chan []byte) { + sizeBuf := make([]byte, 4) + for b := range outputChan { + util.Uint32toBytes(sizeBuf, uint32(len(b))) + dst.Write(sizeBuf) + dst.Write(b) + } + }, func(entry *filer_pb.FullEntry, outputChan chan []byte) (err error) { + bytes, err := proto.Marshal(entry) + if err != nil { + fmt.Fprintf(writer, "marshall error: %v\n", err) + return + } + + outputChan <- bytes + return nil + }) + } + + if *chunksFileName != "" { + return doTraverseBfsAndSaving(*chunksFileName, commandEnv, writer, path, *verbose, func(dst io.Writer, outputChan chan []byte) { + for b := range outputChan { + dst.Write(b) + } + }, func(entry *filer_pb.FullEntry, outputChan chan []byte) (err error) { + for _, chunk := range entry.Entry.Chunks { + dir := entry.Dir + if dir == "/" { + dir = "" + } + outputLine := fmt.Sprintf("%d\t%s\t%s/%s\n", chunk.Fid.FileKey, chunk.FileId, dir, entry.Entry.Name) + outputChan <- []byte(outputLine) + } + return nil + }) } + return err + +} + +func doTraverseBfsAndSaving(fileName string, commandEnv *CommandEnv, writer io.Writer, path string, verbose bool, + saveFn func(dst io.Writer, outputChan chan []byte), + genFn func(entry *filer_pb.FullEntry, outputChan chan []byte) error) error { + dst, openErr := os.OpenFile(fileName, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644) if openErr != nil { return fmt.Errorf("failed to create file %s: %v", fileName, openErr) @@ -74,39 +120,31 @@ func (c *commandFsMetaSave) Do(args []string, commandEnv *CommandEnv, writer io. wg.Add(1) outputChan := make(chan []byte, 1024) go func() { - sizeBuf := make([]byte, 4) - for b := range outputChan { - util.Uint32toBytes(sizeBuf, uint32(len(b))) - dst.Write(sizeBuf) - dst.Write(b) - } + saveFn(dst, outputChan) wg.Done() }() var dirCount, fileCount uint64 - err = doTraverseBFS(writer, commandEnv, util.FullPath(path), func(parentPath util.FullPath, entry *filer_pb.Entry) { + err := doTraverseBfs(writer, commandEnv, util.FullPath(path), func(parentPath util.FullPath, entry *filer_pb.Entry) { protoMessage := &filer_pb.FullEntry{ Dir: string(parentPath), Entry: entry, } - bytes, err := proto.Marshal(protoMessage) - if err != nil { + if err := genFn(protoMessage, outputChan); err != nil { fmt.Fprintf(writer, "marshall error: %v\n", err) return } - outputChan <- bytes - if entry.IsDirectory { atomic.AddUint64(&dirCount, 1) } else { atomic.AddUint64(&fileCount, 1) } - if *verbose { + if verbose { println(parentPath.Child(entry.Name)) } @@ -118,13 +156,11 @@ func (c *commandFsMetaSave) Do(args []string, commandEnv *CommandEnv, writer io. 
if err == nil { fmt.Fprintf(writer, "total %d directories, %d files\n", dirCount, fileCount) - fmt.Fprintf(writer, "meta data for %s is saved to %s\n", path, fileName) } - return err - } -func doTraverseBFS(writer io.Writer, filerClient filer_pb.FilerClient, parentPath util.FullPath, fn func(parentPath util.FullPath, entry *filer_pb.Entry)) (err error) { + +func doTraverseBfs(writer io.Writer, filerClient filer_pb.FilerClient, parentPath util.FullPath, fn func(parentPath util.FullPath, entry *filer_pb.Entry)) (err error) { K := 5 From bb9b97e2b4ad7b826b21b03a648527e32bc744f2 Mon Sep 17 00:00:00 2001 From: Chris Lu Date: Mon, 23 Mar 2020 23:07:11 -0700 Subject: [PATCH 0318/2432] add comments --- weed/shell/command_fs_meta_save.go | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/weed/shell/command_fs_meta_save.go b/weed/shell/command_fs_meta_save.go index 5ea69026f..f1628973a 100644 --- a/weed/shell/command_fs_meta_save.go +++ b/weed/shell/command_fs_meta_save.go @@ -38,7 +38,14 @@ func (c *commandFsMetaSave) Help() string { The meta data will be saved into a local --