diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md index 985bd3781..53423f4b7 100644 --- a/.github/ISSUE_TEMPLATE/bug_report.md +++ b/.github/ISSUE_TEMPLATE/bug_report.md @@ -1,16 +1,28 @@ --- name: Bug report about: Create a report to help us improve +title: '' +labels: '' +assignees: '' --- Sponsors SeaweedFS via Patreon https://www.patreon.com/seaweedfs +Report issues here. Ask questions here https://stackoverflow.com/questions/tagged/seaweedfs + +example of a good issue report: +https://github.com/chrislusf/seaweedfs/issues/1005 +example of a bad issue report: +https://github.com/chrislusf/seaweedfs/issues/1008 **Describe the bug** A clear and concise description of what the bug is. **System Setup** -List the command line to start "weed master", "weed volume", "weed filer", "weed s3", "weed mount". +- List the command line to start "weed master", "weed volume", "weed filer", "weed s3", "weed mount". +- OS version +- output of `weed version` +- if using filer, show the content of `filer.toml` **Expected behavior** A clear and concise description of what you expected to happen. diff --git a/.github/workflows/go.yml b/.github/workflows/go.yml new file mode 100644 index 000000000..00781fcf6 --- /dev/null +++ b/.github/workflows/go.yml @@ -0,0 +1,37 @@ +name: Go + +on: + push: + branches: [ master ] + pull_request: + branches: [ master ] + +jobs: + + build: + name: Build + runs-on: ubuntu-latest + steps: + + - name: Set up Go 1.x + uses: actions/setup-go@v2 + with: + go-version: ^1.13 + id: go + + - name: Check out code into the Go module directory + uses: actions/checkout@v2 + + - name: Get dependencies + run: | + cd weed; go get -v -t -d ./... + if [ -f Gopkg.toml ]; then + curl https://raw.githubusercontent.com/golang/dep/master/install.sh | sh + dep ensure + fi + + - name: Build + run: cd weed; go build -v . + + - name: Test + run: cd weed; go test -v . 
diff --git a/.gitignore b/.gitignore index a56dfb8a3..671b01051 100644 --- a/.gitignore +++ b/.gitignore @@ -80,3 +80,6 @@ build target *.class other/java/hdfs/dependency-reduced-pom.xml + +# binary file +weed/weed diff --git a/.travis.yml b/.travis.yml index f445bff68..bad4a77f1 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,18 +1,19 @@ sudo: false language: go go: -- 1.10.x -- 1.11.x -- tip + - 1.12.x + - 1.13.x + - 1.14.x before_install: -- export PATH=/home/travis/gopath/bin:$PATH + - export PATH=/home/travis/gopath/bin:$PATH install: -- go get ./weed/... + - export CGO_ENABLED="0" + - go env script: -- go test ./weed/... + - env GO111MODULE=on go test ./weed/... before_deploy: - make release @@ -22,23 +23,26 @@ deploy: api_key: secure: ERL986+ncQ8lwAJUYDrQ8s2/FxF/cyNIwJIFCqspnWxQgGNNyokET9HapmlPSxjpFRF0q6L2WCg9OY3mSVRq4oI6hg1igOQ12KlLyN71XSJ3c8w0Ay5ho48TQ9l3f3Iu97mntBCe9l0R9pnT8wj1VI8YJxloXwUMG2yeTjA9aBI= file: - - build/linux_arm.tar.gz - - build/linux_arm64.tar.gz - - build/linux_386.tar.gz - - build/linux_amd64.tar.gz - - build/darwin_amd64.tar.gz - - build/windows_386.zip - - build/windows_amd64.zip - - build/freebsd_arm.tar.gz - - build/freebsd_amd64.tar.gz - - build/freebsd_386.tar.gz - - build/netbsd_arm.tar.gz - - build/netbsd_amd64.tar.gz - - build/netbsd_386.tar.gz - - build/openbsd_arm.tar.gz - - build/openbsd_amd64.tar.gz - - build/openbsd_386.tar.gz + - build/linux_arm.tar.gz + - build/linux_arm64.tar.gz + - build/linux_386.tar.gz + - build/linux_amd64.tar.gz + - build/linux_amd64_large_disk.tar.gz + - build/darwin_amd64.tar.gz + - build/darwin_amd64_large_disk.tar.gz + - build/windows_386.zip + - build/windows_amd64.zip + - build/windows_amd64_large_disk.zip + - build/freebsd_arm.tar.gz + - build/freebsd_amd64.tar.gz + - build/freebsd_386.tar.gz + - build/netbsd_arm.tar.gz + - build/netbsd_amd64.tar.gz + - build/netbsd_386.tar.gz + - build/openbsd_arm.tar.gz + - build/openbsd_amd64.tar.gz + - build/openbsd_386.tar.gz on: tags: true repo: 
chrislusf/seaweedfs - go: tip + go: 1.14.x diff --git a/Makefile b/Makefile index 9357b2a03..59a3c46a2 100644 --- a/Makefile +++ b/Makefile @@ -8,10 +8,16 @@ appname := weed sources := $(wildcard *.go) -build = CGO_ENABLED=0 GOOS=$(1) GOARCH=$(2) go build -ldflags "-extldflags -static" -o build/$(appname)$(3) $(SOURCE_DIR) +COMMIT ?= $(shell git rev-parse --short HEAD) +LDFLAGS ?= -X github.com/chrislusf/seaweedfs/weed/util.COMMIT=${COMMIT} + +build = CGO_ENABLED=0 GOOS=$(1) GOARCH=$(2) go build -ldflags "-extldflags -static $(LDFLAGS)" -o build/$(appname)$(3) $(SOURCE_DIR) tar = cd build && tar -cvzf $(1)_$(2).tar.gz $(appname)$(3) && rm $(appname)$(3) zip = cd build && zip $(1)_$(2).zip $(appname)$(3) && rm $(appname)$(3) +build_large = CGO_ENABLED=0 GOOS=$(1) GOARCH=$(2) go build -tags 5BytesOffset -ldflags "-extldflags -static $(LDFLAGS)" -o build/$(appname)$(3) $(SOURCE_DIR) +tar_large = cd build && tar -cvzf $(1)_$(2)_large_disk.tar.gz $(appname)$(3) && rm $(appname)$(3) +zip_large = cd build && zip $(1)_$(2)_large_disk.zip $(appname)$(3) && rm $(appname)$(3) all: build @@ -24,17 +30,31 @@ clean: deps: go get $(GO_FLAGS) -d $(SOURCE_DIR) + rm -rf /home/travis/gopath/src/github.com/coreos/etcd/vendor/golang.org/x/net/trace + rm -rf /home/travis/gopath/src/go.etcd.io/etcd/vendor/golang.org/x/net/trace build: deps - go build $(GO_FLAGS) -o $(BINARY) $(SOURCE_DIR) + go build $(GO_FLAGS) -ldflags "$(LDFLAGS)" -o $(BINARY) $(SOURCE_DIR) linux: deps mkdir -p linux - GOOS=linux GOARCH=amd64 go build $(GO_FLAGS) -o linux/$(BINARY) $(SOURCE_DIR) + GOOS=linux GOARCH=amd64 go build $(GO_FLAGS) -ldflags "$(LDFLAGS)" -o linux/$(BINARY) $(SOURCE_DIR) -release: deps windows_build darwin_build linux_build bsd_build +release: deps windows_build darwin_build linux_build bsd_build 5_byte_linux_build 5_byte_darwin_build 5_byte_windows_build ##### LINUX BUILDS ##### +5_byte_linux_build: + $(call build_large,linux,amd64,) + $(call tar_large,linux,amd64) + +5_byte_darwin_build: + 
$(call build_large,darwin,amd64,) + $(call tar_large,darwin,amd64) + +5_byte_windows_build: + $(call build_large,windows,amd64,.exe) + $(call zip_large,windows,amd64,.exe) + linux_build: build/linux_arm.tar.gz build/linux_arm64.tar.gz build/linux_386.tar.gz build/linux_amd64.tar.gz build/linux_386.tar.gz: $(sources) diff --git a/README.md b/README.md index fc79ec8dd..684665c84 100644 --- a/README.md +++ b/README.md @@ -4,24 +4,22 @@ [![Build Status](https://travis-ci.org/chrislusf/seaweedfs.svg?branch=master)](https://travis-ci.org/chrislusf/seaweedfs) [![GoDoc](https://godoc.org/github.com/chrislusf/seaweedfs/weed?status.svg)](https://godoc.org/github.com/chrislusf/seaweedfs/weed) [![Wiki](https://img.shields.io/badge/docs-wiki-blue.svg)](https://github.com/chrislusf/seaweedfs/wiki) -[![Docker Pulls](https://img.shields.io/docker/pulls/chrislusf/seaweedfs.svg?maxAge=604800)](https://hub.docker.com/r/chrislusf/seaweedfs/) +[![Docker Pulls](https://img.shields.io/docker/pulls/chrislusf/seaweedfs.svg?maxAge=86400)](https://hub.docker.com/r/chrislusf/seaweedfs/) ![SeaweedFS Logo](https://raw.githubusercontent.com/chrislusf/seaweedfs/master/note/seaweedfs.png)

Supporting SeaweedFS

-SeaweedFS is an independent Apache-licensed open source project with its ongoing development made -possible entirely thanks to the support of these awesome [backers](https://github.com/chrislusf/seaweedfs/blob/master/backers.md). +SeaweedFS is an independent Apache-licensed open source project with its ongoing development made +possible entirely thanks to the support of these awesome [backers](https://github.com/chrislusf/seaweedfs/blob/master/backers.md). If you'd like to grow SeaweedFS even stronger, please consider joining our sponsors on Patreon. -Platinum ($2500/month), Gold ($500/month): put your company logo on the SeaweedFS github page -Generous Backer($50/month), Backer($10/month): put your name on the SeaweedFS backer page. - Your support will be really appreciated by me and other supporters!

Sponsor SeaweedFS via Patreon

+ + --- - [Download Binaries for different platforms](https://github.com/chrislusf/seaweedfs/releases/latest) -- [SeaweedFS on Slack](https://join.slack.com/t/seaweedfs/shared_invite/enQtMzI4MTMwMjU2MzA3LTc4MmVlYmFlNjBmZTgzZmJlYmI1MDE1YzkyNWYyZjkwZDFiM2RlMDdjNjVlNjdjYzc4NGFhZGIyYzEyMzJkYTA) +- [SeaweedFS on Slack](https://join.slack.com/t/seaweedfs/shared_invite/enQtMzI4MTMwMjU2MzA3LTEyYzZmZWYzOGQ3MDJlZWMzYmI0OTE4OTJiZjJjODBmMzUxNmYwODg0YjY3MTNlMjBmZDQ1NzQ5NDJhZWI2ZmY) +- [SeaweedFS on Twitter](https://twitter.com/SeaweedFS) - [SeaweedFS Mailing List](https://groups.google.com/d/forum/seaweedfs) - [Wiki Documentation](https://github.com/chrislusf/seaweedfs/wiki) - - -## Introduction +- [SeaweedFS Introduction Slides](https://www.slideshare.net/chrislusf/seaweedfs-introduction) + +Table of Contents +================= + +* [Introduction](#introduction) +* [Features](#features) + * [Additional Features](#additional-features) + * [Filer Features](#filer-features) +* [Example Usage](#example-usage) +* [Architecture](#architecture) +* [Compared to Other File Systems](#compared-to-other-file-systems) + * [Compared to HDFS](#compared-to-hdfs) + * [Compared to GlusterFS, Ceph](#compared-to-glusterfs-ceph) + * [Compared to GlusterFS](#compared-to-glusterfs) + * [Compared to Ceph](#compared-to-ceph) +* [Dev Plan](#dev-plan) +* [Installation Guide](#installation-guide) +* [Disk Related Topics](#disk-related-topics) +* [Benchmark](#Benchmark) +* [License](#license) + +## Introduction ## SeaweedFS is a simple and highly scalable distributed file system. There are two objectives: 1. to store billions of files! 2. to serve the files fast! -SeaweedFS started as an Object Store to handle small files efficiently. Instead of managing all file metadata in a central master, the central master only manages file volumes, and it lets these volume servers manage files and their metadata. 
This relieves concurrency pressure from the central master and spreads file metadata into volume servers, allowing faster file access (just one disk read operation). +SeaweedFS started as an Object Store to handle small files efficiently. Instead of managing all file metadata in a central master, the central master only manages file volumes, and it lets these volume servers manage files and their metadata. This relieves concurrency pressure from the central master and spreads file metadata into volume servers, allowing faster file access (O(1), usually just one disk read operation). + +SeaweedFS can transparently integrate with the cloud. With hot data on local cluster, and warm data on the cloud with O(1) access time, SeaweedFS can achieve both fast local access time and elastic cloud storage capacity, without any client side changes. There is only 40 bytes of disk storage overhead for each file's metadata. It is so simple with O(1) disk reads that you are welcome to challenge the performance with your actual use cases. -SeaweedFS started by implementing [Facebook's Haystack design paper](http://www.usenix.org/event/osdi10/tech/full_papers/Beaver.pdf). +SeaweedFS started by implementing [Facebook's Haystack design paper](http://www.usenix.org/event/osdi10/tech/full_papers/Beaver.pdf). Also, SeaweedFS implements erasure coding with ideas from [f4: Facebook’s Warm BLOB Storage System](https://www.usenix.org/system/files/conference/osdi14/osdi14-paper-muralidhar.pdf) -SeaweedFS can work very well with just the object store. [[Filer]] can then be added later to support directories and POSIX attributes. Filer is a separate linearly-scalable stateless server with customizable metadata stores, e.g., MySql/Postgres/Redis/Cassandra/LevelDB. +On top of the object store, optional [Filer] can support directories and POSIX attributes. 
Filer is a separate linearly-scalable stateless server with customizable metadata stores, e.g., MySql, Postgres, Mongodb, Redis, Etcd, Cassandra, LevelDB, MemSql, TiDB, TiKV, CockroachDB, etc. -## Additional Features -* Can choose no replication or different replication levels, rack and data center aware -* Automatic master servers failover - no single point of failure (SPOF) -* Automatic Gzip compression depending on file mime type -* Automatic compaction to reclaim disk space after deletion or update -* Servers in the same cluster can have different disk spaces, file systems, OS etc. -* Adding/Removing servers does **not** cause any data re-balancing -* Optionally fix the orientation for jpeg pictures -* Support Etag, Accept-Range, Last-Modified, etc. -* Support in-memory/leveldb/boltdb/btree mode tuning for memory/performance balance. +[Back to TOC](#table-of-contents) -## Filer Features +## Additional Features ## +* Can choose no replication or different replication levels, rack and data center aware. +* Automatic master servers failover - no single point of failure (SPOF). +* Automatic Gzip compression depending on file mime type. +* Automatic compaction to reclaim disk space after deletion or update. +* [Automatic entry TTL expiration][VolumeServerTTL]. +* Any server with some disk spaces can add to the total storage space. +* Adding/Removing servers does **not** cause any data re-balancing. +* Optional picture resizing. +* Support ETag, Accept-Range, Last-Modified, etc. +* Support in-memory/leveldb/readonly mode tuning for memory/performance balance. +* Support rebalancing the writable and readonly volumes. +* [Transparent cloud integration][CloudTier]: unlimited capacity via tiered cloud storage for warm data. +* [Erasure Coding for warm storage][ErasureCoding] Rack-Aware 10.4 erasure coding reduces storage cost and increases availability. 
+ +[Back to TOC](#table-of-contents) + +## Filer Features ## * [filer server][Filer] provide "normal" directories and files via http. * [mount filer][Mount] to read and write files directly as a local directory via FUSE. * [Amazon S3 compatible API][AmazonS3API] to access files with S3 tooling. * [Hadoop Compatible File System][Hadoop] to access files from Hadoop/Spark/Flink/etc jobs. * [Async Backup To Cloud][BackupToCloud] has extremely fast local access and backups to Amazon S3, Google Cloud Storage, Azure, BackBlaze. +* [WebDAV] access as a mapped drive on Mac and Windows, or from mobile devices. +* [AES256-GCM Encrypted Storage][FilerDataEncryption] safely stores the encrypted data. +* [File TTL][FilerTTL] automatically purge file metadata and actual file data. +* [Kubernetes CSI Driver][SeaweedFsCsiDriver] A Container Storage Interface (CSI) Driver. [![Docker Pulls](https://img.shields.io/docker/pulls/chrislusf/seaweedfs-csi-driver.svg?maxAge=604800)](https://hub.docker.com/r/chrislusf/seaweedfs-csi-driver/) [Filer]: https://github.com/chrislusf/seaweedfs/wiki/Directories-and-Files [Mount]: https://github.com/chrislusf/seaweedfs/wiki/Mount [AmazonS3API]: https://github.com/chrislusf/seaweedfs/wiki/Amazon-S3-API [BackupToCloud]: https://github.com/chrislusf/seaweedfs/wiki/Backup-to-Cloud [Hadoop]: https://github.com/chrislusf/seaweedfs/wiki/Hadoop-Compatible-File-System +[WebDAV]: https://github.com/chrislusf/seaweedfs/wiki/WebDAV +[ErasureCoding]: https://github.com/chrislusf/seaweedfs/wiki/Erasure-coding-for-warm-storage +[CloudTier]: https://github.com/chrislusf/seaweedfs/wiki/Cloud-Tier +[FilerDataEncryption]: https://github.com/chrislusf/seaweedfs/wiki/Filer-Data-Encryption +[FilerTTL]: https://github.com/chrislusf/seaweedfs/wiki/Filer-Stores +[VolumeServerTTL]: https://github.com/chrislusf/seaweedfs/wiki/Store-file-with-a-Time-To-Live +[SeaweedFsCsiDriver]: https://github.com/seaweedfs/seaweedfs-csi-driver + +[Back to TOC](#table-of-contents) + +## 
Example Usage ## -## Example Usage By default, the master node runs on port 9333, and the volume nodes run on port 8080. Let's start one master node, and two volume nodes on port 8080 and 8081. Ideally, they should be started from different machines. We'll use localhost as an example. SeaweedFS uses HTTP REST operations to read, write, and delete. The responses are in JSON or JSONP format. -### Start Master Server +### Start Master Server ### ``` > ./weed master @@ -125,7 +170,7 @@ Second, to store the file content, send a HTTP multi-part POST request to `url + ``` > curl -F file=@/home/chris/myphoto.jpg http://127.0.0.1:8080/3,01637037d6 -{"size": 43234} +{"name":"myphoto.jpg","size":43234,"eTag":"1cc0118e"} ``` To update, send another POST request with updated file content. @@ -135,6 +180,7 @@ For deletion, send an HTTP DELETE request to the same `url + '/' + fid` URL: ``` > curl -X DELETE http://127.0.0.1:8080/3,01637037d6 ``` + ### Save File Id ### Now, you can save the `fid`, 3,01637037d6 in this case, to a database field. @@ -157,7 +203,7 @@ First look up the volume server's URLs by the file's volumeId: ``` > curl http://localhost:9333/dir/lookup?volumeId=3 -{"locations":[{"publicUrl":"localhost:8080","url":"localhost:8080"}]} +{"volumeId":"3","locations":[{"publicUrl":"localhost:8080","url":"localhost:8080"}]} ``` Since (usually) there are not too many volume servers, and volumes don't move often, you can cache the results most of the time. Depending on the replication type, one volume can have multiple replica locations. Just randomly pick one location to read. @@ -213,7 +259,7 @@ More details about replication can be found [on the wiki][Replication]. You can also set the default replication strategy when starting the master server. 
-### Allocate File Key on specific data center ### +### Allocate File Key on Specific Data Center ### Volume servers can be started with a specific data center name: @@ -239,6 +285,8 @@ When requesting a file key, an optional "dataCenter" parameter can limit the ass [feat-3]: https://github.com/chrislusf/seaweedfs/wiki/Optimization#upload-large-files [feat-4]: https://github.com/chrislusf/seaweedfs/wiki/Optimization#collection-as-a-simple-name-space +[Back to TOC](#table-of-contents) + ## Architecture ## Usually distributed file systems split each file into chunks, a central master keeps a mapping of filenames, chunk indices to chunk handles, and also which chunks each chunk server has. @@ -279,12 +327,26 @@ Each individual file size is limited to the volume size. All file meta information stored on an volume server is readable from memory without disk access. Each file takes just a 16-byte map entry of <64bit key, 32bit offset, 32bit size>. Of course, each map entry has its own space cost for the map. But usually the disk space runs out before the memory does. +### Tiered Storage to the cloud ### + +The local volume servers are much faster, while cloud storages have elastic capacity and are actually more cost-efficient if not accessed often (usually free to upload, but relatively costly to access). With the append-only structure and O(1) access time, SeaweedFS can take advantage of both local and cloud storage by offloading the warm data to the cloud. + +Usually hot data are fresh and warm data are old. SeaweedFS puts the newly created volumes on local servers, and optionally upload the older volumes on the cloud. If the older data are accessed less often, this literally gives you unlimited capacity with limited local servers, and still fast for new data. + +With the O(1) access time, the network latency cost is kept at minimum. + +If the hot~warm data is split as 20~80, with 20 servers, you can achieve storage capacity of 100 servers. That's a cost saving of 80%! 
Or you can repurpose the 80 servers to store new data also, and get 5X storage throughput. + +[Back to TOC](#table-of-contents) + ## Compared to Other File Systems ## Most other distributed file systems seem more complicated than necessary. SeaweedFS is meant to be fast and simple, in both setup and operation. If you do not understand how it works when you reach here, we've failed! Please raise an issue with any questions or update this file with clarifications. +[Back to TOC](#table-of-contents) + ### Compared to HDFS ### HDFS uses the chunk approach for each file, and is ideal for storing large files. @@ -293,6 +355,7 @@ SeaweedFS is ideal for serving relatively smaller files quickly and concurrently SeaweedFS can also store extra large files by splitting them into manageable data chunks, and store the file ids of the data chunks into a meta chunk. This is managed by "weed upload/download" tool, and the weed master or volume servers are agnostic about it. +[Back to TOC](#table-of-contents) ### Compared to GlusterFS, Ceph ### @@ -300,7 +363,7 @@ The architectures are mostly the same. SeaweedFS aims to store and read files fa * SeaweedFS optimizes for small files, ensuring O(1) disk seek operation, and can also handle large files. * SeaweedFS statically assigns a volume id for a file. Locating file content becomes just a lookup of the volume id, which can be easily cached. -* SeaweedFS Filer metadata store can be any well-known and proven data stores, e.g., Cassandra, Redis, MySql, Postgres, etc, and is easy to customized. +* SeaweedFS Filer metadata store can be any well-known and proven data stores, e.g., Cassandra, Mongodb, Redis, Etcd, MySql, Postgres, MemSql, TiDB, CockroachDB, etc, and is easy to customized. * SeaweedFS Volume server also communicates directly with clients via HTTP, supporting range queries, direct uploads, etc. 
| System | File Meta | File Content Read| POSIX | REST API | Optimized for small files | @@ -309,6 +372,9 @@ The architectures are mostly the same. SeaweedFS aims to store and read files fa | SeaweedFS Filer| Linearly Scalable, Customizable | O(1) disk seek | FUSE | Yes | Yes | | GlusterFS | hashing | | FUSE, NFS | | | | Ceph | hashing + rules | | FUSE | Yes | | +| MooseFS | in memory | | FUSE | | No | + +[Back to TOC](#table-of-contents) ### Compared to GlusterFS ### @@ -316,11 +382,21 @@ GlusterFS stores files, both directories and content, in configurable volumes ca GlusterFS hashes the path and filename into ids, and assigned to virtual volumes, and then mapped to "bricks". +[Back to TOC](#table-of-contents) + +### Compared to MooseFS ### + +MooseFS chooses to neglect small file issue. From moosefs 3.0 manual, "even a small file will occupy 64KiB plus additionally 4KiB of checksums and 1KiB for the header", because it "was initially designed for keeping large amounts (like several thousands) of very big files" + +MooseFS Master Server keeps all meta data in memory. Same issue as HDFS namenode. + +[Back to TOC](#table-of-contents) + ### Compared to Ceph ### Ceph can be setup similar to SeaweedFS as a key->blob store. It is much more complicated, with the need to support layers on top of it. [Here is a more detailed comparison](https://github.com/chrislusf/seaweedfs/issues/120) -SeaweedFS has a centralized master group to look up free volumes, while Ceph uses hashing and metadata servers to locate its objects. Having a centralized master makes it easy to code and manage. +SeaweedFS has a centralized master group to look up free volumes, while Ceph uses hashing and metadata servers to locate its objects. Having a centralized master makes it easy to code and manage. Same as SeaweedFS, Ceph is also based on the object store RADOS. Ceph is rather complicated with mixed reviews. @@ -328,7 +404,7 @@ Ceph uses CRUSH hashing to automatically manage the data placement. 
SeaweedFS pl SeaweedFS is optimized for small files. Small files are stored as one continuous block of content, with at most 8 unused bytes between files. Small file access is O(1) disk read. -SeaweedFS Filer uses off-the-shelf stores, such as MySql, Postgres, Redis, Cassandra, to manage file directories. There are proven, scalable, and easier to manage. +SeaweedFS Filer uses off-the-shelf stores, such as MySql, Postgres, Mongodb, Redis, Etcd, Cassandra, MemSql, TiDB, CockroachCB, to manage file directories. These stores are proven, scalable, and easier to manage. | SeaweedFS | comparable to Ceph | advantage | | ------------- | ------------- | ---------------- | @@ -336,16 +412,26 @@ SeaweedFS Filer uses off-the-shelf stores, such as MySql, Postgres, Redis, Cassa | Volume | OSD | optimized for small files | | Filer | Ceph FS | linearly scalable, Customizable, O(1) or O(logN) | +[Back to TOC](#table-of-contents) -## Dev plan ## +## Dev Plan ## More tools and documentation, on how to maintain and scale the system. For example, how to move volumes, automatically balancing data, how to grow volumes, how to check system status, etc. Other key features include: Erasure Encoding, JWT security. This is a super exciting project! And we need helpers and [support](https://www.patreon.com/seaweedfs)! 
+BTW, We suggest run the code style check script `util/gostd` before you push your branch to remote, it will make SeaweedFS easy to review, maintain and develop: + +``` +$ ./util/gostd +``` -## Installation guide for users who are not familiar with golang +[Back to TOC](#table-of-contents) + +## Installation Guide ## + +> Installation guide for users who are not familiar with golang Step 1: install go on your machine and setup the environment by following the instructions at: @@ -366,77 +452,87 @@ go get github.com/chrislusf/seaweedfs/weed Once this is done, you will find the executable "weed" in your `$GOPATH/bin` directory -Step 4: after you modify your code locally, you could start a local build by calling `go install` under +Note: +* If you got into this problem, try to `rm -Rf $GOPATH/src/go.etcd.io/etcd/vendor/golang.org/x/net/trace` and build again. +``` +panic: /debug/requests is already registered. You may have two independent copies of golang.org/x/net/trace in your binary, trying to maintain separate state. This may involve a vendored copy of golang.org/x/net/trace. +``` + +Step 4: after you modify your code locally, you could start a local build by calling `go install` under ``` $GOPATH/src/github.com/chrislusf/seaweedfs/weed ``` -## Disk Related topics ## +[Back to TOC](#table-of-contents) + +## Disk Related Topics ## ### Hard Drive Performance ### When testing read performance on SeaweedFS, it basically becomes a performance test of your hard drive's random read speed. Hard drives usually get 100MB/s~200MB/s. -### Solid State Disk +### Solid State Disk ### To modify or delete small files, SSD must delete a whole block at a time, and move content in existing blocks to a new block. SSD is fast when brand new, but will get fragmented over time and you have to garbage collect, compacting blocks. SeaweedFS is friendly to SSD since it is append-only. 
Deletion and compaction are done on volume level in the background, not slowing reading and not causing fragmentation. -## Benchmark +[Back to TOC](#table-of-contents) + +## Benchmark ## My Own Unscientific Single Machine Results on Mac Book with Solid State Disk, CPU: 1 Intel Core i7 2.6GHz. Write 1 million 1KB file: ``` Concurrency Level: 16 -Time taken for tests: 88.796 seconds +Time taken for tests: 66.753 seconds Complete requests: 1048576 Failed requests: 0 -Total transferred: 1106764659 bytes -Requests per second: 11808.87 [#/sec] -Transfer rate: 12172.05 [Kbytes/sec] +Total transferred: 1106789009 bytes +Requests per second: 15708.23 [#/sec] +Transfer rate: 16191.69 [Kbytes/sec] Connection Times (ms) min avg max std -Total: 0.2 1.3 44.8 0.9 +Total: 0.3 1.0 84.3 0.9 Percentage of the requests served within a certain time (ms) - 50% 1.1 ms - 66% 1.3 ms - 75% 1.5 ms - 80% 1.7 ms - 90% 2.1 ms - 95% 2.6 ms - 98% 3.7 ms - 99% 4.6 ms - 100% 44.8 ms + 50% 0.8 ms + 66% 1.0 ms + 75% 1.1 ms + 80% 1.2 ms + 90% 1.4 ms + 95% 1.7 ms + 98% 2.1 ms + 99% 2.6 ms + 100% 84.3 ms ``` Randomly read 1 million files: ``` Concurrency Level: 16 -Time taken for tests: 34.263 seconds +Time taken for tests: 22.301 seconds Complete requests: 1048576 Failed requests: 0 -Total transferred: 1106762945 bytes -Requests per second: 30603.34 [#/sec] -Transfer rate: 31544.49 [Kbytes/sec] +Total transferred: 1106812873 bytes +Requests per second: 47019.38 [#/sec] +Transfer rate: 48467.57 [Kbytes/sec] Connection Times (ms) min avg max std -Total: 0.0 0.5 20.7 0.7 +Total: 0.0 0.3 54.1 0.2 Percentage of the requests served within a certain time (ms) - 50% 0.4 ms - 75% 0.5 ms - 95% 0.6 ms - 98% 0.8 ms - 99% 1.2 ms - 100% 20.7 ms + 50% 0.3 ms + 90% 0.4 ms + 98% 0.6 ms + 99% 0.7 ms + 100% 54.1 ms ``` +[Back to TOC](#table-of-contents) -## License +## License ## Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -450,7 +546,10 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. +The text of this page is available for modification and reuse under the terms of the Creative Commons Attribution-Sharealike 3.0 Unported License and the GNU Free Documentation License (unversioned, with no invariant sections, front-cover texts, or back-cover texts). + +[Back to TOC](#table-of-contents) -## Stargazers over time +## Stargazers over time ## [![Stargazers over time](https://starcharts.herokuapp.com/chrislusf/seaweedfs.svg)](https://starcharts.herokuapp.com/chrislusf/seaweedfs) diff --git a/backers.md b/backers.md index 98767524e..725615237 100644 --- a/backers.md +++ b/backers.md @@ -4,7 +4,9 @@

Generous Backers ($50+)

+- [4Sight Imaging](https://www.4sightimaging.com/) - [Evercam Camera Management Software](https://evercam.io/) +- [Admiral](https://getadmiral.com)

Backers

diff --git a/docker/Dockerfile b/docker/Dockerfile index c7a343111..7146b91c7 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -1,9 +1,24 @@ -FROM frolvlad/alpine-glibc +FROM alpine -# Tried to use curl only (curl -o /tmp/linux_amd64.tar.gz ...), however it turned out that the following tar command failed with "gzip: stdin: not in gzip format" -RUN apk add --no-cache --virtual build-dependencies --update wget curl ca-certificates && \ - wget -P /tmp https://github.com/$(curl -s -L https://github.com/chrislusf/seaweedfs/releases/latest | egrep -o 'chrislusf/seaweedfs/releases/download/.*/linux_amd64.tar.gz') && \ - tar -C /usr/bin/ -xzvf /tmp/linux_amd64.tar.gz && \ +RUN \ + ARCH=$(if [ $(uname -m) == "x86_64" ] && [ $(getconf LONG_BIT) == "64" ]; then echo "amd64"; \ + elif [ $(uname -m) == "x86_64" ] && [ $(getconf LONG_BIT) == "32" ]; then echo "386"; \ + elif [ $(uname -m) == "aarch64" ]; then echo "arm64"; \ + elif [ $(uname -m) == "armv7l" ]; then echo "arm"; \ + elif [ $(uname -m) == "armv6l" ]; then echo "arm"; fi;) && \ + echo "Building for $ARCH" 1>&2 && \ + SUPERCRONIC_SHA1SUM=$(echo $ARCH | sed 's/386/e0126b0102b9f388ecd55714358e3ad60d0cebdb/g' | sed 's/amd64/5ddf8ea26b56d4a7ff6faecdd8966610d5cb9d85/g' | sed 's/arm64/e2714c43e7781bf1579c85aa61259245f56dbba1/g' | sed 's/arm/47481c3341bc3a1ae91a728e0cc63c8e6d3791ad/g') && \ + SUPERCRONIC_URL=https://github.com/aptible/supercronic/releases/download/v0.1.9/supercronic-linux-$ARCH && \ + SUPERCRONIC=supercronic-linux-$ARCH && \ + # Install SeaweedFS and Supercronic ( for cron job mode ) + apk add --no-cache --virtual build-dependencies --update wget curl ca-certificates && \ + wget -P /tmp https://github.com/$(curl -s -L https://github.com/chrislusf/seaweedfs/releases/latest | egrep -o "chrislusf/seaweedfs/releases/download/.*/linux_$ARCH.tar.gz") && \ + tar -C /usr/bin/ -xzvf /tmp/linux_$ARCH.tar.gz && \ + curl -fsSLO "$SUPERCRONIC_URL" && \ + echo "${SUPERCRONIC_SHA1SUM} ${SUPERCRONIC}" | sha1sum 
-c - && \ + chmod +x "$SUPERCRONIC" && \ + mv "$SUPERCRONIC" "/usr/local/bin/${SUPERCRONIC}" && \ + ln -s "/usr/local/bin/${SUPERCRONIC}" /usr/local/bin/supercronic && \ apk del build-dependencies && \ rm -rf /tmp/* @@ -22,6 +37,8 @@ EXPOSE 9333 # s3 server http port EXPOSE 8333 +RUN mkdir -p /data/filerldb2 + VOLUME /data COPY filer.toml /etc/seaweedfs/filer.toml diff --git a/docker/Dockerfile.go_build b/docker/Dockerfile.go_build index d0a214476..306ce3aa1 100644 --- a/docker/Dockerfile.go_build +++ b/docker/Dockerfile.go_build @@ -1,5 +1,15 @@ -FROM golang:latest -RUN go get github.com/chrislusf/seaweedfs/weed +FROM frolvlad/alpine-glibc as builder +RUN apk add git go g++ +RUN mkdir -p /go/src/github.com/chrislusf/ +RUN git clone https://github.com/chrislusf/seaweedfs /go/src/github.com/chrislusf/seaweedfs +RUN cd /go/src/github.com/chrislusf/seaweedfs/weed && go install + +FROM alpine AS final +LABEL author="Chris Lu" +COPY --from=builder /root/go/bin/weed /usr/bin/ +RUN mkdir -p /etc/seaweedfs +COPY --from=builder /go/src/github.com/chrislusf/seaweedfs/docker/filer.toml /etc/seaweedfs/filer.toml +COPY --from=builder /go/src/github.com/chrislusf/seaweedfs/docker/entrypoint.sh /entrypoint.sh # volume server gprc port EXPOSE 18080 @@ -16,12 +26,10 @@ EXPOSE 9333 # s3 server http port EXPOSE 8333 +RUN mkdir -p /data/filerldb2 + VOLUME /data -RUN mkdir -p /etc/seaweedfs -RUN cp /go/src/github.com/chrislusf/seaweedfs/docker/filer.toml /etc/seaweedfs/filer.toml -RUN cp /go/src/github.com/chrislusf/seaweedfs/docker/entrypoint.sh /entrypoint.sh RUN chmod +x /entrypoint.sh -RUN cp /go/bin/weed /usr/bin/ ENTRYPOINT ["/entrypoint.sh"] diff --git a/docker/Dockerfile.local b/docker/Dockerfile.local new file mode 100644 index 000000000..693d8a952 --- /dev/null +++ b/docker/Dockerfile.local @@ -0,0 +1,29 @@ +FROM alpine AS final +LABEL author="Chris Lu" +COPY ./weed /usr/bin/ +RUN mkdir -p /etc/seaweedfs +COPY ./filer.toml /etc/seaweedfs/filer.toml +COPY ./entrypoint.sh 
/entrypoint.sh + +# volume server grpc port +EXPOSE 18080 +# volume server http port +EXPOSE 8080 +# filer server grpc port +EXPOSE 18888 +# filer server http port +EXPOSE 8888 +# master server shared grpc port +EXPOSE 19333 +# master server shared http port +EXPOSE 9333 +# s3 server http port +EXPOSE 8333 + +RUN mkdir -p /data/filerldb2 + +VOLUME /data + +RUN chmod +x /entrypoint.sh + +ENTRYPOINT ["/entrypoint.sh"] diff --git a/docker/Makefile b/docker/Makefile new file mode 100644 index 000000000..166188bc3 --- /dev/null +++ b/docker/Makefile @@ -0,0 +1,19 @@ +all: gen + +.PHONY : gen + +gen: dev + +build: + cd ../weed; GOOS=linux go build; mv weed ../docker/ + docker build --no-cache -t chrislusf/seaweedfs:local -f Dockerfile.local . + rm ./weed + +dev: build + docker-compose -f local-dev-compose.yml -p seaweedfs up + +cluster: build + docker-compose -f local-cluster-compose.yml -p seaweedfs up + +clean: + rm ./weed diff --git a/docker/README.md b/docker/README.md index cfe281e71..d6e1f4928 100644 --- a/docker/README.md +++ b/docker/README.md @@ -11,11 +11,29 @@ docker-compose -f seaweedfs-compose.yml -p seaweedfs up ``` -## Development +## Try latest tip + +```bash + +wget https://raw.githubusercontent.com/chrislusf/seaweedfs/master/docker/seaweedfs-dev-compose.yml + +docker-compose -f seaweedfs-dev-compose.yml -p seaweedfs up + +``` + +## Local Development ```bash cd $GOPATH/src/github.com/chrislusf/seaweedfs/docker +make +``` -docker-compose -f dev-compose.yml -p seaweedfs up +## Build and push a multiarch build +Make sure that `docker buildx` is supported (might be an experimental docker feature) +```bash +BUILDER=$(docker buildx create --driver docker-container --use) +docker buildx build --pull --push --platform linux/386,linux/amd64,linux/arm64,linux/arm/v7,linux/arm/v6 . 
-t chrislusf/seaweedfs +docker buildx stop $BUILDER ``` + diff --git a/docker/dev-compose.yml b/docker/dev-compose.yml deleted file mode 100644 index 0306b3cb0..000000000 --- a/docker/dev-compose.yml +++ /dev/null @@ -1,43 +0,0 @@ -version: '2' - -services: - master: - build: - context: . - dockerfile: Dockerfile.go_build - ports: - - 9333:9333 - - 19333:19333 - command: "master" - volume: - build: - context: . - dockerfile: Dockerfile.go_build - ports: - - 8080:8080 - - 18080:18080 - command: 'volume -max=5 -mserver="master:9333" -port=8080' - depends_on: - - master - filer: - build: - context: . - dockerfile: Dockerfile.go_build - ports: - - 8888:8888 - - 18888:18888 - command: 'filer -master="master:9333"' - depends_on: - - master - - volume - s3: - build: - context: . - dockerfile: Dockerfile.go_build - ports: - - 8333:8333 - command: 's3 -filer="filer:8888"' - depends_on: - - master - - volume - - filer diff --git a/docker/entrypoint.sh b/docker/entrypoint.sh index 105087dbe..05db7a672 100755 --- a/docker/entrypoint.sh +++ b/docker/entrypoint.sh @@ -3,48 +3,46 @@ case "$1" in 'master') - ARGS="-ip `hostname -i` -mdir /data" - # Is this instance linked with an other master? (Docker commandline "--link master1:master") - if [ -n "$MASTER_PORT_9333_TCP_ADDR" ] ; then - ARGS="$ARGS -peers=$MASTER_PORT_9333_TCP_ADDR:$MASTER_PORT_9333_TCP_PORT" - fi + ARGS="-mdir=/data -volumePreallocate -volumeSizeLimitMB=1024" exec /usr/bin/weed $@ $ARGS ;; 'volume') - ARGS="-ip `hostname -i` -dir /data" - # Is this instance linked with a master? 
(Docker commandline "--link master1:master") - if [ -n "$MASTER_PORT_9333_TCP_ADDR" ] ; then - ARGS="$ARGS -mserver=$MASTER_PORT_9333_TCP_ADDR:$MASTER_PORT_9333_TCP_PORT" - fi + ARGS="-dir=/data -max=0" + if [[ $@ == *"-max="* ]]; then + ARGS="-dir=/data" + fi exec /usr/bin/weed $@ $ARGS ;; 'server') - ARGS="-ip `hostname -i` -dir /data" - if [ -n "$MASTER_PORT_9333_TCP_ADDR" ] ; then - ARGS="$ARGS -master.peers=$MASTER_PORT_9333_TCP_ADDR:$MASTER_PORT_9333_TCP_PORT" - fi + ARGS="-dir=/data -volume.max=0 -master.volumePreallocate -master.volumeSizeLimitMB=1024" + if [[ $@ == *"-volume.max="* ]]; then + ARGS="-dir=/data -master.volumePreallocate -master.volumeSizeLimitMB=1024" + fi exec /usr/bin/weed $@ $ARGS ;; 'filer') - ARGS="-ip `hostname -i` " - if [ -n "$MASTER_PORT_9333_TCP_ADDR" ] ; then - ARGS="$ARGS -master=$MASTER_PORT_9333_TCP_ADDR:$MASTER_PORT_9333_TCP_PORT" - fi - mkdir -p /data/filerdb + ARGS="" exec /usr/bin/weed $@ $ARGS ;; 's3') ARGS="-domainName=$S3_DOMAIN_NAME -key.file=$S3_KEY_FILE -cert.file=$S3_CERT_FILE" - if [ -n "$FILER_PORT_8888_TCP_ADDR" ] ; then - ARGS="$ARGS -filer=$FILER_PORT_8888_TCP_ADDR:$FILER_PORT_8888_TCP_PORT" - fi exec /usr/bin/weed $@ $ARGS ;; + 'cronjob') + MASTER=${WEED_MASTER-localhost:9333} + FIX_REPLICATION_CRON_SCHEDULE=${CRON_SCHEDULE-*/7 * * * * *} + echo "$FIX_REPLICATION_CRON_SCHEDULE" 'echo "volume.fix.replication" | weed shell -master='$MASTER > /crontab + BALANCING_CRON_SCHEDULE=${CRON_SCHEDULE-25 * * * * *} + echo "$BALANCING_CRON_SCHEDULE" 'echo "volume.balance -c ALL -force" | weed shell -master='$MASTER >> /crontab + echo "Running Crontab:" + cat /crontab + exec supercronic /crontab + ;; *) exec /usr/bin/weed $@ ;; diff --git a/docker/filer.toml b/docker/filer.toml index 5bf809cd8..a11e5de2b 100644 --- a/docker/filer.toml +++ b/docker/filer.toml @@ -1,3 +1,3 @@ -[leveldb] +[leveldb2] enabled = true -dir = "/data/filerdb" +dir = "/data/filerldb2" diff --git a/docker/local-cluster-compose.yml 
b/docker/local-cluster-compose.yml new file mode 100644 index 000000000..a1ac824e7 --- /dev/null +++ b/docker/local-cluster-compose.yml @@ -0,0 +1,53 @@ +version: '2' + +services: + master0: + image: chrislusf/seaweedfs:local + ports: + - 9333:9333 + - 19333:19333 + command: "master -ip=master0 -port=9333 -peers=master0:9333,master1:9334,master2:9335" + master1: + image: chrislusf/seaweedfs:local + ports: + - 9334:9334 + - 19334:19334 + command: "master -ip=master1 -port=9334 -peers=master0:9333,master1:9334,master2:9335" + master2: + image: chrislusf/seaweedfs:local + ports: + - 9335:9335 + - 19335:19335 + command: "master -ip=master2 -port=9335 -peers=master0:9333,master1:9334,master2:9335" + volume: + image: chrislusf/seaweedfs:local + ports: + - 8080:8080 + - 18080:18080 + command: 'volume -mserver="master0:9333,master1:9334,master2:9335" -port=8080 -ip=volume' + depends_on: + - master0 + - master1 + - master2 + filer: + image: chrislusf/seaweedfs:local + ports: + - 8888:8888 + - 18888:18888 + command: 'filer -master="master0:9333,master1:9334,master2:9335"' + depends_on: + - master0 + - master1 + - master2 + - volume + s3: + image: chrislusf/seaweedfs:local + ports: + - 8333:8333 + command: 's3 -filer="filer:8888"' + depends_on: + - master0 + - master1 + - master2 + - volume + - filer diff --git a/docker/local-dev-compose.yml b/docker/local-dev-compose.yml new file mode 100644 index 000000000..f6fd0f4ce --- /dev/null +++ b/docker/local-dev-compose.yml @@ -0,0 +1,35 @@ +version: '2' + +services: + master: + image: chrislusf/seaweedfs:local + ports: + - 9333:9333 + - 19333:19333 + command: "master -ip=master" + volume: + image: chrislusf/seaweedfs:local + ports: + - 8080:8080 + - 18080:18080 + command: "volume -mserver=master:9333 -port=8080 -ip=volume" + depends_on: + - master + filer: + image: chrislusf/seaweedfs:local + ports: + - 8888:8888 + - 18888:18888 + command: 'filer -master="master:9333"' + depends_on: + - master + - volume + s3: + image: 
chrislusf/seaweedfs:local + ports: + - 8333:8333 + command: 's3 -filer="filer:8888"' + depends_on: + - master + - volume + - filer diff --git a/docker/seaweedfs-compose.yml b/docker/seaweedfs-compose.yml index 05ed0e69e..70d005017 100644 --- a/docker/seaweedfs-compose.yml +++ b/docker/seaweedfs-compose.yml @@ -4,34 +4,44 @@ services: master: image: chrislusf/seaweedfs # use a remote image ports: - - 9333:9333 - - 19333:19333 - command: "master" + - 9333:9333 + - 19333:19333 + command: "master -ip=master" volume: image: chrislusf/seaweedfs # use a remote image ports: - - 8080:8080 - - 18080:18080 - command: 'volume -max=15 -mserver="master:9333" -port=8080' + - 8080:8080 + - 18080:18080 + command: 'volume -mserver="master:9333" -port=8080' depends_on: - - master + - master filer: image: chrislusf/seaweedfs # use a remote image ports: - - 8888:8888 - - 18888:18888 + - 8888:8888 + - 18888:18888 command: 'filer -master="master:9333"' tty: true stdin_open: true depends_on: - - master - - volume + - master + - volume + cronjob: + image: chrislusf/seaweedfs # use a remote image + command: 'cronjob' + environment: + # Run re-replication every 2 minutes + CRON_SCHEDULE: '*/2 * * * * *' # Default: '*/5 * * * * *' + WEED_MASTER: master:9333 # Default: localhost:9333 + depends_on: + - master + - volume s3: image: chrislusf/seaweedfs # use a remote image ports: - - 8333:8333 + - 8333:8333 command: 's3 -filer="filer:8888"' depends_on: - - master - - volume - - filer + - master + - volume + - filer diff --git a/docker/seaweedfs-dev-compose.yml b/docker/seaweedfs-dev-compose.yml new file mode 100644 index 000000000..75801102e --- /dev/null +++ b/docker/seaweedfs-dev-compose.yml @@ -0,0 +1,35 @@ +version: '2' + +services: + master: + image: chrislusf/seaweedfs:dev # use a remote dev image + ports: + - 9333:9333 + - 19333:19333 + command: "master -ip=master" + volume: + image: chrislusf/seaweedfs:dev # use a remote dev image + ports: + - 8080:8080 + - 18080:18080 + command: 'volume 
-mserver="master:9333" -port=8080 -ip=volume' + depends_on: + - master + filer: + image: chrislusf/seaweedfs:dev # use a remote dev image + ports: + - 8888:8888 + - 18888:18888 + command: 'filer -master="master:9333"' + depends_on: + - master + - volume + s3: + image: chrislusf/seaweedfs:dev # use a remote dev image + ports: + - 8333:8333 + command: 's3 -filer="filer:8888"' + depends_on: + - master + - volume + - filer diff --git a/go.mod b/go.mod new file mode 100644 index 000000000..6bd3733c5 --- /dev/null +++ b/go.mod @@ -0,0 +1,101 @@ +module github.com/chrislusf/seaweedfs + +go 1.12 + +require ( + cloud.google.com/go v0.44.3 + github.com/Azure/azure-pipeline-go v0.2.2 // indirect + github.com/Azure/azure-storage-blob-go v0.8.0 + github.com/DataDog/zstd v1.4.1 // indirect + github.com/OneOfOne/xxhash v1.2.2 + github.com/Shopify/sarama v1.23.1 + github.com/aws/aws-sdk-go v1.23.13 + github.com/buraksezer/consistent v0.0.0-20191006190839-693edf70fd72 + github.com/cespare/xxhash v1.1.0 + github.com/chrislusf/raft v1.0.1 + github.com/coreos/bbolt v1.3.3 // indirect + github.com/coreos/etcd v3.3.15+incompatible // indirect + github.com/coreos/go-semver v0.3.0 // indirect + github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f // indirect + github.com/dgrijalva/jwt-go v3.2.0+incompatible + github.com/disintegration/imaging v1.6.2 + github.com/dustin/go-humanize v1.0.0 + github.com/eapache/go-resiliency v1.2.0 // indirect + github.com/facebookgo/clock v0.0.0-20150410010913-600d898af40a + github.com/facebookgo/ensure v0.0.0-20200202191622-63f1cf65ac4c // indirect + github.com/facebookgo/stack v0.0.0-20160209184415-751773369052 // indirect + github.com/facebookgo/stats v0.0.0-20151006221625-1b76add642e4 + github.com/facebookgo/subset v0.0.0-20200203212716-c811ad88dec4 // indirect + github.com/frankban/quicktest v1.7.2 // indirect + github.com/go-redis/redis v6.15.7+incompatible + github.com/go-sql-driver/mysql v1.4.1 + github.com/gocql/gocql 
v0.0.0-20190829130954-e163eff7a8c6 + github.com/gogo/protobuf v1.2.2-0.20190730201129-28a6bbf47e48 // indirect + github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6 // indirect + github.com/golang/protobuf v1.4.2 + github.com/google/btree v1.0.0 + github.com/google/uuid v1.1.1 + github.com/gorilla/mux v1.7.3 + github.com/gorilla/websocket v1.4.1 // indirect + github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4 // indirect + github.com/grpc-ecosystem/grpc-gateway v1.11.0 // indirect + github.com/hashicorp/golang-lru v0.5.3 // indirect + github.com/jcmturner/gofork v1.0.0 // indirect + github.com/karlseguin/ccache v2.0.3+incompatible + github.com/karlseguin/expect v1.0.1 // indirect + github.com/klauspost/compress v1.10.9 + github.com/klauspost/cpuid v1.2.1 // indirect + github.com/klauspost/crc32 v1.2.0 + github.com/klauspost/reedsolomon v1.9.2 + github.com/kurin/blazer v0.5.3 + github.com/lib/pq v1.2.0 + github.com/magiconair/properties v1.8.1 // indirect + github.com/mattn/go-ieproxy v0.0.0-20190805055040-f9202b1cfdeb // indirect + github.com/mattn/go-runewidth v0.0.4 // indirect + github.com/nats-io/nats-server/v2 v2.0.4 // indirect + github.com/onsi/ginkgo v1.10.1 // indirect + github.com/onsi/gomega v1.7.0 // indirect + github.com/peterh/liner v1.1.0 + github.com/pierrec/lz4 v2.2.7+incompatible // indirect + github.com/prometheus/client_golang v1.1.0 + github.com/prometheus/procfs v0.0.4 // indirect + github.com/rakyll/statik v0.1.7 + github.com/rcrowley/go-metrics v0.0.0-20190826022208-cac0b30c2563 // indirect + github.com/seaweedfs/fuse v0.0.0-20190510212405-310228904eff + github.com/seaweedfs/goexif v1.0.2 + github.com/spaolacci/murmur3 v1.1.0 // indirect + github.com/spf13/afero v1.2.2 // indirect + github.com/spf13/jwalterweatherman v1.1.0 // indirect + github.com/spf13/viper v1.4.0 + github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271 // indirect + github.com/stretchr/testify v1.4.0 + 
github.com/syndtr/goleveldb v1.0.0 + github.com/tidwall/gjson v1.3.2 + github.com/tidwall/match v1.0.1 + github.com/willf/bitset v1.1.10 // indirect + github.com/willf/bloom v2.0.3+incompatible + github.com/wsxiaoys/terminal v0.0.0-20160513160801-0940f3fc43a0 // indirect + go.etcd.io/etcd v3.3.15+incompatible + go.mongodb.org/mongo-driver v1.3.2 + gocloud.dev v0.16.0 + gocloud.dev/pubsub/natspubsub v0.16.0 + gocloud.dev/pubsub/rabbitpubsub v0.16.0 + golang.org/x/image v0.0.0-20200119044424-58c23975cae1 // indirect + golang.org/x/net v0.0.0-20190909003024-a7b16738d86b + golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5 + golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5 + google.golang.org/api v0.9.0 + google.golang.org/appengine v1.6.2 // indirect + google.golang.org/grpc v1.29.1 + google.golang.org/protobuf v1.24.0 + gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 // indirect + gopkg.in/jcmturner/goidentity.v3 v3.0.0 // indirect + gopkg.in/jcmturner/gokrb5.v7 v7.3.0 // indirect + gopkg.in/karlseguin/expect.v1 v1.0.1 // indirect + sigs.k8s.io/yaml v1.1.0 // indirect +) + +replace ( + github.com/satori/go.uuid v1.2.0 => github.com/satori/go.uuid v0.0.0-20181028125025-b2ce2384e17b + go.etcd.io/etcd => go.etcd.io/etcd v0.5.0-alpha.5.0.20200425165423-262c93980547 +) diff --git a/go.sum b/go.sum new file mode 100644 index 000000000..098dd0cb6 --- /dev/null +++ b/go.sum @@ -0,0 +1,763 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= +cloud.google.com/go v0.39.0/go.mod h1:rVLT6fkc8chs9sfPtFc1SBH6em7n+ZoXaG+87tDISts= +cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= +cloud.google.com/go v0.44.3 h1:0sMegbmn/8uTwpNkB0q9cLEpZ2W5a6kl+wtBQgPWBJQ= +cloud.google.com/go v0.44.3/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= 
+cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= +contrib.go.opencensus.io/exporter/aws v0.0.0-20181029163544-2befc13012d0/go.mod h1:uu1P0UCM/6RbsMrgPa98ll8ZcHM858i/AD06a9aLRCA= +contrib.go.opencensus.io/exporter/ocagent v0.5.0 h1:TKXjQSRS0/cCDrP7KvkgU6SmILtF/yV2TOs/02K/WZQ= +contrib.go.opencensus.io/exporter/ocagent v0.5.0/go.mod h1:ImxhfLRpxoYiSq891pBrLVhN+qmP8BTVvdH2YLs7Gl0= +contrib.go.opencensus.io/exporter/stackdriver v0.12.1/go.mod h1:iwB6wGarfphGGe/e5CWqyUk/cLzKnWsOKPVW3no6OTw= +contrib.go.opencensus.io/integrations/ocsql v0.1.4/go.mod h1:8DsSdjz3F+APR+0z0WkU1aRorQCFfRxvqjUUPMbF3fE= +contrib.go.opencensus.io/resource v0.1.1/go.mod h1:F361eGI91LCmW1I/Saf+rX0+OFcigGlFvXwEGEnkRLA= +github.com/Azure/azure-amqp-common-go/v2 v2.1.0/go.mod h1:R8rea+gJRuJR6QxTir/XuEd+YuKoUiazDC/N96FiDEU= +github.com/Azure/azure-pipeline-go v0.1.8/go.mod h1:XA1kFWRVhSK+KNFiOhfv83Fv8L9achrP7OxIzeTn1Yg= +github.com/Azure/azure-pipeline-go v0.1.9/go.mod h1:XA1kFWRVhSK+KNFiOhfv83Fv8L9achrP7OxIzeTn1Yg= +github.com/Azure/azure-pipeline-go v0.2.1 h1:OLBdZJ3yvOn2MezlWvbrBMTEUQC72zAftRZOMdj5HYo= +github.com/Azure/azure-pipeline-go v0.2.1/go.mod h1:UGSo8XybXnIGZ3epmeBw7Jdz+HiUVpqIlpz/HKHylF4= +github.com/Azure/azure-pipeline-go v0.2.2 h1:6oiIS9yaG6XCCzhgAgKFfIWyo4LLCiDhZot6ltoThhY= +github.com/Azure/azure-pipeline-go v0.2.2/go.mod h1:4rQ/NZncSvGqNkkOsNpOU1tgoNuIlp9AfUH5G1tvCHc= +github.com/Azure/azure-sdk-for-go v29.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= +github.com/Azure/azure-sdk-for-go v30.1.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= +github.com/Azure/azure-service-bus-go v0.9.1/go.mod h1:yzBx6/BUGfjfeqbRZny9AQIbIe3AcV9WZbAdpkoXOa0= +github.com/Azure/azure-storage-blob-go v0.6.0/go.mod h1:oGfmITT1V6x//CswqY2gtAHND+xIP64/qL7a5QJix0Y= +github.com/Azure/azure-storage-blob-go v0.8.0 h1:53qhf0Oxa0nOjgbDeeYPUeyiNmafAFEY95rZLK0Tj6o= +github.com/Azure/azure-storage-blob-go v0.8.0/go.mod 
h1:lPI3aLPpuLTeUwh1sViKXFxwl2B6teiRqI0deQUvsw0= +github.com/Azure/go-autorest v12.0.0+incompatible h1:N+VqClcomLGD/sHb3smbSYYtNMgKpVV3Cd5r5i8z6bQ= +github.com/Azure/go-autorest v12.0.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= +github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/DataDog/zstd v1.3.6-0.20190409195224-796139022798 h1:2T/jmrHeTezcCM58lvEQXs0UpQJCo5SoGAcg+mbSTIg= +github.com/DataDog/zstd v1.3.6-0.20190409195224-796139022798/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo= +github.com/DataDog/zstd v1.4.1 h1:3oxKN3wbHibqx897utPC2LTQU4J+IHWWJO+glkAkpFM= +github.com/DataDog/zstd v1.4.1/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo= +github.com/GoogleCloudPlatform/cloudsql-proxy v0.0.0-20190605020000-c4ba1fdf4d36/go.mod h1:aJ4qN3TfrelA6NZ6AXsXRfmEVaYin3EDbSPJrKS8OXo= +github.com/OneOfOne/xxhash v1.2.2 h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE= +github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= +github.com/Shopify/sarama v1.23.1 h1:XxJBCZEoWJtoWjf/xRbmGUpAmTZGnuuF0ON0EvxxBrs= +github.com/Shopify/sarama v1.23.1/go.mod h1:XLH1GYJnLVE0XCr6KdJGVJRTwY30moWNJ4sERjXX6fs= +github.com/Shopify/toxiproxy v2.1.4+incompatible h1:TKdv8HiTLgE5wdJuEML90aBgNWsokNbMijUGhmcoBJc= +github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= 
+github.com/aws/aws-sdk-go v1.15.27/go.mod h1:mFuSZ37Z9YOHbQEwBWztmVzqXrEkub65tZoCYDt7FT0= +github.com/aws/aws-sdk-go v1.19.18/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= +github.com/aws/aws-sdk-go v1.19.45/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= +github.com/aws/aws-sdk-go v1.23.13 h1:l/NG+mgQFRGG3dsFzEj0jw9JIs/zYdtU6MXhY1WIDmM= +github.com/aws/aws-sdk-go v1.23.13/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v1.0.0 h1:HWo1m869IqiPhD389kmkxeTalrjNbbJTC8LXupb+sl0= +github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= +github.com/bitly/go-hostpool v0.0.0-20171023180738-a3a6125de932 h1:mXoPYz/Ul5HYEDvkta6I8/rnYM5gSdSV2tJ6XbZuEtY= +github.com/bitly/go-hostpool v0.0.0-20171023180738-a3a6125de932/go.mod h1:NOuUCSz6Q9T7+igc/hlvDOUdtWKryOrtFyIVABv/p7k= +github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869 h1:DDGfHa7BWjL4YnC6+E63dPcxHo2sUxDIu8g3QgEJdRY= +github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4= +github.com/buraksezer/consistent v0.0.0-20191006190839-693edf70fd72 h1:fUmDBbSvv1uOzo/t8WaxZMVb7BxJ8JECo5lGoR9c5bA= +github.com/buraksezer/consistent v0.0.0-20191006190839-693edf70fd72/go.mod h1:OEE5igu/CDjGegM1Jn6ZMo7R6LlV/JChAkjfQQIRLpg= +github.com/census-instrumentation/opencensus-proto v0.2.0 h1:LzQXZOgg4CQfE6bFvXGM30YZL1WW/M337pXml+GrcZ4= +github.com/census-instrumentation/opencensus-proto v0.2.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/census-instrumentation/opencensus-proto v0.2.1 
h1:glEXhBS5PSLLv4IXzLA5yPRVX4bilULVyxxbrfOtDAk= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= +github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= +github.com/chrislusf/raft v0.0.0-20190225081310-10d6e2182d92 h1:lM9SFsh0EPXkyJyrTJqLZPAIJBtNFP6LNkYXu2MnSZI= +github.com/chrislusf/raft v0.0.0-20190225081310-10d6e2182d92/go.mod h1:4jyiUCD5y548+yKW+oiHtccBiMaLCCbFBpK2t7X4eUo= +github.com/chrislusf/raft v1.0.0 h1:tRGtB3nWOg8VOPSoeZ4NfHMLJcjKgyNkUZQbcyKWf1Y= +github.com/chrislusf/raft v1.0.0/go.mod h1:Ep5DP+mJSosjfKiix1uU7Lc2Df/SX4oGJEpZlXH5l68= +github.com/chrislusf/raft v1.0.1 h1:Wa4ffkmkysW7cX3T/gMC/Mk3PhnOXhsqOVwQJcMndhw= +github.com/chrislusf/raft v1.0.1/go.mod h1:Ep5DP+mJSosjfKiix1uU7Lc2Df/SX4oGJEpZlXH5l68= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= +github.com/coreos/bbolt v1.3.2 h1:wZwiHHUieZCquLkDL0B8UhzreNWsPHooDAG3q34zk0s= +github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= +github.com/coreos/bbolt v1.3.3 h1:n6AiVyVRKQFNb6mJlwESEvvLoDyiTzXX7ORAUlkeBdY= +github.com/coreos/bbolt v1.3.3/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= +github.com/coreos/etcd v3.3.10+incompatible h1:jFneRYjIvLMLhDLCzuTuU4rSJUjRplcJQ7pD7MnhC04= +github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/etcd v3.3.15+incompatible h1:+9RjdC18gMxNQVvSiXvObLu29mOFmkgdsB4cRTlV+EE= +github.com/coreos/etcd v3.3.15+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/go-semver v0.2.0 
h1:3Jm3tLmsgAYcjC+4Up7hJrFBPr+n7rAqYeSw/SZazuY= +github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM= +github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e h1:Wf6HqHfScWJN9/ZjdUKyjop4mf3Qdd+1TvvltAvM3m8= +github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f h1:JOrtw2xFKzlg+cbHpyrpLDmnN1HqhBfnX7WDiW7eG2c= +github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd/v22 v22.0.0 h1:XJIw/+VlJ+87J+doOxznsAWIdmWuViOVhkQamW5YV28= +github.com/coreos/go-systemd/v22 v22.0.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= +github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f h1:lBNOc5arjvs8E5mO2tbpBpLoyyu8B6e44T7hJy6potg= +github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= +github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/devigned/tab v0.1.1/go.mod h1:XG9mPq0dFghrYvoBF3xdRrJzSTX1b7IQrvaL9mzjeJY= +github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM= +github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= +github.com/dimchansky/utfbom v1.1.0/go.mod 
h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQvIirEdv+8= +github.com/disintegration/imaging v1.6.2 h1:w1LecBlG2Lnp8B3jk5zSuNqd7b4DXhcjwek1ei82L+c= +github.com/disintegration/imaging v1.6.2/go.mod h1:44/5580QXChDfwIclfc/PCwrr44amcmDAg8hxG0Ewe4= +github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo= +github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/eapache/go-resiliency v1.1.0 h1:1NtRmCAqadE2FN4ZcN6g90TP3uk8cg9rn9eNK2197aU= +github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= +github.com/eapache/go-resiliency v1.2.0 h1:v7g92e/KSN71Rq7vSThKaWIq68fL4YHvWyiUKorFR1Q= +github.com/eapache/go-resiliency v1.2.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= +github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21 h1:YEetp8/yCZMuEPMUDHG0CW/brkkEp8mzqk2+ODEitlw= +github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= +github.com/eapache/queue v1.1.0 h1:YOEu7KNc61ntiQlcEeUIoDTJ2o8mQznoNvUhiigpIqc= +github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/facebookgo/clock v0.0.0-20150410010913-600d898af40a h1:yDWHCSQ40h88yih2JAcL6Ls/kVkSE8GFACTGVnMPruw= +github.com/facebookgo/clock v0.0.0-20150410010913-600d898af40a/go.mod h1:7Ga40egUymuWXxAe151lTNnCv97MddSOVsjpPPkityA= 
+github.com/facebookgo/ensure v0.0.0-20200202191622-63f1cf65ac4c h1:8ISkoahWXwZR41ois5lSJBSVw4D0OV19Ht/JSTzvSv0= +github.com/facebookgo/ensure v0.0.0-20200202191622-63f1cf65ac4c/go.mod h1:Yg+htXGokKKdzcwhuNDwVvN+uBxDGXJ7G/VN1d8fa64= +github.com/facebookgo/stack v0.0.0-20160209184415-751773369052 h1:JWuenKqqX8nojtoVVWjGfOF9635RETekkoH6Cc9SX0A= +github.com/facebookgo/stack v0.0.0-20160209184415-751773369052/go.mod h1:UbMTZqLaRiH3MsBH8va0n7s1pQYcu3uTb8G4tygF4Zg= +github.com/facebookgo/stats v0.0.0-20151006221625-1b76add642e4 h1:0YtRCqIZs2+Tz49QuH6cJVw/IFqzo39gEqZ0iYLxD2M= +github.com/facebookgo/stats v0.0.0-20151006221625-1b76add642e4/go.mod h1:vsJz7uE339KUCpBXx3JAJzSRH7Uk4iGGyJzR529qDIA= +github.com/facebookgo/subset v0.0.0-20200203212716-c811ad88dec4 h1:7HZCaLC5+BZpmbhCOZJ293Lz68O7PYrF2EzeiFMwCLk= +github.com/facebookgo/subset v0.0.0-20200203212716-c811ad88dec4/go.mod h1:5tD+neXqOorC30/tWg0LCSkrqj/AR6gu8yY8/fpw1q0= +github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/fortytw2/leaktest v1.2.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= +github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= +github.com/frankban/quicktest v1.7.2 h1:2QxQoC1TS09S7fhCPsrvqYdvP1H5M1P1ih5ABm3BTYk= +github.com/frankban/quicktest v1.7.2/go.mod h1:jaStnuzAqU1AJdCO0l53JDCJrVDKcS03DbaAcR7Ks/o= +github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/go-ini/ini v1.25.4/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8= +github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= 
+github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/go-redis/redis v6.15.7+incompatible h1:3skhDh95XQMpnqeqNftPkQD9jL9e5e36z/1SUm6dy1U= +github.com/go-redis/redis v6.15.7+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8wamY7mA7PouImQ2Jvg6kA= +github.com/go-sql-driver/mysql v1.4.1 h1:g24URVg0OFbNUTx9qqY1IRZ9D9z3iPyi5zKhQZpNwpA= +github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= +github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/gobuffalo/attrs v0.0.0-20190224210810-a9411de4debd/go.mod h1:4duuawTqi2wkkpB4ePgWMaai6/Kc6WEz83bhFwpHzj0= +github.com/gobuffalo/depgen v0.0.0-20190329151759-d478694a28d3/go.mod h1:3STtPUQYuzV0gBVOY3vy6CfMm/ljR4pABfrTeHNLHUY= +github.com/gobuffalo/depgen v0.1.0/go.mod h1:+ifsuy7fhi15RWncXQQKjWS9JPkdah5sZvtHc2RXGlg= +github.com/gobuffalo/envy v1.6.15/go.mod h1:n7DRkBerg/aorDM8kbduw5dN3oXGswK5liaSCx4T5NI= +github.com/gobuffalo/envy v1.7.0/go.mod h1:n7DRkBerg/aorDM8kbduw5dN3oXGswK5liaSCx4T5NI= +github.com/gobuffalo/flect v0.1.0/go.mod h1:d2ehjJqGOH/Kjqcoz+F7jHTBbmDb38yXA598Hb50EGs= +github.com/gobuffalo/flect v0.1.1/go.mod h1:8JCgGVbRjJhVgD6399mQr4fx5rRfGKVzFjbj6RE/9UI= +github.com/gobuffalo/flect v0.1.3/go.mod h1:8JCgGVbRjJhVgD6399mQr4fx5rRfGKVzFjbj6RE/9UI= +github.com/gobuffalo/genny v0.0.0-20190329151137-27723ad26ef9/go.mod h1:rWs4Z12d1Zbf19rlsn0nurr75KqhYp52EAGGxTbBhNk= +github.com/gobuffalo/genny v0.0.0-20190403191548-3ca520ef0d9e/go.mod h1:80lIj3kVJWwOrXWWMRzzdhW3DsrdjILVil/SFKBzF28= +github.com/gobuffalo/genny v0.1.0/go.mod h1:XidbUqzak3lHdS//TPu2OgiFB+51Ur5f7CSnXZ/JDvo= +github.com/gobuffalo/genny v0.1.1/go.mod h1:5TExbEyY48pfunL4QSXxlDOmdsD44RRq4mVZ0Ex28Xk= +github.com/gobuffalo/gitgen v0.0.0-20190315122116-cc086187d211/go.mod h1:vEHJk/E9DmhejeLeNt7UVvlSGv3ziL+djtTr3yyzcOw= +github.com/gobuffalo/gogen 
v0.0.0-20190315121717-8f38393713f5/go.mod h1:V9QVDIxsgKNZs6L2IYiGR8datgMhB577vzTDqypH360= +github.com/gobuffalo/gogen v0.1.0/go.mod h1:8NTelM5qd8RZ15VjQTFkAW6qOMx5wBbW4dSCS3BY8gg= +github.com/gobuffalo/gogen v0.1.1/go.mod h1:y8iBtmHmGc4qa3urIyo1shvOD8JftTtfcKi+71xfDNE= +github.com/gobuffalo/logger v0.0.0-20190315122211-86e12af44bc2/go.mod h1:QdxcLw541hSGtBnhUc4gaNIXRjiDppFGaDqzbrBd3v8= +github.com/gobuffalo/mapi v1.0.1/go.mod h1:4VAGh89y6rVOvm5A8fKFxYG+wIW6LO1FMTG9hnKStFc= +github.com/gobuffalo/mapi v1.0.2/go.mod h1:4VAGh89y6rVOvm5A8fKFxYG+wIW6LO1FMTG9hnKStFc= +github.com/gobuffalo/packd v0.0.0-20190315124812-a385830c7fc0/go.mod h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWeG2RIxq4= +github.com/gobuffalo/packd v0.1.0/go.mod h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWeG2RIxq4= +github.com/gobuffalo/packr/v2 v2.0.9/go.mod h1:emmyGweYTm6Kdper+iywB6YK5YzuKchGtJQZ0Odn4pQ= +github.com/gobuffalo/packr/v2 v2.2.0/go.mod h1:CaAwI0GPIAv+5wKLtv8Afwl+Cm78K/I/VCm/3ptBN+0= +github.com/gobuffalo/syncx v0.0.0-20190224160051-33c29581e754/go.mod h1:HhnNqWY95UYwwW3uSASeV7vtgYkT2t16hJgV3AEPUpw= +github.com/gocql/gocql v0.0.0-20190829130954-e163eff7a8c6 h1:P66kRWyEoIx6URKgAC3ijx9jo9gEid7bEhLQ/Z0G65A= +github.com/gocql/gocql v0.0.0-20190829130954-e163eff7a8c6/go.mod h1:Q7Sru5153KG8D9zwueuQJB3ccJf9/bIwF/x8b3oKgT8= +github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.2.1 h1:/s5zKNz0uPFCZ5hddgPdo2TK2TVrUNMn0OOX8/aZMTE= +github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= +github.com/gogo/protobuf v1.2.2-0.20190730201129-28a6bbf47e48 h1:X+zN6RZXsvnrSJaAIQhZezPfAfvsqihKKR8oiLHid34= +github.com/gogo/protobuf v1.2.2-0.20190730201129-28a6bbf47e48/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= 
+github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef h1:veQD95Isof8w9/WXiA+pa3tz3fJXkt5B7QaRBrM62gk= +github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6 h1:ZgQEtGgCBiWRM39fZuwSd1LwSqqSW0hOdXCYYDX0R3I= +github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1 
h1:ZFgWrT+bLgsYPirOnRfKLYJLvssAegOj/hgyMFdJZe0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.2 h1:+Z5KGCizgyZCbGh1KZqA0fcLLkwbsjIzS4aV2v7wJX0= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/snappy v0.0.0-20170215233205-553a64147049/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4= +github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.0 h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo= +github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1 h1:Xye71clBPdm5HgqGwUkwhbynsUJZhDbS20FvLhQ2izg= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-replayers/grpcreplay v0.1.0 h1:eNb1y9rZFmY4ax45uEEECSa8fsxGRU+8Bil52ASAwic= +github.com/google/go-replayers/grpcreplay v0.1.0/go.mod h1:8Ig2Idjpr6gifRd6pNVggX6TC1Zw6Jx74AKp7QNH2QE= +github.com/google/go-replayers/httpreplay v0.1.0 h1:AX7FUb4BjrrzNvblr/OlgwrmFiep6soj5K2QSDW7BGk= +github.com/google/go-replayers/httpreplay v0.1.0/go.mod h1:YKZViNhiGgqdBlUbI2MwGpq4pXxNmhJLPHQ7cv2b5no= 
+github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/martian v2.1.1-0.20190517191504-25dcb96d9e51+incompatible h1:xmapqc1AyLoB+ddYT6r04bD9lIjlOqGaREovi0SzFaE= +github.com/google/martian v2.1.1-0.20190517191504-25dcb96d9e51+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/subcommands v1.0.1/go.mod h1:ZjhPrFU+Olkh9WazFPsl27BQ4UPiG37m3yTrtFlrHVk= +github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY= +github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/wire v0.3.0 h1:imGQZGEVEHpje5056+K+cgdO72p0LQv2xIIFXNGUf60= +github.com/google/wire v0.3.0/go.mod h1:i1DMg/Lu8Sz5yYl25iOdmc5CT5qusaa+zmRWs16741s= +github.com/googleapis/gax-go v2.0.2+incompatible h1:silFMLAnr330+NRuag/VjIGF7TLp/LBrV2CJKFLWEww= +github.com/googleapis/gax-go v2.0.2+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY= +github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= +github.com/googleapis/gax-go/v2 v2.0.5 h1:sjZBwGj9Jlw33ImPtvFviGYvseOtDM7hkSKB7+Tv3SM= +github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/gorilla/mux v1.7.3 h1:gnP5JzjVOuiZD07fKKToCAOjS0yOpj/qPETTXCCS6hw= +github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod 
h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= +github.com/gorilla/websocket v1.4.0 h1:WDFjx/TMzVgy9VdMMQi2K2Emtwi2QcUQsztZ/zLaH/Q= +github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= +github.com/gorilla/websocket v1.4.1 h1:q7AeDBpnBk8AogcD4DSag/Ukw/KV+YhzLj2bP5HvKCM= +github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/grpc-ecosystem/go-grpc-middleware v1.0.0 h1:Iju5GlWwrvL6UBg4zJJt3btmonfrMlCDdsejg4CZE7c= +github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= +github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4 h1:z53tR0945TRRQO/fLEVPI6SMv7ZflF0TEaTAoU7tOzg= +github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= +github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho= +github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= +github.com/grpc-ecosystem/grpc-gateway v1.8.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/grpc-ecosystem/grpc-gateway v1.9.0 h1:bM6ZAFZmc/wPFaRDi0d5L7hGEZEx/2u+Tmr2evNHDiI= +github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/grpc-ecosystem/grpc-gateway v1.9.2/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/grpc-ecosystem/grpc-gateway v1.11.0 h1:aT5ISUniaOTErogCQ+4pGoYNBB6rm6Fq3g1v8QwYGas= +github.com/grpc-ecosystem/grpc-gateway v1.11.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed h1:5upAirOpQc1Q53c0bnx2ufif5kANL7bfZWcc6VJWJd8= +github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed/go.mod 
h1:tMWxXQ9wFIaZeTI9F+hmhFiGpFmhOHzyShyFUhRm0H4= +github.com/hashicorp/go-uuid v1.0.1 h1:fv1ep09latC32wFoVwnqcnKJGnMSdBanPczbHAYm1BE= +github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU= +github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.3 h1:YPkqC67at8FYaadspW/6uE0COsBxS2656RLEr8Bppgk= +github.com/hashicorp/golang-lru v0.5.3/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= +github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= +github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= +github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= +github.com/jcmturner/gofork v0.0.0-20190328161633-dc7c13fece03 h1:FUwcHNlEqkqLjLBdCp5PRlCFijNjvcYANOZXzCfXwCM= +github.com/jcmturner/gofork v0.0.0-20190328161633-dc7c13fece03/go.mod h1:MK8+TM0La+2rjBD4jE12Kj1pCCxK7d2LK/UM3ncEo0o= +github.com/jcmturner/gofork v1.0.0 h1:J7uCkflzTEhUZ64xqKnkDxq3kzc96ajM1Gli5ktUem8= +github.com/jcmturner/gofork v1.0.0/go.mod h1:MK8+TM0La+2rjBD4jE12Kj1pCCxK7d2LK/UM3ncEo0o= +github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= +github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af h1:pmfjZENx5imkbgOkpRUYLnmbU7UEFbjtDA2hxJ1ichM= +github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= +github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg= 
+github.com/jonboulle/clockwork v0.1.0 h1:VKV+ZcuP6l3yW9doeqz6ziZGgcynBVQO+obU0+0hcPo= +github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= +github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.7 h1:KfgG9LzI+pYjr4xvmz/5H4FXjokeP+rlHLhv3iH62Fo= +github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/karlseguin/ccache v2.0.3+incompatible h1:j68C9tWOROiOLWTS/kCGg9IcJG+ACqn5+0+t8Oh83UU= +github.com/karlseguin/ccache v2.0.3+incompatible/go.mod h1:CM9tNPzT6EdRh14+jiW8mEF9mkNZuuE51qmgGYUB93w= +github.com/karlseguin/expect v1.0.1 h1:z4wy4npwwHSWKjGWH85WNJO42VQhovxTCZDSzhjo8hY= +github.com/karlseguin/expect v1.0.1/go.mod h1:zNBxMY8P21owkeogJELCLeHIt+voOSduHYTFUbwRAV8= +github.com/karrick/godirwalk v1.8.0/go.mod h1:H5KPZjojv4lE+QYImBI8xVtrBRgYrIVsaRPx4tDPEn4= +github.com/karrick/godirwalk v1.10.3/go.mod h1:RoGL9dQei4vP9ilrpETWE8CLOZ1kiN0LhBygSwrAsHA= +github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= +github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/compress v1.9.5 h1:U+CaK85mrNNb4k8BNOfgJtJ/gr6kswUCFj6miSzVC6M= +github.com/klauspost/compress v1.9.5/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= +github.com/klauspost/compress v1.10.9 h1:pPRt1Z78crspaHISkpSSHjDlx+Tt9suHe519dsI0vF4= +github.com/klauspost/compress v1.10.9/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= +github.com/klauspost/cpuid v1.2.1 h1:vJi+O/nMdFt0vqm8NZBI6wzALWdA2X+egi0ogNyrC/w= +github.com/klauspost/cpuid v1.2.1/go.mod 
h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= +github.com/klauspost/crc32 v1.2.0 h1:0VuyqOCruD33/lJ/ojXNvzVyl8Zr5zdTmj9l9qLZ86I= +github.com/klauspost/crc32 v1.2.0/go.mod h1:+ZoRqAPRLkC4NPOvfYeR5KNOrY6TD+/sAC3HXPZgDYg= +github.com/klauspost/reedsolomon v1.9.2 h1:E9CMS2Pqbv+C7tsrYad4YC9MfhnMVWhMRsTi7U0UB18= +github.com/klauspost/reedsolomon v1.9.2/go.mod h1:CwCi+NUr9pqSVktrkN+Ondf06rkhYZ/pcNv7fu+8Un4= +github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.2 h1:DB17ag19krx9CFsz4o3enTrPXyIXCl+2iCXH/aMAp9s= +github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= +github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kurin/blazer v0.5.3 h1:SAgYv0TKU0kN/ETfO5ExjNAPyMt2FocO2s/UlCHfjAk= +github.com/kurin/blazer v0.5.3/go.mod h1:4FCXMUWo9DllR2Do4TtBd377ezyAJ51vB5uTBjt0pGU= +github.com/lib/pq v1.1.1/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/lib/pq v1.2.0 h1:LXpIM/LZ5xGFhOpXAQUIMM1HdyqzVYM13zNdjCEEcA0= +github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/magiconair/properties v1.8.0 h1:LLgXmsheXeRoUOBOjtwPQCWIYqM/LU1ayDtDePerRcY= +github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/magiconair/properties v1.8.1 
h1:ZC2Vc7/ZFkGmsVC9KvOjumD+G5lXy2RtTKyzRKO2BQ4= +github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/markbates/oncer v0.0.0-20181203154359-bf2de49a0be2/go.mod h1:Ld9puTsIW75CHf65OeIOkyKbteujpZVXDpWK6YGZbxE= +github.com/markbates/safe v1.0.1/go.mod h1:nAqgmRi7cY2nqMc92/bSEeQA+R4OheNU2T1kNSCBdG0= +github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= +github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= +github.com/mattn/go-ieproxy v0.0.0-20190610004146-91bb50d98149 h1:HfxbT6/JcvIljmERptWhwa8XzP7H3T+Z2N26gTsaDaA= +github.com/mattn/go-ieproxy v0.0.0-20190610004146-91bb50d98149/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc= +github.com/mattn/go-ieproxy v0.0.0-20190702010315-6dee0af9227d/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc= +github.com/mattn/go-ieproxy v0.0.0-20190805055040-f9202b1cfdeb h1:hXqqXzQtJbENrsb+rsIqkVqcg4FUJL0SQFGw08Dgivw= +github.com/mattn/go-ieproxy v0.0.0-20190805055040-f9202b1cfdeb/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc= +github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= +github.com/mattn/go-runewidth v0.0.3 h1:a+kO+98RDGEfo6asOGMmpodZq4FNtnGP54yps8BzLR4= +github.com/mattn/go-runewidth v0.0.3/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= +github.com/mattn/go-runewidth v0.0.4 h1:2BvfKmzob6Bmd4YsL0zygOqfdFnK7GR4QL06Do4/p7Y= +github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= +github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/mitchellh/go-homedir 
v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE= +github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc= +github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/nats-io/jwt v0.2.6/go.mod h1:mQxQ0uHQ9FhEVPIcTSKwx2lqZEpXWWcCgA7R6NrWvvY= +github.com/nats-io/jwt v0.2.14 h1:wA50KvFz/JXGXMHRygTWsRGh/ixxgC5E3kHvmtGLNf4= +github.com/nats-io/jwt v0.2.14/go.mod h1:fRYCDE99xlTsqUzISS1Bi75UBJ6ljOJQOAAu5VglpSg= +github.com/nats-io/nats-server/v2 v2.0.0/go.mod h1:RyVdsHHvY4B6c9pWG+uRLpZ0h0XsqiuKp2XCTurP5LI= +github.com/nats-io/nats-server/v2 v2.0.4 h1:XOMeQRbhl1lGNTIctPhih6pTa15NGif54Uas6ZW5q7g= +github.com/nats-io/nats-server/v2 v2.0.4/go.mod h1:AWdGEVbjKRS9ZIx4DSP5eKW48nfFm7q3uiSkP/1KD7M= +github.com/nats-io/nats.go v1.8.1 h1:6lF/f1/NN6kzUDBz6pyvQDEXO39jqXcWRLu/tKjtOUQ= +github.com/nats-io/nats.go v1.8.1/go.mod h1:BrFz9vVn0fU3AcH9Vn4Kd7W0NpJ651tD5omQ3M8LwxM= +github.com/nats-io/nkeys v0.0.2 h1:+qM7QpgXnvDDixitZtQUBDY9w/s9mu1ghS+JIbsrx6M= +github.com/nats-io/nkeys v0.0.2/go.mod 
h1:dab7URMsZm6Z/jp9Z5UGa87Uutgc2mVpXLC4B7TDb/4= +github.com/nats-io/nkeys v0.1.0 h1:qMd4+pRHgdr1nAClu+2h/2a5F2TmKcCzjCDazVgRoX4= +github.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= +github.com/nats-io/nuid v1.0.1 h1:5iA8DT8V7q8WK2EScv2padNa/rTESc1KdnPw4TC2paw= +github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= +github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= +github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.7.0 h1:WSHQ+IS43OoUrWtD1/bbclrwK8TTH5hzp+umCiuxHgs= +github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.10.1 h1:q/mM8GF/n0shIN8SaAZ0V+jnLPzen6WIVZdiwrRlMlo= +github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/gomega v1.4.3 h1:RE1xgDvH7imwFD45h+u2SgIfERHlS2yNG4DObb5BSKU= +github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/onsi/gomega v1.7.0 h1:XPnZz8VVBHjVsy1vzJmRwIcSwiUO+JFfrv/xGiigmME= +github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/pelletier/go-toml v1.2.0 h1:T5zMGML61Wp+FlcbWjRDT7yAxhJNAiPPLOFECq181zc= +github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= +github.com/pelletier/go-toml v1.4.0 h1:u3Z1r+oOXJIkxqw34zVhyPgjBsm6X2wn21NWs/HfSeg= +github.com/pelletier/go-toml v1.4.0/go.mod h1:PN7xzY2wHTK0K9p34ErDQMlFxa51Fk0OUruD3k1mMwo= +github.com/peterh/liner v1.1.0 h1:f+aAedNJA6uk7+6rXsYBnhdo4Xux7ESLe+kcuVUF5os= +github.com/peterh/liner v1.1.0/go.mod h1:CRroGNssyjTd/qIG2FyxByd2S8JEAZXBl4qUrZf8GS0= +github.com/pierrec/lz4 v0.0.0-20190327172049-315a67e90e41/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc= 
+github.com/pierrec/lz4 v2.2.7+incompatible h1:Eerk9aiqeZo2QzsbWOAsELUf9ddvAxEdMY9LYze/DEc= +github.com/pierrec/lz4 v2.2.7+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= +github.com/prometheus/client_golang v1.0.0 h1:vrDKnkGzuGvhNAL56c7DBz29ZL+KxnoR0x7enabFceM= +github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_golang v1.1.0 h1:BQ53HtBmfOitExawJ6LokA4x8ov/z0SYYb0+HxJfRI8= +github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90 h1:S/YWwWx/RA8rT8tKFRuGUZhuA90OyIBpPCXkcbwU8DE= +github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4 h1:gQz4mCbXsO+nc9n1hCxHcGA3Zx3Eo+UHZoInFGUIXNM= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod 
h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.4.1 h1:K0MGApIoQvMw27RTdJkPbr3JZ7DNbtxQNyi5STVM6Kw= +github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.6.0 h1:kRhiuYSXR3+uv2IbVbZhUxK5zVD/2pp3Gd2PpvPkpEo= +github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc= +github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.2 h1:6LJUbpNm42llc4HRCuvApCSWB/WfhuNo9K98Q9sNGfs= +github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= +github.com/prometheus/procfs v0.0.4 h1:w8DjqFMJDjuVwdZBQoOozr4MVWOnwF7RcL/7uxBjY78= +github.com/prometheus/procfs v0.0.4/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= +github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= +github.com/rakyll/statik v0.1.7 h1:OF3QCZUuyPxuGEP7B4ypUa7sB/iHtqOTDYZXGM8KOdQ= +github.com/rakyll/statik v0.1.7/go.mod h1:AlZONWzMtEnMs7W4e/1LURLiI49pIMmp6V9Unghqrcc= +github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a h1:9ZKAASQSHhDYGoxY8uLVpewe1GDZ2vu2Tr/vTdVAkFQ= +github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= +github.com/rcrowley/go-metrics v0.0.0-20190826022208-cac0b30c2563 h1:dY6ETXrvDG7Sa4vE8ZQG4yqWg6UnOcbqTAahkV813vQ= +github.com/rcrowley/go-metrics v0.0.0-20190826022208-cac0b30c2563/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= +github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod 
h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= +github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.2.2/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/seaweedfs/fuse v0.0.0-20190510212405-310228904eff h1:uLd5zBvf5OA67wcVRePHrFt60bR4LSskaVhgVwyk0Jg= +github.com/seaweedfs/fuse v0.0.0-20190510212405-310228904eff/go.mod h1:cubdLmQFqEUZ9vNJrznhgc3m3VMAJi/nY2Ix2axXkG0= +github.com/seaweedfs/goexif v1.0.2 h1:p+rTXYdQ2mgxd+1JaTrQ9N8DvYuw9UH9xgYmJ+Bb29E= +github.com/seaweedfs/goexif v1.0.2/go.mod h1:MrKs5LK0HXdffrdCZrW3OIMegL2xXpC6ThLyXMyjdrk= +github.com/sirupsen/logrus v1.2.0 h1:juTguoYk5qI21pwyTXY3B3Y5cOTH3ZUyZCg1v/mihuo= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.4.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= +github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4= +github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/soheilhy/cmux v0.1.4 h1:0HKaf1o97UwFjHH9o5XsHUOF+tqmdA7KEzXLpiyaw0E= +github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= +github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= +github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spf13/afero v1.1.2 h1:m8/z1t7/fwjysjQRYbP0RD+bUIF/8tJwPdEZsI83ACI= +github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= +github.com/spf13/afero v1.2.2 h1:5jhuqJyZCZf2JRofRvN/nIFgIWNzPa3/Vz8mYylgbWc= +github.com/spf13/afero 
v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= +github.com/spf13/cast v1.3.0 h1:oget//CVOEoFewqQxwr0Ej5yjygnqGkvggSE/gB35Q8= +github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= +github.com/spf13/jwalterweatherman v1.0.0 h1:XHEdyB+EcvlqZamSM4ZOMGlc93t6AcsBEu9Gc1vn7yk= +github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= +github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk= +github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= +github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg= +github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/viper v1.4.0 h1:yXHLWeravcrgGyFSyCgdYpXQ9dR9c/WED3pg1RhxqEU= +github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE= +github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94 h1:0ngsPmuP6XIjiFRNFYlvKwSr5zff2v+uPHaffZ6/M4k= +github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= +github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271 h1:WhxRHzgeVGETMlmVfqhRn8RIeeNoPr2Czh33I4Zdccw= +github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1 h1:2vfRuCMp5sSVIDSqO8oNnWJq7mPa6KVP3iPIwFBuy8A= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.2.0 h1:Hbg2NidpLE8veEBkEZTL3CvlkUIVzuU9jDplZO54c48= +github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= 
+github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/syndtr/goleveldb v1.0.0 h1:fBdIW9lB4Iz0n9khmH8w27SJ3QEJ7+IgjPEwGSZiFdE= +github.com/syndtr/goleveldb v1.0.0/go.mod h1:ZVVdQEZoIme9iO1Ch2Jdy24qqXrMMOU6lpPAyBWyWuQ= +github.com/tidwall/gjson v1.3.2 h1:+7p3qQFaH3fOMXAJSrdZwGKcOO/lYdGS0HqGhPqDdTI= +github.com/tidwall/gjson v1.3.2/go.mod h1:P256ACg0Mn+j1RXIDXoss50DeIABTYK1PULOJHhxOls= +github.com/tidwall/match v1.0.1 h1:PnKP62LPNxHKTwvHHZZzdOAOCtsJTjo6dZLCwpKm5xc= +github.com/tidwall/match v1.0.1/go.mod h1:LujAq0jyVjBy028G1WhWfIzbpQfMO8bBZ6Tyb0+pL9E= +github.com/tidwall/pretty v1.0.0 h1:HsD+QiTn7sK6flMKIvNmpqz1qrpP3Ps6jOKIKMooyg4= +github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= +github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5 h1:LnC5Kc/wtumK+WB441p7ynQJzVuNRJiqddSIE3IlSEQ= +github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/ugorji/go v1.1.4 h1:j4s+tAvLfL3bZyefP2SEWmhBzmuIlH/eqNuPdFPgngw= +github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= +github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= +github.com/willf/bitset v1.1.10 h1:NotGKqX0KwQ72NUzqrjZq5ipPNDQex9lo3WpaS8L2sc= +github.com/willf/bitset v1.1.10/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4= +github.com/willf/bloom v2.0.3+incompatible 
h1:QDacWdqcAUI1MPOwIQZRy9kOR7yxfyEmxX8Wdm2/JPA= +github.com/willf/bloom v2.0.3+incompatible/go.mod h1:MmAltL9pDMNTrvUkxdg0k0q5I0suxmuwp3KbyrZLOZ8= +github.com/wsxiaoys/terminal v0.0.0-20160513160801-0940f3fc43a0 h1:3UeQBvD0TFrlVjOeLOBz+CPAI8dnbqNSVwUwRrkp7vQ= +github.com/wsxiaoys/terminal v0.0.0-20160513160801-0940f3fc43a0/go.mod h1:IXCdmsXIht47RaVFLEdVnh1t+pgYtTAhQGj73kz+2DM= +github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c h1:u40Z8hqBAAQyv+vATcGgV0YCnDjqSL7/q/JyPhhJSPk= +github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I= +github.com/xdg/stringprep v0.0.0-20180714160509-73f8eece6fdc/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y= +github.com/xdg/stringprep v1.0.0 h1:d9X0esnoa3dFsV0FG35rAT0RIhYFlPq7MiP+DW89La0= +github.com/xdg/stringprep v1.0.0/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y= +github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 h1:eY9dn8+vbi4tKz5Qo6v2eYzo7kUS51QINcR5jNpbZS8= +github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= +github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= +go.etcd.io/bbolt v1.3.2 h1:Z/90sZLPOeCy2PwprqkFa25PdkusRzaj9P8zm/KNyvk= +go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +go.etcd.io/bbolt v1.3.3 h1:MUGmc65QhB3pIlaQ5bB4LwqSj6GIonVJXpZiaKNyaKk= +go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +go.etcd.io/bbolt v1.3.4/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= +go.etcd.io/etcd v0.5.0-alpha.5.0.20200425165423-262c93980547 h1:s71VGheLtWmCYsnNjf+s7XE8HsrZnd3EYGrLGWVm7nY= +go.etcd.io/etcd v0.5.0-alpha.5.0.20200425165423-262c93980547/go.mod h1:YoUyTScD3Vcv2RBm3eGVOq7i1ULiz3OuXoQFWOirmAM= +go.etcd.io/etcd v3.3.15+incompatible h1:0VpOVCF6EFnJptt8Jh0EWEHO4j2fepyV1fpu9xz/UoQ= +go.etcd.io/etcd v3.3.15+incompatible/go.mod 
h1:yaeTdrJi5lOmYerz05bd8+V7KubZs8YSFZfzsF9A6aI= +go.mongodb.org/mongo-driver v1.3.2 h1:IYppNjEV/C+/3VPbhHVxQ4t04eVW0cLp0/pNdW++6Ug= +go.mongodb.org/mongo-driver v1.3.2/go.mod h1:MSWZXKOynuguX+JSvwP8i+58jYCXxbia8HS3gZBapIE= +go.opencensus.io v0.15.0/go.mod h1:UffZAU+4sDEINUGP/B7UfBBkq4fqLu9zXAX7ke6CHW0= +go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= +go.opencensus.io v0.22.0 h1:C9hSCOW830chIVkdja34wa6Ky+IzWllkUinR+BtRZd4= +go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= +go.uber.org/atomic v1.4.0 h1:cxzIVoETapQEqDhQu3QfnvXAV4AlzcvUCxkVUFw3+EU= +go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.6.0 h1:Ezj3JGmsOnG1MoRWQkPBsKLe9DwWD9QeXzTRzzldNVk= +go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= +go.uber.org/multierr v1.1.0 h1:HoEmRHQPVSqub6w2z2d2EOVs2fjyFRGyofhKuyDq0QI= +go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= +go.uber.org/multierr v1.2.0 h1:6I+W7f5VwC5SV9dNrZ3qXrDB9mD0dyGOi/ZJmYw03T4= +go.uber.org/multierr v1.2.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= +go.uber.org/multierr v1.5.0 h1:KCa4XfM8CWFCpxXRGok+Q0SS/0XBhMDbHHGABQLvD2A= +go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= +go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= +go.uber.org/zap v1.10.0 h1:ORx85nbTijNz8ljznvCMR1ZBIPKFn3jQrag10X2AsuM= +go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +go.uber.org/zap v1.14.1 h1:nYDKopTbvAPq/NrUVZwT15y2lpROBiLLyoRTbXOYWOo= +go.uber.org/zap v1.14.1/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc= +gocloud.dev v0.16.0 h1:hWeaQWxamGerwsU7B9xSWvUjx0p7TwG8fcHro2TzbbM= +gocloud.dev v0.16.0/go.mod h1:xWGXD8t7bEhqPIuyAUFyXV9qHn+PvEY2F2GKPh7i/O0= +gocloud.dev/pubsub/natspubsub v0.16.0 h1:MoBGXULDzb1fVaZsGWO5cUCgr6yoI/DHhau8OPGaGEI= 
+gocloud.dev/pubsub/natspubsub v0.16.0/go.mod h1:0n7pT7PkLMClBUHDrOkHfOFVr/o/6kawNMwsyAbwadI= +gocloud.dev/pubsub/rabbitpubsub v0.16.0 h1:Bkv2njMSl2tmT3tGbvbwpiIDAXBIpqzP9dmts+rhD4E= +gocloud.dev/pubsub/rabbitpubsub v0.16.0/go.mod h1:JJVdUUIqwgaaMJg/1xHQza0g4sI/4KHHSNiGE+pn4JM= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190404164418-38d8ce5564a5/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= +golang.org/x/crypto v0.0.0-20190422162423-af44ce270edf/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190530122614-20be4c3c3ed5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5 h1:58fnuSXlxZmFdJyvtTFVmVhcMLU6v5fEb/ok4wyqtNU= +golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190911031432-227b76d455e7 h1:0hQKqeLdqlt5iIwVOBErRisrHJAN57yOiPRQItI20fU= +golang.org/x/crypto v0.0.0-20190911031432-227b76d455e7/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191002192127-34f69633bfdc h1:c0o/qxkaO2LF5t6fQrT4b5hzyggAkLLlCUjqfRxd8Q4= +golang.org/x/crypto v0.0.0-20191002192127-34f69633bfdc/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod 
h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= +golang.org/x/image v0.0.0-20190227222117-0694c2d4d067 h1:KYGJGHOQy8oSi1fDlSpcZF0+juKwk/hEMv5SiwHogR0= +golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/image v0.0.0-20191009234506-e7c1f5e7dbb8/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20200119044424-58c23975cae1 h1:5h3ngYt7+vXCDZCup/HkCQgW5XwmSvR/nA2JmJ0RErg= +golang.org/x/image v0.0.0-20200119044424-58c23975cae1/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= 
+golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190619014844-b5b0513f8c1b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190909003024-a7b16738d86b h1:XfVGCX+0T4WOStkaOsJRllbsiImhB2jgVBGc9L0lPGc= +golang.org/x/net v0.0.0-20190909003024-a7b16738d86b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190402181905-9f3314589c9a/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 h1:SVwTIAaPC2U/AvvLNZ2a7OVsmBpC8L5BlwK1whH3hm0= +golang.org/x/oauth2 
v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190412183630-56d357773e84/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58 h1:8gQV6CLnAEikrhgkHFbMAEhagSSnXWGV915qUMm9mrU= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190419153524-e8e3143a4f4a/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190531175056-4c3a928424d2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190620070143-6f217b454f45/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0 h1:HyfiK1WMnHj5FXFXatD+Qs1A/xC2Run6RzeW1SyHxpc= +golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190910064555-bbd175535a8b h1:3S2h5FadpNr0zUUCVZjlKIEYF+KaX/OBplTGo89CYHI= +golang.org/x/sys v0.0.0-20190910064555-bbd175535a8b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5 h1:LfCXLvNmTYH9kEmVgqbnsWfruoXZIrh4YBgqVHtDvw0= +golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= 
+golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 h1:SvFZT6jyqRaOeXpc5h/JSfZenJ2O330aBsf7JfSUXmQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190329151228-23e29df326fe/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190416151739-9c9e1878f421/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190420181800-aa740d480789/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190422233926-fe54fb35175b/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools 
v0.0.0-20190531172133-b3315ee88b7d/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190911022129-16c5e0f7d110 h1:6S6bidS7O4yAwA5ORRbRIjvNQ9tGbLd5e+LRIaTeVDQ= +golang.org/x/tools v0.0.0-20190911022129-16c5e0f7d110/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5 h1:hKsoRgsbwY1NafxrwTs+k64bikrLBkAgPir1TNCj3Zs= +golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7 h1:9zdDQZ7Thm29KFXgAX/+yaf3eVbP7djjWp/dXAppNCc= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/api v0.5.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/api v0.6.0/go.mod h1:btoxGiFvQNVUZQ8W08zLtrVS08CNpINPEfxXxgJL1Q4= +google.golang.org/api v0.7.0 h1:9sdfJOzWlkqPltHAuzT2Cp+yrBeY1KRVYgms8soxMwM= +google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= +google.golang.org/api v0.8.0/go.mod 
h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.9.0 h1:jbyannxz0XFD3zdjgrSUsaJbgpH4eTrkdhRChkHPfO8= +google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.6.1 h1:QzqyMA1tlu6CgqCDUtU9V+ZKhLFT2dkJuANu5QaxI3I= +google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= +google.golang.org/appengine v1.6.2 h1:j8RI1yW0SkI+paT6uGwMlrMI/6zwYA6/CFil8rxOzGI= +google.golang.org/appengine v1.6.2/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190508193815-b515fa19cec8/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190530194941-fb225487d101/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s= +google.golang.org/genproto v0.0.0-20190620144150-6af8c5fc6601/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s= +google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55 
h1:gSJIx1SDwno+2ElGhA4+qG2zF97qiUzTM+rQ0klBOcE= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190905072037-92dd089d5514 h1:oFSK4421fpCKRrpzIpybyBVWyht05NegY9+L/3TLAZs= +google.golang.org/genproto v0.0.0-20190905072037-92dd089d5514/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013 h1:+kGHl1aib/qcwaRi1CbqBZ1rk19r85MNUf8HaBghugY= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.23.0 h1:AzbTB6ux+okLTzP8Ru1Xs41C303zdcfEht7MQnYJt5A= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.26.0 h1:2dTRdpdFEEhJYQD8EMLB61nnrzSCTbG38PhqdhvOltg= +google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.0 h1:rRYRFMVgRv6E0D70Skyfsr28tDXIuuPZyWGMPdMcnXg= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.29.1 h1:EC2SB8S04d2r73uptxphDSUG+kTKVgjRPF+N3xpxRB4= +google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf 
v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.24.0 h1:UhZDfRO8JRQru4/+LlLE0BRKGF8L+PICnvYZmx/fEGA= +google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= +gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= +gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= 
+gopkg.in/jcmturner/aescts.v1 v1.0.1 h1:cVVZBK2b1zY26haWB4vbBiZrfFQnfbTVrE3xZq6hrEw= +gopkg.in/jcmturner/aescts.v1 v1.0.1/go.mod h1:nsR8qBOg+OucoIW+WMhB3GspUQXq9XorLnQb9XtvcOo= +gopkg.in/jcmturner/dnsutils.v1 v1.0.1 h1:cIuC1OLRGZrld+16ZJvvZxVJeKPsvd5eUIvxfoN5hSM= +gopkg.in/jcmturner/dnsutils.v1 v1.0.1/go.mod h1:m3v+5svpVOhtFAP/wSz+yzh4Mc0Fg7eRhxkJMWSIz9Q= +gopkg.in/jcmturner/goidentity.v3 v3.0.0 h1:1duIyWiTaYvVx3YX2CYtpJbUFd7/UuPYCfgXtQ3VTbI= +gopkg.in/jcmturner/goidentity.v3 v3.0.0/go.mod h1:oG2kH0IvSYNIu80dVAyu/yoefjq1mNfM5bm88whjWx4= +gopkg.in/jcmturner/gokrb5.v7 v7.2.3 h1:hHMV/yKPwMnJhPuPx7pH2Uw/3Qyf+thJYlisUc44010= +gopkg.in/jcmturner/gokrb5.v7 v7.2.3/go.mod h1:l8VISx+WGYp+Fp7KRbsiUuXTTOnxIc3Tuvyavf11/WM= +gopkg.in/jcmturner/gokrb5.v7 v7.3.0 h1:0709Jtq/6QXEuWRfAm260XqlpcwL1vxtO1tUE2qK8Z4= +gopkg.in/jcmturner/gokrb5.v7 v7.3.0/go.mod h1:l8VISx+WGYp+Fp7KRbsiUuXTTOnxIc3Tuvyavf11/WM= +gopkg.in/jcmturner/rpc.v1 v1.1.0 h1:QHIUxTX1ISuAv9dD2wJ9HWQVuWDX/Zc0PfeC2tjc4rU= +gopkg.in/jcmturner/rpc.v1 v1.1.0/go.mod h1:YIdkC4XfD6GXbzje11McwsDuOlZQSb9W4vfLvuNnlv8= +gopkg.in/karlseguin/expect.v1 v1.0.1 h1:9u0iUltnhFbJTHaSIH0EP+cuTU5rafIgmcsEsg2JQFw= +gopkg.in/karlseguin/expect.v1 v1.0.1/go.mod h1:uB7QIJBcclvYbwlUDkSCsGjAOMis3fP280LyhuDEf2I= +gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= 
+honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +pack.ag/amqp v0.11.2/go.mod h1:4/cbmt4EJXSKlG6LCfWHoqmN0uFdy5i/+YFz+fTfhV4= +rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +sigs.k8s.io/yaml v1.1.0 h1:4A07+ZFc2wgJwo8YNlQpr1rVlgUDlxXHhPJciaPY5gs= +sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= diff --git a/k8s/README.md b/k8s/README.md new file mode 100644 index 000000000..5ec3ab407 --- /dev/null +++ b/k8s/README.md @@ -0,0 +1,23 @@ +## SEAWEEDFS - helm chart (2.x) + +### info: +* master/filer/volume are stateful sets with anti-affinity on the hostname, +so your deployment will be spread/HA. +* chart is using memsql(mysql) as the filer backend to enable HA (multiple filer instances) +and the backup/HA capabilities memsql can provide. +* mysql user/password are created in a k8s secret (secret-seaweedfs-db.yaml) and injected into the filer +with ENV. +* cert config exists and can be enabled, but has not been tested. + +### current instances config (AIO): +1 instance for each type (master/filer/volume/s3) + +instances need node labels: +* sw-volume: true (for volume instance, specific tag) +* sw-backend: true (for all others, as they are less resource demanding) + +you can update the replicas count for each node type in values.yaml, +but you will need to add more nodes with the corresponding label. 
+ +most of the configuration are available through values.yaml + diff --git a/k8s/seaweedfs/.helmignore b/k8s/seaweedfs/.helmignore new file mode 100644 index 000000000..50af03172 --- /dev/null +++ b/k8s/seaweedfs/.helmignore @@ -0,0 +1,22 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/k8s/seaweedfs/Chart.yaml b/k8s/seaweedfs/Chart.yaml new file mode 100644 index 000000000..df9034b17 --- /dev/null +++ b/k8s/seaweedfs/Chart.yaml @@ -0,0 +1,4 @@ +apiVersion: v1 +description: SeaweedFS +name: seaweedfs +version: 1.84 \ No newline at end of file diff --git a/k8s/seaweedfs/templates/_helpers.tpl b/k8s/seaweedfs/templates/_helpers.tpl new file mode 100644 index 000000000..04a782f8b --- /dev/null +++ b/k8s/seaweedfs/templates/_helpers.tpl @@ -0,0 +1,114 @@ +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to +this (by the DNS naming spec). If release name contains chart name it will +be used as a full name. +*/}} +{{- define "seaweedfs.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "seaweedfs.chart" -}} +{{- printf "%s-helm" .Chart.Name | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Expand the name of the chart. 
+*/}} +{{- define "seaweedfs.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Inject extra environment vars in the format key:value, if populated +*/}} +{{- define "seaweedfs.extraEnvironmentVars" -}} +{{- if .extraEnvironmentVars -}} +{{- range $key, $value := .extraEnvironmentVars }} +- name: {{ $key }} + value: {{ $value | quote }} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* Return the proper filer image */}} +{{- define "filer.image" -}} +{{- if .Values.filer.imageOverride -}} +{{- $imageOverride := .Values.filer.imageOverride -}} +{{- printf "%s" $imageOverride -}} +{{- else -}} +{{- $registryName := default .Values.image.registry .Values.global.localRegistry | toString -}} +{{- $repositoryName := .Values.image.repository | toString -}} +{{- $name := .Values.global.imageName | toString -}} +{{- $tag := .Values.global.imageTag | toString -}} +{{- printf "%s%s%s:%s" $registryName $repositoryName $name $tag -}} +{{- end -}} +{{- end -}} + +{{/* Return the proper postgresqlSchema image */}} +{{- define "filer.dbSchema.image" -}} +{{- if .Values.filer.dbSchema.imageOverride -}} +{{- $imageOverride := .Values.filer.dbSchema.imageOverride -}} +{{- printf "%s" $imageOverride -}} +{{- else -}} +{{- $registryName := default .Values.global.registry .Values.global.localRegistry | toString -}} +{{- $repositoryName := .Values.global.repository | toString -}} +{{- $name := .Values.filer.dbSchema.imageName | toString -}} +{{- $tag := .Values.filer.dbSchema.imageTag | toString -}} +{{- printf "%s%s%s:%s" $registryName $repositoryName $name $tag -}} +{{- end -}} +{{- end -}} + +{{/* Return the proper master image */}} +{{- define "master.image" -}} +{{- if .Values.master.imageOverride -}} +{{- $imageOverride := .Values.master.imageOverride -}} +{{- printf "%s" $imageOverride -}} +{{- else -}} +{{- $registryName := default .Values.image.registry .Values.global.localRegistry | toString -}} +{{- $repositoryName := 
.Values.image.repository | toString -}} +{{- $name := .Values.global.imageName | toString -}} +{{- $tag := .Values.global.imageTag | toString -}} +{{- printf "%s%s%s:%s" $registryName $repositoryName $name $tag -}} +{{- end -}} +{{- end -}} + +{{/* Return the proper s3 image */}} +{{- define "s3.image" -}} +{{- if .Values.s3.imageOverride -}} +{{- $imageOverride := .Values.s3.imageOverride -}} +{{- printf "%s" $imageOverride -}} +{{- else -}} +{{- $registryName := default .Values.image.registry .Values.global.localRegistry | toString -}} +{{- $repositoryName := .Values.image.repository | toString -}} +{{- $name := .Values.global.imageName | toString -}} +{{- $tag := .Values.global.imageTag | toString -}} +{{- printf "%s%s%s:%s" $registryName $repositoryName $name $tag -}} +{{- end -}} +{{- end -}} + +{{/* Return the proper volume image */}} +{{- define "volume.image" -}} +{{- if .Values.volume.imageOverride -}} +{{- $imageOverride := .Values.volume.imageOverride -}} +{{- printf "%s" $imageOverride -}} +{{- else -}} +{{- $registryName := default .Values.image.registry .Values.global.localRegistry | toString -}} +{{- $repositoryName := .Values.image.repository | toString -}} +{{- $name := .Values.global.imageName | toString -}} +{{- $tag := .Values.global.imageTag | toString -}} +{{- printf "%s%s%s:%s" $registryName $repositoryName $name $tag -}} +{{- end -}} +{{- end -}} \ No newline at end of file diff --git a/k8s/seaweedfs/templates/ca-cert.yaml b/k8s/seaweedfs/templates/ca-cert.yaml new file mode 100644 index 000000000..056f01502 --- /dev/null +++ b/k8s/seaweedfs/templates/ca-cert.yaml @@ -0,0 +1,14 @@ +{{- if .Values.global.enableSecurity }} +apiVersion: certmanager.k8s.io/v1alpha1 +kind: Certificate +metadata: + name: {{ template "seaweedfs.name" . }}-ca-cert + namespace: {{ .Release.Namespace }} +spec: + secretName: {{ template "seaweedfs.name" . }}-ca-cert + commonName: "{{ template "seaweedfs.name" . 
}}-root-ca" + isCA: true + issuerRef: + name: {{ template "seaweedfs.name" . }}-clusterissuer + kind: ClusterIssuer +{{- end }} diff --git a/k8s/seaweedfs/templates/cert-clusterissuer.yaml b/k8s/seaweedfs/templates/cert-clusterissuer.yaml new file mode 100644 index 000000000..d0bd42593 --- /dev/null +++ b/k8s/seaweedfs/templates/cert-clusterissuer.yaml @@ -0,0 +1,8 @@ +{{- if .Values.global.enableSecurity }} +apiVersion: certmanager.k8s.io/v1alpha1 +kind: ClusterIssuer +metadata: + name: {{ template "seaweedfs.name" . }}-clusterissuer +spec: + selfSigned: {} +{{- end }} diff --git a/k8s/seaweedfs/templates/client-cert.yaml b/k8s/seaweedfs/templates/client-cert.yaml new file mode 100644 index 000000000..4d27b5659 --- /dev/null +++ b/k8s/seaweedfs/templates/client-cert.yaml @@ -0,0 +1,33 @@ +{{- if .Values.global.enableSecurity }} +apiVersion: certmanager.k8s.io/v1alpha1 +kind: Certificate +metadata: + name: {{ template "seaweedfs.name" . }}-client-cert + namespace: {{ .Release.Namespace }} +spec: + secretName: {{ template "seaweedfs.name" . }}-client-cert + issuerRef: + name: {{ template "seaweedfs.name" . }}-clusterissuer + kind: ClusterIssuer + commonName: {{ .Values.certificates.commonName }} + organization: + - "SeaweedFS CA" + dnsNames: + - '*.{{ .Release.Namespace }}' + - '*.{{ .Release.Namespace }}.svc' + - '*.{{ .Release.Namespace }}.svc.cluster.local' + - '*.{{ template "seaweedfs.name" . }}-master' + - '*.{{ template "seaweedfs.name" . }}-master.{{ .Release.Namespace }}' + - '*.{{ template "seaweedfs.name" . }}-master.{{ .Release.Namespace }}.svc' + - '*.{{ template "seaweedfs.name" . }}-master.{{ .Release.Namespace }}.svc.cluster.local' +{{- if .Values.certificates.ipAddresses }} + ipAddresses: + {{- range .Values.certificates.ipAddresses }} + - {{ . 
}} + {{- end }} +{{- end }} + keyAlgorithm: {{ .Values.certificates.keyAlgorithm }} + keySize: {{ .Values.certificates.keySize }} + duration: {{ .Values.certificates.duration }} + renewBefore: {{ .Values.certificates.renewBefore }} +{{- end }} diff --git a/k8s/seaweedfs/templates/filer-cert.yaml b/k8s/seaweedfs/templates/filer-cert.yaml new file mode 100644 index 000000000..855183c54 --- /dev/null +++ b/k8s/seaweedfs/templates/filer-cert.yaml @@ -0,0 +1,33 @@ +{{- if .Values.global.enableSecurity }} +apiVersion: certmanager.k8s.io/v1alpha1 +kind: Certificate +metadata: + name: {{ template "seaweedfs.name" . }}-filer-cert + namespace: {{ .Release.Namespace }} +spec: + secretName: {{ template "seaweedfs.name" . }}-filer-cert + issuerRef: + name: {{ template "seaweedfs.name" . }}-clusterissuer + kind: ClusterIssuer + commonName: {{ .Values.certificates.commonName }} + organization: + - "SeaweedFS CA" + dnsNames: + - '*.{{ .Release.Namespace }}' + - '*.{{ .Release.Namespace }}.svc' + - '*.{{ .Release.Namespace }}.svc.cluster.local' + - '*.{{ template "seaweedfs.name" . }}-master' + - '*.{{ template "seaweedfs.name" . }}-master.{{ .Release.Namespace }}' + - '*.{{ template "seaweedfs.name" . }}-master.{{ .Release.Namespace }}.svc' + - '*.{{ template "seaweedfs.name" . }}-master.{{ .Release.Namespace }}.svc.cluster.local' +{{- if .Values.certificates.ipAddresses }} + ipAddresses: + {{- range .Values.certificates.ipAddresses }} + - {{ . 
}} + {{- end }} +{{- end }} + keyAlgorithm: {{ .Values.certificates.keyAlgorithm }} + keySize: {{ .Values.certificates.keySize }} + duration: {{ .Values.certificates.duration }} + renewBefore: {{ .Values.certificates.renewBefore }} +{{- end }} diff --git a/k8s/seaweedfs/templates/filer-service.yaml b/k8s/seaweedfs/templates/filer-service.yaml new file mode 100644 index 000000000..493859e36 --- /dev/null +++ b/k8s/seaweedfs/templates/filer-service.yaml @@ -0,0 +1,22 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "seaweedfs.name" . }}-filer + namespace: {{ .Release.Namespace }} + labels: + app: {{ template "seaweedfs.name" . }} + component: filer +spec: + clusterIP: None + ports: + - name: "swfs-filer" + port: {{ .Values.filer.port }} + targetPort: {{ .Values.filer.port }} + protocol: TCP + - name: "swfs-filer-grpc" + port: {{ .Values.filer.grpcPort }} + targetPort: {{ .Values.filer.grpcPort }} + protocol: TCP + selector: + app: {{ template "seaweedfs.name" . }} + component: filer \ No newline at end of file diff --git a/k8s/seaweedfs/templates/filer-statefulset.yaml b/k8s/seaweedfs/templates/filer-statefulset.yaml new file mode 100644 index 000000000..43da74c43 --- /dev/null +++ b/k8s/seaweedfs/templates/filer-statefulset.yaml @@ -0,0 +1,207 @@ +{{- if .Values.filer.enabled }} +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ template "seaweedfs.name" . }}-filer + namespace: {{ .Release.Namespace }} + labels: + app: {{ template "seaweedfs.name" . }} + chart: {{ template "seaweedfs.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +spec: + serviceName: {{ template "seaweedfs.name" . }}-filer + podManagementPolicy: Parallel + replicas: {{ .Values.filer.replicas }} + {{- if (gt (int .Values.filer.updatePartition) 0) }} + updateStrategy: + type: RollingUpdate + rollingUpdate: + partition: {{ .Values.filer.updatePartition }} + {{- end }} + selector: + matchLabels: + app: {{ template "seaweedfs.name" . 
}} + chart: {{ template "seaweedfs.chart" . }} + release: {{ .Release.Name }} + component: filer + template: + metadata: + labels: + app: {{ template "seaweedfs.name" . }} + chart: {{ template "seaweedfs.chart" . }} + release: {{ .Release.Name }} + component: filer + spec: + restartPolicy: {{ default .Values.global.restartPolicy .Values.filer.restartPolicy }} + {{- if .Values.filer.affinity }} + affinity: + {{ tpl .Values.filer.affinity . | nindent 8 | trim }} + {{- end }} + {{- if .Values.filer.tolerations }} + tolerations: + {{ tpl .Values.filer.tolerations . | nindent 8 | trim }} + {{- end }} + {{- if .Values.global.imagePullSecrets }} + imagePullSecrets: + - name: {{ .Values.global.imagePullSecrets }} + {{- end }} + serviceAccountName: seaweefds-rw-sa #hack for delete pod master after migration + terminationGracePeriodSeconds: 60 + {{- if .Values.filer.priorityClassName }} + priorityClassName: {{ .Values.filer.priorityClassName | quote }} + {{- end }} + enableServiceLinks: false + containers: + - name: seaweedfs + image: {{ template "filer.image" . }} + imagePullPolicy: {{ default "IfNotPresent" .Values.global.imagePullPolicy }} + env: + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: WEED_MYSQL_USERNAME + valueFrom: + secretKeyRef: + name: secret-seaweedfs-db + key: user + - name: WEED_MYSQL_PASSWORD + valueFrom: + secretKeyRef: + name: secret-seaweedfs-db + key: password + - name: SEAWEEDFS_FULLNAME + value: "{{ template "seaweedfs.name" . 
}}" + {{- if .Values.filer.extraEnvironmentVars }} + {{- range $key, $value := .Values.filer.extraEnvironmentVars }} + - name: {{ $key }} + value: {{ $value | quote }} + {{- end }} + {{- end }} + command: + - "/bin/sh" + - "-ec" + - | + exec /usr/bin/weed -logdir=/logs \ + {{- if .Values.filer.loggingOverrideLevel }} + -v={{ .Values.filer.loggingOverrideLevel }} \ + {{- else }} + -v={{ .Values.global.loggingLevel }} \ + {{- end }} + filer \ + -port={{ .Values.filer.port }} \ + {{- if .Values.filer.disableHttp }} + -disableHttp \ + {{- end }} + {{- if .Values.filer.disableDirListing }} + -disableDirListing \ + {{- end }} + -dirListLimit={{ .Values.filer.dirListLimit }} \ + -ip=${POD_IP} \ + -master={{ range $index := until (.Values.master.replicas | int) }}${SEAWEEDFS_FULLNAME}-master-{{ $index }}.${SEAWEEDFS_FULLNAME}-master:{{ $.Values.master.port }}{{ if lt $index (sub ($.Values.master.replicas | int) 1) }},{{ end }}{{ end }} + {{- if or (.Values.global.enableSecurity) (.Values.filer.extraVolumeMounts) }} + volumeMounts: + - name: seaweedfs-filer-log-volume + mountPath: "/logs/" + {{- if .Values.global.enableSecurity }} + - name: security-config + readOnly: true + mountPath: /etc/seaweedfs/security.toml + subPath: security.toml + - name: ca-cert + readOnly: true + mountPath: /usr/local/share/ca-certificates/ca/ + - name: master-cert + readOnly: true + mountPath: /usr/local/share/ca-certificates/master/ + - name: volume-cert + readOnly: true + mountPath: /usr/local/share/ca-certificates/volume/ + - name: filer-cert + readOnly: true + mountPath: /usr/local/share/ca-certificates/filer/ + - name: client-cert + readOnly: true + mountPath: /usr/local/share/ca-certificates/client/ + {{- end }} + {{ tpl .Values.filer.extraVolumeMounts . 
| nindent 12 | trim }} + {{- end }} + ports: + - containerPort: {{ .Values.filer.port }} + name: swfs-filer + - containerPort: {{ .Values.filer.grpcPort }} + #name: swfs-filer-grpc + readinessProbe: + httpGet: + path: / + port: {{ .Values.filer.port }} + scheme: HTTP + initialDelaySeconds: 10 + periodSeconds: 15 + successThreshold: 1 + failureThreshold: 100 + livenessProbe: + httpGet: + path: / + port: {{ .Values.filer.port }} + scheme: HTTP + initialDelaySeconds: 20 + periodSeconds: 30 + successThreshold: 1 + failureThreshold: 5 + {{- if .Values.filer.resources }} + resources: + {{ tpl .Values.filer.resources . | nindent 12 | trim }} + {{- end }} + volumes: + - name: seaweedfs-filer-log-volume + hostPath: + path: /storage/logs/seaweedfs/filer + type: DirectoryOrCreate + {{- if .Values.global.enableSecurity }} + - name: security-config + configMap: + name: {{ template "seaweedfs.name" . }}-security-config + - name: ca-cert + secret: + secretName: {{ template "seaweedfs.name" . }}-ca-cert + - name: master-cert + secret: + secretName: {{ template "seaweedfs.name" . }}-master-cert + - name: volume-cert + secret: + secretName: {{ template "seaweedfs.name" . }}-volume-cert + - name: filer-cert + secret: + secretName: {{ template "seaweedfs.name" . }}-filer-cert + - name: client-cert + secret: + secretName: {{ template "seaweedfs.name" . }}-client-cert + {{- end }} + {{ tpl .Values.filer.extraVolumes . | indent 8 | trim }} + {{- if .Values.filer.nodeSelector }} + nodeSelector: + {{ tpl .Values.filer.nodeSelector . 
| indent 8 | trim }} + {{- end }} +{{/* volumeClaimTemplates:*/}} +{{/* - metadata:*/}} +{{/* name: data-{{ .Release.Namespace }}*/}} +{{/* spec:*/}} +{{/* accessModes:*/}} +{{/* - ReadWriteOnce*/}} +{{/* resources:*/}} +{{/* requests:*/}} +{{/* storage: {{ .Values.filer.storage }}*/}} +{{/* {{- if .Values.filer.storageClass }}*/}} +{{/* storageClassName: {{ .Values.filer.storageClass }}*/}} +{{/* {{- end }}*/}} +{{- end }} diff --git a/k8s/seaweedfs/templates/ingress.yaml b/k8s/seaweedfs/templates/ingress.yaml new file mode 100644 index 000000000..dcd52c138 --- /dev/null +++ b/k8s/seaweedfs/templates/ingress.yaml @@ -0,0 +1,59 @@ +apiVersion: extensions/v1beta1 +kind: Ingress +metadata: + name: ingress-{{ template "seaweedfs.name" . }}-filer + annotations: + kubernetes.io/ingress.class: "nginx" + nginx.ingress.kubernetes.io/auth-type: "basic" + nginx.ingress.kubernetes.io/auth-secret: "default/ingress-basic-auth-secret" + nginx.ingress.kubernetes.io/auth-realm: 'Authentication Required - SW-Filer' + nginx.ingress.kubernetes.io/service-upstream: "true" + nginx.ingress.kubernetes.io/rewrite-target: /$1 + nginx.ingress.kubernetes.io/use-regex: "true" + nginx.ingress.kubernetes.io/enable-rewrite-log: "true" + nginx.ingress.kubernetes.io/ssl-redirect: "false" + nginx.ingress.kubernetes.io/force-ssl-redirect: "false" + nginx.ingress.kubernetes.io/configuration-snippet: | + sub_filter '' ' '; #add base url + sub_filter '="/' '="./'; #make absolute paths to relative + sub_filter '=/' '=./'; + sub_filter '/seaweedfsstatic' './seaweedfsstatic'; + sub_filter_once off; +spec: + rules: + - http: + paths: + - path: /sw-filer/?(.*) + backend: + serviceName: {{ template "seaweedfs.name" . }}-filer + servicePort: {{ .Values.filer.port }} +--- +apiVersion: extensions/v1beta1 +kind: Ingress +metadata: + name: ingress-{{ template "seaweedfs.name" . 
}}-master + annotations: + kubernetes.io/ingress.class: "nginx" + nginx.ingress.kubernetes.io/auth-type: "basic" + nginx.ingress.kubernetes.io/auth-secret: "default/ingress-basic-auth-secret" + nginx.ingress.kubernetes.io/auth-realm: 'Authentication Required - SW-Master' + nginx.ingress.kubernetes.io/service-upstream: "true" + nginx.ingress.kubernetes.io/rewrite-target: /$1 + nginx.ingress.kubernetes.io/use-regex: "true" + nginx.ingress.kubernetes.io/enable-rewrite-log: "true" + nginx.ingress.kubernetes.io/ssl-redirect: "false" + nginx.ingress.kubernetes.io/force-ssl-redirect: "false" + nginx.ingress.kubernetes.io/configuration-snippet: | + sub_filter '' ' '; #add base url + sub_filter '="/' '="./'; #make absolute paths to relative + sub_filter '=/' '=./'; + sub_filter '/seaweedfsstatic' './seaweedfsstatic'; + sub_filter_once off; +spec: + rules: + - http: + paths: + - path: /sw-master/?(.*) + backend: + serviceName: {{ template "seaweedfs.name" . }}-master + servicePort: {{ .Values.master.port }} diff --git a/k8s/seaweedfs/templates/master-cert.yaml b/k8s/seaweedfs/templates/master-cert.yaml new file mode 100644 index 000000000..a8b0fc1d1 --- /dev/null +++ b/k8s/seaweedfs/templates/master-cert.yaml @@ -0,0 +1,33 @@ +{{- if .Values.global.enableSecurity }} +apiVersion: certmanager.k8s.io/v1alpha1 +kind: Certificate +metadata: + name: {{ template "seaweedfs.name" . }}-master-cert + namespace: {{ .Release.Namespace }} +spec: + secretName: {{ template "seaweedfs.name" . }}-master-cert + issuerRef: + name: {{ template "seaweedfs.name" . }}-clusterissuer + kind: ClusterIssuer + commonName: {{ .Values.certificates.commonName }} + organization: + - "SeaweedFS CA" + dnsNames: + - '*.{{ .Release.Namespace }}' + - '*.{{ .Release.Namespace }}.svc' + - '*.{{ .Release.Namespace }}.svc.cluster.local' + - '*.{{ template "seaweedfs.name" . }}-master' + - '*.{{ template "seaweedfs.name" . }}-master.{{ .Release.Namespace }}' + - '*.{{ template "seaweedfs.name" . 
}}-master.{{ .Release.Namespace }}.svc' + - '*.{{ template "seaweedfs.name" . }}-master.{{ .Release.Namespace }}.svc.cluster.local' +{{- if .Values.certificates.ipAddresses }} + ipAddresses: + {{- range .Values.certificates.ipAddresses }} + - {{ . }} + {{- end }} +{{- end }} + keyAlgorithm: {{ .Values.certificates.keyAlgorithm }} + keySize: {{ .Values.certificates.keySize }} + duration: {{ .Values.certificates.duration }} + renewBefore: {{ .Values.certificates.renewBefore }} +{{- end }} diff --git a/k8s/seaweedfs/templates/master-service.yaml b/k8s/seaweedfs/templates/master-service.yaml new file mode 100644 index 000000000..f7603bd91 --- /dev/null +++ b/k8s/seaweedfs/templates/master-service.yaml @@ -0,0 +1,24 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "seaweedfs.name" . }}-master + namespace: {{ .Release.Namespace }} + labels: + app: {{ template "seaweedfs.name" . }} + component: master + annotations: + service.alpha.kubernetes.io/tolerate-unready-endpoints: "true" +spec: + clusterIP: None + ports: + - name: "swfs-master" + port: {{ .Values.master.port }} + targetPort: {{ .Values.master.port }} + protocol: TCP + - name: "swfs-master-grpc" + port: {{ .Values.master.grpcPort }} + targetPort: {{ .Values.master.grpcPort }} + protocol: TCP + selector: + app: {{ template "seaweedfs.name" . }} + component: master \ No newline at end of file diff --git a/k8s/seaweedfs/templates/master-statefulset.yaml b/k8s/seaweedfs/templates/master-statefulset.yaml new file mode 100644 index 000000000..87050534f --- /dev/null +++ b/k8s/seaweedfs/templates/master-statefulset.yaml @@ -0,0 +1,199 @@ +{{- if .Values.master.enabled }} +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ template "seaweedfs.name" . }}-master + namespace: {{ .Release.Namespace }} + labels: + app: {{ template "seaweedfs.name" . }} + chart: {{ template "seaweedfs.chart" . 
}} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +spec: + serviceName: {{ template "seaweedfs.name" . }}-master + podManagementPolicy: Parallel + replicas: {{ .Values.master.replicas }} + {{- if (gt (int .Values.master.updatePartition) 0) }} + updateStrategy: + type: RollingUpdate + rollingUpdate: + partition: {{ .Values.master.updatePartition }} + {{- end }} + selector: + matchLabels: + app: {{ template "seaweedfs.name" . }} + chart: {{ template "seaweedfs.chart" . }} + release: {{ .Release.Name }} + component: master + template: + metadata: + labels: + app: {{ template "seaweedfs.name" . }} + chart: {{ template "seaweedfs.chart" . }} + release: {{ .Release.Name }} + component: master + spec: + restartPolicy: {{ default .Values.global.restartPolicy .Values.master.restartPolicy }} + {{- if .Values.master.affinity }} + affinity: + {{ tpl .Values.master.affinity . | nindent 8 | trim }} + {{- end }} + {{- if .Values.master.tolerations }} + tolerations: + {{ tpl .Values.master.tolerations . | nindent 8 | trim }} + {{- end }} + {{- if .Values.global.imagePullSecrets }} + imagePullSecrets: + - name: {{ .Values.global.imagePullSecrets }} + {{- end }} + terminationGracePeriodSeconds: 60 + {{- if .Values.master.priorityClassName }} + priorityClassName: {{ .Values.master.priorityClassName | quote }} + {{- end }} + enableServiceLinks: false + containers: + - name: seaweedfs + image: {{ template "master.image" . }} + imagePullPolicy: {{ default "IfNotPresent" .Values.global.imagePullPolicy }} + env: + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: SEAWEEDFS_FULLNAME + value: "{{ template "seaweedfs.name" . 
}}" + command: + - "/bin/sh" + - "-ec" + - | + exec /usr/bin/weed -logdir=/logs \ + {{- if .Values.master.loggingOverrideLevel }} + -v={{ .Values.master.loggingOverrideLevel }} \ + {{- else }} + -v={{ .Values.global.loggingLevel }} \ + {{- end }} + master \ + -port={{ .Values.master.port }} \ + -mdir=/data \ + -ip.bind={{ .Values.master.ipBind }} \ + {{- if .Values.master.volumePreallocate }} + -volumePreallocate \ + {{- end }} + {{- if .Values.global.monitoring.enabled }} + -metrics.address="{{ .Values.global.monitoring.gatewayHost }}:{{ .Values.global.monitoring.gatewayPort }}" \ + {{- end }} + -volumeSizeLimitMB={{ .Values.master.volumeSizeLimitMB }} \ + {{- if .Values.master.disableHttp }} + -disableHttp \ + {{- end }} + -ip=${POD_NAME}.${SEAWEEDFS_FULLNAME}-master \ + -peers={{ range $index := until (.Values.master.replicas | int) }}${SEAWEEDFS_FULLNAME}-master-{{ $index }}.${SEAWEEDFS_FULLNAME}-master:{{ $.Values.master.port }}{{ if lt $index (sub ($.Values.master.replicas | int) 1) }},{{ end }}{{ end }} + volumeMounts: + - name : data-{{ .Release.Namespace }} + mountPath: /data + - name: seaweedfs-master-log-volume + mountPath: "/logs/" + {{- if .Values.global.enableSecurity }} + - name: security-config + readOnly: true + mountPath: /etc/seaweedfs/security.toml + subPath: security.toml + - name: ca-cert + readOnly: true + mountPath: /usr/local/share/ca-certificates/ca/ + - name: master-cert + readOnly: true + mountPath: /usr/local/share/ca-certificates/master/ + - name: volume-cert + readOnly: true + mountPath: /usr/local/share/ca-certificates/volume/ + - name: filer-cert + readOnly: true + mountPath: /usr/local/share/ca-certificates/filer/ + - name: client-cert + readOnly: true + mountPath: /usr/local/share/ca-certificates/client/ + {{- end }} + {{ tpl .Values.master.extraVolumeMounts . 
| nindent 12 | trim }} + ports: + - containerPort: {{ .Values.master.port }} + name: swfs-master + - containerPort: {{ .Values.master.grpcPort }} + #name: swfs-master-grpc + readinessProbe: + httpGet: + path: /cluster/status + port: {{ .Values.master.port }} + scheme: HTTP + initialDelaySeconds: 5 + periodSeconds: 15 + successThreshold: 2 + failureThreshold: 100 + livenessProbe: + httpGet: + path: /cluster/status + port: {{ .Values.master.port }} + scheme: HTTP + initialDelaySeconds: 20 + periodSeconds: 10 + successThreshold: 1 + failureThreshold: 6 + {{- if .Values.master.resources }} + resources: + {{ tpl .Values.master.resources . | nindent 12 | trim }} + {{- end }} + volumes: + - name: seaweedfs-master-log-volume + hostPath: + path: /storage/logs/seaweedfs/master + type: DirectoryOrCreate + - name: data-{{ .Release.Namespace }} + hostPath: + path: /ssd/seaweed-master/ + type: DirectoryOrCreate + {{- if .Values.global.enableSecurity }} + - name: security-config + configMap: + name: {{ template "seaweedfs.name" . }}-security-config + - name: ca-cert + secret: + secretName: {{ template "seaweedfs.name" . }}-ca-cert + - name: master-cert + secret: + secretName: {{ template "seaweedfs.name" . }}-master-cert + - name: volume-cert + secret: + secretName: {{ template "seaweedfs.name" . }}-volume-cert + - name: filer-cert + secret: + secretName: {{ template "seaweedfs.name" . }}-filer-cert + - name: client-cert + secret: + secretName: {{ template "seaweedfs.name" . }}-client-cert + {{- end }} + {{ tpl .Values.master.extraVolumes . | indent 8 | trim }} + {{- if .Values.master.nodeSelector }} + nodeSelector: + {{ tpl .Values.master.nodeSelector . 
| indent 8 | trim }} + {{- end }} +{{/* volumeClaimTemplates:*/}} +{{/* - metadata:*/}} +{{/* name: data-{{ .Release.Namespace }}*/}} +{{/* spec:*/}} +{{/* accessModes:*/}} +{{/* - ReadWriteOnce*/}} +{{/* resources:*/}} +{{/* requests:*/}} +{{/* storage: {{ .Values.master.storage }}*/}} +{{/* {{- if .Values.master.storageClass }}*/}} +{{/* storageClassName: {{ .Values.master.storageClass }}*/}} +{{/* {{- end }}*/}} +{{- end }} diff --git a/k8s/seaweedfs/templates/s3-deployment.yaml b/k8s/seaweedfs/templates/s3-deployment.yaml new file mode 100644 index 000000000..1bb3283f1 --- /dev/null +++ b/k8s/seaweedfs/templates/s3-deployment.yaml @@ -0,0 +1,158 @@ +{{- if .Values.s3.enabled }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ template "seaweedfs.name" . }}-s3 + namespace: {{ .Release.Namespace }} + labels: + app: {{ template "seaweedfs.name" . }} + chart: {{ template "seaweedfs.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +spec: + serviceName: {{ template "seaweedfs.name" . }}-s3 + replicas: {{ .Values.s3.replicas }} + selector: + matchLabels: + app: {{ template "seaweedfs.name" . }} + chart: {{ template "seaweedfs.chart" . }} + release: {{ .Release.Name }} + component: s3 + template: + metadata: + labels: + app: {{ template "seaweedfs.name" . }} + chart: {{ template "seaweedfs.chart" . }} + release: {{ .Release.Name }} + component: s3 + spec: + restartPolicy: {{ default .Values.global.restartPolicy .Values.s3.restartPolicy }} + {{- if .Values.s3.tolerations }} + tolerations: + {{ tpl .Values.s3.tolerations . | nindent 8 | trim }} + {{- end }} + {{- if .Values.global.imagePullSecrets }} + imagePullSecrets: + - name: {{ .Values.global.imagePullSecrets }} + {{- end }} + terminationGracePeriodSeconds: 10 + {{- if .Values.s3.priorityClassName }} + priorityClassName: {{ .Values.s3.priorityClassName | quote }} + {{- end }} + enableServiceLinks: false + containers: + - name: seaweedfs + image: {{ template "s3.image" . 
}} + imagePullPolicy: {{ default "IfNotPresent" .Values.global.imagePullPolicy }} + env: + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: SEAWEEDFS_FULLNAME + value: "{{ template "seaweedfs.name" . }}" + command: + - "/bin/sh" + - "-ec" + - | + exec /usr/bin/weed \ + {{- if .Values.s3.loggingOverrideLevel }} + -v={{ .Values.s3.loggingOverrideLevel }} \ + {{- else }} + -v={{ .Values.global.loggingLevel }} \ + {{- end }} + s3 \ + -port={{ .Values.s3.port }} \ + {{- if .Values.global.enableSecurity }} + -cert.file=/usr/local/share/ca-certificates/client/tls.crt \ + -key.file=/usr/local/share/ca-certificates/client/tls.key \ + {{- end }} + {{- if .Values.s3.domainName }} + -domainName={{ .Values.s3.domainName }} \ + {{- end }} + -filer={{ template "seaweedfs.name" . }}-filer:{{ .Values.filer.port }} + {{- if or (.Values.global.enableSecurity) (.Values.s3.extraVolumeMounts) }} + volumeMounts: + {{- if .Values.global.enableSecurity }} + - name: security-config + readOnly: true + mountPath: /etc/seaweedfs/security.toml + subPath: security.toml + - name: ca-cert + readOnly: true + mountPath: /usr/local/share/ca-certificates/ca/ + - name: master-cert + readOnly: true + mountPath: /usr/local/share/ca-certificates/master/ + - name: volume-cert + readOnly: true + mountPath: /usr/local/share/ca-certificates/volume/ + - name: filer-cert + readOnly: true + mountPath: /usr/local/share/ca-certificates/filer/ + - name: client-cert + readOnly: true + mountPath: /usr/local/share/ca-certificates/client/ + {{- end }} + {{ tpl .Values.s3.extraVolumeMounts . 
| nindent 12 | trim }} + {{- end }} + ports: + - containerPort: {{ .Values.s3.port }} + name: swfs-s3 + readinessProbe: + httpGet: + path: / + port: {{ .Values.s3.port }} + scheme: HTTP + initialDelaySeconds: 15 + periodSeconds: 15 + successThreshold: 1 + failureThreshold: 100 + livenessProbe: + httpGet: + path: / + port: {{ .Values.s3.port }} + scheme: HTTP + initialDelaySeconds: 20 + periodSeconds: 60 + successThreshold: 1 + failureThreshold: 20 + {{- if .Values.s3.resources }} + resources: + {{ tpl .Values.s3.resources . | nindent 12 | trim }} + {{- end }} + volumes: + {{- if .Values.global.enableSecurity }} + - name: security-config + configMap: + name: {{ template "seaweedfs.name" . }}-security-config + - name: ca-cert + secret: + secretName: {{ template "seaweedfs.name" . }}-ca-cert + - name: master-cert + secret: + secretName: {{ template "seaweedfs.name" . }}-master-cert + - name: volume-cert + secret: + secretName: {{ template "seaweedfs.name" . }}-volume-cert + - name: filer-cert + secret: + secretName: {{ template "seaweedfs.name" . }}-filer-cert + - name: client-cert + secret: + secretName: {{ template "seaweedfs.name" . }}-client-cert + {{- end }} + {{ tpl .Values.s3.extraVolumes . | indent 8 | trim }} + {{- if .Values.s3.nodeSelector }} + nodeSelector: + {{ tpl .Values.s3.nodeSelector . | indent 8 | trim }} + {{- end }} +{{- end }} diff --git a/k8s/seaweedfs/templates/s3-service.yaml b/k8s/seaweedfs/templates/s3-service.yaml new file mode 100644 index 000000000..b088e25fa --- /dev/null +++ b/k8s/seaweedfs/templates/s3-service.yaml @@ -0,0 +1,17 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "seaweedfs.name" . }}-s3 + namespace: {{ .Release.Namespace }} + labels: + app: {{ template "seaweedfs.name" . }} + component: s3 +spec: + ports: + - name: "swfs-s3" + port: {{ .Values.s3.port }} + targetPort: {{ .Values.s3.port }} + protocol: TCP + selector: + app: {{ template "seaweedfs.name" . 
}} + component: s3 \ No newline at end of file diff --git a/k8s/seaweedfs/templates/seaweefs-grafana-dashboard.yaml b/k8s/seaweedfs/templates/seaweefs-grafana-dashboard.yaml new file mode 100644 index 000000000..c943ea50f --- /dev/null +++ b/k8s/seaweedfs/templates/seaweefs-grafana-dashboard.yaml @@ -0,0 +1,1352 @@ +{{- if .Values.global.monitoring.enabled }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: seaweefsfs-grafana-dashboard + labels: + grafana_dashboard: "1" +data: + seaweedfs.json: |- + { + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": "Prometheus", + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "limit": 100, + "name": "Annotations & Alerts", + "showIn": 0, + "type": "dashboard" + } + ] + }, + "editable": true, + "gnetId": 10423, + "graphTooltip": 0, + "hideControls": false, + "id": null, + "links": [], + "refresh": "30s", + "rows": [ + { + "collapse": false, + "height": 251, + "panels": [ + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "editable": true, + "error": false, + "fill": 1, + "grid": {}, + "id": 46, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 4, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "histogram_quantile(0.80, sum(rate(SeaweedFS_filer_request_seconds_bucket[1m])) by (le))", + "format": "time_series", + "hide": false, + "intervalFactor": 2, + "legendFormat": "average", + "refId": "A", + "step": 60 + }, + { + "expr": "histogram_quantile(0.80, sum(rate(SeaweedFS_filer_request_seconds_bucket[1m])) by (le, type))", + "format": "time_series", + "hide": false, + "intervalFactor": 2, + 
"legendFormat": "{{`{{type}}`}}", + "refId": "B", + "step": 60 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Filer Request Duration 80th percentile", + "tooltip": { + "msResolution": true, + "shared": true, + "sort": 0, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "s", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "editable": true, + "error": false, + "fill": 1, + "grid": {}, + "id": 49, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 4, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "histogram_quantile(0.95, sum(rate(SeaweedFS_filer_request_seconds_bucket[1m])) by (le))", + "format": "time_series", + "hide": false, + "intervalFactor": 2, + "legendFormat": "average", + "refId": "A", + "step": 60 + }, + { + "expr": "histogram_quantile(0.95, sum(rate(SeaweedFS_filer_request_seconds_bucket[1m])) by (le, type))", + "format": "time_series", + "hide": false, + "intervalFactor": 2, + "legendFormat": "{{`{{type}}`}}", + "refId": "B", + "step": 60 + }, + { + "expr": "", + "format": "time_series", + "intervalFactor": 2, + "refId": "C" + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Filer Request Duration 95th percentile", + "tooltip": { + "msResolution": true, + "shared": true,
+ "sort": 0, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "s", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "editable": true, + "error": false, + "fill": 1, + "grid": {}, + "id": 45, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 4, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "histogram_quantile(0.99, sum(rate(SeaweedFS_filer_request_seconds_bucket[1m])) by (le))", + "format": "time_series", + "hide": false, + "intervalFactor": 2, + "legendFormat": "average", + "refId": "A", + "step": 60 + }, + { + "expr": "histogram_quantile(0.99, sum(rate(SeaweedFS_filer_request_seconds_bucket[1m])) by (le, type))", + "format": "time_series", + "hide": false, + "intervalFactor": 2, + "legendFormat": "{{`{{type}}`}}", + "refId": "B", + "step": 60 + }, + { + "expr": "", + "format": "time_series", + "intervalFactor": 2, + "refId": "C" + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Filer Request Duration 99th percentile", + "tooltip": { + "msResolution": true, + "shared": true, + "sort": 0, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "s", + "label": null, + "logBase": 1, + 
"max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "editable": true, + "error": false, + "fill": 0, + "grid": {}, + "id": 2, + "legend": { + "alignAsTable": true, + "avg": false, + "current": true, + "hideEmpty": true, + "hideZero": true, + "max": true, + "min": false, + "rightSide": true, + "show": true, + "sideWidth": 250, + "sort": "max", + "sortDesc": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "links": [], + "minSpan": 12, + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + { + "alias": "total", + "lines": false + } + ], + "spaceLength": 10, + "span": 12, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "rate(SeaweedFS_filer_request_total[1m]) * 5", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{`{{type}}`}}", + "refId": "A", + "step": 30 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Filer QPS", + "tooltip": { + "msResolution": true, + "shared": true, + "sort": 2, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "Filer", + "titleSize": "h6" + }, + { + "collapse": false, + "height": 252, + "panels": [ + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + 
"datasource": "Prometheus", + "editable": true, + "error": false, + "fill": 1, + "grid": {}, + "id": 47, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 6, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "histogram_quantile(0.99, sum(rate(SeaweedFS_volumeServer_request_seconds_bucket[1m])) by (le, exported_instance))", + "format": "time_series", + "hide": false, + "intervalFactor": 2, + "legendFormat": "{{`{{exported_instance}}`}}", + "refId": "B" + }, + { + "expr": "histogram_quantile(0.99, sum(rate(SeaweedFS_volumeServer_request_seconds_bucket[1m])) by (le))", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "average", + "refId": "C" + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Volume Server Request Duration 99th percentile", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "s", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "editable": true, + "error": false, + "fill": 1, + "grid": {}, + "id": 40, + "legend": { + "alignAsTable": true, + "avg": false, + "current": false, + "hideEmpty": true, + "hideZero": true, + "max": false, + "min": false, + "rightSide": true, + "show": true, + "sort": 
"total", + "sortDesc": true, + "total": true, + "values": true + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 6, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(rate(SeaweedFS_volumeServer_request_total[1m])) by (type)", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{`{{type}}`}}", + "refId": "A", + "step": 4 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Volume Server QPS", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "fill": 1, + "id": 48, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 12, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(SeaweedFS_volumeServer_volumes) by (collection, type)", + "format": "time_series", + "hide": false, + "intervalFactor": 2, + "legendFormat": "{{`{{collection}}`}} {{`{{type}}`}}", + "refId": "A" + }, + { + "expr": "sum(SeaweedFS_volumeServer_max_volumes)", + "format": "time_series", + "intervalFactor": 2, + 
"legendFormat": "Total", + "refId": "B" + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Volume Count", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "fill": 1, + "id": 50, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 12, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(SeaweedFS_volumeServer_total_disk_size) by (collection, type)", + "format": "time_series", + "hide": false, + "intervalFactor": 2, + "legendFormat": "{{`{{collection}}`}} {{`{{type}}`}}", + "refId": "A" + }, + { + "expr": "sum(SeaweedFS_volumeServer_total_disk_size)", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "Total", + "refId": "B" + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Used Disk Space by Collection and Type", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "bytes", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + 
"logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "fill": 1, + "id": 51, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 12, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(SeaweedFS_volumeServer_total_disk_size) by (exported_instance)", + "format": "time_series", + "hide": false, + "intervalFactor": 2, + "legendFormat": "{{`{{exported_instance}}`}}", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Used Disk Space by Host", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "bytes", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "Volume Server", + "titleSize": "h6" + }, + { + "collapse": false, + "height": 251, + "panels": [ + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "editable": true, + "error": false, + "fill": 1, + "grid": {}, + "id": 12, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 2, + "links": [], + 
"nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 6, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "histogram_quantile(0.99, sum(rate(SeaweedFS_filerStore_request_seconds_bucket[1m])) by (le, type))", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{`{{type}}`}}", + "refId": "B" + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Filer Store Request Duration 99th percentile", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "s", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "editable": true, + "error": false, + "fill": 1, + "grid": {}, + "id": 14, + "legend": { + "alignAsTable": true, + "avg": true, + "current": true, + "hideEmpty": false, + "hideZero": false, + "max": false, + "min": false, + "rightSide": true, + "show": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 6, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(rate(SeaweedFS_filerStore_request_total [1m])) by (type)", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{`{{type}}`}}", + "refId": "B" + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Filer 
Store QPS", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "Filer Store", + "titleSize": "h6" + }, + { + "collapse": false, + "height": 242, + "panels": [ + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "editable": true, + "error": false, + "fill": 1, + "grid": {}, + "id": 52, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 6, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "go_memstats_alloc_bytes{exported_job=\"filer\"}", + "format": "time_series", + "hide": false, + "intervalFactor": 2, + "legendFormat": "bytes allocated", + "refId": "B" + }, + { + "expr": "rate(go_memstats_alloc_bytes_total{exported_job=\"filer\"}[30s])", + "format": "time_series", + "hide": false, + "intervalFactor": 2, + "legendFormat": "alloc rate", + "refId": "A" + }, + { + "expr": "go_memstats_stack_inuse_bytes{exported_job=\"filer\"}", + "format": "time_series", + "hide": false, + "intervalFactor": 2, + "legendFormat": "stack inuse", + "refId": "C" + }, + { + "expr": "go_memstats_heap_inuse_bytes{exported_job=\"filer\"}", + "format": "time_series", + "hide": 
false, + "intervalFactor": 2, + "legendFormat": "heap inuse", + "refId": "D" + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Filer Go Memory Stats", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "bytes", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "editable": true, + "error": false, + "fill": 1, + "grid": {}, + "id": 54, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 6, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "go_gc_duration_seconds{exported_job=\"filer\"}", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{`{{quantile}}`}}", + "refId": "B" + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Filer Go GC duration quantiles", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "s", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": 
true + } + ] + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "editable": true, + "error": false, + "fill": 1, + "grid": {}, + "id": 53, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 12, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "go_goroutines{exported_job=\"filer\"}", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{`{{exported_instance}}`}}", + "refId": "B" + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Filer Go Routines", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "none", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "Filer Instances", + "titleSize": "h6" + }, + { + "collapse": true, + "height": 250, + "panels": [], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": false, + "title": "Dashboard Row", + "titleSize": "h6" + } + ], + "schemaVersion": 14, + "style": "dark", + "tags": [], + "templating": { + "list": [] + }, + "time": { + "from": "now-15m", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "5s", + "10s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + 
"2h", + "1d" + ], + "time_options": [ + "5m", + "15m", + "1h", + "6h", + "12h", + "24h", + "2d", + "7d", + "30d" + ] + }, + "timezone": "browser", + "title": "SeaweedFS", + "version": 3 + } +{{- end }} \ No newline at end of file diff --git a/k8s/seaweedfs/templates/secret-seaweedfs-db.yaml b/k8s/seaweedfs/templates/secret-seaweedfs-db.yaml new file mode 100644 index 000000000..c6132c9ea --- /dev/null +++ b/k8s/seaweedfs/templates/secret-seaweedfs-db.yaml @@ -0,0 +1,14 @@ +apiVersion: v1 +kind: Secret +type: Opaque +metadata: + name: secret-seaweedfs-db + namespace: {{ .Release.Namespace }} + annotations: + "helm.sh/resource-policy": keep + "helm.sh/hook": "pre-install" +stringData: + user: "YourSWUser" + password: "HardCodedPassword" + # better to random generate and create in DB + # password: {{ randAlphaNum 10 | sha256sum | b64enc | trunc 32 }} diff --git a/k8s/seaweedfs/templates/security-configmap.yaml b/k8s/seaweedfs/templates/security-configmap.yaml new file mode 100644 index 000000000..7d06614ec --- /dev/null +++ b/k8s/seaweedfs/templates/security-configmap.yaml @@ -0,0 +1,52 @@ +{{- if .Values.global.enableSecurity }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "seaweedfs.name" . }}-security-config + namespace: {{ .Release.Namespace }} + labels: + app: {{ template "seaweedfs.name" . }} + chart: {{ template "seaweedfs.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +data: + security.toml: |- + # this file is read by master, volume server, and filer + + # the jwt signing key is read by master and volume server + # a jwt expires in 10 seconds + [jwt.signing] + key = "{{ randAlphaNum 10 | b64enc }}" + + # all grpc tls authentications are mutual + # the values for the following ca, cert, and key are paths to the PERM files. 
+ [grpc] + ca = "/usr/local/share/ca-certificates/ca/tls.crt" + + [grpc.volume] + cert = "/usr/local/share/ca-certificates/volume/tls.crt" + key = "/usr/local/share/ca-certificates/volume/tls.key" + + [grpc.master] + cert = "/usr/local/share/ca-certificates/master/tls.crt" + key = "/usr/local/share/ca-certificates/master/tls.key" + + [grpc.filer] + cert = "/usr/local/share/ca-certificates/filer/tls.crt" + key = "/usr/local/share/ca-certificates/filer/tls.key" + + # use this for any place needs a grpc client + # i.e., "weed backup|benchmark|filer.copy|filer.replicate|mount|s3|upload" + [grpc.client] + cert = "/usr/local/share/ca-certificates/client/tls.crt" + key = "/usr/local/share/ca-certificates/client/tls.key" + + # volume server https options + # Note: work in progress! + # this does not work with other clients, e.g., "weed filer|mount" etc, yet. + [https.client] + enabled = false + [https.volume] + cert = "" + key = "" +{{- end }} diff --git a/k8s/seaweedfs/templates/service-account.yaml b/k8s/seaweedfs/templates/service-account.yaml new file mode 100644 index 000000000..e82ef7d62 --- /dev/null +++ b/k8s/seaweedfs/templates/service-account.yaml @@ -0,0 +1,29 @@ +#hack for delete pod master after migration +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1beta1 +metadata: + name: seaweefds-rw-cr +rules: + - apiGroups: [""] + resources: ["pods"] + verbs: ["get", "list", "watch", "create", "update", "patch", "delete"] +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: seaweefds-rw-sa + namespace: {{ .Release.Namespace }} +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1beta1 +metadata: + name: system:serviceaccount:seaweefds-rw-sa:default +subjects: +- kind: ServiceAccount + name: seaweefds-rw-sa + namespace: {{ .Release.Namespace }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: seaweefds-rw-cr \ No newline at end of file diff --git a/k8s/seaweedfs/templates/volume-cert.yaml 
b/k8s/seaweedfs/templates/volume-cert.yaml new file mode 100644 index 000000000..72c62a0f5 --- /dev/null +++ b/k8s/seaweedfs/templates/volume-cert.yaml @@ -0,0 +1,33 @@ +{{- if .Values.global.enableSecurity }} +apiVersion: certmanager.k8s.io/v1alpha1 +kind: Certificate +metadata: + name: {{ template "seaweedfs.name" . }}-volume-cert + namespace: {{ .Release.Namespace }} +spec: + secretName: {{ template "seaweedfs.name" . }}-volume-cert + issuerRef: + name: {{ template "seaweedfs.name" . }}-clusterissuer + kind: ClusterIssuer + commonName: {{ .Values.certificates.commonName }} + organization: + - "SeaweedFS CA" + dnsNames: + - '*.{{ .Release.Namespace }}' + - '*.{{ .Release.Namespace }}.svc' + - '*.{{ .Release.Namespace }}.svc.cluster.local' + - '*.{{ template "seaweedfs.name" . }}-master' + - '*.{{ template "seaweedfs.name" . }}-master.{{ .Release.Namespace }}' + - '*.{{ template "seaweedfs.name" . }}-master.{{ .Release.Namespace }}.svc' + - '*.{{ template "seaweedfs.name" . }}-master.{{ .Release.Namespace }}.svc.cluster.local' +{{- if .Values.certificates.ipAddresses }} + ipAddresses: + {{- range .Values.certificates.ipAddresses }} + - {{ . }} + {{- end }} +{{- end }} + keyAlgorithm: {{ .Values.certificates.keyAlgorithm }} + keySize: {{ .Values.certificates.keySize }} + duration: {{ .Values.certificates.duration }} + renewBefore: {{ .Values.certificates.renewBefore }} +{{- end }} diff --git a/k8s/seaweedfs/templates/volume-service.yaml b/k8s/seaweedfs/templates/volume-service.yaml new file mode 100644 index 000000000..fc7716681 --- /dev/null +++ b/k8s/seaweedfs/templates/volume-service.yaml @@ -0,0 +1,22 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "seaweedfs.name" . }}-volume + namespace: {{ .Release.Namespace }} + labels: + app: {{ template "seaweedfs.name" . 
}} + component: volume +spec: + clusterIP: None + ports: + - name: "swfs-volume" + port: {{ .Values.volume.port }} + targetPort: {{ .Values.volume.port }} + protocol: TCP + - name: "swfs-volume-18080" + port: {{ .Values.volume.grpcPort }} + targetPort: {{ .Values.volume.grpcPort }} + protocol: TCP + selector: + app: {{ template "seaweedfs.name" . }} + component: volume \ No newline at end of file diff --git a/k8s/seaweedfs/templates/volume-statefulset.yaml b/k8s/seaweedfs/templates/volume-statefulset.yaml new file mode 100644 index 000000000..6180bc7df --- /dev/null +++ b/k8s/seaweedfs/templates/volume-statefulset.yaml @@ -0,0 +1,184 @@ +{{- if .Values.volume.enabled }} +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ template "seaweedfs.name" . }}-volume + namespace: {{ .Release.Namespace }} + labels: + app: {{ template "seaweedfs.name" . }} + chart: {{ template "seaweedfs.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +spec: + serviceName: {{ template "seaweedfs.name" . }}-volume + replicas: {{ .Values.volume.replicas }} + selector: + matchLabels: + app: {{ template "seaweedfs.name" . }} + chart: {{ template "seaweedfs.chart" . }} + release: {{ .Release.Name }} + component: volume + template: + metadata: + labels: + app: {{ template "seaweedfs.name" . }} + chart: {{ template "seaweedfs.chart" . }} + release: {{ .Release.Name }} + component: volume + spec: + {{- if .Values.volume.affinity }} + affinity: + {{ tpl .Values.volume.affinity . | nindent 8 | trim }} + {{- end }} + restartPolicy: {{ default .Values.global.restartPolicy .Values.volume.restartPolicy }} + {{- if .Values.volume.tolerations }} + tolerations: + {{ tpl .Values.volume.tolerations . 
| nindent 8 | trim }} + {{- end }} + {{- if .Values.global.imagePullSecrets }} + imagePullSecrets: + - name: {{ .Values.global.imagePullSecrets }} + {{- end }} + terminationGracePeriodSeconds: 10 + {{- if .Values.volume.priorityClassName }} + priorityClassName: {{ .Values.volume.priorityClassName | quote }} + {{- end }} + enableServiceLinks: false + containers: + - name: seaweedfs + image: {{ template "volume.image" . }} + imagePullPolicy: {{ default "IfNotPresent" .Values.global.imagePullPolicy }} + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: HOST_IP + valueFrom: + fieldRef: + fieldPath: status.hostIP + - name: SEAWEEDFS_FULLNAME + value: "{{ template "seaweedfs.name" . }}" + command: + - "/bin/sh" + - "-ec" + - | + exec /usr/bin/weed -logdir=/logs \ + {{- if .Values.volume.loggingOverrideLevel }} + -v={{ .Values.volume.loggingOverrideLevel }} \ + {{- else }} + -v={{ .Values.global.loggingLevel }} \ + {{- end }} + volume \ + -port={{ .Values.volume.port }} \ + -dir={{ .Values.volume.dir }} \ + -max={{ .Values.volume.maxVolumes }} \ + {{- if .Values.volume.rack }} + -rack={{ .Values.volume.rack }} \ + {{- end }} + {{- if .Values.volume.dataCenter }} + -dataCenter={{ .Values.volume.dataCenter }} \ + {{- end }} + -ip.bind={{ .Values.volume.ipBind }} \ + -read.redirect={{ .Values.volume.readRedirect }} \ + {{- if .Values.volume.whiteList }} + -whiteList={{ .Values.volume.whiteList }} \ + {{- end }} + -ip=${POD_NAME}.${SEAWEEDFS_FULLNAME}-volume \ + -compactionMBps={{ .Values.volume.compactionMBps }} \ + -mserver={{ range $index := until (.Values.master.replicas | int) }}${SEAWEEDFS_FULLNAME}-master-{{ $index }}.${SEAWEEDFS_FULLNAME}-master:{{ $.Values.master.port }}{{ if lt $index (sub ($.Values.master.replicas | int) 1) }},{{ end }}{{ end }} + volumeMounts: + - name: seaweedfs-volume-storage + mountPath: "/data/" + - name: seaweedfs-volume-log-volume 
+ mountPath: "/logs/" + {{- if .Values.global.enableSecurity }} + - name: security-config + readOnly: true + mountPath: /etc/seaweedfs/security.toml + subPath: security.toml + - name: ca-cert + readOnly: true + mountPath: /usr/local/share/ca-certificates/ca/ + - name: master-cert + readOnly: true + mountPath: /usr/local/share/ca-certificates/master/ + - name: volume-cert + readOnly: true + mountPath: /usr/local/share/ca-certificates/volume/ + - name: filer-cert + readOnly: true + mountPath: /usr/local/share/ca-certificates/filer/ + - name: client-cert + readOnly: true + mountPath: /usr/local/share/ca-certificates/client/ + {{- end }} + {{ tpl .Values.volume.extraVolumeMounts . | nindent 12 | trim }} + ports: + - containerPort: {{ .Values.volume.port }} + name: swfs-vol + - containerPort: {{ .Values.volume.grpcPort }} + #name: swfs-vol-grpc + readinessProbe: + httpGet: + path: /status + port: {{ .Values.volume.port }} + scheme: HTTP + initialDelaySeconds: 5 + periodSeconds: 15 + successThreshold: 1 + failureThreshold: 100 + livenessProbe: + httpGet: + path: /status + port: {{ .Values.volume.port }} + scheme: HTTP + initialDelaySeconds: 20 + periodSeconds: 30 + successThreshold: 1 + failureThreshold: 10 + {{- if .Values.volume.resources }} + resources: + {{ tpl .Values.volume.resources . | nindent 12 | trim }} + {{- end }} + volumes: + - name: seaweedfs-volume-log-volume + hostPath: + path: /storage/logs/seaweedfs/volume + type: DirectoryOrCreate + - name: seaweedfs-volume-storage + hostPath: + path: /storage/object_store/ + type: DirectoryOrCreate + {{- if .Values.global.enableSecurity }} + - name: security-config + configMap: + name: {{ template "seaweedfs.name" . }}-security-config + - name: ca-cert + secret: + secretName: {{ template "seaweedfs.name" . }}-ca-cert + - name: master-cert + secret: + secretName: {{ template "seaweedfs.name" . }}-master-cert + - name: volume-cert + secret: + secretName: {{ template "seaweedfs.name" . 
}}-volume-cert + - name: filer-cert + secret: + secretName: {{ template "seaweedfs.name" . }}-filer-cert + - name: client-cert + secret: + secretName: {{ template "seaweedfs.name" . }}-client-cert + {{- end }} + {{- if .Values.volume.extraVolumes }} + {{ tpl .Values.volume.extraVolumes . | indent 8 | trim }} + {{- end }} + {{- if .Values.volume.nodeSelector }} + nodeSelector: + {{ tpl .Values.volume.nodeSelector . | indent 8 | trim }} + {{- end }} +{{- end }} diff --git a/k8s/seaweedfs/values.yaml b/k8s/seaweedfs/values.yaml new file mode 100644 index 000000000..0ccc5963b --- /dev/null +++ b/k8s/seaweedfs/values.yaml @@ -0,0 +1,306 @@ +# Available parameters and their default values for the SeaweedFS chart. + +global: + registry: "" + repository: "" + imageName: chrislusf/seaweedfs + imageTag: "1.84" + imagePullPolicy: IfNotPresent + imagePullSecrets: imagepullsecret + restartPolicy: Always + loggingLevel: 1 + enableSecurity: false + monitoring: + enabled: false + gatewayHost: null + gatewayPort: null + +image: + registry: "" + repository: "" + +master: + enabled: true + repository: null + imageName: null + imageTag: null + imageOverride: null + restartPolicy: null + replicas: 1 + port: 9333 + grpcPort: 19333 + ipBind: "0.0.0.0" + volumePreallocate: false + volumeSizeLimitMB: 30000 + loggingOverrideLevel: null + + # Disable http request, only gRpc operations are allowed + disableHttp: false + + extraVolumes: "" + extraVolumeMounts: "" + + # storage and storageClass are the settings for configuring stateful + # storage for the master pods. storage should be set to the disk size of + # the attached volume. storageClass is the class of storage which defaults + # to null (the Kube cluster will pick the default). + storage: 25Gi + storageClass: null + + # Resource requests, limits, etc. for the master cluster placement. This + # should map directly to the value of the resources field for a PodSpec, + # formatted as a multi-line string. 
By default no direct resource request + # is made. + resources: null + + # updatePartition is used to control a careful rolling update of SeaweedFS + # masters. + updatePartition: 0 + + # Affinity Settings + # Commenting out or setting as empty the affinity variable, will allow + # deployment to single node services such as Minikube + affinity: | + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + app: {{ template "seaweedfs.name" . }} + release: "{{ .Release.Name }}" + component: master + topologyKey: kubernetes.io/hostname + + # Toleration Settings for master pods + # This should be a multi-line string matching the Toleration array + # in a PodSpec. + tolerations: "" + + # nodeSelector labels for master pod assignment, formatted as a muli-line string. + # ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector + # Example: + # nodeSelector: | + # beta.kubernetes.io/arch: amd64 + nodeSelector: | + sw-backend: "true" + + # used to assign priority to master pods + # ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/ + priorityClassName: "" + + +volume: + enabled: true + repository: null + imageName: null + imageTag: null + imageOverride: null + restartPolicy: null + port: 8080 + grpcPort: 18080 + ipBind: "0.0.0.0" + replicas: 1 + loggingOverrideLevel: null + + # limit background compaction or copying speed in mega bytes per second + compactionMBps: "40" + + # Directories to store data files. dir[,dir]... (default "/tmp") + dir: "/data" + + # Maximum numbers of volumes, count[,count]... + # If set to zero on non-windows OS, the limit will be auto configured. (default "7") + maxVolumes: "0" + + # Volume server's rack name + rack: null + + # Volume server's data center name + dataCenter: null + + # Redirect moved or non-local volumes. (default true) + readRedirect: true + + # Comma separated Ip addresses having write permission. No limit if empty. 
+ whiteList: null + + extraVolumes: "" + extraVolumeMounts: "" + + # Affinity Settings + # Commenting out or setting as empty the affinity variable, will allow + # deployment to single node services such as Minikube + affinity: | + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + app: {{ template "seaweedfs.name" . }} + release: "{{ .Release.Name }}" + component: volume + topologyKey: kubernetes.io/hostname + + # Resource requests, limits, etc. for the server cluster placement. This + # should map directly to the value of the resources field for a PodSpec, + # formatted as a multi-line string. By default no direct resource request + # is made. + resources: null + + # Toleration Settings for server pods + # This should be a multi-line string matching the Toleration array + # in a PodSpec. + tolerations: "" + + # nodeSelector labels for server pod assignment, formatted as a muli-line string. + # ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector + # Example: + # nodeSelector: | + # beta.kubernetes.io/arch: amd64 + nodeSelector: | + sw-volume: "true" + + # used to assign priority to server pods + # ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/ + priorityClassName: "" + + +filer: + enabled: true + repository: null + imageName: null + imageTag: null + imageOverride: null + restartPolicy: null + replicas: 1 + port: 8888 + grpcPort: 18888 + loggingOverrideLevel: null + + # Limit sub dir listing size (default 100000) + dirListLimit: 100000 + + # Turn off directory listing + disableDirListing: false + + # Disable http request, only gRpc operations are allowed + disableHttp: false + + # storage and storageClass are the settings for configuring stateful + # storage for the master pods. storage should be set to the disk size of + # the attached volume. storageClass is the class of storage which defaults + # to null (the Kube cluster will pick the default). 
+ storage: 25Gi + storageClass: null + + extraVolumes: "" + extraVolumeMounts: "" + + # Affinity Settings + # Commenting out or setting as empty the affinity variable, will allow + # deployment to single node services such as Minikube + affinity: | + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + app: {{ template "seaweedfs.name" . }} + release: "{{ .Release.Name }}" + component: filer + topologyKey: kubernetes.io/hostname + + # updatePartition is used to control a careful rolling update of SeaweedFS + # masters. + updatePartition: 0 + + # Resource requests, limits, etc. for the server cluster placement. This + # should map directly to the value of the resources field for a PodSpec, + # formatted as a multi-line string. By default no direct resource request + # is made. + resources: null + + # Toleration Settings for server pods + # This should be a multi-line string matching the Toleration array + # in a PodSpec. + tolerations: "" + + # nodeSelector labels for server pod assignment, formatted as a muli-line string. + # ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector + # Example: + # nodeSelector: | + # beta.kubernetes.io/arch: amd64 + nodeSelector: | + sw-backend: "true" + + # used to assign priority to server pods + # ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/ + priorityClassName: "" + + dbSchema: + imageName: db-schema + imageTag: "development" + imageOverride: "" + + # extraEnvVars is a list of extra enviroment variables to set with the stateful set. 
+ extraEnvironmentVars: + WEED_MYSQL_ENABLED: "true" + WEED_MYSQL_HOSTNAME: "mysql-db-host" + WEED_MYSQL_PORT: "3306" + WEED_MYSQL_DATABASE: "sw_database" + WEED_MYSQL_CONNECTION_MAX_IDLE: "10" + WEED_MYSQL_CONNECTION_MAX_OPEN: "150" + # enable usage of memsql as filer backend + WEED_MYSQL_INTERPOLATEPARAMS: "true" + WEED_LEVELDB2_ENABLED: "false" + # with http DELETE, by default the filer would check whether a folder is empty. + # recursive_delete will delete all sub folders and files, similar to "rm -Rf" + WEED_FILER_OPTIONS_RECURSIVE_DELETE: "false" + # directories under this folder will be automatically creating a separate bucket + WEED_FILER_BUCKETS_FOLDER: "/buckets" + # directories under this folder will be store message queue data + WEED_FILER_QUEUES_FOLDER: "/queues" + +s3: + enabled: true + repository: null + imageName: null + imageTag: null + restartPolicy: null + replicas: 1 + port: 8333 + loggingOverrideLevel: null + + # Suffix of the host name, {bucket}.{domainName} + domainName: "" + + extraVolumes: "" + extraVolumeMounts: "" + + # Resource requests, limits, etc. for the server cluster placement. This + # should map directly to the value of the resources field for a PodSpec, + # formatted as a multi-line string. By default no direct resource request + # is made. + resources: null + + # Toleration Settings for server pods + # This should be a multi-line string matching the Toleration array + # in a PodSpec. + tolerations: "" + + # nodeSelector labels for server pod assignment, formatted as a muli-line string. 
+ # ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector + # Example: + # nodeSelector: | + # beta.kubernetes.io/arch: amd64 + nodeSelector: | + sw-backend: "true" + + # used to assign priority to server pods + # ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/ + priorityClassName: "" + +certificates: + commonName: "SeaweedFS CA" + ipAddresses: [] + keyAlgorithm: rsa + keySize: 2048 + duration: 2160h # 90d + renewBefore: 360h # 15d diff --git a/other/java/client/pom.xml b/other/java/client/pom.xml index 1ea39863f..05061e0f6 100644 --- a/other/java/client/pom.xml +++ b/other/java/client/pom.xml @@ -1,10 +1,11 @@ - + 4.0.0 com.github.chrislusf seaweedfs-client - 1.0.5 + 1.2.9 org.sonatype.oss @@ -13,12 +14,18 @@ - 3.5.1 - 1.16.1 - 26.0-jre + 3.9.1 + + 1.23.0 + 28.0-jre + + com.moandjiezana.toml + toml4j + 0.7.2 + com.google.protobuf @@ -74,7 +81,7 @@ kr.motd.maven os-maven-plugin - 1.5.0.Final + 1.6.2 @@ -82,18 +89,20 @@ org.apache.maven.plugins maven-compiler-plugin - 7 - 7 + 8 + 8 org.xolstice.maven.plugins protobuf-maven-plugin - 0.5.1 + 0.6.1 - com.google.protobuf:protoc:${protobuf.version}-1:exe:${os.detected.classifier} + com.google.protobuf:protoc:${protobuf.version}:exe:${os.detected.classifier} + grpc-java - io.grpc:protoc-gen-grpc-java:${grpc.version}:exe:${os.detected.classifier} + io.grpc:protoc-gen-grpc-java:${grpc.version}:exe:${os.detected.classifier} + diff --git a/other/java/client/pom_debug.xml b/other/java/client/pom_debug.xml new file mode 100644 index 000000000..1d8454bf7 --- /dev/null +++ b/other/java/client/pom_debug.xml @@ -0,0 +1,139 @@ + + + 4.0.0 + + com.github.chrislusf + seaweedfs-client + 1.2.9 + + + org.sonatype.oss + oss-parent + 9 + + + + 3.9.1 + + 1.23.0 + 28.0-jre + + + + + com.moandjiezana.toml + toml4j + 0.7.2 + + + + com.google.protobuf + protobuf-java + ${protobuf.version} + + + com.google.guava + guava + ${guava.version} + + + io.grpc + grpc-netty-shaded + 
${grpc.version} + + + io.grpc + grpc-protobuf + ${grpc.version} + + + io.grpc + grpc-stub + ${grpc.version} + + + org.slf4j + slf4j-api + 1.7.25 + + + org.apache.httpcomponents + httpmime + 4.5.6 + + + junit + junit + 4.12 + test + + + + + + + kr.motd.maven + os-maven-plugin + 1.6.2 + + + + + org.apache.maven.plugins + maven-compiler-plugin + + 8 + 8 + + + + org.xolstice.maven.plugins + protobuf-maven-plugin + 0.6.1 + + com.google.protobuf:protoc:${protobuf.version}:exe:${os.detected.classifier} + + grpc-java + io.grpc:protoc-gen-grpc-java:${grpc.version}:exe:${os.detected.classifier} + + + + + + compile + compile-custom + + + + + + org.apache.maven.plugins + maven-source-plugin + 2.2.1 + + + attach-sources + + jar-no-fork + + + + + + org.apache.maven.plugins + maven-javadoc-plugin + 2.9.1 + + + attach-javadocs + + jar + + + + + + + + diff --git a/other/java/client/src/main/java/seaweedfs/client/ChunkCache.java b/other/java/client/src/main/java/seaweedfs/client/ChunkCache.java new file mode 100644 index 000000000..e249d4524 --- /dev/null +++ b/other/java/client/src/main/java/seaweedfs/client/ChunkCache.java @@ -0,0 +1,27 @@ +package seaweedfs.client; + +import com.google.common.cache.Cache; +import com.google.common.cache.CacheBuilder; + +import java.util.concurrent.TimeUnit; + +public class ChunkCache { + + private final Cache cache; + + public ChunkCache(int maxEntries) { + this.cache = CacheBuilder.newBuilder() + .maximumSize(maxEntries) + .expireAfterAccess(1, TimeUnit.HOURS) + .build(); + } + + public byte[] getChunk(String fileId) { + return this.cache.getIfPresent(fileId); + } + + public void setChunk(String fileId, byte[] data) { + this.cache.put(fileId, data); + } + +} diff --git a/other/java/client/src/main/java/seaweedfs/client/FilerClient.java b/other/java/client/src/main/java/seaweedfs/client/FilerClient.java index 63d0d8320..ef32c7e9a 100644 --- a/other/java/client/src/main/java/seaweedfs/client/FilerClient.java +++ 
b/other/java/client/src/main/java/seaweedfs/client/FilerClient.java @@ -7,13 +7,14 @@ import java.nio.file.Path; import java.nio.file.Paths; import java.util.ArrayList; import java.util.Arrays; +import java.util.Iterator; import java.util.List; public class FilerClient { private static final Logger LOG = LoggerFactory.getLogger(FilerClient.class); - private FilerGrpcClient filerGrpcClient; + private final FilerGrpcClient filerGrpcClient; public FilerClient(String host, int grpcPort) { filerGrpcClient = new FilerGrpcClient(host, grpcPort); @@ -34,13 +35,12 @@ public class FilerClient { public boolean mkdirs(String path, int mode, int uid, int gid, String userName, String[] groupNames) { - Path pathObject = Paths.get(path); - String parent = pathObject.getParent().toString(); - String name = pathObject.getFileName().toString(); - if ("/".equals(path)) { return true; } + Path pathObject = Paths.get(path); + String parent = pathObject.getParent().toString(); + String name = pathObject.getFileName().toString(); mkdirs(parent, mode, uid, gid, userName, groupNames); @@ -51,23 +51,38 @@ public class FilerClient { } return createEntry( - parent, - newDirectoryEntry(name, mode, uid, gid, userName, groupNames).build() + parent, + newDirectoryEntry(name, mode, uid, gid, userName, groupNames).build() ); } - public boolean rm(String path, boolean isRecursive) { + public boolean mv(String oldPath, String newPath) { + + Path oldPathObject = Paths.get(oldPath); + String oldParent = oldPathObject.getParent().toString(); + String oldName = oldPathObject.getFileName().toString(); + + Path newPathObject = Paths.get(newPath); + String newParent = newPathObject.getParent().toString(); + String newName = newPathObject.getFileName().toString(); + + return atomicRenameEntry(oldParent, oldName, newParent, newName); + + } + + public boolean rm(String path, boolean isRecursive, boolean ignoreRecusiveError) { Path pathObject = Paths.get(path); String parent = pathObject.getParent().toString(); 
String name = pathObject.getFileName().toString(); return deleteEntry( - parent, - name, - true, - isRecursive); + parent, + name, + true, + isRecursive, + ignoreRecusiveError); } public boolean touch(String path, int mode) { @@ -84,18 +99,18 @@ public class FilerClient { FilerProto.Entry entry = lookupEntry(parent, name); if (entry == null) { return createEntry( - parent, - newFileEntry(name, mode, uid, gid, userName, groupNames).build() + parent, + newFileEntry(name, mode, uid, gid, userName, groupNames).build() ); } long now = System.currentTimeMillis() / 1000L; FilerProto.FuseAttributes.Builder attr = entry.getAttributes().toBuilder() - .setMtime(now) - .setUid(uid) - .setGid(gid) - .setUserName(userName) - .clearGroupName() - .addAllGroupName(Arrays.asList(groupNames)); + .setMtime(now) + .setUid(uid) + .setGid(gid) + .setUserName(userName) + .clearGroupName() + .addAllGroupName(Arrays.asList(groupNames)); return updateEntry(parent, entry.toBuilder().setAttributes(attr).build()); } @@ -105,17 +120,17 @@ public class FilerClient { long now = System.currentTimeMillis() / 1000L; return FilerProto.Entry.newBuilder() - .setName(name) - .setIsDirectory(true) - .setAttributes(FilerProto.FuseAttributes.newBuilder() - .setMtime(now) - .setCrtime(now) - .setUid(uid) - .setGid(gid) - .setFileMode(mode | 1 << 31) - .setUserName(userName) - .clearGroupName() - .addAllGroupName(Arrays.asList(groupNames))); + .setName(name) + .setIsDirectory(true) + .setAttributes(FilerProto.FuseAttributes.newBuilder() + .setMtime(now) + .setCrtime(now) + .setUid(uid) + .setGid(gid) + .setFileMode(mode | 1 << 31) + .setUserName(userName) + .clearGroupName() + .addAllGroupName(Arrays.asList(groupNames))); } public FilerProto.Entry.Builder newFileEntry(String name, int mode, @@ -124,17 +139,17 @@ public class FilerClient { long now = System.currentTimeMillis() / 1000L; return FilerProto.Entry.newBuilder() - .setName(name) - .setIsDirectory(false) - 
.setAttributes(FilerProto.FuseAttributes.newBuilder() - .setMtime(now) - .setCrtime(now) - .setUid(uid) - .setGid(gid) - .setFileMode(mode) - .setUserName(userName) - .clearGroupName() - .addAllGroupName(Arrays.asList(groupNames))); + .setName(name) + .setIsDirectory(false) + .setAttributes(FilerProto.FuseAttributes.newBuilder() + .setMtime(now) + .setCrtime(now) + .setUid(uid) + .setGid(gid) + .setFileMode(mode) + .setUserName(userName) + .clearGroupName() + .addAllGroupName(Arrays.asList(groupNames))); } public List listEntries(String path) { @@ -159,22 +174,35 @@ public class FilerClient { } public List listEntries(String path, String entryPrefix, String lastEntryName, int limit) { - return filerGrpcClient.getBlockingStub().listEntries(FilerProto.ListEntriesRequest.newBuilder() - .setDirectory(path) - .setPrefix(entryPrefix) - .setStartFromFileName(lastEntryName) - .setLimit(limit) - .build()).getEntriesList(); + Iterator iter = filerGrpcClient.getBlockingStub().listEntries(FilerProto.ListEntriesRequest.newBuilder() + .setDirectory(path) + .setPrefix(entryPrefix) + .setStartFromFileName(lastEntryName) + .setLimit(limit) + .build()); + List entries = new ArrayList<>(); + while (iter.hasNext()) { + FilerProto.ListEntriesResponse resp = iter.next(); + entries.add(fixEntryAfterReading(resp.getEntry())); + } + return entries; } public FilerProto.Entry lookupEntry(String directory, String entryName) { try { - return filerGrpcClient.getBlockingStub().lookupDirectoryEntry( - FilerProto.LookupDirectoryEntryRequest.newBuilder() - .setDirectory(directory) - .setName(entryName) - .build()).getEntry(); + FilerProto.Entry entry = filerGrpcClient.getBlockingStub().lookupDirectoryEntry( + FilerProto.LookupDirectoryEntryRequest.newBuilder() + .setDirectory(directory) + .setName(entryName) + .build()).getEntry(); + if (entry == null) { + return null; + } + return fixEntryAfterReading(entry); } catch (Exception e) { + if (e.getMessage().indexOf("filer: no entry is found in filer 
store") > 0) { + return null; + } LOG.warn("lookupEntry {}/{}: {}", directory, entryName, e); return null; } @@ -184,9 +212,9 @@ public class FilerClient { public boolean createEntry(String parent, FilerProto.Entry entry) { try { filerGrpcClient.getBlockingStub().createEntry(FilerProto.CreateEntryRequest.newBuilder() - .setDirectory(parent) - .setEntry(entry) - .build()); + .setDirectory(parent) + .setEntry(entry) + .build()); } catch (Exception e) { LOG.warn("createEntry {}/{}: {}", parent, entry.getName(), e); return false; @@ -197,9 +225,9 @@ public class FilerClient { public boolean updateEntry(String parent, FilerProto.Entry entry) { try { filerGrpcClient.getBlockingStub().updateEntry(FilerProto.UpdateEntryRequest.newBuilder() - .setDirectory(parent) - .setEntry(entry) - .build()); + .setDirectory(parent) + .setEntry(entry) + .build()); } catch (Exception e) { LOG.warn("createEntry {}/{}: {}", parent, entry.getName(), e); return false; @@ -207,14 +235,15 @@ public class FilerClient { return true; } - public boolean deleteEntry(String parent, String entryName, boolean isDeleteFileChunk, boolean isRecursive) { + public boolean deleteEntry(String parent, String entryName, boolean isDeleteFileChunk, boolean isRecursive, boolean ignoreRecusiveError) { try { filerGrpcClient.getBlockingStub().deleteEntry(FilerProto.DeleteEntryRequest.newBuilder() - .setDirectory(parent) - .setName(entryName) - .setIsDeleteData(isDeleteFileChunk) - .setIsRecursive(isRecursive) - .build()); + .setDirectory(parent) + .setName(entryName) + .setIsDeleteData(isDeleteFileChunk) + .setIsRecursive(isRecursive) + .setIgnoreRecursiveError(ignoreRecusiveError) + .build()); } catch (Exception e) { LOG.warn("deleteEntry {}/{}: {}", parent, entryName, e); return false; @@ -222,4 +251,39 @@ public class FilerClient { return true; } + public boolean atomicRenameEntry(String oldParent, String oldName, String newParent, String newName) { + try { + 
filerGrpcClient.getBlockingStub().atomicRenameEntry(FilerProto.AtomicRenameEntryRequest.newBuilder() + .setOldDirectory(oldParent) + .setOldName(oldName) + .setNewDirectory(newParent) + .setNewName(newName) + .build()); + } catch (Exception e) { + LOG.warn("atomicRenameEntry {}/{} => {}/{}: {}", oldParent, oldName, newParent, newName, e); + return false; + } + return true; + } + + private FilerProto.Entry fixEntryAfterReading(FilerProto.Entry entry) { + if (entry.getChunksList().size() <= 0) { + return entry; + } + String fileId = entry.getChunks(0).getFileId(); + if (fileId != null && fileId.length() != 0) { + return entry; + } + FilerProto.Entry.Builder entryBuilder = entry.toBuilder(); + entryBuilder.clearChunks(); + for (FilerProto.FileChunk chunk : entry.getChunksList()) { + FilerProto.FileChunk.Builder chunkBuilder = chunk.toBuilder(); + FilerProto.FileId fid = chunk.getFid(); + fileId = String.format("%d,%d%x", fid.getVolumeId(), fid.getFileKey(), fid.getCookie()); + chunkBuilder.setFileId(fileId); + entryBuilder.addChunks(chunkBuilder); + } + return entryBuilder.build(); + } + } diff --git a/other/java/client/src/main/java/seaweedfs/client/FilerGrpcClient.java b/other/java/client/src/main/java/seaweedfs/client/FilerGrpcClient.java index 16b7c3249..3f5d1e8e9 100644 --- a/other/java/client/src/main/java/seaweedfs/client/FilerGrpcClient.java +++ b/other/java/client/src/main/java/seaweedfs/client/FilerGrpcClient.java @@ -2,22 +2,55 @@ package seaweedfs.client; import io.grpc.ManagedChannel; import io.grpc.ManagedChannelBuilder; +import io.grpc.netty.shaded.io.grpc.netty.NegotiationType; +import io.grpc.netty.shaded.io.grpc.netty.NettyChannelBuilder; +import io.grpc.netty.shaded.io.netty.handler.ssl.SslContext; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import javax.net.ssl.SSLException; import java.util.concurrent.TimeUnit; -import java.util.logging.Logger; public class FilerGrpcClient { - private static final Logger logger = 
Logger.getLogger(FilerGrpcClient.class.getName()); + private static final Logger logger = LoggerFactory.getLogger(FilerGrpcClient.class); + static SslContext sslContext; + + static { + try { + sslContext = FilerSslContext.loadSslContext(); + } catch (SSLException e) { + logger.warn("failed to load ssl context", e); + } + } private final ManagedChannel channel; private final SeaweedFilerGrpc.SeaweedFilerBlockingStub blockingStub; private final SeaweedFilerGrpc.SeaweedFilerStub asyncStub; private final SeaweedFilerGrpc.SeaweedFilerFutureStub futureStub; - + private boolean cipher = false; + private String collection = ""; + private String replication = ""; public FilerGrpcClient(String host, int grpcPort) { - this(ManagedChannelBuilder.forAddress(host, grpcPort).usePlaintext()); + this(host, grpcPort, sslContext); + } + + public FilerGrpcClient(String host, int grpcPort, SslContext sslContext) { + + this(sslContext == null ? + ManagedChannelBuilder.forAddress(host, grpcPort).usePlaintext() : + NettyChannelBuilder.forAddress(host, grpcPort) + .negotiationType(NegotiationType.TLS) + .sslContext(sslContext)); + + FilerProto.GetFilerConfigurationResponse filerConfigurationResponse = + this.getBlockingStub().getFilerConfiguration( + FilerProto.GetFilerConfigurationRequest.newBuilder().build()); + cipher = filerConfigurationResponse.getCipher(); + collection = filerConfigurationResponse.getCollection(); + replication = filerConfigurationResponse.getReplication(); + } public FilerGrpcClient(ManagedChannelBuilder channelBuilder) { @@ -27,6 +60,18 @@ public class FilerGrpcClient { futureStub = SeaweedFilerGrpc.newFutureStub(channel); } + public boolean isCipher() { + return cipher; + } + + public String getCollection() { + return collection; + } + + public String getReplication() { + return replication; + } + public void shutdown() throws InterruptedException { channel.shutdown().awaitTermination(5, TimeUnit.SECONDS); } @@ -42,4 +87,5 @@ public class FilerGrpcClient { public 
SeaweedFilerGrpc.SeaweedFilerFutureStub getFutureStub() { return futureStub; } + } diff --git a/other/java/client/src/main/java/seaweedfs/client/FilerSslContext.java b/other/java/client/src/main/java/seaweedfs/client/FilerSslContext.java new file mode 100644 index 000000000..5a88c1da3 --- /dev/null +++ b/other/java/client/src/main/java/seaweedfs/client/FilerSslContext.java @@ -0,0 +1,64 @@ +package seaweedfs.client; + +import com.google.common.base.Strings; +import com.moandjiezana.toml.Toml; +import io.grpc.netty.shaded.io.grpc.netty.GrpcSslContexts; +import io.grpc.netty.shaded.io.netty.handler.ssl.SslContext; +import io.grpc.netty.shaded.io.netty.handler.ssl.SslContextBuilder; +import io.grpc.netty.shaded.io.netty.handler.ssl.util.InsecureTrustManagerFactory; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import javax.net.ssl.SSLException; +import java.io.File; + +public class FilerSslContext { + + private static final Logger logger = LoggerFactory.getLogger(FilerSslContext.class); + + public static SslContext loadSslContext() throws SSLException { + String securityFileName = "security.toml"; + String home = System.getProperty("user.home"); + File f1 = new File("./"+securityFileName); + File f2 = new File(home + "/.seaweedfs/"+securityFileName); + File f3 = new File(home + "/etc/seaweedfs/"+securityFileName); + + File securityFile = f1.exists()? f1 : f2.exists() ? f2 : f3.exists()? 
f3 : null; + + if (securityFile==null){ + return null; + } + + Toml toml = new Toml().read(securityFile); + logger.debug("reading ssl setup from {}", securityFile); + + String trustCertCollectionFilePath = toml.getString("grpc.ca"); + logger.debug("loading ca from {}", trustCertCollectionFilePath); + String clientCertChainFilePath = toml.getString("grpc.client.cert"); + logger.debug("loading client ca from {}", clientCertChainFilePath); + String clientPrivateKeyFilePath = toml.getString("grpc.client.key"); + logger.debug("loading client key from {}", clientPrivateKeyFilePath); + + if (Strings.isNullOrEmpty(clientPrivateKeyFilePath) && Strings.isNullOrEmpty(clientPrivateKeyFilePath)){ + return null; + } + + // possibly fix the format https://netty.io/wiki/sslcontextbuilder-and-private-key.html + + return buildSslContext(trustCertCollectionFilePath, clientCertChainFilePath, clientPrivateKeyFilePath); + } + + + private static SslContext buildSslContext(String trustCertCollectionFilePath, + String clientCertChainFilePath, + String clientPrivateKeyFilePath) throws SSLException { + SslContextBuilder builder = GrpcSslContexts.forClient(); + if (trustCertCollectionFilePath != null) { + builder.trustManager(new File(trustCertCollectionFilePath)); + } + if (clientCertChainFilePath != null && clientPrivateKeyFilePath != null) { + builder.keyManager(new File(clientCertChainFilePath), new File(clientPrivateKeyFilePath)); + } + return builder.trustManager(InsecureTrustManagerFactory.INSTANCE).build(); + } +} diff --git a/other/java/client/src/main/java/seaweedfs/client/Gzip.java b/other/java/client/src/main/java/seaweedfs/client/Gzip.java new file mode 100644 index 000000000..248285dd3 --- /dev/null +++ b/other/java/client/src/main/java/seaweedfs/client/Gzip.java @@ -0,0 +1,37 @@ +package seaweedfs.client; + +import java.io.ByteArrayInputStream; +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.io.InputStream; +import java.util.zip.GZIPInputStream; 
+import java.util.zip.GZIPOutputStream; + +public class Gzip { + public static byte[] compress(byte[] data) throws IOException { + ByteArrayOutputStream bos = new ByteArrayOutputStream(data.length); + GZIPOutputStream gzip = new GZIPOutputStream(bos); + gzip.write(data); + gzip.close(); + byte[] compressed = bos.toByteArray(); + bos.close(); + return compressed; + } + + public static byte[] decompress(byte[] compressed) throws IOException { + ByteArrayInputStream bis = new ByteArrayInputStream(compressed); + GZIPInputStream gis = new GZIPInputStream(bis); + return readAll(gis); + } + + private static byte[] readAll(InputStream input) throws IOException { + try( ByteArrayOutputStream output = new ByteArrayOutputStream()){ + byte[] buffer = new byte[4096]; + int n; + while (-1 != (n = input.read(buffer))) { + output.write(buffer, 0, n); + } + return output.toByteArray(); + } + } +} diff --git a/other/java/client/src/main/java/seaweedfs/client/SeaweedCipher.java b/other/java/client/src/main/java/seaweedfs/client/SeaweedCipher.java new file mode 100644 index 000000000..8d0ebd755 --- /dev/null +++ b/other/java/client/src/main/java/seaweedfs/client/SeaweedCipher.java @@ -0,0 +1,55 @@ +package seaweedfs.client; + +import javax.crypto.Cipher; +import javax.crypto.spec.GCMParameterSpec; +import javax.crypto.spec.SecretKeySpec; +import java.security.SecureRandom; + +public class SeaweedCipher { + // AES-GCM parameters + public static final int AES_KEY_SIZE = 256; // in bits + public static final int GCM_NONCE_LENGTH = 12; // in bytes + public static final int GCM_TAG_LENGTH = 16; // in bytes + + private static SecureRandom random = new SecureRandom(); + + public static byte[] genCipherKey() throws Exception { + byte[] key = new byte[AES_KEY_SIZE / 8]; + random.nextBytes(key); + return key; + } + + public static byte[] encrypt(byte[] clearTextbytes, byte[] cipherKey) throws Exception { + return encrypt(clearTextbytes, 0, clearTextbytes.length, cipherKey); + } + + public 
static byte[] encrypt(byte[] clearTextbytes, int offset, int length, byte[] cipherKey) throws Exception { + + final byte[] nonce = new byte[GCM_NONCE_LENGTH]; + random.nextBytes(nonce); + GCMParameterSpec spec = new GCMParameterSpec(GCM_TAG_LENGTH * 8, nonce); + SecretKeySpec keySpec = new SecretKeySpec(cipherKey, "AES"); + + Cipher AES_cipherInstance = Cipher.getInstance("AES/GCM/NoPadding"); + AES_cipherInstance.init(Cipher.ENCRYPT_MODE, keySpec, spec); + + byte[] encryptedText = AES_cipherInstance.doFinal(clearTextbytes, offset, length); + + byte[] iv = AES_cipherInstance.getIV(); + byte[] message = new byte[GCM_NONCE_LENGTH + clearTextbytes.length + GCM_TAG_LENGTH]; + System.arraycopy(iv, 0, message, 0, GCM_NONCE_LENGTH); + System.arraycopy(encryptedText, 0, message, GCM_NONCE_LENGTH, encryptedText.length); + + return message; + } + + public static byte[] decrypt(byte[] encryptedText, byte[] cipherKey) throws Exception { + final Cipher AES_cipherInstance = Cipher.getInstance("AES/GCM/NoPadding"); + GCMParameterSpec params = new GCMParameterSpec(GCM_TAG_LENGTH * 8, encryptedText, 0, GCM_NONCE_LENGTH); + SecretKeySpec keySpec = new SecretKeySpec(cipherKey, "AES"); + AES_cipherInstance.init(Cipher.DECRYPT_MODE, keySpec, params); + byte[] decryptedText = AES_cipherInstance.doFinal(encryptedText, GCM_NONCE_LENGTH, encryptedText.length - GCM_NONCE_LENGTH); + return decryptedText; + } + +} diff --git a/other/java/client/src/main/java/seaweedfs/client/SeaweedRead.java b/other/java/client/src/main/java/seaweedfs/client/SeaweedRead.java index a906a689b..7be39da53 100644 --- a/other/java/client/src/main/java/seaweedfs/client/SeaweedRead.java +++ b/other/java/client/src/main/java/seaweedfs/client/SeaweedRead.java @@ -5,25 +5,25 @@ import org.apache.http.HttpHeaders; import org.apache.http.HttpResponse; import org.apache.http.client.HttpClient; import org.apache.http.client.methods.HttpGet; -import org.apache.http.impl.client.HttpClientBuilder; +import 
org.apache.http.impl.client.DefaultHttpClient; +import org.apache.http.util.EntityUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import java.io.Closeable; import java.io.IOException; -import java.io.OutputStream; -import java.nio.ByteBuffer; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Comparator; -import java.util.List; -import java.util.Map; +import java.util.*; public class SeaweedRead { - // private static final Logger LOG = LoggerFactory.getLogger(SeaweedRead.class); + private static final Logger LOG = LoggerFactory.getLogger(SeaweedRead.class); + + static ChunkCache chunkCache = new ChunkCache(1000); // returns bytesRead public static long read(FilerGrpcClient filerGrpcClient, List visibleIntervals, final long position, final byte[] buffer, final int bufferOffset, - final int bufferLength) { + final int bufferLength) throws IOException { List chunkViews = viewFromVisibles(visibleIntervals, position, bufferLength); @@ -34,7 +34,7 @@ public class SeaweedRead { } FilerProto.LookupVolumeResponse lookupResponse = filerGrpcClient - .getBlockingStub().lookupVolume(lookupRequest.build()); + .getBlockingStub().lookupVolume(lookupRequest.build()); Map vid2Locations = lookupResponse.getLocationsMapMap(); @@ -58,35 +58,64 @@ public class SeaweedRead { return readCount; } - private static int readChunkView(long position, byte[] buffer, int startOffset, ChunkView chunkView, FilerProto.Locations locations) { - HttpClient client = HttpClientBuilder.create().build(); - HttpGet request = new HttpGet( - String.format("http://%s/%s", locations.getLocations(0).getUrl(), chunkView.fileId)); + private static int readChunkView(long position, byte[] buffer, int startOffset, ChunkView chunkView, FilerProto.Locations locations) throws IOException { + + byte[] chunkData = chunkCache.getChunk(chunkView.fileId); - if (!chunkView.isFullChunk) { - request.setHeader(HttpHeaders.ACCEPT_ENCODING, ""); - request.setHeader(HttpHeaders.RANGE, - 
String.format("bytes=%d-%d", chunkView.offset, chunkView.offset + chunkView.size)); + if (chunkData == null) { + chunkData = doFetchFullChunkData(chunkView, locations); } + int len = (int) chunkView.size; + LOG.debug("readChunkView fid:{} chunkData.length:{} chunkView.offset:{} buffer.length:{} startOffset:{} len:{}", + chunkView.fileId, chunkData.length, chunkView.offset, buffer.length, startOffset, len); + System.arraycopy(chunkData, (int) chunkView.offset, buffer, startOffset, len); + + chunkCache.setChunk(chunkView.fileId, chunkData); + + return len; + } + + private static byte[] doFetchFullChunkData(ChunkView chunkView, FilerProto.Locations locations) throws IOException { + + HttpClient client = new DefaultHttpClient(); + HttpGet request = new HttpGet( + String.format("http://%s/%s", locations.getLocations(0).getUrl(), chunkView.fileId)); + + request.setHeader(HttpHeaders.ACCEPT_ENCODING, ""); + + byte[] data = null; + try { HttpResponse response = client.execute(request); HttpEntity entity = response.getEntity(); - int len = (int) (chunkView.logicOffset - position + chunkView.size); - OutputStream outputStream = new ByteBufferOutputStream(ByteBuffer.wrap(buffer, startOffset, len)); - entity.writeTo(outputStream); - // LOG.debug("* read chunkView:{} startOffset:{} length:{}", chunkView, startOffset, len); + data = EntityUtils.toByteArray(entity); - return len; + } finally { + if (client instanceof Closeable) { + Closeable t = (Closeable) client; + t.close(); + } + } - } catch (IOException e) { - e.printStackTrace(); + if (chunkView.isGzipped) { + data = Gzip.decompress(data); } - return 0; + + if (chunkView.cipherKey != null && chunkView.cipherKey.length != 0) { + try { + data = SeaweedCipher.decrypt(data, chunkView.cipherKey); + } catch (Exception e) { + throw new IOException("fail to decrypt", e); + } + } + + return data; + } - public static List viewFromVisibles(List visibleIntervals, long offset, long size) { + protected static List viewFromVisibles(List 
visibleIntervals, long offset, long size) { List views = new ArrayList<>(); long stop = offset + size; @@ -94,11 +123,13 @@ public class SeaweedRead { if (chunk.start <= offset && offset < chunk.stop && offset < stop) { boolean isFullChunk = chunk.isFullChunk && chunk.start == offset && chunk.stop <= stop; views.add(new ChunkView( - chunk.fileId, - offset - chunk.start, - Math.min(chunk.stop, stop) - offset, - offset, - isFullChunk + chunk.fileId, + offset - chunk.start, + Math.min(chunk.stop, stop) - offset, + offset, + isFullChunk, + chunk.cipherKey, + chunk.isGzipped )); offset = Math.min(chunk.stop, stop); } @@ -128,11 +159,13 @@ public class SeaweedRead { List newVisibles, FilerProto.FileChunk chunk) { VisibleInterval newV = new VisibleInterval( - chunk.getOffset(), - chunk.getOffset() + chunk.getSize(), - chunk.getFileId(), - chunk.getMtime(), - true + chunk.getOffset(), + chunk.getOffset() + chunk.getSize(), + chunk.getFileId(), + chunk.getMtime(), + true, + chunk.getCipherKey().toByteArray(), + chunk.getIsGzipped() ); // easy cases to speed up @@ -148,21 +181,25 @@ public class SeaweedRead { for (VisibleInterval v : visibles) { if (v.start < chunk.getOffset() && chunk.getOffset() < v.stop) { newVisibles.add(new VisibleInterval( - v.start, - chunk.getOffset(), - v.fileId, - v.modifiedTime, - false + v.start, + chunk.getOffset(), + v.fileId, + v.modifiedTime, + false, + v.cipherKey, + v.isGzipped )); } long chunkStop = chunk.getOffset() + chunk.getSize(); if (v.start < chunkStop && chunkStop < v.stop) { newVisibles.add(new VisibleInterval( - chunkStop, - v.stop, - v.fileId, - v.modifiedTime, - false + chunkStop, + v.stop, + v.fileId, + v.modifiedTime, + false, + v.cipherKey, + v.isGzipped )); } if (chunkStop <= v.start || v.stop <= chunk.getOffset()) { @@ -209,24 +246,30 @@ public class SeaweedRead { public final long modifiedTime; public final String fileId; public final boolean isFullChunk; + public final byte[] cipherKey; + public final boolean isGzipped; 
- public VisibleInterval(long start, long stop, String fileId, long modifiedTime, boolean isFullChunk) { + public VisibleInterval(long start, long stop, String fileId, long modifiedTime, boolean isFullChunk, byte[] cipherKey, boolean isGzipped) { this.start = start; this.stop = stop; this.modifiedTime = modifiedTime; this.fileId = fileId; this.isFullChunk = isFullChunk; + this.cipherKey = cipherKey; + this.isGzipped = isGzipped; } @Override public String toString() { return "VisibleInterval{" + - "start=" + start + - ", stop=" + stop + - ", modifiedTime=" + modifiedTime + - ", fileId='" + fileId + '\'' + - ", isFullChunk=" + isFullChunk + - '}'; + "start=" + start + + ", stop=" + stop + + ", modifiedTime=" + modifiedTime + + ", fileId='" + fileId + '\'' + + ", isFullChunk=" + isFullChunk + + ", cipherKey=" + Arrays.toString(cipherKey) + + ", isGzipped=" + isGzipped + + '}'; } } @@ -236,24 +279,30 @@ public class SeaweedRead { public final long size; public final long logicOffset; public final boolean isFullChunk; + public final byte[] cipherKey; + public final boolean isGzipped; - public ChunkView(String fileId, long offset, long size, long logicOffset, boolean isFullChunk) { + public ChunkView(String fileId, long offset, long size, long logicOffset, boolean isFullChunk, byte[] cipherKey, boolean isGzipped) { this.fileId = fileId; this.offset = offset; this.size = size; this.logicOffset = logicOffset; this.isFullChunk = isFullChunk; + this.cipherKey = cipherKey; + this.isGzipped = isGzipped; } @Override public String toString() { return "ChunkView{" + - "fileId='" + fileId + '\'' + - ", offset=" + offset + - ", size=" + size + - ", logicOffset=" + logicOffset + - ", isFullChunk=" + isFullChunk + - '}'; + "fileId='" + fileId + '\'' + + ", offset=" + offset + + ", size=" + size + + ", logicOffset=" + logicOffset + + ", isFullChunk=" + isFullChunk + + ", cipherKey=" + Arrays.toString(cipherKey) + + ", isGzipped=" + isGzipped + + '}'; } } diff --git 
a/other/java/client/src/main/java/seaweedfs/client/SeaweedWrite.java b/other/java/client/src/main/java/seaweedfs/client/SeaweedWrite.java index a7cede09f..18ec77b76 100644 --- a/other/java/client/src/main/java/seaweedfs/client/SeaweedWrite.java +++ b/other/java/client/src/main/java/seaweedfs/client/SeaweedWrite.java @@ -1,18 +1,23 @@ package seaweedfs.client; +import com.google.protobuf.ByteString; import org.apache.http.HttpResponse; +import org.apache.http.client.HttpClient; import org.apache.http.client.methods.HttpPost; import org.apache.http.entity.mime.HttpMultipartMode; import org.apache.http.entity.mime.MultipartEntityBuilder; -import org.apache.http.impl.client.CloseableHttpClient; -import org.apache.http.impl.client.HttpClientBuilder; +import org.apache.http.impl.client.DefaultHttpClient; import java.io.ByteArrayInputStream; +import java.io.Closeable; import java.io.IOException; import java.io.InputStream; +import java.security.SecureRandom; public class SeaweedWrite { + private static SecureRandom random = new SecureRandom(); + public static void writeData(FilerProto.Entry.Builder entry, final String replication, final FilerGrpcClient filerGrpcClient, @@ -20,53 +25,83 @@ public class SeaweedWrite { final byte[] bytes, final long bytesOffset, final long bytesLength) throws IOException { FilerProto.AssignVolumeResponse response = filerGrpcClient.getBlockingStub().assignVolume( - FilerProto.AssignVolumeRequest.newBuilder() - .setCollection("") - .setReplication(replication) - .setDataCenter("") - .setReplication("") - .setTtlSec(0) - .build()); + FilerProto.AssignVolumeRequest.newBuilder() + .setCollection(filerGrpcClient.getCollection()) + .setReplication(replication == null ? 
filerGrpcClient.getReplication() : replication) + .setDataCenter("") + .setTtlSec(0) + .build()); String fileId = response.getFileId(); String url = response.getUrl(); + String auth = response.getAuth(); String targetUrl = String.format("http://%s/%s", url, fileId); - String etag = multipartUpload(targetUrl, bytes, bytesOffset, bytesLength); + ByteString cipherKeyString = com.google.protobuf.ByteString.EMPTY; + byte[] cipherKey = null; + if (filerGrpcClient.isCipher()) { + cipherKey = genCipherKey(); + cipherKeyString = ByteString.copyFrom(cipherKey); + } + + String etag = multipartUpload(targetUrl, auth, bytes, bytesOffset, bytesLength, cipherKey); + + synchronized (entry) { + entry.addChunks(FilerProto.FileChunk.newBuilder() + .setFileId(fileId) + .setOffset(offset) + .setSize(bytesLength) + .setMtime(System.currentTimeMillis() / 10000L) + .setETag(etag) + .setCipherKey(cipherKeyString) + ); + } - entry.addChunks(FilerProto.FileChunk.newBuilder() - .setFileId(fileId) - .setOffset(offset) - .setSize(bytesLength) - .setMtime(System.currentTimeMillis() / 10000L) - .setETag(etag) - ); + // cache fileId ~ bytes + SeaweedRead.chunkCache.setChunk(fileId, bytes); } public static void writeMeta(final FilerGrpcClient filerGrpcClient, final String parentDirectory, final FilerProto.Entry.Builder entry) { - filerGrpcClient.getBlockingStub().createEntry( - FilerProto.CreateEntryRequest.newBuilder() - .setDirectory(parentDirectory) - .setEntry(entry) - .build() - ); + synchronized (entry){ + filerGrpcClient.getBlockingStub().createEntry( + FilerProto.CreateEntryRequest.newBuilder() + .setDirectory(parentDirectory) + .setEntry(entry) + .build() + ); + } } private static String multipartUpload(String targetUrl, + String auth, final byte[] bytes, - final long bytesOffset, final long bytesLength) throws IOException { - - CloseableHttpClient client = HttpClientBuilder.create().setUserAgent("hdfs-client").build(); - - InputStream inputStream = new ByteArrayInputStream(bytes, (int) 
bytesOffset, (int) bytesLength); + final long bytesOffset, final long bytesLength, + byte[] cipherKey) throws IOException { + + HttpClient client = new DefaultHttpClient(); + + InputStream inputStream = null; + if (cipherKey == null || cipherKey.length == 0) { + inputStream = new ByteArrayInputStream(bytes, (int) bytesOffset, (int) bytesLength); + } else { + try { + byte[] encryptedBytes = SeaweedCipher.encrypt(bytes, (int) bytesOffset, (int) bytesLength, cipherKey); + inputStream = new ByteArrayInputStream(encryptedBytes, 0, encryptedBytes.length); + } catch (Exception e) { + throw new IOException("fail to encrypt data", e); + } + } HttpPost post = new HttpPost(targetUrl); + if (auth != null && auth.length() != 0) { + post.addHeader("Authorization", "BEARER " + auth); + } post.setEntity(MultipartEntityBuilder.create() - .setMode(HttpMultipartMode.BROWSER_COMPATIBLE) - .addBinaryBody("upload", inputStream) - .build()); + .setMode(HttpMultipartMode.BROWSER_COMPATIBLE) + .addBinaryBody("upload", inputStream) + .build()); try { HttpResponse response = client.execute(post); @@ -79,8 +114,17 @@ public class SeaweedWrite { return etag; } finally { - client.close(); + if (client instanceof Closeable) { + Closeable t = (Closeable) client; + t.close(); + } } } + + private static byte[] genCipherKey() { + byte[] b = new byte[32]; + random.nextBytes(b); + return b; + } } diff --git a/other/java/client/src/main/proto/filer.proto b/other/java/client/src/main/proto/filer.proto index 6cd4df6b4..37121f29c 100644 --- a/other/java/client/src/main/proto/filer.proto +++ b/other/java/client/src/main/proto/filer.proto @@ -2,6 +2,7 @@ syntax = "proto3"; package filer_pb; +option go_package = "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"; option java_package = "seaweedfs.client"; option java_outer_classname = "FilerProto"; @@ -12,7 +13,7 @@ service SeaweedFiler { rpc LookupDirectoryEntry (LookupDirectoryEntryRequest) returns (LookupDirectoryEntryResponse) { } - rpc ListEntries 
(ListEntriesRequest) returns (ListEntriesResponse) { + rpc ListEntries (ListEntriesRequest) returns (stream ListEntriesResponse) { } rpc CreateEntry (CreateEntryRequest) returns (CreateEntryResponse) { @@ -21,9 +22,15 @@ service SeaweedFiler { rpc UpdateEntry (UpdateEntryRequest) returns (UpdateEntryResponse) { } + rpc AppendToEntry (AppendToEntryRequest) returns (AppendToEntryResponse) { + } + rpc DeleteEntry (DeleteEntryRequest) returns (DeleteEntryResponse) { } + rpc AtomicRenameEntry (AtomicRenameEntryRequest) returns (AtomicRenameEntryResponse) { + } + rpc AssignVolume (AssignVolumeRequest) returns (AssignVolumeResponse) { } @@ -36,6 +43,21 @@ service SeaweedFiler { rpc Statistics (StatisticsRequest) returns (StatisticsResponse) { } + rpc GetFilerConfiguration (GetFilerConfigurationRequest) returns (GetFilerConfigurationResponse) { + } + + rpc SubscribeMetadata (SubscribeMetadataRequest) returns (stream SubscribeMetadataResponse) { + } + + rpc SubscribeLocalMetadata (SubscribeMetadataRequest) returns (stream SubscribeMetadataResponse) { + } + + rpc KeepConnected (stream KeepConnectedRequest) returns (stream KeepConnectedResponse) { + } + + rpc LocateBroker (LocateBrokerRequest) returns (LocateBrokerResponse) { + } + } ////////////////////////////////////////////////// @@ -58,7 +80,7 @@ message ListEntriesRequest { } message ListEntriesResponse { - repeated Entry entries = 1; + Entry entry = 1; } message Entry { @@ -69,19 +91,36 @@ message Entry { map extended = 5; } +message FullEntry { + string dir = 1; + Entry entry = 2; +} + message EventNotification { Entry old_entry = 1; Entry new_entry = 2; bool delete_chunks = 3; + string new_parent_path = 4; + bool is_from_other_cluster = 5; } message FileChunk { - string file_id = 1; + string file_id = 1; // to be deprecated int64 offset = 2; uint64 size = 3; int64 mtime = 4; string e_tag = 5; - string source_file_id = 6; + string source_file_id = 6; // to be deprecated + FileId fid = 7; + FileId source_fid = 8; + 
bytes cipher_key = 9; + bool is_compressed = 10; +} + +message FileId { + uint32 volume_id = 1; + uint64 file_key = 2; + fixed32 cookie = 3; } message FuseAttributes { @@ -98,32 +137,58 @@ message FuseAttributes { string user_name = 11; // for hdfs repeated string group_name = 12; // for hdfs string symlink_target = 13; + bytes md5 = 14; } message CreateEntryRequest { string directory = 1; Entry entry = 2; + bool o_excl = 3; + bool is_from_other_cluster = 4; } message CreateEntryResponse { + string error = 1; } message UpdateEntryRequest { string directory = 1; Entry entry = 2; + bool is_from_other_cluster = 3; } message UpdateEntryResponse { } +message AppendToEntryRequest { + string directory = 1; + string entry_name = 2; + repeated FileChunk chunks = 3; +} +message AppendToEntryResponse { +} + message DeleteEntryRequest { string directory = 1; string name = 2; // bool is_directory = 3; bool is_delete_data = 4; bool is_recursive = 5; + bool ignore_recursive_error = 6; + bool is_from_other_cluster = 7; } message DeleteEntryResponse { + string error = 1; +} + +message AtomicRenameEntryRequest { + string old_directory = 1; + string old_name = 2; + string new_directory = 3; + string new_name = 4; +} + +message AtomicRenameEntryResponse { } message AssignVolumeRequest { @@ -132,6 +197,7 @@ message AssignVolumeRequest { string replication = 3; int32 ttl_sec = 4; string data_center = 5; + string parent_path = 6; } message AssignVolumeResponse { @@ -139,6 +205,10 @@ message AssignVolumeResponse { string url = 2; string public_url = 3; int32 count = 4; + string auth = 5; + string collection = 6; + string replication = 7; + string error = 8; } message LookupVolumeRequest { @@ -177,3 +247,53 @@ message StatisticsResponse { uint64 used_size = 5; uint64 file_count = 6; } + +message GetFilerConfigurationRequest { +} +message GetFilerConfigurationResponse { + repeated string masters = 1; + string replication = 2; + string collection = 3; + uint32 max_mb = 4; + string 
dir_buckets = 5; + bool cipher = 7; +} + +message SubscribeMetadataRequest { + string client_name = 1; + string path_prefix = 2; + int64 since_ns = 3; +} +message SubscribeMetadataResponse { + string directory = 1; + EventNotification event_notification = 2; + int64 ts_ns = 3; +} + +message LogEntry { + int64 ts_ns = 1; + int32 partition_key_hash = 2; + bytes data = 3; +} + +message KeepConnectedRequest { + string name = 1; + uint32 grpc_port = 2; + repeated string resources = 3; +} +message KeepConnectedResponse { +} + +message LocateBrokerRequest { + string resource = 1; +} +message LocateBrokerResponse { + bool found = 1; + // if found, send the exact address + // if not found, send the full list of existing brokers + message Resource { + string grpc_addresses = 1; + int32 resource_count = 2; + } + repeated Resource resources = 2; +} diff --git a/other/java/client/src/test/java/seaweedfs/client/SeaweedCipherTest.java b/other/java/client/src/test/java/seaweedfs/client/SeaweedCipherTest.java new file mode 100644 index 000000000..7b5e53e19 --- /dev/null +++ b/other/java/client/src/test/java/seaweedfs/client/SeaweedCipherTest.java @@ -0,0 +1,42 @@ +package seaweedfs.client; + +import org.junit.Test; + +import java.util.Base64; + +import static seaweedfs.client.SeaweedCipher.decrypt; +import static seaweedfs.client.SeaweedCipher.encrypt; + +public class SeaweedCipherTest { + + @Test + public void testSameAsGoImplemnetation() throws Exception { + byte[] secretKey = "256-bit key for AES 256 GCM encr".getBytes(); + + String plainText = "Now we need to generate a 256-bit key for AES 256 GCM"; + + System.out.println("Original Text : " + plainText); + + byte[] cipherText = encrypt(plainText.getBytes(), secretKey); + System.out.println("Encrypted Text : " + Base64.getEncoder().encodeToString(cipherText)); + + byte[] decryptedText = decrypt(cipherText, secretKey); + System.out.println("DeCrypted Text : " + new String(decryptedText)); + } + + @Test + public void 
testEncryptDecrypt() throws Exception { + byte[] secretKey = SeaweedCipher.genCipherKey(); + + String plainText = "Now we need to generate a 256-bit key for AES 256 GCM"; + + System.out.println("Original Text : " + plainText); + + byte[] cipherText = encrypt(plainText.getBytes(), secretKey); + System.out.println("Encrypted Text : " + Base64.getEncoder().encodeToString(cipherText)); + + byte[] decryptedText = decrypt(cipherText, secretKey); + System.out.println("DeCrypted Text : " + new String(decryptedText)); + } + +} diff --git a/other/java/client/src/test/java/seaweedfs/client/SeaweedFilerTest.java b/other/java/client/src/test/java/seaweedfs/client/SeaweedFilerTest.java new file mode 100644 index 000000000..eaf17e5c6 --- /dev/null +++ b/other/java/client/src/test/java/seaweedfs/client/SeaweedFilerTest.java @@ -0,0 +1,23 @@ +package seaweedfs.client; + +import java.util.List; + +public class SeaweedFilerTest { + public static void main(String[] args){ + + FilerClient filerClient = new FilerClient("localhost", 18888); + + List entries = filerClient.listEntries("/"); + + for (FilerProto.Entry entry : entries) { + System.out.println(entry.toString()); + } + + filerClient.mkdirs("/new_folder", 0755); + filerClient.touch("/new_folder/new_empty_file", 0755); + filerClient.touch("/new_folder/new_empty_file2", 0755); + filerClient.rm("/new_folder/new_empty_file", false, true); + filerClient.rm("/new_folder", true, true); + + } +} diff --git a/other/java/hdfs2/dependency-reduced-pom.xml b/other/java/hdfs2/dependency-reduced-pom.xml new file mode 100644 index 000000000..53fb62186 --- /dev/null +++ b/other/java/hdfs2/dependency-reduced-pom.xml @@ -0,0 +1,133 @@ + + + + oss-parent + org.sonatype.oss + 9 + ../pom.xml/pom.xml + + 4.0.0 + com.github.chrislusf + seaweedfs-hadoop2-client + ${seaweedfs.client.version} + + + + maven-compiler-plugin + + 7 + 7 + + + + maven-shade-plugin + 3.2.1 + + + package + + shade + + + + + *:* + + META-INF/*.SF + META-INF/*.DSA + META-INF/*.RSA + 
org/slf4j/** + META-INF/maven/org.slf4j/** + + + + + + + + + com.google + shaded.com.google + + + io.grpc.internal + shaded.io.grpc.internal + + + org.apache.commons + shaded.org.apache.commons + + org.apache.hadoop + org.apache.log4j + + + + org.apache.http + shaded.org.apache.http + + + + + + + + maven-gpg-plugin + 1.5 + + + sign-artifacts + verify + + sign + + + + + + org.sonatype.plugins + nexus-staging-maven-plugin + 1.6.7 + true + + ossrh + https://oss.sonatype.org/ + true + + + + maven-source-plugin + 2.2.1 + + + attach-sources + + jar-no-fork + + + + + + maven-javadoc-plugin + 2.9.1 + + + attach-javadocs + + jar + + + + + + + + + ossrh + https://oss.sonatype.org/content/repositories/snapshots + + + + 1.2.9 + 2.9.2 + + diff --git a/other/java/hdfs2/pom.xml b/other/java/hdfs2/pom.xml new file mode 100644 index 000000000..0d5b138d5 --- /dev/null +++ b/other/java/hdfs2/pom.xml @@ -0,0 +1,163 @@ + + + 4.0.0 + + + 1.2.9 + 2.9.2 + + + com.github.chrislusf + seaweedfs-hadoop2-client + ${seaweedfs.client.version} + + + org.sonatype.oss + oss-parent + 9 + + + + + ossrh + https://oss.sonatype.org/content/repositories/snapshots + + + + + + org.apache.maven.plugins + maven-compiler-plugin + + 7 + 7 + + + + org.apache.maven.plugins + maven-shade-plugin + 3.2.1 + + + package + + shade + + + + + *:* + + META-INF/*.SF + META-INF/*.DSA + META-INF/*.RSA + org/slf4j/** + META-INF/maven/org.slf4j/** + + + + + + + + + com.google + shaded.com.google + + + io.grpc.internal + shaded.io.grpc.internal + + + org.apache.commons + shaded.org.apache.commons + + org.apache.hadoop + org.apache.log4j + + + + org.apache.http + shaded.org.apache.http + + + + + + + + org.apache.maven.plugins + maven-gpg-plugin + 1.5 + + + sign-artifacts + verify + + sign + + + + + + org.sonatype.plugins + nexus-staging-maven-plugin + 1.6.7 + true + + ossrh + https://oss.sonatype.org/ + true + + + + org.apache.maven.plugins + maven-source-plugin + 2.2.1 + + + attach-sources + + jar-no-fork + + + + + + 
org.apache.maven.plugins + maven-javadoc-plugin + 2.9.1 + + + attach-javadocs + + jar + + + + + + + + + + org.apache.hadoop + hadoop-client + ${hadoop.version} + + + com.github.chrislusf + seaweedfs-client + ${seaweedfs.client.version} + + + org.apache.hadoop + hadoop-common + ${hadoop.version} + + + + diff --git a/other/java/hdfs/src/main/java/seaweed/hdfs/ReadBuffer.java b/other/java/hdfs2/src/main/java/seaweed/hdfs/ReadBuffer.java similarity index 100% rename from other/java/hdfs/src/main/java/seaweed/hdfs/ReadBuffer.java rename to other/java/hdfs2/src/main/java/seaweed/hdfs/ReadBuffer.java diff --git a/other/java/hdfs/src/main/java/seaweed/hdfs/ReadBufferManager.java b/other/java/hdfs2/src/main/java/seaweed/hdfs/ReadBufferManager.java similarity index 100% rename from other/java/hdfs/src/main/java/seaweed/hdfs/ReadBufferManager.java rename to other/java/hdfs2/src/main/java/seaweed/hdfs/ReadBufferManager.java diff --git a/other/java/hdfs/src/main/java/seaweed/hdfs/ReadBufferStatus.java b/other/java/hdfs2/src/main/java/seaweed/hdfs/ReadBufferStatus.java similarity index 100% rename from other/java/hdfs/src/main/java/seaweed/hdfs/ReadBufferStatus.java rename to other/java/hdfs2/src/main/java/seaweed/hdfs/ReadBufferStatus.java diff --git a/other/java/hdfs/src/main/java/seaweed/hdfs/ReadBufferWorker.java b/other/java/hdfs2/src/main/java/seaweed/hdfs/ReadBufferWorker.java similarity index 100% rename from other/java/hdfs/src/main/java/seaweed/hdfs/ReadBufferWorker.java rename to other/java/hdfs2/src/main/java/seaweed/hdfs/ReadBufferWorker.java diff --git a/other/java/hdfs/src/main/java/seaweed/hdfs/SeaweedFileSystem.java b/other/java/hdfs2/src/main/java/seaweed/hdfs/SeaweedFileSystem.java similarity index 73% rename from other/java/hdfs/src/main/java/seaweed/hdfs/SeaweedFileSystem.java rename to other/java/hdfs2/src/main/java/seaweed/hdfs/SeaweedFileSystem.java index 2a0ef78af..d471d8440 100644 --- a/other/java/hdfs/src/main/java/seaweed/hdfs/SeaweedFileSystem.java 
+++ b/other/java/hdfs2/src/main/java/seaweed/hdfs/SeaweedFileSystem.java @@ -1,14 +1,7 @@ package seaweed.hdfs; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.FSDataInputStream; -import org.apache.hadoop.fs.FSDataOutputStream; -import org.apache.hadoop.fs.FileAlreadyExistsException; -import org.apache.hadoop.fs.FileStatus; -import org.apache.hadoop.fs.ParentNotDirectoryException; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.fs.UnsupportedFileSystemException; -import org.apache.hadoop.fs.XAttrSetFlag; +import org.apache.hadoop.fs.*; import org.apache.hadoop.fs.permission.AclEntry; import org.apache.hadoop.fs.permission.AclStatus; import org.apache.hadoop.fs.permission.FsPermission; @@ -73,6 +66,7 @@ public class SeaweedFileSystem extends org.apache.hadoop.fs.FileSystem { this.uri = uri; seaweedFileSystemStore = new SeaweedFileSystemStore(host, port); + } @Override @@ -86,6 +80,7 @@ public class SeaweedFileSystem extends org.apache.hadoop.fs.FileSystem { InputStream inputStream = seaweedFileSystemStore.openFileForRead(path, statistics, bufferSize); return new FSDataInputStream(inputStream); } catch (Exception ex) { + LOG.warn("open path: {} bufferSize:{}", path, bufferSize, ex); return null; } } @@ -103,10 +98,36 @@ public class SeaweedFileSystem extends org.apache.hadoop.fs.FileSystem { OutputStream outputStream = seaweedFileSystemStore.createFile(path, overwrite, permission, bufferSize, replicaPlacement); return new FSDataOutputStream(outputStream, statistics); } catch (Exception ex) { + LOG.warn("create path: {} bufferSize:{} blockSize:{}", path, bufferSize, blockSize, ex); return null; } } + /** + * {@inheritDoc} + * @throws FileNotFoundException if the parent directory is not present -or + * is not a directory. 
+ */ + @Override + public FSDataOutputStream createNonRecursive(Path path, + FsPermission permission, + EnumSet flags, + int bufferSize, + short replication, + long blockSize, + Progressable progress) throws IOException { + Path parent = path.getParent(); + if (parent != null) { + // expect this to raise an exception if there is no parent + if (!getFileStatus(parent).isDirectory()) { + throw new FileAlreadyExistsException("Not a directory: " + parent); + } + } + return create(path, permission, + flags.contains(CreateFlag.OVERWRITE), bufferSize, + replication, blockSize, progress); + } + @Override public FSDataOutputStream append(Path path, int bufferSize, Progressable progressable) throws IOException { @@ -117,6 +138,7 @@ public class SeaweedFileSystem extends org.apache.hadoop.fs.FileSystem { OutputStream outputStream = seaweedFileSystemStore.createFile(path, false, null, bufferSize, ""); return new FSDataOutputStream(outputStream, statistics); } catch (Exception ex) { + LOG.warn("append path: {} bufferSize:{}", path, bufferSize, ex); return null; } } @@ -206,8 +228,8 @@ public class SeaweedFileSystem extends org.apache.hadoop.fs.FileSystem { UserGroupInformation currentUser = UserGroupInformation.getCurrentUser(); return seaweedFileSystemStore.createDirectory(path, currentUser, - fsPermission == null ? FsPermission.getDirDefault() : fsPermission, - FsPermission.getUMask(getConf())); + fsPermission == null ? FsPermission.getDirDefault() : fsPermission, + FsPermission.getUMask(getConf())); } @@ -238,7 +260,7 @@ public class SeaweedFileSystem extends org.apache.hadoop.fs.FileSystem { */ @Override public void setOwner(Path path, final String owner, final String group) - throws IOException { + throws IOException { LOG.debug("setOwner path: {}", path); path = qualify(path); @@ -271,54 +293,55 @@ public class SeaweedFileSystem extends org.apache.hadoop.fs.FileSystem { /** * Concat existing files together. - * @param trg the path to the target destination. 
+ * + * @param trg the path to the target destination. * @param psrcs the paths to the sources to use for the concatenation. - * @throws IOException IO failure + * @throws IOException IO failure * @throws UnsupportedOperationException if the operation is unsupported - * (default). + * (default). */ @Override - public void concat(final Path trg, final Path [] psrcs) throws IOException { + public void concat(final Path trg, final Path[] psrcs) throws IOException { throw new UnsupportedOperationException("Not implemented by the " + - getClass().getSimpleName() + " FileSystem implementation"); + getClass().getSimpleName() + " FileSystem implementation"); } /** * Truncate the file in the indicated path to the indicated size. *
    - *
  • Fails if path is a directory.
  • - *
  • Fails if path does not exist.
  • - *
  • Fails if path is not closed.
  • - *
  • Fails if new size is greater than current size.
  • + *
  • Fails if path is a directory.
  • + *
  • Fails if path does not exist.
  • + *
  • Fails if path is not closed.
  • + *
  • Fails if new size is greater than current size.
  • *
- * @param f The path to the file to be truncated - * @param newLength The size the file is to be truncated to * + * @param f The path to the file to be truncated + * @param newLength The size the file is to be truncated to * @return true if the file has been truncated to the desired * newLength and is immediately available to be reused for * write operations such as append, or * false if a background process of adjusting the length of * the last block has been started, and clients should wait for it to * complete before proceeding with further file updates. - * @throws IOException IO failure + * @throws IOException IO failure * @throws UnsupportedOperationException if the operation is unsupported - * (default). + * (default). */ @Override public boolean truncate(Path f, long newLength) throws IOException { throw new UnsupportedOperationException("Not implemented by the " + - getClass().getSimpleName() + " FileSystem implementation"); + getClass().getSimpleName() + " FileSystem implementation"); } @Override public void createSymlink(final Path target, final Path link, final boolean createParent) throws AccessControlException, - FileAlreadyExistsException, FileNotFoundException, - ParentNotDirectoryException, UnsupportedFileSystemException, - IOException { + FileAlreadyExistsException, FileNotFoundException, + ParentNotDirectoryException, UnsupportedFileSystemException, + IOException { // Supporting filesystems should override this method throw new UnsupportedOperationException( - "Filesystem does not support symlinks!"); + "Filesystem does not support symlinks!"); } public boolean supportsSymlinks() { @@ -327,48 +350,51 @@ public class SeaweedFileSystem extends org.apache.hadoop.fs.FileSystem { /** * Create a snapshot. - * @param path The directory where snapshots will be taken. + * + * @param path The directory where snapshots will be taken. * @param snapshotName The name of the snapshot * @return the snapshot path. 
- * @throws IOException IO failure + * @throws IOException IO failure * @throws UnsupportedOperationException if the operation is unsupported */ @Override public Path createSnapshot(Path path, String snapshotName) - throws IOException { + throws IOException { throw new UnsupportedOperationException(getClass().getSimpleName() - + " doesn't support createSnapshot"); + + " doesn't support createSnapshot"); } /** * Rename a snapshot. - * @param path The directory path where the snapshot was taken + * + * @param path The directory path where the snapshot was taken * @param snapshotOldName Old name of the snapshot * @param snapshotNewName New name of the snapshot - * @throws IOException IO failure + * @throws IOException IO failure * @throws UnsupportedOperationException if the operation is unsupported - * (default outcome). + * (default outcome). */ @Override public void renameSnapshot(Path path, String snapshotOldName, String snapshotNewName) throws IOException { throw new UnsupportedOperationException(getClass().getSimpleName() - + " doesn't support renameSnapshot"); + + " doesn't support renameSnapshot"); } /** * Delete a snapshot of a directory. - * @param path The directory that the to-be-deleted snapshot belongs to + * + * @param path The directory that the to-be-deleted snapshot belongs to * @param snapshotName The name of the snapshot - * @throws IOException IO failure + * @throws IOException IO failure * @throws UnsupportedOperationException if the operation is unsupported - * (default outcome). + * (default outcome). */ @Override public void deleteSnapshot(Path path, String snapshotName) - throws IOException { + throws IOException { throw new UnsupportedOperationException(getClass().getSimpleName() - + " doesn't support deleteSnapshot"); + + " doesn't support deleteSnapshot"); } /** @@ -377,49 +403,49 @@ public class SeaweedFileSystem extends org.apache.hadoop.fs.FileSystem { * ACL entries that are not specified in this call are retained without * changes. 
(Modifications are merged into the current ACL.) * - * @param path Path to modify + * @param path Path to modify * @param aclSpec List<AclEntry> describing modifications - * @throws IOException if an ACL could not be modified + * @throws IOException if an ACL could not be modified * @throws UnsupportedOperationException if the operation is unsupported - * (default outcome). + * (default outcome). */ @Override public void modifyAclEntries(Path path, List aclSpec) - throws IOException { + throws IOException { throw new UnsupportedOperationException(getClass().getSimpleName() - + " doesn't support modifyAclEntries"); + + " doesn't support modifyAclEntries"); } /** * Removes ACL entries from files and directories. Other ACL entries are * retained. * - * @param path Path to modify + * @param path Path to modify * @param aclSpec List describing entries to remove - * @throws IOException if an ACL could not be modified + * @throws IOException if an ACL could not be modified * @throws UnsupportedOperationException if the operation is unsupported - * (default outcome). + * (default outcome). */ @Override public void removeAclEntries(Path path, List aclSpec) - throws IOException { + throws IOException { throw new UnsupportedOperationException(getClass().getSimpleName() - + " doesn't support removeAclEntries"); + + " doesn't support removeAclEntries"); } /** * Removes all default ACL entries from files and directories. * * @param path Path to modify - * @throws IOException if an ACL could not be modified + * @throws IOException if an ACL could not be modified * @throws UnsupportedOperationException if the operation is unsupported - * (default outcome). + * (default outcome). 
*/ @Override public void removeDefaultAcl(Path path) - throws IOException { + throws IOException { throw new UnsupportedOperationException(getClass().getSimpleName() - + " doesn't support removeDefaultAcl"); + + " doesn't support removeDefaultAcl"); } /** @@ -428,32 +454,32 @@ public class SeaweedFileSystem extends org.apache.hadoop.fs.FileSystem { * bits. * * @param path Path to modify - * @throws IOException if an ACL could not be removed + * @throws IOException if an ACL could not be removed * @throws UnsupportedOperationException if the operation is unsupported - * (default outcome). + * (default outcome). */ @Override public void removeAcl(Path path) - throws IOException { + throws IOException { throw new UnsupportedOperationException(getClass().getSimpleName() - + " doesn't support removeAcl"); + + " doesn't support removeAcl"); } /** * Fully replaces ACL of files and directories, discarding all existing * entries. * - * @param path Path to modify + * @param path Path to modify * @param aclSpec List describing modifications, which must include entries - * for user, group, and others for compatibility with permission bits. - * @throws IOException if an ACL could not be modified + * for user, group, and others for compatibility with permission bits. + * @throws IOException if an ACL could not be modified * @throws UnsupportedOperationException if the operation is unsupported - * (default outcome). + * (default outcome). 
*/ @Override public void setAcl(Path path, List aclSpec) throws IOException { throw new UnsupportedOperationException(getClass().getSimpleName() - + " doesn't support setAcl"); + + " doesn't support setAcl"); } /** @@ -461,14 +487,14 @@ public class SeaweedFileSystem extends org.apache.hadoop.fs.FileSystem { * * @param path Path to get * @return AclStatus describing the ACL of the file or directory - * @throws IOException if an ACL could not be read + * @throws IOException if an ACL could not be read * @throws UnsupportedOperationException if the operation is unsupported - * (default outcome). + * (default outcome). */ @Override public AclStatus getAclStatus(Path path) throws IOException { throw new UnsupportedOperationException(getClass().getSimpleName() - + " doesn't support getAclStatus"); + + " doesn't support getAclStatus"); } /** @@ -478,19 +504,19 @@ public class SeaweedFileSystem extends org.apache.hadoop.fs.FileSystem { *

* Refer to the HDFS extended attributes user documentation for details. * - * @param path Path to modify - * @param name xattr name. + * @param path Path to modify + * @param name xattr name. * @param value xattr value. - * @param flag xattr set flag - * @throws IOException IO failure + * @param flag xattr set flag + * @throws IOException IO failure * @throws UnsupportedOperationException if the operation is unsupported - * (default outcome). + * (default outcome). */ @Override public void setXAttr(Path path, String name, byte[] value, EnumSet flag) throws IOException { throw new UnsupportedOperationException(getClass().getSimpleName() - + " doesn't support setXAttr"); + + " doesn't support setXAttr"); } /** @@ -503,14 +529,14 @@ public class SeaweedFileSystem extends org.apache.hadoop.fs.FileSystem { * @param path Path to get extended attribute * @param name xattr name. * @return byte[] xattr value. - * @throws IOException IO failure + * @throws IOException IO failure * @throws UnsupportedOperationException if the operation is unsupported - * (default outcome). + * (default outcome). */ @Override public byte[] getXAttr(Path path, String name) throws IOException { throw new UnsupportedOperationException(getClass().getSimpleName() - + " doesn't support getXAttr"); + + " doesn't support getXAttr"); } /** @@ -522,14 +548,14 @@ public class SeaweedFileSystem extends org.apache.hadoop.fs.FileSystem { * * @param path Path to get extended attributes * @return Map describing the XAttrs of the file or directory - * @throws IOException IO failure + * @throws IOException IO failure * @throws UnsupportedOperationException if the operation is unsupported - * (default outcome). + * (default outcome). 
*/ @Override public Map getXAttrs(Path path) throws IOException { throw new UnsupportedOperationException(getClass().getSimpleName() - + " doesn't support getXAttrs"); + + " doesn't support getXAttrs"); } /** @@ -539,18 +565,18 @@ public class SeaweedFileSystem extends org.apache.hadoop.fs.FileSystem { *

* Refer to the HDFS extended attributes user documentation for details. * - * @param path Path to get extended attributes + * @param path Path to get extended attributes * @param names XAttr names. * @return Map describing the XAttrs of the file or directory - * @throws IOException IO failure + * @throws IOException IO failure * @throws UnsupportedOperationException if the operation is unsupported - * (default outcome). + * (default outcome). */ @Override public Map getXAttrs(Path path, List names) - throws IOException { + throws IOException { throw new UnsupportedOperationException(getClass().getSimpleName() - + " doesn't support getXAttrs"); + + " doesn't support getXAttrs"); } /** @@ -562,14 +588,14 @@ public class SeaweedFileSystem extends org.apache.hadoop.fs.FileSystem { * * @param path Path to get extended attributes * @return List{@literal } of the XAttr names of the file or directory - * @throws IOException IO failure + * @throws IOException IO failure * @throws UnsupportedOperationException if the operation is unsupported - * (default outcome). + * (default outcome). */ @Override public List listXAttrs(Path path) throws IOException { throw new UnsupportedOperationException(getClass().getSimpleName() - + " doesn't support listXAttrs"); + + " doesn't support listXAttrs"); } /** @@ -581,14 +607,14 @@ public class SeaweedFileSystem extends org.apache.hadoop.fs.FileSystem { * * @param path Path to remove extended attribute * @param name xattr name - * @throws IOException IO failure + * @throws IOException IO failure * @throws UnsupportedOperationException if the operation is unsupported - * (default outcome). + * (default outcome). 
*/ @Override public void removeXAttr(Path path, String name) throws IOException { throw new UnsupportedOperationException(getClass().getSimpleName() - + " doesn't support removeXAttr"); + + " doesn't support removeXAttr"); } } diff --git a/other/java/hdfs/src/main/java/seaweed/hdfs/SeaweedFileSystemStore.java b/other/java/hdfs2/src/main/java/seaweed/hdfs/SeaweedFileSystemStore.java similarity index 86% rename from other/java/hdfs/src/main/java/seaweed/hdfs/SeaweedFileSystemStore.java rename to other/java/hdfs2/src/main/java/seaweed/hdfs/SeaweedFileSystemStore.java index 27678e615..9617a38be 100644 --- a/other/java/hdfs/src/main/java/seaweed/hdfs/SeaweedFileSystemStore.java +++ b/other/java/hdfs2/src/main/java/seaweed/hdfs/SeaweedFileSystemStore.java @@ -64,6 +64,16 @@ public class SeaweedFileSystemStore { public FileStatus[] listEntries(final Path path) { LOG.debug("listEntries path: {}", path); + FileStatus pathStatus = getFileStatus(path); + + if (pathStatus == null) { + return new FileStatus[0]; + } + + if (!pathStatus.isDirectory()) { + return new FileStatus[]{pathStatus}; + } + List fileStatuses = new ArrayList(); List entries = filerClient.listEntries(path.toUri().getPath()); @@ -74,7 +84,9 @@ public class SeaweedFileSystemStore { fileStatuses.add(fileStatus); } + LOG.debug("listEntries path: {} size {}", fileStatuses, fileStatuses.size()); return fileStatuses.toArray(new FileStatus[0]); + } public FileStatus getFileStatus(final Path path) { @@ -106,7 +118,7 @@ public class SeaweedFileSystemStore { } } - return filerClient.deleteEntry(getParentDirectory(path), path.getName(), true, recursive); + return filerClient.deleteEntry(getParentDirectory(path), path.getName(), true, recursive, true); } private FileStatus doGetFileStatus(Path path, FilerProto.Entry entry) { @@ -137,41 +149,13 @@ public class SeaweedFileSystemStore { if (source.isRoot()) { return; } - LOG.warn("rename lookupEntry source: {}", source); + LOG.info("rename source: {} destination:{}", 
source, destination); FilerProto.Entry entry = lookupEntry(source); if (entry == null) { LOG.warn("rename non-existing source: {}", source); return; } - LOG.warn("rename moveEntry source: {}", source); - moveEntry(source.getParent(), entry, destination); - } - - private boolean moveEntry(Path oldParent, FilerProto.Entry entry, Path destination) { - - LOG.debug("moveEntry: {}/{} => {}", oldParent, entry.getName(), destination); - - FilerProto.Entry.Builder newEntry = entry.toBuilder().setName(destination.getName()); - boolean isDirectoryCreated = filerClient.createEntry(getParentDirectory(destination), newEntry.build()); - - if (!isDirectoryCreated) { - return false; - } - - if (entry.getIsDirectory()) { - Path entryPath = new Path(oldParent, entry.getName()); - List entries = filerClient.listEntries(entryPath.toUri().getPath()); - for (FilerProto.Entry ent : entries) { - boolean isSucess = moveEntry(entryPath, ent, new Path(destination, ent.getName())); - if (!isSucess) { - return false; - } - } - } - - return filerClient.deleteEntry( - oldParent.toUri().getPath(), entry.getName(), false, false); - + filerClient.mv(source.toUri().getPath(), destination.toUri().getPath()); } public OutputStream createFile(final Path path, @@ -199,10 +183,10 @@ public class SeaweedFileSystemStore { entry = FilerProto.Entry.newBuilder(); entry.mergeFrom(existingEntry); entry.getAttributesBuilder().setMtime(now); + LOG.debug("createFile merged entry path:{} entry:{} from:{}", path, entry, existingEntry); + writePosition = SeaweedRead.totalSize(existingEntry.getChunksList()); + replication = existingEntry.getAttributes().getReplication(); } - LOG.debug("createFile merged entry path:{} entry:{} from:{}", path, entry, existingEntry); - writePosition = SeaweedRead.totalSize(existingEntry.getChunksList()); - replication = existingEntry.getAttributes().getReplication(); } if (entry == null) { entry = FilerProto.Entry.newBuilder() @@ -294,4 +278,5 @@ public class SeaweedFileSystemStore { 
filerClient.updateEntry(getParentDirectory(path), entryBuilder.build()); } + } diff --git a/other/java/hdfs/src/main/java/seaweed/hdfs/SeaweedInputStream.java b/other/java/hdfs2/src/main/java/seaweed/hdfs/SeaweedInputStream.java similarity index 100% rename from other/java/hdfs/src/main/java/seaweed/hdfs/SeaweedInputStream.java rename to other/java/hdfs2/src/main/java/seaweed/hdfs/SeaweedInputStream.java diff --git a/other/java/hdfs2/src/main/java/seaweed/hdfs/SeaweedOutputStream.java b/other/java/hdfs2/src/main/java/seaweed/hdfs/SeaweedOutputStream.java new file mode 100644 index 000000000..e08843caa --- /dev/null +++ b/other/java/hdfs2/src/main/java/seaweed/hdfs/SeaweedOutputStream.java @@ -0,0 +1,280 @@ +package seaweed.hdfs; + +// adapted from org.apache.hadoop.fs.azurebfs.services.AbfsOutputStream + +import com.google.common.base.Preconditions; +import org.apache.hadoop.fs.FSExceptionMessages; +import org.apache.hadoop.fs.Path; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import seaweedfs.client.FilerGrpcClient; +import seaweedfs.client.FilerProto; +import seaweedfs.client.SeaweedWrite; + +import java.io.IOException; +import java.io.InterruptedIOException; +import java.io.OutputStream; +import java.util.concurrent.*; + +import static seaweed.hdfs.SeaweedFileSystemStore.getParentDirectory; + +public class SeaweedOutputStream extends OutputStream { + + private static final Logger LOG = LoggerFactory.getLogger(SeaweedOutputStream.class); + + private final FilerGrpcClient filerGrpcClient; + private final Path path; + private final int bufferSize; + private final int maxConcurrentRequestCount; + private final ThreadPoolExecutor threadExecutor; + private final ExecutorCompletionService completionService; + private FilerProto.Entry.Builder entry; + private long position; + private boolean closed; + private boolean supportFlush = true; + private volatile IOException lastError; + private long lastFlushOffset; + private long lastTotalAppendOffset = 0; + 
private byte[] buffer; + private int bufferIndex; + private ConcurrentLinkedDeque writeOperations; + private String replication = "000"; + + public SeaweedOutputStream(FilerGrpcClient filerGrpcClient, final Path path, FilerProto.Entry.Builder entry, + final long position, final int bufferSize, final String replication) { + this.filerGrpcClient = filerGrpcClient; + this.replication = replication; + this.path = path; + this.position = position; + this.closed = false; + this.lastError = null; + this.lastFlushOffset = 0; + this.bufferSize = bufferSize; + this.buffer = new byte[bufferSize]; + this.bufferIndex = 0; + this.writeOperations = new ConcurrentLinkedDeque<>(); + + this.maxConcurrentRequestCount = 4 * Runtime.getRuntime().availableProcessors(); + + this.threadExecutor + = new ThreadPoolExecutor(maxConcurrentRequestCount, + maxConcurrentRequestCount, + 10L, + TimeUnit.SECONDS, + new LinkedBlockingQueue()); + this.completionService = new ExecutorCompletionService<>(this.threadExecutor); + + this.entry = entry; + + } + + private synchronized void flushWrittenBytesToServiceInternal(final long offset) throws IOException { + try { + SeaweedWrite.writeMeta(filerGrpcClient, getParentDirectory(path), entry); + } catch (Exception ex) { + throw new IOException(ex); + } + this.lastFlushOffset = offset; + } + + @Override + public void write(final int byteVal) throws IOException { + write(new byte[]{(byte) (byteVal & 0xFF)}); + } + + @Override + public synchronized void write(final byte[] data, final int off, final int length) + throws IOException { + maybeThrowLastError(); + + Preconditions.checkArgument(data != null, "null data"); + + if (off < 0 || length < 0 || length > data.length - off) { + throw new IndexOutOfBoundsException(); + } + + int currentOffset = off; + int writableBytes = bufferSize - bufferIndex; + int numberOfBytesToWrite = length; + + while (numberOfBytesToWrite > 0) { + if (writableBytes <= numberOfBytesToWrite) { + System.arraycopy(data, currentOffset, 
buffer, bufferIndex, writableBytes); + bufferIndex += writableBytes; + writeCurrentBufferToService(); + currentOffset += writableBytes; + numberOfBytesToWrite = numberOfBytesToWrite - writableBytes; + } else { + System.arraycopy(data, currentOffset, buffer, bufferIndex, numberOfBytesToWrite); + bufferIndex += numberOfBytesToWrite; + numberOfBytesToWrite = 0; + } + + writableBytes = bufferSize - bufferIndex; + } + } + + /** + * Flushes this output stream and forces any buffered output bytes to be + * written out. If any data remains in the payload it is committed to the + * service. Data is queued for writing and forced out to the service + * before the call returns. + */ + @Override + public void flush() throws IOException { + if (supportFlush) { + flushInternalAsync(); + } + } + + /** + * Force all data in the output stream to be written to Azure storage. + * Wait to return until this is complete. Close the access to the stream and + * shutdown the upload thread pool. + * If the blob was created, its lease will be released. + * Any error encountered caught in threads and stored will be rethrown here + * after cleanup. 
+ */ + @Override + public synchronized void close() throws IOException { + if (closed) { + return; + } + + LOG.debug("close path: {}", path); + try { + flushInternal(); + threadExecutor.shutdown(); + } finally { + lastError = new IOException(FSExceptionMessages.STREAM_IS_CLOSED); + buffer = null; + bufferIndex = 0; + closed = true; + writeOperations.clear(); + if (!threadExecutor.isShutdown()) { + threadExecutor.shutdownNow(); + } + } + } + + private synchronized void writeCurrentBufferToService() throws IOException { + if (bufferIndex == 0) { + return; + } + + final byte[] bytes = buffer; + final int bytesLength = bufferIndex; + + buffer = new byte[bufferSize]; + bufferIndex = 0; + final long offset = position; + position += bytesLength; + + if (threadExecutor.getQueue().size() >= maxConcurrentRequestCount * 2) { + waitForTaskToComplete(); + } + + final Future job = completionService.submit(new Callable() { + @Override + public Void call() throws Exception { + // originally: client.append(path, offset, bytes, 0, bytesLength); + SeaweedWrite.writeData(entry, replication, filerGrpcClient, offset, bytes, 0, bytesLength); + return null; + } + }); + + writeOperations.add(new WriteOperation(job, offset, bytesLength)); + + // Try to shrink the queue + shrinkWriteOperationQueue(); + } + + private void waitForTaskToComplete() throws IOException { + boolean completed; + for (completed = false; completionService.poll() != null; completed = true) { + // keep polling until there is no data + } + + if (!completed) { + try { + completionService.take(); + } catch (InterruptedException e) { + lastError = (IOException) new InterruptedIOException(e.toString()).initCause(e); + throw lastError; + } + } + } + + private void maybeThrowLastError() throws IOException { + if (lastError != null) { + throw lastError; + } + } + + /** + * Try to remove the completed write operations from the beginning of write + * operation FIFO queue. 
+ */ + private synchronized void shrinkWriteOperationQueue() throws IOException { + try { + while (writeOperations.peek() != null && writeOperations.peek().task.isDone()) { + writeOperations.peek().task.get(); + lastTotalAppendOffset += writeOperations.peek().length; + writeOperations.remove(); + } + } catch (Exception e) { + lastError = new IOException(e); + throw lastError; + } + } + + private synchronized void flushInternal() throws IOException { + maybeThrowLastError(); + writeCurrentBufferToService(); + flushWrittenBytesToService(); + } + + private synchronized void flushInternalAsync() throws IOException { + maybeThrowLastError(); + writeCurrentBufferToService(); + flushWrittenBytesToServiceAsync(); + } + + private synchronized void flushWrittenBytesToService() throws IOException { + for (WriteOperation writeOperation : writeOperations) { + try { + writeOperation.task.get(); + } catch (Exception ex) { + lastError = new IOException(ex); + throw lastError; + } + } + LOG.debug("flushWrittenBytesToService: {} position:{}", path, position); + flushWrittenBytesToServiceInternal(position); + } + + private synchronized void flushWrittenBytesToServiceAsync() throws IOException { + shrinkWriteOperationQueue(); + + if (this.lastTotalAppendOffset > this.lastFlushOffset) { + this.flushWrittenBytesToServiceInternal(this.lastTotalAppendOffset); + } + } + + private static class WriteOperation { + private final Future task; + private final long startOffset; + private final long length; + + WriteOperation(final Future task, final long startOffset, final long length) { + Preconditions.checkNotNull(task, "task"); + Preconditions.checkArgument(startOffset >= 0, "startOffset"); + Preconditions.checkArgument(length >= 0, "length"); + + this.task = task; + this.startOffset = startOffset; + this.length = length; + } + } + +} diff --git a/other/java/hdfs3/dependency-reduced-pom.xml b/other/java/hdfs3/dependency-reduced-pom.xml new file mode 100644 index 000000000..f5d14acdd --- 
/dev/null +++ b/other/java/hdfs3/dependency-reduced-pom.xml @@ -0,0 +1,133 @@ + + + + oss-parent + org.sonatype.oss + 9 + ../pom.xml/pom.xml + + 4.0.0 + com.github.chrislusf + seaweedfs-hadoop3-client + ${seaweedfs.client.version} + + + + maven-compiler-plugin + + 7 + 7 + + + + maven-shade-plugin + 3.2.1 + + + package + + shade + + + + + *:* + + META-INF/*.SF + META-INF/*.DSA + META-INF/*.RSA + org/slf4j/** + META-INF/maven/org.slf4j/** + + + + + + + + + com.google + shaded.com.google + + + io.grpc.internal + shaded.io.grpc.internal + + + org.apache.commons + shaded.org.apache.commons + + org.apache.hadoop + org.apache.log4j + + + + org.apache.http + shaded.org.apache.http + + + + + + + + maven-gpg-plugin + 1.5 + + + sign-artifacts + verify + + sign + + + + + + org.sonatype.plugins + nexus-staging-maven-plugin + 1.6.7 + true + + ossrh + https://oss.sonatype.org/ + true + + + + maven-source-plugin + 2.2.1 + + + attach-sources + + jar-no-fork + + + + + + maven-javadoc-plugin + 2.9.1 + + + attach-javadocs + + jar + + + + + + + + + ossrh + https://oss.sonatype.org/content/repositories/snapshots + + + + 1.2.9 + 3.1.1 + + diff --git a/other/java/hdfs/pom.xml b/other/java/hdfs3/pom.xml similarity index 94% rename from other/java/hdfs/pom.xml rename to other/java/hdfs3/pom.xml index a0cab8752..8c88b60df 100644 --- a/other/java/hdfs/pom.xml +++ b/other/java/hdfs3/pom.xml @@ -5,12 +5,12 @@ 4.0.0 - 1.0.5 + 1.2.9 3.1.1 com.github.chrislusf - seaweedfs-hadoop-client + seaweedfs-hadoop3-client ${seaweedfs.client.version} @@ -79,6 +79,10 @@ org.apache.log4j + + org.apache.http + shaded.org.apache.http + diff --git a/other/java/hdfs3/src/main/java/seaweed/hdfs/ReadBuffer.java b/other/java/hdfs3/src/main/java/seaweed/hdfs/ReadBuffer.java new file mode 100644 index 000000000..926d0b83b --- /dev/null +++ b/other/java/hdfs3/src/main/java/seaweed/hdfs/ReadBuffer.java @@ -0,0 +1,137 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license 
agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package seaweed.hdfs; + +import java.util.concurrent.CountDownLatch; + +class ReadBuffer { + + private SeaweedInputStream stream; + private long offset; // offset within the file for the buffer + private int length; // actual length, set after the buffer is filles + private int requestedLength; // requested length of the read + private byte[] buffer; // the buffer itself + private int bufferindex = -1; // index in the buffers array in Buffer manager + private ReadBufferStatus status; // status of the buffer + private CountDownLatch latch = null; // signaled when the buffer is done reading, so any client + // waiting on this buffer gets unblocked + + // fields to help with eviction logic + private long timeStamp = 0; // tick at which buffer became available to read + private boolean isFirstByteConsumed = false; + private boolean isLastByteConsumed = false; + private boolean isAnyByteConsumed = false; + + public SeaweedInputStream getStream() { + return stream; + } + + public void setStream(SeaweedInputStream stream) { + this.stream = stream; + } + + public long getOffset() { + return offset; + } + + public void setOffset(long offset) { + this.offset = offset; + } + + public int getLength() { + return length; + } + + public void setLength(int length) { + 
this.length = length; + } + + public int getRequestedLength() { + return requestedLength; + } + + public void setRequestedLength(int requestedLength) { + this.requestedLength = requestedLength; + } + + public byte[] getBuffer() { + return buffer; + } + + public void setBuffer(byte[] buffer) { + this.buffer = buffer; + } + + public int getBufferindex() { + return bufferindex; + } + + public void setBufferindex(int bufferindex) { + this.bufferindex = bufferindex; + } + + public ReadBufferStatus getStatus() { + return status; + } + + public void setStatus(ReadBufferStatus status) { + this.status = status; + } + + public CountDownLatch getLatch() { + return latch; + } + + public void setLatch(CountDownLatch latch) { + this.latch = latch; + } + + public long getTimeStamp() { + return timeStamp; + } + + public void setTimeStamp(long timeStamp) { + this.timeStamp = timeStamp; + } + + public boolean isFirstByteConsumed() { + return isFirstByteConsumed; + } + + public void setFirstByteConsumed(boolean isFirstByteConsumed) { + this.isFirstByteConsumed = isFirstByteConsumed; + } + + public boolean isLastByteConsumed() { + return isLastByteConsumed; + } + + public void setLastByteConsumed(boolean isLastByteConsumed) { + this.isLastByteConsumed = isLastByteConsumed; + } + + public boolean isAnyByteConsumed() { + return isAnyByteConsumed; + } + + public void setAnyByteConsumed(boolean isAnyByteConsumed) { + this.isAnyByteConsumed = isAnyByteConsumed; + } + +} diff --git a/other/java/hdfs3/src/main/java/seaweed/hdfs/ReadBufferManager.java b/other/java/hdfs3/src/main/java/seaweed/hdfs/ReadBufferManager.java new file mode 100644 index 000000000..5b1e21529 --- /dev/null +++ b/other/java/hdfs3/src/main/java/seaweed/hdfs/ReadBufferManager.java @@ -0,0 +1,394 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package seaweed.hdfs; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.util.Collection; +import java.util.LinkedList; +import java.util.Queue; +import java.util.Stack; +import java.util.concurrent.CountDownLatch; + +/** + * The Read Buffer Manager for Rest AbfsClient. + */ +final class ReadBufferManager { + private static final Logger LOGGER = LoggerFactory.getLogger(ReadBufferManager.class); + + private static final int NUM_BUFFERS = 16; + private static final int BLOCK_SIZE = 4 * 1024 * 1024; + private static final int NUM_THREADS = 8; + private static final int THRESHOLD_AGE_MILLISECONDS = 3000; // have to see if 3 seconds is a good threshold + + private Thread[] threads = new Thread[NUM_THREADS]; + private byte[][] buffers; // array of byte[] buffers, to hold the data that is read + private Stack freeList = new Stack<>(); // indices in buffers[] array that are available + + private Queue readAheadQueue = new LinkedList<>(); // queue of requests that are not picked up by any worker thread yet + private LinkedList inProgressList = new LinkedList<>(); // requests being processed by worker threads + private LinkedList completedReadList = new LinkedList<>(); // buffers available for reading + private static final ReadBufferManager BUFFER_MANAGER; // singleton, initialized in static initialization block + + static { + BUFFER_MANAGER = new ReadBufferManager(); + BUFFER_MANAGER.init(); + } + + static ReadBufferManager getBufferManager() { + return BUFFER_MANAGER; + } + + private void init() { + buffers = new byte[NUM_BUFFERS][]; + for (int i = 0; i < NUM_BUFFERS; i++) { + buffers[i] = new byte[BLOCK_SIZE]; // same 
buffers are reused. The byte array never goes back to GC + freeList.add(i); + } + for (int i = 0; i < NUM_THREADS; i++) { + Thread t = new Thread(new ReadBufferWorker(i)); + t.setDaemon(true); + threads[i] = t; + t.setName("SeaweedFS-prefetch-" + i); + t.start(); + } + ReadBufferWorker.UNLEASH_WORKERS.countDown(); + } + + // hide instance constructor + private ReadBufferManager() { + } + + + /* + * + * SeaweedInputStream-facing methods + * + */ + + + /** + * {@link SeaweedInputStream} calls this method to queue read-aheads. + * + * @param stream The {@link SeaweedInputStream} for which to do the read-ahead + * @param requestedOffset The offset in the file which shoukd be read + * @param requestedLength The length to read + */ + void queueReadAhead(final SeaweedInputStream stream, final long requestedOffset, final int requestedLength) { + if (LOGGER.isTraceEnabled()) { + LOGGER.trace("Start Queueing readAhead for {} offset {} length {}", + stream.getPath(), requestedOffset, requestedLength); + } + ReadBuffer buffer; + synchronized (this) { + if (isAlreadyQueued(stream, requestedOffset)) { + return; // already queued, do not queue again + } + if (freeList.isEmpty() && !tryEvict()) { + return; // no buffers available, cannot queue anything + } + + buffer = new ReadBuffer(); + buffer.setStream(stream); + buffer.setOffset(requestedOffset); + buffer.setLength(0); + buffer.setRequestedLength(requestedLength); + buffer.setStatus(ReadBufferStatus.NOT_AVAILABLE); + buffer.setLatch(new CountDownLatch(1)); + + Integer bufferIndex = freeList.pop(); // will return a value, since we have checked size > 0 already + + buffer.setBuffer(buffers[bufferIndex]); + buffer.setBufferindex(bufferIndex); + readAheadQueue.add(buffer); + notifyAll(); + } + if (LOGGER.isTraceEnabled()) { + LOGGER.trace("Done q-ing readAhead for file {} offset {} buffer idx {}", + stream.getPath(), requestedOffset, buffer.getBufferindex()); + } + } + + + /** + * {@link SeaweedInputStream} calls this method read 
any bytes already available in a buffer (thereby saving a + * remote read). This returns the bytes if the data already exists in buffer. If there is a buffer that is reading + * the requested offset, then this method blocks until that read completes. If the data is queued in a read-ahead + * but not picked up by a worker thread yet, then it cancels that read-ahead and reports cache miss. This is because + * depending on worker thread availability, the read-ahead may take a while - the calling thread can do it's own + * read to get the data faster (copmared to the read waiting in queue for an indeterminate amount of time). + * + * @param stream the file to read bytes for + * @param position the offset in the file to do a read for + * @param length the length to read + * @param buffer the buffer to read data into. Note that the buffer will be written into from offset 0. + * @return the number of bytes read + */ + int getBlock(final SeaweedInputStream stream, final long position, final int length, final byte[] buffer) { + // not synchronized, so have to be careful with locking + if (LOGGER.isTraceEnabled()) { + LOGGER.trace("getBlock for file {} position {} thread {}", + stream.getPath(), position, Thread.currentThread().getName()); + } + + waitForProcess(stream, position); + + int bytesRead = 0; + synchronized (this) { + bytesRead = getBlockFromCompletedQueue(stream, position, length, buffer); + } + if (bytesRead > 0) { + if (LOGGER.isTraceEnabled()) { + LOGGER.trace("Done read from Cache for {} position {} length {}", + stream.getPath(), position, bytesRead); + } + return bytesRead; + } + + // otherwise, just say we got nothing - calling thread can do its own read + return 0; + } + + /* + * + * Internal methods + * + */ + + private void waitForProcess(final SeaweedInputStream stream, final long position) { + ReadBuffer readBuf; + synchronized (this) { + clearFromReadAheadQueue(stream, position); + readBuf = getFromList(inProgressList, stream, position); + } + if 
(readBuf != null) { // if in in-progress queue, then block for it + try { + if (LOGGER.isTraceEnabled()) { + LOGGER.trace("got a relevant read buffer for file {} offset {} buffer idx {}", + stream.getPath(), readBuf.getOffset(), readBuf.getBufferindex()); + } + readBuf.getLatch().await(); // blocking wait on the caller stream's thread + // Note on correctness: readBuf gets out of inProgressList only in 1 place: after worker thread + // is done processing it (in doneReading). There, the latch is set after removing the buffer from + // inProgressList. So this latch is safe to be outside the synchronized block. + // Putting it in synchronized would result in a deadlock, since this thread would be holding the lock + // while waiting, so no one will be able to change any state. If this becomes more complex in the future, + // then the latch cane be removed and replaced with wait/notify whenever inProgressList is touched. + } catch (InterruptedException ex) { + Thread.currentThread().interrupt(); + } + if (LOGGER.isTraceEnabled()) { + LOGGER.trace("latch done for file {} buffer idx {} length {}", + stream.getPath(), readBuf.getBufferindex(), readBuf.getLength()); + } + } + } + + /** + * If any buffer in the completedlist can be reclaimed then reclaim it and return the buffer to free list. + * The objective is to find just one buffer - there is no advantage to evicting more than one. 
+ * + * @return whether the eviction succeeeded - i.e., were we able to free up one buffer + */ + private synchronized boolean tryEvict() { + ReadBuffer nodeToEvict = null; + if (completedReadList.size() <= 0) { + return false; // there are no evict-able buffers + } + + // first, try buffers where all bytes have been consumed (approximated as first and last bytes consumed) + for (ReadBuffer buf : completedReadList) { + if (buf.isFirstByteConsumed() && buf.isLastByteConsumed()) { + nodeToEvict = buf; + break; + } + } + if (nodeToEvict != null) { + return evict(nodeToEvict); + } + + // next, try buffers where any bytes have been consumed (may be a bad idea? have to experiment and see) + for (ReadBuffer buf : completedReadList) { + if (buf.isAnyByteConsumed()) { + nodeToEvict = buf; + break; + } + } + + if (nodeToEvict != null) { + return evict(nodeToEvict); + } + + // next, try any old nodes that have not been consumed + long earliestBirthday = Long.MAX_VALUE; + for (ReadBuffer buf : completedReadList) { + if (buf.getTimeStamp() < earliestBirthday) { + nodeToEvict = buf; + earliestBirthday = buf.getTimeStamp(); + } + } + if ((currentTimeMillis() - earliestBirthday > THRESHOLD_AGE_MILLISECONDS) && (nodeToEvict != null)) { + return evict(nodeToEvict); + } + + // nothing can be evicted + return false; + } + + private boolean evict(final ReadBuffer buf) { + freeList.push(buf.getBufferindex()); + completedReadList.remove(buf); + if (LOGGER.isTraceEnabled()) { + LOGGER.trace("Evicting buffer idx {}; was used for file {} offset {} length {}", + buf.getBufferindex(), buf.getStream().getPath(), buf.getOffset(), buf.getLength()); + } + return true; + } + + private boolean isAlreadyQueued(final SeaweedInputStream stream, final long requestedOffset) { + // returns true if any part of the buffer is already queued + return (isInList(readAheadQueue, stream, requestedOffset) + || isInList(inProgressList, stream, requestedOffset) + || isInList(completedReadList, stream, 
requestedOffset)); + } + + private boolean isInList(final Collection list, final SeaweedInputStream stream, final long requestedOffset) { + return (getFromList(list, stream, requestedOffset) != null); + } + + private ReadBuffer getFromList(final Collection list, final SeaweedInputStream stream, final long requestedOffset) { + for (ReadBuffer buffer : list) { + if (buffer.getStream() == stream) { + if (buffer.getStatus() == ReadBufferStatus.AVAILABLE + && requestedOffset >= buffer.getOffset() + && requestedOffset < buffer.getOffset() + buffer.getLength()) { + return buffer; + } else if (requestedOffset >= buffer.getOffset() + && requestedOffset < buffer.getOffset() + buffer.getRequestedLength()) { + return buffer; + } + } + } + return null; + } + + private void clearFromReadAheadQueue(final SeaweedInputStream stream, final long requestedOffset) { + ReadBuffer buffer = getFromList(readAheadQueue, stream, requestedOffset); + if (buffer != null) { + readAheadQueue.remove(buffer); + notifyAll(); // lock is held in calling method + freeList.push(buffer.getBufferindex()); + } + } + + private int getBlockFromCompletedQueue(final SeaweedInputStream stream, final long position, final int length, + final byte[] buffer) { + ReadBuffer buf = getFromList(completedReadList, stream, position); + if (buf == null || position >= buf.getOffset() + buf.getLength()) { + return 0; + } + int cursor = (int) (position - buf.getOffset()); + int availableLengthInBuffer = buf.getLength() - cursor; + int lengthToCopy = Math.min(length, availableLengthInBuffer); + System.arraycopy(buf.getBuffer(), cursor, buffer, 0, lengthToCopy); + if (cursor == 0) { + buf.setFirstByteConsumed(true); + } + if (cursor + lengthToCopy == buf.getLength()) { + buf.setLastByteConsumed(true); + } + buf.setAnyByteConsumed(true); + return lengthToCopy; + } + + /* + * + * ReadBufferWorker-thread-facing methods + * + */ + + /** + * ReadBufferWorker thread calls this to get the next buffer that it should work on. 
+ * + * @return {@link ReadBuffer} + * @throws InterruptedException if thread is interrupted + */ + ReadBuffer getNextBlockToRead() throws InterruptedException { + ReadBuffer buffer = null; + synchronized (this) { + //buffer = readAheadQueue.take(); // blocking method + while (readAheadQueue.size() == 0) { + wait(); + } + buffer = readAheadQueue.remove(); + notifyAll(); + if (buffer == null) { + return null; // should never happen + } + buffer.setStatus(ReadBufferStatus.READING_IN_PROGRESS); + inProgressList.add(buffer); + } + if (LOGGER.isTraceEnabled()) { + LOGGER.trace("ReadBufferWorker picked file {} for offset {}", + buffer.getStream().getPath(), buffer.getOffset()); + } + return buffer; + } + + /** + * ReadBufferWorker thread calls this method to post completion. + * + * @param buffer the buffer whose read was completed + * @param result the {@link ReadBufferStatus} after the read operation in the worker thread + * @param bytesActuallyRead the number of bytes that the worker thread was actually able to read + */ + void doneReading(final ReadBuffer buffer, final ReadBufferStatus result, final int bytesActuallyRead) { + if (LOGGER.isTraceEnabled()) { + LOGGER.trace("ReadBufferWorker completed file {} for offset {} bytes {}", + buffer.getStream().getPath(), buffer.getOffset(), bytesActuallyRead); + } + synchronized (this) { + inProgressList.remove(buffer); + if (result == ReadBufferStatus.AVAILABLE && bytesActuallyRead > 0) { + buffer.setStatus(ReadBufferStatus.AVAILABLE); + buffer.setTimeStamp(currentTimeMillis()); + buffer.setLength(bytesActuallyRead); + completedReadList.add(buffer); + } else { + freeList.push(buffer.getBufferindex()); + // buffer should go out of scope after the end of the calling method in ReadBufferWorker, and eligible for GC + } + } + //outside the synchronized, since anyone receiving a wake-up from the latch must see safe-published results + buffer.getLatch().countDown(); // wake up waiting threads (if any) + } + + /** + * Similar to 
System.currentTimeMillis, except implemented with System.nanoTime().
+ * System.currentTimeMillis can go backwards when system clock is changed (e.g., with NTP time synchronization),
+ * making it unsuitable for measuring time intervals. nanoTime is strictly monotonically increasing per CPU core.
+ * Note: it is not monotonic across Sockets, and even within a CPU, it's only the
+ * more recent parts which share a clock across all cores.
+ *
+ * @return current time in milliseconds
+ */
+ private long currentTimeMillis() {
+ return System.nanoTime() / 1000 / 1000;
+ }
+}
diff --git a/other/java/hdfs3/src/main/java/seaweed/hdfs/ReadBufferStatus.java b/other/java/hdfs3/src/main/java/seaweed/hdfs/ReadBufferStatus.java
new file mode 100644
index 000000000..d63674977
--- /dev/null
+++ b/other/java/hdfs3/src/main/java/seaweed/hdfs/ReadBufferStatus.java
@@ -0,0 +1,29 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package seaweed.hdfs;
+
+/**
+ * The status of a read-ahead {@link ReadBuffer}.
+ */
+public enum ReadBufferStatus {
+ NOT_AVAILABLE, // buffers sitting in the readAheadQueue have this status
+ READING_IN_PROGRESS, // reading is in progress on this buffer.
Buffer should be in inProgressList + AVAILABLE, // data is available in buffer. It should be in completedList + READ_FAILED // read completed, but failed. +} diff --git a/other/java/hdfs3/src/main/java/seaweed/hdfs/ReadBufferWorker.java b/other/java/hdfs3/src/main/java/seaweed/hdfs/ReadBufferWorker.java new file mode 100644 index 000000000..6ffbc4644 --- /dev/null +++ b/other/java/hdfs3/src/main/java/seaweed/hdfs/ReadBufferWorker.java @@ -0,0 +1,70 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package seaweed.hdfs; + +import java.util.concurrent.CountDownLatch; + +class ReadBufferWorker implements Runnable { + + protected static final CountDownLatch UNLEASH_WORKERS = new CountDownLatch(1); + private int id; + + ReadBufferWorker(final int id) { + this.id = id; + } + + /** + * return the ID of ReadBufferWorker. + */ + public int getId() { + return this.id; + } + + /** + * Waits until a buffer becomes available in ReadAheadQueue. + * Once a buffer becomes available, reads the file specified in it and then posts results back to buffer manager. + * Rinse and repeat. Forever. 
+ */ + public void run() { + try { + UNLEASH_WORKERS.await(); + } catch (InterruptedException ex) { + Thread.currentThread().interrupt(); + } + ReadBufferManager bufferManager = ReadBufferManager.getBufferManager(); + ReadBuffer buffer; + while (true) { + try { + buffer = bufferManager.getNextBlockToRead(); // blocks, until a buffer is available for this thread + } catch (InterruptedException ex) { + Thread.currentThread().interrupt(); + return; + } + if (buffer != null) { + try { + // do the actual read, from the file. + int bytesRead = buffer.getStream().readRemote(buffer.getOffset(), buffer.getBuffer(), 0, buffer.getRequestedLength()); + bufferManager.doneReading(buffer, ReadBufferStatus.AVAILABLE, bytesRead); // post result back to ReadBufferManager + } catch (Exception ex) { + bufferManager.doneReading(buffer, ReadBufferStatus.READ_FAILED, 0); + } + } + } + } +} diff --git a/other/java/hdfs3/src/main/java/seaweed/hdfs/SeaweedFileSystem.java b/other/java/hdfs3/src/main/java/seaweed/hdfs/SeaweedFileSystem.java new file mode 100644 index 000000000..c12da8261 --- /dev/null +++ b/other/java/hdfs3/src/main/java/seaweed/hdfs/SeaweedFileSystem.java @@ -0,0 +1,620 @@ +package seaweed.hdfs; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.*; +import org.apache.hadoop.fs.permission.AclEntry; +import org.apache.hadoop.fs.permission.AclStatus; +import org.apache.hadoop.fs.permission.FsPermission; +import org.apache.hadoop.security.AccessControlException; +import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.util.Progressable; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.FileNotFoundException; +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.net.URI; +import java.util.EnumSet; +import java.util.List; +import java.util.Map; + +import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY; + +public class 
SeaweedFileSystem extends FileSystem { + + public static final int FS_SEAWEED_DEFAULT_PORT = 8888; + public static final String FS_SEAWEED_FILER_HOST = "fs.seaweed.filer.host"; + public static final String FS_SEAWEED_FILER_PORT = "fs.seaweed.filer.port"; + + private static final Logger LOG = LoggerFactory.getLogger(SeaweedFileSystem.class); + private static int BUFFER_SIZE = 16 * 1024 * 1024; + + private URI uri; + private Path workingDirectory = new Path("/"); + private SeaweedFileSystemStore seaweedFileSystemStore; + + public URI getUri() { + return uri; + } + + public String getScheme() { + return "seaweedfs"; + } + + @Override + public void initialize(URI uri, Configuration conf) throws IOException { // get + super.initialize(uri, conf); + + // get host information from uri (overrides info in conf) + String host = uri.getHost(); + host = (host == null) ? conf.get(FS_SEAWEED_FILER_HOST, "localhost") : host; + if (host == null) { + throw new IOException("Invalid host specified"); + } + conf.set(FS_SEAWEED_FILER_HOST, host); + + // get port information from uri, (overrides info in conf) + int port = uri.getPort(); + port = (port == -1) ? 
FS_SEAWEED_DEFAULT_PORT : port; + conf.setInt(FS_SEAWEED_FILER_PORT, port); + + conf.setInt(IO_FILE_BUFFER_SIZE_KEY, BUFFER_SIZE); + + setConf(conf); + this.uri = uri; + + seaweedFileSystemStore = new SeaweedFileSystemStore(host, port); + + } + + @Override + public FSDataInputStream open(Path path, int bufferSize) throws IOException { + + LOG.debug("open path: {} bufferSize:{}", path, bufferSize); + + path = qualify(path); + + try { + InputStream inputStream = seaweedFileSystemStore.openFileForRead(path, statistics, bufferSize); + return new FSDataInputStream(inputStream); + } catch (Exception ex) { + LOG.warn("open path: {} bufferSize:{}", path, bufferSize, ex); + return null; + } + } + + @Override + public FSDataOutputStream create(Path path, FsPermission permission, final boolean overwrite, final int bufferSize, + final short replication, final long blockSize, final Progressable progress) throws IOException { + + LOG.debug("create path: {} bufferSize:{} blockSize:{}", path, bufferSize, blockSize); + + path = qualify(path); + + try { + String replicaPlacement = String.format("%03d", replication - 1); + OutputStream outputStream = seaweedFileSystemStore.createFile(path, overwrite, permission, bufferSize, replicaPlacement); + return new FSDataOutputStream(outputStream, statistics); + } catch (Exception ex) { + LOG.warn("create path: {} bufferSize:{} blockSize:{}", path, bufferSize, blockSize, ex); + return null; + } + } + + /** + * {@inheritDoc} + * @throws FileNotFoundException if the parent directory is not present -or + * is not a directory. 
+ */ + @Override + public FSDataOutputStream createNonRecursive(Path path, + FsPermission permission, + EnumSet flags, + int bufferSize, + short replication, + long blockSize, + Progressable progress) throws IOException { + Path parent = path.getParent(); + if (parent != null) { + // expect this to raise an exception if there is no parent + if (!getFileStatus(parent).isDirectory()) { + throw new FileAlreadyExistsException("Not a directory: " + parent); + } + } + return create(path, permission, + flags.contains(CreateFlag.OVERWRITE), bufferSize, + replication, blockSize, progress); + } + + @Override + public FSDataOutputStream append(Path path, int bufferSize, Progressable progressable) throws IOException { + + LOG.debug("append path: {} bufferSize:{}", path, bufferSize); + + path = qualify(path); + try { + OutputStream outputStream = seaweedFileSystemStore.createFile(path, false, null, bufferSize, ""); + return new FSDataOutputStream(outputStream, statistics); + } catch (Exception ex) { + LOG.warn("append path: {} bufferSize:{}", path, bufferSize, ex); + return null; + } + } + + @Override + public boolean rename(Path src, Path dst) { + + LOG.debug("rename path: {} => {}", src, dst); + + if (src.isRoot()) { + return false; + } + + if (src.equals(dst)) { + return true; + } + FileStatus dstFileStatus = getFileStatus(dst); + + String sourceFileName = src.getName(); + Path adjustedDst = dst; + + if (dstFileStatus != null) { + if (!dstFileStatus.isDirectory()) { + return false; + } + adjustedDst = new Path(dst, sourceFileName); + } + + Path qualifiedSrcPath = qualify(src); + Path qualifiedDstPath = qualify(adjustedDst); + + seaweedFileSystemStore.rename(qualifiedSrcPath, qualifiedDstPath); + return true; + } + + @Override + public boolean delete(Path path, boolean recursive) { + + LOG.debug("delete path: {} recursive:{}", path, recursive); + + path = qualify(path); + + FileStatus fileStatus = getFileStatus(path); + + if (fileStatus == null) { + return true; + } + + 
return seaweedFileSystemStore.deleteEntries(path, fileStatus.isDirectory(), recursive); + + } + + @Override + public FileStatus[] listStatus(Path path) throws IOException { + + LOG.debug("listStatus path: {}", path); + + path = qualify(path); + + return seaweedFileSystemStore.listEntries(path); + } + + @Override + public Path getWorkingDirectory() { + return workingDirectory; + } + + @Override + public void setWorkingDirectory(Path path) { + if (path.isAbsolute()) { + workingDirectory = path; + } else { + workingDirectory = new Path(workingDirectory, path); + } + } + + @Override + public boolean mkdirs(Path path, FsPermission fsPermission) throws IOException { + + LOG.debug("mkdirs path: {}", path); + + path = qualify(path); + + FileStatus fileStatus = getFileStatus(path); + + if (fileStatus == null) { + + UserGroupInformation currentUser = UserGroupInformation.getCurrentUser(); + return seaweedFileSystemStore.createDirectory(path, currentUser, + fsPermission == null ? FsPermission.getDirDefault() : fsPermission, + FsPermission.getUMask(getConf())); + + } + + if (fileStatus.isDirectory()) { + return true; + } else { + throw new FileAlreadyExistsException("Path is a file: " + path); + } + } + + @Override + public FileStatus getFileStatus(Path path) { + + LOG.debug("getFileStatus path: {}", path); + + path = qualify(path); + + return seaweedFileSystemStore.getFileStatus(path); + } + + /** + * Set owner of a path (i.e. a file or a directory). + * The parameters owner and group cannot both be null. + * + * @param path The path + * @param owner If it is null, the original username remains unchanged. + * @param group If it is null, the original groupname remains unchanged. + */ + @Override + public void setOwner(Path path, final String owner, final String group) + throws IOException { + LOG.debug("setOwner path: {}", path); + path = qualify(path); + + seaweedFileSystemStore.setOwner(path, owner, group); + } + + + /** + * Set permission of a path. 
+ * + * @param path The path + * @param permission Access permission + */ + @Override + public void setPermission(Path path, final FsPermission permission) throws IOException { + LOG.debug("setPermission path: {}", path); + + if (permission == null) { + throw new IllegalArgumentException("The permission can't be null"); + } + + path = qualify(path); + + seaweedFileSystemStore.setPermission(path, permission); + } + + Path qualify(Path path) { + return path.makeQualified(uri, workingDirectory); + } + + /** + * Concat existing files together. + * + * @param trg the path to the target destination. + * @param psrcs the paths to the sources to use for the concatenation. + * @throws IOException IO failure + * @throws UnsupportedOperationException if the operation is unsupported + * (default). + */ + @Override + public void concat(final Path trg, final Path[] psrcs) throws IOException { + throw new UnsupportedOperationException("Not implemented by the " + + getClass().getSimpleName() + " FileSystem implementation"); + } + + /** + * Truncate the file in the indicated path to the indicated size. + *
 * <ul>
+ * <li>Fails if path is a directory.</li>
+ * <li>Fails if path does not exist.</li>
+ * <li>Fails if path is not closed.</li>
+ * <li>Fails if new size is greater than current size.</li>
+ * </ul>
+ * + * @param f The path to the file to be truncated + * @param newLength The size the file is to be truncated to + * @return true if the file has been truncated to the desired + * newLength and is immediately available to be reused for + * write operations such as append, or + * false if a background process of adjusting the length of + * the last block has been started, and clients should wait for it to + * complete before proceeding with further file updates. + * @throws IOException IO failure + * @throws UnsupportedOperationException if the operation is unsupported + * (default). + */ + @Override + public boolean truncate(Path f, long newLength) throws IOException { + throw new UnsupportedOperationException("Not implemented by the " + + getClass().getSimpleName() + " FileSystem implementation"); + } + + @Override + public void createSymlink(final Path target, final Path link, + final boolean createParent) throws AccessControlException, + FileAlreadyExistsException, FileNotFoundException, + ParentNotDirectoryException, UnsupportedFileSystemException, + IOException { + // Supporting filesystems should override this method + throw new UnsupportedOperationException( + "Filesystem does not support symlinks!"); + } + + public boolean supportsSymlinks() { + return false; + } + + /** + * Create a snapshot. + * + * @param path The directory where snapshots will be taken. + * @param snapshotName The name of the snapshot + * @return the snapshot path. + * @throws IOException IO failure + * @throws UnsupportedOperationException if the operation is unsupported + */ + @Override + public Path createSnapshot(Path path, String snapshotName) + throws IOException { + throw new UnsupportedOperationException(getClass().getSimpleName() + + " doesn't support createSnapshot"); + } + + /** + * Rename a snapshot. 
+ * + * @param path The directory path where the snapshot was taken + * @param snapshotOldName Old name of the snapshot + * @param snapshotNewName New name of the snapshot + * @throws IOException IO failure + * @throws UnsupportedOperationException if the operation is unsupported + * (default outcome). + */ + @Override + public void renameSnapshot(Path path, String snapshotOldName, + String snapshotNewName) throws IOException { + throw new UnsupportedOperationException(getClass().getSimpleName() + + " doesn't support renameSnapshot"); + } + + /** + * Delete a snapshot of a directory. + * + * @param path The directory that the to-be-deleted snapshot belongs to + * @param snapshotName The name of the snapshot + * @throws IOException IO failure + * @throws UnsupportedOperationException if the operation is unsupported + * (default outcome). + */ + @Override + public void deleteSnapshot(Path path, String snapshotName) + throws IOException { + throw new UnsupportedOperationException(getClass().getSimpleName() + + " doesn't support deleteSnapshot"); + } + + /** + * Modifies ACL entries of files and directories. This method can add new ACL + * entries or modify the permissions on existing ACL entries. All existing + * ACL entries that are not specified in this call are retained without + * changes. (Modifications are merged into the current ACL.) + * + * @param path Path to modify + * @param aclSpec List<AclEntry> describing modifications + * @throws IOException if an ACL could not be modified + * @throws UnsupportedOperationException if the operation is unsupported + * (default outcome). + */ + @Override + public void modifyAclEntries(Path path, List aclSpec) + throws IOException { + throw new UnsupportedOperationException(getClass().getSimpleName() + + " doesn't support modifyAclEntries"); + } + + /** + * Removes ACL entries from files and directories. Other ACL entries are + * retained. 
+ * + * @param path Path to modify + * @param aclSpec List describing entries to remove + * @throws IOException if an ACL could not be modified + * @throws UnsupportedOperationException if the operation is unsupported + * (default outcome). + */ + @Override + public void removeAclEntries(Path path, List aclSpec) + throws IOException { + throw new UnsupportedOperationException(getClass().getSimpleName() + + " doesn't support removeAclEntries"); + } + + /** + * Removes all default ACL entries from files and directories. + * + * @param path Path to modify + * @throws IOException if an ACL could not be modified + * @throws UnsupportedOperationException if the operation is unsupported + * (default outcome). + */ + @Override + public void removeDefaultAcl(Path path) + throws IOException { + throw new UnsupportedOperationException(getClass().getSimpleName() + + " doesn't support removeDefaultAcl"); + } + + /** + * Removes all but the base ACL entries of files and directories. The entries + * for user, group, and others are retained for compatibility with permission + * bits. + * + * @param path Path to modify + * @throws IOException if an ACL could not be removed + * @throws UnsupportedOperationException if the operation is unsupported + * (default outcome). + */ + @Override + public void removeAcl(Path path) + throws IOException { + throw new UnsupportedOperationException(getClass().getSimpleName() + + " doesn't support removeAcl"); + } + + /** + * Fully replaces ACL of files and directories, discarding all existing + * entries. + * + * @param path Path to modify + * @param aclSpec List describing modifications, which must include entries + * for user, group, and others for compatibility with permission bits. + * @throws IOException if an ACL could not be modified + * @throws UnsupportedOperationException if the operation is unsupported + * (default outcome). 
+ */ + @Override + public void setAcl(Path path, List aclSpec) throws IOException { + throw new UnsupportedOperationException(getClass().getSimpleName() + + " doesn't support setAcl"); + } + + /** + * Gets the ACL of a file or directory. + * + * @param path Path to get + * @return AclStatus describing the ACL of the file or directory + * @throws IOException if an ACL could not be read + * @throws UnsupportedOperationException if the operation is unsupported + * (default outcome). + */ + @Override + public AclStatus getAclStatus(Path path) throws IOException { + throw new UnsupportedOperationException(getClass().getSimpleName() + + " doesn't support getAclStatus"); + } + + /** + * Set an xattr of a file or directory. + * The name must be prefixed with the namespace followed by ".". For example, + * "user.attr". + *
<p>
+ * Refer to the HDFS extended attributes user documentation for details. + * + * @param path Path to modify + * @param name xattr name. + * @param value xattr value. + * @param flag xattr set flag + * @throws IOException IO failure + * @throws UnsupportedOperationException if the operation is unsupported + * (default outcome). + */ + @Override + public void setXAttr(Path path, String name, byte[] value, + EnumSet flag) throws IOException { + throw new UnsupportedOperationException(getClass().getSimpleName() + + " doesn't support setXAttr"); + } + + /** + * Get an xattr name and value for a file or directory. + * The name must be prefixed with the namespace followed by ".". For example, + * "user.attr". + *
<p>
+ * Refer to the HDFS extended attributes user documentation for details. + * + * @param path Path to get extended attribute + * @param name xattr name. + * @return byte[] xattr value. + * @throws IOException IO failure + * @throws UnsupportedOperationException if the operation is unsupported + * (default outcome). + */ + @Override + public byte[] getXAttr(Path path, String name) throws IOException { + throw new UnsupportedOperationException(getClass().getSimpleName() + + " doesn't support getXAttr"); + } + + /** + * Get all of the xattr name/value pairs for a file or directory. + * Only those xattrs which the logged-in user has permissions to view + * are returned. + *
<p>
+ * Refer to the HDFS extended attributes user documentation for details. + * + * @param path Path to get extended attributes + * @return Map describing the XAttrs of the file or directory + * @throws IOException IO failure + * @throws UnsupportedOperationException if the operation is unsupported + * (default outcome). + */ + @Override + public Map getXAttrs(Path path) throws IOException { + throw new UnsupportedOperationException(getClass().getSimpleName() + + " doesn't support getXAttrs"); + } + + /** + * Get all of the xattrs name/value pairs for a file or directory. + * Only those xattrs which the logged-in user has permissions to view + * are returned. + *
<p>
+ * Refer to the HDFS extended attributes user documentation for details. + * + * @param path Path to get extended attributes + * @param names XAttr names. + * @return Map describing the XAttrs of the file or directory + * @throws IOException IO failure + * @throws UnsupportedOperationException if the operation is unsupported + * (default outcome). + */ + @Override + public Map getXAttrs(Path path, List names) + throws IOException { + throw new UnsupportedOperationException(getClass().getSimpleName() + + " doesn't support getXAttrs"); + } + + /** + * Get all of the xattr names for a file or directory. + * Only those xattr names which the logged-in user has permissions to view + * are returned. + *
<p>
+ * Refer to the HDFS extended attributes user documentation for details. + * + * @param path Path to get extended attributes + * @return List{@literal } of the XAttr names of the file or directory + * @throws IOException IO failure + * @throws UnsupportedOperationException if the operation is unsupported + * (default outcome). + */ + @Override + public List listXAttrs(Path path) throws IOException { + throw new UnsupportedOperationException(getClass().getSimpleName() + + " doesn't support listXAttrs"); + } + + /** + * Remove an xattr of a file or directory. + * The name must be prefixed with the namespace followed by ".". For example, + * "user.attr". + *
<p>
+ * Refer to the HDFS extended attributes user documentation for details. + * + * @param path Path to remove extended attribute + * @param name xattr name + * @throws IOException IO failure + * @throws UnsupportedOperationException if the operation is unsupported + * (default outcome). + */ + @Override + public void removeXAttr(Path path, String name) throws IOException { + throw new UnsupportedOperationException(getClass().getSimpleName() + + " doesn't support removeXAttr"); + } + +} diff --git a/other/java/hdfs3/src/main/java/seaweed/hdfs/SeaweedFileSystemStore.java b/other/java/hdfs3/src/main/java/seaweed/hdfs/SeaweedFileSystemStore.java new file mode 100644 index 000000000..9617a38be --- /dev/null +++ b/other/java/hdfs3/src/main/java/seaweed/hdfs/SeaweedFileSystemStore.java @@ -0,0 +1,282 @@ +package seaweed.hdfs; + +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.permission.FsPermission; +import org.apache.hadoop.security.UserGroupInformation; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import seaweedfs.client.FilerClient; +import seaweedfs.client.FilerGrpcClient; +import seaweedfs.client.FilerProto; +import seaweedfs.client.SeaweedRead; + +import java.io.FileNotFoundException; +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; + +public class SeaweedFileSystemStore { + + private static final Logger LOG = LoggerFactory.getLogger(SeaweedFileSystemStore.class); + + private FilerGrpcClient filerGrpcClient; + private FilerClient filerClient; + + public SeaweedFileSystemStore(String host, int port) { + int grpcPort = 10000 + port; + filerGrpcClient = new FilerGrpcClient(host, grpcPort); + filerClient = new FilerClient(filerGrpcClient); + } + + public static String getParentDirectory(Path path) { + return path.isRoot() ? 
"/" : path.getParent().toUri().getPath(); + } + + static int permissionToMode(FsPermission permission, boolean isDirectory) { + int p = permission.toShort(); + if (isDirectory) { + p = p | 1 << 31; + } + return p; + } + + public boolean createDirectory(final Path path, UserGroupInformation currentUser, + final FsPermission permission, final FsPermission umask) { + + LOG.debug("createDirectory path: {} permission: {} umask: {}", + path, + permission, + umask); + + return filerClient.mkdirs( + path.toUri().getPath(), + permissionToMode(permission, true), + currentUser.getUserName(), + currentUser.getGroupNames() + ); + } + + public FileStatus[] listEntries(final Path path) { + LOG.debug("listEntries path: {}", path); + + FileStatus pathStatus = getFileStatus(path); + + if (pathStatus == null) { + return new FileStatus[0]; + } + + if (!pathStatus.isDirectory()) { + return new FileStatus[]{pathStatus}; + } + + List fileStatuses = new ArrayList(); + + List entries = filerClient.listEntries(path.toUri().getPath()); + + for (FilerProto.Entry entry : entries) { + + FileStatus fileStatus = doGetFileStatus(new Path(path, entry.getName()), entry); + + fileStatuses.add(fileStatus); + } + LOG.debug("listEntries path: {} size {}", fileStatuses, fileStatuses.size()); + return fileStatuses.toArray(new FileStatus[0]); + + } + + public FileStatus getFileStatus(final Path path) { + + FilerProto.Entry entry = lookupEntry(path); + if (entry == null) { + return null; + } + LOG.debug("doGetFileStatus path:{} entry:{}", path, entry); + + FileStatus fileStatus = doGetFileStatus(path, entry); + return fileStatus; + } + + public boolean deleteEntries(final Path path, boolean isDirectory, boolean recursive) { + LOG.debug("deleteEntries path: {} isDirectory {} recursive: {}", + path, + String.valueOf(isDirectory), + String.valueOf(recursive)); + + if (path.isRoot()) { + return true; + } + + if (recursive && isDirectory) { + List entries = filerClient.listEntries(path.toUri().getPath()); + for 
(FilerProto.Entry entry : entries) { + deleteEntries(new Path(path, entry.getName()), entry.getIsDirectory(), true); + } + } + + return filerClient.deleteEntry(getParentDirectory(path), path.getName(), true, recursive, true); + } + + private FileStatus doGetFileStatus(Path path, FilerProto.Entry entry) { + FilerProto.FuseAttributes attributes = entry.getAttributes(); + long length = SeaweedRead.totalSize(entry.getChunksList()); + boolean isDir = entry.getIsDirectory(); + int block_replication = 1; + int blocksize = 512; + long modification_time = attributes.getMtime() * 1000; // milliseconds + long access_time = 0; + FsPermission permission = FsPermission.createImmutable((short) attributes.getFileMode()); + String owner = attributes.getUserName(); + String group = attributes.getGroupNameCount() > 0 ? attributes.getGroupName(0) : ""; + return new FileStatus(length, isDir, block_replication, blocksize, + modification_time, access_time, permission, owner, group, null, path); + } + + private FilerProto.Entry lookupEntry(Path path) { + + return filerClient.lookupEntry(getParentDirectory(path), path.getName()); + + } + + public void rename(Path source, Path destination) { + + LOG.debug("rename source: {} destination:{}", source, destination); + + if (source.isRoot()) { + return; + } + LOG.info("rename source: {} destination:{}", source, destination); + FilerProto.Entry entry = lookupEntry(source); + if (entry == null) { + LOG.warn("rename non-existing source: {}", source); + return; + } + filerClient.mv(source.toUri().getPath(), destination.toUri().getPath()); + } + + public OutputStream createFile(final Path path, + final boolean overwrite, + FsPermission permission, + int bufferSize, + String replication) throws IOException { + + permission = permission == null ? 
FsPermission.getFileDefault() : permission; + + LOG.debug("createFile path: {} overwrite: {} permission: {}", + path, + overwrite, + permission.toString()); + + UserGroupInformation userGroupInformation = UserGroupInformation.getCurrentUser(); + long now = System.currentTimeMillis() / 1000L; + + FilerProto.Entry.Builder entry = null; + long writePosition = 0; + if (!overwrite) { + FilerProto.Entry existingEntry = lookupEntry(path); + LOG.debug("createFile merged entry path:{} existingEntry:{}", path, existingEntry); + if (existingEntry != null) { + entry = FilerProto.Entry.newBuilder(); + entry.mergeFrom(existingEntry); + entry.getAttributesBuilder().setMtime(now); + LOG.debug("createFile merged entry path:{} entry:{} from:{}", path, entry, existingEntry); + writePosition = SeaweedRead.totalSize(existingEntry.getChunksList()); + replication = existingEntry.getAttributes().getReplication(); + } + } + if (entry == null) { + entry = FilerProto.Entry.newBuilder() + .setName(path.getName()) + .setIsDirectory(false) + .setAttributes(FilerProto.FuseAttributes.newBuilder() + .setFileMode(permissionToMode(permission, false)) + .setReplication(replication) + .setCrtime(now) + .setMtime(now) + .setUserName(userGroupInformation.getUserName()) + .clearGroupName() + .addAllGroupName(Arrays.asList(userGroupInformation.getGroupNames())) + ); + } + + return new SeaweedOutputStream(filerGrpcClient, path, entry, writePosition, bufferSize, replication); + + } + + public InputStream openFileForRead(final Path path, FileSystem.Statistics statistics, + int bufferSize) throws IOException { + + LOG.debug("openFileForRead path:{} bufferSize:{}", path, bufferSize); + + int readAheadQueueDepth = 2; + FilerProto.Entry entry = lookupEntry(path); + + if (entry == null) { + throw new FileNotFoundException("read non-exist file " + path); + } + + return new SeaweedInputStream(filerGrpcClient, + statistics, + path.toUri().getPath(), + entry, + bufferSize, + readAheadQueueDepth); + } + + public void 
setOwner(Path path, String owner, String group) { + + LOG.debug("setOwner path:{} owner:{} group:{}", path, owner, group); + + FilerProto.Entry entry = lookupEntry(path); + if (entry == null) { + LOG.debug("setOwner path:{} entry:{}", path, entry); + return; + } + + FilerProto.Entry.Builder entryBuilder = entry.toBuilder(); + FilerProto.FuseAttributes.Builder attributesBuilder = entry.getAttributes().toBuilder(); + + if (owner != null) { + attributesBuilder.setUserName(owner); + } + if (group != null) { + attributesBuilder.clearGroupName(); + attributesBuilder.addGroupName(group); + } + + entryBuilder.setAttributes(attributesBuilder); + + LOG.debug("setOwner path:{} entry:{}", path, entryBuilder); + + filerClient.updateEntry(getParentDirectory(path), entryBuilder.build()); + + } + + public void setPermission(Path path, FsPermission permission) { + + LOG.debug("setPermission path:{} permission:{}", path, permission); + + FilerProto.Entry entry = lookupEntry(path); + if (entry == null) { + LOG.debug("setPermission path:{} entry:{}", path, entry); + return; + } + + FilerProto.Entry.Builder entryBuilder = entry.toBuilder(); + FilerProto.FuseAttributes.Builder attributesBuilder = entry.getAttributes().toBuilder(); + + attributesBuilder.setFileMode(permissionToMode(permission, entry.getIsDirectory())); + + entryBuilder.setAttributes(attributesBuilder); + + LOG.debug("setPermission path:{} entry:{}", path, entryBuilder); + + filerClient.updateEntry(getParentDirectory(path), entryBuilder.build()); + + } + +} diff --git a/other/java/hdfs3/src/main/java/seaweed/hdfs/SeaweedInputStream.java b/other/java/hdfs3/src/main/java/seaweed/hdfs/SeaweedInputStream.java new file mode 100644 index 000000000..90c14c772 --- /dev/null +++ b/other/java/hdfs3/src/main/java/seaweed/hdfs/SeaweedInputStream.java @@ -0,0 +1,371 @@ +package seaweed.hdfs; + +// based on org.apache.hadoop.fs.azurebfs.services.AbfsInputStream + +import com.google.common.base.Preconditions; +import 
org.apache.hadoop.fs.FSExceptionMessages; +import org.apache.hadoop.fs.FSInputStream; +import org.apache.hadoop.fs.FileSystem.Statistics; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import seaweedfs.client.FilerGrpcClient; +import seaweedfs.client.FilerProto; +import seaweedfs.client.SeaweedRead; + +import java.io.EOFException; +import java.io.IOException; +import java.util.List; + +public class SeaweedInputStream extends FSInputStream { + + private static final Logger LOG = LoggerFactory.getLogger(SeaweedInputStream.class); + + private final FilerGrpcClient filerGrpcClient; + private final Statistics statistics; + private final String path; + private final FilerProto.Entry entry; + private final List visibleIntervalList; + private final long contentLength; + private final int bufferSize; // default buffer size + private final int readAheadQueueDepth; // initialized in constructor + private final boolean readAheadEnabled; // whether enable readAhead; + + private byte[] buffer = null; // will be initialized on first use + + private long fCursor = 0; // cursor of buffer within file - offset of next byte to read from remote server + private long fCursorAfterLastRead = -1; + private int bCursor = 0; // cursor of read within buffer - offset of next byte to be returned from buffer + private int limit = 0; // offset of next byte to be read into buffer from service (i.e., upper marker+1 + // of valid bytes in buffer) + private boolean closed = false; + + public SeaweedInputStream( + final FilerGrpcClient filerGrpcClient, + final Statistics statistics, + final String path, + final FilerProto.Entry entry, + final int bufferSize, + final int readAheadQueueDepth) { + this.filerGrpcClient = filerGrpcClient; + this.statistics = statistics; + this.path = path; + this.entry = entry; + this.contentLength = SeaweedRead.totalSize(entry.getChunksList()); + this.bufferSize = bufferSize; + this.readAheadQueueDepth = (readAheadQueueDepth >= 0) ? 
readAheadQueueDepth : Runtime.getRuntime().availableProcessors(); + this.readAheadEnabled = true; + + this.visibleIntervalList = SeaweedRead.nonOverlappingVisibleIntervals(entry.getChunksList()); + + LOG.debug("new path:{} entry:{} visibleIntervalList:{}", path, entry, visibleIntervalList); + + } + + public String getPath() { + return path; + } + + @Override + public int read() throws IOException { + byte[] b = new byte[1]; + int numberOfBytesRead = read(b, 0, 1); + if (numberOfBytesRead < 0) { + return -1; + } else { + return (b[0] & 0xFF); + } + } + + @Override + public synchronized int read(final byte[] b, final int off, final int len) throws IOException { + int currentOff = off; + int currentLen = len; + int lastReadBytes; + int totalReadBytes = 0; + do { + lastReadBytes = readOneBlock(b, currentOff, currentLen); + if (lastReadBytes > 0) { + currentOff += lastReadBytes; + currentLen -= lastReadBytes; + totalReadBytes += lastReadBytes; + } + if (currentLen <= 0 || currentLen > b.length - currentOff) { + break; + } + } while (lastReadBytes > 0); + return totalReadBytes > 0 ? totalReadBytes : lastReadBytes; + } + + private int readOneBlock(final byte[] b, final int off, final int len) throws IOException { + if (closed) { + throw new IOException(FSExceptionMessages.STREAM_IS_CLOSED); + } + + Preconditions.checkNotNull(b); + + if (len == 0) { + return 0; + } + + if (this.available() == 0) { + return -1; + } + + if (off < 0 || len < 0 || len > b.length - off) { + throw new IndexOutOfBoundsException(); + } + + //If buffer is empty, then fill the buffer. 
+ if (bCursor == limit) { + //If EOF, then return -1 + if (fCursor >= contentLength) { + return -1; + } + + long bytesRead = 0; + //reset buffer to initial state - i.e., throw away existing data + bCursor = 0; + limit = 0; + if (buffer == null) { + buffer = new byte[bufferSize]; + } + + // Enable readAhead when reading sequentially + if (-1 == fCursorAfterLastRead || fCursorAfterLastRead == fCursor || b.length >= bufferSize) { + bytesRead = readInternal(fCursor, buffer, 0, bufferSize, false); + } else { + bytesRead = readInternal(fCursor, buffer, 0, b.length, true); + } + + if (bytesRead == -1) { + return -1; + } + + limit += bytesRead; + fCursor += bytesRead; + fCursorAfterLastRead = fCursor; + } + + //If there is anything in the buffer, then return lesser of (requested bytes) and (bytes in buffer) + //(bytes returned may be less than requested) + int bytesRemaining = limit - bCursor; + int bytesToRead = Math.min(len, bytesRemaining); + System.arraycopy(buffer, bCursor, b, off, bytesToRead); + bCursor += bytesToRead; + if (statistics != null) { + statistics.incrementBytesRead(bytesToRead); + } + return bytesToRead; + } + + + private int readInternal(final long position, final byte[] b, final int offset, final int length, + final boolean bypassReadAhead) throws IOException { + if (readAheadEnabled && !bypassReadAhead) { + // try reading from read-ahead + if (offset != 0) { + throw new IllegalArgumentException("readahead buffers cannot have non-zero buffer offsets"); + } + int receivedBytes; + + // queue read-aheads + int numReadAheads = this.readAheadQueueDepth; + long nextSize; + long nextOffset = position; + while (numReadAheads > 0 && nextOffset < contentLength) { + nextSize = Math.min((long) bufferSize, contentLength - nextOffset); + ReadBufferManager.getBufferManager().queueReadAhead(this, nextOffset, (int) nextSize); + nextOffset = nextOffset + nextSize; + numReadAheads--; + } + + // try reading from buffers first + receivedBytes = 
ReadBufferManager.getBufferManager().getBlock(this, position, length, b); + if (receivedBytes > 0) { + return receivedBytes; + } + + // got nothing from read-ahead, do our own read now + receivedBytes = readRemote(position, b, offset, length); + return receivedBytes; + } else { + return readRemote(position, b, offset, length); + } + } + + int readRemote(long position, byte[] b, int offset, int length) throws IOException { + if (position < 0) { + throw new IllegalArgumentException("attempting to read from negative offset"); + } + if (position >= contentLength) { + return -1; // Hadoop prefers -1 to EOFException + } + if (b == null) { + throw new IllegalArgumentException("null byte array passed in to read() method"); + } + if (offset >= b.length) { + throw new IllegalArgumentException("offset greater than length of array"); + } + if (length < 0) { + throw new IllegalArgumentException("requested read length is less than zero"); + } + if (length > (b.length - offset)) { + throw new IllegalArgumentException("requested read length is more than will fit after requested offset in buffer"); + } + + long bytesRead = SeaweedRead.read(filerGrpcClient, visibleIntervalList, position, b, offset, length); + if (bytesRead > Integer.MAX_VALUE) { + throw new IOException("Unexpected Content-Length"); + } + return (int) bytesRead; + } + + /** + * Seek to given position in stream. 
+ * + * @param n position to seek to + * @throws IOException if there is an error + * @throws EOFException if attempting to seek past end of file + */ + @Override + public synchronized void seek(long n) throws IOException { + if (closed) { + throw new IOException(FSExceptionMessages.STREAM_IS_CLOSED); + } + if (n < 0) { + throw new EOFException(FSExceptionMessages.NEGATIVE_SEEK); + } + if (n > contentLength) { + throw new EOFException(FSExceptionMessages.CANNOT_SEEK_PAST_EOF); + } + + if (n >= fCursor - limit && n <= fCursor) { // within buffer + bCursor = (int) (n - (fCursor - limit)); + return; + } + + // next read will read from here + fCursor = n; + + //invalidate buffer + limit = 0; + bCursor = 0; + } + + @Override + public synchronized long skip(long n) throws IOException { + if (closed) { + throw new IOException(FSExceptionMessages.STREAM_IS_CLOSED); + } + long currentPos = getPos(); + if (currentPos == contentLength) { + if (n > 0) { + throw new EOFException(FSExceptionMessages.CANNOT_SEEK_PAST_EOF); + } + } + long newPos = currentPos + n; + if (newPos < 0) { + newPos = 0; + n = newPos - currentPos; + } + if (newPos > contentLength) { + newPos = contentLength; + n = newPos - currentPos; + } + seek(newPos); + return n; + } + + /** + * Return the size of the remaining available bytes + * if the size is less than or equal to {@link Integer#MAX_VALUE}, + * otherwise, return {@link Integer#MAX_VALUE}. + *

+ * This is to match the behavior of DFSInputStream.available(), + * which some clients may rely on (HBase write-ahead log reading in + * particular). + */ + @Override + public synchronized int available() throws IOException { + if (closed) { + throw new IOException( + FSExceptionMessages.STREAM_IS_CLOSED); + } + final long remaining = this.contentLength - this.getPos(); + return remaining <= Integer.MAX_VALUE + ? (int) remaining : Integer.MAX_VALUE; + } + + /** + * Returns the length of the file that this stream refers to. Note that the length returned is the length + * as of the time the Stream was opened. Specifically, if there have been subsequent appends to the file, + * they wont be reflected in the returned length. + * + * @return length of the file. + * @throws IOException if the stream is closed + */ + public long length() throws IOException { + if (closed) { + throw new IOException(FSExceptionMessages.STREAM_IS_CLOSED); + } + return contentLength; + } + + /** + * Return the current offset from the start of the file + * + * @throws IOException throws {@link IOException} if there is an error + */ + @Override + public synchronized long getPos() throws IOException { + if (closed) { + throw new IOException(FSExceptionMessages.STREAM_IS_CLOSED); + } + return fCursor - limit + bCursor; + } + + /** + * Seeks a different copy of the data. Returns true if + * found a new source, false otherwise. + * + * @throws IOException throws {@link IOException} if there is an error + */ + @Override + public boolean seekToNewSource(long l) throws IOException { + return false; + } + + @Override + public synchronized void close() throws IOException { + closed = true; + buffer = null; // de-reference the buffer so it can be GC'ed sooner + } + + /** + * Not supported by this stream. 
Throws {@link UnsupportedOperationException} + * + * @param readlimit ignored + */ + @Override + public synchronized void mark(int readlimit) { + throw new UnsupportedOperationException("mark()/reset() not supported on this stream"); + } + + /** + * Not supported by this stream. Throws {@link UnsupportedOperationException} + */ + @Override + public synchronized void reset() throws IOException { + throw new UnsupportedOperationException("mark()/reset() not supported on this stream"); + } + + /** + * gets whether mark and reset are supported by {@code ADLFileInputStream}. Always returns false. + * + * @return always {@code false} + */ + @Override + public boolean markSupported() { + return false; + } +} diff --git a/other/java/hdfs/src/main/java/seaweed/hdfs/SeaweedOutputStream.java b/other/java/hdfs3/src/main/java/seaweed/hdfs/SeaweedOutputStream.java similarity index 99% rename from other/java/hdfs/src/main/java/seaweed/hdfs/SeaweedOutputStream.java rename to other/java/hdfs3/src/main/java/seaweed/hdfs/SeaweedOutputStream.java index 4f307ff96..96af27fe0 100644 --- a/other/java/hdfs/src/main/java/seaweed/hdfs/SeaweedOutputStream.java +++ b/other/java/hdfs3/src/main/java/seaweed/hdfs/SeaweedOutputStream.java @@ -78,9 +78,6 @@ public class SeaweedOutputStream extends OutputStream implements Syncable, Strea } private synchronized void flushWrittenBytesToServiceInternal(final long offset) throws IOException { - - LOG.debug("SeaweedWrite.writeMeta path: {} entry:{}", path, entry); - try { SeaweedWrite.writeMeta(filerGrpcClient, getParentDirectory(path), entry); } catch (Exception ex) { diff --git a/snap/README.md b/snap/README.md new file mode 100644 index 000000000..5752bd4af --- /dev/null +++ b/snap/README.md @@ -0,0 +1,49 @@ +Hi + +This PR adds support for building a snap package of seaweedfs. Snaps are cross distro Linux software packages. One snap can be installed on Ubuntu all supported LTS and non LTS releases from 14.04 onward. 
Additionally they can installed on Debian, Manjaro, Fedora, OpenSUSE and others. Making a snap of seaweedfs enables you to provide automatic updates on your schedule to your users via the snap store. + +If accepted, you can use snapcraft locally, a CI system such as travis or circle-ci, or our free build system (build.snapcraft.io) to create snaps and upload to the store (snapcraft.io/store). The store supports + +To test this PR locally, I used an Ubuntu 16.04 VM, with the following steps. + +``` +snap install snapcraft --classic +git clone https://github.com/popey/seaweedfs +cd seaweedfs +git checkout add-snapcraft +snapcraft +``` + +The generated a .snap file from the tip of master (I could have checked out a stable release instead). It can be installed with:- + + snap install seaweedfs_0.99+git30.79371c0-dirty_amd64.snap --dangerous + +(the --dangerous is necessary because we’re installing an app which hasn’t gone through the snap store review process) + +Once installed, the (namespaced) weed command can be executed. If you accept this and land the snap in the store, we can request an ‘alias’ so users can use the ‘weed’ command rather than the namespaced ‘seaweedfs.weed’ + +- Run the command +- Create sample config. 
Snaps are securely confined so their home directory is in a different place + mkdir ~/snap/seaweedfs/current/.seaweedfs + seaweedfs.weed scaffold > ~/snap/seaweed/current/.seaweedfs/filer.toml +- Run a server + seaweedfs.weed server +- Run a benchmark + seaweedfs.weed benchmark + +Results from my test run: https://paste.ubuntu.com/p/95Xk8zFQ7w/ + +If landed, you will need to:- + +- Register an account in the snap store https://snapcraft.io/account +- Register the ‘seaweedfs’ name in the store + - snapcraft login + - snapcraft register seaweedfs +- Upload a built snap to the store + - snapcraft push seaweedfs_0.99+git30.79371c0-dirty_amd64.snap --release edge +- Test installing on a clean Ubuntu 16.04 machine + - snap install seaweedfs --edge + +The store supports multiple risk levels as “channels” with the edge channel typically used to host the latest build from git master. Stable is where stable releases are pushed. Optionally beta and candidate channels can also be used if needed. + +Once you are happy, you can push a stable release to the stable channel, update the store page, and promote the application online (we can help there). diff --git a/snap/snapcraft.yaml b/snap/snapcraft.yaml new file mode 100644 index 000000000..6449e9bfb --- /dev/null +++ b/snap/snapcraft.yaml @@ -0,0 +1,53 @@ +# Name of snap as registered in the store +name: seaweedfs +# Automatically derive snap version from git tags +version: git +# Short human readable name as seen in 'snap find $SNAPNAME' +summary: SeaweedFS +# Longer multi-line description found in 'snap info $SNAPNAME' +description: | + SeaweedFS is a simple and highly scalable distributed file system, + to store and serve billions of files fast! + SeaweedFS implements an object store with O(1) disk seek, + transparent cloud integration, and an optional Filer with POSIX interface, + supporting S3 API, Rack-Aware Erasure Coding for warm storage, + FUSE mount, Hadoop compatible, WebDAV. 
+ +# Grade is stable for snaps expected to land in the stable channel +grade: stable +# Uses the strict confinement model and uses interfaces to open up access to +# resources on the target host +confinement: strict + +# List of parts which comprise the snap +parts: + # The main part which defines how to build the application in the snap + seaweedfs: + # This part needs a newer version of golang, so we use a separate part + # which defines how to get a newer golang during the build + after: [go] + # The go plugin knows how to build go applications into a snap + plugin: go + # Snapcraft will look in this location for the source of the application + source: . + go-importpath: github.com/chrislusf/seaweedfs + go: + # Defines the version of golang which will be bootstrapped into the snap + source-tag: go1.14 + +# Apps exposes the binaries inside the snap to the host system once installed +apps: + # We expose the weed command. + # This differs from the snap name, so it will be namespaced as seaweedfs.weed + # An alias can be added to expose this as 'weed' if requested in the snapcraft forum + weed: + # The path to the binary inside the snap, relative to the $SNAP home + command: bin/weed + # Plugs connect the snap to resources on the host system. 
We enable network connectivity + # We also add home and removable-media (latter not autoconnected by default) + # so users can access files in their home or on removable disks + plugs: + - network + - network-bind + - home + - removable-media diff --git a/unmaintained/change_superblock/change_superblock.go b/unmaintained/change_superblock/change_superblock.go index 779580a9b..afe651c4e 100644 --- a/unmaintained/change_superblock/change_superblock.go +++ b/unmaintained/change_superblock/change_superblock.go @@ -8,7 +8,9 @@ import ( "strconv" "github.com/chrislusf/seaweedfs/weed/glog" - "github.com/chrislusf/seaweedfs/weed/storage" + "github.com/chrislusf/seaweedfs/weed/storage/backend" + "github.com/chrislusf/seaweedfs/weed/storage/needle" + "github.com/chrislusf/seaweedfs/weed/storage/super_block" ) var ( @@ -46,9 +48,10 @@ func main() { if err != nil { glog.Fatalf("Open Volume Data File [ERROR]: %v", err) } - defer datFile.Close() + datBackend := backend.NewDiskFile(datFile) + defer datBackend.Close() - superBlock, err := storage.ReadSuperBlock(datFile) + superBlock, err := super_block.ReadSuperBlock(datBackend) if err != nil { glog.Fatalf("cannot parse existing super block: %v", err) @@ -60,7 +63,7 @@ func main() { hasChange := false if *targetReplica != "" { - replica, err := storage.NewReplicaPlacementFromString(*targetReplica) + replica, err := super_block.NewReplicaPlacementFromString(*targetReplica) if err != nil { glog.Fatalf("cannot parse target replica %s: %v", *targetReplica, err) @@ -73,7 +76,7 @@ func main() { } if *targetTTL != "" { - ttl, err := storage.ReadTTL(*targetTTL) + ttl, err := needle.ReadTTL(*targetTTL) if err != nil { glog.Fatalf("cannot parse target ttl %s: %v", *targetTTL, err) diff --git a/unmaintained/check_disk_size/check_disk_size.go b/unmaintained/check_disk_size/check_disk_size.go new file mode 100644 index 000000000..4a8b92b88 --- /dev/null +++ b/unmaintained/check_disk_size/check_disk_size.go @@ -0,0 +1,42 @@ +package main + 
+import ( + "flag" + "fmt" + "runtime" + "syscall" +) + +var ( + dir = flag.String("dir", ".", "the directory which uses a disk") +) + +func main() { + flag.Parse() + + fillInDiskStatus(*dir) + + fmt.Printf("OS: %v\n", runtime.GOOS) + fmt.Printf("Arch: %v\n", runtime.GOARCH) + +} + +func fillInDiskStatus(dir string) { + fs := syscall.Statfs_t{} + err := syscall.Statfs(dir, &fs) + if err != nil { + fmt.Printf("failed to statfs on %s: %v\n", dir, err) + return + } + fmt.Printf("statfs: %+v\n", fs) + fmt.Println() + + total := fs.Blocks * uint64(fs.Bsize) + free := fs.Bfree * uint64(fs.Bsize) + fmt.Printf("Total: %d blocks x %d block size = %d bytes\n", fs.Blocks, uint64(fs.Bsize), total) + fmt.Printf("Free : %d blocks x %d block size = %d bytes\n", fs.Bfree, uint64(fs.Bsize), free) + fmt.Printf("Used : %d blocks x %d block size = %d bytes\n", fs.Blocks-fs.Bfree, uint64(fs.Bsize), total-free) + fmt.Printf("Free Percentage : %.2f%%\n", float32((float64(free)/float64(total))*100)) + fmt.Printf("Used Percentage : %.2f%%\n", float32((float64(total-free)/float64(total))*100)) + return +} diff --git a/unmaintained/compact_leveldb/compact_leveldb.go b/unmaintained/compact_leveldb/compact_leveldb.go new file mode 100644 index 000000000..9be5697de --- /dev/null +++ b/unmaintained/compact_leveldb/compact_leveldb.go @@ -0,0 +1,39 @@ +package main + +import ( + "flag" + "log" + + "github.com/syndtr/goleveldb/leveldb" + "github.com/syndtr/goleveldb/leveldb/errors" + "github.com/syndtr/goleveldb/leveldb/opt" + "github.com/syndtr/goleveldb/leveldb/util" +) + +var ( + dir = flag.String("dir", ".", "data directory to store leveldb files") +) + +func main() { + + flag.Parse() + + opts := &opt.Options{ + BlockCacheCapacity: 32 * 1024 * 1024, // default value is 8MiB + WriteBuffer: 16 * 1024 * 1024, // default value is 4MiB + CompactionTableSizeMultiplier: 10, + OpenFilesCacheCapacity: -1, + } + + db, err := leveldb.OpenFile(*dir, opts) + if errors.IsCorrupted(err) { + db, err = 
leveldb.RecoverFile(*dir, opts) + } + if err != nil { + log.Fatal(err) + } + defer db.Close() + if err := db.CompactRange(util.Range{}); err != nil { + log.Fatal(err) + } +} diff --git a/unmaintained/fix_dat/fix_dat.go b/unmaintained/fix_dat/fix_dat.go index 9eb64b3b4..d6110d870 100644 --- a/unmaintained/fix_dat/fix_dat.go +++ b/unmaintained/fix_dat/fix_dat.go @@ -9,7 +9,9 @@ import ( "strconv" "github.com/chrislusf/seaweedfs/weed/glog" - "github.com/chrislusf/seaweedfs/weed/storage" + "github.com/chrislusf/seaweedfs/weed/storage/backend" + "github.com/chrislusf/seaweedfs/weed/storage/needle" + "github.com/chrislusf/seaweedfs/weed/storage/super_block" "github.com/chrislusf/seaweedfs/weed/storage/types" "github.com/chrislusf/seaweedfs/weed/util" ) @@ -43,11 +45,13 @@ func main() { glog.Fatalf("Read Volume Index %v", err) } defer indexFile.Close() - datFile, err := os.OpenFile(path.Join(*fixVolumePath, fileName+".dat"), os.O_RDONLY, 0644) + datFileName := path.Join(*fixVolumePath, fileName+".dat") + datFile, err := os.OpenFile(datFileName, os.O_RDONLY, 0644) if err != nil { glog.Fatalf("Read Volume Data %v", err) } - defer datFile.Close() + datBackend := backend.NewDiskFile(datFile) + defer datBackend.Close() newDatFile, err := os.Create(path.Join(*fixVolumePath, fileName+".dat_fixed")) if err != nil { @@ -55,21 +59,21 @@ func main() { } defer newDatFile.Close() - superBlock, err := storage.ReadSuperBlock(datFile) + superBlock, err := super_block.ReadSuperBlock(datBackend) if err != nil { glog.Fatalf("Read Volume Data superblock %v", err) } newDatFile.Write(superBlock.Bytes()) - iterateEntries(datFile, indexFile, func(n *storage.Needle, offset int64) { + iterateEntries(datBackend, indexFile, func(n *needle.Needle, offset int64) { fmt.Printf("needle id=%v name=%s size=%d dataSize=%d\n", n.Id, string(n.Name), n.Size, n.DataSize) - _, s, _, e := n.Append(newDatFile, superBlock.Version()) + _, s, _, e := n.Append(datBackend, superBlock.Version) fmt.Printf("size %d error 
%v\n", s, e) }) } -func iterateEntries(datFile, idxFile *os.File, visitNeedle func(n *storage.Needle, offset int64)) { +func iterateEntries(datBackend backend.BackendStorageFile, idxFile *os.File, visitNeedle func(n *needle.Needle, offset int64)) { // start to read index file var readerOffset int64 bytes := make([]byte, 16) @@ -77,14 +81,14 @@ func iterateEntries(datFile, idxFile *os.File, visitNeedle func(n *storage.Needl readerOffset += int64(count) // start to read dat file - superBlock, err := storage.ReadSuperBlock(datFile) + superBlock, err := super_block.ReadSuperBlock(datBackend) if err != nil { fmt.Printf("cannot read dat file super block: %v", err) return } offset := int64(superBlock.BlockSize()) - version := superBlock.Version() - n, rest, err := storage.ReadNeedleHeader(datFile, version, offset) + version := superBlock.Version + n, _, rest, err := needle.ReadNeedleHeader(datBackend, version, offset) if err != nil { fmt.Printf("cannot read needle header: %v", err) return @@ -106,7 +110,7 @@ func iterateEntries(datFile, idxFile *os.File, visitNeedle func(n *storage.Needl fmt.Printf("key: %d offsetFromIndex %d n.Size %d sizeFromIndex:%d\n", key, offsetFromIndex, n.Size, sizeFromIndex) - rest = storage.NeedleBodyLength(sizeFromIndex, version) + rest = needle.NeedleBodyLength(sizeFromIndex, version) func() { defer func() { @@ -114,7 +118,7 @@ func iterateEntries(datFile, idxFile *os.File, visitNeedle func(n *storage.Needl fmt.Println("Recovered in f", r) } }() - if err = n.ReadNeedleBody(datFile, version, offset+int64(types.NeedleEntrySize), rest); err != nil { + if _, err = n.ReadNeedleBody(datBackend, version, offset+int64(types.NeedleHeaderSize), rest); err != nil { fmt.Printf("cannot read needle body: offset %d body %d %v\n", offset, rest, err) } }() @@ -124,9 +128,9 @@ func iterateEntries(datFile, idxFile *os.File, visitNeedle func(n *storage.Needl } visitNeedle(n, offset) - offset += types.NeedleEntrySize + rest + offset += types.NeedleHeaderSize + 
rest //fmt.Printf("==> new entry offset %d\n", offset) - if n, rest, err = storage.ReadNeedleHeader(datFile, version, offset); err != nil { + if n, _, rest, err = needle.ReadNeedleHeader(datBackend, version, offset); err != nil { if err == io.EOF { return } diff --git a/unmaintained/load_test/load_test_leveldb/load_test_leveldb.go b/unmaintained/load_test/load_test_leveldb/load_test_leveldb.go new file mode 100644 index 000000000..43dfb0e21 --- /dev/null +++ b/unmaintained/load_test/load_test_leveldb/load_test_leveldb.go @@ -0,0 +1,155 @@ +package load_test_leveldb + +import ( + "crypto/md5" + "flag" + "fmt" + "io" + "log" + "math/rand" + "os" + "sync" + "time" + + "github.com/syndtr/goleveldb/leveldb" + "github.com/syndtr/goleveldb/leveldb/opt" +) + +var ( + dir = flag.String("dir", "./t", "directory to store level db files") + useHash = flag.Bool("isHash", false, "hash the path as the key") + dbCount = flag.Int("dbCount", 1, "the number of leveldb") +) + +func main() { + + flag.Parse() + + totalTenants := 300 + totalYears := 3 + + opts := &opt.Options{ + BlockCacheCapacity: 32 * 1024 * 1024, // default value is 8MiB + WriteBuffer: 16 * 1024 * 1024, // default value is 4MiB + CompactionTableSizeMultiplier: 4, + } + + var dbs []*leveldb.DB + var chans []chan string + for d := 0; d < *dbCount; d++ { + dbFolder := fmt.Sprintf("%s/%02d", *dir, d) + os.MkdirAll(dbFolder, 0755) + db, err := leveldb.OpenFile(dbFolder, opts) + if err != nil { + log.Printf("filer store open dir %s: %v", *dir, err) + return + } + dbs = append(dbs, db) + chans = append(chans, make(chan string, 1024)) + } + + var wg sync.WaitGroup + for d := 0; d < *dbCount; d++ { + wg.Add(1) + go func(d int) { + defer wg.Done() + + ch := chans[d] + db := dbs[d] + + for p := range ch { + if *useHash { + insertAsHash(db, p) + } else { + insertAsFullPath(db, p) + } + } + }(d) + } + + counter := int64(0) + lastResetTime := time.Now() + + r := rand.New(rand.NewSource(35)) + + for y := 0; y < totalYears; y++ { + 
for m := 0; m < 12; m++ { + for d := 0; d < 31; d++ { + for h := 0; h < 24; h++ { + for min := 0; min < 60; min++ { + for i := 0; i < totalTenants; i++ { + p := fmt.Sprintf("tenent%03d/%4d/%02d/%02d/%02d/%02d", i, 2015+y, 1+m, 1+d, h, min) + + x := r.Intn(*dbCount) + + chans[x] <- p + + counter++ + } + + t := time.Now() + if lastResetTime.Add(time.Second).Before(t) { + p := fmt.Sprintf("%4d/%02d/%02d/%02d/%02d", 2015+y, 1+m, 1+d, h, min) + fmt.Printf("%s = %4d put/sec\n", p, counter) + counter = 0 + lastResetTime = t + } + } + } + } + } + } + + for d := 0; d < *dbCount; d++ { + close(chans[d]) + } + + wg.Wait() + +} + +func insertAsFullPath(db *leveldb.DB, p string) { + _, getErr := db.Get([]byte(p), nil) + if getErr == leveldb.ErrNotFound { + putErr := db.Put([]byte(p), []byte(p), nil) + if putErr != nil { + log.Printf("failed to put %s", p) + } + } +} + +func insertAsHash(db *leveldb.DB, p string) { + key := fmt.Sprintf("%d:%s", hashToLong(p), p) + _, getErr := db.Get([]byte(key), nil) + if getErr == leveldb.ErrNotFound { + putErr := db.Put([]byte(key), []byte(p), nil) + if putErr != nil { + log.Printf("failed to put %s", p) + } + } +} + +func hashToLong(dir string) (v int64) { + h := md5.New() + io.WriteString(h, dir) + + b := h.Sum(nil) + + v += int64(b[0]) + v <<= 8 + v += int64(b[1]) + v <<= 8 + v += int64(b[2]) + v <<= 8 + v += int64(b[3]) + v <<= 8 + v += int64(b[4]) + v <<= 8 + v += int64(b[5]) + v <<= 8 + v += int64(b[6]) + v <<= 8 + v += int64(b[7]) + + return +} diff --git a/unmaintained/remove_duplicate_fids/remove_duplicate_fids.go b/unmaintained/remove_duplicate_fids/remove_duplicate_fids.go new file mode 100644 index 000000000..84173a663 --- /dev/null +++ b/unmaintained/remove_duplicate_fids/remove_duplicate_fids.go @@ -0,0 +1,95 @@ +package main + +import ( + "flag" + "fmt" + "os" + "path/filepath" + + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/storage" + "github.com/chrislusf/seaweedfs/weed/storage/backend" + 
"github.com/chrislusf/seaweedfs/weed/storage/needle" + "github.com/chrislusf/seaweedfs/weed/storage/super_block" +) + +var ( + volumePath = flag.String("dir", "/tmp", "data directory to store files") + volumeCollection = flag.String("collection", "", "the volume collection name") + volumeId = flag.Int("volumeId", -1, "a volume id. The volume should already exist in the dir. The volume index file should not exist.") +) + +func Checksum(n *needle.Needle) string { + return fmt.Sprintf("%s%x", n.Id, n.Cookie) +} + +type VolumeFileScanner4SeeDat struct { + version needle.Version + block super_block.SuperBlock + + dir string + hashes map[string]bool + dat *os.File + datBackend backend.BackendStorageFile +} + +func (scanner *VolumeFileScanner4SeeDat) VisitSuperBlock(superBlock super_block.SuperBlock) error { + scanner.version = superBlock.Version + scanner.block = superBlock + return nil + +} +func (scanner *VolumeFileScanner4SeeDat) ReadNeedleBody() bool { + return true +} + +func (scanner *VolumeFileScanner4SeeDat) VisitNeedle(n *needle.Needle, offset int64, needleHeader, needleBody []byte) error { + + if scanner.datBackend == nil { + newFileName := filepath.Join(*volumePath, "dat_fixed") + newDatFile, err := os.Create(newFileName) + if err != nil { + glog.Fatalf("Write New Volume Data %v", err) + } + scanner.datBackend = backend.NewDiskFile(newDatFile) + scanner.datBackend.WriteAt(scanner.block.Bytes(), 0) + } + + checksum := Checksum(n) + + if scanner.hashes[checksum] { + glog.V(0).Infof("duplicate checksum:%s fid:%d,%s%x @ offset:%d", checksum, *volumeId, n.Id, n.Cookie, offset) + return nil + } + scanner.hashes[checksum] = true + + _, s, _, e := n.Append(scanner.datBackend, scanner.version) + fmt.Printf("size %d error %v\n", s, e) + + return nil +} + +func main() { + flag.Parse() + + vid := needle.VolumeId(*volumeId) + + outpath, _ := filepath.Abs(filepath.Dir(os.Args[0])) + + scanner := &VolumeFileScanner4SeeDat{ + dir: filepath.Join(outpath, "out"), + hashes: 
map[string]bool{}, + } + + if _, err := os.Stat(scanner.dir); err != nil { + if err := os.MkdirAll(scanner.dir, os.ModePerm); err != nil { + glog.Fatalf("could not create output dir : %s", err) + } + } + + err := storage.ScanVolumeFile(*volumePath, *volumeCollection, vid, storage.NeedleMapInMemory, scanner) + if err != nil { + glog.Fatalf("Reading Volume File [ERROR] %s\n", err) + } + +} diff --git a/unmaintained/repeated_vacuum/repeated_vacuum.go b/unmaintained/repeated_vacuum/repeated_vacuum.go index 7cc583f56..12ac42dbe 100644 --- a/unmaintained/repeated_vacuum/repeated_vacuum.go +++ b/unmaintained/repeated_vacuum/repeated_vacuum.go @@ -1,45 +1,73 @@ package main import ( - "bytes" "flag" "fmt" "log" "math/rand" + "time" + + "google.golang.org/grpc" "github.com/chrislusf/seaweedfs/weed/operation" + "github.com/chrislusf/seaweedfs/weed/security" "github.com/chrislusf/seaweedfs/weed/util" ) var ( - master = flag.String("master", "127.0.0.1:9333", "the master server") - repeat = flag.Int("n", 5, "repeat how many times") + master = flag.String("master", "127.0.0.1:9333", "the master server") + repeat = flag.Int("n", 5, "repeat how many times") + garbageThreshold = flag.Float64("garbageThreshold", 0.3, "garbageThreshold") + replication = flag.String("replication", "", "replication 000, 001, 002, etc") ) func main() { flag.Parse() - for i := 0; i < *repeat; i++ { - assignResult, err := operation.Assign(*master, &operation.VolumeAssignRequest{Count: 1}) - if err != nil { - log.Fatalf("assign: %v", err) + util.LoadConfiguration("security", false) + grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.client") + + genFile(grpcDialOption, 0) + + go func() { + for { + println("vacuum threshold", *garbageThreshold) + _, err := util.Get(fmt.Sprintf("http://%s/vol/vacuum?garbageThreshold=%f", *master, *garbageThreshold)) + if err != nil { + log.Fatalf("vacuum: %v", err) + } + time.Sleep(time.Second) } + }() - data := make([]byte, 1024) - rand.Read(data) - reader := 
bytes.NewReader(data) + for i := 0; i < *repeat; i++ { + // create 2 files, and delete one of them - targetUrl := fmt.Sprintf("http://%s/%s", assignResult.Url, assignResult.Fid) + assignResult, targetUrl := genFile(grpcDialOption, i) - _, err = operation.Upload(targetUrl, fmt.Sprintf("test%d", i), reader, false, "", nil, "") - if err != nil { - log.Fatalf("upload: %v", err) - } + util.Delete(targetUrl, string(assignResult.Auth)) - util.Delete(targetUrl, "") + } - util.Get(fmt.Sprintf("http://%s/vol/vacuum", *master)) +} +func genFile(grpcDialOption grpc.DialOption, i int) (*operation.AssignResult, string) { + assignResult, err := operation.Assign(*master, grpcDialOption, &operation.VolumeAssignRequest{ + Count: 1, + Replication: *replication, + }) + if err != nil { + log.Fatalf("assign: %v", err) } + data := make([]byte, 1024) + rand.Read(data) + + targetUrl := fmt.Sprintf("http://%s/%s", assignResult.Url, assignResult.Fid) + + _, err = operation.UploadData(targetUrl, fmt.Sprintf("test%d", i), false, data, false, "bench/test", nil, assignResult.Auth) + if err != nil { + log.Fatalf("upload: %v", err) + } + return assignResult, targetUrl } diff --git a/unmaintained/see_dat/see_dat.go b/unmaintained/see_dat/see_dat.go index f79c0a6a9..17c494841 100644 --- a/unmaintained/see_dat/see_dat.go +++ b/unmaintained/see_dat/see_dat.go @@ -2,8 +2,13 @@ package main import ( "flag" + "github.com/chrislusf/seaweedfs/weed/util" + "time" + "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/storage" + "github.com/chrislusf/seaweedfs/weed/storage/needle" + "github.com/chrislusf/seaweedfs/weed/storage/super_block" ) var ( @@ -13,32 +18,33 @@ var ( ) type VolumeFileScanner4SeeDat struct { - version storage.Version + version needle.Version } -func (scanner *VolumeFileScanner4SeeDat) VisitSuperBlock(superBlock storage.SuperBlock) error { - scanner.version = superBlock.Version() +func (scanner *VolumeFileScanner4SeeDat) VisitSuperBlock(superBlock 
super_block.SuperBlock) error { + scanner.version = superBlock.Version return nil } func (scanner *VolumeFileScanner4SeeDat) ReadNeedleBody() bool { - return false + return true } -func (scanner *VolumeFileScanner4SeeDat) VisitNeedle(n *storage.Needle, offset int64) error { - glog.V(0).Infof("%d,%s%x offset %d size %d cookie %x", *volumeId, n.Id, n.Cookie, offset, n.Size, n.Cookie) +func (scanner *VolumeFileScanner4SeeDat) VisitNeedle(n *needle.Needle, offset int64, needleHeader, needleBody []byte) error { + t := time.Unix(int64(n.AppendAtNs)/int64(time.Second), int64(n.AppendAtNs)%int64(time.Second)) + glog.V(0).Infof("%d,%s%x offset %d size %d(%s) cookie %x appendedAt %v", + *volumeId, n.Id, n.Cookie, offset, n.Size, util.BytesToHumanReadable(uint64(n.Size)), n.Cookie, t) return nil } func main() { flag.Parse() - vid := storage.VolumeId(*volumeId) + vid := needle.VolumeId(*volumeId) scanner := &VolumeFileScanner4SeeDat{} err := storage.ScanVolumeFile(*volumePath, *volumeCollection, vid, storage.NeedleMapInMemory, scanner) if err != nil { glog.Fatalf("Reading Volume File [ERROR] %s\n", err) } - } diff --git a/unmaintained/see_dat/see_dat_gzip.go b/unmaintained/see_dat/see_dat_gzip.go new file mode 100644 index 000000000..cec073e3f --- /dev/null +++ b/unmaintained/see_dat/see_dat_gzip.go @@ -0,0 +1,83 @@ +package main + +import ( + "bytes" + "compress/gzip" + "crypto/md5" + "flag" + "io" + "io/ioutil" + "net/http" + "time" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/storage" + "github.com/chrislusf/seaweedfs/weed/storage/needle" + "github.com/chrislusf/seaweedfs/weed/storage/super_block" + "github.com/chrislusf/seaweedfs/weed/util" +) + +type VolumeFileScanner4SeeDat struct { + version needle.Version +} + +func (scanner *VolumeFileScanner4SeeDat) VisitSuperBlock(superBlock super_block.SuperBlock) error { + scanner.version = superBlock.Version + return nil +} + +func (scanner *VolumeFileScanner4SeeDat) ReadNeedleBody() bool { 
+ return true +} + +var ( + files = int64(0) + filebytes = int64(0) + diffbytes = int64(0) +) + +func Compresssion(data []byte) float64 { + if len(data) <= 128 { + return 100.0 + } + compressed, _ := util.GzipData(data[0:128]) + return float64(len(compressed)*10) / 1280.0 +} + +func (scanner *VolumeFileScanner4SeeDat) VisitNeedle(n *needle.Needle, offset int64, needleHeader, needleBody []byte) error { + t := time.Unix(int64(n.AppendAtNs)/int64(time.Second), int64(n.AppendAtNs)%int64(time.Second)) + glog.V(0).Info("----------------------------------------------------------------------------------") + glog.V(0).Infof("%d,%s%x offset %d size %d(%s) cookie %x appendedAt %v hasmime[%t] mime[%s] (len: %d)", + *volumeId, n.Id, n.Cookie, offset, n.Size, util.BytesToHumanReadable(uint64(n.Size)), n.Cookie, t, n.HasMime(), string(n.Mime), len(n.Mime)) + r, err := gzip.NewReader(bytes.NewReader(n.Data)) + if err == nil { + buf := bytes.Buffer{} + h := md5.New() + c, _ := io.Copy(&buf, r) + d := buf.Bytes() + io.Copy(h, bytes.NewReader(d)) + diff := (int64(n.DataSize) - int64(c)) + diffbytes += diff + glog.V(0).Infof("was gzip! stored_size: %d orig_size: %d diff: %d(%d) mime:%s compression-of-128: %.2f md5: %x", n.DataSize, c, diff, diffbytes, http.DetectContentType(d), Compresssion(d), h.Sum(nil)) + } else { + glog.V(0).Infof("no gzip!") + } + return nil +} + +var ( + _ = ioutil.ReadAll + volumePath = flag.String("dir", "/tmp", "data directory to store files") + volumeCollection = flag.String("collection", "", "the volume collection name") + volumeId = flag.Int("volumeId", -1, "a volume id. The volume should already exist in the dir. 
The volume index file should not exist.") +) + +func main() { + flag.Parse() + vid := needle.VolumeId(*volumeId) + glog.V(0).Info("Starting") + scanner := &VolumeFileScanner4SeeDat{} + err := storage.ScanVolumeFile(*volumePath, *volumeCollection, vid, storage.NeedleMapInMemory, scanner) + if err != nil { + glog.Fatalf("Reading Volume File [ERROR] %s\n", err) + } +} diff --git a/unmaintained/see_idx/see_idx.go b/unmaintained/see_idx/see_idx.go index 23ca04c2e..47cbd291b 100644 --- a/unmaintained/see_idx/see_idx.go +++ b/unmaintained/see_idx/see_idx.go @@ -3,12 +3,13 @@ package main import ( "flag" "fmt" + "github.com/chrislusf/seaweedfs/weed/util" "os" "path" "strconv" "github.com/chrislusf/seaweedfs/weed/glog" - "github.com/chrislusf/seaweedfs/weed/storage" + "github.com/chrislusf/seaweedfs/weed/storage/idx" "github.com/chrislusf/seaweedfs/weed/storage/types" ) @@ -35,8 +36,8 @@ func main() { } defer indexFile.Close() - storage.WalkIndexFile(indexFile, func(key types.NeedleId, offset types.Offset, size uint32) error { - fmt.Printf("key:%v offset:%v size:%v\n", key, offset, size) + idx.WalkIndexFile(indexFile, func(key types.NeedleId, offset types.Offset, size uint32) error { + fmt.Printf("key:%v offset:%v size:%v(%v)\n", key, offset, size, util.BytesToHumanReadable(uint64(size))) return nil }) diff --git a/unmaintained/see_log_entry/see_log_entry.go b/unmaintained/see_log_entry/see_log_entry.go new file mode 100644 index 000000000..34965f6be --- /dev/null +++ b/unmaintained/see_log_entry/see_log_entry.go @@ -0,0 +1,75 @@ +package main + +import ( + "flag" + "fmt" + "io" + "log" + "os" + + "github.com/golang/protobuf/proto" + + "github.com/chrislusf/seaweedfs/weed/filer2" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/util" +) + +var ( + logdataFile = flag.String("logdata", "", "log data file saved under "+ filer2.SystemLogDir) +) + +func main() { + flag.Parse() + + dst, err := os.OpenFile(*logdataFile, os.O_RDONLY, 0644) 
+ if err != nil { + log.Fatalf("failed to open %s: %v", *logdataFile, err) + } + defer dst.Close() + + err = walkLogEntryFile(dst) + if err != nil { + log.Fatalf("failed to visit %s: %v", *logdataFile, err) + } + +} + +func walkLogEntryFile(dst *os.File) error { + + sizeBuf := make([]byte, 4) + + for { + if n, err := dst.Read(sizeBuf); n != 4 { + if err == io.EOF { + return nil + } + return err + } + + size := util.BytesToUint32(sizeBuf) + + data := make([]byte, int(size)) + + if n, err := dst.Read(data); n != len(data) { + return err + } + + logEntry := &filer_pb.LogEntry{} + err := proto.Unmarshal(data, logEntry) + if err != nil { + log.Printf("unexpected unmarshal filer_pb.LogEntry: %v", err) + return nil + } + + event := &filer_pb.SubscribeMetadataResponse{} + err = proto.Unmarshal(logEntry.Data, event) + if err != nil { + log.Printf("unexpected unmarshal filer_pb.SubscribeMetadataResponse: %v", err) + return nil + } + + fmt.Printf("event: %+v\n", event) + + } + +} diff --git a/unmaintained/see_meta/see_meta.go b/unmaintained/see_meta/see_meta.go new file mode 100644 index 000000000..452badfd6 --- /dev/null +++ b/unmaintained/see_meta/see_meta.go @@ -0,0 +1,68 @@ +package main + +import ( + "flag" + "fmt" + "io" + "log" + "os" + + "github.com/golang/protobuf/proto" + + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/util" +) + +var ( + metaFile = flag.String("meta", "", "meta file generated via fs.meta.save") +) + +func main() { + flag.Parse() + + dst, err := os.OpenFile(*metaFile, os.O_RDONLY, 0644) + if err != nil { + log.Fatalf("failed to open %s: %v", *metaFile, err) + } + defer dst.Close() + + err = walkMetaFile(dst) + if err != nil { + log.Fatalf("failed to visit %s: %v", *metaFile, err) + } + +} + +func walkMetaFile(dst *os.File) error { + + sizeBuf := make([]byte, 4) + + for { + if n, err := dst.Read(sizeBuf); n != 4 { + if err == io.EOF { + return nil + } + return err + } + + size := util.BytesToUint32(sizeBuf) 
+ + data := make([]byte, int(size)) + + if n, err := dst.Read(data); n != len(data) { + return err + } + + fullEntry := &filer_pb.FullEntry{} + if err := proto.Unmarshal(data, fullEntry); err != nil { + return err + } + + fmt.Fprintf(os.Stdout, "file %s %v\n", util.FullPath(fullEntry.Dir).Child(fullEntry.Entry.Name), fullEntry.Entry.Attributes.String()) + for i, chunk := range fullEntry.Entry.Chunks { + fmt.Fprintf(os.Stdout, " chunk %d %v\n", i+1, chunk.String()) + } + + } + +} diff --git a/unmaintained/stress_filer_upload/bench_filer_upload/bench_filer_upload.go b/unmaintained/stress_filer_upload/bench_filer_upload/bench_filer_upload.go new file mode 100644 index 000000000..b2e4b28c6 --- /dev/null +++ b/unmaintained/stress_filer_upload/bench_filer_upload/bench_filer_upload.go @@ -0,0 +1,136 @@ +package main + +import ( + "bytes" + "flag" + "fmt" + "io" + "io/ioutil" + "log" + "math/rand" + "mime/multipart" + "net/http" + "os" + "strings" + "sync" + "time" +) + +var ( + size = flag.Int("size", 1024, "file size") + concurrency = flag.Int("c", 4, "concurrent number of uploads") + times = flag.Int("n", 1024, "repeated number of times") + fileCount = flag.Int("fileCount", 1, "number of files to write") + destination = flag.String("to", "http://localhost:8888/", "destination directory on filer") + + statsChan = make(chan stat, 8) +) + +type stat struct { + size int64 +} + +func main() { + + flag.Parse() + + data := make([]byte, *size) + println("data len", len(data)) + + var wg sync.WaitGroup + for x := 0; x < *concurrency; x++ { + wg.Add(1) + + go func(x int) { + defer wg.Done() + + client := &http.Client{Transport: &http.Transport{ + MaxConnsPerHost: 1024, + MaxIdleConnsPerHost: 1024, + }} + r := rand.New(rand.NewSource(time.Now().UnixNano() + int64(x))) + + for t := 0; t < *times; t++ { + for f := 0; f < *fileCount; f++ { + fn := r.Intn(*fileCount) + if size, err := uploadFileToFiler(client, data, fmt.Sprintf("file%04d", fn), *destination); err == nil { + statsChan 
<- stat{ + size: size, + } + } else { + log.Fatalf("client %d upload %d times: %v", x, t, err) + } + } + } + }(x) + } + + go func() { + ticker := time.NewTicker(1000 * time.Millisecond) + + var lastTime time.Time + var counter, size int64 + for { + select { + case stat := <-statsChan: + size += stat.size + counter++ + case x := <-ticker.C: + if !lastTime.IsZero() { + elapsed := x.Sub(lastTime).Seconds() + fmt.Fprintf(os.Stdout, "%.2f files/s, %.2f MB/s\n", + float64(counter)/elapsed, + float64(size/1024/1024)/elapsed) + } + lastTime = x + size = 0 + counter = 0 + } + } + }() + + wg.Wait() + +} + +func uploadFileToFiler(client *http.Client, data []byte, filename, destination string) (size int64, err error) { + + if !strings.HasSuffix(destination, "/") { + destination = destination + "/" + } + + body := &bytes.Buffer{} + writer := multipart.NewWriter(body) + part, err := writer.CreateFormFile("file", filename) + if err != nil { + return 0, fmt.Errorf("fail to create form %v: %v", filename, err) + } + + part.Write(data) + + err = writer.Close() + if err != nil { + return 0, fmt.Errorf("fail to write part %v: %v", filename, err) + } + + uri := destination + filename + + request, err := http.NewRequest("POST", uri, body) + request.Header.Set("Content-Type", writer.FormDataContentType()) + // request.Close = true // can not use this, which do not reuse http connection, impacting filer->volume also. 
+ + resp, err := client.Do(request) + if err != nil { + return 0, fmt.Errorf("http POST %s: %v", uri, err) + } else { + body := &bytes.Buffer{} + _, err := body.ReadFrom(resp.Body) + if err != nil { + return 0, fmt.Errorf("read http POST %s response: %v", uri, err) + } + io.Copy(ioutil.Discard, resp.Body) + resp.Body.Close() + } + + return int64(len(data)), nil +} diff --git a/unmaintained/stress_filer_upload/stress_filer_upload_actual/stress_filer_upload.go b/unmaintained/stress_filer_upload/stress_filer_upload_actual/stress_filer_upload.go new file mode 100644 index 000000000..8b986b546 --- /dev/null +++ b/unmaintained/stress_filer_upload/stress_filer_upload_actual/stress_filer_upload.go @@ -0,0 +1,150 @@ +package main + +import ( + "bytes" + "flag" + "fmt" + "io" + "io/ioutil" + "log" + "math/rand" + "mime/multipart" + "net/http" + "os" + "path/filepath" + "strings" + "sync" + "time" +) + +var ( + dir = flag.String("dir", ".", "upload files under this directory") + concurrency = flag.Int("c", 1, "concurrent number of uploads") + times = flag.Int("n", 1, "repeated number of times") + destination = flag.String("to", "http://localhost:8888/", "destination directory on filer") + + statsChan = make(chan stat, 8) +) + +type stat struct { + size int64 +} + +func main() { + + flag.Parse() + + var fileNames []string + + files, err := ioutil.ReadDir(*dir) + if err != nil { + log.Fatalf("fail to read dir %v: %v", *dir, err) + } + + for _, file := range files { + if file.IsDir() { + continue + } + fileNames = append(fileNames, filepath.Join(*dir, file.Name())) + } + + var wg sync.WaitGroup + for x := 0; x < *concurrency; x++ { + wg.Add(1) + + client := &http.Client{} + + go func() { + defer wg.Done() + rand.Shuffle(len(fileNames), func(i, j int) { + fileNames[i], fileNames[j] = fileNames[j], fileNames[i] + }) + for t := 0; t < *times; t++ { + for _, filename := range fileNames { + if size, err := uploadFileToFiler(client, filename, *destination); err == nil { + statsChan <- 
stat{ + size: size, + } + } + } + } + }() + } + + go func() { + ticker := time.NewTicker(500 * time.Millisecond) + + var lastTime time.Time + var counter, size int64 + for { + select { + case stat := <-statsChan: + size += stat.size + counter++ + case x := <-ticker.C: + if !lastTime.IsZero() { + elapsed := x.Sub(lastTime).Seconds() + fmt.Fprintf(os.Stdout, "%.2f files/s, %.2f MB/s\n", + float64(counter)/elapsed, + float64(size/1024/1024)/elapsed) + } + lastTime = x + size = 0 + counter = 0 + } + } + }() + + wg.Wait() + +} + +func uploadFileToFiler(client *http.Client, filename, destination string) (size int64, err error) { + file, err := os.Open(filename) + if err != nil { + panic(err) + } + defer file.Close() + + fi, err := file.Stat() + + if !strings.HasSuffix(destination, "/") { + destination = destination + "/" + } + + body := &bytes.Buffer{} + writer := multipart.NewWriter(body) + part, err := writer.CreateFormFile("file", file.Name()) + if err != nil { + return 0, fmt.Errorf("fail to create form %v: %v", file.Name(), err) + } + _, err = io.Copy(part, file) + if err != nil { + return 0, fmt.Errorf("fail to write part %v: %v", file.Name(), err) + } + + err = writer.Close() + if err != nil { + return 0, fmt.Errorf("fail to write part %v: %v", file.Name(), err) + } + + uri := destination + file.Name() + + request, err := http.NewRequest("POST", uri, body) + request.Header.Set("Content-Type", writer.FormDataContentType()) + + resp, err := client.Do(request) + if err != nil { + return 0, fmt.Errorf("http POST %s: %v", uri, err) + } else { + body := &bytes.Buffer{} + _, err := body.ReadFrom(resp.Body) + if err != nil { + return 0, fmt.Errorf("read http POST %s response: %v", uri, err) + } + io.Copy(ioutil.Discard, resp.Body) + resp.Body.Close() + } + + return fi.Size(), nil +} diff --git a/unmaintained/volume_tailer/volume_tailer.go b/unmaintained/volume_tailer/volume_tailer.go new file mode 100644 index 000000000..e93f1cc13 --- /dev/null +++ 
b/unmaintained/volume_tailer/volume_tailer.go @@ -0,0 +1,69 @@ +package main + +import ( + "flag" + "log" + "time" + + "github.com/chrislusf/seaweedfs/weed/operation" + "github.com/chrislusf/seaweedfs/weed/security" + "github.com/chrislusf/seaweedfs/weed/storage/needle" + util2 "github.com/chrislusf/seaweedfs/weed/util" + "golang.org/x/tools/godoc/util" +) + +var ( + master = flag.String("master", "localhost:9333", "master server host and port") + volumeId = flag.Int("volumeId", -1, "a volume id") + rewindDuration = flag.Duration("rewind", -1, "rewind back in time. -1 means from the first entry. 0 means from now.") + timeoutSeconds = flag.Int("timeoutSeconds", 0, "disconnect if no activity after these seconds") + showTextFile = flag.Bool("showTextFile", false, "display textual file content") +) + +func main() { + flag.Parse() + + util2.LoadConfiguration("security", false) + grpcDialOption := security.LoadClientTLS(util2.GetViper(), "grpc.client") + + vid := needle.VolumeId(*volumeId) + + var sinceTimeNs int64 + if *rewindDuration == 0 { + sinceTimeNs = time.Now().UnixNano() + } else if *rewindDuration == -1 { + sinceTimeNs = 0 + } else if *rewindDuration > 0 { + sinceTimeNs = time.Now().Add(-*rewindDuration).UnixNano() + } + + err := operation.TailVolume(*master, grpcDialOption, vid, uint64(sinceTimeNs), *timeoutSeconds, func(n *needle.Needle) (err error) { + if n.Size == 0 { + println("-", n.String()) + return nil + } else { + println("+", n.String()) + } + + if *showTextFile { + + data := n.Data + if n.IsCompressed() { + if data, err = util2.DecompressData(data); err != nil { + return err + } + } + if util.IsText(data) { + println(string(data)) + } + + println("-", n.String(), "compressed", n.IsCompressed(), "original size", len(data)) + } + return nil + }) + + if err != nil { + log.Printf("Error VolumeTailSender volume %d: %v", vid, err) + } + +} diff --git a/util/gostd b/util/gostd new file mode 100755 index 000000000..e9fc783d1 --- /dev/null +++ b/util/gostd 
@@ -0,0 +1,98 @@ +#!/usr/bin/env bash + +############################ GLOBAL VARIABLES +regex=' ' +branch="master" +max_length=150 + +REGEX_SUFFIX_GO=".+\.go$" + +############################ FUNCTIONS +msg() { + printf '%b' "$1" >&2 +} + +die() { + msg "\33[31m[✘]\33[0m ${1}${2}" + exit 1 +} + +succ() { + msg "\33[34m[√]\33[0m ${1}${2}" +} + +gostd() { + local branch=$1 + local reg4exclude=$2 + local max_length=$3 + + for file in `git diff $branch --name-only` + do + if ! [[ $file =~ $REGEX_SUFFIX_GO ]] || [[ $file =~ $reg4exclude ]]; then + continue + fi + + error=`go fmt $file 2>&1` + if ! [ $? -eq 0 ]; then + die "go fmt $file:" "$error" + fi + + succ "$file\n" + + grep -n -E --color=always ".{$max_length}" $file | awk '{ printf ("%4s %s\n", "", $0) }' + done +} + +get_options() { + while getopts "b:e:hl:" opts + do + case $opts in + b) + branch=$OPTARG + ;; + e) + regex=$OPTARG + ;; + h) + usage + exit 0 + ;; + l) + max_length=$OPTARG + ;; + \?) + usage + exit 1 + ;; + esac + done +} + +usage () { + cat << _EOC_ +Usage: + gostd [options] + +Options: + -b Specify the git diff branch or commit. + (default: master) + -e Regex for excluding file or directory. + -h Print this usage. + -l Show files that exceed the limit line length. 
+ (default: 150) + +Examples: + gostd + gostd -b master -l 100 + gostd -b 59d532a -e weed/pb -l 100 +_EOC_ +} + +main() { + get_options "$@" + + gostd "$branch" "$regex" "$max_length" +} + +############################ MAIN() +main "$@" diff --git a/weed/command/backup.go b/weed/command/backup.go index 072aea75b..615be80cf 100644 --- a/weed/command/backup.go +++ b/weed/command/backup.go @@ -3,6 +3,11 @@ package command import ( "fmt" + "github.com/chrislusf/seaweedfs/weed/security" + "github.com/chrislusf/seaweedfs/weed/storage/needle" + "github.com/chrislusf/seaweedfs/weed/storage/super_block" + "github.com/chrislusf/seaweedfs/weed/util" + "github.com/chrislusf/seaweedfs/weed/operation" "github.com/chrislusf/seaweedfs/weed/storage" ) @@ -12,10 +17,12 @@ var ( ) type BackupOptions struct { - master *string - collection *string - dir *string - volumeId *int + master *string + collection *string + dir *string + volumeId *int + ttl *string + replication *string } func init() { @@ -24,32 +31,45 @@ func init() { s.collection = cmdBackup.Flag.String("collection", "", "collection name") s.dir = cmdBackup.Flag.String("dir", ".", "directory to store volume data files") s.volumeId = cmdBackup.Flag.Int("volumeId", -1, "a volume id. The volume .dat and .idx files should already exist in the dir.") + s.ttl = cmdBackup.Flag.String("ttl", "", `backup volume's time to live, format: + 3m: 3 minutes + 4h: 4 hours + 5d: 5 days + 6w: 6 weeks + 7M: 7 months + 8y: 8 years + default is the same with origin`) + s.replication = cmdBackup.Flag.String("replication", "", "backup volume's replication, default is the same with origin") } var cmdBackup = &Command{ UsageLine: "backup -dir=. -volumeId=234 -server=localhost:9333", Short: "incrementally backup a volume to local folder", Long: `Incrementally backup volume data. - + It is expected that you use this inside a script, to loop through all possible volume ids that needs to be backup to local folder. 
- + The volume id does not need to exist locally or even remotely. This will help to backup future new volumes. - + Usually backing up is just copying the .dat (and .idx) files. But it's tricky to incrementally copy the differences. - + The complexity comes when there are multiple addition, deletion and compaction. - This tool will handle them correctly and efficiently, avoiding unnecessary data transporation. + This tool will handle them correctly and efficiently, avoiding unnecessary data transportation. `, } func runBackup(cmd *Command, args []string) bool { + + util.LoadConfiguration("security", false) + grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.client") + if *s.volumeId == -1 { return false } - vid := storage.VolumeId(*s.volumeId) + vid := needle.VolumeId(*s.volumeId) // find volume location, replication, ttl info lookup, err := operation.Lookup(*s.master, vid.String()) @@ -59,29 +79,73 @@ func runBackup(cmd *Command, args []string) bool { } volumeServer := lookup.Locations[0].Url - stats, err := operation.GetVolumeSyncStatus(volumeServer, uint32(vid)) + stats, err := operation.GetVolumeSyncStatus(volumeServer, grpcDialOption, uint32(vid)) if err != nil { fmt.Printf("Error get volume %d status: %v\n", vid, err) return true } - ttl, err := storage.ReadTTL(stats.Ttl) - if err != nil { - fmt.Printf("Error get volume %d ttl %s: %v\n", vid, stats.Ttl, err) - return true + var ttl *needle.TTL + if *s.ttl != "" { + ttl, err = needle.ReadTTL(*s.ttl) + if err != nil { + fmt.Printf("Error generate volume %d ttl %s: %v\n", vid, *s.ttl, err) + return true + } + } else { + ttl, err = needle.ReadTTL(stats.Ttl) + if err != nil { + fmt.Printf("Error get volume %d ttl %s: %v\n", vid, stats.Ttl, err) + return true + } } - replication, err := storage.NewReplicaPlacementFromString(stats.Replication) - if err != nil { - fmt.Printf("Error get volume %d replication %s : %v\n", vid, stats.Replication, err) - return true + var replication 
*super_block.ReplicaPlacement + if *s.replication != "" { + replication, err = super_block.NewReplicaPlacementFromString(*s.replication) + if err != nil { + fmt.Printf("Error generate volume %d replication %s : %v\n", vid, *s.replication, err) + return true + } + } else { + replication, err = super_block.NewReplicaPlacementFromString(stats.Replication) + if err != nil { + fmt.Printf("Error get volume %d replication %s : %v\n", vid, stats.Replication, err) + return true + } } - - v, err := storage.NewVolume(*s.dir, *s.collection, vid, storage.NeedleMapInMemory, replication, ttl, 0) + v, err := storage.NewVolume(*s.dir, *s.collection, vid, storage.NeedleMapInMemory, replication, ttl, 0, 0) if err != nil { fmt.Printf("Error creating or reading from volume %d: %v\n", vid, err) return true } - if err := v.Synchronize(volumeServer); err != nil { + if v.SuperBlock.CompactionRevision < uint16(stats.CompactRevision) { + if err = v.Compact2(30*1024*1024*1024, 0); err != nil { + fmt.Printf("Compact Volume before synchronizing %v\n", err) + return true + } + if err = v.CommitCompact(); err != nil { + fmt.Printf("Commit Compact before synchronizing %v\n", err) + return true + } + v.SuperBlock.CompactionRevision = uint16(stats.CompactRevision) + v.DataBackend.WriteAt(v.SuperBlock.Bytes(), 0) + } + + datSize, _, _ := v.FileStat() + + if datSize > stats.TailOffset { + // remove the old data + v.Destroy() + // recreate an empty volume + v, err = storage.NewVolume(*s.dir, *s.collection, vid, storage.NeedleMapInMemory, replication, ttl, 0, 0) + if err != nil { + fmt.Printf("Error creating or reading from volume %d: %v\n", vid, err) + return true + } + } + defer v.Close() + + if err := v.IncrementalBackup(volumeServer, grpcDialOption); err != nil { fmt.Printf("Error synchronizing volume %d: %v\n", vid, err) return true } diff --git a/weed/command/benchmark.go b/weed/command/benchmark.go index 60fd88ccd..4a9a9619a 100644 --- a/weed/command/benchmark.go +++ b/weed/command/benchmark.go 
@@ -2,7 +2,6 @@ package command import ( "bufio" - "context" "fmt" "io" "math" @@ -15,6 +14,8 @@ import ( "sync" "time" + "google.golang.org/grpc" + "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/operation" "github.com/chrislusf/seaweedfs/weed/security" @@ -33,15 +34,18 @@ type BenchmarkOptions struct { read *bool sequentialRead *bool collection *string + replication *string cpuprofile *string maxCpu *int - secretKey *string + grpcDialOption grpc.DialOption + masterClient *wdclient.MasterClient + fsync *bool } var ( - b BenchmarkOptions - sharedBytes []byte - masterClient *wdclient.MasterClient + b BenchmarkOptions + sharedBytes []byte + isSecure bool ) func init() { @@ -57,14 +61,15 @@ func init() { b.read = cmdBenchmark.Flag.Bool("read", true, "enable read") b.sequentialRead = cmdBenchmark.Flag.Bool("readSequentially", false, "randomly read by ids from \"-list\" specified file") b.collection = cmdBenchmark.Flag.String("collection", "benchmark", "write data to this collection") + b.replication = cmdBenchmark.Flag.String("replication", "000", "replication type") b.cpuprofile = cmdBenchmark.Flag.String("cpuprofile", "", "cpu profile output file") b.maxCpu = cmdBenchmark.Flag.Int("maxCpu", 0, "maximum number of CPUs. 0 means all available CPUs") - b.secretKey = cmdBenchmark.Flag.String("secure.secret", "", "secret to encrypt Json Web Token(JWT)") + b.fsync = cmdBenchmark.Flag.Bool("fsync", false, "flush data to disk after write") sharedBytes = make([]byte, 1024) } var cmdBenchmark = &Command{ - UsageLine: "benchmark -server=localhost:9333 -c=10 -n=100000", + UsageLine: "benchmark -master=localhost:9333 -c=10 -n=100000", Short: "benchmark on writing millions of files and read out", Long: `benchmark on an empty SeaweedFS file system. 
@@ -102,7 +107,11 @@ var ( ) func runBenchmark(cmd *Command, args []string) bool { - fmt.Printf("This is SeaweedFS version %s %s %s\n", util.VERSION, runtime.GOOS, runtime.GOARCH) + + util.LoadConfiguration("security", false) + b.grpcDialOption = security.LoadClientTLS(util.GetViper(), "grpc.client") + + fmt.Printf("This is SeaweedFS version %s %s %s\n", util.Version(), runtime.GOOS, runtime.GOARCH) if *b.maxCpu < 1 { *b.maxCpu = runtime.NumCPU() } @@ -116,9 +125,9 @@ func runBenchmark(cmd *Command, args []string) bool { defer pprof.StopCPUProfile() } - masterClient = wdclient.NewMasterClient(context.Background(), "benchmark", strings.Split(*b.masters, ",")) - go masterClient.KeepConnectedToMaster() - masterClient.WaitUntilConnected() + b.masterClient = wdclient.NewMasterClient(b.grpcDialOption, "client", "", 0, strings.Split(*b.masters, ",")) + go b.masterClient.KeepConnectedToMaster() + b.masterClient.WaitUntilConnected() if *b.write { benchWrite() @@ -188,7 +197,6 @@ func writeFiles(idChan chan int, fileIdLineChan chan string, s *stat) { defer wait.Done() delayedDeleteChan := make(chan *delayedFile, 100) var waitForDeletions sync.WaitGroup - secret := security.Secret(*b.secretKey) for i := 0; i < 7; i++ { waitForDeletions.Add(1) @@ -198,8 +206,11 @@ func writeFiles(idChan chan int, fileIdLineChan chan string, s *stat) { if df.enterTime.After(time.Now()) { time.Sleep(df.enterTime.Sub(time.Now())) } - if e := util.Delete("http://"+df.fp.Server+"/"+df.fp.Fid, - security.GenJwt(secret, df.fp.Fid)); e == nil { + var jwtAuthorization security.EncodedJwt + if isSecure { + jwtAuthorization = operation.LookupJwt(b.masterClient.GetMaster(), df.fp.Fid) + } + if e := util.Delete(fmt.Sprintf("http://%s/%s", df.fp.Server, df.fp.Fid), string(jwtAuthorization)); e == nil { s.completed++ } else { s.failed++ @@ -214,17 +225,22 @@ func writeFiles(idChan chan int, fileIdLineChan chan string, s *stat) { start := time.Now() fileSize := int64(*b.fileSize + random.Intn(64)) fp := 
&operation.FilePart{ - Reader: &FakeReader{id: uint64(id), size: fileSize}, + Reader: &FakeReader{id: uint64(id), size: fileSize, random: random}, FileSize: fileSize, MimeType: "image/bench", // prevent gzip benchmark content + Fsync: *b.fsync, } ar := &operation.VolumeAssignRequest{ - Count: 1, - Collection: *b.collection, + Count: 1, + Collection: *b.collection, + Replication: *b.replication, } - if assignResult, err := operation.Assign(masterClient.GetMaster(), ar); err == nil { + if assignResult, err := operation.Assign(b.masterClient.GetMaster(), b.grpcDialOption, ar); err == nil { fp.Server, fp.Fid, fp.Collection = assignResult.Url, assignResult.Fid, *b.collection - if _, err := fp.Upload(0, masterClient.GetMaster(), secret); err == nil { + if !isSecure && assignResult.Auth != "" { + isSecure = true + } + if _, err := fp.Upload(0, b.masterClient.GetMaster(), false, assignResult.Auth, b.grpcDialOption); err == nil { if random.Intn(100) < *b.deletePercentage { s.total++ delayedDeleteChan <- &delayedFile{time.Now().Add(time.Second), fp} @@ -264,19 +280,24 @@ func readFiles(fileIdLineChan chan string, s *stat) { fmt.Printf("reading file %s\n", fid) } start := time.Now() - url, err := masterClient.LookupFileId(fid) + var bytesRead int + var err error + url, err := b.masterClient.LookupFileId(fid) if err != nil { s.failed++ println("!!!! 
", fid, " location not found!!!!!") continue } - if bytesRead, err := util.Get(url); err == nil { + var bytes []byte + bytes, err = util.Get(url) + bytesRead = len(bytes) + if err == nil { s.completed++ - s.transferred += int64(len(bytesRead)) + s.transferred += int64(bytesRead) readStats.addSample(time.Now().Sub(start)) } else { s.failed++ - fmt.Printf("Failed to read %s error:%v\n", url, err) + fmt.Printf("Failed to read %s error:%v\n", fid, err) } } } @@ -338,7 +359,7 @@ func readFileIds(fileName string, fileIdLineChan chan string) { } const ( - benchResolution = 10000 //0.1 microsecond + benchResolution = 10000 // 0.1 microsecond benchBucket = 1000000000 / benchResolution ) @@ -461,7 +482,7 @@ func (s *stats) printStats() { fmt.Printf("\nConnection Times (ms)\n") fmt.Printf(" min avg max std\n") fmt.Printf("Total: %2.1f %3.1f %3.1f %3.1f\n", float32(min)/10, float32(avg)/10, float32(max)/10, std/10) - //printing percentiles + // printing percentiles fmt.Printf("\nPercentage of the requests served within a certain time (ms)\n") percentiles := make([]int, len(percentages)) for i := 0; i < len(percentages); i++ { @@ -495,8 +516,9 @@ func (s *stats) printStats() { // a fake reader to generate content to upload type FakeReader struct { - id uint64 // an id number - size int64 // max bytes + id uint64 // an id number + size int64 // max bytes + random *rand.Rand } func (l *FakeReader) Read(p []byte) (n int, err error) { @@ -512,6 +534,7 @@ func (l *FakeReader) Read(p []byte) (n int, err error) { for i := 0; i < 8; i++ { p[i] = byte(l.id >> uint(i*8)) } + l.random.Read(p[8:]) } l.size -= int64(n) return diff --git a/weed/command/command.go b/weed/command/command.go index 91b9bf3fc..9a41a8a7c 100644 --- a/weed/command/command.go +++ b/weed/command/command.go @@ -12,21 +12,23 @@ var Commands = []*Command{ cmdBackup, cmdCompact, cmdCopy, - cmdFix, - cmdFilerExport, + cmdDownload, + cmdExport, + cmdFiler, cmdFilerReplicate, - cmdServer, + cmdFix, cmdMaster, - cmdFiler, + 
cmdMount, cmdS3, - cmdUpload, - cmdDownload, + cmdMsgBroker, cmdScaffold, + cmdServer, cmdShell, + cmdWatch, + cmdUpload, cmdVersion, cmdVolume, - cmdExport, - cmdMount, + cmdWebDav, } type Command struct { diff --git a/weed/command/compact.go b/weed/command/compact.go index 0dd4efe0e..4e28aa725 100644 --- a/weed/command/compact.go +++ b/weed/command/compact.go @@ -3,6 +3,7 @@ package command import ( "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/storage" + "github.com/chrislusf/seaweedfs/weed/storage/needle" ) func init() { @@ -16,6 +17,9 @@ var cmdCompact = &Command{ The compacted .dat file is stored as .cpd file. The compacted .idx file is stored as .cpx file. + For method=0, it compacts based on the .dat file, works if .idx file is corrupted. + For method=1, it compacts based on the .idx file, works if deletion happened but not written to .dat files. + `, } @@ -35,18 +39,18 @@ func runCompact(cmd *Command, args []string) bool { preallocate := *compactVolumePreallocate * (1 << 20) - vid := storage.VolumeId(*compactVolumeId) + vid := needle.VolumeId(*compactVolumeId) v, err := storage.NewVolume(*compactVolumePath, *compactVolumeCollection, vid, - storage.NeedleMapInMemory, nil, nil, preallocate) + storage.NeedleMapInMemory, nil, nil, preallocate, 0) if err != nil { glog.Fatalf("Load Volume [ERROR] %s\n", err) } if *compactMethod == 0 { - if err = v.Compact(preallocate); err != nil { + if err = v.Compact(preallocate, 0); err != nil { glog.Fatalf("Compact Volume [ERROR] %s\n", err) } } else { - if err = v.Compact2(); err != nil { + if err = v.Compact2(preallocate, 0); err != nil { glog.Fatalf("Compact Volume [ERROR] %s\n", err) } } diff --git a/weed/command/download.go b/weed/command/download.go index b3e33defd..be0eb47e5 100644 --- a/weed/command/download.go +++ b/weed/command/download.go @@ -71,6 +71,7 @@ func downloadToFile(server, fileId, saveDir string) error { } f, err := os.OpenFile(path.Join(saveDir, filename), 
os.O_WRONLY|os.O_CREATE|os.O_TRUNC, os.ModePerm) if err != nil { + io.Copy(ioutil.Discard, rc) return err } defer f.Close() diff --git a/weed/command/export.go b/weed/command/export.go index 5c7e064ce..5d304b5a0 100644 --- a/weed/command/export.go +++ b/weed/command/export.go @@ -4,6 +4,7 @@ import ( "archive/tar" "bytes" "fmt" + "io" "os" "path" "path/filepath" @@ -14,8 +15,11 @@ import ( "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/storage" + "github.com/chrislusf/seaweedfs/weed/storage/needle" + "github.com/chrislusf/seaweedfs/weed/storage/needle_map" + "github.com/chrislusf/seaweedfs/weed/storage/super_block" "github.com/chrislusf/seaweedfs/weed/storage/types" - "io" + "github.com/chrislusf/seaweedfs/weed/util" ) const ( @@ -66,17 +70,17 @@ var ( localLocation, _ = time.LoadLocation("Local") ) -func printNeedle(vid storage.VolumeId, n *storage.Needle, version storage.Version, deleted bool) { - key := storage.NewFileIdFromNeedle(vid, n).String() +func printNeedle(vid needle.VolumeId, n *needle.Needle, version needle.Version, deleted bool) { + key := needle.NewFileIdFromNeedle(vid, n).String() size := n.DataSize - if version == storage.Version1 { + if version == needle.Version1 { size = n.Size } fmt.Printf("%s\t%s\t%d\t%t\t%s\t%s\t%s\t%t\n", key, n.Name, size, - n.IsGzipped(), + n.IsCompressed(), n.Mime, n.LastModifiedString(), n.Ttl.String(), @@ -85,14 +89,14 @@ func printNeedle(vid storage.VolumeId, n *storage.Needle, version storage.Versio } type VolumeFileScanner4Export struct { - version storage.Version + version needle.Version counter int - needleMap *storage.NeedleMap - vid storage.VolumeId + needleMap *needle_map.MemDb + vid needle.VolumeId } -func (scanner *VolumeFileScanner4Export) VisitSuperBlock(superBlock storage.SuperBlock) error { - scanner.version = superBlock.Version() +func (scanner *VolumeFileScanner4Export) VisitSuperBlock(superBlock super_block.SuperBlock) error { + scanner.version = superBlock.Version 
return nil } @@ -100,14 +104,14 @@ func (scanner *VolumeFileScanner4Export) ReadNeedleBody() bool { return true } -func (scanner *VolumeFileScanner4Export) VisitNeedle(n *storage.Needle, offset int64) error { +func (scanner *VolumeFileScanner4Export) VisitNeedle(n *needle.Needle, offset int64, needleHeader, needleBody []byte) error { needleMap := scanner.needleMap vid := scanner.vid nv, ok := needleMap.Get(n.Id) - glog.V(3).Infof("key %d offset %d size %d disk_size %d gzip %v ok %v nv %+v", - n.Id, offset, n.Size, n.DiskSize(scanner.version), n.IsGzipped(), ok, nv) - if ok && nv.Size > 0 && int64(nv.Offset)*types.NeedlePaddingSize == offset { + glog.V(3).Infof("key %d offset %d size %d disk_size %d compressed %v ok %v nv %+v", + n.Id, offset, n.Size, n.DiskSize(scanner.version), n.IsCompressed(), ok, nv) + if ok && nv.Size > 0 && nv.Size != types.TombstoneFileSize && nv.Offset.ToAcutalOffset() == offset { if newerThanUnix >= 0 && n.HasLastModifiedDate() && n.LastModified < uint64(newerThanUnix) { glog.V(3).Infof("Skipping this file, as it's old enough: LastModified %d vs %d", n.LastModified, newerThanUnix) @@ -189,16 +193,13 @@ func runExport(cmd *Command, args []string) bool { if *export.collection != "" { fileName = *export.collection + "_" + fileName } - vid := storage.VolumeId(*export.volumeId) - indexFile, err := os.OpenFile(path.Join(*export.dir, fileName+".idx"), os.O_RDONLY, 0644) - if err != nil { - glog.Fatalf("Create Volume Index [ERROR] %s\n", err) - } - defer indexFile.Close() + vid := needle.VolumeId(*export.volumeId) - needleMap, err := storage.LoadBtreeNeedleMap(indexFile) - if err != nil { - glog.Fatalf("cannot load needle map from %s: %s", indexFile.Name(), err) + needleMap := needle_map.NewMemDb() + defer needleMap.Close() + + if err := needleMap.LoadFromIdx(path.Join(*export.dir, fileName+".idx")); err != nil { + glog.Fatalf("cannot load needle map from %s.idx: %s", fileName, err) } volumeFileScanner := &VolumeFileScanner4Export{ @@ -225,8 
+226,8 @@ type nameParams struct { Ext string } -func writeFile(vid storage.VolumeId, n *storage.Needle) (err error) { - key := storage.NewFileIdFromNeedle(vid, n).String() +func writeFile(vid needle.VolumeId, n *needle.Needle) (err error) { + key := needle.NewFileIdFromNeedle(vid, n).String() fileNameTemplateBuffer.Reset() if err = fileNameTemplate.Execute(fileNameTemplateBuffer, nameParams{ @@ -242,8 +243,11 @@ func writeFile(vid storage.VolumeId, n *storage.Needle) (err error) { fileName := fileNameTemplateBuffer.String() - if n.IsGzipped() && path.Ext(fileName) != ".gz" { - fileName = fileName + ".gz" + if n.IsCompressed() { + if util.IsGzippedContent(n.Data) && path.Ext(fileName) != ".gz" { + fileName = fileName + ".gz" + } + // TODO other compression method } tarHeader.Name, tarHeader.Size = fileName, int64(len(n.Data)) diff --git a/weed/command/filer.go b/weed/command/filer.go index 0c1950f96..b52b01149 100644 --- a/weed/command/filer.go +++ b/weed/command/filer.go @@ -6,11 +6,14 @@ import ( "strings" "time" + "google.golang.org/grpc/reflection" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/security" "github.com/chrislusf/seaweedfs/weed/server" "github.com/chrislusf/seaweedfs/weed/util" - "google.golang.org/grpc/reflection" ) var ( @@ -20,18 +23,19 @@ var ( type FilerOptions struct { masters *string ip *string + bindIp *string port *int - grpcPort *int publicPort *int collection *string defaultReplicaPlacement *string - redirectOnRead *bool disableDirListing *bool maxMB *int - secretKey *string dirListingLimit *int dataCenter *string enableNotification *bool + disableHttp *bool + cipher *bool + peers *string // default leveldb directory, used in "weed server" mode defaultLevelDbDirectory *string @@ -41,17 +45,18 @@ func init() { cmdFiler.Run = runFiler // break init cycle f.masters = cmdFiler.Flag.String("master", 
"localhost:9333", "comma-separated master servers") f.collection = cmdFiler.Flag.String("collection", "", "all data will be stored in this collection") - f.ip = cmdFiler.Flag.String("ip", "", "filer server http listen ip address") + f.ip = cmdFiler.Flag.String("ip", util.DetectedHostAddress(), "filer server http listen ip address") + f.bindIp = cmdFiler.Flag.String("ip.bind", "0.0.0.0", "ip address to bind to") f.port = cmdFiler.Flag.Int("port", 8888, "filer server http listen port") - f.grpcPort = cmdFiler.Flag.Int("port.grpc", 0, "filer grpc server listen port, default to http port + 10000") - f.publicPort = cmdFiler.Flag.Int("port.public", 0, "port opened to public") + f.publicPort = cmdFiler.Flag.Int("port.readonly", 0, "readonly port opened to public") f.defaultReplicaPlacement = cmdFiler.Flag.String("defaultReplicaPlacement", "000", "default replication type if not specified") - f.redirectOnRead = cmdFiler.Flag.Bool("redirectOnRead", false, "whether proxy or redirect to volume server during file GET request") f.disableDirListing = cmdFiler.Flag.Bool("disableDirListing", false, "turn off directory listing") f.maxMB = cmdFiler.Flag.Int("maxMB", 32, "split files larger than the limit") - f.secretKey = cmdFiler.Flag.String("secure.secret", "", "secret to encrypt Json Web Token(JWT)") f.dirListingLimit = cmdFiler.Flag.Int("dirListLimit", 100000, "limit sub dir listing size") f.dataCenter = cmdFiler.Flag.String("dataCenter", "", "prefer to write to volumes in this data center") + f.disableHttp = cmdFiler.Flag.Bool("disableHttp", false, "disable http request, only gRpc operations are allowed") + f.cipher = cmdFiler.Flag.Bool("encryptVolumeData", false, "encrypt data on volume servers") + f.peers = cmdFiler.Flag.String("peers", "", "all filers sharing the same filer store in comma separated ip:port list") } var cmdFiler = &Command{ @@ -70,13 +75,15 @@ var cmdFiler = &Command{ The configuration file "filer.toml" is read from ".", "$HOME/.seaweedfs/", or 
"/etc/seaweedfs/", in that order. - The example filer.toml configuration file can be generated by "weed scaffold filer" + The example filer.toml configuration file can be generated by "weed scaffold -config=filer" `, } func runFiler(cmd *Command, args []string) bool { + util.LoadConfiguration("security", false) + f.startFiler() return true @@ -91,30 +98,38 @@ func (fo *FilerOptions) startFiler() { publicVolumeMux = http.NewServeMux() } - defaultLevelDbDirectory := "./filerdb" + defaultLevelDbDirectory := "./filerldb2" if fo.defaultLevelDbDirectory != nil { - defaultLevelDbDirectory = *fo.defaultLevelDbDirectory + "/filerdb" + defaultLevelDbDirectory = *fo.defaultLevelDbDirectory + "/filerldb2" + } + + var peers []string + if *fo.peers != "" { + peers = strings.Split(*fo.peers, ",") } fs, nfs_err := weed_server.NewFilerServer(defaultMux, publicVolumeMux, &weed_server.FilerOption{ - Masters: strings.Split(*f.masters, ","), + Masters: strings.Split(*fo.masters, ","), Collection: *fo.collection, DefaultReplication: *fo.defaultReplicaPlacement, - RedirectOnRead: *fo.redirectOnRead, DisableDirListing: *fo.disableDirListing, MaxMB: *fo.maxMB, - SecretKey: *fo.secretKey, DirListingLimit: *fo.dirListingLimit, DataCenter: *fo.dataCenter, DefaultLevelDbDir: defaultLevelDbDirectory, + DisableHttp: *fo.disableHttp, + Host: *fo.ip, + Port: uint32(*fo.port), + Cipher: *fo.cipher, + Filers: peers, }) if nfs_err != nil { glog.Fatalf("Filer startup error: %v", nfs_err) } if *fo.publicPort != 0 { - publicListeningAddress := *fo.ip + ":" + strconv.Itoa(*fo.publicPort) - glog.V(0).Infoln("Start Seaweed filer server", util.VERSION, "public at", publicListeningAddress) + publicListeningAddress := *fo.bindIp + ":" + strconv.Itoa(*fo.publicPort) + glog.V(0).Infoln("Start Seaweed filer server", util.Version(), "public at", publicListeningAddress) publicListener, e := util.NewListener(publicListeningAddress, 0) if e != nil { glog.Fatalf("Filer server public listener error on port %d:%v", 
*fo.publicPort, e) @@ -126,9 +141,9 @@ func (fo *FilerOptions) startFiler() { }() } - glog.V(0).Infof("Start Seaweed Filer %s at %s:%d", util.VERSION, *fo.ip, *fo.port) + glog.V(0).Infof("Start Seaweed Filer %s at %s:%d", util.Version(), *fo.ip, *fo.port) filerListener, e := util.NewListener( - ":"+strconv.Itoa(*fo.port), + *fo.bindIp+":"+strconv.Itoa(*fo.port), time.Duration(10)*time.Second, ) if e != nil { @@ -136,15 +151,12 @@ func (fo *FilerOptions) startFiler() { } // starting grpc server - grpcPort := *fo.grpcPort - if grpcPort == 0 { - grpcPort = *fo.port + 10000 - } + grpcPort := *fo.port + 10000 grpcL, err := util.NewListener(":"+strconv.Itoa(grpcPort), 0) if err != nil { glog.Fatalf("failed to listen on grpc port %d: %v", grpcPort, err) } - grpcS := util.NewGrpcServer() + grpcS := pb.NewGrpcServer(security.LoadServerTLS(util.GetViper(), "grpc.filer")) filer_pb.RegisterSeaweedFilerServer(grpcS, fs) reflection.Register(grpcS) go grpcS.Serve(grpcL) diff --git a/weed/command/filer_copy.go b/weed/command/filer_copy.go index 3638bcb27..2d6ba94d6 100644 --- a/weed/command/filer_copy.go +++ b/weed/command/filer_copy.go @@ -1,52 +1,62 @@ package command import ( + "context" "fmt" + "io" "io/ioutil" + "net/http" "net/url" "os" "path/filepath" + "strconv" "strings" + "sync" + "time" + + "google.golang.org/grpc" + + "github.com/chrislusf/seaweedfs/weed/util/grace" - "context" "github.com/chrislusf/seaweedfs/weed/operation" + "github.com/chrislusf/seaweedfs/weed/pb" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" "github.com/chrislusf/seaweedfs/weed/security" + "github.com/chrislusf/seaweedfs/weed/storage/needle" "github.com/chrislusf/seaweedfs/weed/util" - "io" - "net/http" - "strconv" - "time" + "github.com/chrislusf/seaweedfs/weed/wdclient" ) var ( - copy CopyOptions + copy CopyOptions + waitGroup sync.WaitGroup ) type CopyOptions struct { - filerGrpcPort *int - master *string - include *string - replication *string - collection *string - ttl *string - maxMB *int 
- secretKey *string - - secret security.Secret + include *string + replication *string + collection *string + ttl *string + maxMB *int + masterClient *wdclient.MasterClient + concurrenctFiles *int + concurrenctChunks *int + grpcDialOption grpc.DialOption + masters []string + cipher bool + ttlSec int32 } func init() { cmdCopy.Run = runCopy // break init cycle cmdCopy.IsDebug = cmdCopy.Flag.Bool("debug", false, "verbose debug information") - copy.master = cmdCopy.Flag.String("master", "localhost:9333", "SeaweedFS master location") copy.include = cmdCopy.Flag.String("include", "", "pattens of files to copy, e.g., *.pdf, *.html, ab?d.txt, works together with -dir") copy.replication = cmdCopy.Flag.String("replication", "", "replication type") copy.collection = cmdCopy.Flag.String("collection", "", "optional collection name") copy.ttl = cmdCopy.Flag.String("ttl", "", "time to live, e.g.: 1m, 1h, 1d, 1M, 1y") - copy.maxMB = cmdCopy.Flag.Int("maxMB", 0, "split files larger than the limit") - copy.filerGrpcPort = cmdCopy.Flag.Int("filer.port.grpc", 0, "filer grpc server listen port, default to filer port + 10000") - copy.secretKey = cmdCopy.Flag.String("secure.secret", "", "secret to encrypt Json Web Token(JWT)") + copy.maxMB = cmdCopy.Flag.Int("maxMB", 32, "split files larger than the limit") + copy.concurrenctFiles = cmdCopy.Flag.Int("c", 8, "concurrent file copy goroutines") + copy.concurrenctChunks = cmdCopy.Flag.Int("concurrentChunks", 8, "concurrent chunk copy goroutines for each file") } var cmdCopy = &Command{ @@ -66,7 +76,9 @@ var cmdCopy = &Command{ } func runCopy(cmd *Command, args []string) bool { - copy.secret = security.Secret(*copy.secretKey) + + util.LoadConfiguration("security", false) + if len(args) <= 1 { return false } @@ -96,221 +108,380 @@ func runCopy(cmd *Command, args []string) bool { } filerGrpcPort := filerPort + 10000 - if *copy.filerGrpcPort != 0 { - filerGrpcPort = uint64(*copy.filerGrpcPort) + filerGrpcAddress := fmt.Sprintf("%s:%d", 
filerUrl.Hostname(), filerGrpcPort) + copy.grpcDialOption = security.LoadClientTLS(util.GetViper(), "grpc.client") + + masters, collection, replication, maxMB, cipher, err := readFilerConfiguration(copy.grpcDialOption, filerGrpcAddress) + if err != nil { + fmt.Printf("read from filer %s: %v\n", filerGrpcAddress, err) + return false + } + if *copy.collection == "" { + *copy.collection = collection + } + if *copy.replication == "" { + *copy.replication = replication } + if *copy.maxMB == 0 { + *copy.maxMB = int(maxMB) + } + copy.masters = masters + copy.cipher = cipher - filerGrpcAddress := fmt.Sprintf("%s:%d", filerUrl.Hostname(), filerGrpcPort) + ttl, err := needle.ReadTTL(*copy.ttl) + if err != nil { + fmt.Printf("parsing ttl %s: %v\n", *copy.ttl, err) + return false + } + copy.ttlSec = int32(ttl.Minutes()) * 60 + + if *cmdCopy.IsDebug { + grace.SetupProfiling("filer.copy.cpu.pprof", "filer.copy.mem.pprof") + } + + fileCopyTaskChan := make(chan FileCopyTask, *copy.concurrenctFiles) - for _, fileOrDir := range fileOrDirs { - if !doEachCopy(fileOrDir, filerUrl.Host, filerGrpcAddress, urlPath) { - return false + go func() { + defer close(fileCopyTaskChan) + for _, fileOrDir := range fileOrDirs { + if err := genFileCopyTask(fileOrDir, urlPath, fileCopyTaskChan); err != nil { + fmt.Fprintf(os.Stderr, "gen file list error: %v\n", err) + break + } } + }() + for i := 0; i < *copy.concurrenctFiles; i++ { + waitGroup.Add(1) + go func() { + defer waitGroup.Done() + worker := FileCopyWorker{ + options: ©, + filerHost: filerUrl.Host, + filerGrpcAddress: filerGrpcAddress, + } + if err := worker.copyFiles(fileCopyTaskChan); err != nil { + fmt.Fprintf(os.Stderr, "copy file error: %v\n", err) + return + } + }() } + waitGroup.Wait() + return true } -func doEachCopy(fileOrDir string, filerAddress, filerGrpcAddress string, path string) bool { - f, err := os.Open(fileOrDir) - if err != nil { - fmt.Printf("Failed to open file %s: %v\n", fileOrDir, err) - return false - } - defer 
f.Close() +func readFilerConfiguration(grpcDialOption grpc.DialOption, filerGrpcAddress string) (masters []string, collection, replication string, maxMB uint32, cipher bool, err error) { + err = pb.WithGrpcFilerClient(filerGrpcAddress, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error { + resp, err := client.GetFilerConfiguration(context.Background(), &filer_pb.GetFilerConfigurationRequest{}) + if err != nil { + return fmt.Errorf("get filer %s configuration: %v", filerGrpcAddress, err) + } + masters, collection, replication, maxMB = resp.Masters, resp.Collection, resp.Replication, resp.MaxMb + cipher = resp.Cipher + return nil + }) + return +} + +func genFileCopyTask(fileOrDir string, destPath string, fileCopyTaskChan chan FileCopyTask) error { - fi, err := f.Stat() + fi, err := os.Stat(fileOrDir) if err != nil { - fmt.Printf("Failed to get stat for file %s: %v\n", fileOrDir, err) - return false + fmt.Fprintf(os.Stderr, "Failed to get stat for file %s: %v\n", fileOrDir, err) + return nil } mode := fi.Mode() if mode.IsDir() { files, _ := ioutil.ReadDir(fileOrDir) for _, subFileOrDir := range files { - if !doEachCopy(fileOrDir+"/"+subFileOrDir.Name(), filerAddress, filerGrpcAddress, path+fi.Name()+"/") { - return false + if err = genFileCopyTask(fileOrDir+"/"+subFileOrDir.Name(), destPath+fi.Name()+"/", fileCopyTaskChan); err != nil { + return err } } - return true + return nil } + uid, gid := util.GetFileUidGid(fi) + + fileCopyTaskChan <- FileCopyTask{ + sourceLocation: fileOrDir, + destinationUrlPath: destPath, + fileSize: fi.Size(), + fileMode: fi.Mode(), + uid: uid, + gid: gid, + } + + return nil +} + +type FileCopyWorker struct { + options *CopyOptions + filerHost string + filerGrpcAddress string +} + +func (worker *FileCopyWorker) copyFiles(fileCopyTaskChan chan FileCopyTask) error { + for task := range fileCopyTaskChan { + if err := worker.doEachCopy(task); err != nil { + return err + } + } + return nil +} + +type FileCopyTask struct { + 
sourceLocation string + destinationUrlPath string + fileSize int64 + fileMode os.FileMode + uid uint32 + gid uint32 +} + +func (worker *FileCopyWorker) doEachCopy(task FileCopyTask) error { + + f, err := os.Open(task.sourceLocation) + if err != nil { + fmt.Printf("Failed to open file %s: %v\n", task.sourceLocation, err) + if _, ok := err.(*os.PathError); ok { + fmt.Printf("skipping %s\n", task.sourceLocation) + return nil + } + return err + } + defer f.Close() + // this is a regular file - if *copy.include != "" { - if ok, _ := filepath.Match(*copy.include, filepath.Base(fileOrDir)); !ok { - return true + if *worker.options.include != "" { + if ok, _ := filepath.Match(*worker.options.include, filepath.Base(task.sourceLocation)); !ok { + return nil } } // find the chunk count - chunkSize := int64(*copy.maxMB * 1024 * 1024) + chunkSize := int64(*worker.options.maxMB * 1024 * 1024) chunkCount := 1 - if chunkSize > 0 && fi.Size() > chunkSize { - chunkCount = int(fi.Size()/chunkSize) + 1 + if chunkSize > 0 && task.fileSize > chunkSize { + chunkCount = int(task.fileSize/chunkSize) + 1 } if chunkCount == 1 { - return uploadFileAsOne(filerAddress, filerGrpcAddress, path, f, fi) + return worker.uploadFileAsOne(task, f) } - return uploadFileInChunks(filerAddress, filerGrpcAddress, path, f, fi, chunkCount, chunkSize) + return worker.uploadFileInChunks(task, f, chunkCount, chunkSize) } -func uploadFileAsOne(filerAddress, filerGrpcAddress string, urlFolder string, f *os.File, fi os.FileInfo) bool { +func (worker *FileCopyWorker) uploadFileAsOne(task FileCopyTask, f *os.File) error { // upload the file content fileName := filepath.Base(f.Name()) mimeType := detectMimeType(f) + data, err := ioutil.ReadAll(f) + if err != nil { + return err + } var chunks []*filer_pb.FileChunk + var assignResult *filer_pb.AssignVolumeResponse + var assignError error - if fi.Size() > 0 { + if task.fileSize > 0 { // assign a volume - assignResult, err := operation.Assign(*copy.master, 
&operation.VolumeAssignRequest{ - Count: 1, - Replication: *copy.replication, - Collection: *copy.collection, - Ttl: *copy.ttl, + err := pb.WithGrpcFilerClient(worker.filerGrpcAddress, worker.options.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error { + + request := &filer_pb.AssignVolumeRequest{ + Count: 1, + Replication: *worker.options.replication, + Collection: *worker.options.collection, + TtlSec: worker.options.ttlSec, + ParentPath: task.destinationUrlPath, + } + + assignResult, assignError = client.AssignVolume(context.Background(), request) + if assignError != nil { + return fmt.Errorf("assign volume failure %v: %v", request, assignError) + } + if assignResult.Error != "" { + return fmt.Errorf("assign volume failure %v: %v", request, assignResult.Error) + } + return nil }) if err != nil { - fmt.Printf("Failed to assign from %s: %v\n", *copy.master, err) + return fmt.Errorf("Failed to assign from %v: %v\n", worker.options.masters, err) } - targetUrl := "http://" + assignResult.Url + "/" + assignResult.Fid + targetUrl := "http://" + assignResult.Url + "/" + assignResult.FileId - uploadResult, err := operation.Upload(targetUrl, fileName, f, false, mimeType, nil, "") + uploadResult, err := operation.UploadData(targetUrl, fileName, worker.options.cipher, data, false, mimeType, nil, security.EncodedJwt(assignResult.Auth)) if err != nil { - fmt.Printf("upload data %v to %s: %v\n", fileName, targetUrl, err) - return false + return fmt.Errorf("upload data %v to %s: %v\n", fileName, targetUrl, err) } if uploadResult.Error != "" { - fmt.Printf("upload %v to %s result: %v\n", fileName, targetUrl, uploadResult.Error) - return false + return fmt.Errorf("upload %v to %s result: %v\n", fileName, targetUrl, uploadResult.Error) } fmt.Printf("uploaded %s to %s\n", fileName, targetUrl) - chunks = append(chunks, &filer_pb.FileChunk{ - FileId: assignResult.Fid, - Offset: 0, - Size: uint64(uploadResult.Size), - Mtime: time.Now().UnixNano(), - ETag: uploadResult.ETag, 
- }) + chunks = append(chunks, uploadResult.ToPbFileChunk(assignResult.FileId, 0)) - fmt.Printf("copied %s => http://%s%s%s\n", fileName, filerAddress, urlFolder, fileName) + fmt.Printf("copied %s => http://%s%s%s\n", fileName, worker.filerHost, task.destinationUrlPath, fileName) } - if err := withFilerClient(filerGrpcAddress, func(client filer_pb.SeaweedFilerClient) error { + if err := pb.WithGrpcFilerClient(worker.filerGrpcAddress, worker.options.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error { request := &filer_pb.CreateEntryRequest{ - Directory: urlFolder, + Directory: task.destinationUrlPath, Entry: &filer_pb.Entry{ Name: fileName, Attributes: &filer_pb.FuseAttributes{ Crtime: time.Now().Unix(), Mtime: time.Now().Unix(), - Gid: uint32(os.Getgid()), - Uid: uint32(os.Getuid()), - FileSize: uint64(fi.Size()), - FileMode: uint32(fi.Mode()), + Gid: task.gid, + Uid: task.uid, + FileSize: uint64(task.fileSize), + FileMode: uint32(task.fileMode), Mime: mimeType, - Replication: *copy.replication, - Collection: *copy.collection, - TtlSec: int32(util.ParseInt(*copy.ttl, 0)), + Replication: *worker.options.replication, + Collection: *worker.options.collection, + TtlSec: worker.options.ttlSec, }, Chunks: chunks, }, } - if _, err := client.CreateEntry(context.Background(), request); err != nil { + if err := filer_pb.CreateEntry(client, request); err != nil { return fmt.Errorf("update fh: %v", err) } return nil }); err != nil { - fmt.Printf("upload data %v to http://%s%s%s: %v\n", fileName, filerAddress, urlFolder, fileName, err) - return false + return fmt.Errorf("upload data %v to http://%s%s%s: %v\n", fileName, worker.filerHost, task.destinationUrlPath, fileName, err) } - return true + return nil } -func uploadFileInChunks(filerAddress, filerGrpcAddress string, urlFolder string, f *os.File, fi os.FileInfo, chunkCount int, chunkSize int64) bool { +func (worker *FileCopyWorker) uploadFileInChunks(task FileCopyTask, f *os.File, chunkCount int, chunkSize 
int64) error { fileName := filepath.Base(f.Name()) mimeType := detectMimeType(f) - var chunks []*filer_pb.FileChunk + chunksChan := make(chan *filer_pb.FileChunk, chunkCount) + + concurrentChunks := make(chan struct{}, *worker.options.concurrenctChunks) + var wg sync.WaitGroup + var uploadError error + var collection, replication string + + fmt.Printf("uploading %s in %d chunks ...\n", fileName, chunkCount) + for i := int64(0); i < int64(chunkCount) && uploadError == nil; i++ { + wg.Add(1) + concurrentChunks <- struct{}{} + go func(i int64) { + defer func() { + wg.Done() + <-concurrentChunks + }() + // assign a volume + var assignResult *filer_pb.AssignVolumeResponse + var assignError error + err := pb.WithGrpcFilerClient(worker.filerGrpcAddress, worker.options.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error { + request := &filer_pb.AssignVolumeRequest{ + Count: 1, + Replication: *worker.options.replication, + Collection: *worker.options.collection, + TtlSec: worker.options.ttlSec, + ParentPath: task.destinationUrlPath, + } + + assignResult, assignError = client.AssignVolume(context.Background(), request) + if assignError != nil { + return fmt.Errorf("assign volume failure %v: %v", request, assignError) + } + if assignResult.Error != "" { + return fmt.Errorf("assign volume failure %v: %v", request, assignResult.Error) + } + return nil + }) + if err != nil { + fmt.Printf("Failed to assign from %v: %v\n", worker.options.masters, err) + } + if err != nil { + fmt.Printf("Failed to assign from %v: %v\n", worker.options.masters, err) + } - for i := int64(0); i < int64(chunkCount); i++ { + targetUrl := "http://" + assignResult.Url + "/" + assignResult.FileId + if collection == "" { + collection = assignResult.Collection + } + if replication == "" { + replication = assignResult.Replication + } - // assign a volume - assignResult, err := operation.Assign(*copy.master, &operation.VolumeAssignRequest{ - Count: 1, - Replication: *copy.replication, - Collection: 
*copy.collection, - Ttl: *copy.ttl, - }) - if err != nil { - fmt.Printf("Failed to assign from %s: %v\n", *copy.master, err) - } + uploadResult, err, _ := operation.Upload(targetUrl, fileName+"-"+strconv.FormatInt(i+1, 10), worker.options.cipher, io.NewSectionReader(f, i*chunkSize, chunkSize), false, "", nil, security.EncodedJwt(assignResult.Auth)) + if err != nil { + uploadError = fmt.Errorf("upload data %v to %s: %v\n", fileName, targetUrl, err) + return + } + if uploadResult.Error != "" { + uploadError = fmt.Errorf("upload %v to %s result: %v\n", fileName, targetUrl, uploadResult.Error) + return + } + chunksChan <- uploadResult.ToPbFileChunk(assignResult.FileId, i*chunkSize) - targetUrl := "http://" + assignResult.Url + "/" + assignResult.Fid + fmt.Printf("uploaded %s-%d to %s [%d,%d)\n", fileName, i+1, targetUrl, i*chunkSize, i*chunkSize+int64(uploadResult.Size)) + }(i) + } + wg.Wait() + close(chunksChan) - uploadResult, err := operation.Upload(targetUrl, - fileName+"-"+strconv.FormatInt(i+1, 10), - io.LimitReader(f, chunkSize), - false, "application/octet-stream", nil, "") - if err != nil { - fmt.Printf("upload data %v to %s: %v\n", fileName, targetUrl, err) - return false - } - if uploadResult.Error != "" { - fmt.Printf("upload %v to %s result: %v\n", fileName, targetUrl, uploadResult.Error) - return false + var chunks []*filer_pb.FileChunk + for chunk := range chunksChan { + chunks = append(chunks, chunk) + } + + if uploadError != nil { + var fileIds []string + for _, chunk := range chunks { + fileIds = append(fileIds, chunk.FileId) } - chunks = append(chunks, &filer_pb.FileChunk{ - FileId: assignResult.Fid, - Offset: i * chunkSize, - Size: uint64(uploadResult.Size), - Mtime: time.Now().UnixNano(), - ETag: uploadResult.ETag, - }) - fmt.Printf("uploaded %s-%d to %s [%d,%d)\n", fileName, i+1, targetUrl, i*chunkSize, i*chunkSize+int64(uploadResult.Size)) + operation.DeleteFiles(copy.masters[0], false, worker.options.grpcDialOption, fileIds) + return uploadError 
} - if err := withFilerClient(filerGrpcAddress, func(client filer_pb.SeaweedFilerClient) error { + if err := pb.WithGrpcFilerClient(worker.filerGrpcAddress, worker.options.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error { request := &filer_pb.CreateEntryRequest{ - Directory: urlFolder, + Directory: task.destinationUrlPath, Entry: &filer_pb.Entry{ Name: fileName, Attributes: &filer_pb.FuseAttributes{ Crtime: time.Now().Unix(), Mtime: time.Now().Unix(), - Gid: uint32(os.Getgid()), - Uid: uint32(os.Getuid()), - FileSize: uint64(fi.Size()), - FileMode: uint32(fi.Mode()), + Gid: task.gid, + Uid: task.uid, + FileSize: uint64(task.fileSize), + FileMode: uint32(task.fileMode), Mime: mimeType, - Replication: *copy.replication, - Collection: *copy.collection, - TtlSec: int32(util.ParseInt(*copy.ttl, 0)), + Replication: replication, + Collection: collection, + TtlSec: worker.options.ttlSec, }, Chunks: chunks, }, } - if _, err := client.CreateEntry(context.Background(), request); err != nil { + if err := filer_pb.CreateEntry(client, request); err != nil { return fmt.Errorf("update fh: %v", err) } return nil }); err != nil { - fmt.Printf("upload data %v to http://%s%s%s: %v\n", fileName, filerAddress, urlFolder, fileName, err) - return false + return fmt.Errorf("upload data %v to http://%s%s%s: %v\n", fileName, worker.filerHost, task.destinationUrlPath, fileName, err) } - fmt.Printf("copied %s => http://%s%s%s\n", fileName, filerAddress, urlFolder, fileName) + fmt.Printf("copied %s => http://%s%s%s\n", fileName, worker.filerHost, task.destinationUrlPath, fileName) - return true + return nil } func detectMimeType(f *os.File) string { @@ -322,22 +493,12 @@ func detectMimeType(f *os.File) string { } if err != nil { fmt.Printf("read head of %v: %v\n", f.Name(), err) - return "application/octet-stream" + return "" } f.Seek(0, io.SeekStart) mimeType := http.DetectContentType(head[:n]) - return mimeType -} - -func withFilerClient(filerAddress string, fn 
func(filer_pb.SeaweedFilerClient) error) error { - - grpcConnection, err := util.GrpcDial(filerAddress) - if err != nil { - return fmt.Errorf("fail to dial %s: %v", filerAddress, err) + if mimeType == "application/octet-stream" { + return "" } - defer grpcConnection.Close() - - client := filer_pb.NewSeaweedFilerClient(grpcConnection) - - return fn(client) + return mimeType } diff --git a/weed/command/filer_export.go b/weed/command/filer_export.go deleted file mode 100644 index 7a2e7920a..000000000 --- a/weed/command/filer_export.go +++ /dev/null @@ -1,187 +0,0 @@ -package command - -import ( - "github.com/chrislusf/seaweedfs/weed/filer2" - "github.com/chrislusf/seaweedfs/weed/glog" - "github.com/chrislusf/seaweedfs/weed/notification" - "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" - "github.com/chrislusf/seaweedfs/weed/server" - "github.com/spf13/viper" -) - -func init() { - cmdFilerExport.Run = runFilerExport // break init cycle -} - -var cmdFilerExport = &Command{ - UsageLine: "filer.export -sourceStore=mysql -targetStore=cassandra", - Short: "export meta data in filer store", - Long: `Iterate the file tree and export all metadata out - - Both source and target store: - * should be a store name already specified in filer.toml - * do not need to be enabled state - - If target store is empty, only the directory tree will be listed. - - If target store is "notification", the list of entries will be sent to notification. - This is usually used to bootstrap filer replication to a remote system. - - `, -} - -var ( - // filerExportOutputFile = cmdFilerExport.Flag.String("output", "", "the output file. 
If empty, only list out the directory tree") - filerExportSourceStore = cmdFilerExport.Flag.String("sourceStore", "", "the source store name in filer.toml, default to currently enabled store") - filerExportTargetStore = cmdFilerExport.Flag.String("targetStore", "", "the target store name in filer.toml, or \"notification\" to export all files to message queue") - dir = cmdFilerExport.Flag.String("dir", "/", "only process files under this directory") - dirListLimit = cmdFilerExport.Flag.Int("dirListLimit", 100000, "limit directory list size") - dryRun = cmdFilerExport.Flag.Bool("dryRun", false, "not actually moving data") - verboseFilerExport = cmdFilerExport.Flag.Bool("v", false, "verbose entry details") -) - -type statistics struct { - directoryCount int - fileCount int -} - -func runFilerExport(cmd *Command, args []string) bool { - - weed_server.LoadConfiguration("filer", true) - config := viper.GetViper() - - var sourceStore, targetStore filer2.FilerStore - - for _, store := range filer2.Stores { - if store.GetName() == *filerExportSourceStore || *filerExportSourceStore == "" && config.GetBool(store.GetName()+".enabled") { - viperSub := config.Sub(store.GetName()) - if err := store.Initialize(viperSub); err != nil { - glog.Fatalf("Failed to initialize source store for %s: %+v", - store.GetName(), err) - } else { - sourceStore = store - } - break - } - } - - for _, store := range filer2.Stores { - if store.GetName() == *filerExportTargetStore { - viperSub := config.Sub(store.GetName()) - if err := store.Initialize(viperSub); err != nil { - glog.Fatalf("Failed to initialize target store for %s: %+v", - store.GetName(), err) - } else { - targetStore = store - } - break - } - } - - if sourceStore == nil { - glog.Errorf("Failed to find source store %s", *filerExportSourceStore) - println("existing data sources are:") - for _, store := range filer2.Stores { - println(" " + store.GetName()) - } - return false - } - - if targetStore == nil && *filerExportTargetStore != 
"" && *filerExportTargetStore != "notification" { - glog.Errorf("Failed to find target store %s", *filerExportTargetStore) - println("existing data sources are:") - for _, store := range filer2.Stores { - println(" " + store.GetName()) - } - return false - } - - stat := statistics{} - - var fn func(level int, entry *filer2.Entry) error - - if *filerExportTargetStore == "notification" { - weed_server.LoadConfiguration("notification", false) - v := viper.GetViper() - notification.LoadConfiguration(v.Sub("notification")) - - fn = func(level int, entry *filer2.Entry) error { - printout(level, entry) - if *dryRun { - return nil - } - return notification.Queue.SendMessage( - string(entry.FullPath), - &filer_pb.EventNotification{ - NewEntry: entry.ToProtoEntry(), - }, - ) - } - } else if targetStore == nil { - fn = printout - } else { - fn = func(level int, entry *filer2.Entry) error { - printout(level, entry) - if *dryRun { - return nil - } - return targetStore.InsertEntry(entry) - } - } - - doTraverse(&stat, sourceStore, filer2.FullPath(*dir), 0, fn) - - glog.Infof("processed %d directories, %d files", stat.directoryCount, stat.fileCount) - - return true -} - -func doTraverse(stat *statistics, filerStore filer2.FilerStore, parentPath filer2.FullPath, level int, fn func(level int, entry *filer2.Entry) error) { - - limit := *dirListLimit - lastEntryName := "" - for { - entries, err := filerStore.ListDirectoryEntries(parentPath, lastEntryName, false, limit) - if err != nil { - break - } - for _, entry := range entries { - if fnErr := fn(level, entry); fnErr != nil { - glog.Errorf("failed to process entry: %s", entry.FullPath) - } - if entry.IsDirectory() { - stat.directoryCount++ - doTraverse(stat, filerStore, entry.FullPath, level+1, fn) - } else { - stat.fileCount++ - } - } - if len(entries) < limit { - break - } - } -} - -func printout(level int, entry *filer2.Entry) error { - for i := 0; i < level; i++ { - if i == level-1 { - print("+-") - } else { - print("| ") - } - 
} - print(entry.FullPath.Name()) - if *verboseFilerExport { - for _, chunk := range entry.Chunks { - print("[") - print(chunk.FileId) - print(",") - print(chunk.Offset) - print(",") - print(chunk.Size) - print(")") - } - } - println() - return nil -} diff --git a/weed/command/filer_replication.go b/weed/command/filer_replication.go index 3384e4023..40f2b570b 100644 --- a/weed/command/filer_replication.go +++ b/weed/command/filer_replication.go @@ -1,6 +1,7 @@ package command import ( + "context" "strings" "github.com/chrislusf/seaweedfs/weed/glog" @@ -12,7 +13,7 @@ import ( _ "github.com/chrislusf/seaweedfs/weed/replication/sink/gcssink" _ "github.com/chrislusf/seaweedfs/weed/replication/sink/s3sink" "github.com/chrislusf/seaweedfs/weed/replication/sub" - "github.com/chrislusf/seaweedfs/weed/server" + "github.com/chrislusf/seaweedfs/weed/util" "github.com/spf13/viper" ) @@ -28,16 +29,17 @@ var cmdFilerReplicate = &Command{ filer.replicate listens on filer notifications. If any file is updated, it will fetch the updated content, and write to the other destination. - Run "weed scaffold -config replication" to generate a replication.toml file and customize the parameters. + Run "weed scaffold -config=replication" to generate a replication.toml file and customize the parameters. `, } func runFilerReplicate(cmd *Command, args []string) bool { - weed_server.LoadConfiguration("replication", true) - weed_server.LoadConfiguration("notification", true) - config := viper.GetViper() + util.LoadConfiguration("security", false) + util.LoadConfiguration("replication", true) + util.LoadConfiguration("notification", true) + config := util.GetViper() var notificationInput sub.NotificationInput @@ -45,8 +47,7 @@ func runFilerReplicate(cmd *Command, args []string) bool { for _, input := range sub.NotificationInputs { if config.GetBool("notification." + input.GetName() + ".enabled") { - viperSub := config.Sub("notification." 
+ input.GetName()) - if err := input.Initialize(viperSub); err != nil { + if err := input.Initialize(config, "notification."+input.GetName()+"."); err != nil { glog.Fatalf("Failed to initialize notification input for %s: %+v", input.GetName(), err) } @@ -64,10 +65,9 @@ func runFilerReplicate(cmd *Command, args []string) bool { // avoid recursive replication if config.GetBool("notification.source.filer.enabled") && config.GetBool("notification.sink.filer.enabled") { - sourceConfig, sinkConfig := config.Sub("source.filer"), config.Sub("sink.filer") - if sourceConfig.GetString("grpcAddress") == sinkConfig.GetString("grpcAddress") { - fromDir := sourceConfig.GetString("directory") - toDir := sinkConfig.GetString("directory") + if config.GetString("source.filer.grpcAddress") == config.GetString("sink.filer.grpcAddress") { + fromDir := config.GetString("source.filer.directory") + toDir := config.GetString("sink.filer.directory") if strings.HasPrefix(toDir, fromDir) { glog.Fatalf("recursive replication! source directory %s includes the sink directory %s", fromDir, toDir) } @@ -77,8 +77,7 @@ func runFilerReplicate(cmd *Command, args []string) bool { var dataSink sink.ReplicationSink for _, sk := range sink.Sinks { if config.GetBool("sink." + sk.GetName() + ".enabled") { - viperSub := config.Sub("sink." 
+ sk.GetName()) - if err := sk.Initialize(viperSub); err != nil { + if err := sk.Initialize(config, "sink."+sk.GetName()+"."); err != nil { glog.Fatalf("Failed to initialize sink for %s: %+v", sk.GetName(), err) } @@ -96,7 +95,7 @@ func runFilerReplicate(cmd *Command, args []string) bool { return true } - replicator := replication.NewReplicator(config.Sub("source.filer"), dataSink) + replicator := replication.NewReplicator(config, "source.filer.", dataSink) for { key, m, err := notificationInput.ReceiveMessage() @@ -115,14 +114,13 @@ func runFilerReplicate(cmd *Command, args []string) bool { } else { glog.V(1).Infof("modify: %s", key) } - if err = replicator.Replicate(key, m); err != nil { + if err = replicator.Replicate(context.Background(), key, m); err != nil { glog.Errorf("replicate %s: %+v", key, err) } else { glog.V(1).Infof("replicated %s", key) } } - return true } func validateOneEnabledInput(config *viper.Viper) { diff --git a/weed/command/fix.go b/weed/command/fix.go index a800978c6..223808f4b 100644 --- a/weed/command/fix.go +++ b/weed/command/fix.go @@ -7,6 +7,9 @@ import ( "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/storage" + "github.com/chrislusf/seaweedfs/weed/storage/needle" + "github.com/chrislusf/seaweedfs/weed/storage/needle_map" + "github.com/chrislusf/seaweedfs/weed/storage/super_block" "github.com/chrislusf/seaweedfs/weed/storage/types" ) @@ -29,12 +32,12 @@ var ( ) type VolumeFileScanner4Fix struct { - version storage.Version - nm *storage.NeedleMap + version needle.Version + nm *needle_map.MemDb } -func (scanner *VolumeFileScanner4Fix) VisitSuperBlock(superBlock storage.SuperBlock) error { - scanner.version = superBlock.Version() +func (scanner *VolumeFileScanner4Fix) VisitSuperBlock(superBlock super_block.SuperBlock) error { + scanner.version = superBlock.Version return nil } @@ -42,14 +45,14 @@ func (scanner *VolumeFileScanner4Fix) ReadNeedleBody() bool { return false } -func (scanner 
*VolumeFileScanner4Fix) VisitNeedle(n *storage.Needle, offset int64) error { - glog.V(2).Infof("key %d offset %d size %d disk_size %d gzip %v", n.Id, offset, n.Size, n.DiskSize(scanner.version), n.IsGzipped()) - if n.Size > 0 { - pe := scanner.nm.Put(n.Id, types.Offset(offset/types.NeedlePaddingSize), n.Size) +func (scanner *VolumeFileScanner4Fix) VisitNeedle(n *needle.Needle, offset int64, needleHeader, needleBody []byte) error { + glog.V(2).Infof("key %d offset %d size %d disk_size %d compressed %v", n.Id, offset, n.Size, n.DiskSize(scanner.version), n.IsCompressed()) + if n.Size > 0 && n.Size != types.TombstoneFileSize { + pe := scanner.nm.Set(n.Id, types.ToOffset(offset), n.Size) glog.V(2).Infof("saved %d with error %v", n.Size, pe) } else { glog.V(2).Infof("skipping deleted file ...") - return scanner.nm.Delete(n.Id, types.Offset(offset/types.NeedlePaddingSize)) + return scanner.nm.Delete(n.Id) } return nil } @@ -65,23 +68,22 @@ func runFix(cmd *Command, args []string) bool { baseFileName = *fixVolumeCollection + "_" + baseFileName } indexFileName := path.Join(*fixVolumePath, baseFileName+".idx") - indexFile, err := os.OpenFile(indexFileName, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644) - if err != nil { - glog.Fatalf("Create Volume Index [ERROR] %s\n", err) - } - defer indexFile.Close() - nm := storage.NewBtreeNeedleMap(indexFile) + nm := needle_map.NewMemDb() defer nm.Close() - vid := storage.VolumeId(*fixVolumeId) + vid := needle.VolumeId(*fixVolumeId) scanner := &VolumeFileScanner4Fix{ nm: nm, } - err = storage.ScanVolumeFile(*fixVolumePath, *fixVolumeCollection, vid, storage.NeedleMapInMemory, scanner) - if err != nil { - glog.Fatalf("Export Volume File [ERROR] %s\n", err) + if err := storage.ScanVolumeFile(*fixVolumePath, *fixVolumeCollection, vid, storage.NeedleMapInMemory, scanner); err != nil { + glog.Fatalf("scan .dat File: %v", err) + os.Remove(indexFileName) + } + + if err := nm.SaveToIdx(indexFileName); err != nil { + glog.Fatalf("save to .idx File: 
%v", err) os.Remove(indexFileName) } diff --git a/weed/command/master.go b/weed/command/master.go index bd2267b9e..21c759f4e 100644 --- a/weed/command/master.go +++ b/weed/command/master.go @@ -6,120 +6,149 @@ import ( "runtime" "strconv" "strings" - "time" + + "github.com/chrislusf/raft/protobuf" + "github.com/chrislusf/seaweedfs/weed/util/grace" + "github.com/gorilla/mux" + "google.golang.org/grpc/reflection" "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb" "github.com/chrislusf/seaweedfs/weed/pb/master_pb" + "github.com/chrislusf/seaweedfs/weed/security" "github.com/chrislusf/seaweedfs/weed/server" + "github.com/chrislusf/seaweedfs/weed/storage/backend" "github.com/chrislusf/seaweedfs/weed/util" - "github.com/gorilla/mux" - "google.golang.org/grpc/reflection" ) +var ( + m MasterOptions +) + +type MasterOptions struct { + port *int + ip *string + ipBind *string + metaFolder *string + peers *string + volumeSizeLimitMB *uint + volumePreallocate *bool + // pulseSeconds *int + defaultReplication *string + garbageThreshold *float64 + whiteList *string + disableHttp *bool + metricsAddress *string + metricsIntervalSec *int +} + func init() { cmdMaster.Run = runMaster // break init cycle + m.port = cmdMaster.Flag.Int("port", 9333, "http listen port") + m.ip = cmdMaster.Flag.String("ip", util.DetectedHostAddress(), "master | address") + m.ipBind = cmdMaster.Flag.String("ip.bind", "0.0.0.0", "ip address to bind to") + m.metaFolder = cmdMaster.Flag.String("mdir", os.TempDir(), "data directory to store meta data") + m.peers = cmdMaster.Flag.String("peers", "", "all master nodes in comma separated ip:port list, example: 127.0.0.1:9093,127.0.0.1:9094,127.0.0.1:9095") + m.volumeSizeLimitMB = cmdMaster.Flag.Uint("volumeSizeLimitMB", 30*1000, "Master stops directing writes to oversized volumes.") + m.volumePreallocate = cmdMaster.Flag.Bool("volumePreallocate", false, "Preallocate disk space for volumes.") + // m.pulseSeconds = 
cmdMaster.Flag.Int("pulseSeconds", 5, "number of seconds between heartbeats") + m.defaultReplication = cmdMaster.Flag.String("defaultReplication", "000", "Default replication type if not specified.") + m.garbageThreshold = cmdMaster.Flag.Float64("garbageThreshold", 0.3, "threshold to vacuum and reclaim spaces") + m.whiteList = cmdMaster.Flag.String("whiteList", "", "comma separated Ip addresses having write permission. No limit if empty.") + m.disableHttp = cmdMaster.Flag.Bool("disableHttp", false, "disable http requests, only gRPC operations are allowed.") + m.metricsAddress = cmdMaster.Flag.String("metrics.address", "", "Prometheus gateway address") + m.metricsIntervalSec = cmdMaster.Flag.Int("metrics.intervalSeconds", 15, "Prometheus push interval in seconds") } var cmdMaster = &Command{ UsageLine: "master -port=9333", Short: "start a master server", - Long: `start a master server to provide volume=>location mapping service - and sequence number of file ids + Long: `start a master server to provide volume=>location mapping service and sequence number of file ids + + The configuration file "security.toml" is read from ".", "$HOME/.seaweedfs/", or "/etc/seaweedfs/", in that order. 
+ + The example security.toml configuration file can be generated by "weed scaffold -config=security" `, } var ( - mport = cmdMaster.Flag.Int("port", 9333, "http listen port") - mGrpcPort = cmdMaster.Flag.Int("port.grpc", 0, "grpc server listen port, default to http port + 10000") - masterIp = cmdMaster.Flag.String("ip", "localhost", "master | address") - masterBindIp = cmdMaster.Flag.String("ip.bind", "0.0.0.0", "ip address to bind to") - metaFolder = cmdMaster.Flag.String("mdir", os.TempDir(), "data directory to store meta data") - masterPeers = cmdMaster.Flag.String("peers", "", "all master nodes in comma separated ip:port list, example: 127.0.0.1:9093,127.0.0.1:9094") - volumeSizeLimitMB = cmdMaster.Flag.Uint("volumeSizeLimitMB", 30*1000, "Master stops directing writes to oversized volumes.") - volumePreallocate = cmdMaster.Flag.Bool("volumePreallocate", false, "Preallocate disk space for volumes.") - mpulse = cmdMaster.Flag.Int("pulseSeconds", 5, "number of seconds between heartbeats") - defaultReplicaPlacement = cmdMaster.Flag.String("defaultReplication", "000", "Default replication type if not specified.") - // mTimeout = cmdMaster.Flag.Int("idleTimeout", 30, "connection idle seconds") - mMaxCpu = cmdMaster.Flag.Int("maxCpu", 0, "maximum number of CPUs. 0 means all available CPUs") - garbageThreshold = cmdMaster.Flag.Float64("garbageThreshold", 0.3, "threshold to vacuum and reclaim spaces") - masterWhiteListOption = cmdMaster.Flag.String("whiteList", "", "comma separated Ip addresses having write permission. 
No limit if empty.") - masterSecureKey = cmdMaster.Flag.String("secure.secret", "", "secret to encrypt Json Web Token(JWT)") - masterCpuProfile = cmdMaster.Flag.String("cpuprofile", "", "cpu profile output file") - masterMemProfile = cmdMaster.Flag.String("memprofile", "", "memory profile output file") - - masterWhiteList []string + masterCpuProfile = cmdMaster.Flag.String("cpuprofile", "", "cpu profile output file") + masterMemProfile = cmdMaster.Flag.String("memprofile", "", "memory profile output file") ) func runMaster(cmd *Command, args []string) bool { - if *mMaxCpu < 1 { - *mMaxCpu = runtime.NumCPU() - } - runtime.GOMAXPROCS(*mMaxCpu) - util.SetupProfiling(*masterCpuProfile, *masterMemProfile) - if err := util.TestFolderWritable(*metaFolder); err != nil { - glog.Fatalf("Check Meta Folder (-mdir) Writable %s : %s", *metaFolder, err) + util.LoadConfiguration("security", false) + util.LoadConfiguration("master", false) + + runtime.GOMAXPROCS(runtime.NumCPU()) + grace.SetupProfiling(*masterCpuProfile, *masterMemProfile) + + if err := util.TestFolderWritable(*m.metaFolder); err != nil { + glog.Fatalf("Check Meta Folder (-mdir) Writable %s : %s", *m.metaFolder, err) } - if *masterWhiteListOption != "" { - masterWhiteList = strings.Split(*masterWhiteListOption, ",") + + var masterWhiteList []string + if *m.whiteList != "" { + masterWhiteList = strings.Split(*m.whiteList, ",") } - if *volumeSizeLimitMB > 30*1000 { + if *m.volumeSizeLimitMB > util.VolumeSizeLimitGB*1000 { glog.Fatalf("volumeSizeLimitMB should be smaller than 30000") } - r := mux.NewRouter() - ms := weed_server.NewMasterServer(r, *mport, *metaFolder, - *volumeSizeLimitMB, *volumePreallocate, - *mpulse, *defaultReplicaPlacement, *garbageThreshold, - masterWhiteList, *masterSecureKey, - ) + startMaster(m, masterWhiteList) + + return true +} - listeningAddress := *masterBindIp + ":" + strconv.Itoa(*mport) +func startMaster(masterOption MasterOptions, masterWhiteList []string) { - glog.V(0).Infoln("Start 
Seaweed Master", util.VERSION, "at", listeningAddress) + backend.LoadConfiguration(util.GetViper()) + myMasterAddress, peers := checkPeers(*masterOption.ip, *masterOption.port, *masterOption.peers) + + r := mux.NewRouter() + ms := weed_server.NewMasterServer(r, masterOption.toMasterOption(masterWhiteList), peers) + listeningAddress := *masterOption.ipBind + ":" + strconv.Itoa(*masterOption.port) + glog.V(0).Infof("Start Seaweed Master %s at %s", util.Version(), listeningAddress) masterListener, e := util.NewListener(listeningAddress, 0) if e != nil { glog.Fatalf("Master startup error: %v", e) } + // start raftServer + raftServer := weed_server.NewRaftServer(security.LoadClientTLS(util.GetViper(), "grpc.master"), + peers, myMasterAddress, *masterOption.metaFolder, ms.Topo, 5) + if raftServer == nil { + glog.Fatalf("please verify %s is writable, see https://github.com/chrislusf/seaweedfs/issues/717", *masterOption.metaFolder) + } + ms.SetRaftServer(raftServer) + r.HandleFunc("/cluster/status", raftServer.StatusHandler).Methods("GET") + // starting grpc server + grpcPort := *masterOption.port + 10000 + grpcL, err := util.NewListener(*masterOption.ipBind+":"+strconv.Itoa(grpcPort), 0) + if err != nil { + glog.Fatalf("master failed to listen on grpc port %d: %v", grpcPort, err) + } + // Create your protocol servers. 
+ grpcS := pb.NewGrpcServer(security.LoadServerTLS(util.GetViper(), "grpc.master")) + master_pb.RegisterSeaweedServer(grpcS, ms) + protobuf.RegisterRaftServer(grpcS, raftServer) + reflection.Register(grpcS) + glog.V(0).Infof("Start Seaweed Master %s grpc server at %s:%d", util.Version(), *masterOption.ipBind, grpcPort) + go grpcS.Serve(grpcL) - go func() { - time.Sleep(100 * time.Millisecond) - myMasterAddress, peers := checkPeers(*masterIp, *mport, *masterPeers) - raftServer := weed_server.NewRaftServer(r, peers, myMasterAddress, *metaFolder, ms.Topo, *mpulse) - ms.SetRaftServer(raftServer) - }() - - go func() { - // starting grpc server - grpcPort := *mGrpcPort - if grpcPort == 0 { - grpcPort = *mport + 10000 - } - grpcL, err := util.NewListener(*masterBindIp+":"+strconv.Itoa(grpcPort), 0) - if err != nil { - glog.Fatalf("master failed to listen on grpc port %d: %v", grpcPort, err) - } - // Create your protocol servers. - grpcS := util.NewGrpcServer() - master_pb.RegisterSeaweedServer(grpcS, ms) - reflection.Register(grpcS) - - glog.V(0).Infof("Start Seaweed Master %s grpc server at %s:%d", util.VERSION, *masterBindIp, grpcPort) - grpcS.Serve(grpcL) - }() + go ms.MasterClient.KeepConnectedToMaster() // start http server httpS := &http.Server{Handler: r} - if err := httpS.Serve(masterListener); err != nil { - glog.Fatalf("master server failed to serve: %v", err) - } + go httpS.Serve(masterListener) - return true + select {} } func checkPeers(masterIp string, masterPort int, peers string) (masterAddress string, cleanedPeers []string) { + glog.V(0).Infof("current: %s:%d peers:%s", masterIp, masterPort, peers) masterAddress = masterIp + ":" + strconv.Itoa(masterPort) if peers != "" { cleanedPeers = strings.Split(peers, ",") @@ -133,12 +162,28 @@ func checkPeers(masterIp string, masterPort int, peers string) (masterAddress st } } - peerCount := len(cleanedPeers) if !hasSelf { - peerCount += 1 + cleanedPeers = append(cleanedPeers, masterAddress) } - if peerCount%2 == 0 
{ + if len(cleanedPeers)%2 == 0 { glog.Fatalf("Only odd number of masters are supported!") } return } + +func (m *MasterOptions) toMasterOption(whiteList []string) *weed_server.MasterOption { + return &weed_server.MasterOption{ + Host: *m.ip, + Port: *m.port, + MetaFolder: *m.metaFolder, + VolumeSizeLimitMB: *m.volumeSizeLimitMB, + VolumePreallocate: *m.volumePreallocate, + // PulseSeconds: *m.pulseSeconds, + DefaultReplicaPlacement: *m.defaultReplication, + GarbageThreshold: *m.garbageThreshold, + WhiteList: whiteList, + DisableHttp: *m.disableHttp, + MetricsAddress: *m.metricsAddress, + MetricsIntervalSec: *m.metricsIntervalSec, + } +} diff --git a/weed/command/mount.go b/weed/command/mount.go index e61f16783..440aca8c6 100644 --- a/weed/command/mount.go +++ b/weed/command/mount.go @@ -1,22 +1,26 @@ package command import ( - "fmt" - "strconv" - "strings" + "os" ) type MountOptions struct { - filer *string - filerGrpcPort *int - filerMountRootPath *string - dir *string - dirListingLimit *int - collection *string - replication *string - ttlSec *int - chunkSizeLimitMB *int - dataCenter *string + filer *string + filerMountRootPath *string + dir *string + dirAutoCreate *bool + dirListCacheLimit *int64 + collection *string + replication *string + ttlSec *int + chunkSizeLimitMB *int + cacheDir *string + cacheSizeMB *int64 + dataCenter *string + allowOthers *bool + umaskString *string + nonempty *bool + outsideContainerClusterMode *bool } var ( @@ -28,17 +32,23 @@ var ( func init() { cmdMount.Run = runMount // break init cycle mountOptions.filer = cmdMount.Flag.String("filer", "localhost:8888", "weed filer location") - mountOptions.filerGrpcPort = cmdMount.Flag.Int("filer.grpc.port", 0, "filer grpc server listen port, default to http port + 10000") mountOptions.filerMountRootPath = cmdMount.Flag.String("filer.path", "/", "mount this remote path from filer server") mountOptions.dir = cmdMount.Flag.String("dir", ".", "mount weed filer to this directory") - 
mountOptions.dirListingLimit = cmdMount.Flag.Int("dirListLimit", 100000, "limit directory listing size") + mountOptions.dirAutoCreate = cmdMount.Flag.Bool("dirAutoCreate", false, "auto create the directory to mount to") + mountOptions.dirListCacheLimit = cmdMount.Flag.Int64("dirListCacheLimit", 1000000, "limit cache size to speed up directory long format listing") mountOptions.collection = cmdMount.Flag.String("collection", "", "collection to create the files") mountOptions.replication = cmdMount.Flag.String("replication", "", "replication(e.g. 000, 001) to create to files. If empty, let filer decide.") mountOptions.ttlSec = cmdMount.Flag.Int("ttl", 0, "file ttl in seconds") - mountOptions.chunkSizeLimitMB = cmdMount.Flag.Int("chunkSizeLimitMB", 4, "local write buffer size, also chunk large files") + mountOptions.chunkSizeLimitMB = cmdMount.Flag.Int("chunkSizeLimitMB", 16, "local write buffer size, also chunk large files") + mountOptions.cacheDir = cmdMount.Flag.String("cacheDir", os.TempDir(), "local cache directory for file chunks and meta data") + mountOptions.cacheSizeMB = cmdMount.Flag.Int64("cacheCapacityMB", 1000, "local file chunk cache capacity in MB (0 will disable cache)") mountOptions.dataCenter = cmdMount.Flag.String("dataCenter", "", "prefer to write to the data center") + mountOptions.allowOthers = cmdMount.Flag.Bool("allowOthers", true, "allows other users to access the file system") + mountOptions.umaskString = cmdMount.Flag.String("umask", "022", "octal umask, e.g., 022, 0111") + mountOptions.nonempty = cmdMount.Flag.Bool("nonempty", false, "allows the mounting over a non-empty directory") mountCpuProfile = cmdMount.Flag.String("cpuprofile", "", "cpu profile output file") mountMemProfile = cmdMount.Flag.String("memprofile", "", "memory profile output file") + mountOptions.outsideContainerClusterMode = cmdMount.Flag.Bool("outsideContainerClusterMode", false, "allows other users to access the file system") } var cmdMount = &Command{ @@ -56,24 +66,11 
@@ var cmdMount = &Command{ On OS X, it requires OSXFUSE (http://osxfuse.github.com/). - `, -} - -func parseFilerGrpcAddress(filer string, optionalGrpcPort int) (filerGrpcAddress string, err error) { - hostnameAndPort := strings.Split(filer, ":") - if len(hostnameAndPort) != 2 { - return "", fmt.Errorf("The filer should have hostname:port format: %v", hostnameAndPort) - } + If the SeaweedFS system runs in a container cluster, e.g. managed by kubernetes or docker compose, + the volume servers are not accessible by their own ip addresses. + In "outsideContainerClusterMode", the mount will use the filer ip address instead, assuming: + * All volume server containers are accessible through the same hostname or IP address as the filer. + * All volume server container ports are open external to the cluster. - filerPort, parseErr := strconv.ParseUint(hostnameAndPort[1], 10, 64) - if parseErr != nil { - return "", fmt.Errorf("The filer filer port parse error: %v", parseErr) - } - - filerGrpcPort := int(filerPort) + 10000 - if optionalGrpcPort != 0 { - filerGrpcPort = optionalGrpcPort - } - - return fmt.Sprintf("%s:%d", hostnameAndPort[0], filerGrpcPort), nil + `, } diff --git a/weed/command/mount_darwin.go b/weed/command/mount_darwin.go new file mode 100644 index 000000000..f0a5581e7 --- /dev/null +++ b/weed/command/mount_darwin.go @@ -0,0 +1,13 @@ +package command + +import ( + "github.com/seaweedfs/fuse" +) + +func osSpecificMountOptions() []fuse.MountOption { + return []fuse.MountOption{} +} + +func checkMountPointAvailable(dir string) bool { + return true +} diff --git a/weed/command/mount_freebsd.go b/weed/command/mount_freebsd.go new file mode 100644 index 000000000..f0a5581e7 --- /dev/null +++ b/weed/command/mount_freebsd.go @@ -0,0 +1,13 @@ +package command + +import ( + "github.com/seaweedfs/fuse" +) + +func osSpecificMountOptions() []fuse.MountOption { + return []fuse.MountOption{} +} + +func checkMountPointAvailable(dir string) bool { + return true +} diff --git 
a/weed/command/mount_linux.go b/weed/command/mount_linux.go new file mode 100644 index 000000000..25c4f72cf --- /dev/null +++ b/weed/command/mount_linux.go @@ -0,0 +1,155 @@ +package command + +import ( + "bufio" + "fmt" + "io" + "os" + "strings" + + "github.com/seaweedfs/fuse" +) + +const ( + /* 36 35 98:0 /mnt1 /mnt2 rw,noatime master:1 - ext3 /dev/root rw,errors=continue + (1)(2)(3) (4) (5) (6) (7) (8) (9) (10) (11) + + (1) mount ID: unique identifier of the mount (may be reused after umount) + (2) parent ID: ID of parent (or of self for the top of the mount tree) + (3) major:minor: value of st_dev for files on filesystem + (4) root: root of the mount within the filesystem + (5) mount point: mount point relative to the process's root + (6) mount options: per mount options + (7) optional fields: zero or more fields of the form "tag[:value]" + (8) separator: marks the end of the optional fields + (9) filesystem type: name of filesystem of the form "type[.subtype]" + (10) mount source: filesystem specific information or "none" + (11) super options: per super block options*/ + mountinfoFormat = "%d %d %d:%d %s %s %s %s" +) + +// Info reveals information about a particular mounted filesystem. This +// struct is populated from the content in the /proc//mountinfo file. +type Info struct { + // ID is a unique identifier of the mount (may be reused after umount). + ID int + + // Parent indicates the ID of the mount parent (or of self for the top of the + // mount tree). + Parent int + + // Major indicates one half of the device ID which identifies the device class. + Major int + + // Minor indicates one half of the device ID which identifies a specific + // instance of device. + Minor int + + // Root of the mount within the filesystem. + Root string + + // Mountpoint indicates the mount point relative to the process's root. + Mountpoint string + + // Opts represents mount-specific options. + Opts string + + // Optional represents optional fields. 
+ Optional string + + // Fstype indicates the type of filesystem, such as EXT3. + Fstype string + + // Source indicates filesystem specific information or "none". + Source string + + // VfsOpts represents per super block options. + VfsOpts string +} + +// Mounted determines if a specified mountpoint has been mounted. +// On Linux it looks at /proc/self/mountinfo and on Solaris at mnttab. +func mounted(mountPoint string) (bool, error) { + entries, err := parseMountTable() + if err != nil { + return false, err + } + + // Search the table for the mountPoint + for _, e := range entries { + if e.Mountpoint == mountPoint { + return true, nil + } + } + return false, nil +} + +// Parse /proc/self/mountinfo because comparing Dev and ino does not work from +// bind mounts +func parseMountTable() ([]*Info, error) { + f, err := os.Open("/proc/self/mountinfo") + if err != nil { + return nil, err + } + defer f.Close() + + return parseInfoFile(f) +} + +func parseInfoFile(r io.Reader) ([]*Info, error) { + var ( + s = bufio.NewScanner(r) + out []*Info + ) + + for s.Scan() { + if err := s.Err(); err != nil { + return nil, err + } + + var ( + p = &Info{} + text = s.Text() + optionalFields string + ) + + if _, err := fmt.Sscanf(text, mountinfoFormat, + &p.ID, &p.Parent, &p.Major, &p.Minor, + &p.Root, &p.Mountpoint, &p.Opts, &optionalFields); err != nil { + return nil, fmt.Errorf("Scanning '%s' failed: %s", text, err) + } + // Safe as mountinfo encodes mountpoints with spaces as \040. 
+ index := strings.Index(text, " - ") + postSeparatorFields := strings.Fields(text[index+3:]) + if len(postSeparatorFields) < 3 { + return nil, fmt.Errorf("Error found less than 3 fields post '-' in %q", text) + } + + if optionalFields != "-" { + p.Optional = optionalFields + } + + p.Fstype = postSeparatorFields[0] + p.Source = postSeparatorFields[1] + p.VfsOpts = strings.Join(postSeparatorFields[2:], " ") + out = append(out, p) + } + return out, nil +} + +func osSpecificMountOptions() []fuse.MountOption { + return []fuse.MountOption{} +} + +func checkMountPointAvailable(dir string) bool { + mountPoint := dir + if mountPoint != "/" && strings.HasSuffix(mountPoint, "/") { + mountPoint = mountPoint[0 : len(mountPoint)-1] + } + + if mounted, err := mounted(mountPoint); err != nil || mounted { + return false + } + + return true +} diff --git a/weed/command/mount_notsupported.go b/weed/command/mount_notsupported.go index 3bf22ddc4..f3c0de3d6 100644 --- a/weed/command/mount_notsupported.go +++ b/weed/command/mount_notsupported.go @@ -1,5 +1,6 @@ // +build !linux // +build !darwin +// +build !freebsd package command diff --git a/weed/command/mount_std.go b/weed/command/mount_std.go index 2937b9ef1..915754166 100644 --- a/weed/command/mount_std.go +++ b/weed/command/mount_std.go @@ -1,11 +1,13 @@ -// +build linux darwin +// +build linux darwin freebsd package command import ( + "context" "fmt" "os" "os/user" + "path" "runtime" "strconv" "strings" @@ -13,104 +15,185 @@ import ( "github.com/chrislusf/seaweedfs/weed/filesys" "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/security" "github.com/chrislusf/seaweedfs/weed/util" + "github.com/chrislusf/seaweedfs/weed/util/grace" "github.com/seaweedfs/fuse" "github.com/seaweedfs/fuse/fs" ) func runMount(cmd *Command, args []string) bool { - fmt.Printf("This is SeaweedFS version %s %s %s\n", util.VERSION, 
runtime.GOOS, runtime.GOARCH) - if *mountOptions.dir == "" { + + grace.SetupProfiling(*mountCpuProfile, *mountMemProfile) + + umask, umaskErr := strconv.ParseUint(*mountOptions.umaskString, 8, 64) + if umaskErr != nil { + fmt.Printf("can not parse umask %s", *mountOptions.umaskString) + return false + } + + if len(args) > 0 { + return false + } + + return RunMount(&mountOptions, os.FileMode(umask)) +} + +func RunMount(option *MountOptions, umask os.FileMode) bool { + + filer := *option.filer + // parse filer grpc address + filerGrpcAddress, err := pb.ParseFilerGrpcAddress(filer) + if err != nil { + glog.V(0).Infof("ParseFilerGrpcAddress: %v", err) + return true + } + + // try to connect to filer, filerBucketsPath may be useful later + grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.client") + var cipher bool + err = pb.WithGrpcFilerClient(filerGrpcAddress, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error { + resp, err := client.GetFilerConfiguration(context.Background(), &filer_pb.GetFilerConfigurationRequest{}) + if err != nil { + return fmt.Errorf("get filer grpc address %s configuration: %v", filerGrpcAddress, err) + } + cipher = resp.Cipher + return nil + }) + if err != nil { + glog.Infof("failed to talk to filer %s: %v", filerGrpcAddress, err) + return true + } + + filerMountRootPath := *option.filerMountRootPath + dir := *option.dir + chunkSizeLimitMB := *mountOptions.chunkSizeLimitMB + + util.LoadConfiguration("security", false) + + fmt.Printf("This is SeaweedFS version %s %s %s\n", util.Version(), runtime.GOOS, runtime.GOARCH) + if dir == "" { fmt.Printf("Please specify the mount directory via \"-dir\"") return false } - if *mountOptions.chunkSizeLimitMB <= 0 { + if chunkSizeLimitMB <= 0 { fmt.Printf("Please specify a reasonable buffer size.") return false } - fuse.Unmount(*mountOptions.dir) + fuse.Unmount(dir) + + uid, gid := uint32(0), uint32(0) // detect mount folder mode + if *option.dirAutoCreate { + os.MkdirAll(dir, 0755) 
+ } mountMode := os.ModeDir | 0755 - if fileInfo, err := os.Stat(*mountOptions.dir); err == nil { + fileInfo, err := os.Stat(dir) + if err == nil { mountMode = os.ModeDir | fileInfo.Mode() + uid, gid = util.GetFileUidGid(fileInfo) + fmt.Printf("mount point owner uid=%d gid=%d mode=%s\n", uid, gid, fileInfo.Mode()) } - // detect current user - uid, gid := uint32(0), uint32(0) - if u, err := user.Current(); err == nil { - if parsedId, pe := strconv.ParseUint(u.Uid, 10, 32); pe == nil { - uid = uint32(parsedId) - } - if parsedId, pe := strconv.ParseUint(u.Gid, 10, 32); pe == nil { - gid = uint32(parsedId) + if uid == 0 { + if u, err := user.Current(); err == nil { + if parsedId, pe := strconv.ParseUint(u.Uid, 10, 32); pe == nil { + uid = uint32(parsedId) + } + if parsedId, pe := strconv.ParseUint(u.Gid, 10, 32); pe == nil { + gid = uint32(parsedId) + } + fmt.Printf("current uid=%d gid=%d\n", uid, gid) } } - util.SetupProfiling(*mountCpuProfile, *mountMemProfile) + // Ensure target mount point availability + if isValid := checkMountPointAvailable(dir); !isValid { + glog.Fatalf("Expected mount to still be active, target mount point: %s, please check!", dir) + return true + } - c, err := fuse.Mount( - *mountOptions.dir, - fuse.VolumeName("SeaweedFS"), - fuse.FSName("SeaweedFS"), - fuse.Subtype("SeaweedFS"), - fuse.NoAppleDouble(), + mountName := path.Base(dir) + + options := []fuse.MountOption{ + fuse.VolumeName(mountName), + fuse.FSName(filer + ":" + filerMountRootPath), + fuse.Subtype("seaweedfs"), + // fuse.NoAppleDouble(), // include .DS_Store, otherwise can not delete non-empty folders fuse.NoAppleXattr(), fuse.NoBrowse(), fuse.AutoXattr(), fuse.ExclCreate(), fuse.DaemonTimeout("3600"), - fuse.AllowOther(), fuse.AllowSUID(), fuse.DefaultPermissions(), - fuse.MaxReadahead(1024*128), + fuse.MaxReadahead(1024 * 128), fuse.AsyncRead(), fuse.WritebackCache(), - ) - if err != nil { - glog.Fatal(err) - return false } - util.OnInterrupt(func() { - 
fuse.Unmount(*mountOptions.dir) - c.Close() - }) - - filerGrpcAddress, err := parseFilerGrpcAddress(*mountOptions.filer, *mountOptions.filerGrpcPort) - if err != nil { - glog.Fatal(err) - return false + options = append(options, osSpecificMountOptions()...) + if *option.allowOthers { + options = append(options, fuse.AllowOther()) + } + if *option.nonempty { + options = append(options, fuse.AllowNonEmptyMount()) } - mountRoot := *mountOptions.filerMountRootPath + // find mount point + mountRoot := filerMountRootPath if mountRoot != "/" && strings.HasSuffix(mountRoot, "/") { mountRoot = mountRoot[0 : len(mountRoot)-1] } - err = fs.Serve(c, filesys.NewSeaweedFileSystem(&filesys.Option{ - FilerGrpcAddress: filerGrpcAddress, - FilerMountRootPath: mountRoot, - Collection: *mountOptions.collection, - Replication: *mountOptions.replication, - TtlSec: int32(*mountOptions.ttlSec), - ChunkSizeLimit: int64(*mountOptions.chunkSizeLimitMB) * 1024 * 1024, - DataCenter: *mountOptions.dataCenter, - DirListingLimit: *mountOptions.dirListingLimit, - EntryCacheTtl: 3 * time.Second, - MountUid: uid, - MountGid: gid, - MountMode: mountMode, - })) + seaweedFileSystem := filesys.NewSeaweedFileSystem(&filesys.Option{ + FilerGrpcAddress: filerGrpcAddress, + GrpcDialOption: grpcDialOption, + FilerMountRootPath: mountRoot, + Collection: *option.collection, + Replication: *option.replication, + TtlSec: int32(*option.ttlSec), + ChunkSizeLimit: int64(chunkSizeLimitMB) * 1024 * 1024, + CacheDir: *option.cacheDir, + CacheSizeMB: *option.cacheSizeMB, + DataCenter: *option.dataCenter, + DirListCacheLimit: *option.dirListCacheLimit, + EntryCacheTtl: 3 * time.Second, + MountUid: uid, + MountGid: gid, + MountMode: mountMode, + MountCtime: fileInfo.ModTime(), + MountMtime: time.Now(), + Umask: umask, + OutsideContainerClusterMode: *mountOptions.outsideContainerClusterMode, + Cipher: cipher, + }) + + // mount + c, err := fuse.Mount(dir, options...) 
if err != nil { - fuse.Unmount(*mountOptions.dir) + glog.V(0).Infof("mount: %v", err) + return true } + defer fuse.Unmount(dir) + + grace.OnInterrupt(func() { + fuse.Unmount(dir) + c.Close() + }) + + glog.V(0).Infof("mounted %s%s to %s", filer, mountRoot, dir) + err = fs.Serve(c, seaweedFileSystem) // check if the mount process has an error to report <-c.Ready if err := c.MountError; err != nil { - glog.Fatal(err) + glog.V(0).Infof("mount process: %v", err) + return true } return true diff --git a/weed/command/msg_broker.go b/weed/command/msg_broker.go new file mode 100644 index 000000000..b4b5855ff --- /dev/null +++ b/weed/command/msg_broker.go @@ -0,0 +1,114 @@ +package command + +import ( + "context" + "fmt" + "strconv" + "time" + + "google.golang.org/grpc/reflection" + + "github.com/chrislusf/seaweedfs/weed/util/grace" + + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/messaging/broker" + "github.com/chrislusf/seaweedfs/weed/pb" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/pb/messaging_pb" + "github.com/chrislusf/seaweedfs/weed/security" + "github.com/chrislusf/seaweedfs/weed/util" +) + +var ( + messageBrokerStandaloneOptions MessageBrokerOptions +) + +type MessageBrokerOptions struct { + filer *string + ip *string + port *int + cpuprofile *string + memprofile *string +} + +func init() { + cmdMsgBroker.Run = runMsgBroker // break init cycle + messageBrokerStandaloneOptions.filer = cmdMsgBroker.Flag.String("filer", "localhost:8888", "filer server address") + messageBrokerStandaloneOptions.ip = cmdMsgBroker.Flag.String("ip", util.DetectedHostAddress(), "broker host address") + messageBrokerStandaloneOptions.port = cmdMsgBroker.Flag.Int("port", 17777, "broker gRPC listen port") + messageBrokerStandaloneOptions.cpuprofile = cmdMsgBroker.Flag.String("cpuprofile", "", "cpu profile output file") + messageBrokerStandaloneOptions.memprofile = cmdMsgBroker.Flag.String("memprofile", "", 
"memory profile output file") +} + +var cmdMsgBroker = &Command{ + UsageLine: "msgBroker [-port=17777] [-filer=]", + Short: "start a message queue broker", + Long: `start a message queue broker + + The broker can accept gRPC calls to write or read messages. The messages are stored via filer. + The brokers are stateless. To scale up, just add more brokers. + +`, +} + +func runMsgBroker(cmd *Command, args []string) bool { + + util.LoadConfiguration("security", false) + + return messageBrokerStandaloneOptions.startQueueServer() + +} + +func (msgBrokerOpt *MessageBrokerOptions) startQueueServer() bool { + + grace.SetupProfiling(*messageBrokerStandaloneOptions.cpuprofile, *messageBrokerStandaloneOptions.memprofile) + + filerGrpcAddress, err := pb.ParseFilerGrpcAddress(*msgBrokerOpt.filer) + if err != nil { + glog.Fatal(err) + return false + } + + grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.msg_broker") + cipher := false + + for { + err = pb.WithGrpcFilerClient(filerGrpcAddress, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error { + resp, err := client.GetFilerConfiguration(context.Background(), &filer_pb.GetFilerConfigurationRequest{}) + if err != nil { + return fmt.Errorf("get filer %s configuration: %v", filerGrpcAddress, err) + } + cipher = resp.Cipher + return nil + }) + if err != nil { + glog.V(0).Infof("wait to connect to filer %s grpc address %s", *msgBrokerOpt.filer, filerGrpcAddress) + time.Sleep(time.Second) + } else { + glog.V(0).Infof("connected to filer %s grpc address %s", *msgBrokerOpt.filer, filerGrpcAddress) + break + } + } + + qs, err := broker.NewMessageBroker(&broker.MessageBrokerOption{ + Filers: []string{*msgBrokerOpt.filer}, + DefaultReplication: "", + MaxMB: 0, + Ip: *msgBrokerOpt.ip, + Port: *msgBrokerOpt.port, + Cipher: cipher, + }, grpcDialOption) + + // start grpc listener + grpcL, err := util.NewListener(":"+strconv.Itoa(*msgBrokerOpt.port), 0) + if err != nil { + glog.Fatalf("failed to listen on grpc port %d: 
%v", *msgBrokerOpt.port, err) + } + grpcS := pb.NewGrpcServer(security.LoadServerTLS(util.GetViper(), "grpc.msg_broker")) + messaging_pb.RegisterSeaweedMessagingServer(grpcS, qs) + reflection.Register(grpcS) + grpcS.Serve(grpcL) + + return true + +} diff --git a/weed/command/s3.go b/weed/command/s3.go index 16a9490ff..7ebd4fab0 100644 --- a/weed/command/s3.go +++ b/weed/command/s3.go @@ -1,64 +1,161 @@ package command import ( + "context" + "fmt" "net/http" "time" - "fmt" + "github.com/chrislusf/seaweedfs/weed/pb" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/security" + + "github.com/gorilla/mux" + "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/s3api" "github.com/chrislusf/seaweedfs/weed/util" - "github.com/gorilla/mux" ) var ( - s3options S3Options + s3StandaloneOptions S3Options ) type S3Options struct { - filer *string - filerGrpcPort *int - filerBucketsPath *string - port *int - domainName *string - tlsPrivateKey *string - tlsCertificate *string + filer *string + port *int + config *string + domainName *string + tlsPrivateKey *string + tlsCertificate *string } func init() { cmdS3.Run = runS3 // break init cycle - s3options.filer = cmdS3.Flag.String("filer", "localhost:8888", "filer server address") - s3options.filerGrpcPort = cmdS3.Flag.Int("filer.grpcPort", 0, "filer server grpc port, default to filer http port plus 10000") - s3options.filerBucketsPath = cmdS3.Flag.String("filer.dir.buckets", "/buckets", "folder on filer to store all buckets") - s3options.port = cmdS3.Flag.Int("port", 8333, "s3options server http listen port") - s3options.domainName = cmdS3.Flag.String("domainName", "", "suffix of the host name, {bucket}.{domainName}") - s3options.tlsPrivateKey = cmdS3.Flag.String("key.file", "", "path to the TLS private key file") - s3options.tlsCertificate = cmdS3.Flag.String("cert.file", "", "path to the TLS certificate file") + s3StandaloneOptions.filer = 
cmdS3.Flag.String("filer", "localhost:8888", "filer server address") + s3StandaloneOptions.port = cmdS3.Flag.Int("port", 8333, "s3 server http listen port") + s3StandaloneOptions.domainName = cmdS3.Flag.String("domainName", "", "suffix of the host name, {bucket}.{domainName}") + s3StandaloneOptions.config = cmdS3.Flag.String("config", "", "path to the config file") + s3StandaloneOptions.tlsPrivateKey = cmdS3.Flag.String("key.file", "", "path to the TLS private key file") + s3StandaloneOptions.tlsCertificate = cmdS3.Flag.String("cert.file", "", "path to the TLS certificate file") } var cmdS3 = &Command{ - UsageLine: "s3 -port=8333 -filer=", + UsageLine: "s3 [-port=8333] [-filer=] [-config=]", Short: "start a s3 API compatible server that is backed by a filer", Long: `start a s3 API compatible server that is backed by a filer. + By default, you can use any access key and secret key to access the S3 APIs. + To enable credential based access, create a config.json file similar to this: + +{ + "identities": [ + { + "name": "some_name", + "credentials": [ + { + "accessKey": "some_access_key1", + "secretKey": "some_secret_key1" + } + ], + "actions": [ + "Admin", + "Read", + "Write" + ] + }, + { + "name": "some_read_only_user", + "credentials": [ + { + "accessKey": "some_access_key2", + "secretKey": "some_secret_key2" + } + ], + "actions": [ + "Read" + ] + }, + { + "name": "some_normal_user", + "credentials": [ + { + "accessKey": "some_access_key3", + "secretKey": "some_secret_key3" + } + ], + "actions": [ + "Read", + "Write" + ] + }, + { + "name": "user_limited_to_bucket1", + "credentials": [ + { + "accessKey": "some_access_key4", + "secretKey": "some_secret_key4" + } + ], + "actions": [ + "Read:bucket1", + "Write:bucket1" + ] + } + ] +} + `, } func runS3(cmd *Command, args []string) bool { - filerGrpcAddress, err := parseFilerGrpcAddress(*s3options.filer, *s3options.filerGrpcPort) + util.LoadConfiguration("security", false) + + return s3StandaloneOptions.startS3Server() + 
+} + +func (s3opt *S3Options) startS3Server() bool { + + filerGrpcAddress, err := pb.ParseFilerGrpcAddress(*s3opt.filer) if err != nil { glog.Fatal(err) return false } + filerBucketsPath := "/buckets" + + grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.client") + + for { + err = pb.WithGrpcFilerClient(filerGrpcAddress, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error { + resp, err := client.GetFilerConfiguration(context.Background(), &filer_pb.GetFilerConfigurationRequest{}) + if err != nil { + return fmt.Errorf("get filer %s configuration: %v", filerGrpcAddress, err) + } + filerBucketsPath = resp.DirBuckets + glog.V(0).Infof("S3 read filer buckets dir: %s", filerBucketsPath) + return nil + }) + if err != nil { + glog.V(0).Infof("wait to connect to filer %s grpc address %s", *s3opt.filer, filerGrpcAddress) + time.Sleep(time.Second) + } else { + glog.V(0).Infof("connected to filer %s grpc address %s", *s3opt.filer, filerGrpcAddress) + break + } + } + router := mux.NewRouter().SkipClean(true) _, s3ApiServer_err := s3api.NewS3ApiServer(router, &s3api.S3ApiServerOption{ - Filer: *s3options.filer, + Filer: *s3opt.filer, FilerGrpcAddress: filerGrpcAddress, - DomainName: *s3options.domainName, - BucketsPath: *s3options.filerBucketsPath, + Config: *s3opt.config, + DomainName: *s3opt.domainName, + BucketsPath: filerBucketsPath, + GrpcDialOption: grpcDialOption, }) if s3ApiServer_err != nil { glog.Fatalf("S3 API Server startup error: %v", s3ApiServer_err) @@ -66,22 +163,22 @@ func runS3(cmd *Command, args []string) bool { httpS := &http.Server{Handler: router} - listenAddress := fmt.Sprintf(":%d", *s3options.port) + listenAddress := fmt.Sprintf(":%d", *s3opt.port) s3ApiListener, err := util.NewListener(listenAddress, time.Duration(10)*time.Second) if err != nil { glog.Fatalf("S3 API Server listener on %s error: %v", listenAddress, err) } - if *s3options.tlsPrivateKey != "" { - if err = httpS.ServeTLS(s3ApiListener, *s3options.tlsCertificate, 
*s3options.tlsPrivateKey); err != nil { + if *s3opt.tlsPrivateKey != "" { + glog.V(0).Infof("Start Seaweed S3 API Server %s at https port %d", util.Version(), *s3opt.port) + if err = httpS.ServeTLS(s3ApiListener, *s3opt.tlsCertificate, *s3opt.tlsPrivateKey); err != nil { glog.Fatalf("S3 API Server Fail to serve: %v", err) } - glog.V(0).Infof("Start Seaweed S3 API Server %s at https port %d", util.VERSION, *s3options.port) } else { + glog.V(0).Infof("Start Seaweed S3 API Server %s at http port %d", util.Version(), *s3opt.port) if err = httpS.Serve(s3ApiListener); err != nil { glog.Fatalf("S3 API Server Fail to serve: %v", err) } - glog.V(0).Infof("Start Seaweed S3 API Server %s at http port %d", util.VERSION, *s3options.port) } return true diff --git a/weed/command/scaffold.go b/weed/command/scaffold.go index ec0723859..b199f2d2d 100644 --- a/weed/command/scaffold.go +++ b/weed/command/scaffold.go @@ -10,16 +10,24 @@ func init() { } var cmdScaffold = &Command{ - UsageLine: "scaffold [filer]", + UsageLine: "scaffold -config=[filer|notification|replication|security|master]", Short: "generate basic configuration files", Long: `Generate filer.toml with all possible configurations for you to customize. + The options can also be overwritten by environment variables. + For example, the filer.toml mysql password can be overwritten by environment variable + export WEED_MYSQL_PASSWORD=some_password + Environment variable rules: + * Prefix the variable name with "WEED_" + * Uppercase the rest of the variable name. + * Replace '.' 
with '_' + `, } var ( outputPath = cmdScaffold.Flag.String("output", "", "if not empty, save the configuration file to this directory") - config = cmdScaffold.Flag.String("config", "filer", "[filer|notification|replication] the configuration file to generate") + config = cmdScaffold.Flag.String("config", "filer", "[filer|notification|replication|security|master] the configuration file to generate") ) func runScaffold(cmd *Command, args []string) bool { @@ -32,6 +40,10 @@ func runScaffold(cmd *Command, args []string) bool { content = NOTIFICATION_TOML_EXAMPLE case "replication": content = REPLICATION_TOML_EXAMPLE + case "security": + content = SECURITY_TOML_EXAMPLE + case "master": + content = MASTER_TOML_EXAMPLE } if content == "" { println("need a valid -config option") @@ -55,27 +67,39 @@ const ( # $HOME/.seaweedfs/filer.toml # /etc/seaweedfs/filer.toml -[memory] -# local in memory, mostly for testing purpose -enabled = false +#################################################### +# Customizable filer server options +#################################################### +[filer.options] +# with http DELETE, by default the filer would check whether a folder is empty. +# recursive_delete will delete all sub folders and files, similar to "rm -Rf" +recursive_delete = false +# directories under this folder will be automatically creating a separate bucket +buckets_folder = "/buckets" +buckets_fsync = [ # a list of buckets with all write requests fsync=true + "important_bucket", + "should_always_fsync", +] + +#################################################### +# The following are filer store options +#################################################### -[leveldb] +[leveldb2] # local on disk, mostly for simple single-machine setup, fairly scalable +# faster than previous leveldb, recommended. enabled = true dir = "." 
# directory to store level db files -#################################################### -# multiple filers on shared storage, fairly scalable -#################################################### - -[mysql] +[mysql] # or tidb # CREATE TABLE IF NOT EXISTS filemeta ( -# dirhash BIGINT COMMENT 'first 64 bits of MD5 hash value of directory field', -# name VARCHAR(1000) COMMENT 'directory or file name', -# directory VARCHAR(4096) COMMENT 'full path to parent directory', -# meta BLOB, +# dirhash BIGINT COMMENT 'first 64 bits of MD5 hash value of directory field', +# name VARCHAR(1000) COMMENT 'directory or file name', +# directory TEXT COMMENT 'full path to parent directory', +# meta LONGBLOB, # PRIMARY KEY (dirhash, name) # ) DEFAULT CHARSET=utf8; + enabled = false hostname = "localhost" port = 3306 @@ -84,12 +108,13 @@ password = "" database = "" # create or use an existing database connection_max_idle = 2 connection_max_open = 100 +interpolateParams = false -[postgres] +[postgres] # or cockroachdb # CREATE TABLE IF NOT EXISTS filemeta ( # dirhash BIGINT, -# name VARCHAR(1000), -# directory VARCHAR(4096), +# name VARCHAR(65535), +# directory VARCHAR(65535), # meta bytea, # PRIMARY KEY (dirhash, name) # ); @@ -116,13 +141,13 @@ hosts=[ "localhost:9042", ] -[redis] +[redis2] enabled = false address = "localhost:6379" password = "" -db = 0 +database = 0 -[redis_cluster] +[redis_cluster2] enabled = false addresses = [ "localhost:30001", @@ -132,7 +157,22 @@ addresses = [ "localhost:30005", "localhost:30006", ] +password = "" +# allows reads from slave servers or the master, but all writes still go to the master +readOnly = true +# automatically use the closest Redis server for reads +routeByLatency = true + +[etcd] +enabled = false +servers = "localhost:2379" +timeout = "3s" +[mongodb] +enabled = false +uri = "mongodb://localhost:27017" +option_pool_size = 0 +database = "seaweedfs" ` NOTIFICATION_TOML_EXAMPLE = ` @@ -178,6 +218,17 @@ google_application_credentials = 
"/path/to/x.json" # path to json credential fil project_id = "" # an existing project id topic = "seaweedfs_filer_topic" # a topic, auto created if does not exists +[notification.gocdk_pub_sub] +# The Go Cloud Development Kit (https://gocloud.dev). +# PubSub API (https://godoc.org/gocloud.dev/pubsub). +# Supports AWS SNS/SQS, Azure Service Bus, Google PubSub, NATS and RabbitMQ. +enabled = false +# This URL will Dial the RabbitMQ server at the URL in the environment +# variable RABBIT_SERVER_URL and open the exchange "myexchange". +# The exchange must have already been created by some other means, like +# the RabbitMQ management plugin. +topic_url = "rabbit://myexchange" +sub_url = "rabbit://myqueue" ` REPLICATION_TOML_EXAMPLE = ` @@ -194,28 +245,29 @@ grpcAddress = "localhost:18888" # all files under this directory tree are replicated. # this is not a directory on your hard drive, but on your filer. # i.e., all files with this "prefix" are sent to notification message queue. -directory = "/buckets" +directory = "/buckets" [sink.filer] enabled = false grpcAddress = "localhost:18888" # all replicated files are under this directory tree -# this is not a directory on your hard drive, but on your filer. +# this is not a directory on your hard drive, but on your filer. # i.e., all received files will be "prefixed" to this directory. -directory = "/backup" +directory = "/backup" replication = "" collection = "" ttlSec = 0 [sink.s3] # read credentials doc at https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/sessions.html -# default loads credentials from the shared credentials file (~/.aws/credentials). +# default loads credentials from the shared credentials file (~/.aws/credentials). enabled = false aws_access_key_id = "" # if empty, loads from the shared credentials file (~/.aws/credentials). aws_secret_access_key = "" # if empty, loads from the shared credentials file (~/.aws/credentials). 
region = "us-east-2" bucket = "your_bucket_name" # an existing bucket directory = "/" # destination directory +endpoint = "" [sink.google_cloud_storage] # read credentials doc at https://cloud.google.com/docs/authentication/getting-started @@ -239,5 +291,128 @@ b2_master_application_key = "" bucket = "mybucket" # an existing bucket directory = "/" # destination directory + +` + + SECURITY_TOML_EXAMPLE = ` +# Put this file to one of the location, with descending priority +# ./security.toml +# $HOME/.seaweedfs/security.toml +# /etc/seaweedfs/security.toml +# this file is read by master, volume server, and filer + +# the jwt signing key is read by master and volume server. +# a jwt defaults to expire after 10 seconds. +[jwt.signing] +key = "" +expires_after_seconds = 10 # seconds + +# jwt for read is only supported with master+volume setup. Filer does not support this mode. +[jwt.signing.read] +key = "" +expires_after_seconds = 10 # seconds + +# all grpc tls authentications are mutual +# the values for the following ca, cert, and key are paths to the PEM files. +# the host name is not checked, so the PEM files can be shared. +[grpc] +ca = "" + +[grpc.volume] +cert = "" +key = "" + +[grpc.master] +cert = "" +key = "" + +[grpc.filer] +cert = "" +key = "" + +[grpc.msg_broker] +cert = "" +key = "" + +# use this for any place needs a grpc client +# i.e., "weed backup|benchmark|filer.copy|filer.replicate|mount|s3|upload" +[grpc.client] +cert = "" +key = "" + + +# volume server https options +# Note: work in progress! +# this does not work with other clients, e.g., "weed filer|mount" etc, yet. 
+[https.client] +enabled = true +[https.volume] +cert = "" +key = "" + + +` + + MASTER_TOML_EXAMPLE = ` +# Put this file to one of the locations, with descending priority +# ./master.toml +# $HOME/.seaweedfs/master.toml +# /etc/seaweedfs/master.toml +# this file is read by master + +[master.maintenance] +# periodically run these scripts, which are the same as running them from 'weed shell' +scripts = """ + lock + ec.encode -fullPercent=95 -quietFor=1h + ec.rebuild -force + ec.balance -force + volume.balance -force + volume.fix.replication + unlock +""" +sleep_minutes = 17 # sleep minutes between each script execution + +[master.filer] +default = "localhost:8888" # used by maintenance scripts if the scripts need to use fs related commands + + +[master.sequencer] +type = "memory" # Choose [memory|etcd] type for storing the file id sequence +# when sequencer.type = etcd, set listen client urls of etcd cluster that store file id sequence +# example : http://127.0.0.1:2379,http://127.0.0.1:2389 +sequencer_etcd_urls = "http://127.0.0.1:2379" + + +# configurations for tiered cloud storage +# old volumes are transparently moved to cloud for cost efficiency +[storage.backend] + [storage.backend.s3.default] + enabled = false + aws_access_key_id = "" # if empty, loads from the shared credentials file (~/.aws/credentials). + aws_secret_access_key = "" # if empty, loads from the shared credentials file (~/.aws/credentials). + region = "us-east-2" + bucket = "your_bucket_name" # an existing bucket + endpoint = "" + +# create this number of logical volumes if no more writable volumes +# count_x means how many copies of data. 
+# e.g.: +# 000 has only one copy, copy_1 +# 010 and 001 has two copies, copy_2 +# 011 has only 3 copies, copy_3 +[master.volume_growth] +copy_1 = 7 # create 1 x 7 = 7 actual volumes +copy_2 = 6 # create 2 x 6 = 12 actual volumes +copy_3 = 3 # create 3 x 3 = 9 actual volumes +copy_other = 1 # create n x 1 = n actual volumes + +# configuration flags for replication +[master.replication] +# any replication counts should be considered minimums. If you specify 010 and +# have 3 different racks, that's still considered writable. Writes will still +# try to replicate to all available volumes. You should only use this option +# if you are doing your own replication or periodic sync of volumes. +treat_replication_as_minimums = false + ` ) diff --git a/weed/command/scaffold_test.go b/weed/command/scaffold_test.go new file mode 100644 index 000000000..423dacc32 --- /dev/null +++ b/weed/command/scaffold_test.go @@ -0,0 +1,44 @@ +package command + +import ( + "bytes" + "fmt" + "testing" + + "github.com/spf13/viper" +) + +func TestReadingTomlConfiguration(t *testing.T) { + + viper.SetConfigType("toml") + + // any approach to require this configuration into your program. + var tomlExample = []byte(` +[database] +server = "192.168.1.1" +ports = [ 8001, 8001, 8002 ] +connection_max = 5000 +enabled = true + +[servers] + + # You can indent as you please. Tabs or spaces. TOML don't care. 
+ [servers.alpha] + ip = "10.0.0.1" + dc = "eqdc10" + + [servers.beta] + ip = "10.0.0.2" + dc = "eqdc10" + +`) + + viper.ReadConfig(bytes.NewBuffer(tomlExample)) + + fmt.Printf("database is %v\n", viper.Get("database")) + fmt.Printf("servers is %v\n", viper.GetStringMap("servers")) + + alpha := viper.Sub("servers.alpha") + + fmt.Printf("alpha ip is %v\n", alpha.GetString("ip")) +} diff --git a/weed/command/server.go b/weed/command/server.go index ba5305a97..443f041c5 100644 --- a/weed/command/server.go +++ b/weed/command/server.go @@ -1,21 +1,15 @@ package command import ( - "net/http" + "fmt" "os" "runtime" "runtime/pprof" - "strconv" "strings" - "sync" "time" "github.com/chrislusf/seaweedfs/weed/glog" - "github.com/chrislusf/seaweedfs/weed/pb/master_pb" - "github.com/chrislusf/seaweedfs/weed/server" "github.com/chrislusf/seaweedfs/weed/util" - "github.com/gorilla/mux" - "google.golang.org/grpc/reflection" ) type ServerOptions struct { @@ -24,8 +18,11 @@ type ServerOptions struct { } var ( - serverOptions ServerOptions - filerOptions FilerOptions + serverOptions ServerOptions + masterOptions MasterOptions + filerOptions FilerOptions + s3Options S3Options + msgBrokerOptions MessageBrokerOptions ) func init() { @@ -33,70 +30,90 @@ func init() { } var cmdServer = &Command{ - UsageLine: "server -port=8080 -dir=/tmp -volume.max=5 -ip=server_name", - Short: "start a server, including volume server, and automatically elect a master server", + UsageLine: "server -dir=/tmp -volume.max=5 -ip=server_name", + Short: "start a master server, a volume server, and optionally a filer and a S3 gateway", Long: `start both a volume server to provide storage spaces and a master server to provide volume=>location mapping service and sequence number of file ids This is provided as a convenient way to start both volume server and master server. - The servers are exactly the same as starting them separately. + The servers acts exactly the same as starting them separately. 
+ So other volume servers can connect to this master server also. - So other volume servers can use this embedded master server also. - - Optionally, one filer server can be started. Logically, filer servers should not be in a cluster. - They run with meta data on disk, not shared. So each filer server is different. + Optionally, a filer server can be started. + Also optionally, a S3 gateway can be started. `, } var ( - serverIp = cmdServer.Flag.String("ip", "localhost", "ip or server name") - serverBindIp = cmdServer.Flag.String("ip.bind", "0.0.0.0", "ip address to bind to") - serverMaxCpu = cmdServer.Flag.Int("maxCpu", 0, "maximum number of CPUs. 0 means all available CPUs") - serverTimeout = cmdServer.Flag.Int("idleTimeout", 30, "connection idle seconds") - serverDataCenter = cmdServer.Flag.String("dataCenter", "", "current volume server's data center name") - serverRack = cmdServer.Flag.String("rack", "", "current volume server's rack name") - serverWhiteListOption = cmdServer.Flag.String("whiteList", "", "comma separated Ip addresses having write permission. 
No limit if empty.") - serverPeers = cmdServer.Flag.String("master.peers", "", "all master nodes in comma separated ip:masterPort list") - serverSecureKey = cmdServer.Flag.String("secure.secret", "", "secret to encrypt Json Web Token(JWT)") - serverGarbageThreshold = cmdServer.Flag.Float64("garbageThreshold", 0.3, "threshold to vacuum and reclaim spaces") - masterPort = cmdServer.Flag.Int("master.port", 9333, "master server http listen port") - masterGrpcPort = cmdServer.Flag.Int("master.port.grpc", 0, "master grpc server listen port, default to http port + 10000") - masterMetaFolder = cmdServer.Flag.String("master.dir", "", "data directory to store meta data, default to same as -dir specified") - masterVolumeSizeLimitMB = cmdServer.Flag.Uint("master.volumeSizeLimitMB", 30*1000, "Master stops directing writes to oversized volumes.") - masterVolumePreallocate = cmdServer.Flag.Bool("master.volumePreallocate", false, "Preallocate disk space for volumes.") - masterDefaultReplicaPlacement = cmdServer.Flag.String("master.defaultReplicaPlacement", "000", "Default replication type if not specified.") - volumeDataFolders = cmdServer.Flag.String("dir", os.TempDir(), "directories to store data files. 
dir[,dir]...") - volumeMaxDataVolumeCounts = cmdServer.Flag.String("volume.max", "7", "maximum numbers of volumes, count[,count]...") - pulseSeconds = cmdServer.Flag.Int("pulseSeconds", 5, "number of seconds between heartbeats") - isStartingFiler = cmdServer.Flag.Bool("filer", false, "whether to start filer") + serverIp = cmdServer.Flag.String("ip", util.DetectedHostAddress(), "ip or server name") + serverBindIp = cmdServer.Flag.String("ip.bind", "0.0.0.0", "ip address to bind to") + serverTimeout = cmdServer.Flag.Int("idleTimeout", 30, "connection idle seconds") + serverDataCenter = cmdServer.Flag.String("dataCenter", "", "current volume server's data center name") + serverRack = cmdServer.Flag.String("rack", "", "current volume server's rack name") + serverWhiteListOption = cmdServer.Flag.String("whiteList", "", "comma separated Ip addresses having write permission. No limit if empty.") + serverDisableHttp = cmdServer.Flag.Bool("disableHttp", false, "disable http requests, only gRPC operations are allowed.") + volumeDataFolders = cmdServer.Flag.String("dir", os.TempDir(), "directories to store data files. dir[,dir]...") + volumeMaxDataVolumeCounts = cmdServer.Flag.String("volume.max", "8", "maximum numbers of volumes, count[,count]... If set to zero, the limit will be auto configured.") + volumeMinFreeSpacePercent = cmdServer.Flag.String("volume.minFreeSpacePercent", "1", "minimum free disk space (default to 1%). 
Low disk space will mark all volumes as ReadOnly.") + + // pulseSeconds = cmdServer.Flag.Int("pulseSeconds", 5, "number of seconds between heartbeats") + isStartingFiler = cmdServer.Flag.Bool("filer", false, "whether to start filer") + isStartingS3 = cmdServer.Flag.Bool("s3", false, "whether to start S3 gateway") + isStartingMsgBroker = cmdServer.Flag.Bool("msgBroker", false, "whether to start message broker") serverWhiteList []string + + False = false ) func init() { serverOptions.cpuprofile = cmdServer.Flag.String("cpuprofile", "", "cpu profile output file") + + masterOptions.port = cmdServer.Flag.Int("master.port", 9333, "master server http listen port") + masterOptions.metaFolder = cmdServer.Flag.String("master.dir", "", "data directory to store meta data, default to same as -dir specified") + masterOptions.peers = cmdServer.Flag.String("master.peers", "", "all master nodes in comma separated ip:masterPort list") + masterOptions.volumeSizeLimitMB = cmdServer.Flag.Uint("master.volumeSizeLimitMB", 30*1000, "Master stops directing writes to oversized volumes.") + masterOptions.volumePreallocate = cmdServer.Flag.Bool("master.volumePreallocate", false, "Preallocate disk space for volumes.") + masterOptions.defaultReplication = cmdServer.Flag.String("master.defaultReplication", "000", "Default replication type if not specified.") + masterOptions.garbageThreshold = cmdServer.Flag.Float64("garbageThreshold", 0.3, "threshold to vacuum and reclaim spaces") + masterOptions.metricsAddress = cmdServer.Flag.String("metrics.address", "", "Prometheus gateway address") + masterOptions.metricsIntervalSec = cmdServer.Flag.Int("metrics.intervalSeconds", 15, "Prometheus push interval in seconds") + filerOptions.collection = cmdServer.Flag.String("filer.collection", "", "all data will be stored in this collection") filerOptions.port = cmdServer.Flag.Int("filer.port", 8888, "filer server http listen port") - filerOptions.grpcPort = cmdServer.Flag.Int("filer.port.grpc", 0, "filer grpc 
server listen port, default to http port + 10000") filerOptions.publicPort = cmdServer.Flag.Int("filer.port.public", 0, "filer server public http listen port") filerOptions.defaultReplicaPlacement = cmdServer.Flag.String("filer.defaultReplicaPlacement", "", "Default replication type if not specified during runtime.") - filerOptions.redirectOnRead = cmdServer.Flag.Bool("filer.redirectOnRead", false, "whether proxy or redirect to volume server during file GET request") filerOptions.disableDirListing = cmdServer.Flag.Bool("filer.disableDirListing", false, "turn off directory listing") filerOptions.maxMB = cmdServer.Flag.Int("filer.maxMB", 32, "split files larger than the limit") filerOptions.dirListingLimit = cmdServer.Flag.Int("filer.dirListLimit", 1000, "limit sub dir listing size") + filerOptions.cipher = cmdServer.Flag.Bool("filer.encryptVolumeData", false, "encrypt data on volume servers") + filerOptions.peers = cmdServer.Flag.String("filer.peers", "", "all filers sharing the same filer store in comma separated ip:port list") serverOptions.v.port = cmdServer.Flag.Int("volume.port", 8080, "volume server http listen port") serverOptions.v.publicPort = cmdServer.Flag.Int("volume.port.public", 0, "volume server public port") - serverOptions.v.indexType = cmdServer.Flag.String("volume.index", "memory", "Choose [memory|leveldb|boltdb|btree] mode for memory~performance balance.") - serverOptions.v.fixJpgOrientation = cmdServer.Flag.Bool("volume.images.fix.orientation", false, "Adjust jpg orientation when uploading.") + serverOptions.v.indexType = cmdServer.Flag.String("volume.index", "memory", "Choose [memory|leveldb|leveldbMedium|leveldbLarge] mode for memory~performance balance.") serverOptions.v.readRedirect = cmdServer.Flag.Bool("volume.read.redirect", true, "Redirect moved or non-local volumes.") + serverOptions.v.compactionMBPerSecond = cmdServer.Flag.Int("volume.compactionMBps", 0, "limit compaction speed in mega bytes per second") + 
serverOptions.v.fileSizeLimitMB = cmdServer.Flag.Int("volume.fileSizeLimitMB", 256, "limit file size to avoid out of memory") serverOptions.v.publicUrl = cmdServer.Flag.String("volume.publicUrl", "", "publicly accessible address") + serverOptions.v.pprof = &False + + s3Options.port = cmdServer.Flag.Int("s3.port", 8333, "s3 server http listen port") + s3Options.domainName = cmdServer.Flag.String("s3.domainName", "", "suffix of the host name, {bucket}.{domainName}") + s3Options.tlsPrivateKey = cmdServer.Flag.String("s3.key.file", "", "path to the TLS private key file") + s3Options.tlsCertificate = cmdServer.Flag.String("s3.cert.file", "", "path to the TLS certificate file") + s3Options.config = cmdServer.Flag.String("s3.config", "", "path to the config file") + + msgBrokerOptions.port = cmdServer.Flag.Int("msgBroker.port", 17777, "broker gRPC listen port") } func runServer(cmd *Command, args []string) bool { - filerOptions.secretKey = serverSecureKey + + util.LoadConfiguration("security", false) + util.LoadConfiguration("master", false) + if *serverOptions.cpuprofile != "" { f, err := os.Create(*serverOptions.cpuprofile) if err != nil { @@ -106,45 +123,62 @@ func runServer(cmd *Command, args []string) bool { defer pprof.StopCPUProfile() } - if *filerOptions.redirectOnRead { + if *isStartingS3 { + *isStartingFiler = true + } + if *isStartingMsgBroker { *isStartingFiler = true } - master := *serverIp + ":" + strconv.Itoa(*masterPort) + _, peerList := checkPeers(*serverIp, *masterOptions.port, *masterOptions.peers) + peers := strings.Join(peerList, ",") + masterOptions.peers = &peers + + masterOptions.ip = serverIp + masterOptions.ipBind = serverBindIp + filerOptions.masters = &peers filerOptions.ip = serverIp + filerOptions.bindIp = serverBindIp serverOptions.v.ip = serverIp serverOptions.v.bindIp = serverBindIp - serverOptions.v.masters = &master + serverOptions.v.masters = &peers serverOptions.v.idleConnectionTimeout = serverTimeout - serverOptions.v.maxCpu = 
serverMaxCpu serverOptions.v.dataCenter = serverDataCenter serverOptions.v.rack = serverRack - serverOptions.v.pulseSeconds = pulseSeconds + msgBrokerOptions.ip = serverIp + + // serverOptions.v.pulseSeconds = pulseSeconds + // masterOptions.pulseSeconds = pulseSeconds + + masterOptions.whiteList = serverWhiteListOption filerOptions.dataCenter = serverDataCenter + filerOptions.disableHttp = serverDisableHttp + masterOptions.disableHttp = serverDisableHttp + + filerAddress := fmt.Sprintf("%s:%d", *serverIp, *filerOptions.port) + s3Options.filer = &filerAddress + msgBrokerOptions.filer = &filerAddress if *filerOptions.defaultReplicaPlacement == "" { - *filerOptions.defaultReplicaPlacement = *masterDefaultReplicaPlacement + *filerOptions.defaultReplicaPlacement = *masterOptions.defaultReplication } - if *serverMaxCpu < 1 { - *serverMaxCpu = runtime.NumCPU() - } - runtime.GOMAXPROCS(*serverMaxCpu) + runtime.GOMAXPROCS(runtime.NumCPU()) folders := strings.Split(*volumeDataFolders, ",") - if *masterVolumeSizeLimitMB > 30*1000 { + if *masterOptions.volumeSizeLimitMB > util.VolumeSizeLimitGB*1000 { glog.Fatalf("masterVolumeSizeLimitMB should be less than 30000") } - if *masterMetaFolder == "" { - *masterMetaFolder = folders[0] + if *masterOptions.metaFolder == "" { + *masterOptions.metaFolder = folders[0] } - if err := util.TestFolderWritable(*masterMetaFolder); err != nil { - glog.Fatalf("Check Meta Folder (-mdir=\"%s\") Writable: %s", *masterMetaFolder, err) + if err := util.TestFolderWritable(*masterOptions.metaFolder); err != nil { + glog.Fatalf("Check Meta Folder (-mdir=\"%s\") Writable: %s", *masterOptions.metaFolder, err) } - filerOptions.defaultLevelDbDirectory = masterMetaFolder + filerOptions.defaultLevelDbDirectory = masterOptions.metaFolder if *serverWhiteListOption != "" { serverWhiteList = strings.Split(*serverWhiteListOption, ",") @@ -159,68 +193,29 @@ func runServer(cmd *Command, args []string) bool { }() } - var raftWaitForMaster sync.WaitGroup - var 
volumeWait sync.WaitGroup - - raftWaitForMaster.Add(1) - volumeWait.Add(1) - - go func() { - r := mux.NewRouter() - ms := weed_server.NewMasterServer(r, *masterPort, *masterMetaFolder, - *masterVolumeSizeLimitMB, *masterVolumePreallocate, - *pulseSeconds, *masterDefaultReplicaPlacement, *serverGarbageThreshold, - serverWhiteList, *serverSecureKey, - ) + if *isStartingS3 { + go func() { + time.Sleep(2 * time.Second) - glog.V(0).Infof("Start Seaweed Master %s at %s:%d", util.VERSION, *serverIp, *masterPort) - masterListener, e := util.NewListener(*serverBindIp+":"+strconv.Itoa(*masterPort), 0) - if e != nil { - glog.Fatalf("Master startup error: %v", e) - } + s3Options.startS3Server() - go func() { - // starting grpc server - grpcPort := *masterGrpcPort - if grpcPort == 0 { - grpcPort = *masterPort + 10000 - } - grpcL, err := util.NewListener(*serverIp+":"+strconv.Itoa(grpcPort), 0) - if err != nil { - glog.Fatalf("master failed to listen on grpc port %d: %v", grpcPort, err) - } - // Create your protocol servers. 
- grpcS := util.NewGrpcServer() - master_pb.RegisterSeaweedServer(grpcS, ms) - reflection.Register(grpcS) - - glog.V(0).Infof("Start Seaweed Master %s grpc server at %s:%d", util.VERSION, *serverIp, grpcPort) - grpcS.Serve(grpcL) }() + } + if *isStartingMsgBroker { go func() { - raftWaitForMaster.Wait() - time.Sleep(100 * time.Millisecond) - myAddress, peers := checkPeers(*serverIp, *masterPort, *serverPeers) - raftServer := weed_server.NewRaftServer(r, peers, myAddress, *masterMetaFolder, ms.Topo, *pulseSeconds) - ms.SetRaftServer(raftServer) - volumeWait.Done() + time.Sleep(2 * time.Second) + msgBrokerOptions.startQueueServer() }() + } - raftWaitForMaster.Done() - - // start http server - httpS := &http.Server{Handler: r} - if err := httpS.Serve(masterListener); err != nil { - glog.Fatalf("master server failed to serve: %v", err) - } - - }() + // start volume server + { + go serverOptions.v.startVolumeServer(*volumeDataFolders, *volumeMaxDataVolumeCounts, *serverWhiteListOption, *volumeMinFreeSpacePercent) - volumeWait.Wait() - time.Sleep(100 * time.Millisecond) + } - serverOptions.v.startVolumeServer(*volumeDataFolders, *volumeMaxDataVolumeCounts, *serverWhiteListOption) + startMaster(masterOptions, serverWhiteList) return true } diff --git a/weed/command/shell.go b/weed/command/shell.go index 19c5049c5..6dd768f47 100644 --- a/weed/command/shell.go +++ b/weed/command/shell.go @@ -1,61 +1,47 @@ package command import ( - "bufio" "fmt" - "os" - "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/security" + "github.com/chrislusf/seaweedfs/weed/shell" + "github.com/chrislusf/seaweedfs/weed/util" +) + +var ( + shellOptions shell.ShellOptions + shellInitialFiler *string ) func init() { cmdShell.Run = runShell // break init cycle + shellOptions.Masters = cmdShell.Flag.String("master", "localhost:9333", "comma-separated master servers") + shellInitialFiler = cmdShell.Flag.String("filer", "localhost:8888", "filer host and port") } var 
cmdShell = &Command{ UsageLine: "shell", - Short: "run interactive commands, now just echo", - Long: `run interactive commands. + Short: "run interactive administrative commands", + Long: `run interactive administrative commands. `, } -var () - func runShell(command *Command, args []string) bool { - r := bufio.NewReader(os.Stdin) - o := bufio.NewWriter(os.Stdout) - e := bufio.NewWriter(os.Stderr) - prompt := func() { - var err error - if _, err = o.WriteString("> "); err != nil { - glog.V(0).Infoln("error writing to stdout:", err) - } - if err = o.Flush(); err != nil { - glog.V(0).Infoln("error flushing stdout:", err) - } - } - readLine := func() string { - ret, err := r.ReadString('\n') - if err != nil { - fmt.Fprint(e, err) - os.Exit(1) - } - return ret - } - execCmd := func(cmd string) int { - if cmd != "" { - if _, err := o.WriteString(cmd); err != nil { - glog.V(0).Infoln("error writing to stdout:", err) - } - } - return 0 - } - cmd := "" - for { - prompt() - cmd = readLine() - execCmd(cmd) + util.LoadConfiguration("security", false) + shellOptions.GrpcDialOption = security.LoadClientTLS(util.GetViper(), "grpc.client") + + var err error + shellOptions.FilerHost, shellOptions.FilerPort, err = util.ParseHostPort(*shellInitialFiler) + if err != nil { + fmt.Printf("failed to parse filer %s: %v\n", *shellInitialFiler, err) + return false } + shellOptions.Directory = "/" + + shell.RunShell(shellOptions) + + return true + } diff --git a/weed/command/upload.go b/weed/command/upload.go index f664c0e3a..358897aee 100644 --- a/weed/command/upload.go +++ b/weed/command/upload.go @@ -8,6 +8,7 @@ import ( "github.com/chrislusf/seaweedfs/weed/operation" "github.com/chrislusf/seaweedfs/weed/security" + "github.com/chrislusf/seaweedfs/weed/util" ) var ( @@ -15,15 +16,15 @@ var ( ) type UploadOptions struct { - master *string - dir *string - include *string - replication *string - collection *string - dataCenter *string - ttl *string - maxMB *int - secretKey *string + master 
*string + dir *string + include *string + replication *string + collection *string + dataCenter *string + ttl *string + maxMB *int + usePublicUrl *bool } func init() { @@ -36,8 +37,8 @@ func init() { upload.collection = cmdUpload.Flag.String("collection", "", "optional collection name") upload.dataCenter = cmdUpload.Flag.String("dataCenter", "", "optional data center name") upload.ttl = cmdUpload.Flag.String("ttl", "", "time to live, e.g.: 1m, 1h, 1d, 1M, 1y") - upload.maxMB = cmdUpload.Flag.Int("maxMB", 0, "split files larger than the limit") - upload.secretKey = cmdUpload.Flag.String("secure.secret", "", "secret to encrypt Json Web Token(JWT)") + upload.maxMB = cmdUpload.Flag.Int("maxMB", 32, "split files larger than the limit") + upload.usePublicUrl = cmdUpload.Flag.Bool("usePublicUrl", false, "upload to public url from volume server") } var cmdUpload = &Command{ @@ -53,14 +54,17 @@ var cmdUpload = &Command{ All files under the folder and subfolders will be uploaded, each with its own file key. Optional parameter "-include" allows you to specify the file name patterns. - If "maxMB" is set to a positive number, files larger than it would be split into chunks and uploaded separatedly. + If "maxMB" is set to a positive number, files larger than it would be split into chunks and uploaded separately. The list of file ids of those chunks would be stored in an additional chunk, and this additional chunk's file id would be returned. 
`, } func runUpload(cmd *Command, args []string) bool { - secret := security.Secret(*upload.secretKey) + + util.LoadConfiguration("security", false) + grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.client") + if len(args) == 0 { if *upload.dir == "" { return false @@ -77,9 +81,7 @@ func runUpload(cmd *Command, args []string) bool { if e != nil { return e } - results, e := operation.SubmitFiles(*upload.master, parts, - *upload.replication, *upload.collection, *upload.dataCenter, - *upload.ttl, *upload.maxMB, secret) + results, e := operation.SubmitFiles(*upload.master, grpcDialOption, parts, *upload.replication, *upload.collection, *upload.dataCenter, *upload.ttl, *upload.maxMB, *upload.usePublicUrl) bytes, _ := json.Marshal(results) fmt.Println(string(bytes)) if e != nil { @@ -96,9 +98,7 @@ func runUpload(cmd *Command, args []string) bool { if e != nil { fmt.Println(e.Error()) } - results, _ := operation.SubmitFiles(*upload.master, parts, - *upload.replication, *upload.collection, *upload.dataCenter, - *upload.ttl, *upload.maxMB, secret) + results, _ := operation.SubmitFiles(*upload.master, grpcDialOption, parts, *upload.replication, *upload.collection, *upload.dataCenter, *upload.ttl, *upload.maxMB, *upload.usePublicUrl) bytes, _ := json.Marshal(results) fmt.Println(string(bytes)) } diff --git a/weed/command/version.go b/weed/command/version.go index 8fdd68ec8..9caf7dc4e 100644 --- a/weed/command/version.go +++ b/weed/command/version.go @@ -19,6 +19,6 @@ func runVersion(cmd *Command, args []string) bool { cmd.Usage() } - fmt.Printf("version %s %s %s\n", util.VERSION, runtime.GOOS, runtime.GOARCH) + fmt.Printf("version %s %s %s\n", util.Version(), runtime.GOOS, runtime.GOARCH) return true } diff --git a/weed/command/volume.go b/weed/command/volume.go index 27a075b5b..0a7d52049 100644 --- a/weed/command/volume.go +++ b/weed/command/volume.go @@ -1,7 +1,9 @@ package command import ( + "fmt" "net/http" + httppprof "net/http/pprof" "os" "runtime" 
"runtime/pprof" @@ -9,12 +11,22 @@ import ( "strings" "time" + "github.com/spf13/viper" + "google.golang.org/grpc" + + "github.com/chrislusf/seaweedfs/weed/util/grace" + + "github.com/chrislusf/seaweedfs/weed/pb" + "github.com/chrislusf/seaweedfs/weed/security" + "github.com/chrislusf/seaweedfs/weed/util/httpdown" + + "google.golang.org/grpc/reflection" + "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb" "github.com/chrislusf/seaweedfs/weed/server" "github.com/chrislusf/seaweedfs/weed/storage" "github.com/chrislusf/seaweedfs/weed/util" - "google.golang.org/grpc/reflection" ) var ( @@ -30,37 +42,40 @@ type VolumeServerOptions struct { publicUrl *string bindIp *string masters *string - pulseSeconds *int idleConnectionTimeout *int - maxCpu *int dataCenter *string rack *string whiteList []string indexType *string - fixJpgOrientation *bool readRedirect *bool cpuProfile *string memProfile *string + compactionMBPerSecond *int + fileSizeLimitMB *int + minFreeSpacePercents []float32 + pprof *bool + // pulseSeconds *int } func init() { cmdVolume.Run = runVolume // break init cycle v.port = cmdVolume.Flag.Int("port", 8080, "http listen port") v.publicPort = cmdVolume.Flag.Int("port.public", 0, "port opened to public") - v.ip = cmdVolume.Flag.String("ip", "", "ip or server name") + v.ip = cmdVolume.Flag.String("ip", util.DetectedHostAddress(), "ip or server name") v.publicUrl = cmdVolume.Flag.String("publicUrl", "", "Publicly accessible address") v.bindIp = cmdVolume.Flag.String("ip.bind", "0.0.0.0", "ip address to bind to") v.masters = cmdVolume.Flag.String("mserver", "localhost:9333", "comma-separated master servers") - v.pulseSeconds = cmdVolume.Flag.Int("pulseSeconds", 5, "number of seconds between heartbeats, must be smaller than or equal to the master's setting") + // v.pulseSeconds = cmdVolume.Flag.Int("pulseSeconds", 5, "number of seconds between heartbeats, must be smaller than or equal to the master's setting") 
v.idleConnectionTimeout = cmdVolume.Flag.Int("idleTimeout", 30, "connection idle seconds") - v.maxCpu = cmdVolume.Flag.Int("maxCpu", 0, "maximum number of CPUs. 0 means all available CPUs") v.dataCenter = cmdVolume.Flag.String("dataCenter", "", "current volume server's data center name") v.rack = cmdVolume.Flag.String("rack", "", "current volume server's rack name") - v.indexType = cmdVolume.Flag.String("index", "memory", "Choose [memory|leveldb|boltdb|btree] mode for memory~performance balance.") - v.fixJpgOrientation = cmdVolume.Flag.Bool("images.fix.orientation", false, "Adjust jpg orientation when uploading.") + v.indexType = cmdVolume.Flag.String("index", "memory", "Choose [memory|leveldb|leveldbMedium|leveldbLarge] mode for memory~performance balance.") v.readRedirect = cmdVolume.Flag.Bool("read.redirect", true, "Redirect moved or non-local volumes.") v.cpuProfile = cmdVolume.Flag.String("cpuprofile", "", "cpu profile output file") v.memProfile = cmdVolume.Flag.String("memprofile", "", "memory profile output file") + v.compactionMBPerSecond = cmdVolume.Flag.Int("compactionMBps", 0, "limit background compaction or copying speed in mega bytes per second") + v.fileSizeLimitMB = cmdVolume.Flag.Int("fileSizeLimitMB", 256, "limit file size to avoid out of memory") + v.pprof = cmdVolume.Flag.Bool("pprof", false, "enable pprof http handlers. precludes --memprofile and --cpuprofile") } var cmdVolume = &Command{ @@ -73,26 +88,39 @@ var cmdVolume = &Command{ var ( volumeFolders = cmdVolume.Flag.String("dir", os.TempDir(), "directories to store data files. dir[,dir]...") - maxVolumeCounts = cmdVolume.Flag.String("max", "7", "maximum numbers of volumes, count[,count]...") + maxVolumeCounts = cmdVolume.Flag.String("max", "8", "maximum numbers of volumes, count[,count]... If set to zero, the limit will be auto configured.") volumeWhiteListOption = cmdVolume.Flag.String("whiteList", "", "comma separated Ip addresses having write permission. 
No limit if empty.") + minFreeSpacePercent = cmdVolume.Flag.String("minFreeSpacePercent", "1", "minimum free disk space (default to 1%). Low disk space will mark all volumes as ReadOnly.") ) func runVolume(cmd *Command, args []string) bool { - if *v.maxCpu < 1 { - *v.maxCpu = runtime.NumCPU() + + util.LoadConfiguration("security", false) + + runtime.GOMAXPROCS(runtime.NumCPU()) + + // If --pprof is set we assume the caller wants to be able to collect + // cpu and memory profiles via go tool pprof + if !*v.pprof { + grace.SetupProfiling(*v.cpuProfile, *v.memProfile) } - runtime.GOMAXPROCS(*v.maxCpu) - util.SetupProfiling(*v.cpuProfile, *v.memProfile) - v.startVolumeServer(*volumeFolders, *maxVolumeCounts, *volumeWhiteListOption) + v.startVolumeServer(*volumeFolders, *maxVolumeCounts, *volumeWhiteListOption, *minFreeSpacePercent) return true } -func (v VolumeServerOptions) startVolumeServer(volumeFolders, maxVolumeCounts, volumeWhiteListOption string) { +func (v VolumeServerOptions) startVolumeServer(volumeFolders, maxVolumeCounts, volumeWhiteListOption, minFreeSpacePercent string) { - //Set multiple folders and each folder's max volume count limit' + // Set multiple folders and each folder's max volume count limit' v.folders = strings.Split(volumeFolders, ",") + for _, folder := range v.folders { + if err := util.TestFolderWritable(folder); err != nil { + glog.Fatalf("Check Data Folder(-dir) Writable %s : %s", folder, err) + } + } + + // set max maxCountStrings := strings.Split(maxVolumeCounts, ",") for _, maxString := range maxCountStrings { if max, e := strconv.Atoi(maxString); e == nil { @@ -104,19 +132,33 @@ func (v VolumeServerOptions) startVolumeServer(volumeFolders, maxVolumeCounts, v if len(v.folders) != len(v.folderMaxLimits) { glog.Fatalf("%d directories by -dir, but only %d max is set by -max", len(v.folders), len(v.folderMaxLimits)) } - for _, folder := range v.folders { - if err := util.TestFolderWritable(folder); err != nil { - glog.Fatalf("Check Data 
Folder(-dir) Writable %s : %s", folder, err) + + // set minFreeSpacePercent + minFreeSpacePercentStrings := strings.Split(minFreeSpacePercent, ",") + for _, freeString := range minFreeSpacePercentStrings { + if value, e := strconv.ParseFloat(freeString, 32); e == nil { + v.minFreeSpacePercents = append(v.minFreeSpacePercents, float32(value)) + } else { + glog.Fatalf("The value specified in -minFreeSpacePercent not a valid value %s", freeString) + } + } + if len(v.minFreeSpacePercents) == 1 && len(v.folders) > 1 { + for i := 0; i < len(v.folders)-1; i++ { + v.minFreeSpacePercents = append(v.minFreeSpacePercents, v.minFreeSpacePercents[0]) } } + if len(v.folders) != len(v.minFreeSpacePercents) { + glog.Fatalf("%d directories by -dir, but only %d minFreeSpacePercent is set by -minFreeSpacePercent", len(v.folders), len(v.minFreeSpacePercents)) + } - //security related white list configuration + // security related white list configuration if volumeWhiteListOption != "" { v.whiteList = strings.Split(volumeWhiteListOption, ",") } if *v.ip == "" { - *v.ip = "127.0.0.1" + *v.ip = util.DetectedHostAddress() + glog.V(0).Infof("detected volume server ip address: %v", *v.ip) } if *v.publicPort == 0 { @@ -125,73 +167,169 @@ func (v VolumeServerOptions) startVolumeServer(volumeFolders, maxVolumeCounts, v if *v.publicUrl == "" { *v.publicUrl = *v.ip + ":" + strconv.Itoa(*v.publicPort) } - isSeperatedPublicPort := *v.publicPort != *v.port volumeMux := http.NewServeMux() publicVolumeMux := volumeMux - if isSeperatedPublicPort { + if v.isSeparatedPublicPort() { publicVolumeMux = http.NewServeMux() } + if *v.pprof { + volumeMux.HandleFunc("/debug/pprof/", httppprof.Index) + volumeMux.HandleFunc("/debug/pprof/cmdline", httppprof.Cmdline) + volumeMux.HandleFunc("/debug/pprof/profile", httppprof.Profile) + volumeMux.HandleFunc("/debug/pprof/symbol", httppprof.Symbol) + volumeMux.HandleFunc("/debug/pprof/trace", httppprof.Trace) + } + volumeNeedleMapKind := storage.NeedleMapInMemory 
switch *v.indexType { case "leveldb": volumeNeedleMapKind = storage.NeedleMapLevelDb - case "boltdb": - volumeNeedleMapKind = storage.NeedleMapBoltDb - case "btree": - volumeNeedleMapKind = storage.NeedleMapBtree + case "leveldbMedium": + volumeNeedleMapKind = storage.NeedleMapLevelDbMedium + case "leveldbLarge": + volumeNeedleMapKind = storage.NeedleMapLevelDbLarge } masters := *v.masters volumeServer := weed_server.NewVolumeServer(volumeMux, publicVolumeMux, *v.ip, *v.port, *v.publicUrl, - v.folders, v.folderMaxLimits, + v.folders, v.folderMaxLimits, v.minFreeSpacePercents, volumeNeedleMapKind, - strings.Split(masters, ","), *v.pulseSeconds, *v.dataCenter, *v.rack, + strings.Split(masters, ","), 5, *v.dataCenter, *v.rack, v.whiteList, - *v.fixJpgOrientation, *v.readRedirect, + *v.readRedirect, + *v.compactionMBPerSecond, + *v.fileSizeLimitMB, ) - listeningAddress := *v.bindIp + ":" + strconv.Itoa(*v.port) - glog.V(0).Infof("Start Seaweed volume server %s at %s", util.VERSION, listeningAddress) - listener, e := util.NewListener(listeningAddress, time.Duration(*v.idleConnectionTimeout)*time.Second) - if e != nil { - glog.Fatalf("Volume server listener error:%v", e) - } - if isSeperatedPublicPort { - publicListeningAddress := *v.bindIp + ":" + strconv.Itoa(*v.publicPort) - glog.V(0).Infoln("Start Seaweed volume server", util.VERSION, "public at", publicListeningAddress) - publicListener, e := util.NewListener(publicListeningAddress, time.Duration(*v.idleConnectionTimeout)*time.Second) - if e != nil { - glog.Fatalf("Volume server listener error:%v", e) + // starting grpc server + grpcS := v.startGrpcService(volumeServer) + + // starting public http server + var publicHttpDown httpdown.Server + if v.isSeparatedPublicPort() { + publicHttpDown = v.startPublicHttpService(publicVolumeMux) + if nil == publicHttpDown { + glog.Fatalf("start public http service failed") } - go func() { - if e := http.Serve(publicListener, publicVolumeMux); e != nil { - glog.Fatalf("Volume 
server fail to serve public: %v", e) - } - }() } - util.OnInterrupt(func() { + // starting the cluster http server + clusterHttpServer := v.startClusterHttpService(volumeMux) + + stopChain := make(chan struct{}) + grace.OnInterrupt(func() { + fmt.Println("volume server has be killed") + var startTime time.Time + + // firstly, stop the public http service to prevent from receiving new user request + if nil != publicHttpDown { + startTime = time.Now() + if err := publicHttpDown.Stop(); err != nil { + glog.Warningf("stop the public http server failed, %v", err) + } + delta := time.Now().Sub(startTime).Nanoseconds() / 1e6 + glog.V(0).Infof("stop public http server, elapsed %dms", delta) + } + + startTime = time.Now() + if err := clusterHttpServer.Stop(); err != nil { + glog.Warningf("stop the cluster http server failed, %v", err) + } + delta := time.Now().Sub(startTime).Nanoseconds() / 1e6 + glog.V(0).Infof("graceful stop cluster http server, elapsed [%d]", delta) + + startTime = time.Now() + grpcS.GracefulStop() + delta = time.Now().Sub(startTime).Nanoseconds() / 1e6 + glog.V(0).Infof("graceful stop gRPC, elapsed [%d]", delta) + + startTime = time.Now() volumeServer.Shutdown() + delta = time.Now().Sub(startTime).Nanoseconds() / 1e6 + glog.V(0).Infof("stop volume server, elapsed [%d]", delta) + pprof.StopCPUProfile() + + close(stopChain) // notify exit }) - // starting grpc server + select { + case <-stopChain: + } + glog.Warningf("the volume server exit.") +} + +// check whether configure the public port +func (v VolumeServerOptions) isSeparatedPublicPort() bool { + return *v.publicPort != *v.port +} + +func (v VolumeServerOptions) startGrpcService(vs volume_server_pb.VolumeServerServer) *grpc.Server { grpcPort := *v.port + 10000 grpcL, err := util.NewListener(*v.bindIp+":"+strconv.Itoa(grpcPort), 0) if err != nil { glog.Fatalf("failed to listen on grpc port %d: %v", grpcPort, err) } - grpcS := util.NewGrpcServer() - volume_server_pb.RegisterVolumeServerServer(grpcS, 
volumeServer) + grpcS := pb.NewGrpcServer(security.LoadServerTLS(util.GetViper(), "grpc.volume")) + volume_server_pb.RegisterVolumeServerServer(grpcS, vs) reflection.Register(grpcS) - go grpcS.Serve(grpcL) + go func() { + if err := grpcS.Serve(grpcL); err != nil { + glog.Fatalf("start gRPC service failed, %s", err) + } + }() + return grpcS +} - if e := http.Serve(listener, volumeMux); e != nil { - glog.Fatalf("Volume server fail to serve: %v", e) +func (v VolumeServerOptions) startPublicHttpService(handler http.Handler) httpdown.Server { + publicListeningAddress := *v.bindIp + ":" + strconv.Itoa(*v.publicPort) + glog.V(0).Infoln("Start Seaweed volume server", util.Version(), "public at", publicListeningAddress) + publicListener, e := util.NewListener(publicListeningAddress, time.Duration(*v.idleConnectionTimeout)*time.Second) + if e != nil { + glog.Fatalf("Volume server listener error:%v", e) + } + + pubHttp := httpdown.HTTP{StopTimeout: 5 * time.Minute, KillTimeout: 5 * time.Minute} + publicHttpDown := pubHttp.Serve(&http.Server{Handler: handler}, publicListener) + go func() { + if err := publicHttpDown.Wait(); err != nil { + glog.Errorf("public http down wait failed, %v", err) + } + }() + + return publicHttpDown +} + +func (v VolumeServerOptions) startClusterHttpService(handler http.Handler) httpdown.Server { + var ( + certFile, keyFile string + ) + if viper.GetString("https.volume.key") != "" { + certFile = viper.GetString("https.volume.cert") + keyFile = viper.GetString("https.volume.key") + } + + listeningAddress := *v.bindIp + ":" + strconv.Itoa(*v.port) + glog.V(0).Infof("Start Seaweed volume server %s at %s", util.Version(), listeningAddress) + listener, e := util.NewListener(listeningAddress, time.Duration(*v.idleConnectionTimeout)*time.Second) + if e != nil { + glog.Fatalf("Volume server listener error:%v", e) } + httpDown := httpdown.HTTP{ + KillTimeout: 5 * time.Minute, + StopTimeout: 5 * time.Minute, + CertFile: certFile, + KeyFile: keyFile} + 
clusterHttpServer := httpDown.Serve(&http.Server{Handler: handler}, listener) + go func() { + if e := clusterHttpServer.Wait(); e != nil { + glog.Fatalf("Volume server fail to serve: %v", e) + } + }() + return clusterHttpServer } diff --git a/weed/command/watch.go b/weed/command/watch.go new file mode 100644 index 000000000..b46707a62 --- /dev/null +++ b/weed/command/watch.go @@ -0,0 +1,65 @@ +package command + +import ( + "context" + "fmt" + "io" + "time" + + "github.com/chrislusf/seaweedfs/weed/pb" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/security" + "github.com/chrislusf/seaweedfs/weed/util" +) + +func init() { + cmdWatch.Run = runWatch // break init cycle +} + +var cmdWatch = &Command{ + UsageLine: "watch [-filer=localhost:8888] [-target=/]", + Short: "see recent changes on a filer", + Long: `See recent changes on a filer. + + `, +} + +var ( + watchFiler = cmdWatch.Flag.String("filer", "localhost:8888", "filer hostname:port") + watchTarget = cmdWatch.Flag.String("pathPrefix", "/", "path to a folder or file, or common prefix for the folders or files on filer") + watchStart = cmdWatch.Flag.Duration("timeAgo", 0, "start time before now. \"300ms\", \"1.5h\" or \"2h45m\". 
Valid time units are \"ns\", \"us\" (or \"µs\"), \"ms\", \"s\", \"m\", \"h\"") +) + +func runWatch(cmd *Command, args []string) bool { + + grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.client") + + watchErr := pb.WithFilerClient(*watchFiler, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error { + + stream, err := client.SubscribeMetadata(context.Background(), &filer_pb.SubscribeMetadataRequest{ + ClientName: "watch", + PathPrefix: *watchTarget, + SinceNs: time.Now().Add(-*watchStart).UnixNano(), + }) + if err != nil { + return fmt.Errorf("listen: %v", err) + } + + for { + resp, listenErr := stream.Recv() + if listenErr == io.EOF { + return nil + } + if listenErr != nil { + return listenErr + } + fmt.Printf("events: %+v\n", resp.EventNotification) + } + + }) + if watchErr != nil { + fmt.Printf("watch %s: %v\n", *watchFiler, watchErr) + } + + return true +} diff --git a/weed/command/webdav.go b/weed/command/webdav.go new file mode 100644 index 000000000..b9676c909 --- /dev/null +++ b/weed/command/webdav.go @@ -0,0 +1,142 @@ +package command + +import ( + "context" + "fmt" + "net/http" + "os" + "os/user" + "strconv" + "time" + + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/security" + "github.com/chrislusf/seaweedfs/weed/server" + "github.com/chrislusf/seaweedfs/weed/util" +) + +var ( + webDavStandaloneOptions WebDavOption +) + +type WebDavOption struct { + filer *string + port *int + collection *string + tlsPrivateKey *string + tlsCertificate *string + cacheDir *string + cacheSizeMB *int64 +} + +func init() { + cmdWebDav.Run = runWebDav // break init cycle + webDavStandaloneOptions.filer = cmdWebDav.Flag.String("filer", "localhost:8888", "filer server address") + webDavStandaloneOptions.port = cmdWebDav.Flag.Int("port", 7333, "webdav server http listen port") + webDavStandaloneOptions.collection = 
cmdWebDav.Flag.String("collection", "", "collection to create the files") + webDavStandaloneOptions.tlsPrivateKey = cmdWebDav.Flag.String("key.file", "", "path to the TLS private key file") + webDavStandaloneOptions.tlsCertificate = cmdWebDav.Flag.String("cert.file", "", "path to the TLS certificate file") + webDavStandaloneOptions.cacheDir = cmdWebDav.Flag.String("cacheDir", os.TempDir(), "local cache directory for file chunks") + webDavStandaloneOptions.cacheSizeMB = cmdWebDav.Flag.Int64("cacheCapacityMB", 1000, "local cache capacity in MB") +} + +var cmdWebDav = &Command{ + UsageLine: "webdav -port=7333 -filer=", + Short: "start a webdav server that is backed by a filer", + Long: `start a webdav server that is backed by a filer. + +`, +} + +func runWebDav(cmd *Command, args []string) bool { + + util.LoadConfiguration("security", false) + + glog.V(0).Infof("Starting Seaweed WebDav Server %s at https port %d", util.Version(), *webDavStandaloneOptions.port) + + return webDavStandaloneOptions.startWebDav() + +} + +func (wo *WebDavOption) startWebDav() bool { + + // detect current user + uid, gid := uint32(0), uint32(0) + if u, err := user.Current(); err == nil { + if parsedId, pe := strconv.ParseUint(u.Uid, 10, 32); pe == nil { + uid = uint32(parsedId) + } + if parsedId, pe := strconv.ParseUint(u.Gid, 10, 32); pe == nil { + gid = uint32(parsedId) + } + } + + // parse filer grpc address + filerGrpcAddress, err := pb.ParseFilerGrpcAddress(*wo.filer) + if err != nil { + glog.Fatal(err) + return false + } + + grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.client") + + var cipher bool + // connect to filer + for { + err = pb.WithGrpcFilerClient(filerGrpcAddress, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error { + resp, err := client.GetFilerConfiguration(context.Background(), &filer_pb.GetFilerConfigurationRequest{}) + if err != nil { + return fmt.Errorf("get filer %s configuration: %v", filerGrpcAddress, err) + } + cipher = resp.Cipher + 
return nil + }) + if err != nil { + glog.V(0).Infof("wait to connect to filer %s grpc address %s", *wo.filer, filerGrpcAddress) + time.Sleep(time.Second) + } else { + glog.V(0).Infof("connected to filer %s grpc address %s", *wo.filer, filerGrpcAddress) + break + } + } + + ws, webdavServer_err := weed_server.NewWebDavServer(&weed_server.WebDavOption{ + Filer: *wo.filer, + FilerGrpcAddress: filerGrpcAddress, + GrpcDialOption: grpcDialOption, + Collection: *wo.collection, + Uid: uid, + Gid: gid, + Cipher: cipher, + CacheDir: *wo.cacheDir, + CacheSizeMB: *wo.cacheSizeMB, + }) + if webdavServer_err != nil { + glog.Fatalf("WebDav Server startup error: %v", webdavServer_err) + } + + httpS := &http.Server{Handler: ws.Handler} + + listenAddress := fmt.Sprintf(":%d", *wo.port) + webDavListener, err := util.NewListener(listenAddress, time.Duration(10)*time.Second) + if err != nil { + glog.Fatalf("WebDav Server listener on %s error: %v", listenAddress, err) + } + + if *wo.tlsPrivateKey != "" { + glog.V(0).Infof("Start Seaweed WebDav Server %s at https port %d", util.Version(), *wo.port) + if err = httpS.ServeTLS(webDavListener, *wo.tlsCertificate, *wo.tlsPrivateKey); err != nil { + glog.Fatalf("WebDav Server Fail to serve: %v", err) + } + } else { + glog.V(0).Infof("Start Seaweed WebDav Server %s at http port %d", util.Version(), *wo.port) + if err = httpS.Serve(webDavListener); err != nil { + glog.Fatalf("WebDav Server Fail to serve: %v", err) + } + } + + return true + +} diff --git a/weed/filer2/abstract_sql/abstract_sql_store.go b/weed/filer2/abstract_sql/abstract_sql_store.go index 5f2990475..5ade18960 100644 --- a/weed/filer2/abstract_sql/abstract_sql_store.go +++ b/weed/filer2/abstract_sql/abstract_sql_store.go @@ -1,24 +1,65 @@ package abstract_sql import ( + "context" "database/sql" "fmt" "github.com/chrislusf/seaweedfs/weed/filer2" "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + 
"github.com/chrislusf/seaweedfs/weed/util" ) type AbstractSqlStore struct { - DB *sql.DB - SqlInsert string - SqlUpdate string - SqlFind string - SqlDelete string - SqlListExclusive string - SqlListInclusive string + DB *sql.DB + SqlInsert string + SqlUpdate string + SqlFind string + SqlDelete string + SqlDeleteFolderChildren string + SqlListExclusive string + SqlListInclusive string } -func (store *AbstractSqlStore) InsertEntry(entry *filer2.Entry) (err error) { +type TxOrDB interface { + ExecContext(ctx context.Context, query string, args ...interface{}) (sql.Result, error) + QueryRowContext(ctx context.Context, query string, args ...interface{}) *sql.Row + QueryContext(ctx context.Context, query string, args ...interface{}) (*sql.Rows, error) +} + +func (store *AbstractSqlStore) BeginTransaction(ctx context.Context) (context.Context, error) { + tx, err := store.DB.BeginTx(ctx, &sql.TxOptions{ + Isolation: sql.LevelReadCommitted, + ReadOnly: false, + }) + if err != nil { + return ctx, err + } + + return context.WithValue(ctx, "tx", tx), nil +} +func (store *AbstractSqlStore) CommitTransaction(ctx context.Context) error { + if tx, ok := ctx.Value("tx").(*sql.Tx); ok { + return tx.Commit() + } + return nil +} +func (store *AbstractSqlStore) RollbackTransaction(ctx context.Context) error { + if tx, ok := ctx.Value("tx").(*sql.Tx); ok { + return tx.Rollback() + } + return nil +} + +func (store *AbstractSqlStore) getTxOrDB(ctx context.Context) TxOrDB { + if tx, ok := ctx.Value("tx").(*sql.Tx); ok { + return tx + } + return store.DB +} + +func (store *AbstractSqlStore) InsertEntry(ctx context.Context, entry *filer2.Entry) (err error) { dir, name := entry.FullPath.DirAndName() meta, err := entry.EncodeAttributesAndChunks() @@ -26,7 +67,7 @@ func (store *AbstractSqlStore) InsertEntry(entry *filer2.Entry) (err error) { return fmt.Errorf("encode %s: %s", entry.FullPath, err) } - res, err := store.DB.Exec(store.SqlInsert, hashToLong(dir), name, dir, meta) + res, err := 
store.getTxOrDB(ctx).ExecContext(ctx, store.SqlInsert, util.HashStringToLong(dir), name, dir, meta) if err != nil { return fmt.Errorf("insert %s: %s", entry.FullPath, err) } @@ -38,7 +79,7 @@ func (store *AbstractSqlStore) InsertEntry(entry *filer2.Entry) (err error) { return nil } -func (store *AbstractSqlStore) UpdateEntry(entry *filer2.Entry) (err error) { +func (store *AbstractSqlStore) UpdateEntry(ctx context.Context, entry *filer2.Entry) (err error) { dir, name := entry.FullPath.DirAndName() meta, err := entry.EncodeAttributesAndChunks() @@ -46,7 +87,7 @@ func (store *AbstractSqlStore) UpdateEntry(entry *filer2.Entry) (err error) { return fmt.Errorf("encode %s: %s", entry.FullPath, err) } - res, err := store.DB.Exec(store.SqlUpdate, meta, hashToLong(dir), name, dir) + res, err := store.getTxOrDB(ctx).ExecContext(ctx, store.SqlUpdate, meta, util.HashStringToLong(dir), name, dir) if err != nil { return fmt.Errorf("update %s: %s", entry.FullPath, err) } @@ -58,13 +99,13 @@ func (store *AbstractSqlStore) UpdateEntry(entry *filer2.Entry) (err error) { return nil } -func (store *AbstractSqlStore) FindEntry(fullpath filer2.FullPath) (*filer2.Entry, error) { +func (store *AbstractSqlStore) FindEntry(ctx context.Context, fullpath util.FullPath) (*filer2.Entry, error) { dir, name := fullpath.DirAndName() - row := store.DB.QueryRow(store.SqlFind, hashToLong(dir), name, dir) + row := store.getTxOrDB(ctx).QueryRowContext(ctx, store.SqlFind, util.HashStringToLong(dir), name, dir) var data []byte if err := row.Scan(&data); err != nil { - return nil, filer2.ErrNotFound + return nil, filer_pb.ErrNotFound } entry := &filer2.Entry{ @@ -77,11 +118,11 @@ func (store *AbstractSqlStore) FindEntry(fullpath filer2.FullPath) (*filer2.Entr return entry, nil } -func (store *AbstractSqlStore) DeleteEntry(fullpath filer2.FullPath) error { +func (store *AbstractSqlStore) DeleteEntry(ctx context.Context, fullpath util.FullPath) error { dir, name := fullpath.DirAndName() - res, err := 
store.DB.Exec(store.SqlDelete, hashToLong(dir), name, dir) + res, err := store.getTxOrDB(ctx).ExecContext(ctx, store.SqlDelete, util.HashStringToLong(dir), name, dir) if err != nil { return fmt.Errorf("delete %s: %s", fullpath, err) } @@ -94,14 +135,29 @@ func (store *AbstractSqlStore) DeleteEntry(fullpath filer2.FullPath) error { return nil } -func (store *AbstractSqlStore) ListDirectoryEntries(fullpath filer2.FullPath, startFileName string, inclusive bool, limit int) (entries []*filer2.Entry, err error) { +func (store *AbstractSqlStore) DeleteFolderChildren(ctx context.Context, fullpath util.FullPath) error { + + res, err := store.getTxOrDB(ctx).ExecContext(ctx, store.SqlDeleteFolderChildren, util.HashStringToLong(string(fullpath)), fullpath) + if err != nil { + return fmt.Errorf("deleteFolderChildren %s: %s", fullpath, err) + } + + _, err = res.RowsAffected() + if err != nil { + return fmt.Errorf("deleteFolderChildren %s but no rows affected: %s", fullpath, err) + } + + return nil +} + +func (store *AbstractSqlStore) ListDirectoryEntries(ctx context.Context, fullpath util.FullPath, startFileName string, inclusive bool, limit int) (entries []*filer2.Entry, err error) { sqlText := store.SqlListExclusive if inclusive { sqlText = store.SqlListInclusive } - rows, err := store.DB.Query(sqlText, hashToLong(string(fullpath)), startFileName, string(fullpath), limit) + rows, err := store.getTxOrDB(ctx).QueryContext(ctx, sqlText, util.HashStringToLong(string(fullpath)), startFileName, string(fullpath), limit) if err != nil { return nil, fmt.Errorf("list %s : %v", fullpath, err) } @@ -116,7 +172,7 @@ func (store *AbstractSqlStore) ListDirectoryEntries(fullpath filer2.FullPath, st } entry := &filer2.Entry{ - FullPath: filer2.NewFullPath(string(fullpath), name), + FullPath: util.NewFullPath(string(fullpath), name), } if err = entry.DecodeAttributesAndChunks(data); err != nil { glog.V(0).Infof("scan decode %s : %v", entry.FullPath, err) @@ -128,3 +184,7 @@ func (store 
*AbstractSqlStore) ListDirectoryEntries(fullpath filer2.FullPath, st return entries, nil } + +func (store *AbstractSqlStore) Shutdown() { + store.DB.Close() +} diff --git a/weed/filer2/abstract_sql/hashing.go b/weed/filer2/abstract_sql/hashing.go deleted file mode 100644 index 5c982c537..000000000 --- a/weed/filer2/abstract_sql/hashing.go +++ /dev/null @@ -1,32 +0,0 @@ -package abstract_sql - -import ( - "crypto/md5" - "io" -) - -// returns a 64 bit big int -func hashToLong(dir string) (v int64) { - h := md5.New() - io.WriteString(h, dir) - - b := h.Sum(nil) - - v += int64(b[0]) - v <<= 8 - v += int64(b[1]) - v <<= 8 - v += int64(b[2]) - v <<= 8 - v += int64(b[3]) - v <<= 8 - v += int64(b[4]) - v <<= 8 - v += int64(b[5]) - v <<= 8 - v += int64(b[6]) - v <<= 8 - v += int64(b[7]) - - return -} diff --git a/weed/filer2/cassandra/cassandra_store.go b/weed/filer2/cassandra/cassandra_store.go index 2c1f03182..5dd7d8036 100644 --- a/weed/filer2/cassandra/cassandra_store.go +++ b/weed/filer2/cassandra/cassandra_store.go @@ -1,11 +1,15 @@ package cassandra import ( + "context" "fmt" + + "github.com/gocql/gocql" + "github.com/chrislusf/seaweedfs/weed/filer2" "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" "github.com/chrislusf/seaweedfs/weed/util" - "github.com/gocql/gocql" ) func init() { @@ -21,10 +25,10 @@ func (store *CassandraStore) GetName() string { return "cassandra" } -func (store *CassandraStore) Initialize(configuration util.Configuration) (err error) { +func (store *CassandraStore) Initialize(configuration util.Configuration, prefix string) (err error) { return store.initialize( - configuration.GetString("keyspace"), - configuration.GetStringSlice("hosts"), + configuration.GetString(prefix+"keyspace"), + configuration.GetStringSlice(prefix+"hosts"), ) } @@ -39,7 +43,17 @@ func (store *CassandraStore) initialize(keyspace string, hosts []string) (err er return } -func (store *CassandraStore) InsertEntry(entry 
*filer2.Entry) (err error) { +func (store *CassandraStore) BeginTransaction(ctx context.Context) (context.Context, error) { + return ctx, nil +} +func (store *CassandraStore) CommitTransaction(ctx context.Context) error { + return nil +} +func (store *CassandraStore) RollbackTransaction(ctx context.Context) error { + return nil +} + +func (store *CassandraStore) InsertEntry(ctx context.Context, entry *filer2.Entry) (err error) { dir, name := entry.FullPath.DirAndName() meta, err := entry.EncodeAttributesAndChunks() @@ -56,12 +70,12 @@ func (store *CassandraStore) InsertEntry(entry *filer2.Entry) (err error) { return nil } -func (store *CassandraStore) UpdateEntry(entry *filer2.Entry) (err error) { +func (store *CassandraStore) UpdateEntry(ctx context.Context, entry *filer2.Entry) (err error) { - return store.InsertEntry(entry) + return store.InsertEntry(ctx, entry) } -func (store *CassandraStore) FindEntry(fullpath filer2.FullPath) (entry *filer2.Entry, err error) { +func (store *CassandraStore) FindEntry(ctx context.Context, fullpath util.FullPath) (entry *filer2.Entry, err error) { dir, name := fullpath.DirAndName() var data []byte @@ -69,12 +83,12 @@ func (store *CassandraStore) FindEntry(fullpath filer2.FullPath) (entry *filer2. "SELECT meta FROM filemeta WHERE directory=? AND name=?", dir, name).Consistency(gocql.One).Scan(&data); err != nil { if err != gocql.ErrNotFound { - return nil, filer2.ErrNotFound + return nil, filer_pb.ErrNotFound } } if len(data) == 0 { - return nil, fmt.Errorf("not found: %s", fullpath) + return nil, filer_pb.ErrNotFound } entry = &filer2.Entry{ @@ -88,7 +102,7 @@ func (store *CassandraStore) FindEntry(fullpath filer2.FullPath) (entry *filer2. 
return entry, nil } -func (store *CassandraStore) DeleteEntry(fullpath filer2.FullPath) error { +func (store *CassandraStore) DeleteEntry(ctx context.Context, fullpath util.FullPath) error { dir, name := fullpath.DirAndName() @@ -101,7 +115,18 @@ func (store *CassandraStore) DeleteEntry(fullpath filer2.FullPath) error { return nil } -func (store *CassandraStore) ListDirectoryEntries(fullpath filer2.FullPath, startFileName string, inclusive bool, +func (store *CassandraStore) DeleteFolderChildren(ctx context.Context, fullpath util.FullPath) error { + + if err := store.session.Query( + "DELETE FROM filemeta WHERE directory=?", + fullpath).Exec(); err != nil { + return fmt.Errorf("delete %s : %v", fullpath, err) + } + + return nil +} + +func (store *CassandraStore) ListDirectoryEntries(ctx context.Context, fullpath util.FullPath, startFileName string, inclusive bool, limit int) (entries []*filer2.Entry, err error) { cqlStr := "SELECT NAME, meta FROM filemeta WHERE directory=? AND name>? ORDER BY NAME ASC LIMIT ?" 
@@ -114,7 +139,7 @@ func (store *CassandraStore) ListDirectoryEntries(fullpath filer2.FullPath, star iter := store.session.Query(cqlStr, string(fullpath), startFileName, limit).Iter() for iter.Scan(&name, &data) { entry := &filer2.Entry{ - FullPath: filer2.NewFullPath(string(fullpath), name), + FullPath: util.NewFullPath(string(fullpath), name), } if decodeErr := entry.DecodeAttributesAndChunks(data); decodeErr != nil { err = decodeErr @@ -129,3 +154,7 @@ func (store *CassandraStore) ListDirectoryEntries(fullpath filer2.FullPath, star return entries, err } + +func (store *CassandraStore) Shutdown() { + store.session.Close() +} diff --git a/weed/filer2/configuration.go b/weed/filer2/configuration.go index 7b05b53dc..a174117ea 100644 --- a/weed/filer2/configuration.go +++ b/weed/filer2/configuration.go @@ -17,8 +17,7 @@ func (f *Filer) LoadConfiguration(config *viper.Viper) { for _, store := range Stores { if config.GetBool(store.GetName() + ".enabled") { - viperSub := config.Sub(store.GetName()) - if err := store.Initialize(viperSub); err != nil { + if err := store.Initialize(config, store.GetName()+"."); err != nil { glog.Fatalf("Failed to initialize store for %s: %+v", store.GetName(), err) } diff --git a/weed/filer2/entry.go b/weed/filer2/entry.go index f17a11727..00b9b132d 100644 --- a/weed/filer2/entry.go +++ b/weed/filer2/entry.go @@ -5,6 +5,7 @@ import ( "time" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/util" ) type Attr struct { @@ -20,6 +21,7 @@ type Attr struct { UserName string GroupNames []string SymlinkTarget string + Md5 []byte } func (attr Attr) IsDirectory() bool { @@ -27,9 +29,10 @@ func (attr Attr) IsDirectory() bool { } type Entry struct { - FullPath + util.FullPath Attr + Extended map[string][]byte // the following is for files Chunks []*filer_pb.FileChunk `json:"chunks,omitempty"` @@ -52,9 +55,29 @@ func (entry *Entry) ToProtoEntry() *filer_pb.Entry { return nil } return &filer_pb.Entry{ - Name: 
string(entry.FullPath), + Name: entry.FullPath.Name(), IsDirectory: entry.IsDirectory(), Attributes: EntryAttributeToPb(entry), Chunks: entry.Chunks, + Extended: entry.Extended, + } +} + +func (entry *Entry) ToProtoFullEntry() *filer_pb.FullEntry { + if entry == nil { + return nil + } + dir, _ := entry.FullPath.DirAndName() + return &filer_pb.FullEntry{ + Dir: dir, + Entry: entry.ToProtoEntry(), + } +} + +func FromPbEntry(dir string, entry *filer_pb.Entry) *Entry { + return &Entry{ + FullPath: util.NewFullPath(dir, entry.Name), + Attr: PbToEntryAttribute(entry.Attributes), + Chunks: entry.Chunks, } } diff --git a/weed/filer2/entry_codec.go b/weed/filer2/entry_codec.go index e50b3fa9a..47c911011 100644 --- a/weed/filer2/entry_codec.go +++ b/weed/filer2/entry_codec.go @@ -1,18 +1,21 @@ package filer2 import ( + "bytes" + "fmt" "os" "time" - "fmt" + "github.com/golang/protobuf/proto" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" - "github.com/gogo/protobuf/proto" ) func (entry *Entry) EncodeAttributesAndChunks() ([]byte, error) { message := &filer_pb.Entry{ Attributes: EntryAttributeToPb(entry), Chunks: entry.Chunks, + Extended: entry.Extended, } return proto.Marshal(message) } @@ -27,6 +30,8 @@ func (entry *Entry) DecodeAttributesAndChunks(blob []byte) error { entry.Attr = PbToEntryAttribute(message.Attributes) + entry.Extended = message.Extended + entry.Chunks = message.Chunks return nil @@ -47,6 +52,7 @@ func EntryAttributeToPb(entry *Entry) *filer_pb.FuseAttributes { UserName: entry.Attr.UserName, GroupName: entry.Attr.GroupNames, SymlinkTarget: entry.Attr.SymlinkTarget, + Md5: entry.Attr.Md5, } } @@ -66,6 +72,7 @@ func PbToEntryAttribute(attr *filer_pb.FuseAttributes) Attr { t.UserName = attr.UserName t.GroupNames = attr.GroupName t.SymlinkTarget = attr.SymlinkTarget + t.Md5 = attr.Md5 return t } @@ -84,6 +91,14 @@ func EqualEntry(a, b *Entry) bool { return false } + if !eq(a.Extended, b.Extended) { + return false + } + + if !bytes.Equal(a.Md5, b.Md5) { + 
return false + } + for i := 0; i < len(a.Chunks); i++ { if !proto.Equal(a.Chunks[i], b.Chunks[i]) { return false @@ -91,3 +106,17 @@ func EqualEntry(a, b *Entry) bool { } return true } + +func eq(a, b map[string][]byte) bool { + if len(a) != len(b) { + return false + } + + for k, v := range a { + if w, ok := b[k]; !ok || !bytes.Equal(v, w) { + return false + } + } + + return true +} diff --git a/weed/filer2/etcd/etcd_store.go b/weed/filer2/etcd/etcd_store.go new file mode 100644 index 000000000..2ef65b4a0 --- /dev/null +++ b/weed/filer2/etcd/etcd_store.go @@ -0,0 +1,202 @@ +package etcd + +import ( + "context" + "fmt" + "strings" + "time" + + "go.etcd.io/etcd/clientv3" + + "github.com/chrislusf/seaweedfs/weed/filer2" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + weed_util "github.com/chrislusf/seaweedfs/weed/util" +) + +const ( + DIR_FILE_SEPARATOR = byte(0x00) +) + +func init() { + filer2.Stores = append(filer2.Stores, &EtcdStore{}) +} + +type EtcdStore struct { + client *clientv3.Client +} + +func (store *EtcdStore) GetName() string { + return "etcd" +} + +func (store *EtcdStore) Initialize(configuration weed_util.Configuration, prefix string) (err error) { + servers := configuration.GetString(prefix + "servers") + if servers == "" { + servers = "localhost:2379" + } + + timeout := configuration.GetString(prefix + "timeout") + if timeout == "" { + timeout = "3s" + } + + return store.initialize(servers, timeout) +} + +func (store *EtcdStore) initialize(servers string, timeout string) (err error) { + glog.Infof("filer store etcd: %s", servers) + + to, err := time.ParseDuration(timeout) + if err != nil { + return fmt.Errorf("parse timeout %s: %s", timeout, err) + } + + store.client, err = clientv3.New(clientv3.Config{ + Endpoints: strings.Split(servers, ","), + DialTimeout: to, + }) + if err != nil { + return fmt.Errorf("connect to etcd %s: %s", servers, err) + } + + return +} + +func (store *EtcdStore) 
BeginTransaction(ctx context.Context) (context.Context, error) { + return ctx, nil +} +func (store *EtcdStore) CommitTransaction(ctx context.Context) error { + return nil +} +func (store *EtcdStore) RollbackTransaction(ctx context.Context) error { + return nil +} + +func (store *EtcdStore) InsertEntry(ctx context.Context, entry *filer2.Entry) (err error) { + key := genKey(entry.DirAndName()) + + value, err := entry.EncodeAttributesAndChunks() + if err != nil { + return fmt.Errorf("encoding %s %+v: %v", entry.FullPath, entry.Attr, err) + } + + if _, err := store.client.Put(ctx, string(key), string(value)); err != nil { + return fmt.Errorf("persisting %s : %v", entry.FullPath, err) + } + + return nil +} + +func (store *EtcdStore) UpdateEntry(ctx context.Context, entry *filer2.Entry) (err error) { + return store.InsertEntry(ctx, entry) +} + +func (store *EtcdStore) FindEntry(ctx context.Context, fullpath weed_util.FullPath) (entry *filer2.Entry, err error) { + key := genKey(fullpath.DirAndName()) + + resp, err := store.client.Get(ctx, string(key)) + if err != nil { + return nil, fmt.Errorf("get %s : %v", entry.FullPath, err) + } + + if len(resp.Kvs) == 0 { + return nil, filer_pb.ErrNotFound + } + + entry = &filer2.Entry{ + FullPath: fullpath, + } + err = entry.DecodeAttributesAndChunks(resp.Kvs[0].Value) + if err != nil { + return entry, fmt.Errorf("decode %s : %v", entry.FullPath, err) + } + + return entry, nil +} + +func (store *EtcdStore) DeleteEntry(ctx context.Context, fullpath weed_util.FullPath) (err error) { + key := genKey(fullpath.DirAndName()) + + if _, err := store.client.Delete(ctx, string(key)); err != nil { + return fmt.Errorf("delete %s : %v", fullpath, err) + } + + return nil +} + +func (store *EtcdStore) DeleteFolderChildren(ctx context.Context, fullpath weed_util.FullPath) (err error) { + directoryPrefix := genDirectoryKeyPrefix(fullpath, "") + + if _, err := store.client.Delete(ctx, string(directoryPrefix), clientv3.WithPrefix()); err != nil { + 
return fmt.Errorf("deleteFolderChildren %s : %v", fullpath, err) + } + + return nil +} + +func (store *EtcdStore) ListDirectoryEntries( + ctx context.Context, fullpath weed_util.FullPath, startFileName string, inclusive bool, limit int, +) (entries []*filer2.Entry, err error) { + directoryPrefix := genDirectoryKeyPrefix(fullpath, "") + + resp, err := store.client.Get(ctx, string(directoryPrefix), + clientv3.WithPrefix(), clientv3.WithSort(clientv3.SortByKey, clientv3.SortDescend)) + if err != nil { + return nil, fmt.Errorf("list %s : %v", fullpath, err) + } + + for _, kv := range resp.Kvs { + fileName := getNameFromKey(kv.Key) + if fileName == "" { + continue + } + if fileName == startFileName && !inclusive { + continue + } + limit-- + if limit < 0 { + break + } + entry := &filer2.Entry{ + FullPath: weed_util.NewFullPath(string(fullpath), fileName), + } + if decodeErr := entry.DecodeAttributesAndChunks(kv.Value); decodeErr != nil { + err = decodeErr + glog.V(0).Infof("list %s : %v", entry.FullPath, err) + break + } + entries = append(entries, entry) + } + + return entries, err +} + +func genKey(dirPath, fileName string) (key []byte) { + key = []byte(dirPath) + key = append(key, DIR_FILE_SEPARATOR) + key = append(key, []byte(fileName)...) + return key +} + +func genDirectoryKeyPrefix(fullpath weed_util.FullPath, startFileName string) (keyPrefix []byte) { + keyPrefix = []byte(string(fullpath)) + keyPrefix = append(keyPrefix, DIR_FILE_SEPARATOR) + if len(startFileName) > 0 { + keyPrefix = append(keyPrefix, []byte(startFileName)...) 
+ } + return keyPrefix +} + +func getNameFromKey(key []byte) string { + sepIndex := len(key) - 1 + for sepIndex >= 0 && key[sepIndex] != DIR_FILE_SEPARATOR { + sepIndex-- + } + + return string(key[sepIndex+1:]) +} + +func (store *EtcdStore) Shutdown() { + store.client.Close() +} diff --git a/weed/filer2/filechunks.go b/weed/filer2/filechunks.go index 6c3157e6c..6832d0f31 100644 --- a/weed/filer2/filechunks.go +++ b/weed/filer2/filechunks.go @@ -3,6 +3,7 @@ package filer2 import ( "fmt" "hash/fnv" + "math" "sort" "sync" @@ -19,7 +20,21 @@ func TotalSize(chunks []*filer_pb.FileChunk) (size uint64) { return } -func ETag(chunks []*filer_pb.FileChunk) (etag string) { +func ETag(entry *filer_pb.Entry) (etag string) { + if entry.Attributes == nil || entry.Attributes.Md5 == nil { + return ETagChunks(entry.Chunks) + } + return fmt.Sprintf("%x", entry.Attributes.Md5) +} + +func ETagEntry(entry *Entry) (etag string) { + if entry.Attr.Md5 == nil { + return ETagChunks(entry.Chunks) + } + return fmt.Sprintf("%x", entry.Attr.Md5) +} + +func ETagChunks(chunks []*filer_pb.FileChunk) (etag string) { if len(chunks) == 1 { return chunks[0].ETag } @@ -40,7 +55,7 @@ func CompactFileChunks(chunks []*filer_pb.FileChunk) (compacted, garbage []*file fileIds[interval.fileId] = true } for _, chunk := range chunks { - if found := fileIds[chunk.FileId]; found { + if _, found := fileIds[chunk.GetFileIdString()]; found { compacted = append(compacted, chunk) } else { garbage = append(garbage, chunk) @@ -50,15 +65,15 @@ func CompactFileChunks(chunks []*filer_pb.FileChunk) (compacted, garbage []*file return } -func FindUnusedFileChunks(oldChunks, newChunks []*filer_pb.FileChunk) (unused []*filer_pb.FileChunk) { +func MinusChunks(as, bs []*filer_pb.FileChunk) (delta []*filer_pb.FileChunk) { fileIds := make(map[string]bool) - for _, interval := range newChunks { - fileIds[interval.FileId] = true + for _, interval := range bs { + fileIds[interval.GetFileIdString()] = true } - for _, chunk := range 
oldChunks { - if found := fileIds[chunk.FileId]; !found { - unused = append(unused, chunk) + for _, chunk := range as { + if _, found := fileIds[chunk.GetFileIdString()]; !found { + delta = append(delta, chunk) } } @@ -70,10 +85,16 @@ type ChunkView struct { Offset int64 Size uint64 LogicOffset int64 - IsFullChunk bool + ChunkSize uint64 + CipherKey []byte + IsGzipped bool } -func ViewFromChunks(chunks []*filer_pb.FileChunk, offset int64, size int) (views []*ChunkView) { +func (cv *ChunkView) IsFullChunk() bool { + return cv.Size == cv.ChunkSize +} + +func ViewFromChunks(chunks []*filer_pb.FileChunk, offset int64, size int64) (views []*ChunkView) { visibles := NonOverlappingVisibleIntervals(chunks) @@ -81,19 +102,27 @@ func ViewFromChunks(chunks []*filer_pb.FileChunk, offset int64, size int) (views } -func ViewFromVisibleIntervals(visibles []VisibleInterval, offset int64, size int) (views []*ChunkView) { +func ViewFromVisibleIntervals(visibles []VisibleInterval, offset int64, size int64) (views []*ChunkView) { - stop := offset + int64(size) + stop := offset + size + if size == math.MaxInt64 { + stop = math.MaxInt64 + } + if stop < offset { + stop = math.MaxInt64 + } for _, chunk := range visibles { + if chunk.start <= offset && offset < chunk.stop && offset < stop { - isFullChunk := chunk.isFullChunk && chunk.start == offset && chunk.stop <= stop views = append(views, &ChunkView{ FileId: chunk.fileId, Offset: offset - chunk.start, // offset is the data starting location in this file id Size: uint64(min(chunk.stop, stop) - offset), LogicOffset: offset, - IsFullChunk: isFullChunk, + ChunkSize: chunk.chunkSize, + CipherKey: chunk.cipherKey, + IsGzipped: chunk.isGzipped, }) offset = min(chunk.stop, stop) } @@ -120,13 +149,7 @@ var bufPool = sync.Pool{ func MergeIntoVisibles(visibles, newVisibles []VisibleInterval, chunk *filer_pb.FileChunk) []VisibleInterval { - newV := newVisibleInterval( - chunk.Offset, - chunk.Offset+int64(chunk.Size), - chunk.FileId, - chunk.Mtime, 
- true, - ) + newV := newVisibleInterval(chunk.Offset, chunk.Offset+int64(chunk.Size), chunk.GetFileIdString(), chunk.Mtime, chunk.Size, chunk.CipherKey, chunk.IsCompressed) length := len(visibles) if length == 0 { @@ -140,23 +163,11 @@ func MergeIntoVisibles(visibles, newVisibles []VisibleInterval, chunk *filer_pb. logPrintf(" before", visibles) for _, v := range visibles { if v.start < chunk.Offset && chunk.Offset < v.stop { - newVisibles = append(newVisibles, newVisibleInterval( - v.start, - chunk.Offset, - v.fileId, - v.modifiedTime, - false, - )) + newVisibles = append(newVisibles, newVisibleInterval(v.start, chunk.Offset, v.fileId, v.modifiedTime, chunk.Size, v.cipherKey, v.isGzipped)) } chunkStop := chunk.Offset + int64(chunk.Size) if v.start < chunkStop && chunkStop < v.stop { - newVisibles = append(newVisibles, newVisibleInterval( - chunkStop, - v.stop, - v.fileId, - v.modifiedTime, - false, - )) + newVisibles = append(newVisibles, newVisibleInterval(chunkStop, v.stop, v.fileId, v.modifiedTime, chunk.Size, v.cipherKey, v.isGzipped)) } if chunkStop <= v.start || v.stop <= chunk.Offset { newVisibles = append(newVisibles, v) @@ -187,6 +198,7 @@ func NonOverlappingVisibleIntervals(chunks []*filer_pb.FileChunk) (visibles []Vi var newVisibles []VisibleInterval for _, chunk := range chunks { + newVisibles = MergeIntoVisibles(visibles, newVisibles, chunk) t := visibles[:0] visibles = newVisibles @@ -207,16 +219,20 @@ type VisibleInterval struct { stop int64 modifiedTime int64 fileId string - isFullChunk bool + chunkSize uint64 + cipherKey []byte + isGzipped bool } -func newVisibleInterval(start, stop int64, fileId string, modifiedTime int64, isFullChunk bool) VisibleInterval { +func newVisibleInterval(start, stop int64, fileId string, modifiedTime int64, chunkSize uint64, cipherKey []byte, isGzipped bool) VisibleInterval { return VisibleInterval{ start: start, stop: stop, fileId: fileId, modifiedTime: modifiedTime, - isFullChunk: isFullChunk, + chunkSize: 
chunkSize, + cipherKey: cipherKey, + isGzipped: isGzipped, } } diff --git a/weed/filer2/filechunks_test.go b/weed/filer2/filechunks_test.go index e75e60753..7b1133b85 100644 --- a/weed/filer2/filechunks_test.go +++ b/weed/filer2/filechunks_test.go @@ -218,7 +218,7 @@ func TestChunksReading(t *testing.T) { testcases := []struct { Chunks []*filer_pb.FileChunk Offset int64 - Size int + Size int64 Expected []*ChunkView }{ // case 0: normal @@ -331,6 +331,42 @@ func TestChunksReading(t *testing.T) { {Offset: 0, Size: 100, FileId: "asdf", LogicOffset: 100}, }, }, + // case 8: edge cases + { + Chunks: []*filer_pb.FileChunk{ + {Offset: 0, Size: 100, FileId: "abc", Mtime: 123}, + {Offset: 90, Size: 200, FileId: "asdf", Mtime: 134}, + {Offset: 190, Size: 300, FileId: "fsad", Mtime: 353}, + }, + Offset: 0, + Size: 300, + Expected: []*ChunkView{ + {Offset: 0, Size: 90, FileId: "abc", LogicOffset: 0}, + {Offset: 0, Size: 100, FileId: "asdf", LogicOffset: 90}, + {Offset: 0, Size: 110, FileId: "fsad", LogicOffset: 190}, + }, + }, + // case 9: edge cases + { + Chunks: []*filer_pb.FileChunk{ + {Offset: 0, Size: 43175947, FileId: "2,111fc2cbfac1", Mtime: 1}, + {Offset: 43175936, Size: 52981771 - 43175936, FileId: "2,112a36ea7f85", Mtime: 2}, + {Offset: 52981760, Size: 72564747 - 52981760, FileId: "4,112d5f31c5e7", Mtime: 3}, + {Offset: 72564736, Size: 133255179 - 72564736, FileId: "1,113245f0cdb6", Mtime: 4}, + {Offset: 133255168, Size: 137269259 - 133255168, FileId: "3,1141a70733b5", Mtime: 5}, + {Offset: 137269248, Size: 153578836 - 137269248, FileId: "1,114201d5bbdb", Mtime: 6}, + }, + Offset: 0, + Size: 153578836, + Expected: []*ChunkView{ + {Offset: 0, Size: 43175936, FileId: "2,111fc2cbfac1", LogicOffset: 0}, + {Offset: 0, Size: 52981760 - 43175936, FileId: "2,112a36ea7f85", LogicOffset: 43175936}, + {Offset: 0, Size: 72564736 - 52981760, FileId: "4,112d5f31c5e7", LogicOffset: 52981760}, + {Offset: 0, Size: 133255168 - 72564736, FileId: "1,113245f0cdb6", LogicOffset: 
72564736}, + {Offset: 0, Size: 137269248 - 133255168, FileId: "3,1141a70733b5", LogicOffset: 133255168}, + {Offset: 0, Size: 153578836 - 137269248, FileId: "1,114201d5bbdb", LogicOffset: 137269248}, + }, + }, } for i, testcase := range testcases { diff --git a/weed/filer2/filer.go b/weed/filer2/filer.go index 1ee2f5ede..06c997f36 100644 --- a/weed/filer2/filer.go +++ b/weed/filer2/filer.go @@ -3,35 +3,53 @@ package filer2 import ( "context" "fmt" - "math" "os" - "path/filepath" "strings" "time" + "google.golang.org/grpc" + + "github.com/karlseguin/ccache" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/util" + "github.com/chrislusf/seaweedfs/weed/util/log_buffer" "github.com/chrislusf/seaweedfs/weed/wdclient" - "github.com/karlseguin/ccache" ) +const PaginationSize = 1024 * 256 + var ( OS_UID = uint32(os.Getuid()) OS_GID = uint32(os.Getgid()) ) type Filer struct { - store FilerStore - directoryCache *ccache.Cache - MasterClient *wdclient.MasterClient - fileIdDeletionChan chan string + store *FilerStoreWrapper + directoryCache *ccache.Cache + MasterClient *wdclient.MasterClient + fileIdDeletionQueue *util.UnboundedQueue + GrpcDialOption grpc.DialOption + DirBucketsPath string + FsyncBuckets []string + buckets *FilerBuckets + Cipher bool + LocalMetaLogBuffer *log_buffer.LogBuffer + metaLogCollection string + metaLogReplication string } -func NewFiler(masters []string) *Filer { +func NewFiler(masters []string, grpcDialOption grpc.DialOption, filerHost string, filerGrpcPort uint32, collection string, replication string, notifyFn func()) *Filer { f := &Filer{ - directoryCache: ccache.New(ccache.Configure().MaxSize(1000).ItemsToPrune(100)), - MasterClient: wdclient.NewMasterClient(context.Background(), "filer", masters), - fileIdDeletionChan: make(chan string, 4096), + directoryCache: ccache.New(ccache.Configure().MaxSize(1000).ItemsToPrune(100)), + MasterClient: 
wdclient.NewMasterClient(grpcDialOption, "filer", filerHost, filerGrpcPort, masters), + fileIdDeletionQueue: util.NewUnboundedQueue(), + GrpcDialOption: grpcDialOption, } + f.LocalMetaLogBuffer = log_buffer.NewLogBuffer(time.Minute, f.logFlushFunc, notifyFn) + f.metaLogCollection = collection + f.metaLogReplication = replication go f.loopProcessingDeletion() @@ -39,7 +57,11 @@ func NewFiler(masters []string) *Filer { } func (f *Filer) SetStore(store FilerStore) { - f.store = store + f.store = NewFilerStoreWrapper(store) +} + +func (f *Filer) GetStore() (store FilerStore) { + return f.store } func (f *Filer) DisableDirectoryCache() { @@ -54,7 +76,19 @@ func (fs *Filer) KeepConnectedToMaster() { fs.MasterClient.KeepConnectedToMaster() } -func (f *Filer) CreateEntry(entry *Entry) error { +func (f *Filer) BeginTransaction(ctx context.Context) (context.Context, error) { + return f.store.BeginTransaction(ctx) +} + +func (f *Filer) CommitTransaction(ctx context.Context) error { + return f.store.CommitTransaction(ctx) +} + +func (f *Filer) RollbackTransaction(ctx context.Context) error { + return f.store.RollbackTransaction(ctx) +} + +func (f *Filer) CreateEntry(ctx context.Context, entry *Entry, o_excl bool, isFromOtherCluster bool) error { if string(entry.FullPath) == "/" { return nil @@ -67,7 +101,7 @@ func (f *Filer) CreateEntry(entry *Entry) error { var lastDirectoryEntry *Entry for i := 1; i < len(dirParts); i++ { - dirPath := "/" + filepath.Join(dirParts[:i]...) + dirPath := "/" + util.Join(dirParts[:i]...) 
// fmt.Printf("%d directory: %+v\n", i, dirPath) // first check local cache @@ -76,9 +110,9 @@ func (f *Filer) CreateEntry(entry *Entry) error { // not found, check the store directly if dirEntry == nil { glog.V(4).Infof("find uncached directory: %s", dirPath) - dirEntry, _ = f.FindEntry(FullPath(dirPath)) + dirEntry, _ = f.FindEntry(ctx, util.FullPath(dirPath)) } else { - glog.V(4).Infof("found cached directory: %s", dirPath) + // glog.V(4).Infof("found cached directory: %s", dirPath) } // no such existing directory @@ -88,27 +122,34 @@ func (f *Filer) CreateEntry(entry *Entry) error { now := time.Now() dirEntry = &Entry{ - FullPath: FullPath(dirPath), + FullPath: util.FullPath(dirPath), Attr: Attr{ - Mtime: now, - Crtime: now, - Mode: os.ModeDir | 0770, - Uid: entry.Uid, - Gid: entry.Gid, + Mtime: now, + Crtime: now, + Mode: os.ModeDir | entry.Mode | 0110, + Uid: entry.Uid, + Gid: entry.Gid, + Collection: entry.Collection, + Replication: entry.Replication, + UserName: entry.UserName, + GroupNames: entry.GroupNames, }, } glog.V(2).Infof("create directory: %s %v", dirPath, dirEntry.Mode) - mkdirErr := f.store.InsertEntry(dirEntry) + mkdirErr := f.store.InsertEntry(ctx, dirEntry) if mkdirErr != nil { - if _, err := f.FindEntry(FullPath(dirPath)); err == ErrNotFound { + if _, err := f.FindEntry(ctx, util.FullPath(dirPath)); err == filer_pb.ErrNotFound { + glog.V(3).Infof("mkdir %s: %v", dirPath, mkdirErr) return fmt.Errorf("mkdir %s: %v", dirPath, mkdirErr) } } else { - f.NotifyUpdateEvent(nil, dirEntry, false) + f.maybeAddBucket(dirEntry) + f.NotifyUpdateEvent(ctx, nil, dirEntry, false, isFromOtherCluster) } } else if !dirEntry.IsDirectory() { + glog.Errorf("CreateEntry %s: %s should be a directory", entry.FullPath, dirPath) return fmt.Errorf("%s is a file", dirPath) } @@ -123,6 +164,7 @@ func (f *Filer) CreateEntry(entry *Entry) error { } if lastDirectoryEntry == nil { + glog.Errorf("CreateEntry %s: lastDirectoryEntry is nil", entry.FullPath) return 
fmt.Errorf("parent folder not found: %v", entry.FullPath) } @@ -134,38 +176,50 @@ func (f *Filer) CreateEntry(entry *Entry) error { } */ - oldEntry, _ := f.FindEntry(entry.FullPath) + oldEntry, _ := f.FindEntry(ctx, entry.FullPath) + glog.V(4).Infof("CreateEntry %s: old entry: %v exclusive:%v", entry.FullPath, oldEntry, o_excl) if oldEntry == nil { - if err := f.store.InsertEntry(entry); err != nil { + if err := f.store.InsertEntry(ctx, entry); err != nil { + glog.Errorf("insert entry %s: %v", entry.FullPath, err) return fmt.Errorf("insert entry %s: %v", entry.FullPath, err) } } else { - if err := f.UpdateEntry(oldEntry, entry); err != nil { + if o_excl { + glog.V(3).Infof("EEXIST: entry %s already exists", entry.FullPath) + return fmt.Errorf("EEXIST: entry %s already exists", entry.FullPath) + } + if err := f.UpdateEntry(ctx, oldEntry, entry); err != nil { + glog.Errorf("update entry %s: %v", entry.FullPath, err) return fmt.Errorf("update entry %s: %v", entry.FullPath, err) } } - f.NotifyUpdateEvent(oldEntry, entry, true) + f.maybeAddBucket(entry) + f.NotifyUpdateEvent(ctx, oldEntry, entry, true, isFromOtherCluster) f.deleteChunksIfNotNew(oldEntry, entry) + glog.V(4).Infof("CreateEntry %s: created", entry.FullPath) + return nil } -func (f *Filer) UpdateEntry(oldEntry, entry *Entry) (err error) { +func (f *Filer) UpdateEntry(ctx context.Context, oldEntry, entry *Entry) (err error) { if oldEntry != nil { if oldEntry.IsDirectory() && !entry.IsDirectory() { + glog.Errorf("existing %s is a directory", entry.FullPath) return fmt.Errorf("existing %s is a directory", entry.FullPath) } if !oldEntry.IsDirectory() && entry.IsDirectory() { + glog.Errorf("existing %s is a file", entry.FullPath) return fmt.Errorf("existing %s is a file", entry.FullPath) } } - return f.store.UpdateEntry(entry) + return f.store.UpdateEntry(ctx, entry) } -func (f *Filer) FindEntry(p FullPath) (entry *Entry, err error) { +func (f *Filer) FindEntry(ctx context.Context, p util.FullPath) (entry 
*Entry, err error) { now := time.Now() @@ -181,74 +235,59 @@ func (f *Filer) FindEntry(p FullPath) (entry *Entry, err error) { }, }, nil } - return f.store.FindEntry(p) -} - -func (f *Filer) DeleteEntryMetaAndData(p FullPath, isRecursive bool, shouldDeleteChunks bool) (err error) { - entry, err := f.FindEntry(p) - if err != nil { - return err - } - - if entry.IsDirectory() { - limit := int(1) - if isRecursive { - limit = math.MaxInt32 - } - lastFileName := "" - includeLastFile := false - for limit > 0 { - entries, err := f.ListDirectoryEntries(p, lastFileName, includeLastFile, 1024) - if err != nil { - return fmt.Errorf("list folder %s: %v", p, err) - } - if len(entries) == 0 { - break - } else { - if isRecursive { - for _, sub := range entries { - lastFileName = sub.Name() - f.DeleteEntryMetaAndData(sub.FullPath, isRecursive, shouldDeleteChunks) - limit-- - if limit <= 0 { - break - } - } - } else { - if len(entries) > 0 { - return fmt.Errorf("folder %s is not empty", p) - } - } - f.cacheDelDirectory(string(p)) - if len(entries) < 1024 { - break - } - } + entry, err = f.store.FindEntry(ctx, p) + if entry != nil && entry.TtlSec > 0 { + if entry.Crtime.Add(time.Duration(entry.TtlSec) * time.Second).Before(time.Now()) { + f.store.DeleteEntry(ctx, p.Child(entry.Name())) + return nil, filer_pb.ErrNotFound } } + return - if shouldDeleteChunks { - f.DeleteChunks(entry.Chunks) - } +} - if p == "/" { - return nil +func (f *Filer) ListDirectoryEntries(ctx context.Context, p util.FullPath, startFileName string, inclusive bool, limit int) ([]*Entry, error) { + if strings.HasSuffix(string(p), "/") && len(p) > 1 { + p = p[0 : len(p)-1] } - glog.V(3).Infof("deleting entry %v", p) - f.NotifyUpdateEvent(entry, nil, shouldDeleteChunks) + var makeupEntries []*Entry + entries, expiredCount, lastFileName, err := f.doListDirectoryEntries(ctx, p, startFileName, inclusive, limit) + for expiredCount > 0 && err == nil { + makeupEntries, expiredCount, lastFileName, err = 
f.doListDirectoryEntries(ctx, p, lastFileName, false, expiredCount) + if err == nil { + entries = append(entries, makeupEntries...) + } + } - return f.store.DeleteEntry(p) + return entries, err } -func (f *Filer) ListDirectoryEntries(p FullPath, startFileName string, inclusive bool, limit int) ([]*Entry, error) { - if strings.HasSuffix(string(p), "/") && len(p) > 1 { - p = p[0 : len(p)-1] +func (f *Filer) doListDirectoryEntries(ctx context.Context, p util.FullPath, startFileName string, inclusive bool, limit int) (entries []*Entry, expiredCount int, lastFileName string, err error) { + listedEntries, listErr := f.store.ListDirectoryEntries(ctx, p, startFileName, inclusive, limit) + if listErr != nil { + return listedEntries, expiredCount, "", listErr } - return f.store.ListDirectoryEntries(p, startFileName, inclusive, limit) + for _, entry := range listedEntries { + lastFileName = entry.Name() + if entry.TtlSec > 0 { + if entry.Crtime.Add(time.Duration(entry.TtlSec) * time.Second).Before(time.Now()) { + f.store.DeleteEntry(ctx, p.Child(entry.Name())) + expiredCount++ + continue + } + } + entries = append(entries, entry) + } + return } func (f *Filer) cacheDelDirectory(dirpath string) { + + if dirpath == "/" { + return + } + if f.directoryCache == nil { return } @@ -257,6 +296,7 @@ func (f *Filer) cacheDelDirectory(dirpath string) { } func (f *Filer) cacheGetDirectory(dirpath string) *Entry { + if f.directoryCache == nil { return nil } @@ -280,3 +320,8 @@ func (f *Filer) cacheSetDirectory(dirpath string, dirEntry *Entry, level int) { f.directoryCache.Set(dirpath, dirEntry, time.Duration(minutes)*time.Minute) } + +func (f *Filer) Shutdown() { + f.LocalMetaLogBuffer.Shutdown() + f.store.Shutdown() +} diff --git a/weed/filer2/filer_buckets.go b/weed/filer2/filer_buckets.go new file mode 100644 index 000000000..7a57e7ee1 --- /dev/null +++ b/weed/filer2/filer_buckets.go @@ -0,0 +1,121 @@ +package filer2 + +import ( + "context" + "math" + "sync" + + 
"github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/util" +) + +type BucketName string +type BucketOption struct { + Name BucketName + Replication string + fsync bool +} +type FilerBuckets struct { + dirBucketsPath string + buckets map[BucketName]*BucketOption + sync.RWMutex +} + +func (f *Filer) LoadBuckets() { + + f.buckets = &FilerBuckets{ + buckets: make(map[BucketName]*BucketOption), + } + + limit := math.MaxInt32 + + entries, err := f.ListDirectoryEntries(context.Background(), util.FullPath(f.DirBucketsPath), "", false, limit) + + if err != nil { + glog.V(1).Infof("no buckets found: %v", err) + return + } + + shouldFsyncMap := make(map[string]bool) + for _, bucket := range f.FsyncBuckets { + shouldFsyncMap[bucket] = true + } + + glog.V(1).Infof("buckets found: %d", len(entries)) + + f.buckets.Lock() + for _, entry := range entries { + _, shouldFsnyc := shouldFsyncMap[entry.Name()] + f.buckets.buckets[BucketName(entry.Name())] = &BucketOption{ + Name: BucketName(entry.Name()), + Replication: entry.Replication, + fsync: shouldFsnyc, + } + } + f.buckets.Unlock() + +} + +func (f *Filer) ReadBucketOption(buketName string) (replication string, fsync bool) { + + f.buckets.RLock() + defer f.buckets.RUnlock() + + option, found := f.buckets.buckets[BucketName(buketName)] + + if !found { + return "", false + } + return option.Replication, option.fsync + +} + +func (f *Filer) isBucket(entry *Entry) bool { + if !entry.IsDirectory() { + return false + } + parent, dirName := entry.FullPath.DirAndName() + if parent != f.DirBucketsPath { + return false + } + + f.buckets.RLock() + defer f.buckets.RUnlock() + + _, found := f.buckets.buckets[BucketName(dirName)] + + return found + +} + +func (f *Filer) maybeAddBucket(entry *Entry) { + if !entry.IsDirectory() { + return + } + parent, dirName := entry.FullPath.DirAndName() + if parent != f.DirBucketsPath { + return + } + f.addBucket(dirName, &BucketOption{ + Name: BucketName(dirName), + Replication: 
entry.Replication, + }) +} + +func (f *Filer) addBucket(buketName string, bucketOption *BucketOption) { + + f.buckets.Lock() + defer f.buckets.Unlock() + + f.buckets.buckets[BucketName(buketName)] = bucketOption + +} + +func (f *Filer) deleteBucket(buketName string) { + + f.buckets.Lock() + defer f.buckets.Unlock() + + delete(f.buckets.buckets, BucketName(buketName)) + +} diff --git a/weed/filer2/filer_delete_entry.go b/weed/filer2/filer_delete_entry.go new file mode 100644 index 000000000..f30e10d59 --- /dev/null +++ b/weed/filer2/filer_delete_entry.go @@ -0,0 +1,128 @@ +package filer2 + +import ( + "context" + "fmt" + + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/pb/master_pb" + "github.com/chrislusf/seaweedfs/weed/util" +) + +func (f *Filer) DeleteEntryMetaAndData(ctx context.Context, p util.FullPath, isRecursive, ignoreRecursiveError, shouldDeleteChunks, isFromOtherCluster bool) (err error) { + if p == "/" { + return nil + } + + entry, findErr := f.FindEntry(ctx, p) + if findErr != nil { + return findErr + } + + isCollection := f.isBucket(entry) + + var chunks []*filer_pb.FileChunk + chunks = append(chunks, entry.Chunks...) + if entry.IsDirectory() { + // delete the folder children, not including the folder itself + var dirChunks []*filer_pb.FileChunk + dirChunks, err = f.doBatchDeleteFolderMetaAndData(ctx, entry, isRecursive, ignoreRecursiveError, shouldDeleteChunks && !isCollection, isFromOtherCluster) + if err != nil { + glog.V(0).Infof("delete directory %s: %v", p, err) + return fmt.Errorf("delete directory %s: %v", p, err) + } + chunks = append(chunks, dirChunks...) 
+ } + + // delete the file or folder + err = f.doDeleteEntryMetaAndData(ctx, entry, shouldDeleteChunks, isFromOtherCluster) + if err != nil { + return fmt.Errorf("delete file %s: %v", p, err) + } + + if shouldDeleteChunks && !isCollection { + go f.DeleteChunks(chunks) + } + if isCollection { + collectionName := entry.Name() + f.doDeleteCollection(collectionName) + f.deleteBucket(collectionName) + } + + return nil +} + +func (f *Filer) doBatchDeleteFolderMetaAndData(ctx context.Context, entry *Entry, isRecursive, ignoreRecursiveError, shouldDeleteChunks, isFromOtherCluster bool) (chunks []*filer_pb.FileChunk, err error) { + + lastFileName := "" + includeLastFile := false + for { + entries, err := f.ListDirectoryEntries(ctx, entry.FullPath, lastFileName, includeLastFile, PaginationSize) + if err != nil { + glog.Errorf("list folder %s: %v", entry.FullPath, err) + return nil, fmt.Errorf("list folder %s: %v", entry.FullPath, err) + } + if lastFileName == "" && !isRecursive && len(entries) > 0 { + // only for first iteration in the loop + return nil, fmt.Errorf("fail to delete non-empty folder: %s", entry.FullPath) + } + + for _, sub := range entries { + lastFileName = sub.Name() + var dirChunks []*filer_pb.FileChunk + if sub.IsDirectory() { + dirChunks, err = f.doBatchDeleteFolderMetaAndData(ctx, sub, isRecursive, ignoreRecursiveError, shouldDeleteChunks, false) + f.cacheDelDirectory(string(sub.FullPath)) + f.NotifyUpdateEvent(ctx, sub, nil, shouldDeleteChunks, isFromOtherCluster) + chunks = append(chunks, dirChunks...) + } else { + chunks = append(chunks, sub.Chunks...) 
+ } + if err != nil && !ignoreRecursiveError { + return nil, err + } + } + + if len(entries) < PaginationSize { + break + } + } + + glog.V(3).Infof("deleting directory %v delete %d chunks: %v", entry.FullPath, len(chunks), shouldDeleteChunks) + + if storeDeletionErr := f.store.DeleteFolderChildren(ctx, entry.FullPath); storeDeletionErr != nil { + return nil, fmt.Errorf("filer store delete: %v", storeDeletionErr) + } + + return chunks, nil +} + +func (f *Filer) doDeleteEntryMetaAndData(ctx context.Context, entry *Entry, shouldDeleteChunks bool, isFromOtherCluster bool) (err error) { + + glog.V(3).Infof("deleting entry %v, delete chunks: %v", entry.FullPath, shouldDeleteChunks) + + if storeDeletionErr := f.store.DeleteEntry(ctx, entry.FullPath); storeDeletionErr != nil { + return fmt.Errorf("filer store delete: %v", storeDeletionErr) + } + if entry.IsDirectory() { + f.cacheDelDirectory(string(entry.FullPath)) + } + f.NotifyUpdateEvent(ctx, entry, nil, shouldDeleteChunks, isFromOtherCluster) + + return nil +} + +func (f *Filer) doDeleteCollection(collectionName string) (err error) { + + return f.MasterClient.WithClient(func(client master_pb.SeaweedClient) error { + _, err := client.CollectionDelete(context.Background(), &master_pb.CollectionDeleteRequest{ + Name: collectionName, + }) + if err != nil { + glog.Infof("delete collection %s: %v", collectionName, err) + } + return err + }) + +} diff --git a/weed/filer2/filer_deletion.go b/weed/filer2/filer_deletion.go index 8fe8ae04f..a6b229771 100644 --- a/weed/filer2/filer_deletion.go +++ b/weed/filer2/filer_deletion.go @@ -6,16 +6,14 @@ import ( "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/operation" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/wdclient" ) -func (f *Filer) loopProcessingDeletion() { - - ticker := time.NewTicker(5 * time.Second) - - lookupFunc := func(vids []string) (map[string]operation.LookupResult, error) { +func 
LookupByMasterClientFn(masterClient *wdclient.MasterClient) func(vids []string) (map[string]operation.LookupResult, error) { + return func(vids []string) (map[string]operation.LookupResult, error) { m := make(map[string]operation.LookupResult) for _, vid := range vids { - locs := f.MasterClient.GetVidLocations(vid) + locs, _ := masterClient.GetVidLocations(vid) var locations []operation.Location for _, loc := range locs { locations = append(locations, operation.Location{ @@ -30,35 +28,56 @@ func (f *Filer) loopProcessingDeletion() { } return m, nil } +} + +func (f *Filer) loopProcessingDeletion() { + + lookupFunc := LookupByMasterClientFn(f.MasterClient) - var fileIds []string + DeletionBatchSize := 100000 // roughly 20 bytes cost per file id. + + var deletionCount int for { - select { - case fid := <-f.fileIdDeletionChan: - fileIds = append(fileIds, fid) - if len(fileIds) >= 4096 { - glog.V(1).Infof("deleting fileIds len=%d", len(fileIds)) - operation.DeleteFilesWithLookupVolumeId(fileIds, lookupFunc) - fileIds = fileIds[:0] - } - case <-ticker.C: - if len(fileIds) > 0 { - glog.V(1).Infof("timed deletion fileIds len=%d", len(fileIds)) - operation.DeleteFilesWithLookupVolumeId(fileIds, lookupFunc) - fileIds = fileIds[:0] + deletionCount = 0 + f.fileIdDeletionQueue.Consume(func(fileIds []string) { + for len(fileIds) > 0 { + var toDeleteFileIds []string + if len(fileIds) > DeletionBatchSize { + toDeleteFileIds = fileIds[:DeletionBatchSize] + fileIds = fileIds[DeletionBatchSize:] + } else { + toDeleteFileIds = fileIds + fileIds = fileIds[:0] + } + deletionCount = len(toDeleteFileIds) + deleteResults, err := operation.DeleteFilesWithLookupVolumeId(f.GrpcDialOption, toDeleteFileIds, lookupFunc) + if err != nil { + glog.V(0).Infof("deleting fileIds len=%d error: %v", deletionCount, err) + } else { + glog.V(1).Infof("deleting fileIds len=%d", deletionCount) + } + if len(deleteResults) != deletionCount { + glog.V(0).Infof("delete %d fileIds actual %d", deletionCount, 
len(deleteResults)) + } } + }) + + if deletionCount == 0 { + time.Sleep(1123 * time.Millisecond) } } } func (f *Filer) DeleteChunks(chunks []*filer_pb.FileChunk) { for _, chunk := range chunks { - f.fileIdDeletionChan <- chunk.FileId + f.fileIdDeletionQueue.EnQueue(chunk.GetFileIdString()) } } +// DeleteFileByFileId direct delete by file id. +// Only used when the fileId is not being managed by snapshots. func (f *Filer) DeleteFileByFileId(fileId string) { - f.fileIdDeletionChan <- fileId + f.fileIdDeletionQueue.EnQueue(fileId) } func (f *Filer) deleteChunksIfNotNew(oldEntry, newEntry *Entry) { @@ -71,16 +90,13 @@ func (f *Filer) deleteChunksIfNotNew(oldEntry, newEntry *Entry) { } var toDelete []*filer_pb.FileChunk + newChunkIds := make(map[string]bool) + for _, newChunk := range newEntry.Chunks { + newChunkIds[newChunk.GetFileIdString()] = true + } for _, oldChunk := range oldEntry.Chunks { - found := false - for _, newChunk := range newEntry.Chunks { - if oldChunk.FileId == newChunk.FileId { - found = true - break - } - } - if !found { + if _, found := newChunkIds[oldChunk.GetFileIdString()]; !found { toDelete = append(toDelete, oldChunk) } } diff --git a/weed/filer2/filer_notify.go b/weed/filer2/filer_notify.go index b3c215249..340f19fb5 100644 --- a/weed/filer2/filer_notify.go +++ b/weed/filer2/filer_notify.go @@ -1,33 +1,160 @@ package filer2 import ( + "context" + "fmt" + "io" + "strings" + "time" + + "github.com/golang/protobuf/proto" + "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/notification" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/util" ) -func (f *Filer) NotifyUpdateEvent(oldEntry, newEntry *Entry, deleteChunks bool) { - var key string +func (f *Filer) NotifyUpdateEvent(ctx context.Context, oldEntry, newEntry *Entry, deleteChunks, isFromOtherCluster bool) { + var fullpath string if oldEntry != nil { - key = string(oldEntry.FullPath) + fullpath = string(oldEntry.FullPath) 
} else if newEntry != nil { - key = string(newEntry.FullPath) + fullpath = string(newEntry.FullPath) } else { return } + // println("fullpath:", fullpath) + + if strings.HasPrefix(fullpath, SystemLogDir) { + return + } + + newParentPath := "" + if newEntry != nil { + newParentPath, _ = newEntry.FullPath.DirAndName() + } + eventNotification := &filer_pb.EventNotification{ + OldEntry: oldEntry.ToProtoEntry(), + NewEntry: newEntry.ToProtoEntry(), + DeleteChunks: deleteChunks, + NewParentPath: newParentPath, + IsFromOtherCluster: isFromOtherCluster, + } + if notification.Queue != nil { + glog.V(3).Infof("notifying entry update %v", fullpath) + notification.Queue.SendMessage(fullpath, eventNotification) + } + + f.logMetaEvent(ctx, fullpath, eventNotification) + +} - glog.V(3).Infof("notifying entry update %v", key) +func (f *Filer) logMetaEvent(ctx context.Context, fullpath string, eventNotification *filer_pb.EventNotification) { - notification.Queue.SendMessage( - key, - &filer_pb.EventNotification{ - OldEntry: oldEntry.ToProtoEntry(), - NewEntry: newEntry.ToProtoEntry(), - DeleteChunks: deleteChunks, - }, - ) + dir, _ := util.FullPath(fullpath).DirAndName() + + event := &filer_pb.SubscribeMetadataResponse{ + Directory: dir, + EventNotification: eventNotification, + TsNs: time.Now().UnixNano(), + } + data, err := proto.Marshal(event) + if err != nil { + glog.Errorf("failed to marshal filer_pb.SubscribeMetadataResponse %+v: %v", event, err) + return + } + + f.LocalMetaLogBuffer.AddToBuffer([]byte(dir), data) + +} + +func (f *Filer) logFlushFunc(startTime, stopTime time.Time, buf []byte) { + + targetFile := fmt.Sprintf("%s/%04d-%02d-%02d/%02d-%02d.segment", SystemLogDir, + startTime.Year(), startTime.Month(), startTime.Day(), startTime.Hour(), startTime.Minute(), + // startTime.Second(), startTime.Nanosecond(), + ) + + if err := f.appendToFile(targetFile, buf); err != nil { + glog.V(0).Infof("log write failed %s: %v", targetFile, err) + } +} + +func (f *Filer) 
ReadPersistedLogBuffer(startTime time.Time, eachLogEntryFn func(logEntry *filer_pb.LogEntry) error) (lastTsNs int64, err error) { + + startDate := fmt.Sprintf("%04d-%02d-%02d", startTime.Year(), startTime.Month(), startTime.Day()) + startHourMinute := fmt.Sprintf("%02d-%02d.segment", startTime.Hour(), startTime.Minute()) + + sizeBuf := make([]byte, 4) + startTsNs := startTime.UnixNano() + + dayEntries, listDayErr := f.ListDirectoryEntries(context.Background(), SystemLogDir, startDate, true, 366) + if listDayErr != nil { + return lastTsNs, fmt.Errorf("fail to list log by day: %v", listDayErr) + } + for _, dayEntry := range dayEntries { + // println("checking day", dayEntry.FullPath) + hourMinuteEntries, listHourMinuteErr := f.ListDirectoryEntries(context.Background(), util.NewFullPath(SystemLogDir, dayEntry.Name()), "", false, 24*60) + if listHourMinuteErr != nil { + return lastTsNs, fmt.Errorf("fail to list log %s by day: %v", dayEntry.Name(), listHourMinuteErr) + } + for _, hourMinuteEntry := range hourMinuteEntries { + // println("checking hh-mm", hourMinuteEntry.FullPath) + if dayEntry.Name() == startDate { + if strings.Compare(hourMinuteEntry.Name(), startHourMinute) < 0 { + continue + } + } + // println("processing", hourMinuteEntry.FullPath) + chunkedFileReader := NewChunkStreamReaderFromFiler(f.MasterClient, hourMinuteEntry.Chunks) + if lastTsNs, err = ReadEachLogEntry(chunkedFileReader, sizeBuf, startTsNs, eachLogEntryFn); err != nil { + chunkedFileReader.Close() + if err == io.EOF { + break + } + return lastTsNs, fmt.Errorf("reading %s: %v", hourMinuteEntry.FullPath, err) + } + chunkedFileReader.Close() + } + } + + return lastTsNs, nil +} +func ReadEachLogEntry(r io.Reader, sizeBuf []byte, ns int64, eachLogEntryFn func(logEntry *filer_pb.LogEntry) error) (lastTsNs int64, err error) { + for { + n, err := r.Read(sizeBuf) + if err != nil { + return lastTsNs, err + } + if n != 4 { + return lastTsNs, fmt.Errorf("size %d bytes, expected 4 bytes", n) + } + size 
:= util.BytesToUint32(sizeBuf) + // println("entry size", size) + entryData := make([]byte, size) + n, err = r.Read(entryData) + if err != nil { + return lastTsNs, err + } + if n != int(size) { + return lastTsNs, fmt.Errorf("entry data %d bytes, expected %d bytes", n, size) + } + logEntry := &filer_pb.LogEntry{} + if err = proto.Unmarshal(entryData, logEntry); err != nil { + return lastTsNs, err + } + if logEntry.TsNs <= ns { + return lastTsNs, nil + } + // println("each log: ", logEntry.TsNs) + if err := eachLogEntryFn(logEntry); err != nil { + return lastTsNs, err + } else { + lastTsNs = logEntry.TsNs + } } } diff --git a/weed/filer2/filer_notify_append.go b/weed/filer2/filer_notify_append.go new file mode 100644 index 000000000..61bbc9c45 --- /dev/null +++ b/weed/filer2/filer_notify_append.go @@ -0,0 +1,73 @@ +package filer2 + +import ( + "context" + "fmt" + "os" + "time" + + "github.com/chrislusf/seaweedfs/weed/operation" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/util" +) + +func (f *Filer) appendToFile(targetFile string, data []byte) error { + + assignResult, uploadResult, err2 := f.assignAndUpload(data) + if err2 != nil { + return err2 + } + + // find out existing entry + fullpath := util.FullPath(targetFile) + entry, err := f.FindEntry(context.Background(), fullpath) + var offset int64 = 0 + if err == filer_pb.ErrNotFound { + entry = &Entry{ + FullPath: fullpath, + Attr: Attr{ + Crtime: time.Now(), + Mtime: time.Now(), + Mode: os.FileMode(0644), + Uid: OS_UID, + Gid: OS_GID, + }, + } + } else { + offset = int64(TotalSize(entry.Chunks)) + } + + // append to existing chunks + entry.Chunks = append(entry.Chunks, uploadResult.ToPbFileChunk(assignResult.Fid, offset)) + + // update the entry + err = f.CreateEntry(context.Background(), entry, false, false) + + return err +} + +func (f *Filer) assignAndUpload(data []byte) (*operation.AssignResult, *operation.UploadResult, error) { + // assign a volume location + 
assignRequest := &operation.VolumeAssignRequest{ + Count: 1, + Collection: f.metaLogCollection, + Replication: f.metaLogReplication, + WritableVolumeCount: 1, + } + assignResult, err := operation.Assign(f.GetMaster(), f.GrpcDialOption, assignRequest) + if err != nil { + return nil, nil, fmt.Errorf("AssignVolume: %v", err) + } + if assignResult.Error != "" { + return nil, nil, fmt.Errorf("AssignVolume error: %v", assignResult.Error) + } + + // upload data + targetUrl := "http://" + assignResult.Url + "/" + assignResult.Fid + uploadResult, err := operation.UploadData(targetUrl, "", f.Cipher, data, false, "", nil, assignResult.Auth) + if err != nil { + return nil, nil, fmt.Errorf("upload data %s: %v", targetUrl, err) + } + // println("uploaded to", targetUrl) + return assignResult, uploadResult, nil +} diff --git a/weed/filer2/filer_notify_test.go b/weed/filer2/filer_notify_test.go index b74e2ad35..29170bfdf 100644 --- a/weed/filer2/filer_notify_test.go +++ b/weed/filer2/filer_notify_test.go @@ -5,13 +5,15 @@ import ( "time" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/util" + "github.com/golang/protobuf/proto" ) func TestProtoMarshalText(t *testing.T) { oldEntry := &Entry{ - FullPath: FullPath("/this/path/to"), + FullPath: util.FullPath("/this/path/to"), Attr: Attr{ Mtime: time.Now(), Mode: 0644, diff --git a/weed/filer2/filerstore.go b/weed/filer2/filerstore.go index 9ef1d9d48..f36c74f14 100644 --- a/weed/filer2/filerstore.go +++ b/weed/filer2/filerstore.go @@ -1,7 +1,11 @@ package filer2 import ( - "errors" + "context" + "time" + + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/stats" "github.com/chrislusf/seaweedfs/weed/util" ) @@ -9,13 +13,129 @@ type FilerStore interface { // GetName gets the name to locate the configuration in filer.toml file GetName() string // Initialize initializes the file store - Initialize(configuration util.Configuration) error - InsertEntry(*Entry) 
error - UpdateEntry(*Entry) (err error) + Initialize(configuration util.Configuration, prefix string) error + InsertEntry(context.Context, *Entry) error + UpdateEntry(context.Context, *Entry) (err error) // err == filer2.ErrNotFound if not found - FindEntry(FullPath) (entry *Entry, err error) - DeleteEntry(FullPath) (err error) - ListDirectoryEntries(dirPath FullPath, startFileName string, includeStartFile bool, limit int) ([]*Entry, error) + FindEntry(context.Context, util.FullPath) (entry *Entry, err error) + DeleteEntry(context.Context, util.FullPath) (err error) + DeleteFolderChildren(context.Context, util.FullPath) (err error) + ListDirectoryEntries(ctx context.Context, dirPath util.FullPath, startFileName string, includeStartFile bool, limit int) ([]*Entry, error) + + BeginTransaction(ctx context.Context) (context.Context, error) + CommitTransaction(ctx context.Context) error + RollbackTransaction(ctx context.Context) error + + Shutdown() +} + +type FilerStoreWrapper struct { + actualStore FilerStore +} + +func NewFilerStoreWrapper(store FilerStore) *FilerStoreWrapper { + if innerStore, ok := store.(*FilerStoreWrapper); ok { + return innerStore + } + return &FilerStoreWrapper{ + actualStore: store, + } +} + +func (fsw *FilerStoreWrapper) GetName() string { + return fsw.actualStore.GetName() +} + +func (fsw *FilerStoreWrapper) Initialize(configuration util.Configuration, prefix string) error { + return fsw.actualStore.Initialize(configuration, prefix) +} + +func (fsw *FilerStoreWrapper) InsertEntry(ctx context.Context, entry *Entry) error { + stats.FilerStoreCounter.WithLabelValues(fsw.actualStore.GetName(), "insert").Inc() + start := time.Now() + defer func() { + stats.FilerStoreHistogram.WithLabelValues(fsw.actualStore.GetName(), "insert").Observe(time.Since(start).Seconds()) + }() + + filer_pb.BeforeEntrySerialization(entry.Chunks) + return fsw.actualStore.InsertEntry(ctx, entry) +} + +func (fsw *FilerStoreWrapper) UpdateEntry(ctx context.Context, entry 
*Entry) error { + stats.FilerStoreCounter.WithLabelValues(fsw.actualStore.GetName(), "update").Inc() + start := time.Now() + defer func() { + stats.FilerStoreHistogram.WithLabelValues(fsw.actualStore.GetName(), "update").Observe(time.Since(start).Seconds()) + }() + + filer_pb.BeforeEntrySerialization(entry.Chunks) + return fsw.actualStore.UpdateEntry(ctx, entry) +} + +func (fsw *FilerStoreWrapper) FindEntry(ctx context.Context, fp util.FullPath) (entry *Entry, err error) { + stats.FilerStoreCounter.WithLabelValues(fsw.actualStore.GetName(), "find").Inc() + start := time.Now() + defer func() { + stats.FilerStoreHistogram.WithLabelValues(fsw.actualStore.GetName(), "find").Observe(time.Since(start).Seconds()) + }() + + entry, err = fsw.actualStore.FindEntry(ctx, fp) + if err != nil { + return nil, err + } + filer_pb.AfterEntryDeserialization(entry.Chunks) + return +} + +func (fsw *FilerStoreWrapper) DeleteEntry(ctx context.Context, fp util.FullPath) (err error) { + stats.FilerStoreCounter.WithLabelValues(fsw.actualStore.GetName(), "delete").Inc() + start := time.Now() + defer func() { + stats.FilerStoreHistogram.WithLabelValues(fsw.actualStore.GetName(), "delete").Observe(time.Since(start).Seconds()) + }() + + return fsw.actualStore.DeleteEntry(ctx, fp) +} + +func (fsw *FilerStoreWrapper) DeleteFolderChildren(ctx context.Context, fp util.FullPath) (err error) { + stats.FilerStoreCounter.WithLabelValues(fsw.actualStore.GetName(), "deleteFolderChildren").Inc() + start := time.Now() + defer func() { + stats.FilerStoreHistogram.WithLabelValues(fsw.actualStore.GetName(), "deleteFolderChildren").Observe(time.Since(start).Seconds()) + }() + + return fsw.actualStore.DeleteFolderChildren(ctx, fp) +} + +func (fsw *FilerStoreWrapper) ListDirectoryEntries(ctx context.Context, dirPath util.FullPath, startFileName string, includeStartFile bool, limit int) ([]*Entry, error) { + stats.FilerStoreCounter.WithLabelValues(fsw.actualStore.GetName(), "list").Inc() + start := time.Now() + 
defer func() { + stats.FilerStoreHistogram.WithLabelValues(fsw.actualStore.GetName(), "list").Observe(time.Since(start).Seconds()) + }() + + entries, err := fsw.actualStore.ListDirectoryEntries(ctx, dirPath, startFileName, includeStartFile, limit) + if err != nil { + return nil, err + } + for _, entry := range entries { + filer_pb.AfterEntryDeserialization(entry.Chunks) + } + return entries, err } -var ErrNotFound = errors.New("filer: no entry is found in filer store") +func (fsw *FilerStoreWrapper) BeginTransaction(ctx context.Context) (context.Context, error) { + return fsw.actualStore.BeginTransaction(ctx) +} + +func (fsw *FilerStoreWrapper) CommitTransaction(ctx context.Context) error { + return fsw.actualStore.CommitTransaction(ctx) +} + +func (fsw *FilerStoreWrapper) RollbackTransaction(ctx context.Context) error { + return fsw.actualStore.RollbackTransaction(ctx) +} + +func (fsw *FilerStoreWrapper) Shutdown() { + fsw.actualStore.Shutdown() +} diff --git a/weed/filer2/fullpath.go b/weed/filer2/fullpath.go deleted file mode 100644 index be6e34431..000000000 --- a/weed/filer2/fullpath.go +++ /dev/null @@ -1,31 +0,0 @@ -package filer2 - -import ( - "path/filepath" - "strings" -) - -type FullPath string - -func NewFullPath(dir, name string) FullPath { - if strings.HasSuffix(dir, "/") { - return FullPath(dir + name) - } - return FullPath(dir + "/" + name) -} - -func (fp FullPath) DirAndName() (string, string) { - dir, name := filepath.Split(string(fp)) - if dir == "/" { - return dir, name - } - if len(dir) < 1 { - return "/", "" - } - return dir[:len(dir)-1], name -} - -func (fp FullPath) Name() string { - _, name := filepath.Split(string(fp)) - return name -} diff --git a/weed/filer2/leveldb/leveldb_store.go b/weed/filer2/leveldb/leveldb_store.go index 179107e2c..31919ca49 100644 --- a/weed/filer2/leveldb/leveldb_store.go +++ b/weed/filer2/leveldb/leveldb_store.go @@ -2,13 +2,18 @@ package leveldb import ( "bytes" + "context" "fmt" + 
"github.com/syndtr/goleveldb/leveldb" + "github.com/syndtr/goleveldb/leveldb/errors" + "github.com/syndtr/goleveldb/leveldb/opt" + leveldb_util "github.com/syndtr/goleveldb/leveldb/util" + "github.com/chrislusf/seaweedfs/weed/filer2" "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" weed_util "github.com/chrislusf/seaweedfs/weed/util" - "github.com/syndtr/goleveldb/leveldb" - leveldb_util "github.com/syndtr/goleveldb/leveldb/util" ) const ( @@ -27,8 +32,8 @@ func (store *LevelDBStore) GetName() string { return "leveldb" } -func (store *LevelDBStore) Initialize(configuration weed_util.Configuration) (err error) { - dir := configuration.GetString("dir") +func (store *LevelDBStore) Initialize(configuration weed_util.Configuration, prefix string) (err error) { + dir := configuration.GetString(prefix + "dir") return store.initialize(dir) } @@ -38,14 +43,35 @@ func (store *LevelDBStore) initialize(dir string) (err error) { return fmt.Errorf("Check Level Folder %s Writable: %s", dir, err) } - if store.db, err = leveldb.OpenFile(dir, nil); err != nil { - glog.Infof("filer store open dir %s: %v", dir, err) - return + opts := &opt.Options{ + BlockCacheCapacity: 32 * 1024 * 1024, // default value is 8MiB + WriteBuffer: 16 * 1024 * 1024, // default value is 4MiB + CompactionTableSizeMultiplier: 10, + } + + if store.db, err = leveldb.OpenFile(dir, opts); err != nil { + if errors.IsCorrupted(err) { + store.db, err = leveldb.RecoverFile(dir, opts) + } + if err != nil { + glog.Infof("filer store open dir %s: %v", dir, err) + return + } } return } -func (store *LevelDBStore) InsertEntry(entry *filer2.Entry) (err error) { +func (store *LevelDBStore) BeginTransaction(ctx context.Context) (context.Context, error) { + return ctx, nil +} +func (store *LevelDBStore) CommitTransaction(ctx context.Context) error { + return nil +} +func (store *LevelDBStore) RollbackTransaction(ctx context.Context) error { + return nil +} + +func (store 
*LevelDBStore) InsertEntry(ctx context.Context, entry *filer2.Entry) (err error) { key := genKey(entry.DirAndName()) value, err := entry.EncodeAttributesAndChunks() @@ -64,18 +90,18 @@ func (store *LevelDBStore) InsertEntry(entry *filer2.Entry) (err error) { return nil } -func (store *LevelDBStore) UpdateEntry(entry *filer2.Entry) (err error) { +func (store *LevelDBStore) UpdateEntry(ctx context.Context, entry *filer2.Entry) (err error) { - return store.InsertEntry(entry) + return store.InsertEntry(ctx, entry) } -func (store *LevelDBStore) FindEntry(fullpath filer2.FullPath) (entry *filer2.Entry, err error) { +func (store *LevelDBStore) FindEntry(ctx context.Context, fullpath weed_util.FullPath) (entry *filer2.Entry, err error) { key := genKey(fullpath.DirAndName()) data, err := store.db.Get(key, nil) if err == leveldb.ErrNotFound { - return nil, filer2.ErrNotFound + return nil, filer_pb.ErrNotFound } if err != nil { return nil, fmt.Errorf("get %s : %v", entry.FullPath, err) @@ -94,7 +120,7 @@ func (store *LevelDBStore) FindEntry(fullpath filer2.FullPath) (entry *filer2.En return entry, nil } -func (store *LevelDBStore) DeleteEntry(fullpath filer2.FullPath) (err error) { +func (store *LevelDBStore) DeleteEntry(ctx context.Context, fullpath weed_util.FullPath) (err error) { key := genKey(fullpath.DirAndName()) err = store.db.Delete(key, nil) @@ -105,7 +131,35 @@ func (store *LevelDBStore) DeleteEntry(fullpath filer2.FullPath) (err error) { return nil } -func (store *LevelDBStore) ListDirectoryEntries(fullpath filer2.FullPath, startFileName string, inclusive bool, +func (store *LevelDBStore) DeleteFolderChildren(ctx context.Context, fullpath weed_util.FullPath) (err error) { + + batch := new(leveldb.Batch) + + directoryPrefix := genDirectoryKeyPrefix(fullpath, "") + iter := store.db.NewIterator(&leveldb_util.Range{Start: directoryPrefix}, nil) + for iter.Next() { + key := iter.Key() + if !bytes.HasPrefix(key, directoryPrefix) { + break + } + fileName := 
getNameFromKey(key) + if fileName == "" { + continue + } + batch.Delete([]byte(genKey(string(fullpath), fileName))) + } + iter.Release() + + err = store.db.Write(batch, nil) + + if err != nil { + return fmt.Errorf("delete %s : %v", fullpath, err) + } + + return nil +} + +func (store *LevelDBStore) ListDirectoryEntries(ctx context.Context, fullpath weed_util.FullPath, startFileName string, inclusive bool, limit int) (entries []*filer2.Entry, err error) { directoryPrefix := genDirectoryKeyPrefix(fullpath, "") @@ -128,7 +182,7 @@ func (store *LevelDBStore) ListDirectoryEntries(fullpath filer2.FullPath, startF break } entry := &filer2.Entry{ - FullPath: filer2.NewFullPath(string(fullpath), fileName), + FullPath: weed_util.NewFullPath(string(fullpath), fileName), } if decodeErr := entry.DecodeAttributesAndChunks(iter.Value()); decodeErr != nil { err = decodeErr @@ -149,7 +203,7 @@ func genKey(dirPath, fileName string) (key []byte) { return key } -func genDirectoryKeyPrefix(fullpath filer2.FullPath, startFileName string) (keyPrefix []byte) { +func genDirectoryKeyPrefix(fullpath weed_util.FullPath, startFileName string) (keyPrefix []byte) { keyPrefix = []byte(string(fullpath)) keyPrefix = append(keyPrefix, DIR_FILE_SEPARATOR) if len(startFileName) > 0 { @@ -168,3 +222,7 @@ func getNameFromKey(key []byte) string { return string(key[sepIndex+1:]) } + +func (store *LevelDBStore) Shutdown() { + store.db.Close() +} diff --git a/weed/filer2/leveldb/leveldb_store_test.go b/weed/filer2/leveldb/leveldb_store_test.go index 5b214558f..77df07a9b 100644 --- a/weed/filer2/leveldb/leveldb_store_test.go +++ b/weed/filer2/leveldb/leveldb_store_test.go @@ -1,14 +1,17 @@ package leveldb import ( - "github.com/chrislusf/seaweedfs/weed/filer2" + "context" "io/ioutil" "os" "testing" + + "github.com/chrislusf/seaweedfs/weed/filer2" + "github.com/chrislusf/seaweedfs/weed/util" ) func TestCreateAndFind(t *testing.T) { - filer := filer2.NewFiler(nil) + filer := filer2.NewFiler(nil, nil, "", 0, "", 
"", nil) dir, _ := ioutil.TempDir("", "seaweedfs_filer_test") defer os.RemoveAll(dir) store := &LevelDBStore{} @@ -16,7 +19,9 @@ func TestCreateAndFind(t *testing.T) { filer.SetStore(store) filer.DisableDirectoryCache() - fullpath := filer2.FullPath("/home/chris/this/is/one/file1.jpg") + fullpath := util.FullPath("/home/chris/this/is/one/file1.jpg") + + ctx := context.Background() entry1 := &filer2.Entry{ FullPath: fullpath, @@ -27,12 +32,12 @@ func TestCreateAndFind(t *testing.T) { }, } - if err := filer.CreateEntry(entry1); err != nil { + if err := filer.CreateEntry(ctx, entry1, false, false); err != nil { t.Errorf("create entry %v: %v", entry1.FullPath, err) return } - entry, err := filer.FindEntry(fullpath) + entry, err := filer.FindEntry(ctx, fullpath) if err != nil { t.Errorf("find entry: %v", err) @@ -45,14 +50,14 @@ func TestCreateAndFind(t *testing.T) { } // checking one upper directory - entries, _ := filer.ListDirectoryEntries(filer2.FullPath("/home/chris/this/is/one"), "", false, 100) + entries, _ := filer.ListDirectoryEntries(ctx, util.FullPath("/home/chris/this/is/one"), "", false, 100) if len(entries) != 1 { t.Errorf("list entries count: %v", len(entries)) return } // checking one upper directory - entries, _ = filer.ListDirectoryEntries(filer2.FullPath("/"), "", false, 100) + entries, _ = filer.ListDirectoryEntries(ctx, util.FullPath("/"), "", false, 100) if len(entries) != 1 { t.Errorf("list entries count: %v", len(entries)) return @@ -61,7 +66,7 @@ func TestCreateAndFind(t *testing.T) { } func TestEmptyRoot(t *testing.T) { - filer := filer2.NewFiler(nil) + filer := filer2.NewFiler(nil, nil, "", 0, "", "", nil) dir, _ := ioutil.TempDir("", "seaweedfs_filer_test2") defer os.RemoveAll(dir) store := &LevelDBStore{} @@ -69,8 +74,10 @@ func TestEmptyRoot(t *testing.T) { filer.SetStore(store) filer.DisableDirectoryCache() + ctx := context.Background() + // checking one upper directory - entries, err := filer.ListDirectoryEntries(filer2.FullPath("/"), "", 
false, 100) + entries, err := filer.ListDirectoryEntries(ctx, util.FullPath("/"), "", false, 100) if err != nil { t.Errorf("list entries: %v", err) return diff --git a/weed/filer2/leveldb2/leveldb2_store.go b/weed/filer2/leveldb2/leveldb2_store.go new file mode 100644 index 000000000..c907e8746 --- /dev/null +++ b/weed/filer2/leveldb2/leveldb2_store.go @@ -0,0 +1,248 @@ +package leveldb + +import ( + "bytes" + "context" + "crypto/md5" + "fmt" + "io" + "os" + + "github.com/syndtr/goleveldb/leveldb" + "github.com/syndtr/goleveldb/leveldb/errors" + "github.com/syndtr/goleveldb/leveldb/opt" + leveldb_util "github.com/syndtr/goleveldb/leveldb/util" + + "github.com/chrislusf/seaweedfs/weed/filer2" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + weed_util "github.com/chrislusf/seaweedfs/weed/util" +) + +func init() { + filer2.Stores = append(filer2.Stores, &LevelDB2Store{}) +} + +type LevelDB2Store struct { + dbs []*leveldb.DB + dbCount int +} + +func (store *LevelDB2Store) GetName() string { + return "leveldb2" +} + +func (store *LevelDB2Store) Initialize(configuration weed_util.Configuration, prefix string) (err error) { + dir := configuration.GetString(prefix + "dir") + return store.initialize(dir, 8) +} + +func (store *LevelDB2Store) initialize(dir string, dbCount int) (err error) { + glog.Infof("filer store leveldb2 dir: %s", dir) + if err := weed_util.TestFolderWritable(dir); err != nil { + return fmt.Errorf("Check Level Folder %s Writable: %s", dir, err) + } + + opts := &opt.Options{ + BlockCacheCapacity: 32 * 1024 * 1024, // default value is 8MiB + WriteBuffer: 16 * 1024 * 1024, // default value is 4MiB + CompactionTableSizeMultiplier: 4, + } + + for d := 0; d < dbCount; d++ { + dbFolder := fmt.Sprintf("%s/%02d", dir, d) + os.MkdirAll(dbFolder, 0755) + db, dbErr := leveldb.OpenFile(dbFolder, opts) + if errors.IsCorrupted(dbErr) { + db, dbErr = leveldb.RecoverFile(dbFolder, opts) + } + if dbErr != nil { + 
glog.Errorf("filer store open dir %s: %v", dbFolder, dbErr) + return dbErr + } + store.dbs = append(store.dbs, db) + } + store.dbCount = dbCount + + return +} + +func (store *LevelDB2Store) BeginTransaction(ctx context.Context) (context.Context, error) { + return ctx, nil +} +func (store *LevelDB2Store) CommitTransaction(ctx context.Context) error { + return nil +} +func (store *LevelDB2Store) RollbackTransaction(ctx context.Context) error { + return nil +} + +func (store *LevelDB2Store) InsertEntry(ctx context.Context, entry *filer2.Entry) (err error) { + dir, name := entry.DirAndName() + key, partitionId := genKey(dir, name, store.dbCount) + + value, err := entry.EncodeAttributesAndChunks() + if err != nil { + return fmt.Errorf("encoding %s %+v: %v", entry.FullPath, entry.Attr, err) + } + + err = store.dbs[partitionId].Put(key, value, nil) + + if err != nil { + return fmt.Errorf("persisting %s : %v", entry.FullPath, err) + } + + // println("saved", entry.FullPath, "chunks", len(entry.Chunks)) + + return nil +} + +func (store *LevelDB2Store) UpdateEntry(ctx context.Context, entry *filer2.Entry) (err error) { + + return store.InsertEntry(ctx, entry) +} + +func (store *LevelDB2Store) FindEntry(ctx context.Context, fullpath weed_util.FullPath) (entry *filer2.Entry, err error) { + dir, name := fullpath.DirAndName() + key, partitionId := genKey(dir, name, store.dbCount) + + data, err := store.dbs[partitionId].Get(key, nil) + + if err == leveldb.ErrNotFound { + return nil, filer_pb.ErrNotFound + } + if err != nil { + return nil, fmt.Errorf("get %s : %v", entry.FullPath, err) + } + + entry = &filer2.Entry{ + FullPath: fullpath, + } + err = entry.DecodeAttributesAndChunks(data) + if err != nil { + return entry, fmt.Errorf("decode %s : %v", entry.FullPath, err) + } + + // println("read", entry.FullPath, "chunks", len(entry.Chunks), "data", len(data), string(data)) + + return entry, nil +} + +func (store *LevelDB2Store) DeleteEntry(ctx context.Context, fullpath 
weed_util.FullPath) (err error) { + dir, name := fullpath.DirAndName() + key, partitionId := genKey(dir, name, store.dbCount) + + err = store.dbs[partitionId].Delete(key, nil) + if err != nil { + return fmt.Errorf("delete %s : %v", fullpath, err) + } + + return nil +} + +func (store *LevelDB2Store) DeleteFolderChildren(ctx context.Context, fullpath weed_util.FullPath) (err error) { + directoryPrefix, partitionId := genDirectoryKeyPrefix(fullpath, "", store.dbCount) + + batch := new(leveldb.Batch) + + iter := store.dbs[partitionId].NewIterator(&leveldb_util.Range{Start: directoryPrefix}, nil) + for iter.Next() { + key := iter.Key() + if !bytes.HasPrefix(key, directoryPrefix) { + break + } + fileName := getNameFromKey(key) + if fileName == "" { + continue + } + batch.Delete(append(directoryPrefix, []byte(fileName)...)) + } + iter.Release() + + err = store.dbs[partitionId].Write(batch, nil) + + if err != nil { + return fmt.Errorf("delete %s : %v", fullpath, err) + } + + return nil +} + +func (store *LevelDB2Store) ListDirectoryEntries(ctx context.Context, fullpath weed_util.FullPath, startFileName string, inclusive bool, + limit int) (entries []*filer2.Entry, err error) { + + directoryPrefix, partitionId := genDirectoryKeyPrefix(fullpath, "", store.dbCount) + lastFileStart, _ := genDirectoryKeyPrefix(fullpath, startFileName, store.dbCount) + + iter := store.dbs[partitionId].NewIterator(&leveldb_util.Range{Start: lastFileStart}, nil) + for iter.Next() { + key := iter.Key() + if !bytes.HasPrefix(key, directoryPrefix) { + break + } + fileName := getNameFromKey(key) + if fileName == "" { + continue + } + if fileName == startFileName && !inclusive { + continue + } + limit-- + if limit < 0 { + break + } + entry := &filer2.Entry{ + FullPath: weed_util.NewFullPath(string(fullpath), fileName), + } + + // println("list", entry.FullPath, "chunks", len(entry.Chunks)) + + if decodeErr := entry.DecodeAttributesAndChunks(iter.Value()); decodeErr != nil { + err = decodeErr + 
glog.V(0).Infof("list %s : %v", entry.FullPath, err) + break + } + entries = append(entries, entry) + } + iter.Release() + + return entries, err +} + +func genKey(dirPath, fileName string, dbCount int) (key []byte, partitionId int) { + key, partitionId = hashToBytes(dirPath, dbCount) + key = append(key, []byte(fileName)...) + return key, partitionId +} + +func genDirectoryKeyPrefix(fullpath weed_util.FullPath, startFileName string, dbCount int) (keyPrefix []byte, partitionId int) { + keyPrefix, partitionId = hashToBytes(string(fullpath), dbCount) + if len(startFileName) > 0 { + keyPrefix = append(keyPrefix, []byte(startFileName)...) + } + return keyPrefix, partitionId +} + +func getNameFromKey(key []byte) string { + + return string(key[md5.Size:]) + +} + +// hash directory, and use last byte for partitioning +func hashToBytes(dir string, dbCount int) ([]byte, int) { + h := md5.New() + io.WriteString(h, dir) + + b := h.Sum(nil) + + x := b[len(b)-1] + + return b, int(x) % dbCount +} + +func (store *LevelDB2Store) Shutdown() { + for d := 0; d < store.dbCount; d++ { + store.dbs[d].Close() + } +} diff --git a/weed/filer2/leveldb2/leveldb2_store_test.go b/weed/filer2/leveldb2/leveldb2_store_test.go new file mode 100644 index 000000000..b211d86e4 --- /dev/null +++ b/weed/filer2/leveldb2/leveldb2_store_test.go @@ -0,0 +1,90 @@ +package leveldb + +import ( + "context" + "io/ioutil" + "os" + "testing" + + "github.com/chrislusf/seaweedfs/weed/filer2" + "github.com/chrislusf/seaweedfs/weed/util" +) + +func TestCreateAndFind(t *testing.T) { + filer := filer2.NewFiler(nil, nil, "", 0, "", "", nil) + dir, _ := ioutil.TempDir("", "seaweedfs_filer_test") + defer os.RemoveAll(dir) + store := &LevelDB2Store{} + store.initialize(dir, 2) + filer.SetStore(store) + filer.DisableDirectoryCache() + + fullpath := util.FullPath("/home/chris/this/is/one/file1.jpg") + + ctx := context.Background() + + entry1 := &filer2.Entry{ + FullPath: fullpath, + Attr: filer2.Attr{ + Mode: 0440, + Uid: 
1234, + Gid: 5678, + }, + } + + if err := filer.CreateEntry(ctx, entry1, false, false); err != nil { + t.Errorf("create entry %v: %v", entry1.FullPath, err) + return + } + + entry, err := filer.FindEntry(ctx, fullpath) + + if err != nil { + t.Errorf("find entry: %v", err) + return + } + + if entry.FullPath != entry1.FullPath { + t.Errorf("find wrong entry: %v", entry.FullPath) + return + } + + // checking one upper directory + entries, _ := filer.ListDirectoryEntries(ctx, util.FullPath("/home/chris/this/is/one"), "", false, 100) + if len(entries) != 1 { + t.Errorf("list entries count: %v", len(entries)) + return + } + + // checking one upper directory + entries, _ = filer.ListDirectoryEntries(ctx, util.FullPath("/"), "", false, 100) + if len(entries) != 1 { + t.Errorf("list entries count: %v", len(entries)) + return + } + +} + +func TestEmptyRoot(t *testing.T) { + filer := filer2.NewFiler(nil, nil, "", 0, "", "", nil) + dir, _ := ioutil.TempDir("", "seaweedfs_filer_test2") + defer os.RemoveAll(dir) + store := &LevelDB2Store{} + store.initialize(dir, 2) + filer.SetStore(store) + filer.DisableDirectoryCache() + + ctx := context.Background() + + // checking one upper directory + entries, err := filer.ListDirectoryEntries(ctx, util.FullPath("/"), "", false, 100) + if err != nil { + t.Errorf("list entries: %v", err) + return + } + if len(entries) != 0 { + t.Errorf("list entries count: %v", len(entries)) + return + } + +} diff --git a/weed/filer2/memdb/memdb_store.go b/weed/filer2/memdb/memdb_store.go deleted file mode 100644 index 062f1cd1c..000000000 --- a/weed/filer2/memdb/memdb_store.go +++ /dev/null @@ -1,113 +0,0 @@ -package memdb - -import ( - "fmt" - "github.com/chrislusf/seaweedfs/weed/filer2" - "github.com/chrislusf/seaweedfs/weed/util" - "github.com/google/btree" - "strings" -) - -func init() { - filer2.Stores = append(filer2.Stores, &MemDbStore{}) -} - -type MemDbStore struct { - tree *btree.BTree -} - -type entryItem struct { - *filer2.Entry -} - -func (a 
entryItem) Less(b btree.Item) bool { - return strings.Compare(string(a.FullPath), string(b.(entryItem).FullPath)) < 0 -} - -func (store *MemDbStore) GetName() string { - return "memory" -} - -func (store *MemDbStore) Initialize(configuration util.Configuration) (err error) { - store.tree = btree.New(8) - return nil -} - -func (store *MemDbStore) InsertEntry(entry *filer2.Entry) (err error) { - // println("inserting", entry.FullPath) - store.tree.ReplaceOrInsert(entryItem{entry}) - return nil -} - -func (store *MemDbStore) UpdateEntry(entry *filer2.Entry) (err error) { - if _, err = store.FindEntry(entry.FullPath); err != nil { - return fmt.Errorf("no such file %s : %v", entry.FullPath, err) - } - store.tree.ReplaceOrInsert(entryItem{entry}) - return nil -} - -func (store *MemDbStore) FindEntry(fullpath filer2.FullPath) (entry *filer2.Entry, err error) { - item := store.tree.Get(entryItem{&filer2.Entry{FullPath: fullpath}}) - if item == nil { - return nil, filer2.ErrNotFound - } - entry = item.(entryItem).Entry - return entry, nil -} - -func (store *MemDbStore) DeleteEntry(fullpath filer2.FullPath) (err error) { - store.tree.Delete(entryItem{&filer2.Entry{FullPath: fullpath}}) - return nil -} - -func (store *MemDbStore) ListDirectoryEntries(fullpath filer2.FullPath, startFileName string, inclusive bool, limit int) (entries []*filer2.Entry, err error) { - - startFrom := string(fullpath) - if startFileName != "" { - startFrom = startFrom + "/" + startFileName - } - - store.tree.AscendGreaterOrEqual(entryItem{&filer2.Entry{FullPath: filer2.FullPath(startFrom)}}, - func(item btree.Item) bool { - if limit <= 0 { - return false - } - entry := item.(entryItem).Entry - // println("checking", entry.FullPath) - - if entry.FullPath == fullpath { - // skipping the current directory - // println("skipping the folder", entry.FullPath) - return true - } - - dir, name := entry.FullPath.DirAndName() - if name == startFileName { - if inclusive { - limit-- - entries = append(entries, 
entry) - } - return true - } - - // only iterate the same prefix - if !strings.HasPrefix(string(entry.FullPath), string(fullpath)) { - // println("breaking from", entry.FullPath) - return false - } - - if dir != string(fullpath) { - // this could be items in deeper directories - // println("skipping deeper folder", entry.FullPath) - return true - } - // now process the directory items - // println("adding entry", entry.FullPath) - limit-- - entries = append(entries, entry) - return true - }, - ) - return entries, nil -} diff --git a/weed/filer2/memdb/memdb_store_test.go b/weed/filer2/memdb/memdb_store_test.go deleted file mode 100644 index cf813e04b..000000000 --- a/weed/filer2/memdb/memdb_store_test.go +++ /dev/null @@ -1,144 +0,0 @@ -package memdb - -import ( - "github.com/chrislusf/seaweedfs/weed/filer2" - "testing" -) - -func TestCreateAndFind(t *testing.T) { - filer := filer2.NewFiler(nil) - store := &MemDbStore{} - store.Initialize(nil) - filer.SetStore(store) - filer.DisableDirectoryCache() - - fullpath := filer2.FullPath("/home/chris/this/is/one/file1.jpg") - - entry1 := &filer2.Entry{ - FullPath: fullpath, - Attr: filer2.Attr{ - Mode: 0440, - Uid: 1234, - Gid: 5678, - }, - } - - if err := filer.CreateEntry(entry1); err != nil { - t.Errorf("create entry %v: %v", entry1.FullPath, err) - return - } - - entry, err := filer.FindEntry(fullpath) - - if err != nil { - t.Errorf("find entry: %v", err) - return - } - - if entry.FullPath != entry1.FullPath { - t.Errorf("find wrong entry: %v", entry.FullPath) - return - } - -} - -func TestCreateFileAndList(t *testing.T) { - filer := filer2.NewFiler(nil) - store := &MemDbStore{} - store.Initialize(nil) - filer.SetStore(store) - filer.DisableDirectoryCache() - - entry1 := &filer2.Entry{ - FullPath: filer2.FullPath("/home/chris/this/is/one/file1.jpg"), - Attr: filer2.Attr{ - Mode: 0440, - Uid: 1234, - Gid: 5678, - }, - } - - entry2 := &filer2.Entry{ - FullPath: filer2.FullPath("/home/chris/this/is/one/file2.jpg"), - Attr: 
filer2.Attr{ - Mode: 0440, - Uid: 1234, - Gid: 5678, - }, - } - - filer.CreateEntry(entry1) - filer.CreateEntry(entry2) - - // checking the 2 files - entries, err := filer.ListDirectoryEntries(filer2.FullPath("/home/chris/this/is/one/"), "", false, 100) - - if err != nil { - t.Errorf("list entries: %v", err) - return - } - - if len(entries) != 2 { - t.Errorf("list entries count: %v", len(entries)) - return - } - - if entries[0].FullPath != entry1.FullPath { - t.Errorf("find wrong entry 1: %v", entries[0].FullPath) - return - } - - if entries[1].FullPath != entry2.FullPath { - t.Errorf("find wrong entry 2: %v", entries[1].FullPath) - return - } - - // checking the offset - entries, err = filer.ListDirectoryEntries(filer2.FullPath("/home/chris/this/is/one/"), "file1.jpg", false, 100) - if len(entries) != 1 { - t.Errorf("list entries count: %v", len(entries)) - return - } - - // checking one upper directory - entries, _ = filer.ListDirectoryEntries(filer2.FullPath("/home/chris/this/is"), "", false, 100) - if len(entries) != 1 { - t.Errorf("list entries count: %v", len(entries)) - return - } - - // checking root directory - entries, _ = filer.ListDirectoryEntries(filer2.FullPath("/"), "", false, 100) - if len(entries) != 1 { - t.Errorf("list entries count: %v", len(entries)) - return - } - - // add file3 - file3Path := filer2.FullPath("/home/chris/this/is/file3.jpg") - entry3 := &filer2.Entry{ - FullPath: file3Path, - Attr: filer2.Attr{ - Mode: 0440, - Uid: 1234, - Gid: 5678, - }, - } - filer.CreateEntry(entry3) - - // checking one upper directory - entries, _ = filer.ListDirectoryEntries(filer2.FullPath("/home/chris/this/is"), "", false, 100) - if len(entries) != 2 { - t.Errorf("list entries count: %v", len(entries)) - return - } - - // delete file and count - filer.DeleteEntryMetaAndData(file3Path, false, false) - entries, _ = filer.ListDirectoryEntries(filer2.FullPath("/home/chris/this/is"), "", false, 100) - if len(entries) != 1 { - t.Errorf("list entries count: 
%v", len(entries)) - return - } - -} diff --git a/weed/filer2/meta_aggregator.go b/weed/filer2/meta_aggregator.go new file mode 100644 index 000000000..2f707b921 --- /dev/null +++ b/weed/filer2/meta_aggregator.go @@ -0,0 +1,91 @@ +package filer2 + +import ( + "context" + "fmt" + "io" + "sync" + "time" + + "github.com/golang/protobuf/proto" + "google.golang.org/grpc" + + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/util/log_buffer" +) + +type MetaAggregator struct { + filers []string + grpcDialOption grpc.DialOption + MetaLogBuffer *log_buffer.LogBuffer + // notifying clients + ListenersLock sync.Mutex + ListenersCond *sync.Cond +} + +func NewMetaAggregator(filers []string, grpcDialOption grpc.DialOption) *MetaAggregator { + t := &MetaAggregator{ + filers: filers, + grpcDialOption: grpcDialOption, + } + t.ListenersCond = sync.NewCond(&t.ListenersLock) + t.MetaLogBuffer = log_buffer.NewLogBuffer(time.Minute, nil, func() { + t.ListenersCond.Broadcast() + }) + return t +} + +func (ma *MetaAggregator) StartLoopSubscribe(lastTsNs int64) { + for _, filer := range ma.filers { + go ma.subscribeToOneFiler(filer, lastTsNs) + } +} + +func (ma *MetaAggregator) subscribeToOneFiler(filer string, lastTsNs int64) { + + processEventFn := func(event *filer_pb.SubscribeMetadataResponse) error { + data, err := proto.Marshal(event) + if err != nil { + glog.Errorf("failed to marshal subscribed filer_pb.SubscribeMetadataResponse %+v: %v", event, err) + return err + } + dir := event.Directory + // println("received meta change", dir, "size", len(data)) + ma.MetaLogBuffer.AddToBuffer([]byte(dir), data) + return nil + } + + for { + err := pb.WithFilerClient(filer, ma.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error { + stream, err := client.SubscribeLocalMetadata(context.Background(), &filer_pb.SubscribeMetadataRequest{ + ClientName: "filer", + 
PathPrefix: "/", + SinceNs: lastTsNs, + }) + if err != nil { + return fmt.Errorf("subscribe: %v", err) + } + + for { + resp, listenErr := stream.Recv() + if listenErr == io.EOF { + return nil + } + if listenErr != nil { + return listenErr + } + + if err := processEventFn(resp); err != nil { + return fmt.Errorf("process %v: %v", resp, err) + } + lastTsNs = resp.TsNs + } + }) + if err != nil { + glog.V(0).Infof("subscribing remote %s meta change: %v", filer, err) + time.Sleep(1733 * time.Millisecond) + } + } +} diff --git a/weed/filer2/mongodb/mongodb_store.go b/weed/filer2/mongodb/mongodb_store.go new file mode 100644 index 000000000..375a457a4 --- /dev/null +++ b/weed/filer2/mongodb/mongodb_store.go @@ -0,0 +1,210 @@ +package mongodb + +import ( + "context" + "fmt" + "github.com/chrislusf/seaweedfs/weed/filer2" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/util" + "go.mongodb.org/mongo-driver/bson" + "go.mongodb.org/mongo-driver/mongo" + "go.mongodb.org/mongo-driver/mongo/options" + "go.mongodb.org/mongo-driver/x/bsonx" + "time" +) + +func init() { + filer2.Stores = append(filer2.Stores, &MongodbStore{}) +} + +type MongodbStore struct { + connect *mongo.Client + database string + collectionName string +} + +type Model struct { + Directory string `bson:"directory"` + Name string `bson:"name"` + Meta []byte `bson:"meta"` +} + +func (store *MongodbStore) GetName() string { + return "mongodb" +} + +func (store *MongodbStore) Initialize(configuration util.Configuration, prefix string) (err error) { + store.database = configuration.GetString(prefix + "database") + store.collectionName = "filemeta" + poolSize := configuration.GetInt(prefix + "option_pool_size") + return store.connection(configuration.GetString(prefix+"uri"), uint64(poolSize)) +} + +func (store *MongodbStore) connection(uri string, poolSize uint64) (err error) { + ctx, _ := context.WithTimeout(context.Background(), 
10*time.Second) + opts := options.Client().ApplyURI(uri) + + if poolSize > 0 { + opts.SetMaxPoolSize(poolSize) + } + + client, err := mongo.Connect(ctx, opts) + if err != nil { + return err + } + + c := client.Database(store.database).Collection(store.collectionName) + err = store.indexUnique(c) + store.connect = client + return err +} + +func (store *MongodbStore) createIndex(c *mongo.Collection, index mongo.IndexModel, opts *options.CreateIndexesOptions) error { + _, err := c.Indexes().CreateOne(context.Background(), index, opts) + return err +} + +func (store *MongodbStore) indexUnique(c *mongo.Collection) error { + opts := options.CreateIndexes().SetMaxTime(10 * time.Second) + + unique := new(bool) + *unique = true + + index := mongo.IndexModel{ + Keys: bsonx.Doc{{Key: "directory", Value: bsonx.Int32(1)}, {Key: "name", Value: bsonx.Int32(1)}}, + Options: &options.IndexOptions{ + Unique: unique, + }, + } + + return store.createIndex(c, index, opts) +} + +func (store *MongodbStore) BeginTransaction(ctx context.Context) (context.Context, error) { + return ctx, nil +} + +func (store *MongodbStore) CommitTransaction(ctx context.Context) error { + return nil +} + +func (store *MongodbStore) RollbackTransaction(ctx context.Context) error { + return nil +} + +func (store *MongodbStore) InsertEntry(ctx context.Context, entry *filer2.Entry) (err error) { + + dir, name := entry.FullPath.DirAndName() + meta, err := entry.EncodeAttributesAndChunks() + if err != nil { + return fmt.Errorf("encode %s: %s", entry.FullPath, err) + } + + c := store.connect.Database(store.database).Collection(store.collectionName) + + _, err = c.InsertOne(ctx, Model{ + Directory: dir, + Name: name, + Meta: meta, + }) + + return nil +} + +func (store *MongodbStore) UpdateEntry(ctx context.Context, entry *filer2.Entry) (err error) { + return store.InsertEntry(ctx, entry) +} + +func (store *MongodbStore) FindEntry(ctx context.Context, fullpath util.FullPath) (entry *filer2.Entry, err error) { + + 
dir, name := fullpath.DirAndName() + var data Model + + var where = bson.M{"directory": dir, "name": name} + err = store.connect.Database(store.database).Collection(store.collectionName).FindOne(ctx, where).Decode(&data) + if err != mongo.ErrNoDocuments && err != nil { + return nil, filer_pb.ErrNotFound + } + + if len(data.Meta) == 0 { + return nil, filer_pb.ErrNotFound + } + + entry = &filer2.Entry{ + FullPath: fullpath, + } + + err = entry.DecodeAttributesAndChunks(data.Meta) + if err != nil { + return entry, fmt.Errorf("decode %s : %v", entry.FullPath, err) + } + + return entry, nil +} + +func (store *MongodbStore) DeleteEntry(ctx context.Context, fullpath util.FullPath) error { + + dir, name := fullpath.DirAndName() + + where := bson.M{"directory": dir, "name": name} + _, err := store.connect.Database(store.database).Collection(store.collectionName).DeleteOne(ctx, where) + if err != nil { + return fmt.Errorf("delete %s : %v", fullpath, err) + } + + return nil +} + +func (store *MongodbStore) DeleteFolderChildren(ctx context.Context, fullpath util.FullPath) error { + + where := bson.M{"directory": fullpath} + _, err := store.connect.Database(store.database).Collection(store.collectionName).DeleteMany(ctx, where) + if err != nil { + return fmt.Errorf("delete %s : %v", fullpath, err) + } + + return nil +} + +func (store *MongodbStore) ListDirectoryEntries(ctx context.Context, fullpath util.FullPath, startFileName string, inclusive bool, limit int) (entries []*filer2.Entry, err error) { + + var where = bson.M{"directory": string(fullpath), "name": bson.M{"$gt": startFileName}} + if inclusive { + where["name"] = bson.M{ + "$gte": startFileName, + } + } + optLimit := int64(limit) + opts := &options.FindOptions{Limit: &optLimit, Sort: bson.M{"name": 1}} + cur, err := store.connect.Database(store.database).Collection(store.collectionName).Find(ctx, where, opts) + for cur.Next(ctx) { + var data Model + err := cur.Decode(&data) + if err != nil && err != 
mongo.ErrNoDocuments { + return nil, err + } + + entry := &filer2.Entry{ + FullPath: util.NewFullPath(string(fullpath), data.Name), + } + if decodeErr := entry.DecodeAttributesAndChunks(data.Meta); decodeErr != nil { + err = decodeErr + glog.V(0).Infof("list %s : %v", entry.FullPath, err) + break + } + + entries = append(entries, entry) + } + + if err := cur.Close(ctx); err != nil { + glog.V(0).Infof("list iterator close: %v", err) + } + + return entries, err +} + +func (store *MongodbStore) Shutdown() { + ctx, _ := context.WithTimeout(context.Background(), 10*time.Second) + store.connect.Disconnect(ctx) +} diff --git a/weed/filer2/mysql/mysql_store.go b/weed/filer2/mysql/mysql_store.go index e18299bd2..63d99cd9d 100644 --- a/weed/filer2/mysql/mysql_store.go +++ b/weed/filer2/mysql/mysql_store.go @@ -26,28 +26,35 @@ func (store *MysqlStore) GetName() string { return "mysql" } -func (store *MysqlStore) Initialize(configuration util.Configuration) (err error) { +func (store *MysqlStore) Initialize(configuration util.Configuration, prefix string) (err error) { return store.initialize( - configuration.GetString("username"), - configuration.GetString("password"), - configuration.GetString("hostname"), - configuration.GetInt("port"), - configuration.GetString("database"), - configuration.GetInt("connection_max_idle"), - configuration.GetInt("connection_max_open"), + configuration.GetString(prefix+"username"), + configuration.GetString(prefix+"password"), + configuration.GetString(prefix+"hostname"), + configuration.GetInt(prefix+"port"), + configuration.GetString(prefix+"database"), + configuration.GetInt(prefix+"connection_max_idle"), + configuration.GetInt(prefix+"connection_max_open"), + configuration.GetBool(prefix+"interpolateParams"), ) } -func (store *MysqlStore) initialize(user, password, hostname string, port int, database string, maxIdle, maxOpen int) (err error) { +func (store *MysqlStore) initialize(user, password, hostname string, port int, database string, 
maxIdle, maxOpen int, + interpolateParams bool) (err error) { store.SqlInsert = "INSERT INTO filemeta (dirhash,name,directory,meta) VALUES(?,?,?,?)" store.SqlUpdate = "UPDATE filemeta SET meta=? WHERE dirhash=? AND name=? AND directory=?" store.SqlFind = "SELECT meta FROM filemeta WHERE dirhash=? AND name=? AND directory=?" store.SqlDelete = "DELETE FROM filemeta WHERE dirhash=? AND name=? AND directory=?" + store.SqlDeleteFolderChildren = "DELETE FROM filemeta WHERE dirhash=? AND directory=?" store.SqlListExclusive = "SELECT NAME, meta FROM filemeta WHERE dirhash=? AND name>? AND directory=? ORDER BY NAME ASC LIMIT ?" store.SqlListInclusive = "SELECT NAME, meta FROM filemeta WHERE dirhash=? AND name>=? AND directory=? ORDER BY NAME ASC LIMIT ?" sqlUrl := fmt.Sprintf(CONNECTION_URL_PATTERN, user, password, hostname, port, database) + if interpolateParams { + sqlUrl += "&interpolateParams=true" + } + var dbErr error store.DB, dbErr = sql.Open("mysql", sqlUrl) if dbErr != nil { diff --git a/weed/filer2/postgres/README.txt b/weed/filer2/postgres/README.txt index ef2ef683b..cb0c99c63 100644 --- a/weed/filer2/postgres/README.txt +++ b/weed/filer2/postgres/README.txt @@ -9,8 +9,8 @@ $PGHOME/bin/psql --username=postgres --password seaweedfs CREATE TABLE IF NOT EXISTS filemeta ( dirhash BIGINT, - name VARCHAR(1000), - directory VARCHAR(4096), + name VARCHAR(65535), + directory VARCHAR(65535), meta bytea, PRIMARY KEY (dirhash, name) ); diff --git a/weed/filer2/postgres/postgres_store.go b/weed/filer2/postgres/postgres_store.go index ffd3d1e01..51c069aae 100644 --- a/weed/filer2/postgres/postgres_store.go +++ b/weed/filer2/postgres/postgres_store.go @@ -11,7 +11,7 @@ import ( ) const ( - CONNECTION_URL_PATTERN = "host=%s port=%d user=%s password=%s dbname=%s sslmode=%s connect_timeout=30" + CONNECTION_URL_PATTERN = "host=%s port=%d user=%s sslmode=%s connect_timeout=30" ) func init() { @@ -26,16 +26,16 @@ func (store *PostgresStore) GetName() string { return "postgres" } 
-func (store *PostgresStore) Initialize(configuration util.Configuration) (err error) { +func (store *PostgresStore) Initialize(configuration util.Configuration, prefix string) (err error) { return store.initialize( - configuration.GetString("username"), - configuration.GetString("password"), - configuration.GetString("hostname"), - configuration.GetInt("port"), - configuration.GetString("database"), - configuration.GetString("sslmode"), - configuration.GetInt("connection_max_idle"), - configuration.GetInt("connection_max_open"), + configuration.GetString(prefix+"username"), + configuration.GetString(prefix+"password"), + configuration.GetString(prefix+"hostname"), + configuration.GetInt(prefix+"port"), + configuration.GetString(prefix+"database"), + configuration.GetString(prefix+"sslmode"), + configuration.GetInt(prefix+"connection_max_idle"), + configuration.GetInt(prefix+"connection_max_open"), ) } @@ -45,10 +45,17 @@ func (store *PostgresStore) initialize(user, password, hostname string, port int store.SqlUpdate = "UPDATE filemeta SET meta=$1 WHERE dirhash=$2 AND name=$3 AND directory=$4" store.SqlFind = "SELECT meta FROM filemeta WHERE dirhash=$1 AND name=$2 AND directory=$3" store.SqlDelete = "DELETE FROM filemeta WHERE dirhash=$1 AND name=$2 AND directory=$3" + store.SqlDeleteFolderChildren = "DELETE FROM filemeta WHERE dirhash=$1 AND directory=$2" store.SqlListExclusive = "SELECT NAME, meta FROM filemeta WHERE dirhash=$1 AND name>$2 AND directory=$3 ORDER BY NAME ASC LIMIT $4" store.SqlListInclusive = "SELECT NAME, meta FROM filemeta WHERE dirhash=$1 AND name>=$2 AND directory=$3 ORDER BY NAME ASC LIMIT $4" - sqlUrl := fmt.Sprintf(CONNECTION_URL_PATTERN, hostname, port, user, password, database, sslmode) + sqlUrl := fmt.Sprintf(CONNECTION_URL_PATTERN, hostname, port, user, sslmode) + if password != "" { + sqlUrl += " password=" + password + } + if database != "" { + sqlUrl += " dbname=" + database + } var dbErr error store.DB, dbErr = sql.Open("postgres", 
sqlUrl) if dbErr != nil { diff --git a/weed/filer2/reader_at.go b/weed/filer2/reader_at.go new file mode 100644 index 000000000..11a80443f --- /dev/null +++ b/weed/filer2/reader_at.go @@ -0,0 +1,162 @@ +package filer2 + +import ( + "bytes" + "context" + "fmt" + "io" + "sync" + + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/util" + "github.com/chrislusf/seaweedfs/weed/util/chunk_cache" + "github.com/chrislusf/seaweedfs/weed/wdclient" +) + +type ChunkReadAt struct { + masterClient *wdclient.MasterClient + chunkViews []*ChunkView + buffer []byte + bufferOffset int64 + lookupFileId func(fileId string) (targetUrl string, err error) + readerLock sync.Mutex + + chunkCache *chunk_cache.ChunkCache +} + +// var _ = io.ReaderAt(&ChunkReadAt{}) + +type LookupFileIdFunctionType func(fileId string) (targetUrl string, err error) + +func LookupFn(filerClient filer_pb.FilerClient) LookupFileIdFunctionType { + return func(fileId string) (targetUrl string, err error) { + err = filerClient.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { + vid := VolumeId(fileId) + resp, err := client.LookupVolume(context.Background(), &filer_pb.LookupVolumeRequest{ + VolumeIds: []string{vid}, + }) + if err != nil { + return err + } + + locations := resp.LocationsMap[vid] + if locations == nil || len(locations.Locations) == 0 { + glog.V(0).Infof("failed to locate %s", fileId) + return fmt.Errorf("failed to locate %s", fileId) + } + + volumeServerAddress := filerClient.AdjustedUrl(locations.Locations[0].Url) + + targetUrl = fmt.Sprintf("http://%s/%s", volumeServerAddress, fileId) + + return nil + }) + return + } +} + +func NewChunkReaderAtFromClient(filerClient filer_pb.FilerClient, chunkViews []*ChunkView, chunkCache *chunk_cache.ChunkCache) *ChunkReadAt { + + return &ChunkReadAt{ + chunkViews: chunkViews, + lookupFileId: LookupFn(filerClient), + bufferOffset: -1, + chunkCache: chunkCache, + } +} + 
+func (c *ChunkReadAt) ReadAt(p []byte, offset int64) (n int, err error) { + + c.readerLock.Lock() + defer c.readerLock.Unlock() + + for n < len(p) && err == nil { + readCount, readErr := c.doReadAt(p[n:], offset+int64(n)) + n += readCount + err = readErr + if readCount == 0 { + return n, io.EOF + } + } + return +} + +func (c *ChunkReadAt) doReadAt(p []byte, offset int64) (n int, err error) { + + var found bool + for _, chunk := range c.chunkViews { + if chunk.LogicOffset <= offset && offset < chunk.LogicOffset+int64(chunk.Size) { + found = true + if c.bufferOffset != chunk.LogicOffset { + c.buffer, err = c.fetchChunkData(chunk) + if err != nil { + glog.Errorf("fetching chunk %+v: %v\n", chunk, err) + } + c.bufferOffset = chunk.LogicOffset + } + break + } + } + if !found { + return 0, io.EOF + } + + if err == nil { + n = copy(p, c.buffer[offset-c.bufferOffset:]) + } + + // fmt.Printf("> doReadAt [%d,%d), buffer:[%d,%d)\n", offset, offset+int64(n), c.bufferOffset, c.bufferOffset+int64(len(c.buffer))) + + return + +} + +func (c *ChunkReadAt) fetchChunkData(chunkView *ChunkView) (data []byte, err error) { + + glog.V(4).Infof("fetchChunkData %s [%d,%d)\n", chunkView.FileId, chunkView.LogicOffset, chunkView.LogicOffset+int64(chunkView.Size)) + + hasDataInCache := false + chunkData := c.chunkCache.GetChunk(chunkView.FileId, chunkView.ChunkSize) + if chunkData != nil { + glog.V(3).Infof("cache hit %s [%d,%d)", chunkView.FileId, chunkView.LogicOffset, chunkView.LogicOffset+int64(chunkView.Size)) + hasDataInCache = true + } else { + chunkData, err = c.doFetchFullChunkData(chunkView.FileId, chunkView.CipherKey, chunkView.IsGzipped) + if err != nil { + return nil, err + } + } + + if int64(len(chunkData)) < chunkView.Offset+int64(chunkView.Size) { + glog.Errorf("unexpected larger cached:%v chunk %s [%d,%d) than %d", hasDataInCache, chunkView.FileId, chunkView.Offset, chunkView.Offset+int64(chunkView.Size), len(chunkData)) + return nil, fmt.Errorf("unexpected larger cached:%v 
chunk %s [%d,%d) than %d", hasDataInCache, chunkView.FileId, chunkView.Offset, chunkView.Offset+int64(chunkView.Size), len(chunkData)) + } + + data = chunkData[chunkView.Offset : chunkView.Offset+int64(chunkView.Size)] + + if !hasDataInCache { + c.chunkCache.SetChunk(chunkView.FileId, chunkData) + } + + return data, nil +} + +func (c *ChunkReadAt) doFetchFullChunkData(fileId string, cipherKey []byte, isGzipped bool) ([]byte, error) { + + urlString, err := c.lookupFileId(fileId) + if err != nil { + glog.V(1).Infof("operation LookupFileId %s failed, err: %v", fileId, err) + return nil, err + } + var buffer bytes.Buffer + err = util.ReadUrlAsStream(urlString, cipherKey, isGzipped, true, 0, 0, func(data []byte) { + buffer.Write(data) + }) + if err != nil { + glog.V(0).Infof("read %s failed, err: %v", fileId, err) + return nil, err + } + + return buffer.Bytes(), nil +} diff --git a/weed/filer2/redis/redis_cluster_store.go b/weed/filer2/redis/redis_cluster_store.go index 4f74a8a22..eaaecb740 100644 --- a/weed/filer2/redis/redis_cluster_store.go +++ b/weed/filer2/redis/redis_cluster_store.go @@ -18,15 +18,25 @@ func (store *RedisClusterStore) GetName() string { return "redis_cluster" } -func (store *RedisClusterStore) Initialize(configuration util.Configuration) (err error) { +func (store *RedisClusterStore) Initialize(configuration util.Configuration, prefix string) (err error) { + + configuration.SetDefault(prefix+"useReadOnly", true) + configuration.SetDefault(prefix+"routeByLatency", true) + return store.initialize( - configuration.GetStringSlice("addresses"), + configuration.GetStringSlice(prefix+"addresses"), + configuration.GetString(prefix+"password"), + configuration.GetBool(prefix+"useReadOnly"), + configuration.GetBool(prefix+"routeByLatency"), ) } -func (store *RedisClusterStore) initialize(addresses []string) (err error) { +func (store *RedisClusterStore) initialize(addresses []string, password string, readOnly, routeByLatency bool) (err error) { store.Client 
= redis.NewClusterClient(&redis.ClusterOptions{ - Addrs: addresses, + Addrs: addresses, + Password: password, + ReadOnly: readOnly, + RouteByLatency: routeByLatency, }) return } diff --git a/weed/filer2/redis/redis_store.go b/weed/filer2/redis/redis_store.go index c56fa014c..9debdb070 100644 --- a/weed/filer2/redis/redis_store.go +++ b/weed/filer2/redis/redis_store.go @@ -18,11 +18,11 @@ func (store *RedisStore) GetName() string { return "redis" } -func (store *RedisStore) Initialize(configuration util.Configuration) (err error) { +func (store *RedisStore) Initialize(configuration util.Configuration, prefix string) (err error) { return store.initialize( - configuration.GetString("address"), - configuration.GetString("password"), - configuration.GetInt("database"), + configuration.GetString(prefix+"address"), + configuration.GetString(prefix+"password"), + configuration.GetInt(prefix+"database"), ) } diff --git a/weed/filer2/redis/universal_redis_store.go b/weed/filer2/redis/universal_redis_store.go index 7fd7e1180..e5b9e8840 100644 --- a/weed/filer2/redis/universal_redis_store.go +++ b/weed/filer2/redis/universal_redis_store.go @@ -1,13 +1,18 @@ package redis import ( + "context" "fmt" - "github.com/chrislusf/seaweedfs/weed/filer2" - "github.com/chrislusf/seaweedfs/weed/glog" - "github.com/go-redis/redis" "sort" "strings" "time" + + "github.com/go-redis/redis" + + "github.com/chrislusf/seaweedfs/weed/filer2" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/util" ) const ( @@ -18,7 +23,17 @@ type UniversalRedisStore struct { Client redis.UniversalClient } -func (store *UniversalRedisStore) InsertEntry(entry *filer2.Entry) (err error) { +func (store *UniversalRedisStore) BeginTransaction(ctx context.Context) (context.Context, error) { + return ctx, nil +} +func (store *UniversalRedisStore) CommitTransaction(ctx context.Context) error { + return nil +} +func (store 
*UniversalRedisStore) RollbackTransaction(ctx context.Context) error { + return nil +} + +func (store *UniversalRedisStore) InsertEntry(ctx context.Context, entry *filer2.Entry) (err error) { value, err := entry.EncodeAttributesAndChunks() if err != nil { @@ -42,16 +57,16 @@ func (store *UniversalRedisStore) InsertEntry(entry *filer2.Entry) (err error) { return nil } -func (store *UniversalRedisStore) UpdateEntry(entry *filer2.Entry) (err error) { +func (store *UniversalRedisStore) UpdateEntry(ctx context.Context, entry *filer2.Entry) (err error) { - return store.InsertEntry(entry) + return store.InsertEntry(ctx, entry) } -func (store *UniversalRedisStore) FindEntry(fullpath filer2.FullPath) (entry *filer2.Entry, err error) { +func (store *UniversalRedisStore) FindEntry(ctx context.Context, fullpath util.FullPath) (entry *filer2.Entry, err error) { data, err := store.Client.Get(string(fullpath)).Result() if err == redis.Nil { - return nil, filer2.ErrNotFound + return nil, filer_pb.ErrNotFound } if err != nil { @@ -69,7 +84,7 @@ func (store *UniversalRedisStore) FindEntry(fullpath filer2.FullPath) (entry *fi return entry, nil } -func (store *UniversalRedisStore) DeleteEntry(fullpath filer2.FullPath) (err error) { +func (store *UniversalRedisStore) DeleteEntry(ctx context.Context, fullpath util.FullPath) (err error) { _, err = store.Client.Del(string(fullpath)).Result() @@ -88,10 +103,29 @@ func (store *UniversalRedisStore) DeleteEntry(fullpath filer2.FullPath) (err err return nil } -func (store *UniversalRedisStore) ListDirectoryEntries(fullpath filer2.FullPath, startFileName string, inclusive bool, - limit int) (entries []*filer2.Entry, err error) { +func (store *UniversalRedisStore) DeleteFolderChildren(ctx context.Context, fullpath util.FullPath) (err error) { members, err := store.Client.SMembers(genDirectoryListKey(string(fullpath))).Result() + if err != nil { + return fmt.Errorf("delete folder %s : %v", fullpath, err) + } + + for _, fileName := range members { 
+ path := util.NewFullPath(string(fullpath), fileName) + _, err = store.Client.Del(string(path)).Result() + if err != nil { + return fmt.Errorf("delete %s in parent dir: %v", fullpath, err) + } + } + + return nil +} + +func (store *UniversalRedisStore) ListDirectoryEntries(ctx context.Context, fullpath util.FullPath, startFileName string, inclusive bool, + limit int) (entries []*filer2.Entry, err error) { + + dirListKey := genDirectoryListKey(string(fullpath)) + members, err := store.Client.SMembers(dirListKey).Result() if err != nil { return nil, fmt.Errorf("list %s : %v", fullpath, err) } @@ -125,11 +159,18 @@ func (store *UniversalRedisStore) ListDirectoryEntries(fullpath filer2.FullPath, // fetch entry meta for _, fileName := range members { - path := filer2.NewFullPath(string(fullpath), fileName) - entry, err := store.FindEntry(path) + path := util.NewFullPath(string(fullpath), fileName) + entry, err := store.FindEntry(ctx, path) if err != nil { glog.V(0).Infof("list %s : %v", path, err) } else { + if entry.TtlSec > 0 { + if entry.Attr.Crtime.Add(time.Duration(entry.TtlSec) * time.Second).Before(time.Now()) { + store.Client.Del(string(path)).Result() + store.Client.SRem(dirListKey, fileName).Result() + continue + } + } entries = append(entries, entry) } } @@ -140,3 +181,7 @@ func (store *UniversalRedisStore) ListDirectoryEntries(fullpath filer2.FullPath, func genDirectoryListKey(dir string) (dirList string) { return dir + DIR_LIST_MARKER } + +func (store *UniversalRedisStore) Shutdown() { + store.Client.Close() +} diff --git a/weed/filer2/redis2/redis_cluster_store.go b/weed/filer2/redis2/redis_cluster_store.go new file mode 100644 index 000000000..b252eabab --- /dev/null +++ b/weed/filer2/redis2/redis_cluster_store.go @@ -0,0 +1,42 @@ +package redis2 + +import ( + "github.com/chrislusf/seaweedfs/weed/filer2" + "github.com/chrislusf/seaweedfs/weed/util" + "github.com/go-redis/redis" +) + +func init() { + filer2.Stores = append(filer2.Stores, 
&RedisCluster2Store{}) +} + +type RedisCluster2Store struct { + UniversalRedis2Store +} + +func (store *RedisCluster2Store) GetName() string { + return "redis_cluster2" +} + +func (store *RedisCluster2Store) Initialize(configuration util.Configuration, prefix string) (err error) { + + configuration.SetDefault(prefix+"useReadOnly", true) + configuration.SetDefault(prefix+"routeByLatency", true) + + return store.initialize( + configuration.GetStringSlice(prefix+"addresses"), + configuration.GetString(prefix+"password"), + configuration.GetBool(prefix+"useReadOnly"), + configuration.GetBool(prefix+"routeByLatency"), + ) +} + +func (store *RedisCluster2Store) initialize(addresses []string, password string, readOnly, routeByLatency bool) (err error) { + store.Client = redis.NewClusterClient(&redis.ClusterOptions{ + Addrs: addresses, + Password: password, + ReadOnly: readOnly, + RouteByLatency: routeByLatency, + }) + return +} diff --git a/weed/filer2/redis2/redis_store.go b/weed/filer2/redis2/redis_store.go new file mode 100644 index 000000000..1e2a20043 --- /dev/null +++ b/weed/filer2/redis2/redis_store.go @@ -0,0 +1,36 @@ +package redis2 + +import ( + "github.com/chrislusf/seaweedfs/weed/filer2" + "github.com/chrislusf/seaweedfs/weed/util" + "github.com/go-redis/redis" +) + +func init() { + filer2.Stores = append(filer2.Stores, &Redis2Store{}) +} + +type Redis2Store struct { + UniversalRedis2Store +} + +func (store *Redis2Store) GetName() string { + return "redis2" +} + +func (store *Redis2Store) Initialize(configuration util.Configuration, prefix string) (err error) { + return store.initialize( + configuration.GetString(prefix+"address"), + configuration.GetString(prefix+"password"), + configuration.GetInt(prefix+"database"), + ) +} + +func (store *Redis2Store) initialize(hostPort string, password string, database int) (err error) { + store.Client = redis.NewClient(&redis.Options{ + Addr: hostPort, + Password: password, + DB: database, + }) + return +} diff --git 
a/weed/filer2/redis2/universal_redis_store.go b/weed/filer2/redis2/universal_redis_store.go new file mode 100644 index 000000000..420336b46 --- /dev/null +++ b/weed/filer2/redis2/universal_redis_store.go @@ -0,0 +1,162 @@ +package redis2 + +import ( + "context" + "fmt" + "time" + + "github.com/go-redis/redis" + + "github.com/chrislusf/seaweedfs/weed/filer2" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/util" +) + +const ( + DIR_LIST_MARKER = "\x00" +) + +type UniversalRedis2Store struct { + Client redis.UniversalClient +} + +func (store *UniversalRedis2Store) BeginTransaction(ctx context.Context) (context.Context, error) { + return ctx, nil +} +func (store *UniversalRedis2Store) CommitTransaction(ctx context.Context) error { + return nil +} +func (store *UniversalRedis2Store) RollbackTransaction(ctx context.Context) error { + return nil +} + +func (store *UniversalRedis2Store) InsertEntry(ctx context.Context, entry *filer2.Entry) (err error) { + + value, err := entry.EncodeAttributesAndChunks() + if err != nil { + return fmt.Errorf("encoding %s %+v: %v", entry.FullPath, entry.Attr, err) + } + + if err = store.Client.Set(string(entry.FullPath), value, time.Duration(entry.TtlSec)*time.Second).Err(); err != nil { + return fmt.Errorf("persisting %s : %v", entry.FullPath, err) + } + + dir, name := entry.FullPath.DirAndName() + if name != "" { + if err = store.Client.ZAddNX(genDirectoryListKey(dir), redis.Z{Score: 0, Member: name}).Err(); err != nil { + return fmt.Errorf("persisting %s in parent dir: %v", entry.FullPath, err) + } + } + + return nil +} + +func (store *UniversalRedis2Store) UpdateEntry(ctx context.Context, entry *filer2.Entry) (err error) { + + return store.InsertEntry(ctx, entry) +} + +func (store *UniversalRedis2Store) FindEntry(ctx context.Context, fullpath util.FullPath) (entry *filer2.Entry, err error) { + + data, err := 
store.Client.Get(string(fullpath)).Result() + if err == redis.Nil { + return nil, filer_pb.ErrNotFound + } + + if err != nil { + return nil, fmt.Errorf("get %s : %v", fullpath, err) + } + + entry = &filer2.Entry{ + FullPath: fullpath, + } + err = entry.DecodeAttributesAndChunks([]byte(data)) + if err != nil { + return entry, fmt.Errorf("decode %s : %v", entry.FullPath, err) + } + + return entry, nil +} + +func (store *UniversalRedis2Store) DeleteEntry(ctx context.Context, fullpath util.FullPath) (err error) { + + _, err = store.Client.Del(string(fullpath)).Result() + + if err != nil { + return fmt.Errorf("delete %s : %v", fullpath, err) + } + + dir, name := fullpath.DirAndName() + if name != "" { + _, err = store.Client.ZRem(genDirectoryListKey(dir), name).Result() + if err != nil { + return fmt.Errorf("delete %s in parent dir: %v", fullpath, err) + } + } + + return nil +} + +func (store *UniversalRedis2Store) DeleteFolderChildren(ctx context.Context, fullpath util.FullPath) (err error) { + + members, err := store.Client.ZRange(genDirectoryListKey(string(fullpath)), 0, -1).Result() + if err != nil { + return fmt.Errorf("delete folder %s : %v", fullpath, err) + } + + for _, fileName := range members { + path := util.NewFullPath(string(fullpath), fileName) + _, err = store.Client.Del(string(path)).Result() + if err != nil { + return fmt.Errorf("delete %s in parent dir: %v", fullpath, err) + } + } + + return nil +} + +func (store *UniversalRedis2Store) ListDirectoryEntries(ctx context.Context, fullpath util.FullPath, startFileName string, inclusive bool, + limit int) (entries []*filer2.Entry, err error) { + + dirListKey := genDirectoryListKey(string(fullpath)) + start := int64(0) + if startFileName != "" { + start, _ = store.Client.ZRank(dirListKey, startFileName).Result() + if !inclusive { + start++ + } + } + members, err := store.Client.ZRange(dirListKey, start, start+int64(limit)-1).Result() + if err != nil { + return nil, fmt.Errorf("list %s : %v", fullpath, err) 
+ } + + // fetch entry meta + for _, fileName := range members { + path := util.NewFullPath(string(fullpath), fileName) + entry, err := store.FindEntry(ctx, path) + if err != nil { + glog.V(0).Infof("list %s : %v", path, err) + } else { + if entry.TtlSec > 0 { + if entry.Attr.Crtime.Add(time.Duration(entry.TtlSec) * time.Second).Before(time.Now()) { + store.Client.Del(string(path)).Result() + store.Client.ZRem(dirListKey, fileName).Result() + continue + } + } + entries = append(entries, entry) + } + } + + return entries, err +} + +func genDirectoryListKey(dir string) (dirList string) { + return dir + DIR_LIST_MARKER +} + +func (store *UniversalRedis2Store) Shutdown() { + store.Client.Close() +} diff --git a/weed/filer2/stream.go b/weed/filer2/stream.go new file mode 100644 index 000000000..033a8dd13 --- /dev/null +++ b/weed/filer2/stream.go @@ -0,0 +1,199 @@ +package filer2 + +import ( + "bytes" + "io" + "math" + "strings" + + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/util" + "github.com/chrislusf/seaweedfs/weed/wdclient" +) + +func StreamContent(masterClient *wdclient.MasterClient, w io.Writer, chunks []*filer_pb.FileChunk, offset int64, size int64) error { + + chunkViews := ViewFromChunks(chunks, offset, size) + + fileId2Url := make(map[string]string) + + for _, chunkView := range chunkViews { + + urlString, err := masterClient.LookupFileId(chunkView.FileId) + if err != nil { + glog.V(1).Infof("operation LookupFileId %s failed, err: %v", chunkView.FileId, err) + return err + } + fileId2Url[chunkView.FileId] = urlString + } + + for _, chunkView := range chunkViews { + + urlString := fileId2Url[chunkView.FileId] + err := util.ReadUrlAsStream(urlString, chunkView.CipherKey, chunkView.IsGzipped, chunkView.IsFullChunk(), chunkView.Offset, int(chunkView.Size), func(data []byte) { + w.Write(data) + }) + if err != nil { + glog.V(1).Infof("read %s failed, err: %v", 
chunkView.FileId, err) + return err + } + } + + return nil + +} + +// ---------------- ReadAllReader ---------------------------------- + +func ReadAll(masterClient *wdclient.MasterClient, chunks []*filer_pb.FileChunk) ([]byte, error) { + + buffer := bytes.Buffer{} + + chunkViews := ViewFromChunks(chunks, 0, math.MaxInt32) + + lookupFileId := func(fileId string) (targetUrl string, err error) { + return masterClient.LookupFileId(fileId) + } + + for _, chunkView := range chunkViews { + urlString, err := lookupFileId(chunkView.FileId) + if err != nil { + glog.V(1).Infof("operation LookupFileId %s failed, err: %v", chunkView.FileId, err) + return nil, err + } + err = util.ReadUrlAsStream(urlString, chunkView.CipherKey, chunkView.IsGzipped, chunkView.IsFullChunk(), chunkView.Offset, int(chunkView.Size), func(data []byte) { + buffer.Write(data) + }) + if err != nil { + glog.V(1).Infof("read %s failed, err: %v", chunkView.FileId, err) + return nil, err + } + } + return buffer.Bytes(), nil +} + +// ---------------- ChunkStreamReader ---------------------------------- +type ChunkStreamReader struct { + chunkViews []*ChunkView + logicOffset int64 + buffer []byte + bufferOffset int64 + bufferPos int + chunkIndex int + lookupFileId LookupFileIdFunctionType +} + +var _ = io.ReadSeeker(&ChunkStreamReader{}) + +func NewChunkStreamReaderFromFiler(masterClient *wdclient.MasterClient, chunks []*filer_pb.FileChunk) *ChunkStreamReader { + + chunkViews := ViewFromChunks(chunks, 0, math.MaxInt32) + + return &ChunkStreamReader{ + chunkViews: chunkViews, + lookupFileId: func(fileId string) (targetUrl string, err error) { + return masterClient.LookupFileId(fileId) + }, + } +} + +func NewChunkStreamReader(filerClient filer_pb.FilerClient, chunks []*filer_pb.FileChunk) *ChunkStreamReader { + + chunkViews := ViewFromChunks(chunks, 0, math.MaxInt32) + + return &ChunkStreamReader{ + chunkViews: chunkViews, + lookupFileId: LookupFn(filerClient), + } +} + +func (c *ChunkStreamReader) Read(p 
[]byte) (n int, err error) { + for n < len(p) { + if c.isBufferEmpty() { + if c.chunkIndex >= len(c.chunkViews) { + return n, io.EOF + } + chunkView := c.chunkViews[c.chunkIndex] + c.fetchChunkToBuffer(chunkView) + c.chunkIndex++ + } + t := copy(p[n:], c.buffer[c.bufferPos:]) + c.bufferPos += t + n += t + } + return +} + +func (c *ChunkStreamReader) isBufferEmpty() bool { + return len(c.buffer) <= c.bufferPos +} + +func (c *ChunkStreamReader) Seek(offset int64, whence int) (int64, error) { + + var totalSize int64 + for _, chunk := range c.chunkViews { + totalSize += int64(chunk.Size) + } + + var err error + switch whence { + case io.SeekStart: + case io.SeekCurrent: + offset += c.bufferOffset + int64(c.bufferPos) + case io.SeekEnd: + offset = totalSize + offset + } + if offset > totalSize { + err = io.ErrUnexpectedEOF + } + + for i, chunk := range c.chunkViews { + if chunk.LogicOffset <= offset && offset < chunk.LogicOffset+int64(chunk.Size) { + if c.isBufferEmpty() || c.bufferOffset != chunk.LogicOffset { + c.fetchChunkToBuffer(chunk) + c.chunkIndex = i + 1 + break + } + } + } + c.bufferPos = int(offset - c.bufferOffset) + + return offset, err + +} + +func (c *ChunkStreamReader) fetchChunkToBuffer(chunkView *ChunkView) error { + urlString, err := c.lookupFileId(chunkView.FileId) + if err != nil { + glog.V(1).Infof("operation LookupFileId %s failed, err: %v", chunkView.FileId, err) + return err + } + var buffer bytes.Buffer + err = util.ReadUrlAsStream(urlString, chunkView.CipherKey, chunkView.IsGzipped, chunkView.IsFullChunk(), chunkView.Offset, int(chunkView.Size), func(data []byte) { + buffer.Write(data) + }) + if err != nil { + glog.V(1).Infof("read %s failed, err: %v", chunkView.FileId, err) + return err + } + c.buffer = buffer.Bytes() + c.bufferPos = 0 + c.bufferOffset = chunkView.LogicOffset + + // glog.V(0).Infof("read %s [%d,%d)", chunkView.FileId, chunkView.LogicOffset, chunkView.LogicOffset+int64(chunkView.Size)) + + return nil +} + +func (c 
*ChunkStreamReader) Close() { + // TODO try to release and reuse buffer +} + +func VolumeId(fileId string) string { + lastCommaIndex := strings.LastIndex(fileId, ",") + if lastCommaIndex > 0 { + return fileId[:lastCommaIndex] + } + return fileId +} diff --git a/weed/filer2/topics.go b/weed/filer2/topics.go new file mode 100644 index 000000000..9c6e5c88d --- /dev/null +++ b/weed/filer2/topics.go @@ -0,0 +1,6 @@ +package filer2 + +const ( + TopicsDir = "/topics" + SystemLogDir = TopicsDir + "/.system/log" +) diff --git a/weed/filesys/dir.go b/weed/filesys/dir.go index fae289217..9ef74a95c 100644 --- a/weed/filesys/dir.go +++ b/weed/filesys/dir.go @@ -1,23 +1,27 @@ package filesys import ( + "bytes" "context" "os" - "path" - "path/filepath" + "strings" "time" + "github.com/seaweedfs/fuse" + "github.com/seaweedfs/fuse/fs" + "github.com/chrislusf/seaweedfs/weed/filer2" + "github.com/chrislusf/seaweedfs/weed/filesys/meta_cache" "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" - "github.com/seaweedfs/fuse" - "github.com/seaweedfs/fuse/fs" + "github.com/chrislusf/seaweedfs/weed/util" ) type Dir struct { - Path string - wfs *WFS - attributes *filer_pb.FuseAttributes + name string + wfs *WFS + entry *filer_pb.Entry + parent *Dir } var _ = fs.Node(&Dir{}) @@ -28,99 +32,96 @@ var _ = fs.HandleReadDirAller(&Dir{}) var _ = fs.NodeRemover(&Dir{}) var _ = fs.NodeRenamer(&Dir{}) var _ = fs.NodeSetattrer(&Dir{}) +var _ = fs.NodeGetxattrer(&Dir{}) +var _ = fs.NodeSetxattrer(&Dir{}) +var _ = fs.NodeRemovexattrer(&Dir{}) +var _ = fs.NodeListxattrer(&Dir{}) +var _ = fs.NodeForgetter(&Dir{}) -func (dir *Dir) Attr(context context.Context, attr *fuse.Attr) error { +func (dir *Dir) Attr(ctx context.Context, attr *fuse.Attr) error { // https://github.com/bazil/fuse/issues/196 attr.Valid = time.Second - if dir.Path == dir.wfs.option.FilerMountRootPath { - attr.Uid = dir.wfs.option.MountUid - attr.Gid = dir.wfs.option.MountGid - attr.Mode = 
dir.wfs.option.MountMode + if dir.FullPath() == dir.wfs.option.FilerMountRootPath { + dir.setRootDirAttributes(attr) + glog.V(3).Infof("root dir Attr %s, attr: %+v", dir.FullPath(), attr) return nil } - item := dir.wfs.listDirectoryEntriesCache.Get(dir.Path) - if item != nil && !item.Expired() { - entry := item.Value().(*filer_pb.Entry) - - attr.Mtime = time.Unix(entry.Attributes.Mtime, 0) - attr.Ctime = time.Unix(entry.Attributes.Crtime, 0) - attr.Mode = os.FileMode(entry.Attributes.FileMode) - attr.Gid = entry.Attributes.Gid - attr.Uid = entry.Attributes.Uid - - return nil + if err := dir.maybeLoadEntry(); err != nil { + glog.V(3).Infof("dir Attr %s,err: %+v", dir.FullPath(), err) + return err } - parent, name := filepath.Split(dir.Path) + attr.Inode = util.FullPath(dir.FullPath()).AsInode() + attr.Mode = os.FileMode(dir.entry.Attributes.FileMode) | os.ModeDir + attr.Mtime = time.Unix(dir.entry.Attributes.Mtime, 0) + attr.Crtime = time.Unix(dir.entry.Attributes.Crtime, 0) + attr.Gid = dir.entry.Attributes.Gid + attr.Uid = dir.entry.Attributes.Uid - err := dir.wfs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error { - - request := &filer_pb.LookupDirectoryEntryRequest{ - Directory: parent, - Name: name, - } + glog.V(4).Infof("dir Attr %s, attr: %+v", dir.FullPath(), attr) - glog.V(1).Infof("read dir %s attr: %v", dir.Path, request) - resp, err := client.LookupDirectoryEntry(context, request) - if err != nil { - if err == filer2.ErrNotFound { - return nil - } - glog.V(0).Infof("read dir %s attr %v: %v", dir.Path, request, err) - return err - } - - if resp.Entry != nil { - dir.attributes = resp.Entry.Attributes - } + return nil +} - // dir.wfs.listDirectoryEntriesCache.Set(dir.Path, resp.Entry, dir.wfs.option.EntryCacheTtl) +func (dir *Dir) Getxattr(ctx context.Context, req *fuse.GetxattrRequest, resp *fuse.GetxattrResponse) error { - return nil - }) + glog.V(4).Infof("dir Getxattr %s", dir.FullPath()) - if err != nil { + if err := dir.maybeLoadEntry(); 
err != nil { return err } - // glog.V(1).Infof("dir %s: %v", dir.Path, attributes) - // glog.V(1).Infof("dir %s permission: %v", dir.Path, os.FileMode(attributes.FileMode)) - - attr.Mode = os.FileMode(dir.attributes.FileMode) | os.ModeDir + return getxattr(dir.entry, req, resp) +} - attr.Mtime = time.Unix(dir.attributes.Mtime, 0) - attr.Ctime = time.Unix(dir.attributes.Crtime, 0) - attr.Gid = dir.attributes.Gid - attr.Uid = dir.attributes.Uid +func (dir *Dir) setRootDirAttributes(attr *fuse.Attr) { + attr.Inode = 1 // filer2.FullPath(dir.Path).AsInode() + attr.Valid = time.Hour + attr.Uid = dir.wfs.option.MountUid + attr.Gid = dir.wfs.option.MountGid + attr.Mode = dir.wfs.option.MountMode + attr.Crtime = dir.wfs.option.MountCtime + attr.Ctime = dir.wfs.option.MountCtime + attr.Mtime = dir.wfs.option.MountMtime + attr.Atime = dir.wfs.option.MountMtime + attr.BlockSize = 1024 * 1024 +} - return nil +func (dir *Dir) newFile(name string, entry *filer_pb.Entry) fs.Node { + return dir.wfs.fsNodeCache.EnsureFsNode(util.NewFullPath(dir.FullPath(), name), func() fs.Node { + return &File{ + Name: name, + dir: dir, + wfs: dir.wfs, + entry: entry, + entryViewCache: nil, + } + }) } -func (dir *Dir) newFile(name string, entry *filer_pb.Entry) *File { - return &File{ - Name: name, - dir: dir, - wfs: dir.wfs, - entry: entry, - entryViewCache: nil, - } +func (dir *Dir) newDirectory(fullpath util.FullPath, entry *filer_pb.Entry) fs.Node { + + return dir.wfs.fsNodeCache.EnsureFsNode(fullpath, func() fs.Node { + return &Dir{name: entry.Name, wfs: dir.wfs, entry: entry, parent: dir} + }) + } func (dir *Dir) Create(ctx context.Context, req *fuse.CreateRequest, resp *fuse.CreateResponse) (fs.Node, fs.Handle, error) { request := &filer_pb.CreateEntryRequest{ - Directory: dir.Path, + Directory: dir.FullPath(), Entry: &filer_pb.Entry{ Name: req.Name, IsDirectory: req.Mode&os.ModeDir > 0, Attributes: &filer_pb.FuseAttributes{ Mtime: time.Now().Unix(), Crtime: time.Now().Unix(), - FileMode: 
uint32(req.Mode), + FileMode: uint32(req.Mode &^ dir.wfs.option.Umask), Uid: req.Uid, Gid: req.Gid, Collection: dir.wfs.option.Collection, @@ -128,109 +129,119 @@ func (dir *Dir) Create(ctx context.Context, req *fuse.CreateRequest, TtlSec: dir.wfs.option.TtlSec, }, }, + OExcl: req.Flags&fuse.OpenExclusive != 0, } - glog.V(1).Infof("create: %v", request) + glog.V(1).Infof("create %s/%s: %v", dir.FullPath(), req.Name, req.Flags) - if request.Entry.IsDirectory { - if err := dir.wfs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error { - if _, err := client.CreateEntry(ctx, request); err != nil { - glog.V(0).Infof("create %s/%s: %v", dir.Path, req.Name, err) - return fuse.EIO + if err := dir.wfs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { + if err := filer_pb.CreateEntry(client, request); err != nil { + if strings.Contains(err.Error(), "EEXIST") { + return fuse.EEXIST } - return nil - }); err != nil { - return nil, nil, err + return fuse.EIO } - } - file := dir.newFile(req.Name, request.Entry) - if !request.Entry.IsDirectory { - file.isOpen = true + dir.wfs.metaCache.InsertEntry(context.Background(), filer2.FromPbEntry(request.Directory, request.Entry)) + + return nil + }); err != nil { + return nil, nil, err } + var node fs.Node + if request.Entry.IsDirectory { + node = dir.newDirectory(util.NewFullPath(dir.FullPath(), req.Name), request.Entry) + return node, nil, nil + } + + node = dir.newFile(req.Name, request.Entry) + file := node.(*File) + file.isOpen++ fh := dir.wfs.AcquireHandle(file, req.Uid, req.Gid) - fh.dirtyMetadata = true return file, fh, nil } func (dir *Dir) Mkdir(ctx context.Context, req *fuse.MkdirRequest) (fs.Node, error) { - err := dir.wfs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error { + glog.V(4).Infof("mkdir %s: %s", dir.FullPath(), req.Name) + + newEntry := &filer_pb.Entry{ + Name: req.Name, + IsDirectory: true, + Attributes: &filer_pb.FuseAttributes{ + Mtime: time.Now().Unix(), + Crtime: 
time.Now().Unix(), + FileMode: uint32(req.Mode &^ dir.wfs.option.Umask), + Uid: req.Uid, + Gid: req.Gid, + }, + } + + err := dir.wfs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { request := &filer_pb.CreateEntryRequest{ - Directory: dir.Path, - Entry: &filer_pb.Entry{ - Name: req.Name, - IsDirectory: true, - Attributes: &filer_pb.FuseAttributes{ - Mtime: time.Now().Unix(), - Crtime: time.Now().Unix(), - FileMode: uint32(req.Mode), - Uid: req.Uid, - Gid: req.Gid, - }, - }, + Directory: dir.FullPath(), + Entry: newEntry, } glog.V(1).Infof("mkdir: %v", request) - if _, err := client.CreateEntry(ctx, request); err != nil { - glog.V(0).Infof("mkdir %s/%s: %v", dir.Path, req.Name, err) - return fuse.EIO + if err := filer_pb.CreateEntry(client, request); err != nil { + glog.V(0).Infof("mkdir %s/%s: %v", dir.FullPath(), req.Name, err) + return err } + dir.wfs.metaCache.InsertEntry(context.Background(), filer2.FromPbEntry(request.Directory, request.Entry)) + return nil }) if err == nil { - node := &Dir{Path: path.Join(dir.Path, req.Name), wfs: dir.wfs} + node := dir.newDirectory(util.NewFullPath(dir.FullPath(), req.Name), newEntry) + return node, nil } - return nil, err + glog.V(0).Infof("mkdir %s/%s: %v", dir.FullPath(), req.Name, err) + + return nil, fuse.EIO } func (dir *Dir) Lookup(ctx context.Context, req *fuse.LookupRequest, resp *fuse.LookupResponse) (node fs.Node, err error) { - var entry *filer_pb.Entry + glog.V(4).Infof("dir Lookup %s: %s by %s", dir.FullPath(), req.Name, req.Header.String()) - item := dir.wfs.listDirectoryEntriesCache.Get(path.Join(dir.Path, req.Name)) - if item != nil && !item.Expired() { - entry = item.Value().(*filer_pb.Entry) + fullFilePath := util.NewFullPath(dir.FullPath(), req.Name) + dirPath := util.FullPath(dir.FullPath()) + meta_cache.EnsureVisited(dir.wfs.metaCache, dir.wfs, util.FullPath(dirPath)) + cachedEntry, cacheErr := dir.wfs.metaCache.FindEntry(context.Background(), fullFilePath) + if cacheErr == 
filer_pb.ErrNotFound { + return nil, fuse.ENOENT } + entry := cachedEntry.ToProtoEntry() if entry == nil { - err = dir.wfs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error { - - request := &filer_pb.LookupDirectoryEntryRequest{ - Directory: dir.Path, - Name: req.Name, - } - - glog.V(4).Infof("lookup directory entry: %v", request) - resp, err := client.LookupDirectoryEntry(ctx, request) - if err != nil { - // glog.V(0).Infof("lookup %s/%s: %v", dir.Path, name, err) - return fuse.ENOENT - } - - entry = resp.Entry - - // dir.wfs.listDirectoryEntriesCache.Set(path.Join(dir.Path, entry.Name), entry, dir.wfs.option.EntryCacheTtl) - - return nil - }) + // glog.V(3).Infof("dir Lookup cache miss %s", fullFilePath) + entry, err = filer_pb.GetEntry(dir.wfs, fullFilePath) + if err != nil { + glog.V(1).Infof("dir GetEntry %s: %v", fullFilePath, err) + return nil, fuse.ENOENT + } + } else { + glog.V(4).Infof("dir Lookup cache hit %s", fullFilePath) } if entry != nil { if entry.IsDirectory { - node = &Dir{Path: path.Join(dir.Path, req.Name), wfs: dir.wfs, attributes: entry.Attributes} + node = dir.newDirectory(fullFilePath, entry) } else { node = dir.newFile(req.Name, entry) } - resp.EntryValid = time.Duration(0) + // resp.EntryValid = time.Second + resp.Attr.Inode = fullFilePath.AsInode() + resp.Attr.Valid = time.Second resp.Attr.Mtime = time.Unix(entry.Attributes.Mtime, 0) - resp.Attr.Ctime = time.Unix(entry.Attributes.Crtime, 0) + resp.Attr.Crtime = time.Unix(entry.Attributes.Crtime, 0) resp.Attr.Mode = os.FileMode(entry.Attributes.FileMode) resp.Attr.Gid = entry.Attributes.Gid resp.Attr.Uid = entry.Attributes.Uid @@ -238,203 +249,234 @@ func (dir *Dir) Lookup(ctx context.Context, req *fuse.LookupRequest, resp *fuse. 
return node, nil } + glog.V(4).Infof("not found dir GetEntry %s: %v", fullFilePath, err) return nil, fuse.ENOENT } func (dir *Dir) ReadDirAll(ctx context.Context) (ret []fuse.Dirent, err error) { - err = dir.wfs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error { + glog.V(3).Infof("dir ReadDirAll %s", dir.FullPath()) - paginationLimit := 1024 - remaining := dir.wfs.option.DirListingLimit + processEachEntryFn := func(entry *filer_pb.Entry, isLast bool) error { + fullpath := util.NewFullPath(dir.FullPath(), entry.Name) + inode := fullpath.AsInode() + if entry.IsDirectory { + dirent := fuse.Dirent{Inode: inode, Name: entry.Name, Type: fuse.DT_Dir} + ret = append(ret, dirent) + } else { + dirent := fuse.Dirent{Inode: inode, Name: entry.Name, Type: fuse.DT_File} + ret = append(ret, dirent) + } + return nil + } - lastEntryName := "" + dirPath := util.FullPath(dir.FullPath()) + meta_cache.EnsureVisited(dir.wfs.metaCache, dir.wfs, dirPath) + listedEntries, listErr := dir.wfs.metaCache.ListDirectoryEntries(context.Background(), util.FullPath(dir.FullPath()), "", false, int(dir.wfs.option.DirListCacheLimit)) + if listErr != nil { + glog.Errorf("list meta cache: %v", listErr) + return nil, fuse.EIO + } + for _, cachedEntry := range listedEntries { + processEachEntryFn(cachedEntry.ToProtoEntry(), false) + } + return +} - for remaining >= 0 { +func (dir *Dir) Remove(ctx context.Context, req *fuse.RemoveRequest) error { - request := &filer_pb.ListEntriesRequest{ - Directory: dir.Path, - StartFromFileName: lastEntryName, - Limit: uint32(paginationLimit), - } + if !req.Dir { + return dir.removeOneFile(req) + } - glog.V(4).Infof("read directory: %v", request) - resp, err := client.ListEntries(ctx, request) - if err != nil { - glog.V(0).Infof("list %s: %v", dir.Path, err) - return fuse.EIO - } + return dir.removeFolder(req) - cacheTtl := estimatedCacheTtl(len(resp.Entries)) - - for _, entry := range resp.Entries { - if entry.IsDirectory { - dirent := fuse.Dirent{Name: 
entry.Name, Type: fuse.DT_Dir} - ret = append(ret, dirent) - } else { - dirent := fuse.Dirent{Name: entry.Name, Type: fuse.DT_File} - ret = append(ret, dirent) - } - dir.wfs.listDirectoryEntriesCache.Set(path.Join(dir.Path, entry.Name), entry, cacheTtl) - lastEntryName = entry.Name - } +} - remaining -= len(resp.Entries) +func (dir *Dir) removeOneFile(req *fuse.RemoveRequest) error { - if len(resp.Entries) < paginationLimit { - break - } + filePath := util.NewFullPath(dir.FullPath(), req.Name) + entry, err := filer_pb.GetEntry(dir.wfs, filePath) + if err != nil { + return err + } + if entry == nil { + return nil + } - } + dir.wfs.deleteFileChunks(entry.Chunks) - return nil - }) + dir.wfs.fsNodeCache.DeleteFsNode(filePath) + + dir.wfs.metaCache.DeleteEntry(context.Background(), filePath) + + glog.V(3).Infof("remove file: %v", req) + err = filer_pb.Remove(dir.wfs, dir.FullPath(), req.Name, false, false, false, false) + if err != nil { + glog.V(3).Infof("not found remove file %s/%s: %v", dir.FullPath(), req.Name, err) + return fuse.ENOENT + } + + return nil - return ret, err } -func (dir *Dir) Remove(ctx context.Context, req *fuse.RemoveRequest) error { +func (dir *Dir) removeFolder(req *fuse.RemoveRequest) error { - if !req.Dir { - return dir.removeOneFile(ctx, req) + t := util.NewFullPath(dir.FullPath(), req.Name) + dir.wfs.fsNodeCache.DeleteFsNode(t) + + dir.wfs.metaCache.DeleteEntry(context.Background(), t) + + glog.V(3).Infof("remove directory entry: %v", req) + err := filer_pb.Remove(dir.wfs, dir.FullPath(), req.Name, true, false, false, false) + if err != nil { + glog.V(3).Infof("not found remove %s/%s: %v", dir.FullPath(), req.Name, err) + return fuse.ENOENT } - return dir.removeFolder(ctx, req) + return nil } -func (dir *Dir) removeOneFile(ctx context.Context, req *fuse.RemoveRequest) error { +func (dir *Dir) Setattr(ctx context.Context, req *fuse.SetattrRequest, resp *fuse.SetattrResponse) error { - var entry *filer_pb.Entry - err := 
dir.wfs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error { + glog.V(3).Infof("%v dir setattr %+v", dir.FullPath(), req) - request := &filer_pb.LookupDirectoryEntryRequest{ - Directory: dir.Path, - Name: req.Name, - } + if err := dir.maybeLoadEntry(); err != nil { + return err + } - glog.V(4).Infof("lookup to-be-removed entry: %v", request) - resp, err := client.LookupDirectoryEntry(ctx, request) - if err != nil { - // glog.V(0).Infof("lookup %s/%s: %v", dir.Path, name, err) - return fuse.ENOENT - } + if req.Valid.Mode() { + dir.entry.Attributes.FileMode = uint32(req.Mode) + } - entry = resp.Entry + if req.Valid.Uid() { + dir.entry.Attributes.Uid = req.Uid + } - return nil - }) + if req.Valid.Gid() { + dir.entry.Attributes.Gid = req.Gid + } - if err != nil { - return err + if req.Valid.Mtime() { + dir.entry.Attributes.Mtime = req.Mtime.Unix() } - dir.wfs.deleteFileChunks(entry.Chunks) + return dir.saveEntry() - return dir.wfs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error { +} - request := &filer_pb.DeleteEntryRequest{ - Directory: dir.Path, - Name: req.Name, - IsDeleteData: false, - } +func (dir *Dir) Setxattr(ctx context.Context, req *fuse.SetxattrRequest) error { - glog.V(3).Infof("remove file: %v", request) - _, err := client.DeleteEntry(ctx, request) - if err != nil { - glog.V(3).Infof("remove file %s/%s: %v", dir.Path, req.Name, err) - return fuse.ENOENT - } + glog.V(4).Infof("dir Setxattr %s: %s", dir.FullPath(), req.Name) - dir.wfs.listDirectoryEntriesCache.Delete(path.Join(dir.Path, req.Name)) + if err := dir.maybeLoadEntry(); err != nil { + return err + } - return nil - }) + if err := setxattr(dir.entry, req); err != nil { + return err + } -} + return dir.saveEntry() -func (dir *Dir) removeFolder(ctx context.Context, req *fuse.RemoveRequest) error { +} - return dir.wfs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error { +func (dir *Dir) Removexattr(ctx context.Context, req *fuse.RemovexattrRequest) error { - 
request := &filer_pb.DeleteEntryRequest{ - Directory: dir.Path, - Name: req.Name, - IsDeleteData: true, - } + glog.V(4).Infof("dir Removexattr %s: %s", dir.FullPath(), req.Name) - glog.V(3).Infof("remove directory entry: %v", request) - _, err := client.DeleteEntry(ctx, request) - if err != nil { - glog.V(3).Infof("remove %s/%s: %v", dir.Path, req.Name, err) - return fuse.ENOENT - } + if err := dir.maybeLoadEntry(); err != nil { + return err + } - dir.wfs.listDirectoryEntriesCache.Delete(path.Join(dir.Path, req.Name)) + if err := removexattr(dir.entry, req); err != nil { + return err + } - return nil - }) + return dir.saveEntry() } -func (dir *Dir) Setattr(ctx context.Context, req *fuse.SetattrRequest, resp *fuse.SetattrResponse) error { +func (dir *Dir) Listxattr(ctx context.Context, req *fuse.ListxattrRequest, resp *fuse.ListxattrResponse) error { - glog.V(3).Infof("%v dir setattr %+v, fh=%d", dir.Path, req, req.Handle) - if req.Valid.Mode() { - dir.attributes.FileMode = uint32(req.Mode) - } + glog.V(4).Infof("dir Listxattr %s", dir.FullPath()) - if req.Valid.Uid() { - dir.attributes.Uid = req.Uid + if err := dir.maybeLoadEntry(); err != nil { + return err } - if req.Valid.Gid() { - dir.attributes.Gid = req.Gid + if err := listxattr(dir.entry, req, resp); err != nil { + return err } - if req.Valid.Mtime() { - dir.attributes.Mtime = req.Mtime.Unix() + return nil + +} + +func (dir *Dir) Forget() { + glog.V(3).Infof("Forget dir %s", dir.FullPath()) + + dir.wfs.fsNodeCache.DeleteFsNode(util.FullPath(dir.FullPath())) +} + +func (dir *Dir) maybeLoadEntry() error { + if dir.entry == nil { + parentDirPath, name := util.FullPath(dir.FullPath()).DirAndName() + entry, err := dir.wfs.maybeLoadEntry(parentDirPath, name) + if err != nil { + return err + } + dir.entry = entry } + return nil +} - parentDir, name := filer2.FullPath(dir.Path).DirAndName() - return dir.wfs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error { +func (dir *Dir) saveEntry() error { + + 
parentDir, name := util.FullPath(dir.FullPath()).DirAndName() + + return dir.wfs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { request := &filer_pb.UpdateEntryRequest{ Directory: parentDir, - Entry: &filer_pb.Entry{ - Name: name, - Attributes: dir.attributes, - }, + Entry: dir.entry, } - glog.V(1).Infof("set attr directory entry: %v", request) - _, err := client.UpdateEntry(ctx, request) + glog.V(1).Infof("save dir entry: %v", request) + _, err := client.UpdateEntry(context.Background(), request) if err != nil { - glog.V(0).Infof("UpdateEntry %s: %v", dir.Path, err) + glog.V(0).Infof("UpdateEntry dir %s/%s: %v", parentDir, name, err) return fuse.EIO } - dir.wfs.listDirectoryEntriesCache.Delete(dir.Path) + dir.wfs.metaCache.UpdateEntry(context.Background(), filer2.FromPbEntry(request.Directory, request.Entry)) return nil }) - } -func estimatedCacheTtl(numEntries int) time.Duration { - if numEntries < 100 { - // 30 ms per entry - return 3 * time.Second - } - if numEntries < 1000 { - // 10 ms per entry - return 10 * time.Second +func (dir *Dir) FullPath() string { + var parts []string + for p := dir; p != nil; p = p.parent { + if strings.HasPrefix(p.name, "/") { + if len(p.name) > 1 { + parts = append(parts, p.name[1:]) + } + } else { + parts = append(parts, p.name) + } } - if numEntries < 10000 { - // 10 ms per entry - return 100 * time.Second + + if len(parts) == 0 { + return "/" } - // 2 ms per entry - return time.Duration(numEntries*2) * time.Millisecond + var buf bytes.Buffer + for i := len(parts) - 1; i >= 0; i-- { + buf.WriteString("/") + buf.WriteString(parts[i]) + } + return buf.String() } diff --git a/weed/filesys/dir_link.go b/weed/filesys/dir_link.go index 3b3735369..4990e743c 100644 --- a/weed/filesys/dir_link.go +++ b/weed/filesys/dir_link.go @@ -6,6 +6,7 @@ import ( "syscall" "time" + "github.com/chrislusf/seaweedfs/weed/filer2" "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" 
"github.com/seaweedfs/fuse" @@ -17,17 +18,17 @@ var _ = fs.NodeReadlinker(&File{}) func (dir *Dir) Symlink(ctx context.Context, req *fuse.SymlinkRequest) (fs.Node, error) { - glog.V(3).Infof("Symlink: %v/%v to %v", dir.Path, req.NewName, req.Target) + glog.V(3).Infof("Symlink: %v/%v to %v", dir.FullPath(), req.NewName, req.Target) request := &filer_pb.CreateEntryRequest{ - Directory: dir.Path, + Directory: dir.FullPath(), Entry: &filer_pb.Entry{ Name: req.NewName, IsDirectory: false, Attributes: &filer_pb.FuseAttributes{ Mtime: time.Now().Unix(), Crtime: time.Now().Unix(), - FileMode: uint32(os.FileMode(0755) | os.ModeSymlink), + FileMode: uint32((os.FileMode(0777) | os.ModeSymlink) &^ dir.wfs.option.Umask), Uid: req.Uid, Gid: req.Gid, SymlinkTarget: req.Target, @@ -35,11 +36,14 @@ func (dir *Dir) Symlink(ctx context.Context, req *fuse.SymlinkRequest) (fs.Node, }, } - err := dir.wfs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error { - if _, err := client.CreateEntry(ctx, request); err != nil { - glog.V(0).Infof("symlink %s/%s: %v", dir.Path, req.NewName, err) + err := dir.wfs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { + if err := filer_pb.CreateEntry(client, request); err != nil { + glog.V(0).Infof("symlink %s/%s: %v", dir.FullPath(), req.NewName, err) return fuse.EIO } + + dir.wfs.metaCache.InsertEntry(context.Background(), filer2.FromPbEntry(request.Directory, request.Entry)) + return nil }) @@ -51,7 +55,7 @@ func (dir *Dir) Symlink(ctx context.Context, req *fuse.SymlinkRequest) (fs.Node, func (file *File) Readlink(ctx context.Context, req *fuse.ReadlinkRequest) (string, error) { - if err := file.maybeLoadAttributes(ctx); err != nil { + if err := file.maybeLoadEntry(ctx); err != nil { return "", err } @@ -59,7 +63,7 @@ func (file *File) Readlink(ctx context.Context, req *fuse.ReadlinkRequest) (stri return "", fuse.Errno(syscall.EINVAL) } - glog.V(3).Infof("Readlink: %v/%v => %v", file.dir.Path, file.Name, 
file.entry.Attributes.SymlinkTarget) + glog.V(3).Infof("Readlink: %v/%v => %v", file.dir.FullPath(), file.Name, file.entry.Attributes.SymlinkTarget) return file.entry.Attributes.SymlinkTarget, nil diff --git a/weed/filesys/dir_rename.go b/weed/filesys/dir_rename.go index d29281f35..0f7f131b1 100644 --- a/weed/filesys/dir_rename.go +++ b/weed/filesys/dir_rename.go @@ -2,118 +2,49 @@ package filesys import ( "context" - "github.com/seaweedfs/fuse" - "github.com/seaweedfs/fuse/fs" - "math" - "path/filepath" "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/util" + "github.com/seaweedfs/fuse" + "github.com/seaweedfs/fuse/fs" ) func (dir *Dir) Rename(ctx context.Context, req *fuse.RenameRequest, newDirectory fs.Node) error { newDir := newDirectory.(*Dir) - return dir.wfs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error { + newPath := util.NewFullPath(newDir.FullPath(), req.NewName) + oldPath := util.NewFullPath(dir.FullPath(), req.OldName) - // find existing entry - request := &filer_pb.LookupDirectoryEntryRequest{ - Directory: dir.Path, - Name: req.OldName, + glog.V(4).Infof("dir Rename %s => %s", oldPath, newPath) + + err := dir.wfs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { + + request := &filer_pb.AtomicRenameEntryRequest{ + OldDirectory: dir.FullPath(), + OldName: req.OldName, + NewDirectory: newDir.FullPath(), + NewName: req.NewName, } - glog.V(4).Infof("find existing directory entry: %v", request) - resp, err := client.LookupDirectoryEntry(ctx, request) + _, err := client.AtomicRenameEntry(context.Background(), request) if err != nil { - glog.V(3).Infof("renaming find %s/%s: %v", dir.Path, req.OldName, err) - return fuse.ENOENT + glog.V(0).Infof("dir Rename %s => %s : %v", oldPath, newPath, err) + return fuse.EIO } - entry := resp.Entry + return nil - glog.V(4).Infof("found existing directory entry resp: %+v", resp) - - return moveEntry(ctx, 
client, dir.Path, entry, newDir.Path, req.NewName) }) -} - -func moveEntry(ctx context.Context, client filer_pb.SeaweedFilerClient, oldParent string, entry *filer_pb.Entry, newParent, newName string) error { - if entry.IsDirectory { - currentDirPath := filepath.Join(oldParent, entry.Name) + if err == nil { - lastFileName := "" - includeLastFile := false - limit := math.MaxInt32 - for limit > 0 { - request := &filer_pb.ListEntriesRequest{ - Directory: currentDirPath, - StartFromFileName: lastFileName, - InclusiveStartFrom: includeLastFile, - Limit: 1024, - } - glog.V(4).Infof("read directory: %v", request) - resp, err := client.ListEntries(ctx, request) - if err != nil { - glog.V(0).Infof("list %s: %v", oldParent, err) - return fuse.EIO - } - if len(resp.Entries) == 0 { - break - } - - for _, item := range resp.Entries { - lastFileName = item.Name - err := moveEntry(ctx, client, currentDirPath, item, filepath.Join(newParent, newName), item.Name) - if err != nil { - return err - } - limit-- - } - if len(resp.Entries) < 1024 { - break - } - } + // fmt.Printf("rename path: %v => %v\n", oldPath, newPath) + dir.wfs.fsNodeCache.Move(oldPath, newPath) + delete(dir.wfs.handles, oldPath.AsInode()) } - // add to new directory - { - request := &filer_pb.CreateEntryRequest{ - Directory: newParent, - Entry: &filer_pb.Entry{ - Name: newName, - IsDirectory: entry.IsDirectory, - Attributes: entry.Attributes, - Chunks: entry.Chunks, - }, - } - - glog.V(1).Infof("create new entry: %v", request) - if _, err := client.CreateEntry(ctx, request); err != nil { - glog.V(0).Infof("renaming create %s/%s: %v", newParent, newName, err) - return fuse.EIO - } - } - - // delete old entry - { - request := &filer_pb.DeleteEntryRequest{ - Directory: oldParent, - Name: entry.Name, - IsDeleteData: false, - } - - glog.V(1).Infof("remove old entry: %v", request) - _, err := client.DeleteEntry(ctx, request) - if err != nil { - glog.V(0).Infof("renaming delete %s/%s: %v", oldParent, entry.Name, err) - 
return fuse.EIO - } - - } - - return nil - + return err } diff --git a/weed/filesys/dir_test.go b/weed/filesys/dir_test.go new file mode 100644 index 000000000..49c76eb5e --- /dev/null +++ b/weed/filesys/dir_test.go @@ -0,0 +1,34 @@ +package filesys + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestDirPath(t *testing.T) { + + p := &Dir{name: "/some"} + p = &Dir{name: "path", parent: p} + p = &Dir{name: "to", parent: p} + p = &Dir{name: "a", parent: p} + p = &Dir{name: "file", parent: p} + + assert.Equal(t, "/some/path/to/a/file", p.FullPath()) + + p = &Dir{name: "/some"} + assert.Equal(t, "/some", p.FullPath()) + + p = &Dir{name: "/"} + assert.Equal(t, "/", p.FullPath()) + + p = &Dir{name: "/"} + p = &Dir{name: "path", parent: p} + assert.Equal(t, "/path", p.FullPath()) + + p = &Dir{name: "/"} + p = &Dir{name: "path", parent: p} + p = &Dir{name: "to", parent: p} + assert.Equal(t, "/path/to", p.FullPath()) + +} diff --git a/weed/filesys/dirty_page.go b/weed/filesys/dirty_page.go index 696296e62..45224b3e7 100644 --- a/weed/filesys/dirty_page.go +++ b/weed/filesys/dirty_page.go @@ -4,168 +4,149 @@ import ( "bytes" "context" "fmt" - "sync/atomic" + "io" + "sync" "time" "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/operation" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" - "sync" + "github.com/chrislusf/seaweedfs/weed/security" ) type ContinuousDirtyPages struct { - hasData bool - Offset int64 - Size int64 - Data []byte - f *File - lock sync.Mutex + intervals *ContinuousIntervals + f *File + lock sync.Mutex + collection string + replication string } func newDirtyPages(file *File) *ContinuousDirtyPages { return &ContinuousDirtyPages{ - Data: nil, - f: file, + intervals: &ContinuousIntervals{}, + f: file, } } func (pages *ContinuousDirtyPages) releaseResource() { - if pages.Data != nil { - pages.f.wfs.bufPool.Put(pages.Data) - pages.Data = nil - atomic.AddInt32(&counter, -1) - glog.V(3).Infof("%s/%s 
releasing resource %d", pages.f.dir.Path, pages.f.Name, counter) - } } var counter = int32(0) -func (pages *ContinuousDirtyPages) AddPage(ctx context.Context, offset int64, data []byte) (chunks []*filer_pb.FileChunk, err error) { +func (pages *ContinuousDirtyPages) AddPage(offset int64, data []byte) (chunks []*filer_pb.FileChunk, err error) { pages.lock.Lock() defer pages.lock.Unlock() - var chunk *filer_pb.FileChunk + glog.V(3).Infof("%s AddPage [%d,%d)", pages.f.fullpath(), offset, offset+int64(len(data))) if len(data) > int(pages.f.wfs.option.ChunkSizeLimit) { // this is more than what buffer can hold. - return pages.flushAndSave(ctx, offset, data) + return pages.flushAndSave(offset, data) } - if pages.Data == nil { - pages.Data = pages.f.wfs.bufPool.Get().([]byte) - atomic.AddInt32(&counter, 1) - glog.V(3).Infof("%s/%s acquire resource %d", pages.f.dir.Path, pages.f.Name, counter) - } - - if offset < pages.Offset || offset >= pages.Offset+int64(len(pages.Data)) || - pages.Offset+int64(len(pages.Data)) < offset+int64(len(data)) { - // if the data is out of range, - // or buffer is full if adding new data, - // flush current buffer and add new data - - // println("offset", offset, "size", len(data), "existing offset", pages.Offset, "size", pages.Size) + pages.intervals.AddInterval(data, offset) - if chunk, err = pages.saveExistingPagesToStorage(ctx); err == nil { - if chunk != nil { - glog.V(4).Infof("%s/%s add save [%d,%d)", pages.f.dir.Path, pages.f.Name, chunk.Offset, chunk.Offset+int64(chunk.Size)) - chunks = append(chunks, chunk) - } - } else { - glog.V(0).Infof("%s/%s add save [%d,%d): %v", pages.f.dir.Path, pages.f.Name, chunk.Offset, chunk.Offset+int64(chunk.Size), err) - return - } - pages.Offset = offset - copy(pages.Data, data) - pages.Size = int64(len(data)) - return - } + var chunk *filer_pb.FileChunk + var hasSavedData bool - if offset != pages.Offset+pages.Size { - // when this happens, debug shows the data overlapping with existing data is empty - 
// the data is not just append - if offset == pages.Offset && int(pages.Size) < len(data) { - // glog.V(2).Infof("pages[%d,%d) pages.Data len=%v, data len=%d, pages.Size=%d", pages.Offset, pages.Offset+pages.Size, len(pages.Data), len(data), pages.Size) - copy(pages.Data[pages.Size:], data[pages.Size:]) - } else { - if pages.Size != 0 { - glog.V(1).Infof("%s/%s add page: pages [%d, %d) write [%d, %d)", pages.f.dir.Path, pages.f.Name, pages.Offset, pages.Offset+pages.Size, offset, offset+int64(len(data))) - } - return pages.flushAndSave(ctx, offset, data) + if pages.intervals.TotalSize() > pages.f.wfs.option.ChunkSizeLimit { + chunk, hasSavedData, err = pages.saveExistingLargestPageToStorage() + if hasSavedData { + chunks = append(chunks, chunk) } - } else { - copy(pages.Data[offset-pages.Offset:], data) } - pages.Size = max(pages.Size, offset+int64(len(data))-pages.Offset) - return } -func (pages *ContinuousDirtyPages) flushAndSave(ctx context.Context, offset int64, data []byte) (chunks []*filer_pb.FileChunk, err error) { +func (pages *ContinuousDirtyPages) flushAndSave(offset int64, data []byte) (chunks []*filer_pb.FileChunk, err error) { var chunk *filer_pb.FileChunk + var newChunks []*filer_pb.FileChunk // flush existing - if chunk, err = pages.saveExistingPagesToStorage(ctx); err == nil { - if chunk != nil { - glog.V(4).Infof("%s/%s flush existing [%d,%d)", pages.f.dir.Path, pages.f.Name, chunk.Offset, chunk.Offset+int64(chunk.Size)) - chunks = append(chunks, chunk) + if newChunks, err = pages.saveExistingPagesToStorage(); err == nil { + if newChunks != nil { + chunks = append(chunks, newChunks...) 
} } else { - glog.V(0).Infof("%s/%s failed to flush1 [%d,%d): %v", pages.f.dir.Path, pages.f.Name, chunk.Offset, chunk.Offset+int64(chunk.Size), err) return } - pages.Size = 0 - pages.Offset = 0 // flush the new page - if chunk, err = pages.saveToStorage(ctx, data, offset); err == nil { + if chunk, err = pages.saveToStorage(bytes.NewReader(data), offset, int64(len(data))); err == nil { if chunk != nil { - glog.V(4).Infof("%s/%s flush big request [%d,%d)", pages.f.dir.Path, pages.f.Name, chunk.Offset, chunk.Offset+int64(chunk.Size)) + glog.V(4).Infof("%s/%s flush big request [%d,%d) to %s", pages.f.dir.FullPath(), pages.f.Name, chunk.Offset, chunk.Offset+int64(chunk.Size), chunk.FileId) chunks = append(chunks, chunk) } } else { - glog.V(0).Infof("%s/%s failed to flush2 [%d,%d): %v", pages.f.dir.Path, pages.f.Name, chunk.Offset, chunk.Offset+int64(chunk.Size), err) + glog.V(0).Infof("%s/%s failed to flush2 [%d,%d): %v", pages.f.dir.FullPath(), pages.f.Name, chunk.Offset, chunk.Offset+int64(chunk.Size), err) return } return } -func (pages *ContinuousDirtyPages) FlushToStorage(ctx context.Context) (chunk *filer_pb.FileChunk, err error) { +func (pages *ContinuousDirtyPages) FlushToStorage() (chunks []*filer_pb.FileChunk, err error) { pages.lock.Lock() defer pages.lock.Unlock() - if pages.Size == 0 { - return nil, nil - } + return pages.saveExistingPagesToStorage() +} - if chunk, err = pages.saveExistingPagesToStorage(ctx); err == nil { - pages.Size = 0 - pages.Offset = 0 - if chunk != nil { - glog.V(4).Infof("%s/%s flush [%d,%d)", pages.f.dir.Path, pages.f.Name, chunk.Offset, chunk.Offset+int64(chunk.Size)) +func (pages *ContinuousDirtyPages) saveExistingPagesToStorage() (chunks []*filer_pb.FileChunk, err error) { + + var hasSavedData bool + var chunk *filer_pb.FileChunk + + for { + + chunk, hasSavedData, err = pages.saveExistingLargestPageToStorage() + if !hasSavedData { + return chunks, err + } + + if err == nil { + chunks = append(chunks, chunk) + } else { + return } 
} - return + } -func (pages *ContinuousDirtyPages) saveExistingPagesToStorage(ctx context.Context) (*filer_pb.FileChunk, error) { +func (pages *ContinuousDirtyPages) saveExistingLargestPageToStorage() (chunk *filer_pb.FileChunk, hasSavedData bool, err error) { - if pages.Size == 0 { - return nil, nil + maxList := pages.intervals.RemoveLargestIntervalLinkedList() + if maxList == nil { + return nil, false, nil + } + + for { + chunk, err = pages.saveToStorage(maxList.ToReader(), maxList.Offset(), maxList.Size()) + if err == nil { + hasSavedData = true + glog.V(3).Infof("%s saveToStorage [%d,%d) %s", pages.f.fullpath(), maxList.Offset(), maxList.Offset()+maxList.Size(), chunk.FileId) + return + } else { + glog.V(0).Infof("%s saveToStorage [%d,%d): %v", pages.f.fullpath(), maxList.Offset(), maxList.Offset()+maxList.Size(), err) + time.Sleep(5 * time.Second) + } } - return pages.saveToStorage(ctx, pages.Data[:pages.Size], pages.Offset) } -func (pages *ContinuousDirtyPages) saveToStorage(ctx context.Context, buf []byte, offset int64) (*filer_pb.FileChunk, error) { +func (pages *ContinuousDirtyPages) saveToStorage(reader io.Reader, offset int64, size int64) (*filer_pb.FileChunk, error) { var fileId, host string + var auth security.EncodedJwt + + dir, _ := pages.f.fullpath().DirAndName() - if err := pages.f.wfs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error { + if err := pages.f.wfs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { request := &filer_pb.AssignVolumeRequest{ Count: 1, @@ -173,15 +154,21 @@ func (pages *ContinuousDirtyPages) saveToStorage(ctx context.Context, buf []byte Collection: pages.f.wfs.option.Collection, TtlSec: pages.f.wfs.option.TtlSec, DataCenter: pages.f.wfs.option.DataCenter, + ParentPath: dir, } - resp, err := client.AssignVolume(ctx, request) + resp, err := client.AssignVolume(context.Background(), request) if err != nil { glog.V(0).Infof("assign volume failure %v: %v", request, err) return err } + if resp.Error != 
"" { + return fmt.Errorf("assign volume failure %v: %v", request, resp.Error) + } - fileId, host = resp.FileId, resp.Url + fileId, host, auth = resp.FileId, resp.Url, security.EncodedJwt(resp.Auth) + host = pages.f.wfs.AdjustedUrl(host) + pages.collection, pages.replication = resp.Collection, resp.Replication return nil }); err != nil { @@ -189,8 +176,7 @@ func (pages *ContinuousDirtyPages) saveToStorage(ctx context.Context, buf []byte } fileUrl := fmt.Sprintf("http://%s/%s", host, fileId) - bufReader := bytes.NewReader(buf) - uploadResult, err := operation.Upload(fileUrl, pages.f.Name, bufReader, false, "application/octet-stream", nil, "") + uploadResult, err, data := operation.Upload(fileUrl, pages.f.Name, pages.f.wfs.option.Cipher, reader, false, "", nil, auth) if err != nil { glog.V(0).Infof("upload data %v to %s: %v", pages.f.Name, fileUrl, err) return nil, fmt.Errorf("upload data: %v", err) @@ -199,14 +185,9 @@ func (pages *ContinuousDirtyPages) saveToStorage(ctx context.Context, buf []byte glog.V(0).Infof("upload failure %v to %s: %v", pages.f.Name, fileUrl, err) return nil, fmt.Errorf("upload result: %v", uploadResult.Error) } + pages.f.wfs.chunkCache.SetChunk(fileId, data) - return &filer_pb.FileChunk{ - FileId: fileId, - Offset: offset, - Size: uint64(len(buf)), - Mtime: time.Now().UnixNano(), - ETag: uploadResult.ETag, - }, nil + return uploadResult.ToPbFileChunk(fileId, offset), nil } @@ -216,3 +197,18 @@ func max(x, y int64) int64 { } return y } +func min(x, y int64) int64 { + if x < y { + return x + } + return y +} + +func (pages *ContinuousDirtyPages) ReadDirtyData(data []byte, startOffset int64) (offset int64, size int) { + + pages.lock.Lock() + defer pages.lock.Unlock() + + return pages.intervals.ReadData(data, startOffset) + +} diff --git a/weed/filesys/dirty_page_interval.go b/weed/filesys/dirty_page_interval.go new file mode 100644 index 000000000..ec94c6df1 --- /dev/null +++ b/weed/filesys/dirty_page_interval.go @@ -0,0 +1,220 @@ +package 
filesys + +import ( + "bytes" + "io" + "math" +) + +type IntervalNode struct { + Data []byte + Offset int64 + Size int64 + Next *IntervalNode +} + +type IntervalLinkedList struct { + Head *IntervalNode + Tail *IntervalNode +} + +type ContinuousIntervals struct { + lists []*IntervalLinkedList +} + +func (list *IntervalLinkedList) Offset() int64 { + return list.Head.Offset +} +func (list *IntervalLinkedList) Size() int64 { + return list.Tail.Offset + list.Tail.Size - list.Head.Offset +} +func (list *IntervalLinkedList) addNodeToTail(node *IntervalNode) { + // glog.V(4).Infof("add to tail [%d,%d) + [%d,%d) => [%d,%d)", list.Head.Offset, list.Tail.Offset+list.Tail.Size, node.Offset, node.Offset+node.Size, list.Head.Offset, node.Offset+node.Size) + list.Tail.Next = node + list.Tail = node +} +func (list *IntervalLinkedList) addNodeToHead(node *IntervalNode) { + // glog.V(4).Infof("add to head [%d,%d) + [%d,%d) => [%d,%d)", node.Offset, node.Offset+node.Size, list.Head.Offset, list.Tail.Offset+list.Tail.Size, node.Offset, list.Tail.Offset+list.Tail.Size) + node.Next = list.Head + list.Head = node +} + +func (list *IntervalLinkedList) ReadData(buf []byte, start, stop int64) { + t := list.Head + for { + + nodeStart, nodeStop := max(start, t.Offset), min(stop, t.Offset+t.Size) + if nodeStart < nodeStop { + // glog.V(0).Infof("copying start=%d stop=%d t=[%d,%d) t.data=%d => bufSize=%d nodeStart=%d, nodeStop=%d", start, stop, t.Offset, t.Offset+t.Size, len(t.Data), len(buf), nodeStart, nodeStop) + copy(buf[nodeStart-start:], t.Data[nodeStart-t.Offset:nodeStop-t.Offset]) + } + + if t.Next == nil { + break + } + t = t.Next + } +} + +func (c *ContinuousIntervals) TotalSize() (total int64) { + for _, list := range c.lists { + total += list.Size() + } + return +} + +func subList(list *IntervalLinkedList, start, stop int64) *IntervalLinkedList { + var nodes []*IntervalNode + for t := list.Head; t != nil; t = t.Next { + nodeStart, nodeStop := max(start, t.Offset), min(stop, 
t.Offset+t.Size) + if nodeStart >= nodeStop { + // skip non overlapping IntervalNode + continue + } + nodes = append(nodes, &IntervalNode{ + Data: t.Data[nodeStart-t.Offset : nodeStop-t.Offset], + Offset: nodeStart, + Size: nodeStop - nodeStart, + Next: nil, + }) + } + for i := 1; i < len(nodes); i++ { + nodes[i-1].Next = nodes[i] + } + return &IntervalLinkedList{ + Head: nodes[0], + Tail: nodes[len(nodes)-1], + } +} + +func (c *ContinuousIntervals) AddInterval(data []byte, offset int64) { + + interval := &IntervalNode{Data: data, Offset: offset, Size: int64(len(data))} + + var newLists []*IntervalLinkedList + for _, list := range c.lists { + // if list is to the left of new interval, add to the new list + if list.Tail.Offset+list.Tail.Size <= interval.Offset { + newLists = append(newLists, list) + } + // if list is to the right of new interval, add to the new list + if interval.Offset+interval.Size <= list.Head.Offset { + newLists = append(newLists, list) + } + // if new interval overwrite the right part of the list + if list.Head.Offset < interval.Offset && interval.Offset < list.Tail.Offset+list.Tail.Size { + // create a new list of the left part of existing list + newLists = append(newLists, subList(list, list.Offset(), interval.Offset)) + } + // if new interval overwrite the left part of the list + if list.Head.Offset < interval.Offset+interval.Size && interval.Offset+interval.Size < list.Tail.Offset+list.Tail.Size { + // create a new list of the right part of existing list + newLists = append(newLists, subList(list, interval.Offset+interval.Size, list.Tail.Offset+list.Tail.Size)) + } + // skip anything that is fully overwritten by the new interval + } + + c.lists = newLists + // add the new interval to the lists, connecting neighbor lists + var prevList, nextList *IntervalLinkedList + + for _, list := range c.lists { + if list.Head.Offset == interval.Offset+interval.Size { + nextList = list + break + } + } + + for _, list := range c.lists { + if 
list.Head.Offset+list.Size() == offset { + list.addNodeToTail(interval) + prevList = list + break + } + } + + if prevList != nil && nextList != nil { + // glog.V(4).Infof("connecting [%d,%d) + [%d,%d) => [%d,%d)", prevList.Head.Offset, prevList.Tail.Offset+prevList.Tail.Size, nextList.Head.Offset, nextList.Tail.Offset+nextList.Tail.Size, prevList.Head.Offset, nextList.Tail.Offset+nextList.Tail.Size) + prevList.Tail.Next = nextList.Head + prevList.Tail = nextList.Tail + c.removeList(nextList) + } else if nextList != nil { + // add to head was not done when checking + nextList.addNodeToHead(interval) + } + if prevList == nil && nextList == nil { + c.lists = append(c.lists, &IntervalLinkedList{ + Head: interval, + Tail: interval, + }) + } + + return +} + +func (c *ContinuousIntervals) RemoveLargestIntervalLinkedList() *IntervalLinkedList { + var maxSize int64 + maxIndex := -1 + for k, list := range c.lists { + if maxSize <= list.Size() { + maxSize = list.Size() + maxIndex = k + } + } + if maxSize <= 0 { + return nil + } + + t := c.lists[maxIndex] + c.lists = append(c.lists[0:maxIndex], c.lists[maxIndex+1:]...) + return t + +} + +func (c *ContinuousIntervals) removeList(target *IntervalLinkedList) { + index := -1 + for k, list := range c.lists { + if list.Offset() == target.Offset() { + index = k + } + } + if index < 0 { + return + } + + c.lists = append(c.lists[0:index], c.lists[index+1:]...) 
+ +} + +func (c *ContinuousIntervals) ReadData(data []byte, startOffset int64) (offset int64, size int) { + var minOffset int64 = math.MaxInt64 + var maxStop int64 + for _, list := range c.lists { + start := max(startOffset, list.Offset()) + stop := min(startOffset+int64(len(data)), list.Offset()+list.Size()) + if start <= stop { + list.ReadData(data[start-startOffset:], start, stop) + minOffset = min(minOffset, start) + maxStop = max(maxStop, stop) + } + } + + if minOffset == math.MaxInt64 { + return 0, 0 + } + + offset = minOffset + size = int(maxStop - offset) + return +} + +func (l *IntervalLinkedList) ToReader() io.Reader { + var readers []io.Reader + t := l.Head + readers = append(readers, bytes.NewReader(t.Data)) + for t.Next != nil { + t = t.Next + readers = append(readers, bytes.NewReader(t.Data)) + } + return io.MultiReader(readers...) +} diff --git a/weed/filesys/dirty_page_interval_test.go b/weed/filesys/dirty_page_interval_test.go new file mode 100644 index 000000000..ab3b37b7c --- /dev/null +++ b/weed/filesys/dirty_page_interval_test.go @@ -0,0 +1,89 @@ +package filesys + +import ( + "bytes" + "testing" +) + +func TestContinuousIntervals_AddIntervalAppend(t *testing.T) { + + c := &ContinuousIntervals{} + + // 25, 25, 25 + c.AddInterval(getBytes(25, 3), 0) + // _, _, 23, 23, 23, 23 + c.AddInterval(getBytes(23, 4), 2) + + expectedData(t, c, 0, 25, 25, 23, 23, 23, 23) + +} + +func TestContinuousIntervals_AddIntervalInnerOverwrite(t *testing.T) { + + c := &ContinuousIntervals{} + + // 25, 25, 25, 25, 25 + c.AddInterval(getBytes(25, 5), 0) + // _, _, 23, 23 + c.AddInterval(getBytes(23, 2), 2) + + expectedData(t, c, 0, 25, 25, 23, 23, 25) + +} + +func TestContinuousIntervals_AddIntervalFullOverwrite(t *testing.T) { + + c := &ContinuousIntervals{} + + // 1, + c.AddInterval(getBytes(1, 1), 0) + // _, 2, + c.AddInterval(getBytes(2, 1), 1) + // _, _, 3, 3, 3 + c.AddInterval(getBytes(3, 3), 2) + // _, _, _, 4, 4, 4 + c.AddInterval(getBytes(4, 3), 3) + + 
expectedData(t, c, 0, 1, 2, 3, 4, 4, 4) + +} + +func TestContinuousIntervals_RealCase1(t *testing.T) { + + c := &ContinuousIntervals{} + + // 25, + c.AddInterval(getBytes(25, 1), 0) + // _, _, _, _, 23, 23 + c.AddInterval(getBytes(23, 2), 4) + // _, _, _, 24, 24, 24, 24 + c.AddInterval(getBytes(24, 4), 3) + + // _, 22, 22 + c.AddInterval(getBytes(22, 2), 1) + + expectedData(t, c, 0, 25, 22, 22, 24, 24, 24, 24) + +} + +func expectedData(t *testing.T, c *ContinuousIntervals, offset int, data ...byte) { + start, stop := int64(offset), int64(offset+len(data)) + for _, list := range c.lists { + nodeStart, nodeStop := max(start, list.Head.Offset), min(stop, list.Head.Offset+list.Size()) + if nodeStart < nodeStop { + buf := make([]byte, nodeStop-nodeStart) + list.ReadData(buf, nodeStart, nodeStop) + if bytes.Compare(buf, data[nodeStart-start:nodeStop-start]) != 0 { + t.Errorf("expected %v actual %v", data[nodeStart-start:nodeStop-start], buf) + } + } + } +} + +func getBytes(content byte, length int) []byte { + data := make([]byte, length) + for i := 0; i < length; i++ { + data[i] = content + } + return data +} diff --git a/weed/filesys/file.go b/weed/filesys/file.go index 4bb169a33..4a6bc9a8a 100644 --- a/weed/filesys/file.go +++ b/weed/filesys/file.go @@ -2,14 +2,15 @@ package filesys import ( "context" + "io" "os" - "path/filepath" "sort" "time" "github.com/chrislusf/seaweedfs/weed/filer2" "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/util" "github.com/seaweedfs/fuse" "github.com/seaweedfs/fuse/fs" ) @@ -20,6 +21,11 @@ var _ = fs.Node(&File{}) var _ = fs.NodeOpener(&File{}) var _ = fs.NodeFsyncer(&File{}) var _ = fs.NodeSetattrer(&File{}) +var _ = fs.NodeGetxattrer(&File{}) +var _ = fs.NodeSetxattrer(&File{}) +var _ = fs.NodeRemovexattrer(&File{}) +var _ = fs.NodeListxattrer(&File{}) +var _ = fs.NodeForgetter(&File{}) type File struct { Name string @@ -27,21 +33,33 @@ type File struct 
{ wfs *WFS entry *filer_pb.Entry entryViewCache []filer2.VisibleInterval - isOpen bool + isOpen int + reader io.ReaderAt } -func (file *File) fullpath() string { - return filepath.Join(file.dir.Path, file.Name) +func (file *File) fullpath() util.FullPath { + return util.NewFullPath(file.dir.FullPath(), file.Name) } func (file *File) Attr(ctx context.Context, attr *fuse.Attr) error { - if err := file.maybeLoadAttributes(ctx); err != nil { - return err + glog.V(4).Infof("file Attr %s, open:%v, existing attr: %+v", file.fullpath(), file.isOpen, attr) + + if file.isOpen <= 0 { + if err := file.maybeLoadEntry(ctx); err != nil { + return err + } } + attr.Inode = file.fullpath().AsInode() + attr.Valid = time.Second attr.Mode = os.FileMode(file.entry.Attributes.FileMode) attr.Size = filer2.TotalSize(file.entry.Chunks) + if file.isOpen > 0 { + attr.Size = file.entry.Attributes.FileSize + glog.V(4).Infof("file Attr %s, open:%v, size: %d", file.fullpath(), file.isOpen, attr.Size) + } + attr.Crtime = time.Unix(file.entry.Attributes.Crtime, 0) attr.Mtime = time.Unix(file.entry.Attributes.Mtime, 0) attr.Gid = file.entry.Attributes.Gid attr.Uid = file.entry.Attributes.Uid @@ -52,11 +70,22 @@ func (file *File) Attr(ctx context.Context, attr *fuse.Attr) error { } +func (file *File) Getxattr(ctx context.Context, req *fuse.GetxattrRequest, resp *fuse.GetxattrResponse) error { + + glog.V(4).Infof("file Getxattr %s", file.fullpath()) + + if err := file.maybeLoadEntry(ctx); err != nil { + return err + } + + return getxattr(file.entry, req, resp) +} + func (file *File) Open(ctx context.Context, req *fuse.OpenRequest, resp *fuse.OpenResponse) (fs.Handle, error) { - glog.V(3).Infof("%v file open %+v", file.fullpath(), req) + glog.V(4).Infof("file %v open %+v", file.fullpath(), req) - file.isOpen = true + file.isOpen++ handle := file.wfs.AcquireHandle(file, req.Uid, req.Gid) @@ -70,22 +99,30 @@ func (file *File) Open(ctx context.Context, req *fuse.OpenRequest, resp *fuse.Op func (file 
*File) Setattr(ctx context.Context, req *fuse.SetattrRequest, resp *fuse.SetattrResponse) error { - if err := file.maybeLoadAttributes(ctx); err != nil { - return err - } + glog.V(3).Infof("%v file setattr %+v, old:%+v", file.fullpath(), req, file.entry.Attributes) - if file.isOpen { - return nil + if err := file.maybeLoadEntry(ctx); err != nil { + return err } - glog.V(3).Infof("%v file setattr %+v, old:%+v", file.fullpath(), req, file.entry.Attributes) if req.Valid.Size() { glog.V(3).Infof("%v file setattr set size=%v", file.fullpath(), req.Size) - if req.Size == 0 { + if req.Size < filer2.TotalSize(file.entry.Chunks) { // fmt.Printf("truncate %v \n", fullPath) - file.entry.Chunks = nil + var chunks []*filer_pb.FileChunk + for _, chunk := range file.entry.Chunks { + int64Size := int64(chunk.Size) + if chunk.Offset+int64Size > int64(req.Size) { + int64Size = int64(req.Size) - chunk.Offset + } + if int64Size > 0 { + chunks = append(chunks, chunk) + } + } + file.entry.Chunks = chunks file.entryViewCache = nil + file.reader = nil } file.entry.Attributes.FileSize = req.Size } @@ -109,75 +146,88 @@ func (file *File) Setattr(ctx context.Context, req *fuse.SetattrRequest, resp *f file.entry.Attributes.Mtime = req.Mtime.Unix() } - return file.wfs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error { + if file.isOpen > 0 { + return nil + } - request := &filer_pb.UpdateEntryRequest{ - Directory: file.dir.Path, - Entry: file.entry, - } + return file.saveEntry() - glog.V(1).Infof("set attr file entry: %v", request) - _, err := client.UpdateEntry(ctx, request) - if err != nil { - glog.V(0).Infof("UpdateEntry file %s/%s: %v", file.dir.Path, file.Name, err) - return fuse.EIO - } +} - return nil - }) +func (file *File) Setxattr(ctx context.Context, req *fuse.SetxattrRequest) error { -} + glog.V(4).Infof("file Setxattr %s: %s", file.fullpath(), req.Name) -func (file *File) Fsync(ctx context.Context, req *fuse.FsyncRequest) error { - // fsync works at OS level - // write 
the file chunks to the filerGrpcAddress - glog.V(3).Infof("%s/%s fsync file %+v", file.dir.Path, file.Name, req) + if err := file.maybeLoadEntry(ctx); err != nil { + return err + } + + if err := setxattr(file.entry, req); err != nil { + return err + } + + return file.saveEntry() - return nil } -func (file *File) maybeLoadAttributes(ctx context.Context) error { - if file.entry == nil || !file.isOpen { - item := file.wfs.listDirectoryEntriesCache.Get(file.fullpath()) - if item != nil && !item.Expired() { - entry := item.Value().(*filer_pb.Entry) - file.setEntry(entry) - // glog.V(1).Infof("file attr read cached %v attributes", file.Name) - } else { - err := file.wfs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error { +func (file *File) Removexattr(ctx context.Context, req *fuse.RemovexattrRequest) error { - request := &filer_pb.LookupDirectoryEntryRequest{ - Name: file.Name, - Directory: file.dir.Path, - } + glog.V(4).Infof("file Removexattr %s: %s", file.fullpath(), req.Name) - resp, err := client.LookupDirectoryEntry(ctx, request) - if err != nil { - glog.V(3).Infof("file attr read file %v: %v", request, err) - return fuse.ENOENT - } + if err := file.maybeLoadEntry(ctx); err != nil { + return err + } - file.setEntry(resp.Entry) + if err := removexattr(file.entry, req); err != nil { + return err + } - glog.V(3).Infof("file attr %v %+v: %d", file.fullpath(), file.entry.Attributes, filer2.TotalSize(file.entry.Chunks)) + return file.saveEntry() - // file.wfs.listDirectoryEntriesCache.Set(file.fullpath(), file.entry, file.wfs.option.EntryCacheTtl) +} - return nil - }) +func (file *File) Listxattr(ctx context.Context, req *fuse.ListxattrRequest, resp *fuse.ListxattrResponse) error { - if err != nil { - return err - } - } + glog.V(4).Infof("file Listxattr %s", file.fullpath()) + + if err := file.maybeLoadEntry(ctx); err != nil { + return err + } + + if err := listxattr(file.entry, req, resp); err != nil { + return err } + return nil + +} + +func (file *File) 
Fsync(ctx context.Context, req *fuse.FsyncRequest) error { + // fsync works at OS level + // write the file chunks to the filerGrpcAddress + glog.V(3).Infof("%s/%s fsync file %+v", file.dir.FullPath(), file.Name, req) + + return nil +} + +func (file *File) Forget() { + t := util.NewFullPath(file.dir.FullPath(), file.Name) + glog.V(3).Infof("Forget file %s", t) + file.wfs.fsNodeCache.DeleteFsNode(t) } -func (file *File) addChunk(chunk *filer_pb.FileChunk) { - if chunk != nil { - file.addChunks([]*filer_pb.FileChunk{chunk}) +func (file *File) maybeLoadEntry(ctx context.Context) error { + if file.entry == nil || file.isOpen <= 0 { + entry, err := file.wfs.maybeLoadEntry(file.dir.FullPath(), file.Name) + if err != nil { + glog.V(3).Infof("maybeLoadEntry file %s/%s: %v", file.dir.FullPath(), file.Name, err) + return err + } + if entry != nil { + file.setEntry(entry) + } } + return nil } func (file *File) addChunks(chunks []*filer_pb.FileChunk) { @@ -194,10 +244,36 @@ func (file *File) addChunks(chunks []*filer_pb.FileChunk) { newVisibles = t } + file.reader = nil + + glog.V(3).Infof("%s existing %d chunks adds %d more", file.fullpath(), len(file.entry.Chunks), len(chunks)) + file.entry.Chunks = append(file.entry.Chunks, chunks...) 
} func (file *File) setEntry(entry *filer_pb.Entry) { file.entry = entry file.entryViewCache = filer2.NonOverlappingVisibleIntervals(file.entry.Chunks) + file.reader = nil +} + +func (file *File) saveEntry() error { + return file.wfs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { + + request := &filer_pb.UpdateEntryRequest{ + Directory: file.dir.FullPath(), + Entry: file.entry, + } + + glog.V(1).Infof("save file entry: %v", request) + _, err := client.UpdateEntry(context.Background(), request) + if err != nil { + glog.V(0).Infof("UpdateEntry file %s/%s: %v", file.dir.FullPath(), file.Name, err) + return fuse.EIO + } + + file.wfs.metaCache.UpdateEntry(context.Background(), filer2.FromPbEntry(request.Directory, request.Entry)) + + return nil + }) } diff --git a/weed/filesys/filehandle.go b/weed/filesys/filehandle.go index 0f6ca1164..9b9df916c 100644 --- a/weed/filesys/filehandle.go +++ b/weed/filesys/filehandle.go @@ -3,17 +3,17 @@ package filesys import ( "context" "fmt" + "io" + "math" + "net/http" + "os" + "time" + "github.com/chrislusf/seaweedfs/weed/filer2" "github.com/chrislusf/seaweedfs/weed/glog" - "github.com/chrislusf/seaweedfs/weed/operation" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" - "github.com/chrislusf/seaweedfs/weed/util" "github.com/seaweedfs/fuse" "github.com/seaweedfs/fuse/fs" - "net/http" - "strings" - "sync" - "time" ) type FileHandle struct { @@ -28,15 +28,20 @@ type FileHandle struct { NodeId fuse.NodeID // file or directory the request is about Uid uint32 // user ID of process making request Gid uint32 // group ID of process making request + } func newFileHandle(file *File, uid, gid uint32) *FileHandle { - return &FileHandle{ + fh := &FileHandle{ f: file, dirtyPages: newDirtyPages(file), Uid: uid, Gid: gid, } + if fh.f.entry != nil { + fh.f.entry.Attributes.FileSize = filer2.TotalSize(fh.f.entry.Chunks) + } + return fh } var _ = fs.Handle(&FileHandle{}) @@ -51,115 +56,91 @@ func (fh *FileHandle) Read(ctx 
context.Context, req *fuse.ReadRequest, resp *fus glog.V(4).Infof("%s read fh %d: [%d,%d)", fh.f.fullpath(), fh.handle, req.Offset, req.Offset+int64(req.Size)) - // this value should come from the filer instead of the old f - if len(fh.f.entry.Chunks) == 0 { - glog.V(1).Infof("empty fh %v/%v", fh.f.dir.Path, fh.f.Name) - return nil - } - buff := make([]byte, req.Size) - if fh.f.entryViewCache == nil { - fh.f.entryViewCache = filer2.NonOverlappingVisibleIntervals(fh.f.entry.Chunks) - } - - chunkViews := filer2.ViewFromVisibleIntervals(fh.f.entryViewCache, req.Offset, req.Size) - - var vids []string - for _, chunkView := range chunkViews { - vids = append(vids, volumeId(chunkView.FileId)) - } - - vid2Locations := make(map[string]*filer_pb.Locations) - - err := fh.f.wfs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error { - - glog.V(4).Infof("read fh lookup volume id locations: %v", vids) - resp, err := client.LookupVolume(ctx, &filer_pb.LookupVolumeRequest{ - VolumeIds: vids, - }) - if err != nil { - return err + totalRead, err := fh.readFromChunks(buff, req.Offset) + if err == nil { + dirtyOffset, dirtySize := fh.readFromDirtyPages(buff, req.Offset) + if totalRead+req.Offset < dirtyOffset+int64(dirtySize) { + totalRead = dirtyOffset + int64(dirtySize) - req.Offset } + } - vid2Locations = resp.LocationsMap - - return nil - }) + resp.Data = buff[:totalRead] if err != nil { - glog.V(4).Infof("%v/%v read fh lookup volume ids: %v", fh.f.dir.Path, fh.f.Name, err) - return fmt.Errorf("failed to lookup volume ids %v: %v", vids, err) + glog.Errorf("file handle read %s: %v", fh.f.fullpath(), err) + return fuse.EIO } - var totalRead int64 - var wg sync.WaitGroup - for _, chunkView := range chunkViews { - wg.Add(1) - go func(chunkView *filer2.ChunkView) { - defer wg.Done() + return err +} - glog.V(4).Infof("read fh reading chunk: %+v", chunkView) +func (fh *FileHandle) readFromDirtyPages(buff []byte, startOffset int64) (offset int64, size int) { + return 
fh.dirtyPages.ReadDirtyData(buff, startOffset) +} - locations := vid2Locations[volumeId(chunkView.FileId)] - if locations == nil || len(locations.Locations) == 0 { - glog.V(0).Infof("failed to locate %s", chunkView.FileId) - err = fmt.Errorf("failed to locate %s", chunkView.FileId) - return - } +func (fh *FileHandle) readFromChunks(buff []byte, offset int64) (int64, error) { - var n int64 - n, err = util.ReadUrl( - fmt.Sprintf("http://%s/%s", locations.Locations[0].Url, chunkView.FileId), - chunkView.Offset, - int(chunkView.Size), - buff[chunkView.LogicOffset-req.Offset:chunkView.LogicOffset-req.Offset+int64(chunkView.Size)], - !chunkView.IsFullChunk) + // this value should come from the filer instead of the old f + if len(fh.f.entry.Chunks) == 0 { + glog.V(1).Infof("empty fh %v", fh.f.fullpath()) + return 0, nil + } - if err != nil { + if fh.f.entryViewCache == nil { + fh.f.entryViewCache = filer2.NonOverlappingVisibleIntervals(fh.f.entry.Chunks) + fh.f.reader = nil + } - glog.V(0).Infof("%v/%v read http://%s/%v %v bytes: %v", fh.f.dir.Path, fh.f.Name, locations.Locations[0].Url, chunkView.FileId, n, err) + if fh.f.reader == nil { + chunkViews := filer2.ViewFromVisibleIntervals(fh.f.entryViewCache, 0, math.MaxInt32) + fh.f.reader = filer2.NewChunkReaderAtFromClient(fh.f.wfs, chunkViews, fh.f.wfs.chunkCache) + } - err = fmt.Errorf("failed to read http://%s/%s: %v", - locations.Locations[0].Url, chunkView.FileId, err) - return - } + totalRead, err := fh.f.reader.ReadAt(buff, offset) - glog.V(4).Infof("read fh read %d bytes: %+v", n, chunkView) - totalRead += n + if err == io.EOF { + err = nil + } - }(chunkView) + if err != nil { + glog.Errorf("file handle read %s: %v", fh.f.fullpath(), err) } - wg.Wait() - resp.Data = buff[:totalRead] + // glog.V(0).Infof("file handle read %s [%d,%d] %d : %v", fh.f.fullpath(), offset, offset+int64(totalRead), totalRead, err) - return err + return int64(totalRead), err } // Write to the file handle func (fh *FileHandle) Write(ctx 
context.Context, req *fuse.WriteRequest, resp *fuse.WriteResponse) error { // write the request to volume servers + data := make([]byte, len(req.Data)) + copy(data, req.Data) - glog.V(4).Infof("%+v/%v write fh %d: [%d,%d)", fh.f.dir.Path, fh.f.Name, fh.handle, req.Offset, req.Offset+int64(len(req.Data))) + fh.f.entry.Attributes.FileSize = uint64(max(req.Offset+int64(len(data)), int64(fh.f.entry.Attributes.FileSize))) + // glog.V(0).Infof("%v write [%d,%d)", fh.f.fullpath(), req.Offset, req.Offset+int64(len(req.Data))) - chunks, err := fh.dirtyPages.AddPage(ctx, req.Offset, req.Data) + chunks, err := fh.dirtyPages.AddPage(req.Offset, data) if err != nil { - glog.Errorf("%+v/%v write fh %d: [%d,%d): %v", fh.f.dir.Path, fh.f.Name, fh.handle, req.Offset, req.Offset+int64(len(req.Data)), err) - return fmt.Errorf("write %s/%s at [%d,%d): %v", fh.f.dir.Path, fh.f.Name, req.Offset, req.Offset+int64(len(req.Data)), err) + glog.Errorf("%v write fh %d: [%d,%d): %v", fh.f.fullpath(), fh.handle, req.Offset, req.Offset+int64(len(data)), err) + return fuse.EIO } - resp.Size = len(req.Data) + resp.Size = len(data) if req.Offset == 0 { - fh.contentType = http.DetectContentType(req.Data) + // detect mime type + fh.contentType = http.DetectContentType(data) fh.dirtyMetadata = true } - fh.f.addChunks(chunks) - if len(chunks) > 0 { + + fh.f.addChunks(chunks) + fh.dirtyMetadata = true } @@ -170,11 +151,14 @@ func (fh *FileHandle) Release(ctx context.Context, req *fuse.ReleaseRequest) err glog.V(4).Infof("%v release fh %d", fh.f.fullpath(), fh.handle) - fh.dirtyPages.releaseResource() - - fh.f.wfs.ReleaseHandle(fh.f.fullpath(), fuse.HandleID(fh.handle)) + fh.f.isOpen-- - fh.f.isOpen = false + if fh.f.isOpen <= 0 { + fh.dirtyPages.releaseResource() + fh.f.wfs.ReleaseHandle(fh.f.fullpath(), fuse.HandleID(fh.handle)) + } + fh.f.entryViewCache = nil + fh.f.reader = nil return nil } @@ -184,19 +168,22 @@ func (fh *FileHandle) Flush(ctx context.Context, req *fuse.FlushRequest) error { // send 
the data to the OS glog.V(4).Infof("%s fh %d flush %v", fh.f.fullpath(), fh.handle, req) - chunk, err := fh.dirtyPages.FlushToStorage(ctx) + chunks, err := fh.dirtyPages.FlushToStorage() if err != nil { - glog.Errorf("flush %s/%s: %v", fh.f.dir.Path, fh.f.Name, err) - return fmt.Errorf("flush %s/%s: %v", fh.f.dir.Path, fh.f.Name, err) + glog.Errorf("flush %s: %v", fh.f.fullpath(), err) + return fuse.EIO } - fh.f.addChunk(chunk) + if len(chunks) > 0 { + fh.f.addChunks(chunks) + fh.dirtyMetadata = true + } if !fh.dirtyMetadata { return nil } - return fh.f.wfs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error { + err = fh.f.wfs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { if fh.f.entry.Attributes != nil { fh.f.entry.Attributes.Mime = fh.contentType @@ -204,78 +191,48 @@ func (fh *FileHandle) Flush(ctx context.Context, req *fuse.FlushRequest) error { fh.f.entry.Attributes.Gid = req.Gid fh.f.entry.Attributes.Mtime = time.Now().Unix() fh.f.entry.Attributes.Crtime = time.Now().Unix() - fh.f.entry.Attributes.FileMode = uint32(0770) + fh.f.entry.Attributes.FileMode = uint32(os.FileMode(fh.f.entry.Attributes.FileMode) &^ fh.f.wfs.option.Umask) + fh.f.entry.Attributes.Collection = fh.dirtyPages.collection + fh.f.entry.Attributes.Replication = fh.dirtyPages.replication } request := &filer_pb.CreateEntryRequest{ - Directory: fh.f.dir.Path, + Directory: fh.f.dir.FullPath(), Entry: fh.f.entry, } - //glog.V(1).Infof("%s/%s set chunks: %v", fh.f.dir.Path, fh.f.Name, len(fh.f.entry.Chunks)) - //for i, chunk := range fh.f.entry.Chunks { - // glog.V(4).Infof("%s/%s chunks %d: %v [%d,%d)", fh.f.dir.Path, fh.f.Name, i, chunk.FileId, chunk.Offset, chunk.Offset+int64(chunk.Size)) - //} + glog.V(3).Infof("%s set chunks: %v", fh.f.fullpath(), len(fh.f.entry.Chunks)) + for i, chunk := range fh.f.entry.Chunks { + glog.V(3).Infof("%s chunks %d: %v [%d,%d)", fh.f.fullpath(), i, chunk.FileId, chunk.Offset, chunk.Offset+int64(chunk.Size)) + } chunks, garbages := 
filer2.CompactFileChunks(fh.f.entry.Chunks) fh.f.entry.Chunks = chunks // fh.f.entryViewCache = nil - fh.f.wfs.deleteFileChunks(garbages) - if _, err := client.CreateEntry(ctx, request); err != nil { - return fmt.Errorf("update fh: %v", err) + if err := filer_pb.CreateEntry(client, request); err != nil { + glog.Errorf("fh flush create %s: %v", fh.f.fullpath(), err) + return fmt.Errorf("fh flush create %s: %v", fh.f.fullpath(), err) } - return nil - }) -} - -func deleteFileIds(ctx context.Context, client filer_pb.SeaweedFilerClient, fileIds []string) error { - - var vids []string - for _, fileId := range fileIds { - vids = append(vids, volumeId(fileId)) - } - - lookupFunc := func(vids []string) (map[string]operation.LookupResult, error) { + fh.f.wfs.metaCache.InsertEntry(context.Background(), filer2.FromPbEntry(request.Directory, request.Entry)) - m := make(map[string]operation.LookupResult) - - glog.V(4).Infof("remove file lookup volume id locations: %v", vids) - resp, err := client.LookupVolume(ctx, &filer_pb.LookupVolumeRequest{ - VolumeIds: vids, - }) - if err != nil { - return m, err + fh.f.wfs.deleteFileChunks(garbages) + for i, chunk := range garbages { + glog.V(3).Infof("garbage %s chunks %d: %v [%d,%d)", fh.f.fullpath(), i, chunk.FileId, chunk.Offset, chunk.Offset+int64(chunk.Size)) } - for _, vid := range vids { - lr := operation.LookupResult{ - VolumeId: vid, - Locations: nil, - } - locations := resp.LocationsMap[vid] - for _, loc := range locations.Locations { - lr.Locations = append(lr.Locations, operation.Location{ - Url: loc.Url, - PublicUrl: loc.PublicUrl, - }) - } - m[vid] = lr - } + return nil + }) - return m, err + if err == nil { + fh.dirtyMetadata = false } - _, err := operation.DeleteFilesWithLookupVolumeId(fileIds, lookupFunc) - - return err -} - -func volumeId(fileId string) string { - lastCommaIndex := strings.LastIndex(fileId, ",") - if lastCommaIndex > 0 { - return fileId[:lastCommaIndex] + if err != nil { + glog.Errorf("%v fh %d flush: 
%v", fh.f.fullpath(), fh.handle, err) + return fuse.EIO } - return fileId + + return nil } diff --git a/weed/filesys/fscache.go b/weed/filesys/fscache.go new file mode 100644 index 000000000..b146f0615 --- /dev/null +++ b/weed/filesys/fscache.go @@ -0,0 +1,207 @@ +package filesys + +import ( + "sync" + + "github.com/chrislusf/seaweedfs/weed/util" + "github.com/seaweedfs/fuse/fs" +) + +type FsCache struct { + root *FsNode + sync.RWMutex +} +type FsNode struct { + parent *FsNode + node fs.Node + name string + childrenLock sync.RWMutex + children map[string]*FsNode +} + +func newFsCache(root fs.Node) *FsCache { + return &FsCache{ + root: &FsNode{ + node: root, + }, + } +} + +func (c *FsCache) GetFsNode(path util.FullPath) fs.Node { + + c.RLock() + defer c.RUnlock() + + return c.doGetFsNode(path) +} + +func (c *FsCache) doGetFsNode(path util.FullPath) fs.Node { + t := c.root + for _, p := range path.Split() { + t = t.findChild(p) + if t == nil { + return nil + } + } + return t.node +} + +func (c *FsCache) SetFsNode(path util.FullPath, node fs.Node) { + + c.Lock() + defer c.Unlock() + + c.doSetFsNode(path, node) +} + +func (c *FsCache) doSetFsNode(path util.FullPath, node fs.Node) { + t := c.root + for _, p := range path.Split() { + t = t.ensureChild(p) + } + t.node = node +} + +func (c *FsCache) EnsureFsNode(path util.FullPath, genNodeFn func() fs.Node) fs.Node { + + c.Lock() + defer c.Unlock() + + t := c.doGetFsNode(path) + if t != nil { + return t + } + t = genNodeFn() + c.doSetFsNode(path, t) + return t +} + +func (c *FsCache) DeleteFsNode(path util.FullPath) { + + c.Lock() + defer c.Unlock() + + t := c.root + for _, p := range path.Split() { + t = t.findChild(p) + if t == nil { + return + } + } + if t.parent != nil { + t.parent.disconnectChild(t) + } + t.deleteSelf() +} + +// oldPath and newPath are full path including the new name +func (c *FsCache) Move(oldPath util.FullPath, newPath util.FullPath) *FsNode { + + c.Lock() + defer c.Unlock() + + // find old node + 
src := c.root + for _, p := range oldPath.Split() { + src = src.findChild(p) + if src == nil { + return src + } + } + if src.parent != nil { + src.parent.disconnectChild(src) + } + + // find new node + target := c.root + for _, p := range newPath.Split() { + target = target.ensureChild(p) + } + parent := target.parent + src.name = target.name + if dir, ok := src.node.(*Dir); ok { + dir.name = target.name // target is not Dir, but a shortcut + } + if f, ok := src.node.(*File); ok { + f.Name = target.name + if f.entry != nil { + f.entry.Name = f.Name + } + } + parent.disconnectChild(target) + + target.deleteSelf() + + src.connectToParent(parent) + + return src +} + +func (n *FsNode) connectToParent(parent *FsNode) { + n.parent = parent + oldNode := parent.findChild(n.name) + if oldNode != nil { + oldNode.deleteSelf() + } + if dir, ok := n.node.(*Dir); ok { + dir.parent = parent.node.(*Dir) + } + if f, ok := n.node.(*File); ok { + f.dir = parent.node.(*Dir) + } + n.childrenLock.Lock() + parent.children[n.name] = n + n.childrenLock.Unlock() +} + +func (n *FsNode) findChild(name string) *FsNode { + n.childrenLock.RLock() + defer n.childrenLock.RUnlock() + + child, found := n.children[name] + if found { + return child + } + return nil +} + +func (n *FsNode) ensureChild(name string) *FsNode { + n.childrenLock.Lock() + defer n.childrenLock.Unlock() + + if n.children == nil { + n.children = make(map[string]*FsNode) + } + child, found := n.children[name] + if found { + return child + } + t := &FsNode{ + parent: n, + node: nil, + name: name, + children: nil, + } + n.children[name] = t + return t +} + +func (n *FsNode) disconnectChild(child *FsNode) { + n.childrenLock.Lock() + delete(n.children, child.name) + n.childrenLock.Unlock() + child.parent = nil +} + +func (n *FsNode) deleteSelf() { + n.childrenLock.Lock() + for _, child := range n.children { + child.deleteSelf() + } + n.children = nil + n.childrenLock.Unlock() + + n.node = nil + n.parent = nil + +} diff --git 
a/weed/filesys/fscache_test.go b/weed/filesys/fscache_test.go new file mode 100644 index 000000000..67f9aacc8 --- /dev/null +++ b/weed/filesys/fscache_test.go @@ -0,0 +1,96 @@ +package filesys + +import ( + "testing" + + "github.com/chrislusf/seaweedfs/weed/util" +) + +func TestPathSplit(t *testing.T) { + parts := util.FullPath("/").Split() + if len(parts) != 0 { + t.Errorf("expecting an empty list, but getting %d", len(parts)) + } + + parts = util.FullPath("/readme.md").Split() + if len(parts) != 1 { + t.Errorf("expecting an empty list, but getting %d", len(parts)) + } + +} + +func TestFsCache(t *testing.T) { + + cache := newFsCache(nil) + + x := cache.GetFsNode(util.FullPath("/y/x")) + if x != nil { + t.Errorf("wrong node!") + } + + p := util.FullPath("/a/b/c") + cache.SetFsNode(p, &File{Name: "cc"}) + tNode := cache.GetFsNode(p) + tFile := tNode.(*File) + if tFile.Name != "cc" { + t.Errorf("expecting a FsNode") + } + + cache.SetFsNode(util.FullPath("/a/b/d"), &File{Name: "dd"}) + cache.SetFsNode(util.FullPath("/a/b/e"), &File{Name: "ee"}) + cache.SetFsNode(util.FullPath("/a/b/f"), &File{Name: "ff"}) + cache.SetFsNode(util.FullPath("/z"), &File{Name: "zz"}) + cache.SetFsNode(util.FullPath("/a"), &File{Name: "aa"}) + + b := cache.GetFsNode(util.FullPath("/a/b")) + if b != nil { + t.Errorf("unexpected node!") + } + + a := cache.GetFsNode(util.FullPath("/a")) + if a == nil { + t.Errorf("missing node!") + } + + cache.DeleteFsNode(util.FullPath("/a")) + if b != nil { + t.Errorf("unexpected node!") + } + + a = cache.GetFsNode(util.FullPath("/a")) + if a != nil { + t.Errorf("wrong DeleteFsNode!") + } + + z := cache.GetFsNode(util.FullPath("/z")) + if z == nil { + t.Errorf("missing node!") + } + + y := cache.GetFsNode(util.FullPath("/x/y")) + if y != nil { + t.Errorf("wrong node!") + } + +} + +func TestFsCacheMove(t *testing.T) { + + cache := newFsCache(nil) + + cache.SetFsNode(util.FullPath("/a/b/d"), &File{Name: "dd"}) + cache.SetFsNode(util.FullPath("/a/b/e"), 
&File{Name: "ee"}) + cache.SetFsNode(util.FullPath("/z"), &File{Name: "zz"}) + cache.SetFsNode(util.FullPath("/a"), &File{Name: "aa"}) + + cache.Move(util.FullPath("/a/b"), util.FullPath("/z/x")) + + d := cache.GetFsNode(util.FullPath("/z/x/d")) + if d == nil { + t.Errorf("unexpected nil node!") + } + if d.(*File).Name != "dd" { + t.Errorf("unexpected non dd node!") + } + +} diff --git a/weed/filesys/meta_cache/cache_config.go b/weed/filesys/meta_cache/cache_config.go new file mode 100644 index 000000000..e6593ebde --- /dev/null +++ b/weed/filesys/meta_cache/cache_config.go @@ -0,0 +1,32 @@ +package meta_cache + +import "github.com/chrislusf/seaweedfs/weed/util" + +var ( + _ = util.Configuration(&cacheConfig{}) +) + +// implementing util.Configuraion +type cacheConfig struct { + dir string +} + +func (c cacheConfig) GetString(key string) string { + return c.dir +} + +func (c cacheConfig) GetBool(key string) bool { + panic("implement me") +} + +func (c cacheConfig) GetInt(key string) int { + panic("implement me") +} + +func (c cacheConfig) GetStringSlice(key string) []string { + panic("implement me") +} + +func (c cacheConfig) SetDefault(key string, value interface{}) { + panic("implement me") +} diff --git a/weed/filesys/meta_cache/meta_cache.go b/weed/filesys/meta_cache/meta_cache.go new file mode 100644 index 000000000..3b04040a5 --- /dev/null +++ b/weed/filesys/meta_cache/meta_cache.go @@ -0,0 +1,106 @@ +package meta_cache + +import ( + "context" + "os" + "sync" + + "github.com/chrislusf/seaweedfs/weed/filer2" + "github.com/chrislusf/seaweedfs/weed/filer2/leveldb" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/util" + "github.com/chrislusf/seaweedfs/weed/util/bounded_tree" +) + +type MetaCache struct { + actualStore filer2.FilerStore + sync.RWMutex + visitedBoundary *bounded_tree.BoundedTree +} + +func NewMetaCache(dbFolder string) *MetaCache { + return &MetaCache{ + actualStore: openMetaStore(dbFolder), + visitedBoundary: 
bounded_tree.NewBoundedTree(), + } +} + +func openMetaStore(dbFolder string) filer2.FilerStore { + + os.RemoveAll(dbFolder) + os.MkdirAll(dbFolder, 0755) + + store := &leveldb.LevelDBStore{} + config := &cacheConfig{ + dir: dbFolder, + } + + if err := store.Initialize(config, ""); err != nil { + glog.Fatalf("Failed to initialize metadata cache store for %s: %+v", store.GetName(), err) + } + + return store + +} + +func (mc *MetaCache) InsertEntry(ctx context.Context, entry *filer2.Entry) error { + mc.Lock() + defer mc.Unlock() + return mc.actualStore.InsertEntry(ctx, entry) +} + +func (mc *MetaCache) AtomicUpdateEntry(ctx context.Context, oldPath util.FullPath, newEntry *filer2.Entry) error { + mc.Lock() + defer mc.Unlock() + + oldDir, _ := oldPath.DirAndName() + if mc.visitedBoundary.HasVisited(util.FullPath(oldDir)) { + if oldPath != "" { + if err := mc.actualStore.DeleteEntry(ctx, oldPath); err != nil { + return err + } + } + }else{ + // println("unknown old directory:", oldDir) + } + + if newEntry != nil { + newDir, _ := newEntry.DirAndName() + if mc.visitedBoundary.HasVisited(util.FullPath(newDir)) { + if err := mc.actualStore.InsertEntry(ctx, newEntry); err != nil { + return err + } + } + } + return nil +} + +func (mc *MetaCache) UpdateEntry(ctx context.Context, entry *filer2.Entry) error { + mc.Lock() + defer mc.Unlock() + return mc.actualStore.UpdateEntry(ctx, entry) +} + +func (mc *MetaCache) FindEntry(ctx context.Context, fp util.FullPath) (entry *filer2.Entry, err error) { + mc.RLock() + defer mc.RUnlock() + return mc.actualStore.FindEntry(ctx, fp) +} + +func (mc *MetaCache) DeleteEntry(ctx context.Context, fp util.FullPath) (err error) { + mc.Lock() + defer mc.Unlock() + return mc.actualStore.DeleteEntry(ctx, fp) +} + +func (mc *MetaCache) ListDirectoryEntries(ctx context.Context, dirPath util.FullPath, startFileName string, includeStartFile bool, limit int) ([]*filer2.Entry, error) { + mc.RLock() + defer mc.RUnlock() + return 
mc.actualStore.ListDirectoryEntries(ctx, dirPath, startFileName, includeStartFile, limit) +} + +func (mc *MetaCache) Shutdown() { + mc.Lock() + defer mc.Unlock() + mc.actualStore.Shutdown() +} diff --git a/weed/filesys/meta_cache/meta_cache_init.go b/weed/filesys/meta_cache/meta_cache_init.go new file mode 100644 index 000000000..1fbc3e532 --- /dev/null +++ b/weed/filesys/meta_cache/meta_cache_init.go @@ -0,0 +1,47 @@ +package meta_cache + +import ( + "context" + "fmt" + + "github.com/chrislusf/seaweedfs/weed/filer2" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/util" +) + +func InitMetaCache(mc *MetaCache, client filer_pb.FilerClient, path string) error { + return nil + glog.V(0).Infof("synchronizing meta data ...") + filer_pb.TraverseBfs(client, util.FullPath(path), func(parentPath util.FullPath, pbEntry *filer_pb.Entry) { + entry := filer2.FromPbEntry(string(parentPath), pbEntry) + if err := mc.InsertEntry(context.Background(), entry); err != nil { + glog.V(0).Infof("read %s: %v", entry.FullPath, err) + } + }) + return nil +} + +func EnsureVisited(mc *MetaCache, client filer_pb.FilerClient, dirPath util.FullPath) { + + mc.visitedBoundary.EnsureVisited(dirPath, func(path util.FullPath) (childDirectories []string, err error) { + + glog.V(2).Infof("ReadDirAllEntries %s ...", path) + + err = filer_pb.ReadDirAllEntries(client, dirPath, "", func(pbEntry *filer_pb.Entry, isLast bool) error { + entry := filer2.FromPbEntry(string(dirPath), pbEntry) + if err := mc.InsertEntry(context.Background(), entry); err != nil { + glog.V(0).Infof("read %s: %v", entry.FullPath, err) + return err + } + if entry.IsDirectory() { + childDirectories = append(childDirectories, entry.Name()) + } + return nil + }) + if err != nil { + err = fmt.Errorf("list %s: %v", dirPath, err) + } + return + }) +} diff --git a/weed/filesys/meta_cache/meta_cache_subscribe.go 
b/weed/filesys/meta_cache/meta_cache_subscribe.go new file mode 100644 index 000000000..2e411a48a --- /dev/null +++ b/weed/filesys/meta_cache/meta_cache_subscribe.go @@ -0,0 +1,69 @@ +package meta_cache + +import ( + "context" + "fmt" + "io" + "time" + + "github.com/chrislusf/seaweedfs/weed/filer2" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/util" +) + +func SubscribeMetaEvents(mc *MetaCache, client filer_pb.FilerClient, dir string, lastTsNs int64) error { + + processEventFn := func(resp *filer_pb.SubscribeMetadataResponse) error { + message := resp.EventNotification + var oldPath util.FullPath + var newEntry *filer2.Entry + if message.OldEntry != nil { + oldPath = util.NewFullPath(resp.Directory, message.OldEntry.Name) + glog.V(4).Infof("deleting %v", oldPath) + } + + if message.NewEntry != nil { + dir := resp.Directory + if message.NewParentPath != "" { + dir = message.NewParentPath + } + key := util.NewFullPath(dir, message.NewEntry.Name) + glog.V(4).Infof("creating %v", key) + newEntry = filer2.FromPbEntry(dir, message.NewEntry) + } + return mc.AtomicUpdateEntry(context.Background(), oldPath, newEntry) + } + + for { + err := client.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { + stream, err := client.SubscribeMetadata(context.Background(), &filer_pb.SubscribeMetadataRequest{ + ClientName: "mount", + PathPrefix: dir, + SinceNs: lastTsNs, + }) + if err != nil { + return fmt.Errorf("subscribe: %v", err) + } + + for { + resp, listenErr := stream.Recv() + if listenErr == io.EOF { + return nil + } + if listenErr != nil { + return listenErr + } + + if err := processEventFn(resp); err != nil { + return fmt.Errorf("process %v: %v", resp, err) + } + lastTsNs = resp.TsNs + } + }) + if err != nil { + glog.V(0).Infof("subscribing filer meta change: %v", err) + time.Sleep(time.Second) + } + } +} diff --git a/weed/filesys/unimplemented.go 
b/weed/filesys/unimplemented.go new file mode 100644 index 000000000..1f4fe554d --- /dev/null +++ b/weed/filesys/unimplemented.go @@ -0,0 +1,20 @@ +package filesys + +import ( + "context" + + "github.com/seaweedfs/fuse" + "github.com/seaweedfs/fuse/fs" +) + +// https://github.com/bazil/fuse/issues/130 + +var _ = fs.NodeAccesser(&Dir{}) +func (dir *Dir) Access(ctx context.Context, req *fuse.AccessRequest) error { + return fuse.ENOSYS +} + +var _ = fs.NodeAccesser(&File{}) +func (file *File) Access(ctx context.Context, req *fuse.AccessRequest) error { + return fuse.ENOSYS +} diff --git a/weed/filesys/wfs.go b/weed/filesys/wfs.go index 969514a06..8dffa6555 100644 --- a/weed/filesys/wfs.go +++ b/weed/filesys/wfs.go @@ -5,32 +5,48 @@ import ( "fmt" "math" "os" + "path" "sync" "time" + "google.golang.org/grpc" + + "github.com/chrislusf/seaweedfs/weed/util/grace" + + "github.com/seaweedfs/fuse" + "github.com/seaweedfs/fuse/fs" + + "github.com/chrislusf/seaweedfs/weed/filesys/meta_cache" "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" "github.com/chrislusf/seaweedfs/weed/util" - "github.com/karlseguin/ccache" - "github.com/seaweedfs/fuse" - "github.com/seaweedfs/fuse/fs" - "google.golang.org/grpc" + "github.com/chrislusf/seaweedfs/weed/util/chunk_cache" ) type Option struct { FilerGrpcAddress string + GrpcDialOption grpc.DialOption FilerMountRootPath string Collection string Replication string TtlSec int32 ChunkSizeLimit int64 + CacheDir string + CacheSizeMB int64 DataCenter string - DirListingLimit int + DirListCacheLimit int64 EntryCacheTtl time.Duration + Umask os.FileMode + + MountUid uint32 + MountGid uint32 + MountMode os.FileMode + MountCtime time.Time + MountMtime time.Time + + OutsideContainerClusterMode bool // whether the mount runs outside SeaweedFS containers + Cipher bool // whether encrypt data on volume server - MountUid uint32 - MountGid uint32 - MountMode os.FileMode } var _ = fs.FS(&WFS{}) @@ -38,17 +54,20 @@ 
var _ = fs.FSStatfser(&WFS{}) type WFS struct { option *Option - listDirectoryEntriesCache *ccache.Cache - // contains all open handles - handles []*FileHandle - pathToHandleIndex map[string]int - pathToHandleLock sync.Mutex - bufPool sync.Pool + // contains all open handles, protected by handlesLock + handlesLock sync.Mutex + handles map[uint64]*FileHandle - fileIdsDeletionChan chan []string + bufPool sync.Pool stats statsCache + + root fs.Node + fsNodeCache *FsCache + + chunkCache *chunk_cache.ChunkCache + metaCache *meta_cache.MetaCache } type statsCache struct { filer_pb.StatisticsResponse @@ -58,74 +77,73 @@ type statsCache struct { func NewSeaweedFileSystem(option *Option) *WFS { wfs := &WFS{ option: option, - listDirectoryEntriesCache: ccache.New(ccache.Configure().MaxSize(1024 * 8).ItemsToPrune(100)), - pathToHandleIndex: make(map[string]int), + handles: make(map[uint64]*FileHandle), bufPool: sync.Pool{ New: func() interface{} { return make([]byte, option.ChunkSizeLimit) }, }, - fileIdsDeletionChan: make(chan []string, 32), + } + cacheUniqueId := util.Md5([]byte(option.FilerGrpcAddress))[0:4] + cacheDir := path.Join(option.CacheDir, cacheUniqueId) + if option.CacheSizeMB > 0 { + os.MkdirAll(cacheDir, 0755) + wfs.chunkCache = chunk_cache.NewChunkCache(256, cacheDir, option.CacheSizeMB) + grace.OnInterrupt(func() { + wfs.chunkCache.Shutdown() + }) + } + + wfs.metaCache = meta_cache.NewMetaCache(path.Join(cacheDir, "meta")) + startTime := time.Now() + if err := meta_cache.InitMetaCache(wfs.metaCache, wfs, wfs.option.FilerMountRootPath); err != nil { + glog.V(0).Infof("failed to init meta cache: %v", err) + } else { + go meta_cache.SubscribeMetaEvents(wfs.metaCache, wfs, wfs.option.FilerMountRootPath, startTime.UnixNano()) + grace.OnInterrupt(func() { + wfs.metaCache.Shutdown() + }) } - go wfs.loopProcessingDeletion() + wfs.root = &Dir{name: wfs.option.FilerMountRootPath, wfs: wfs} + wfs.fsNodeCache = newFsCache(wfs.root) return wfs } func (wfs *WFS) Root() 
(fs.Node, error) { - return &Dir{Path: wfs.option.FilerMountRootPath, wfs: wfs}, nil -} - -func (wfs *WFS) withFilerClient(fn func(filer_pb.SeaweedFilerClient) error) error { - - return util.WithCachedGrpcClient(func(grpcConnection *grpc.ClientConn) error { - client := filer_pb.NewSeaweedFilerClient(grpcConnection) - return fn(client) - }, wfs.option.FilerGrpcAddress) - + return wfs.root, nil } func (wfs *WFS) AcquireHandle(file *File, uid, gid uint32) (fileHandle *FileHandle) { - wfs.pathToHandleLock.Lock() - defer wfs.pathToHandleLock.Unlock() fullpath := file.fullpath() + glog.V(4).Infof("%s AcquireHandle uid=%d gid=%d", fullpath, uid, gid) - index, found := wfs.pathToHandleIndex[fullpath] - if found && wfs.handles[index] != nil { - glog.V(2).Infoln(fullpath, "found fileHandle id", index) - return wfs.handles[index] - } + wfs.handlesLock.Lock() + defer wfs.handlesLock.Unlock() - fileHandle = newFileHandle(file, uid, gid) - for i, h := range wfs.handles { - if h == nil { - wfs.handles[i] = fileHandle - fileHandle.handle = uint64(i) - wfs.pathToHandleIndex[fullpath] = i - glog.V(4).Infoln(fullpath, "reuse fileHandle id", fileHandle.handle) - return - } + inodeId := file.fullpath().AsInode() + existingHandle, found := wfs.handles[inodeId] + if found && existingHandle != nil { + return existingHandle } - wfs.handles = append(wfs.handles, fileHandle) - fileHandle.handle = uint64(len(wfs.handles) - 1) - glog.V(2).Infoln(fullpath, "new fileHandle id", fileHandle.handle) - wfs.pathToHandleIndex[fullpath] = int(fileHandle.handle) + fileHandle = newFileHandle(file, uid, gid) + wfs.handles[inodeId] = fileHandle + fileHandle.handle = inodeId + glog.V(4).Infof("%s new fh %d", fullpath, fileHandle.handle) return } -func (wfs *WFS) ReleaseHandle(fullpath string, handleId fuse.HandleID) { - wfs.pathToHandleLock.Lock() - defer wfs.pathToHandleLock.Unlock() +func (wfs *WFS) ReleaseHandle(fullpath util.FullPath, handleId fuse.HandleID) { + wfs.handlesLock.Lock() + defer 
wfs.handlesLock.Unlock() - glog.V(4).Infof("%s releasing handle id %d current handles length %d", fullpath, handleId, len(wfs.handles)) - delete(wfs.pathToHandleIndex, fullpath) - if int(handleId) < len(wfs.handles) { - wfs.handles[int(handleId)] = nil - } + glog.V(4).Infof("%s ReleaseHandle id %d current handles length %d", fullpath, handleId, len(wfs.handles)) + + delete(wfs.handles, fullpath.AsInode()) return } @@ -137,7 +155,7 @@ func (wfs *WFS) Statfs(ctx context.Context, req *fuse.StatfsRequest, resp *fuse. if wfs.stats.lastChecked < time.Now().Unix()-20 { - err := wfs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error { + err := wfs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { request := &filer_pb.StatisticsRequest{ Collection: wfs.option.Collection, @@ -146,7 +164,7 @@ func (wfs *WFS) Statfs(ctx context.Context, req *fuse.StatfsRequest, resp *fuse. } glog.V(4).Infof("reading filer stats: %+v", request) - resp, err := client.Statistics(ctx, request) + resp, err := client.Statistics(context.Background(), request) if err != nil { glog.V(0).Infof("reading filer stats %v: %v", request, err) return err diff --git a/weed/filesys/wfs_deletion.go b/weed/filesys/wfs_deletion.go index f58ef24f4..bf21b1808 100644 --- a/weed/filesys/wfs_deletion.go +++ b/weed/filesys/wfs_deletion.go @@ -2,39 +2,15 @@ package filesys import ( "context" - "time" + "google.golang.org/grpc" + + "github.com/chrislusf/seaweedfs/weed/filer2" "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/operation" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" ) -func (wfs *WFS) loopProcessingDeletion() { - - ticker := time.NewTicker(2 * time.Second) - - wfs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error { - var fileIds []string - for { - select { - case fids := <-wfs.fileIdsDeletionChan: - fileIds = append(fileIds, fids...) 
- if len(fileIds) >= 1024 { - glog.V(1).Infof("deleting fileIds len=%d", len(fileIds)) - deleteFileIds(context.Background(), client, fileIds) - fileIds = fileIds[:0] - } - case <-ticker.C: - if len(fileIds) > 0 { - glog.V(1).Infof("timed deletion fileIds len=%d", len(fileIds)) - deleteFileIds(context.Background(), client, fileIds) - fileIds = fileIds[:0] - } - } - } - }) - -} - func (wfs *WFS) deleteFileChunks(chunks []*filer_pb.FileChunk) { if len(chunks) == 0 { return @@ -42,17 +18,56 @@ func (wfs *WFS) deleteFileChunks(chunks []*filer_pb.FileChunk) { var fileIds []string for _, chunk := range chunks { - fileIds = append(fileIds, chunk.FileId) + fileIds = append(fileIds, chunk.GetFileIdString()) } - var async = false - if async { - wfs.fileIdsDeletionChan <- fileIds - return - } - - wfs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error { - deleteFileIds(context.Background(), client, fileIds) + wfs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { + wfs.deleteFileIds(wfs.option.GrpcDialOption, client, fileIds) return nil }) } + +func (wfs *WFS) deleteFileIds(grpcDialOption grpc.DialOption, client filer_pb.SeaweedFilerClient, fileIds []string) error { + + var vids []string + for _, fileId := range fileIds { + vids = append(vids, filer2.VolumeId(fileId)) + } + + lookupFunc := func(vids []string) (map[string]operation.LookupResult, error) { + + m := make(map[string]operation.LookupResult) + + glog.V(4).Infof("remove file lookup volume id locations: %v", vids) + resp, err := client.LookupVolume(context.Background(), &filer_pb.LookupVolumeRequest{ + VolumeIds: vids, + }) + if err != nil { + return m, err + } + + for _, vid := range vids { + lr := operation.LookupResult{ + VolumeId: vid, + Locations: nil, + } + locations, found := resp.LocationsMap[vid] + if !found { + continue + } + for _, loc := range locations.Locations { + lr.Locations = append(lr.Locations, operation.Location{ + Url: wfs.AdjustedUrl(loc.Url), + PublicUrl: loc.PublicUrl, 
+ }) + } + m[vid] = lr + } + + return m, err + } + + _, err := operation.DeleteFilesWithLookupVolumeId(grpcDialOption, fileIds, lookupFunc) + + return err +} diff --git a/weed/filesys/wfs_filer_client.go b/weed/filesys/wfs_filer_client.go new file mode 100644 index 000000000..736df3588 --- /dev/null +++ b/weed/filesys/wfs_filer_client.go @@ -0,0 +1,40 @@ +package filesys + +import ( + "fmt" + "strings" + + "google.golang.org/grpc" + + "github.com/chrislusf/seaweedfs/weed/pb" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" +) + +var _ = filer_pb.FilerClient(&WFS{}) + +func (wfs *WFS) WithFilerClient(fn func(filer_pb.SeaweedFilerClient) error) error { + + err := pb.WithCachedGrpcClient(func(grpcConnection *grpc.ClientConn) error { + client := filer_pb.NewSeaweedFilerClient(grpcConnection) + return fn(client) + }, wfs.option.FilerGrpcAddress, wfs.option.GrpcDialOption) + + if err == nil { + return nil + } + return err + +} + +func (wfs *WFS) AdjustedUrl(hostAndPort string) string { + if !wfs.option.OutsideContainerClusterMode { + return hostAndPort + } + commaIndex := strings.Index(hostAndPort, ":") + if commaIndex < 0 { + return hostAndPort + } + filerCommaIndex := strings.Index(wfs.option.FilerGrpcAddress, ":") + return fmt.Sprintf("%s:%s", wfs.option.FilerGrpcAddress[:filerCommaIndex], hostAndPort[commaIndex+1:]) + +} diff --git a/weed/filesys/xattr.go b/weed/filesys/xattr.go new file mode 100644 index 000000000..091a70fa3 --- /dev/null +++ b/weed/filesys/xattr.go @@ -0,0 +1,123 @@ +package filesys + +import ( + "context" + + "github.com/seaweedfs/fuse" + + "github.com/chrislusf/seaweedfs/weed/filesys/meta_cache" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/util" +) + +func getxattr(entry *filer_pb.Entry, req *fuse.GetxattrRequest, resp *fuse.GetxattrResponse) error { + + if entry == nil { + return fuse.ErrNoXattr + } + if entry.Extended == nil { + return fuse.ErrNoXattr + } + data, found := entry.Extended[req.Name] 
+ if !found { + return fuse.ErrNoXattr + } + if req.Position < uint32(len(data)) { + size := req.Size + if req.Position+size >= uint32(len(data)) { + size = uint32(len(data)) - req.Position + } + if size == 0 { + resp.Xattr = data[req.Position:] + } else { + resp.Xattr = data[req.Position : req.Position+size] + } + } + + return nil + +} + +func setxattr(entry *filer_pb.Entry, req *fuse.SetxattrRequest) error { + + if entry == nil { + return fuse.EIO + } + + if entry.Extended == nil { + entry.Extended = make(map[string][]byte) + } + data, _ := entry.Extended[req.Name] + + newData := make([]byte, int(req.Position)+len(req.Xattr)) + + copy(newData, data) + + copy(newData[int(req.Position):], req.Xattr) + + entry.Extended[req.Name] = newData + + return nil + +} + +func removexattr(entry *filer_pb.Entry, req *fuse.RemovexattrRequest) error { + + if entry == nil { + return fuse.ErrNoXattr + } + + if entry.Extended == nil { + return fuse.ErrNoXattr + } + + _, found := entry.Extended[req.Name] + + if !found { + return fuse.ErrNoXattr + } + + delete(entry.Extended, req.Name) + + return nil + +} + +func listxattr(entry *filer_pb.Entry, req *fuse.ListxattrRequest, resp *fuse.ListxattrResponse) error { + + if entry == nil { + return fuse.EIO + } + + for k := range entry.Extended { + resp.Append(k) + } + + size := req.Size + if req.Position+size >= uint32(len(resp.Xattr)) { + size = uint32(len(resp.Xattr)) - req.Position + } + + if size == 0 { + resp.Xattr = resp.Xattr[req.Position:] + } else { + resp.Xattr = resp.Xattr[req.Position : req.Position+size] + } + + return nil + +} + +func (wfs *WFS) maybeLoadEntry(dir, name string) (entry *filer_pb.Entry, err error) { + + fullpath := util.NewFullPath(dir, name) + // glog.V(3).Infof("read entry cache miss %s", fullpath) + + // read from async meta cache + meta_cache.EnsureVisited(wfs.metaCache, wfs, util.FullPath(dir)) + cachedEntry, cacheErr := wfs.metaCache.FindEntry(context.Background(), fullpath) + if cacheErr == 
filer_pb.ErrNotFound { + return nil, fuse.ENOENT + } + return cachedEntry.ToProtoEntry(), nil +} diff --git a/weed/glide.lock b/weed/glide.lock deleted file mode 100644 index fee78be42..000000000 --- a/weed/glide.lock +++ /dev/null @@ -1,190 +0,0 @@ -hash: 2e3a065472829938d25e879451b6d1aa43e55270e1166a9c044803ef8a3b9eb1 -updated: 2018-06-28T22:01:35.910567-07:00 -imports: -- name: github.com/seaweedfs/fuse - version: 65cc252bf6691cb3c7014bcb2c8dc29de91e3a7e - subpackages: - - fs - - fuseutil -- name: github.com/boltdb/bolt - version: 2f1ce7a837dcb8da3ec595b1dac9d0632f0f99e8 -- name: github.com/chrislusf/raft - version: 5f7ddd8f479583daf05879d3d3b174aa202c8fb7 - subpackages: - - protobuf -- name: github.com/dgrijalva/jwt-go - version: 06ea1031745cb8b3dab3f6a236daf2b0aa468b7e -- name: github.com/disintegration/imaging - version: bbcee2f5c9d5e94ca42c8b50ec847fec64a6c134 -- name: github.com/fsnotify/fsnotify - version: c2828203cd70a50dcccfb2761f8b1f8ceef9a8e9 -- name: github.com/go-redis/redis - version: 83fb42932f6145ce52df09860384a4653d2d332a - subpackages: - - internal - - internal/consistenthash - - internal/hashtag - - internal/pool - - internal/proto - - internal/singleflight - - internal/util -- name: github.com/go-sql-driver/mysql - version: d523deb1b23d913de5bdada721a6071e71283618 -- name: github.com/gocql/gocql - version: e06f8c1bcd787e6bf0608288b314522f08cc7848 - subpackages: - - internal/lru - - internal/murmur - - internal/streams -- name: github.com/gogo/protobuf - version: 30cf7ac33676b5786e78c746683f0d4cd64fa75b - subpackages: - - proto -- name: github.com/golang/protobuf - version: b4deda0973fb4c70b50d226b1af49f3da59f5265 - subpackages: - - proto - - protoc-gen-go/descriptor - - ptypes - - ptypes/any - - ptypes/duration - - ptypes/timestamp -- name: github.com/golang/snappy - version: 2e65f85255dbc3072edf28d6b5b8efc472979f5a -- name: github.com/google/btree - version: e89373fe6b4a7413d7acd6da1725b83ef713e6e4 -- name: github.com/gorilla/context - 
version: 08b5f424b9271eedf6f9f0ce86cb9396ed337a42 -- name: github.com/gorilla/mux - version: e3702bed27f0d39777b0b37b664b6280e8ef8fbf -- name: github.com/hailocab/go-hostpool - version: e80d13ce29ede4452c43dea11e79b9bc8a15b478 -- name: github.com/hashicorp/hcl - version: ef8a98b0bbce4a65b5aa4c368430a80ddc533168 - subpackages: - - hcl/ast - - hcl/parser - - hcl/printer - - hcl/scanner - - hcl/strconv - - hcl/token - - json/parser - - json/scanner - - json/token -- name: github.com/karlseguin/ccache - version: b425c9ca005a2050ebe723f6a0cddcb907354ab7 -- name: github.com/klauspost/crc32 - version: cb6bfca970f6908083f26f39a79009d608efd5cd -- name: github.com/lib/pq - version: 90697d60dd844d5ef6ff15135d0203f65d2f53b8 - subpackages: - - oid -- name: github.com/magiconair/properties - version: c2353362d570a7bfa228149c62842019201cfb71 -- name: github.com/mitchellh/mapstructure - version: bb74f1db0675b241733089d5a1faa5dd8b0ef57b -- name: github.com/pelletier/go-toml - version: c01d1270ff3e442a8a57cddc1c92dc1138598194 -- name: github.com/rwcarlsen/goexif - version: 8d986c03457a2057c7b0fb0a48113f7dd48f9619 - subpackages: - - exif - - tiff -- name: github.com/soheilhy/cmux - version: e09e9389d85d8492d313d73d1469c029e710623f -- name: github.com/spf13/afero - version: 787d034dfe70e44075ccc060d346146ef53270ad - subpackages: - - mem -- name: github.com/spf13/cast - version: 8965335b8c7107321228e3e3702cab9832751bac -- name: github.com/spf13/jwalterweatherman - version: 7c0cea34c8ece3fbeb2b27ab9b59511d360fb394 -- name: github.com/spf13/pflag - version: 3ebe029320b2676d667ae88da602a5f854788a8a -- name: github.com/spf13/viper - version: 15738813a09db5c8e5b60a19d67d3f9bd38da3a4 -- name: github.com/syndtr/goleveldb - version: 0d5a0ceb10cf9ab89fdd744cc8c50a83134f6697 - subpackages: - - leveldb - - leveldb/cache - - leveldb/comparer - - leveldb/errors - - leveldb/filter - - leveldb/iterator - - leveldb/journal - - leveldb/memdb - - leveldb/opt - - leveldb/storage - - leveldb/table - - 
leveldb/util -- name: golang.org/x/image - version: cc896f830cedae125428bc9fe1b0362aa91b3fb1 - subpackages: - - bmp - - tiff - - tiff/lzw -- name: golang.org/x/net - version: 4cb1c02c05b0e749b0365f61ae859a8e0cfceed9 - subpackages: - - context - - http/httpguts - - http2 - - http2/hpack - - idna - - internal/timeseries - - trace -- name: golang.org/x/sys - version: 7138fd3d9dc8335c567ca206f4333fb75eb05d56 - subpackages: - - unix -- name: golang.org/x/text - version: 5cec4b58c438bd98288aeb248bab2c1840713d21 - subpackages: - - secure/bidirule - - transform - - unicode/bidi - - unicode/norm -- name: google.golang.org/appengine - version: b1f26356af11148e710935ed1ac8a7f5702c7612 - subpackages: - - cloudsql -- name: google.golang.org/genproto - version: ff3583edef7de132f219f0efc00e097cabcc0ec0 - subpackages: - - googleapis/rpc/status -- name: google.golang.org/grpc - version: 168a6198bcb0ef175f7dacec0b8691fc141dc9b8 - subpackages: - - balancer - - balancer/base - - balancer/roundrobin - - codes - - connectivity - - credentials - - encoding - - encoding/proto - - grpclog - - internal - - internal/backoff - - internal/channelz - - internal/grpcrand - - keepalive - - metadata - - naming - - peer - - reflection - - reflection/grpc_reflection_v1alpha - - resolver - - resolver/dns - - resolver/passthrough - - stats - - status - - tap - - transport -- name: gopkg.in/inf.v0 - version: d2d2541c53f18d2a059457998ce2876cc8e67cbf -- name: gopkg.in/yaml.v2 - version: 5420a8b6744d3b0345ab293f6fcba19c978f1183 -testImports: [] diff --git a/weed/glide.yaml b/weed/glide.yaml deleted file mode 100644 index 740d2ad3d..000000000 --- a/weed/glide.yaml +++ /dev/null @@ -1,44 +0,0 @@ -package: github.com/chrislusf/seaweedfs/weed -import: -- package: github.com/seaweedfs/fuse - subpackages: - - fs -- package: github.com/boltdb/bolt - version: ^1.3.1 -- package: github.com/chrislusf/raft -- package: github.com/dgrijalva/jwt-go - version: ^3.2.0 -- package: github.com/disintegration/imaging - 
version: ^1.4.1 -- package: github.com/go-redis/redis - version: ^6.10.2 -- package: github.com/go-sql-driver/mysql - version: ^1.3.0 -- package: github.com/gocql/gocql -- package: github.com/golang/protobuf - version: ^1.0.0 - subpackages: - - proto -- package: github.com/google/btree -- package: github.com/gorilla/mux - version: ^1.6.1 -- package: github.com/klauspost/crc32 - version: ^1.1.0 -- package: github.com/lib/pq -- package: github.com/rwcarlsen/goexif - subpackages: - - exif -- package: github.com/soheilhy/cmux - version: ^0.1.4 -- package: github.com/syndtr/goleveldb - subpackages: - - leveldb - - leveldb/util -- package: golang.org/x/net - subpackages: - - context -- package: google.golang.org/grpc - version: ^1.11.3 - subpackages: - - peer - - reflection diff --git a/weed/images/orientation.go b/weed/images/orientation.go deleted file mode 100644 index 4bff89311..000000000 --- a/weed/images/orientation.go +++ /dev/null @@ -1,182 +0,0 @@ -package images - -import ( - "bytes" - "image" - "image/draw" - "image/jpeg" - "log" - - "github.com/rwcarlsen/goexif/exif" -) - -//many code is copied from http://camlistore.org/pkg/images/images.go -func FixJpgOrientation(data []byte) (oriented []byte) { - ex, err := exif.Decode(bytes.NewReader(data)) - if err != nil { - return data - } - tag, err := ex.Get(exif.Orientation) - if err != nil { - return data - } - angle := 0 - flipMode := FlipDirection(0) - orient, err := tag.Int(0) - if err != nil { - return data - } - switch orient { - case topLeftSide: - // do nothing - return data - case topRightSide: - flipMode = 2 - case bottomRightSide: - angle = 180 - case bottomLeftSide: - angle = 180 - flipMode = 2 - case leftSideTop: - angle = -90 - flipMode = 2 - case rightSideTop: - angle = -90 - case rightSideBottom: - angle = 90 - flipMode = 2 - case leftSideBottom: - angle = 90 - } - - if srcImage, _, err := image.Decode(bytes.NewReader(data)); err == nil { - dstImage := flip(rotate(srcImage, angle), flipMode) - var 
buf bytes.Buffer - jpeg.Encode(&buf, dstImage, nil) - return buf.Bytes() - } - - return data -} - -// Exif Orientation Tag values -// http://sylvana.net/jpegcrop/exif_orientation.html -const ( - topLeftSide = 1 - topRightSide = 2 - bottomRightSide = 3 - bottomLeftSide = 4 - leftSideTop = 5 - rightSideTop = 6 - rightSideBottom = 7 - leftSideBottom = 8 -) - -// The FlipDirection type is used by the Flip option in DecodeOpts -// to indicate in which direction to flip an image. -type FlipDirection int - -// FlipVertical and FlipHorizontal are two possible FlipDirections -// values to indicate in which direction an image will be flipped. -const ( - FlipVertical FlipDirection = 1 << iota - FlipHorizontal -) - -type DecodeOpts struct { - // Rotate specifies how to rotate the image. - // If nil, the image is rotated automatically based on EXIF metadata. - // If an int, Rotate is the number of degrees to rotate - // counter clockwise and must be one of 0, 90, -90, 180, or - // -180. - Rotate interface{} - - // Flip specifies how to flip the image. - // If nil, the image is flipped automatically based on EXIF metadata. - // Otherwise, Flip is a FlipDirection bitfield indicating how to flip. 
- Flip interface{} -} - -func rotate(im image.Image, angle int) image.Image { - var rotated *image.NRGBA - // trigonometric (i.e counter clock-wise) - switch angle { - case 90: - newH, newW := im.Bounds().Dx(), im.Bounds().Dy() - rotated = image.NewNRGBA(image.Rect(0, 0, newW, newH)) - for y := 0; y < newH; y++ { - for x := 0; x < newW; x++ { - rotated.Set(x, y, im.At(newH-1-y, x)) - } - } - case -90: - newH, newW := im.Bounds().Dx(), im.Bounds().Dy() - rotated = image.NewNRGBA(image.Rect(0, 0, newW, newH)) - for y := 0; y < newH; y++ { - for x := 0; x < newW; x++ { - rotated.Set(x, y, im.At(y, newW-1-x)) - } - } - case 180, -180: - newW, newH := im.Bounds().Dx(), im.Bounds().Dy() - rotated = image.NewNRGBA(image.Rect(0, 0, newW, newH)) - for y := 0; y < newH; y++ { - for x := 0; x < newW; x++ { - rotated.Set(x, y, im.At(newW-1-x, newH-1-y)) - } - } - default: - return im - } - return rotated -} - -// flip returns a flipped version of the image im, according to -// the direction(s) in dir. -// It may flip the imput im in place and return it, or it may allocate a -// new NRGBA (if im is an *image.YCbCr). 
-func flip(im image.Image, dir FlipDirection) image.Image { - if dir == 0 { - return im - } - ycbcr := false - var nrgba image.Image - dx, dy := im.Bounds().Dx(), im.Bounds().Dy() - di, ok := im.(draw.Image) - if !ok { - if _, ok := im.(*image.YCbCr); !ok { - log.Printf("failed to flip image: input does not satisfy draw.Image") - return im - } - // because YCbCr does not implement Set, we replace it with a new NRGBA - ycbcr = true - nrgba = image.NewNRGBA(image.Rect(0, 0, dx, dy)) - di, ok = nrgba.(draw.Image) - if !ok { - log.Print("failed to flip image: could not cast an NRGBA to a draw.Image") - return im - } - } - if dir&FlipHorizontal != 0 { - for y := 0; y < dy; y++ { - for x := 0; x < dx/2; x++ { - old := im.At(x, y) - di.Set(x, y, im.At(dx-1-x, y)) - di.Set(dx-1-x, y, old) - } - } - } - if dir&FlipVertical != 0 { - for y := 0; y < dy/2; y++ { - for x := 0; x < dx; x++ { - old := im.At(x, y) - di.Set(x, y, im.At(x, dy-1-y)) - di.Set(x, dy-1-y, old) - } - } - } - if ycbcr { - return nrgba - } - return im -} diff --git a/weed/images/orientation_test.go b/weed/images/orientation_test.go deleted file mode 100644 index 32fa38f76..000000000 --- a/weed/images/orientation_test.go +++ /dev/null @@ -1,20 +0,0 @@ -package images - -import ( - "io/ioutil" - "os" - "testing" -) - -func TestXYZ(t *testing.T) { - fname := "sample1.jpg" - - dat, _ := ioutil.ReadFile(fname) - - fixed_data := FixJpgOrientation(dat) - - ioutil.WriteFile("fixed1.jpg", fixed_data, 0644) - - os.Remove("fixed1.jpg") - -} diff --git a/weed/images/preprocess.go b/weed/images/preprocess.go deleted file mode 100644 index f6f3b554d..000000000 --- a/weed/images/preprocess.go +++ /dev/null @@ -1,29 +0,0 @@ -package images - -import ( - "bytes" - "io" - "path/filepath" - "strings" -) - -/* -* Preprocess image files on client side. -* 1. possibly adjust the orientation -* 2. resize the image to a width or height limit -* 3. 
remove the exif data -* Call this function on any file uploaded to SeaweedFS -* - */ -func MaybePreprocessImage(filename string, data []byte, width, height int) (resized io.ReadSeeker, w int, h int) { - ext := filepath.Ext(filename) - ext = strings.ToLower(ext) - switch ext { - case ".png", ".gif": - return Resized(ext, bytes.NewReader(data), width, height, "") - case ".jpg", ".jpeg": - data = FixJpgOrientation(data) - return Resized(ext, bytes.NewReader(data), width, height, "") - } - return bytes.NewReader(data), 0, 0 -} diff --git a/weed/images/resizing.go b/weed/images/resizing.go index ff0eff5e1..b048daa1c 100644 --- a/weed/images/resizing.go +++ b/weed/images/resizing.go @@ -6,10 +6,11 @@ import ( "image/gif" "image/jpeg" "image/png" + "io" - "github.com/chrislusf/seaweedfs/weed/glog" "github.com/disintegration/imaging" - "io" + + "github.com/chrislusf/seaweedfs/weed/glog" ) func Resized(ext string, read io.ReadSeeker, width, height int, mode string) (resized io.ReadSeeker, w int, h int) { @@ -35,6 +36,7 @@ func Resized(ext string, read io.ReadSeeker, width, height int, mode string) (re } } } else { + read.Seek(0, 0) return read, bounds.Dx(), bounds.Dy() } var buf bytes.Buffer diff --git a/weed/messaging/broker/broker_append.go b/weed/messaging/broker/broker_append.go new file mode 100644 index 000000000..80f107e00 --- /dev/null +++ b/weed/messaging/broker/broker_append.go @@ -0,0 +1,113 @@ +package broker + +import ( + "context" + "fmt" + "io" + + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/operation" + "github.com/chrislusf/seaweedfs/weed/pb" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/pb/messaging_pb" + "github.com/chrislusf/seaweedfs/weed/security" + "github.com/chrislusf/seaweedfs/weed/util" +) + +func (broker *MessageBroker) appendToFile(targetFile string, topicConfig *messaging_pb.TopicConfiguration, data []byte) error { + + assignResult, uploadResult, err2 := 
broker.assignAndUpload(topicConfig, data) + if err2 != nil { + return err2 + } + + dir, name := util.FullPath(targetFile).DirAndName() + + // append the chunk + if err := broker.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { + + request := &filer_pb.AppendToEntryRequest{ + Directory: dir, + EntryName: name, + Chunks: []*filer_pb.FileChunk{uploadResult.ToPbFileChunk(assignResult.Fid, 0)}, + } + + _, err := client.AppendToEntry(context.Background(), request) + if err != nil { + glog.V(0).Infof("append to file %v: %v", request, err) + return err + } + + return nil + }); err != nil { + return fmt.Errorf("append to file %v: %v", targetFile, err) + } + + return nil +} + +func (broker *MessageBroker) assignAndUpload(topicConfig *messaging_pb.TopicConfiguration, data []byte) (*operation.AssignResult, *operation.UploadResult, error) { + + var assignResult = &operation.AssignResult{} + + // assign a volume location + if err := broker.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { + + request := &filer_pb.AssignVolumeRequest{ + Count: 1, + Replication: topicConfig.Replication, + Collection: topicConfig.Collection, + } + + resp, err := client.AssignVolume(context.Background(), request) + if err != nil { + glog.V(0).Infof("assign volume failure %v: %v", request, err) + return err + } + if resp.Error != "" { + return fmt.Errorf("assign volume failure %v: %v", request, resp.Error) + } + + assignResult.Auth = security.EncodedJwt(resp.Auth) + assignResult.Fid = resp.FileId + assignResult.Url = resp.Url + assignResult.PublicUrl = resp.PublicUrl + assignResult.Count = uint64(resp.Count) + + return nil + }); err != nil { + return nil, nil, err + } + + // upload data + targetUrl := fmt.Sprintf("http://%s/%s", assignResult.Url, assignResult.Fid) + uploadResult, err := operation.UploadData(targetUrl, "", broker.option.Cipher, data, false, "", nil, assignResult.Auth) + if err != nil { + return nil, nil, fmt.Errorf("upload data %s: %v", targetUrl, err) + 
} + // println("uploaded to", targetUrl) + return assignResult, uploadResult, nil +} + +var _ = filer_pb.FilerClient(&MessageBroker{}) + +func (broker *MessageBroker) WithFilerClient(fn func(filer_pb.SeaweedFilerClient) error) (err error) { + + for _, filer := range broker.option.Filers { + if err = pb.WithFilerClient(filer, broker.grpcDialOption, fn); err != nil { + if err == io.EOF { + return + } + glog.V(0).Infof("fail to connect to %s: %v", filer, err) + } else { + break + } + } + + return + +} + +func (broker *MessageBroker) AdjustedUrl(hostAndPort string) string { + return hostAndPort +} diff --git a/weed/messaging/broker/broker_grpc_server.go b/weed/messaging/broker/broker_grpc_server.go new file mode 100644 index 000000000..abd5c9f73 --- /dev/null +++ b/weed/messaging/broker/broker_grpc_server.go @@ -0,0 +1,37 @@ +package broker + +import ( + "context" + "fmt" + + "github.com/chrislusf/seaweedfs/weed/filer2" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/pb/messaging_pb" +) + +func (broker *MessageBroker) ConfigureTopic(c context.Context, request *messaging_pb.ConfigureTopicRequest) (*messaging_pb.ConfigureTopicResponse, error) { + panic("implement me") +} + +func (broker *MessageBroker) DeleteTopic(c context.Context, request *messaging_pb.DeleteTopicRequest) (*messaging_pb.DeleteTopicResponse, error) { + resp := &messaging_pb.DeleteTopicResponse{} + dir, entry := genTopicDirEntry(request.Namespace, request.Topic) + if exists, err := filer_pb.Exists(broker, dir, entry, true); err != nil { + return nil, err + } else if exists { + err = filer_pb.Remove(broker, dir, entry, true, true, true, false) + } + return resp, nil +} + +func (broker *MessageBroker) GetTopicConfiguration(c context.Context, request *messaging_pb.GetTopicConfigurationRequest) (*messaging_pb.GetTopicConfigurationResponse, error) { + panic("implement me") +} + +func genTopicDir(namespace, topic string) string { + return fmt.Sprintf("%s/%s/%s", 
filer2.TopicsDir, namespace, topic) +} + +func genTopicDirEntry(namespace, topic string) (dir, entry string) { + return fmt.Sprintf("%s/%s", filer2.TopicsDir, namespace), topic +} diff --git a/weed/messaging/broker/broker_grpc_server_discovery.go b/weed/messaging/broker/broker_grpc_server_discovery.go new file mode 100644 index 000000000..3c14f3220 --- /dev/null +++ b/weed/messaging/broker/broker_grpc_server_discovery.go @@ -0,0 +1,116 @@ +package broker + +import ( + "context" + "fmt" + "time" + + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/pb/master_pb" + "github.com/chrislusf/seaweedfs/weed/pb/messaging_pb" +) + +/* +Topic discovery: + +When pub or sub connects, it ask for the whole broker list, and run consistent hashing to find the broker. + +The broker will check peers whether it is already hosted by some other broker, if that broker is alive and acknowledged alive, redirect to it. +Otherwise, just host the topic. + +So, if the pub or sub connects around the same time, they would connect to the same broker. Everyone is happy. +If one of the pub or sub connects very late, and the system topo changed quite a bit with new servers added or old servers died, checking peers will help. 
+ +*/ + +func (broker *MessageBroker) FindBroker(c context.Context, request *messaging_pb.FindBrokerRequest) (*messaging_pb.FindBrokerResponse, error) { + + t := &messaging_pb.FindBrokerResponse{} + var peers []string + + targetTopicPartition := fmt.Sprintf(TopicPartitionFmt, request.Namespace, request.Topic, request.Parition) + + for _, filer := range broker.option.Filers { + err := broker.withFilerClient(filer, func(client filer_pb.SeaweedFilerClient) error { + resp, err := client.LocateBroker(context.Background(), &filer_pb.LocateBrokerRequest{ + Resource: targetTopicPartition, + }) + if err != nil { + return err + } + if resp.Found && len(resp.Resources) > 0 { + t.Broker = resp.Resources[0].GrpcAddresses + return nil + } + for _, b := range resp.Resources { + peers = append(peers, b.GrpcAddresses) + } + return nil + }) + if err != nil { + return nil, err + } + } + + t.Broker = PickMember(peers, []byte(targetTopicPartition)) + + return t, nil + +} + +func (broker *MessageBroker) checkFilers() { + + // contact a filer about masters + var masters []string + found := false + for !found { + for _, filer := range broker.option.Filers { + err := broker.withFilerClient(filer, func(client filer_pb.SeaweedFilerClient) error { + resp, err := client.GetFilerConfiguration(context.Background(), &filer_pb.GetFilerConfigurationRequest{}) + if err != nil { + return err + } + masters = append(masters, resp.Masters...) 
+ return nil + }) + if err == nil { + found = true + break + } + glog.V(0).Infof("failed to read masters from %+v: %v", broker.option.Filers, err) + time.Sleep(time.Second) + } + } + glog.V(0).Infof("received master list: %s", masters) + + // contact each masters for filers + var filers []string + found = false + for !found { + for _, master := range masters { + err := broker.withMasterClient(master, func(client master_pb.SeaweedClient) error { + resp, err := client.ListMasterClients(context.Background(), &master_pb.ListMasterClientsRequest{ + ClientType: "filer", + }) + if err != nil { + return err + } + + filers = append(filers, resp.GrpcAddresses...) + + return nil + }) + if err == nil { + found = true + break + } + glog.V(0).Infof("failed to list filers: %v", err) + time.Sleep(time.Second) + } + } + glog.V(0).Infof("received filer list: %s", filers) + + broker.option.Filers = filers + +} diff --git a/weed/messaging/broker/broker_grpc_server_publish.go b/weed/messaging/broker/broker_grpc_server_publish.go new file mode 100644 index 000000000..dc11061af --- /dev/null +++ b/weed/messaging/broker/broker_grpc_server_publish.go @@ -0,0 +1,112 @@ +package broker + +import ( + "crypto/md5" + "fmt" + "io" + + "github.com/golang/protobuf/proto" + + "github.com/chrislusf/seaweedfs/weed/filer2" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/pb/messaging_pb" +) + +func (broker *MessageBroker) Publish(stream messaging_pb.SeaweedMessaging_PublishServer) error { + + // process initial request + in, err := stream.Recv() + if err == io.EOF { + return nil + } + if err != nil { + return err + } + + // TODO look it up + topicConfig := &messaging_pb.TopicConfiguration{ + // IsTransient: true, + } + + // send init response + initResponse := &messaging_pb.PublishResponse{ + Config: nil, + Redirect: nil, + } + err = stream.Send(initResponse) + if err != nil { + return err + } + if 
initResponse.Redirect != nil { + return nil + } + + // get lock + tp := TopicPartition{ + Namespace: in.Init.Namespace, + Topic: in.Init.Topic, + Partition: in.Init.Partition, + } + + tpDir := fmt.Sprintf("%s/%s/%s", filer2.TopicsDir, tp.Namespace, tp.Topic) + md5File := fmt.Sprintf("p%02d.md5", tp.Partition) + // println("chan data stored under", tpDir, "as", md5File) + + if exists, err := filer_pb.Exists(broker, tpDir, md5File, false); err == nil && exists { + return fmt.Errorf("channel is already closed") + } + + tl := broker.topicManager.RequestLock(tp, topicConfig, true) + defer broker.topicManager.ReleaseLock(tp, true) + + md5hash := md5.New() + // process each message + for { + // println("recv") + in, err := stream.Recv() + // glog.V(0).Infof("recieved %v err: %v", in, err) + if err == io.EOF { + return nil + } + if err != nil { + return err + } + + if in.Data == nil { + continue + } + + // fmt.Printf("received: %d : %s\n", len(in.Data.Value), string(in.Data.Value)) + + data, err := proto.Marshal(in.Data) + if err != nil { + glog.Errorf("marshall error: %v\n", err) + continue + } + + tl.logBuffer.AddToBuffer(in.Data.Key, data) + + if in.Data.IsClose { + // println("server received closing") + break + } + + md5hash.Write(in.Data.Value) + + } + + if err := broker.appendToFile(tpDir+"/"+md5File, topicConfig, md5hash.Sum(nil)); err != nil { + glog.V(0).Infof("err writing %s: %v", md5File, err) + } + + // fmt.Printf("received md5 %X\n", md5hash.Sum(nil)) + + // send the close ack + // println("server send ack closing") + if err := stream.Send(&messaging_pb.PublishResponse{IsClosed: true}); err != nil { + glog.V(0).Infof("err sending close response: %v", err) + } + return nil + +} diff --git a/weed/messaging/broker/broker_grpc_server_subscribe.go b/weed/messaging/broker/broker_grpc_server_subscribe.go new file mode 100644 index 000000000..9a7d653b5 --- /dev/null +++ b/weed/messaging/broker/broker_grpc_server_subscribe.go @@ -0,0 +1,162 @@ +package broker + 
+import ( + "fmt" + "io" + "strings" + "time" + + "github.com/golang/protobuf/proto" + + "github.com/chrislusf/seaweedfs/weed/filer2" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/pb/messaging_pb" +) + +func (broker *MessageBroker) Subscribe(stream messaging_pb.SeaweedMessaging_SubscribeServer) error { + + // process initial request + in, err := stream.Recv() + if err == io.EOF { + return nil + } + if err != nil { + return err + } + + var processedTsNs int64 + var messageCount int64 + subscriberId := in.Init.SubscriberId + + // TODO look it up + topicConfig := &messaging_pb.TopicConfiguration{ + // IsTransient: true, + } + + // get lock + tp := TopicPartition{ + Namespace: in.Init.Namespace, + Topic: in.Init.Topic, + Partition: in.Init.Partition, + } + fmt.Printf("+ subscriber %s for %s\n", subscriberId, tp.String()) + defer func() { + fmt.Printf("- subscriber %s for %s %d messages last %v\n", subscriberId, tp.String(), messageCount, time.Unix(0, processedTsNs)) + }() + + lock := broker.topicManager.RequestLock(tp, topicConfig, false) + defer broker.topicManager.ReleaseLock(tp, false) + + isConnected := true + go func() { + for isConnected { + if _, err := stream.Recv(); err != nil { + // println("disconnecting connection to", subscriberId, tp.String()) + isConnected = false + lock.cond.Signal() + } + } + }() + + lastReadTime := time.Now() + switch in.Init.StartPosition { + case messaging_pb.SubscriberMessage_InitMessage_TIMESTAMP: + lastReadTime = time.Unix(0, in.Init.TimestampNs) + case messaging_pb.SubscriberMessage_InitMessage_LATEST: + case messaging_pb.SubscriberMessage_InitMessage_EARLIEST: + lastReadTime = time.Unix(0, 0) + } + + // how to process each message + // an error returned will end the subscription + eachMessageFn := func(m *messaging_pb.Message) error { + err := stream.Send(&messaging_pb.BrokerMessage{ + Data: m, + }) + if err != nil { + glog.V(0).Infof("=> 
subscriber %v: %+v", subscriberId, err) + } + return err + } + + eachLogEntryFn := func(logEntry *filer_pb.LogEntry) error { + m := &messaging_pb.Message{} + if err = proto.Unmarshal(logEntry.Data, m); err != nil { + glog.Errorf("unexpected unmarshal messaging_pb.Message: %v", err) + return err + } + // fmt.Printf("sending : %d bytes ts %d\n", len(m.Value), logEntry.TsNs) + if err = eachMessageFn(m); err != nil { + glog.Errorf("sending %d bytes to %s: %s", len(m.Value), subscriberId, err) + return err + } + if m.IsClose { + // println("processed EOF") + return io.EOF + } + processedTsNs = logEntry.TsNs + messageCount++ + return nil + } + + if err = broker.readPersistedLogBuffer(&tp, lastReadTime, eachLogEntryFn); err != nil { + if err != io.EOF { + // println("stopping from persisted logs", err.Error()) + return err + } + } + + if processedTsNs != 0 { + lastReadTime = time.Unix(0, processedTsNs) + } + + // fmt.Printf("subscriber %s read %d on disk log %v\n", subscriberId, messageCount, lastReadTime) + + err = lock.logBuffer.LoopProcessLogData(lastReadTime, func() bool { + lock.Mutex.Lock() + lock.cond.Wait() + lock.Mutex.Unlock() + return isConnected + }, eachLogEntryFn) + + return err + +} + +func (broker *MessageBroker) readPersistedLogBuffer(tp *TopicPartition, startTime time.Time, eachLogEntryFn func(logEntry *filer_pb.LogEntry) error) (err error) { + startDate := fmt.Sprintf("%04d-%02d-%02d", startTime.Year(), startTime.Month(), startTime.Day()) + startHourMinute := fmt.Sprintf("%02d-%02d.segment", startTime.Hour(), startTime.Minute()) + + sizeBuf := make([]byte, 4) + startTsNs := startTime.UnixNano() + + topicDir := genTopicDir(tp.Namespace, tp.Topic) + partitionSuffix := fmt.Sprintf(".part%02d", tp.Partition) + + return filer_pb.List(broker, topicDir, "", func(dayEntry *filer_pb.Entry, isLast bool) error { + dayDir := fmt.Sprintf("%s/%s", topicDir, dayEntry.Name) + return filer_pb.List(broker, dayDir, "", func(hourMinuteEntry *filer_pb.Entry, isLast bool) 
error { + if dayEntry.Name == startDate { + if strings.Compare(hourMinuteEntry.Name, startHourMinute) < 0 { + return nil + } + } + if !strings.HasSuffix(hourMinuteEntry.Name, partitionSuffix) { + return nil + } + // println("partition", tp.Partition, "processing", dayDir, "/", hourMinuteEntry.Name) + chunkedFileReader := filer2.NewChunkStreamReader(broker, hourMinuteEntry.Chunks) + defer chunkedFileReader.Close() + if _, err := filer2.ReadEachLogEntry(chunkedFileReader, sizeBuf, startTsNs, eachLogEntryFn); err != nil { + chunkedFileReader.Close() + if err == io.EOF { + return err + } + return fmt.Errorf("reading %s/%s: %v", dayDir, hourMinuteEntry.Name, err) + } + return nil + }, "", false, 24*60) + }, startDate, true, 366) + +} diff --git a/weed/messaging/broker/broker_server.go b/weed/messaging/broker/broker_server.go new file mode 100644 index 000000000..0c04d2841 --- /dev/null +++ b/weed/messaging/broker/broker_server.go @@ -0,0 +1,112 @@ +package broker + +import ( + "context" + "time" + + "google.golang.org/grpc" + + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/pb/master_pb" +) + +type MessageBrokerOption struct { + Filers []string + DefaultReplication string + MaxMB int + Ip string + Port int + Cipher bool +} + +type MessageBroker struct { + option *MessageBrokerOption + grpcDialOption grpc.DialOption + topicManager *TopicManager +} + +func NewMessageBroker(option *MessageBrokerOption, grpcDialOption grpc.DialOption) (messageBroker *MessageBroker, err error) { + + messageBroker = &MessageBroker{ + option: option, + grpcDialOption: grpcDialOption, + } + + messageBroker.topicManager = NewTopicManager(messageBroker) + + messageBroker.checkFilers() + + go messageBroker.keepConnectedToOneFiler() + + return messageBroker, nil +} + +func (broker *MessageBroker) keepConnectedToOneFiler() { + + for { + for _, filer := range 
broker.option.Filers { + broker.withFilerClient(filer, func(client filer_pb.SeaweedFilerClient) error { + stream, err := client.KeepConnected(context.Background()) + if err != nil { + glog.V(0).Infof("%s:%d failed to keep connected to %s: %v", broker.option.Ip, broker.option.Port, filer, err) + return err + } + + initRequest := &filer_pb.KeepConnectedRequest{ + Name: broker.option.Ip, + GrpcPort: uint32(broker.option.Port), + } + for _, tp := range broker.topicManager.ListTopicPartitions() { + initRequest.Resources = append(initRequest.Resources, tp.String()) + } + if err := stream.Send(&filer_pb.KeepConnectedRequest{ + Name: broker.option.Ip, + GrpcPort: uint32(broker.option.Port), + }); err != nil { + glog.V(0).Infof("broker %s:%d failed to init at %s: %v", broker.option.Ip, broker.option.Port, filer, err) + return err + } + + // TODO send events of adding/removing topics + + glog.V(0).Infof("conntected with filer: %v", filer) + for { + if err := stream.Send(&filer_pb.KeepConnectedRequest{ + Name: broker.option.Ip, + GrpcPort: uint32(broker.option.Port), + }); err != nil { + glog.V(0).Infof("%s:%d failed to sendto %s: %v", broker.option.Ip, broker.option.Port, filer, err) + return err + } + // println("send heartbeat") + if _, err := stream.Recv(); err != nil { + glog.V(0).Infof("%s:%d failed to receive from %s: %v", broker.option.Ip, broker.option.Port, filer, err) + return err + } + // println("received reply") + time.Sleep(11 * time.Second) + // println("woke up") + } + return nil + }) + time.Sleep(3 * time.Second) + } + } + +} + +func (broker *MessageBroker) withFilerClient(filer string, fn func(filer_pb.SeaweedFilerClient) error) error { + + return pb.WithFilerClient(filer, broker.grpcDialOption, fn) + +} + +func (broker *MessageBroker) withMasterClient(master string, fn func(client master_pb.SeaweedClient) error) error { + + return pb.WithMasterClient(master, broker.grpcDialOption, func(client master_pb.SeaweedClient) error { + return fn(client) + }) + +} 
diff --git a/weed/messaging/broker/consistent_distribution.go b/weed/messaging/broker/consistent_distribution.go new file mode 100644 index 000000000..465a2a8f2 --- /dev/null +++ b/weed/messaging/broker/consistent_distribution.go @@ -0,0 +1,38 @@ +package broker + +import ( + "github.com/buraksezer/consistent" + "github.com/cespare/xxhash" +) + +type Member string + +func (m Member) String() string { + return string(m) +} + +type hasher struct{} + +func (h hasher) Sum64(data []byte) uint64 { + return xxhash.Sum64(data) +} + +func PickMember(members []string, key []byte) string { + cfg := consistent.Config{ + PartitionCount: 9791, + ReplicationFactor: 2, + Load: 1.25, + Hasher: hasher{}, + } + + cmembers := []consistent.Member{} + for _, m := range members { + cmembers = append(cmembers, Member(m)) + } + + c := consistent.New(cmembers, cfg) + + m := c.LocateKey(key) + + return m.String() +} diff --git a/weed/messaging/broker/consistent_distribution_test.go b/weed/messaging/broker/consistent_distribution_test.go new file mode 100644 index 000000000..f58fe4e0e --- /dev/null +++ b/weed/messaging/broker/consistent_distribution_test.go @@ -0,0 +1,32 @@ +package broker + +import ( + "fmt" + "testing" +) + +func TestPickMember(t *testing.T) { + + servers := []string{ + "s1:port", + "s2:port", + "s3:port", + "s5:port", + "s4:port", + } + + total := 1000 + + distribution := make(map[string]int) + for i := 0; i < total; i++ { + tp := fmt.Sprintf("tp:%2d", i) + m := PickMember(servers, []byte(tp)) + // println(tp, "=>", m) + distribution[m]++ + } + + for member, count := range distribution { + fmt.Printf("member: %s, key count: %d load=%.2f\n", member, count, float64(count*100)/float64(total/len(servers))) + } + +} diff --git a/weed/messaging/broker/topic_manager.go b/weed/messaging/broker/topic_manager.go new file mode 100644 index 000000000..b563fffa1 --- /dev/null +++ b/weed/messaging/broker/topic_manager.go @@ -0,0 +1,123 @@ +package broker + +import ( + "fmt" + "sync" + 
"time" + + "github.com/chrislusf/seaweedfs/weed/filer2" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/messaging_pb" + "github.com/chrislusf/seaweedfs/weed/util/log_buffer" +) + +type TopicPartition struct { + Namespace string + Topic string + Partition int32 +} + +const ( + TopicPartitionFmt = "%s/%s_%02d" +) + +func (tp *TopicPartition) String() string { + return fmt.Sprintf(TopicPartitionFmt, tp.Namespace, tp.Topic, tp.Partition) +} + +type TopicControl struct { + sync.Mutex + cond *sync.Cond + subscriberCount int + publisherCount int + logBuffer *log_buffer.LogBuffer +} + +type TopicManager struct { + sync.Mutex + topicControls map[TopicPartition]*TopicControl + broker *MessageBroker +} + +func NewTopicManager(messageBroker *MessageBroker) *TopicManager { + return &TopicManager{ + topicControls: make(map[TopicPartition]*TopicControl), + broker: messageBroker, + } +} + +func (tm *TopicManager) buildLogBuffer(tl *TopicControl, tp TopicPartition, topicConfig *messaging_pb.TopicConfiguration) *log_buffer.LogBuffer { + + flushFn := func(startTime, stopTime time.Time, buf []byte) { + + if topicConfig.IsTransient { + // return + } + + // fmt.Printf("flushing with topic config %+v\n", topicConfig) + + targetFile := fmt.Sprintf( + "%s/%s/%s/%04d-%02d-%02d/%02d-%02d.part%02d", + filer2.TopicsDir, tp.Namespace, tp.Topic, + startTime.Year(), startTime.Month(), startTime.Day(), startTime.Hour(), startTime.Minute(), + tp.Partition, + ) + + if err := tm.broker.appendToFile(targetFile, topicConfig, buf); err != nil { + glog.V(0).Infof("log write failed %s: %v", targetFile, err) + } + } + logBuffer := log_buffer.NewLogBuffer(time.Minute, flushFn, func() { + tl.cond.Broadcast() + }) + + return logBuffer +} + +func (tm *TopicManager) RequestLock(partition TopicPartition, topicConfig *messaging_pb.TopicConfiguration, isPublisher bool) *TopicControl { + tm.Lock() + defer tm.Unlock() + + tc, found := tm.topicControls[partition] + if !found { + 
tc = &TopicControl{} + tc.cond = sync.NewCond(&tc.Mutex) + tm.topicControls[partition] = tc + tc.logBuffer = tm.buildLogBuffer(tc, partition, topicConfig) + } + if isPublisher { + tc.publisherCount++ + } else { + tc.subscriberCount++ + } + return tc +} + +func (tm *TopicManager) ReleaseLock(partition TopicPartition, isPublisher bool) { + tm.Lock() + defer tm.Unlock() + + lock, found := tm.topicControls[partition] + if !found { + return + } + if isPublisher { + lock.publisherCount-- + } else { + lock.subscriberCount-- + } + if lock.subscriberCount <= 0 && lock.publisherCount <= 0 { + delete(tm.topicControls, partition) + lock.logBuffer.Shutdown() + } +} + +func (tm *TopicManager) ListTopicPartitions() (tps []TopicPartition) { + tm.Lock() + defer tm.Unlock() + + for k := range tm.topicControls { + tps = append(tps, k) + } + return +} diff --git a/weed/messaging/msgclient/chan_config.go b/weed/messaging/msgclient/chan_config.go new file mode 100644 index 000000000..a75678815 --- /dev/null +++ b/weed/messaging/msgclient/chan_config.go @@ -0,0 +1,5 @@ +package msgclient + +func (mc *MessagingClient) DeleteChannel(chanName string) error { + return mc.DeleteTopic("chan", chanName) +} diff --git a/weed/messaging/msgclient/chan_pub.go b/weed/messaging/msgclient/chan_pub.go new file mode 100644 index 000000000..9bc88f7c0 --- /dev/null +++ b/weed/messaging/msgclient/chan_pub.go @@ -0,0 +1,76 @@ +package msgclient + +import ( + "crypto/md5" + "hash" + "io" + "log" + + "google.golang.org/grpc" + + "github.com/chrislusf/seaweedfs/weed/messaging/broker" + "github.com/chrislusf/seaweedfs/weed/pb/messaging_pb" +) + +type PubChannel struct { + client messaging_pb.SeaweedMessaging_PublishClient + grpcConnection *grpc.ClientConn + md5hash hash.Hash +} + +func (mc *MessagingClient) NewPubChannel(chanName string) (*PubChannel, error) { + tp := broker.TopicPartition{ + Namespace: "chan", + Topic: chanName, + Partition: 0, + } + grpcConnection, err := mc.findBroker(tp) + if err != nil { + 
return nil, err + } + pc, err := setupPublisherClient(grpcConnection, tp) + if err != nil { + return nil, err + } + return &PubChannel{ + client: pc, + grpcConnection: grpcConnection, + md5hash: md5.New(), + }, nil +} + +func (pc *PubChannel) Publish(m []byte) error { + err := pc.client.Send(&messaging_pb.PublishRequest{ + Data: &messaging_pb.Message{ + Value: m, + }, + }) + if err == nil { + pc.md5hash.Write(m) + } + return err +} +func (pc *PubChannel) Close() error { + + // println("send closing") + if err := pc.client.Send(&messaging_pb.PublishRequest{ + Data: &messaging_pb.Message{ + IsClose: true, + }, + }); err != nil { + log.Printf("err send close: %v", err) + } + // println("receive closing") + if _, err := pc.client.Recv(); err != nil && err != io.EOF { + log.Printf("err receive close: %v", err) + } + // println("close connection") + if err := pc.grpcConnection.Close(); err != nil { + log.Printf("err connection close: %v", err) + } + return nil +} + +func (pc *PubChannel) Md5() []byte { + return pc.md5hash.Sum(nil) +} diff --git a/weed/messaging/msgclient/chan_sub.go b/weed/messaging/msgclient/chan_sub.go new file mode 100644 index 000000000..213ff4666 --- /dev/null +++ b/weed/messaging/msgclient/chan_sub.go @@ -0,0 +1,85 @@ +package msgclient + +import ( + "context" + "crypto/md5" + "hash" + "io" + "log" + "time" + + "github.com/chrislusf/seaweedfs/weed/messaging/broker" + "github.com/chrislusf/seaweedfs/weed/pb/messaging_pb" +) + +type SubChannel struct { + ch chan []byte + stream messaging_pb.SeaweedMessaging_SubscribeClient + md5hash hash.Hash + cancel context.CancelFunc +} + +func (mc *MessagingClient) NewSubChannel(subscriberId, chanName string) (*SubChannel, error) { + tp := broker.TopicPartition{ + Namespace: "chan", + Topic: chanName, + Partition: 0, + } + grpcConnection, err := mc.findBroker(tp) + if err != nil { + return nil, err + } + ctx, cancel := context.WithCancel(context.Background()) + sc, err := setupSubscriberClient(ctx, 
grpcConnection, tp, subscriberId, time.Unix(0, 0)) + if err != nil { + return nil, err + } + + t := &SubChannel{ + ch: make(chan []byte), + stream: sc, + md5hash: md5.New(), + cancel: cancel, + } + + go func() { + for { + resp, subErr := t.stream.Recv() + if subErr == io.EOF { + return + } + if subErr != nil { + log.Printf("fail to receive from netchan %s: %v", chanName, subErr) + return + } + if resp.Data == nil { + // this could be heartbeat from broker + continue + } + if resp.Data.IsClose { + t.stream.Send(&messaging_pb.SubscriberMessage{ + IsClose: true, + }) + close(t.ch) + cancel() + return + } + t.ch <- resp.Data.Value + t.md5hash.Write(resp.Data.Value) + } + }() + + return t, nil +} + +func (sc *SubChannel) Channel() chan []byte { + return sc.ch +} + +func (sc *SubChannel) Md5() []byte { + return sc.md5hash.Sum(nil) +} + +func (sc *SubChannel) Cancel() { + sc.cancel() +} diff --git a/weed/messaging/msgclient/client.go b/weed/messaging/msgclient/client.go new file mode 100644 index 000000000..4d7ef2b8e --- /dev/null +++ b/weed/messaging/msgclient/client.go @@ -0,0 +1,55 @@ +package msgclient + +import ( + "context" + "fmt" + "log" + + "google.golang.org/grpc" + + "github.com/chrislusf/seaweedfs/weed/messaging/broker" + "github.com/chrislusf/seaweedfs/weed/pb" + "github.com/chrislusf/seaweedfs/weed/pb/messaging_pb" + "github.com/chrislusf/seaweedfs/weed/security" + "github.com/chrislusf/seaweedfs/weed/util" +) + +type MessagingClient struct { + bootstrapBrokers []string + grpcConnections map[broker.TopicPartition]*grpc.ClientConn + grpcDialOption grpc.DialOption +} + +func NewMessagingClient(bootstrapBrokers ...string) *MessagingClient { + return &MessagingClient{ + bootstrapBrokers: bootstrapBrokers, + grpcConnections: make(map[broker.TopicPartition]*grpc.ClientConn), + grpcDialOption: security.LoadClientTLS(util.GetViper(), "grpc.msg_client"), + } +} + +func (mc *MessagingClient) findBroker(tp broker.TopicPartition) (*grpc.ClientConn, error) { + + for _, 
broker := range mc.bootstrapBrokers { + grpcConnection, err := pb.GrpcDial(context.Background(), broker, mc.grpcDialOption) + if err != nil { + log.Printf("dial broker %s: %v", broker, err) + continue + } + defer grpcConnection.Close() + + resp, err := messaging_pb.NewSeaweedMessagingClient(grpcConnection).FindBroker(context.Background(), + &messaging_pb.FindBrokerRequest{ + Namespace: tp.Namespace, + Topic: tp.Topic, + Parition: tp.Partition, + }) + if err != nil { + return nil, err + } + + targetBroker := resp.Broker + return pb.GrpcDial(context.Background(), targetBroker, mc.grpcDialOption) + } + return nil, fmt.Errorf("no broker found for %+v", tp) +} diff --git a/weed/messaging/msgclient/config.go b/weed/messaging/msgclient/config.go new file mode 100644 index 000000000..2b9eba1a8 --- /dev/null +++ b/weed/messaging/msgclient/config.go @@ -0,0 +1,63 @@ +package msgclient + +import ( + "context" + "log" + + "github.com/chrislusf/seaweedfs/weed/messaging/broker" + "github.com/chrislusf/seaweedfs/weed/pb" + "github.com/chrislusf/seaweedfs/weed/pb/messaging_pb" +) + +func (mc *MessagingClient) configureTopic(tp broker.TopicPartition) error { + + return mc.withAnyBroker(func(client messaging_pb.SeaweedMessagingClient) error { + _, err := client.ConfigureTopic(context.Background(), + &messaging_pb.ConfigureTopicRequest{ + Namespace: tp.Namespace, + Topic: tp.Topic, + Configuration: &messaging_pb.TopicConfiguration{ + PartitionCount: 0, + Collection: "", + Replication: "", + IsTransient: false, + Partitoning: 0, + }, + }) + return err + }) + +} + +func (mc *MessagingClient) DeleteTopic(namespace, topic string) error { + + return mc.withAnyBroker(func(client messaging_pb.SeaweedMessagingClient) error { + _, err := client.DeleteTopic(context.Background(), + &messaging_pb.DeleteTopicRequest{ + Namespace: namespace, + Topic: topic, + }) + return err + }) +} + +func (mc *MessagingClient) withAnyBroker(fn func(client messaging_pb.SeaweedMessagingClient) error) error { + + 
var lastErr error + for _, broker := range mc.bootstrapBrokers { + grpcConnection, err := pb.GrpcDial(context.Background(), broker, mc.grpcDialOption) + if err != nil { + log.Printf("dial broker %s: %v", broker, err) + continue + } + defer grpcConnection.Close() + + err = fn(messaging_pb.NewSeaweedMessagingClient(grpcConnection)) + if err == nil { + return nil + } + lastErr = err + } + + return lastErr +} diff --git a/weed/messaging/msgclient/publisher.go b/weed/messaging/msgclient/publisher.go new file mode 100644 index 000000000..1aa483ff8 --- /dev/null +++ b/weed/messaging/msgclient/publisher.go @@ -0,0 +1,118 @@ +package msgclient + +import ( + "context" + + "github.com/OneOfOne/xxhash" + "google.golang.org/grpc" + + "github.com/chrislusf/seaweedfs/weed/messaging/broker" + "github.com/chrislusf/seaweedfs/weed/pb/messaging_pb" +) + +type Publisher struct { + publishClients []messaging_pb.SeaweedMessaging_PublishClient + topicConfiguration *messaging_pb.TopicConfiguration + messageCount uint64 + publisherId string +} + +func (mc *MessagingClient) NewPublisher(publisherId, namespace, topic string) (*Publisher, error) { + // read topic configuration + topicConfiguration := &messaging_pb.TopicConfiguration{ + PartitionCount: 4, + } + publishClients := make([]messaging_pb.SeaweedMessaging_PublishClient, topicConfiguration.PartitionCount) + for i := 0; i < int(topicConfiguration.PartitionCount); i++ { + tp := broker.TopicPartition{ + Namespace: namespace, + Topic: topic, + Partition: int32(i), + } + grpcClientConn, err := mc.findBroker(tp) + if err != nil { + return nil, err + } + client, err := setupPublisherClient(grpcClientConn, tp) + if err != nil { + return nil, err + } + publishClients[i] = client + } + return &Publisher{ + publishClients: publishClients, + topicConfiguration: topicConfiguration, + }, nil +} + +func setupPublisherClient(grpcConnection *grpc.ClientConn, tp broker.TopicPartition) (messaging_pb.SeaweedMessaging_PublishClient, error) { + + stream, 
err := messaging_pb.NewSeaweedMessagingClient(grpcConnection).Publish(context.Background()) + if err != nil { + return nil, err + } + + // send init message + err = stream.Send(&messaging_pb.PublishRequest{ + Init: &messaging_pb.PublishRequest_InitMessage{ + Namespace: tp.Namespace, + Topic: tp.Topic, + Partition: tp.Partition, + }, + }) + if err != nil { + return nil, err + } + + // process init response + initResponse, err := stream.Recv() + if err != nil { + return nil, err + } + if initResponse.Redirect != nil { + // TODO follow redirection + } + if initResponse.Config != nil { + } + + // setup looks for control messages + doneChan := make(chan error, 1) + go func() { + for { + in, err := stream.Recv() + if err != nil { + doneChan <- err + return + } + if in.Redirect != nil { + } + if in.Config != nil { + } + } + }() + + return stream, nil + +} + +func (p *Publisher) Publish(m *messaging_pb.Message) error { + hashValue := p.messageCount + p.messageCount++ + if p.topicConfiguration.Partitoning == messaging_pb.TopicConfiguration_NonNullKeyHash { + if m.Key != nil { + hashValue = xxhash.Checksum64(m.Key) + } + } else if p.topicConfiguration.Partitoning == messaging_pb.TopicConfiguration_KeyHash { + hashValue = xxhash.Checksum64(m.Key) + } else { + // round robin + } + + idx := int(hashValue) % len(p.publishClients) + if idx < 0 { + idx += len(p.publishClients) + } + return p.publishClients[idx].Send(&messaging_pb.PublishRequest{ + Data: m, + }) +} diff --git a/weed/messaging/msgclient/subscriber.go b/weed/messaging/msgclient/subscriber.go new file mode 100644 index 000000000..6c7dc1ab7 --- /dev/null +++ b/weed/messaging/msgclient/subscriber.go @@ -0,0 +1,120 @@ +package msgclient + +import ( + "context" + "io" + "sync" + "time" + + "github.com/chrislusf/seaweedfs/weed/messaging/broker" + "github.com/chrislusf/seaweedfs/weed/pb/messaging_pb" + "google.golang.org/grpc" +) + +type Subscriber struct { + subscriberClients []messaging_pb.SeaweedMessaging_SubscribeClient 
+ subscriberCancels []context.CancelFunc + subscriberId string +} + +func (mc *MessagingClient) NewSubscriber(subscriberId, namespace, topic string, partitionId int, startTime time.Time) (*Subscriber, error) { + // read topic configuration + topicConfiguration := &messaging_pb.TopicConfiguration{ + PartitionCount: 4, + } + subscriberClients := make([]messaging_pb.SeaweedMessaging_SubscribeClient, topicConfiguration.PartitionCount) + subscriberCancels := make([]context.CancelFunc, topicConfiguration.PartitionCount) + + for i := 0; i < int(topicConfiguration.PartitionCount); i++ { + if partitionId >= 0 && i != partitionId { + continue + } + tp := broker.TopicPartition{ + Namespace: namespace, + Topic: topic, + Partition: int32(i), + } + grpcClientConn, err := mc.findBroker(tp) + if err != nil { + return nil, err + } + ctx, cancel := context.WithCancel(context.Background()) + client, err := setupSubscriberClient(ctx, grpcClientConn, tp, subscriberId, startTime) + if err != nil { + return nil, err + } + subscriberClients[i] = client + subscriberCancels[i] = cancel + } + + return &Subscriber{ + subscriberClients: subscriberClients, + subscriberCancels: subscriberCancels, + subscriberId: subscriberId, + }, nil +} + +func setupSubscriberClient(ctx context.Context, grpcConnection *grpc.ClientConn, tp broker.TopicPartition, subscriberId string, startTime time.Time) (stream messaging_pb.SeaweedMessaging_SubscribeClient, err error) { + stream, err = messaging_pb.NewSeaweedMessagingClient(grpcConnection).Subscribe(ctx) + if err != nil { + return + } + + // send init message + err = stream.Send(&messaging_pb.SubscriberMessage{ + Init: &messaging_pb.SubscriberMessage_InitMessage{ + Namespace: tp.Namespace, + Topic: tp.Topic, + Partition: tp.Partition, + StartPosition: messaging_pb.SubscriberMessage_InitMessage_TIMESTAMP, + TimestampNs: startTime.UnixNano(), + SubscriberId: subscriberId, + }, + }) + if err != nil { + return + } + + return stream, nil +} + +func 
doSubscribe(subscriberClient messaging_pb.SeaweedMessaging_SubscribeClient, processFn func(m *messaging_pb.Message)) error { + for { + resp, listenErr := subscriberClient.Recv() + if listenErr == io.EOF { + return nil + } + if listenErr != nil { + println(listenErr.Error()) + return listenErr + } + if resp.Data == nil { + // this could be heartbeat from broker + continue + } + processFn(resp.Data) + } +} + +// Subscribe starts goroutines to process the messages +func (s *Subscriber) Subscribe(processFn func(m *messaging_pb.Message)) { + var wg sync.WaitGroup + for i := 0; i < len(s.subscriberClients); i++ { + if s.subscriberClients[i] != nil { + wg.Add(1) + go func(subscriberClient messaging_pb.SeaweedMessaging_SubscribeClient) { + defer wg.Done() + doSubscribe(subscriberClient, processFn) + }(s.subscriberClients[i]) + } + } + wg.Wait() +} + +func (s *Subscriber) Shutdown() { + for i := 0; i < len(s.subscriberClients); i++ { + if s.subscriberCancels[i] != nil { + s.subscriberCancels[i]() + } + } +} diff --git a/weed/notification/aws_sqs/aws_sqs_pub.go b/weed/notification/aws_sqs/aws_sqs_pub.go index c1af7f27a..d881049dd 100644 --- a/weed/notification/aws_sqs/aws_sqs_pub.go +++ b/weed/notification/aws_sqs/aws_sqs_pub.go @@ -27,24 +27,24 @@ func (k *AwsSqsPub) GetName() string { return "aws_sqs" } -func (k *AwsSqsPub) Initialize(configuration util.Configuration) (err error) { - glog.V(0).Infof("filer.notification.aws_sqs.region: %v", configuration.GetString("region")) - glog.V(0).Infof("filer.notification.aws_sqs.sqs_queue_name: %v", configuration.GetString("sqs_queue_name")) +func (k *AwsSqsPub) Initialize(configuration util.Configuration, prefix string) (err error) { + glog.V(0).Infof("filer.notification.aws_sqs.region: %v", configuration.GetString(prefix+"region")) + glog.V(0).Infof("filer.notification.aws_sqs.sqs_queue_name: %v", configuration.GetString(prefix+"sqs_queue_name")) return k.initialize( - configuration.GetString("aws_access_key_id"), - 
configuration.GetString("aws_secret_access_key"), - configuration.GetString("region"), - configuration.GetString("sqs_queue_name"), + configuration.GetString(prefix+"aws_access_key_id"), + configuration.GetString(prefix+"aws_secret_access_key"), + configuration.GetString(prefix+"region"), + configuration.GetString(prefix+"sqs_queue_name"), ) } -func (k *AwsSqsPub) initialize(awsAccessKeyId, aswSecretAccessKey, region, queueName string) (err error) { +func (k *AwsSqsPub) initialize(awsAccessKeyId, awsSecretAccessKey, region, queueName string) (err error) { config := &aws.Config{ Region: aws.String(region), } - if awsAccessKeyId != "" && aswSecretAccessKey != "" { - config.Credentials = credentials.NewStaticCredentials(awsAccessKeyId, aswSecretAccessKey, "") + if awsAccessKeyId != "" && awsSecretAccessKey != "" { + config.Credentials = credentials.NewStaticCredentials(awsAccessKeyId, awsSecretAccessKey, "") } sess, err := session.NewSession(config) diff --git a/weed/notification/configuration.go b/weed/notification/configuration.go index 7f8765cc3..36211692c 100644 --- a/weed/notification/configuration.go +++ b/weed/notification/configuration.go @@ -11,7 +11,7 @@ type MessageQueue interface { // GetName gets the name to locate the configuration in filer.toml file GetName() string // Initialize initializes the file store - Initialize(configuration util.Configuration) error + Initialize(configuration util.Configuration, prefix string) error SendMessage(key string, message proto.Message) error } @@ -21,7 +21,7 @@ var ( Queue MessageQueue ) -func LoadConfiguration(config *viper.Viper) { +func LoadConfiguration(config *viper.Viper, prefix string) { if config == nil { return @@ -30,9 +30,8 @@ func LoadConfiguration(config *viper.Viper) { validateOneEnabledQueue(config) for _, queue := range MessageQueues { - if config.GetBool(queue.GetName() + ".enabled") { - viperSub := config.Sub(queue.GetName()) - if err := queue.Initialize(viperSub); err != nil { + if 
config.GetBool(prefix + queue.GetName() + ".enabled") { + if err := queue.Initialize(config, prefix+queue.GetName()+"."); err != nil { glog.Fatalf("Failed to initialize notification for %s: %+v", queue.GetName(), err) } diff --git a/weed/notification/gocdk_pub_sub/gocdk_pub_sub.go b/weed/notification/gocdk_pub_sub/gocdk_pub_sub.go new file mode 100644 index 000000000..1ae102509 --- /dev/null +++ b/weed/notification/gocdk_pub_sub/gocdk_pub_sub.go @@ -0,0 +1,71 @@ +// Package gocdk_pub_sub supports the Go CDK (Cloud Development Kit) PubSub API, +// which in turn supports many providers, including Amazon SNS/SQS, Azure Service Bus, +// Google Cloud PubSub, and RabbitMQ. +// +// In the config, select a provider and topic using a URL. See +// https://godoc.org/gocloud.dev/pubsub and its sub-packages for details. +// +// The Go CDK PubSub API does not support administrative operations like topic +// creation. Create the topic using a UI, CLI or provider-specific API before running +// weed. +// +// The Go CDK obtains credentials via environment variables and other +// provider-specific default mechanisms. See the provider's documentation for +// details. 
+package gocdk_pub_sub + +import ( + "context" + "fmt" + + "github.com/golang/protobuf/proto" + "gocloud.dev/pubsub" + _ "gocloud.dev/pubsub/awssnssqs" + + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/notification" + "github.com/chrislusf/seaweedfs/weed/util" + // _ "gocloud.dev/pubsub/azuresb" + _ "gocloud.dev/pubsub/gcppubsub" + _ "gocloud.dev/pubsub/natspubsub" + _ "gocloud.dev/pubsub/rabbitpubsub" +) + +func init() { + notification.MessageQueues = append(notification.MessageQueues, &GoCDKPubSub{}) +} + +type GoCDKPubSub struct { + topicURL string + topic *pubsub.Topic +} + +func (k *GoCDKPubSub) GetName() string { + return "gocdk_pub_sub" +} + +func (k *GoCDKPubSub) Initialize(configuration util.Configuration, prefix string) error { + k.topicURL = configuration.GetString(prefix + "topic_url") + glog.V(0).Infof("notification.gocdk_pub_sub.topic_url: %v", k.topicURL) + topic, err := pubsub.OpenTopic(context.Background(), k.topicURL) + if err != nil { + glog.Fatalf("Failed to open topic: %v", err) + } + k.topic = topic + return nil +} + +func (k *GoCDKPubSub) SendMessage(key string, message proto.Message) error { + bytes, err := proto.Marshal(message) + if err != nil { + return err + } + err = k.topic.Send(context.Background(), &pubsub.Message{ + Body: bytes, + Metadata: map[string]string{"key": key}, + }) + if err != nil { + return fmt.Errorf("send message via Go CDK pubsub %s: %v", k.topicURL, err) + } + return nil +} diff --git a/weed/notification/google_pub_sub/google_pub_sub.go b/weed/notification/google_pub_sub/google_pub_sub.go index 7b26bfe38..363a86eb6 100644 --- a/weed/notification/google_pub_sub/google_pub_sub.go +++ b/weed/notification/google_pub_sub/google_pub_sub.go @@ -25,13 +25,13 @@ func (k *GooglePubSub) GetName() string { return "google_pub_sub" } -func (k *GooglePubSub) Initialize(configuration util.Configuration) (err error) { - glog.V(0).Infof("notification.google_pub_sub.project_id: %v", 
configuration.GetString("project_id")) - glog.V(0).Infof("notification.google_pub_sub.topic: %v", configuration.GetString("topic")) +func (k *GooglePubSub) Initialize(configuration util.Configuration, prefix string) (err error) { + glog.V(0).Infof("notification.google_pub_sub.project_id: %v", configuration.GetString(prefix+"project_id")) + glog.V(0).Infof("notification.google_pub_sub.topic: %v", configuration.GetString(prefix+"topic")) return k.initialize( - configuration.GetString("google_application_credentials"), - configuration.GetString("project_id"), - configuration.GetString("topic"), + configuration.GetString(prefix+"google_application_credentials"), + configuration.GetString(prefix+"project_id"), + configuration.GetString(prefix+"topic"), ) } diff --git a/weed/notification/kafka/kafka_queue.go b/weed/notification/kafka/kafka_queue.go index 830709a51..8d83b5892 100644 --- a/weed/notification/kafka/kafka_queue.go +++ b/weed/notification/kafka/kafka_queue.go @@ -21,12 +21,12 @@ func (k *KafkaQueue) GetName() string { return "kafka" } -func (k *KafkaQueue) Initialize(configuration util.Configuration) (err error) { - glog.V(0).Infof("filer.notification.kafka.hosts: %v\n", configuration.GetStringSlice("hosts")) - glog.V(0).Infof("filer.notification.kafka.topic: %v\n", configuration.GetString("topic")) +func (k *KafkaQueue) Initialize(configuration util.Configuration, prefix string) (err error) { + glog.V(0).Infof("filer.notification.kafka.hosts: %v\n", configuration.GetStringSlice(prefix+"hosts")) + glog.V(0).Infof("filer.notification.kafka.topic: %v\n", configuration.GetString(prefix+"topic")) return k.initialize( - configuration.GetStringSlice("hosts"), - configuration.GetString("topic"), + configuration.GetStringSlice(prefix+"hosts"), + configuration.GetString(prefix+"topic"), ) } @@ -76,7 +76,7 @@ func (k *KafkaQueue) handleError() { for { err := <-k.producer.Errors() if err != nil { - glog.Errorf("producer message error, partition:%d offset:%d key:%v 
valus:%s error(%v) topic:%s", err.Msg.Partition, err.Msg.Offset, err.Msg.Key, err.Msg.Value, err.Err, k.topic) + glog.Errorf("producer message error, partition:%d offset:%d key:%v value:%s error(%v) topic:%s", err.Msg.Partition, err.Msg.Offset, err.Msg.Key, err.Msg.Value, err.Err, k.topic) } } } diff --git a/weed/notification/log/log_queue.go b/weed/notification/log/log_queue.go index dcc038dfc..1ca4786a1 100644 --- a/weed/notification/log/log_queue.go +++ b/weed/notification/log/log_queue.go @@ -18,7 +18,7 @@ func (k *LogQueue) GetName() string { return "log" } -func (k *LogQueue) Initialize(configuration util.Configuration) (err error) { +func (k *LogQueue) Initialize(configuration util.Configuration, prefix string) (err error) { return nil } diff --git a/weed/operation/assign_file_id.go b/weed/operation/assign_file_id.go index 00e1caad5..893bf516c 100644 --- a/weed/operation/assign_file_id.go +++ b/weed/operation/assign_file_id.go @@ -3,30 +3,36 @@ package operation import ( "context" "fmt" - "time" + "strings" + + "google.golang.org/grpc" "github.com/chrislusf/seaweedfs/weed/pb/master_pb" + "github.com/chrislusf/seaweedfs/weed/security" + "github.com/chrislusf/seaweedfs/weed/util" ) type VolumeAssignRequest struct { - Count uint64 - Replication string - Collection string - Ttl string - DataCenter string - Rack string - DataNode string + Count uint64 + Replication string + Collection string + Ttl string + DataCenter string + Rack string + DataNode string + WritableVolumeCount uint32 } type AssignResult struct { - Fid string `json:"fid,omitempty"` - Url string `json:"url,omitempty"` - PublicUrl string `json:"publicUrl,omitempty"` - Count uint64 `json:"count,omitempty"` - Error string `json:"error,omitempty"` + Fid string `json:"fid,omitempty"` + Url string `json:"url,omitempty"` + PublicUrl string `json:"publicUrl,omitempty"` + Count uint64 `json:"count,omitempty"` + Error string `json:"error,omitempty"` + Auth security.EncodedJwt `json:"auth,omitempty"` } -func 
Assign(server string, primaryRequest *VolumeAssignRequest, alternativeRequests ...*VolumeAssignRequest) (*AssignResult, error) { +func Assign(server string, grpcDialOption grpc.DialOption, primaryRequest *VolumeAssignRequest, alternativeRequests ...*VolumeAssignRequest) (*AssignResult, error) { var requests []*VolumeAssignRequest requests = append(requests, primaryRequest) @@ -40,20 +46,19 @@ func Assign(server string, primaryRequest *VolumeAssignRequest, alternativeReque continue } - lastError = withMasterServerClient(server, func(masterClient master_pb.SeaweedClient) error { - ctx, cancel := context.WithTimeout(context.Background(), time.Duration(5*time.Second)) - defer cancel() + lastError = WithMasterServerClient(server, grpcDialOption, func(masterClient master_pb.SeaweedClient) error { req := &master_pb.AssignRequest{ - Count: primaryRequest.Count, - Replication: primaryRequest.Replication, - Collection: primaryRequest.Collection, - Ttl: primaryRequest.Ttl, - DataCenter: primaryRequest.DataCenter, - Rack: primaryRequest.Rack, - DataNode: primaryRequest.DataNode, + Count: primaryRequest.Count, + Replication: primaryRequest.Replication, + Collection: primaryRequest.Collection, + Ttl: primaryRequest.Ttl, + DataCenter: primaryRequest.DataCenter, + Rack: primaryRequest.Rack, + DataNode: primaryRequest.DataNode, + WritableVolumeCount: primaryRequest.WritableVolumeCount, } - resp, grpcErr := masterClient.Assign(ctx, req) + resp, grpcErr := masterClient.Assign(context.Background(), req) if grpcErr != nil { return grpcErr } @@ -63,6 +68,7 @@ func Assign(server string, primaryRequest *VolumeAssignRequest, alternativeReque ret.Url = resp.Url ret.PublicUrl = resp.PublicUrl ret.Error = resp.Error + ret.Auth = security.EncodedJwt(resp.Auth) return nil @@ -81,3 +87,17 @@ func Assign(server string, primaryRequest *VolumeAssignRequest, alternativeReque return ret, lastError } + +func LookupJwt(master string, fileId string) security.EncodedJwt { + + tokenStr := "" + + if h, e 
:= util.Head(fmt.Sprintf("http://%s/dir/lookup?fileId=%s", master, fileId)); e == nil { + bearer := h.Get("Authorization") + if len(bearer) > 7 && strings.ToUpper(bearer[0:6]) == "BEARER" { + tokenStr = bearer[7:] + } + } + + return security.EncodedJwt(tokenStr) +} diff --git a/weed/operation/chunked_file.go b/weed/operation/chunked_file.go index 9d8267dee..653b7bf13 100644 --- a/weed/operation/chunked_file.go +++ b/weed/operation/chunked_file.go @@ -5,11 +5,13 @@ import ( "errors" "fmt" "io" + "io/ioutil" "net/http" "sort" - "sync" + "google.golang.org/grpc" + "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/util" ) @@ -38,22 +40,23 @@ type ChunkManifest struct { // seekable chunked file reader type ChunkedFileReader struct { - Manifest *ChunkManifest - Master string - pos int64 - pr *io.PipeReader - pw *io.PipeWriter - mutex sync.Mutex + totalSize int64 + chunkList []*ChunkInfo + master string + pos int64 + pr *io.PipeReader + pw *io.PipeWriter + mutex sync.Mutex } func (s ChunkList) Len() int { return len(s) } func (s ChunkList) Less(i, j int) bool { return s[i].Offset < s[j].Offset } func (s ChunkList) Swap(i, j int) { s[i], s[j] = s[j], s[i] } -func LoadChunkManifest(buffer []byte, isGzipped bool) (*ChunkManifest, error) { - if isGzipped { +func LoadChunkManifest(buffer []byte, isCompressed bool) (*ChunkManifest, error) { + if isCompressed { var err error - if buffer, err = UnGzipData(buffer); err != nil { + if buffer, err = util.DecompressData(buffer); err != nil { return nil, err } } @@ -69,12 +72,12 @@ func (cm *ChunkManifest) Marshal() ([]byte, error) { return json.Marshal(cm) } -func (cm *ChunkManifest) DeleteChunks(master string) error { +func (cm *ChunkManifest) DeleteChunks(master string, usePublicUrl bool, grpcDialOption grpc.DialOption) error { var fileIds []string for _, ci := range cm.Chunks { fileIds = append(fileIds, ci.Fid) } - results, err := DeleteFiles(master, fileIds) + results, err := DeleteFiles(master, 
usePublicUrl, grpcDialOption, fileIds) if err != nil { glog.V(0).Infof("delete %+v: %v", fileIds, err) return fmt.Errorf("chunk delete: %v", err) @@ -102,7 +105,10 @@ func readChunkNeedle(fileUrl string, w io.Writer, offset int64) (written int64, if err != nil { return written, err } - defer resp.Body.Close() + defer func() { + io.Copy(ioutil.Discard, resp.Body) + resp.Body.Close() + }() switch resp.StatusCode { case http.StatusRequestedRangeNotSatisfiable: @@ -120,16 +126,29 @@ func readChunkNeedle(fileUrl string, w io.Writer, offset int64) (written int64, return io.Copy(w, resp.Body) } +func NewChunkedFileReader(chunkList []*ChunkInfo, master string) *ChunkedFileReader { + var totalSize int64 + for _, chunk := range chunkList { + totalSize += chunk.Size + } + sort.Sort(ChunkList(chunkList)) + return &ChunkedFileReader{ + totalSize: totalSize, + chunkList: chunkList, + master: master, + } +} + func (cf *ChunkedFileReader) Seek(offset int64, whence int) (int64, error) { var err error switch whence { - case 0: - case 1: + case io.SeekStart: + case io.SeekCurrent: offset += cf.pos - case 2: - offset = cf.Manifest.Size - offset + case io.SeekEnd: + offset = cf.totalSize + offset } - if offset > cf.Manifest.Size { + if offset > cf.totalSize { err = ErrInvalidRange } if cf.pos != offset { @@ -140,10 +159,9 @@ func (cf *ChunkedFileReader) Seek(offset int64, whence int) (int64, error) { } func (cf *ChunkedFileReader) WriteTo(w io.Writer) (n int64, err error) { - cm := cf.Manifest chunkIndex := -1 chunkStartOffset := int64(0) - for i, ci := range cm.Chunks { + for i, ci := range cf.chunkList { if cf.pos >= ci.Offset && cf.pos < ci.Offset+ci.Size { chunkIndex = i chunkStartOffset = cf.pos - ci.Offset @@ -153,10 +171,10 @@ func (cf *ChunkedFileReader) WriteTo(w io.Writer) (n int64, err error) { if chunkIndex < 0 { return n, ErrInvalidRange } - for ; chunkIndex < cm.Chunks.Len(); chunkIndex++ { - ci := cm.Chunks[chunkIndex] + for ; chunkIndex < len(cf.chunkList); chunkIndex++ 
{ + ci := cf.chunkList[chunkIndex] // if we need read date from local volume server first? - fileUrl, lookupError := LookupFileId(cf.Master, ci.Fid) + fileUrl, lookupError := LookupFileId(cf.master, ci.Fid) if lookupError != nil { return n, lookupError } diff --git a/weed/operation/compress.go b/weed/operation/compress.go deleted file mode 100644 index 65979d529..000000000 --- a/weed/operation/compress.go +++ /dev/null @@ -1,82 +0,0 @@ -package operation - -import ( - "bytes" - "compress/flate" - "compress/gzip" - "io/ioutil" - "strings" - - "github.com/chrislusf/seaweedfs/weed/glog" - "golang.org/x/tools/godoc/util" -) - -/* -* Default more not to gzip since gzip can be done on client side. - */ -func IsGzippable(ext, mtype string, data []byte) bool { - - // text - if strings.HasPrefix(mtype, "text/") { - return true - } - - // images - switch ext { - case ".svg", ".bmp": - return true - } - if strings.HasPrefix(mtype, "image/") { - return false - } - - // by file name extention - switch ext { - case ".zip", ".rar", ".gz", ".bz2", ".xz": - return false - case ".pdf", ".txt", ".html", ".htm", ".css", ".js", ".json": - return true - case ".php", ".java", ".go", ".rb", ".c", ".cpp", ".h", ".hpp": - return true - case ".png", ".jpg", ".jpeg": - return false - } - - // by mime type - if strings.HasPrefix(mtype, "application/") { - if strings.HasSuffix(mtype, "xml") { - return true - } - if strings.HasSuffix(mtype, "script") { - return true - } - } - - isMostlyText := util.IsText(data) - - return isMostlyText -} - -func GzipData(input []byte) ([]byte, error) { - buf := new(bytes.Buffer) - w, _ := gzip.NewWriterLevel(buf, flate.BestCompression) - if _, err := w.Write(input); err != nil { - glog.V(2).Infoln("error compressing data:", err) - return nil, err - } - if err := w.Close(); err != nil { - glog.V(2).Infoln("error closing compressed data:", err) - return nil, err - } - return buf.Bytes(), nil -} -func UnGzipData(input []byte) ([]byte, error) { - buf := 
bytes.NewBuffer(input) - r, _ := gzip.NewReader(buf) - defer r.Close() - output, err := ioutil.ReadAll(r) - if err != nil { - glog.V(2).Infoln("error uncompressing data:", err) - } - return output, err -} diff --git a/weed/operation/data_struts.go b/weed/operation/data_struts.go index bfc53aa50..4980f9913 100644 --- a/weed/operation/data_struts.go +++ b/weed/operation/data_struts.go @@ -2,6 +2,5 @@ package operation type JoinResult struct { VolumeSizeLimit uint64 `json:"VolumeSizeLimit,omitempty"` - SecretKey string `json:"secretKey,omitempty"` Error string `json:"error,omitempty"` } diff --git a/weed/operation/delete_content.go b/weed/operation/delete_content.go index 3e468e1a3..9868a411d 100644 --- a/weed/operation/delete_content.go +++ b/weed/operation/delete_content.go @@ -7,7 +7,8 @@ import ( "net/http" "strings" "sync" - "time" + + "google.golang.org/grpc" "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb" ) @@ -28,17 +29,25 @@ func ParseFileId(fid string) (vid string, key_cookie string, err error) { } // DeleteFiles batch deletes a list of fileIds -func DeleteFiles(master string, fileIds []string) ([]*volume_server_pb.DeleteResult, error) { - - lookupFunc := func(vids []string) (map[string]LookupResult, error) { - return LookupVolumeIds(master, vids) +func DeleteFiles(master string, usePublicUrl bool, grpcDialOption grpc.DialOption, fileIds []string) ([]*volume_server_pb.DeleteResult, error) { + + lookupFunc := func(vids []string) (results map[string]LookupResult, err error) { + results, err = LookupVolumeIds(master, grpcDialOption, vids) + if err == nil && usePublicUrl { + for _, result := range results { + for _, loc := range result.Locations { + loc.Url = loc.PublicUrl + } + } + } + return } - return DeleteFilesWithLookupVolumeId(fileIds, lookupFunc) + return DeleteFilesWithLookupVolumeId(grpcDialOption, fileIds, lookupFunc) } -func DeleteFilesWithLookupVolumeId(fileIds []string, lookupFunc func(vid []string) (map[string]LookupResult, error)) 
([]*volume_server_pb.DeleteResult, error) { +func DeleteFilesWithLookupVolumeId(grpcDialOption grpc.DialOption, fileIds []string, lookupFunc func(vid []string) (map[string]LookupResult, error)) ([]*volume_server_pb.DeleteResult, error) { var ret []*volume_server_pb.DeleteResult @@ -48,7 +57,7 @@ func DeleteFilesWithLookupVolumeId(fileIds []string, lookupFunc func(vid []strin vid, _, err := ParseFileId(fileId) if err != nil { ret = append(ret, &volume_server_pb.DeleteResult{ - FileId: vid, + FileId: fileId, Status: http.StatusBadRequest, Error: err.Error()}, ) @@ -85,38 +94,42 @@ func DeleteFilesWithLookupVolumeId(fileIds []string, lookupFunc func(vid []strin } } + resultChan := make(chan []*volume_server_pb.DeleteResult, len(server_to_fileIds)) var wg sync.WaitGroup - for server, fidList := range server_to_fileIds { wg.Add(1) go func(server string, fidList []string) { defer wg.Done() - if deleteResults, deleteErr := DeleteFilesAtOneVolumeServer(server, fidList); deleteErr != nil { + if deleteResults, deleteErr := DeleteFilesAtOneVolumeServer(server, grpcDialOption, fidList, true); deleteErr != nil { err = deleteErr - } else { - ret = append(ret, deleteResults...) + } else if deleteResults != nil { + resultChan <- deleteResults } }(server, fidList) } wg.Wait() + close(resultChan) + + for result := range resultChan { + ret = append(ret, result...) 
+ } return ret, err } // DeleteFilesAtOneVolumeServer deletes a list of files that is on one volume server via gRpc -func DeleteFilesAtOneVolumeServer(volumeServer string, fileIds []string) (ret []*volume_server_pb.DeleteResult, err error) { +func DeleteFilesAtOneVolumeServer(volumeServer string, grpcDialOption grpc.DialOption, fileIds []string, includeCookie bool) (ret []*volume_server_pb.DeleteResult, err error) { - err = WithVolumeServerClient(volumeServer, func(volumeServerClient volume_server_pb.VolumeServerClient) error { - ctx, cancel := context.WithTimeout(context.Background(), time.Duration(5*time.Second)) - defer cancel() + err = WithVolumeServerClient(volumeServer, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error { req := &volume_server_pb.BatchDeleteRequest{ - FileIds: fileIds, + FileIds: fileIds, + SkipCookieCheck: !includeCookie, } - resp, err := volumeServerClient.BatchDelete(ctx, req) + resp, err := volumeServerClient.BatchDelete(context.Background(), req) // fmt.Printf("deleted %v %v: %v\n", fileIds, err, resp) diff --git a/weed/operation/grpc_client.go b/weed/operation/grpc_client.go index d0931a8d3..025a65b38 100644 --- a/weed/operation/grpc_client.go +++ b/weed/operation/grpc_client.go @@ -4,31 +4,27 @@ import ( "fmt" "strconv" "strings" - "sync" + + "google.golang.org/grpc" "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" "github.com/chrislusf/seaweedfs/weed/pb/master_pb" "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb" - "github.com/chrislusf/seaweedfs/weed/util" - "google.golang.org/grpc" ) -var ( - grpcClients = make(map[string]*grpc.ClientConn) - grpcClientsLock sync.Mutex -) - -func WithVolumeServerClient(volumeServer string, fn func(volume_server_pb.VolumeServerClient) error) error { +func WithVolumeServerClient(volumeServer string, grpcDialOption grpc.DialOption, fn func(volume_server_pb.VolumeServerClient) 
error) error { grpcAddress, err := toVolumeServerGrpcAddress(volumeServer) if err != nil { - return err + return fmt.Errorf("failed to parse volume server %v: %v", volumeServer, err) } - return util.WithCachedGrpcClient(func(grpcConnection *grpc.ClientConn) error { + return pb.WithCachedGrpcClient(func(grpcConnection *grpc.ClientConn) error { client := volume_server_pb.NewVolumeServerClient(grpcConnection) return fn(client) - }, grpcAddress) + }, grpcAddress, grpcDialOption) } @@ -42,16 +38,30 @@ func toVolumeServerGrpcAddress(volumeServer string) (grpcAddress string, err err return fmt.Sprintf("%s:%d", volumeServer[0:sepIndex], port+10000), nil } -func withMasterServerClient(masterServer string, fn func(masterClient master_pb.SeaweedClient) error) error { +func WithMasterServerClient(masterServer string, grpcDialOption grpc.DialOption, fn func(masterClient master_pb.SeaweedClient) error) error { - masterGrpcAddress, parseErr := util.ParseServerToGrpcAddress(masterServer, 0) + masterGrpcAddress, parseErr := pb.ParseServerToGrpcAddress(masterServer) if parseErr != nil { - return fmt.Errorf("failed to parse master grpc %v", masterServer) + return fmt.Errorf("failed to parse master %v: %v", masterServer, parseErr) } - return util.WithCachedGrpcClient(func(grpcConnection *grpc.ClientConn) error { + return pb.WithCachedGrpcClient(func(grpcConnection *grpc.ClientConn) error { client := master_pb.NewSeaweedClient(grpcConnection) return fn(client) - }, masterGrpcAddress) + }, masterGrpcAddress, grpcDialOption) + +} + +func WithFilerServerClient(filerServer string, grpcDialOption grpc.DialOption, fn func(masterClient filer_pb.SeaweedFilerClient) error) error { + + filerGrpcAddress, parseErr := pb.ParseServerToGrpcAddress(filerServer) + if parseErr != nil { + return fmt.Errorf("failed to parse filer %v: %v", filerGrpcAddress, parseErr) + } + + return pb.WithCachedGrpcClient(func(grpcConnection *grpc.ClientConn) error { + client := 
filer_pb.NewSeaweedFilerClient(grpcConnection) + return fn(client) + }, filerGrpcAddress, grpcDialOption) } diff --git a/weed/operation/list_masters.go b/weed/operation/list_masters.go deleted file mode 100644 index 75838de4d..000000000 --- a/weed/operation/list_masters.go +++ /dev/null @@ -1,32 +0,0 @@ -package operation - -import ( - "encoding/json" - - "github.com/chrislusf/seaweedfs/weed/glog" - "github.com/chrislusf/seaweedfs/weed/util" -) - -type ClusterStatusResult struct { - IsLeader bool `json:"IsLeader,omitempty"` - Leader string `json:"Leader,omitempty"` - Peers []string `json:"Peers,omitempty"` -} - -func ListMasters(server string) (leader string, peers []string, err error) { - jsonBlob, err := util.Get("http://" + server + "/cluster/status") - glog.V(2).Info("list masters result :", string(jsonBlob)) - if err != nil { - return "", nil, err - } - var ret ClusterStatusResult - err = json.Unmarshal(jsonBlob, &ret) - if err != nil { - return "", nil, err - } - peers = ret.Peers - if ret.IsLeader { - peers = append(peers, ret.Leader) - } - return ret.Leader, peers, nil -} diff --git a/weed/operation/lookup.go b/weed/operation/lookup.go index 562a11580..d0773e7fd 100644 --- a/weed/operation/lookup.go +++ b/weed/operation/lookup.go @@ -5,6 +5,7 @@ import ( "encoding/json" "errors" "fmt" + "google.golang.org/grpc" "math/rand" "net/url" "strings" @@ -78,7 +79,7 @@ func LookupFileId(server string, fileId string) (fullUrl string, err error) { } // LookupVolumeIds find volume locations by cache and actual lookup -func LookupVolumeIds(server string, vids []string) (map[string]LookupResult, error) { +func LookupVolumeIds(server string, grpcDialOption grpc.DialOption, vids []string) (map[string]LookupResult, error) { ret := make(map[string]LookupResult) var unknown_vids []string @@ -98,14 +99,12 @@ func LookupVolumeIds(server string, vids []string) (map[string]LookupResult, err //only query unknown_vids - err := withMasterServerClient(server, func(masterClient 
master_pb.SeaweedClient) error { - ctx, cancel := context.WithTimeout(context.Background(), time.Duration(5*time.Second)) - defer cancel() + err := WithMasterServerClient(server, grpcDialOption, func(masterClient master_pb.SeaweedClient) error { req := &master_pb.LookupVolumeRequest{ VolumeIds: unknown_vids, } - resp, grpcErr := masterClient.LookupVolume(ctx, req) + resp, grpcErr := masterClient.LookupVolume(context.Background(), req) if grpcErr != nil { return grpcErr } diff --git a/weed/operation/needle_parse_test.go b/weed/operation/needle_parse_test.go new file mode 100644 index 000000000..20a610eaa --- /dev/null +++ b/weed/operation/needle_parse_test.go @@ -0,0 +1,130 @@ +package operation + +import ( + "bytes" + "fmt" + "io" + "net/http" + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/chrislusf/seaweedfs/weed/storage/needle" + "github.com/chrislusf/seaweedfs/weed/util" +) + +type MockClient struct { + needleHandling func(n *needle.Needle, originalSize int, e error) +} + +func (m *MockClient) Do(req *http.Request) (*http.Response, error) { + n, originalSize, err := needle.CreateNeedleFromRequest(req, 1024*1024) + if m.needleHandling != nil { + m.needleHandling(n, originalSize, err) + } + return &http.Response{ + StatusCode: http.StatusNoContent, + }, io.EOF +} + +/* + +The mime type is always the value passed in. + +Compress or not depends on the content detection, file name extension, and compression ratio. + +If the content is already compressed, need to know the content size. 
+ +*/ + +func TestCreateNeedleFromRequest(t *testing.T) { + mc := &MockClient{} + tmp := HttpClient + HttpClient = mc + defer func() { + HttpClient = tmp + }() + + { + mc.needleHandling = func(n *needle.Needle, originalSize int, err error) { + assert.Equal(t, nil, err, "upload: %v", err) + assert.Equal(t, "", string(n.Mime), "mime detection failed: %v", string(n.Mime)) + assert.Equal(t, true, n.IsCompressed(), "this should be compressed") + assert.Equal(t, true, util.IsGzippedContent(n.Data), "this should be gzip") + fmt.Printf("needle: %v, originalSize: %d\n", n, originalSize) + } + uploadResult, err, data := Upload("http://localhost:8080/389,0f084d17353afda0", "t.txt", false, bytes.NewReader([]byte(textContent)), false, "", nil, "") + if len(data) != len(textContent) { + t.Errorf("data actual %d expected %d", len(data), len(textContent)) + } + if err != nil { + fmt.Printf("err: %v\n", err) + } + fmt.Printf("uploadResult: %+v\n", uploadResult) + } + + { + mc.needleHandling = func(n *needle.Needle, originalSize int, err error) { + assert.Equal(t, nil, err, "upload: %v", err) + assert.Equal(t, "text/plain", string(n.Mime), "mime detection failed: %v", string(n.Mime)) + assert.Equal(t, true, n.IsCompressed(), "this should be compressed") + assert.Equal(t, true, util.IsGzippedContent(n.Data), "this should be gzip") + fmt.Printf("needle: %v, dataSize:%d originalSize:%d\n", n, len(n.Data), originalSize) + } + gzippedData, _ := util.GzipData([]byte(textContent)) + Upload("http://localhost:8080/389,0f084d17353afda0", "t.txt", false, bytes.NewReader(gzippedData), true, "text/plain", nil, "") + } + + { + mc.needleHandling = func(n *needle.Needle, originalSize int, err error) { + assert.Equal(t, nil, err, "upload: %v", err) + assert.Equal(t, "text/plain", string(n.Mime), "mime detection failed: %v", string(n.Mime)) + assert.Equal(t, true, n.IsCompressed(), "this should be compressed") + assert.Equal(t, true, util.IsZstdContent(n.Data), "this should be zstd") + 
fmt.Printf("needle: %v, dataSize:%d originalSize:%d\n", n, len(n.Data), originalSize) + } + zstdData, _ := util.ZstdData([]byte(textContent)) + Upload("http://localhost:8080/389,0f084d17353afda0", "t.txt", false, bytes.NewReader(zstdData), true, "text/plain", nil, "") + } + + { + mc.needleHandling = func(n *needle.Needle, originalSize int, err error) { + assert.Equal(t, nil, err, "upload: %v", err) + assert.Equal(t, "application/zstd", string(n.Mime), "mime detection failed: %v", string(n.Mime)) + assert.Equal(t, false, n.IsCompressed(), "this should not be compressed") + assert.Equal(t, true, util.IsZstdContent(n.Data), "this should still be zstd") + fmt.Printf("needle: %v, dataSize:%d originalSize:%d\n", n, len(n.Data), originalSize) + } + zstdData, _ := util.ZstdData([]byte(textContent)) + Upload("http://localhost:8080/389,0f084d17353afda0", "t.txt", false, bytes.NewReader(zstdData), false, "application/zstd", nil, "") + } + +} + + +var textContent = `Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +` \ No newline at end of file diff --git a/weed/operation/stats.go b/weed/operation/stats.go deleted file mode 100644 index 364727272..000000000 --- a/weed/operation/stats.go +++ /dev/null @@ -1,28 +0,0 @@ -package operation - -import ( - "context" - "time" - - "github.com/chrislusf/seaweedfs/weed/pb/master_pb" -) - -func Statistics(server string, req *master_pb.StatisticsRequest) (resp *master_pb.StatisticsResponse, err error) { - - err = withMasterServerClient(server, func(masterClient master_pb.SeaweedClient) error { - ctx, cancel := context.WithTimeout(context.Background(), time.Duration(5*time.Second)) - defer cancel() - - grpcResponse, grpcErr := masterClient.Statistics(ctx, req) - if grpcErr != nil { - return grpcErr - } - - resp = grpcResponse - - return nil - - }) - - return -} diff --git a/weed/operation/submit.go b/weed/operation/submit.go index 7a1a3085e..e8bec382a 100644 --- a/weed/operation/submit.go +++ b/weed/operation/submit.go @@ -1,7 +1,6 @@ package operation import ( - "bytes" "io" "mime" "net/url" @@ -10,6 +9,8 @@ import ( "strconv" "strings" + "google.golang.org/grpc" + "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/security" ) @@ -26,6 +27,7 @@ type FilePart struct { Ttl string Server string //this comes from assign result Fid string //this comes from assign result, but customizable + Fsync bool } type SubmitResult struct { @@ -36,10 +38,7 @@ type SubmitResult struct { Error string 
`json:"error,omitempty"` } -func SubmitFiles(master string, files []FilePart, - replication string, collection string, dataCenter string, ttl string, maxMB int, - secret security.Secret, -) ([]SubmitResult, error) { +func SubmitFiles(master string, grpcDialOption grpc.DialOption, files []FilePart, replication string, collection string, dataCenter string, ttl string, maxMB int, usePublicUrl bool) ([]SubmitResult, error) { results := make([]SubmitResult, len(files)) for index, file := range files { results[index].FileName = file.FileName @@ -51,9 +50,9 @@ func SubmitFiles(master string, files []FilePart, DataCenter: dataCenter, Ttl: ttl, } - ret, err := Assign(master, ar) + ret, err := Assign(master, grpcDialOption, ar) if err != nil { - for index, _ := range files { + for index := range files { results[index].Error = err.Error() } return results, err @@ -64,10 +63,13 @@ func SubmitFiles(master string, files []FilePart, file.Fid = file.Fid + "_" + strconv.Itoa(index) } file.Server = ret.Url + if usePublicUrl { + file.Server = ret.PublicUrl + } file.Replication = replication file.Collection = collection file.DataCenter = dataCenter - results[index].Size, err = file.Upload(maxMB, master, secret) + results[index].Size, err = file.Upload(maxMB, master, usePublicUrl, ret.Auth, grpcDialOption) if err != nil { results[index].Error = err.Error() } @@ -110,12 +112,14 @@ func newFilePart(fullPathFilename string) (ret FilePart, err error) { return ret, nil } -func (fi FilePart) Upload(maxMB int, master string, secret security.Secret) (retSize uint32, err error) { - jwt := security.GenJwt(secret, fi.Fid) +func (fi FilePart) Upload(maxMB int, master string, usePublicUrl bool, jwt security.EncodedJwt, grpcDialOption grpc.DialOption) (retSize uint32, err error) { fileUrl := "http://" + fi.Server + "/" + fi.Fid if fi.ModTime != 0 { fileUrl += "?ts=" + strconv.Itoa(int(fi.ModTime)) } + if fi.Fsync { + fileUrl += "?fsync=true" + } if closer, ok := fi.Reader.(io.Closer); ok { defer 
closer.Close() } @@ -139,7 +143,7 @@ func (fi FilePart) Upload(maxMB int, master string, secret security.Secret) (ret Collection: fi.Collection, Ttl: fi.Ttl, } - ret, err = Assign(master, ar) + ret, err = Assign(master, grpcDialOption, ar) if err != nil { return } @@ -152,10 +156,10 @@ func (fi FilePart) Upload(maxMB int, master string, secret security.Secret) (ret Collection: fi.Collection, Ttl: fi.Ttl, } - ret, err = Assign(master, ar) + ret, err = Assign(master, grpcDialOption, ar) if err != nil { // delete all uploaded chunks - cm.DeleteChunks(master) + cm.DeleteChunks(master, usePublicUrl, grpcDialOption) return } id = ret.Fid @@ -170,10 +174,10 @@ func (fi FilePart) Upload(maxMB int, master string, secret security.Secret) (ret baseName+"-"+strconv.FormatInt(i+1, 10), io.LimitReader(fi.Reader, chunkSize), master, fileUrl, - jwt) + ret.Auth) if e != nil { // delete all uploaded chunks - cm.DeleteChunks(master) + cm.DeleteChunks(master, usePublicUrl, grpcDialOption) return 0, e } cm.Chunks = append(cm.Chunks, @@ -188,10 +192,10 @@ func (fi FilePart) Upload(maxMB int, master string, secret security.Secret) (ret err = upload_chunked_file_manifest(fileUrl, &cm, jwt) if err != nil { // delete all uploaded chunks - cm.DeleteChunks(master) + cm.DeleteChunks(master, usePublicUrl, grpcDialOption) } } else { - ret, e := Upload(fileUrl, baseName, fi.Reader, false, fi.MimeType, nil, jwt) + ret, e, _ := Upload(fileUrl, baseName, false, fi.Reader, false, fi.MimeType, nil, jwt) if e != nil { return 0, e } @@ -204,8 +208,7 @@ func upload_one_chunk(filename string, reader io.Reader, master, fileUrl string, jwt security.EncodedJwt, ) (size uint32, e error) { glog.V(4).Info("Uploading part ", filename, " to ", fileUrl, "...") - uploadResult, uploadError := Upload(fileUrl, filename, reader, false, - "application/octet-stream", nil, jwt) + uploadResult, uploadError, _ := Upload(fileUrl, filename, false, reader, false, "", nil, jwt) if uploadError != nil { return 0, uploadError } @@ 
-217,12 +220,11 @@ func upload_chunked_file_manifest(fileUrl string, manifest *ChunkManifest, jwt s if e != nil { return e } - bufReader := bytes.NewReader(buf) glog.V(4).Info("Uploading chunks manifest ", manifest.Name, " to ", fileUrl, "...") u, _ := url.Parse(fileUrl) q := u.Query() q.Set("cm", "true") u.RawQuery = q.Encode() - _, e = Upload(u.String(), manifest.Name, bufReader, false, "application/json", nil, jwt) + _, e = UploadData(u.String(), manifest.Name, false, buf, false, "application/json", nil, jwt) return e } diff --git a/weed/operation/sync_volume.go b/weed/operation/sync_volume.go index e40c7de41..5562f12ab 100644 --- a/weed/operation/sync_volume.go +++ b/weed/operation/sync_volume.go @@ -2,63 +2,19 @@ package operation import ( "context" - "fmt" - "io" - "time" - "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb" - . "github.com/chrislusf/seaweedfs/weed/storage/types" - "github.com/chrislusf/seaweedfs/weed/util" + "google.golang.org/grpc" ) -func GetVolumeSyncStatus(server string, vid uint32) (resp *volume_server_pb.VolumeSyncStatusResponse, err error) { +func GetVolumeSyncStatus(server string, grpcDialOption grpc.DialOption, vid uint32) (resp *volume_server_pb.VolumeSyncStatusResponse, err error) { - WithVolumeServerClient(server, func(client volume_server_pb.VolumeServerClient) error { - ctx, cancel := context.WithTimeout(context.Background(), time.Duration(5*time.Second)) - defer cancel() + WithVolumeServerClient(server, grpcDialOption, func(client volume_server_pb.VolumeServerClient) error { - resp, err = client.VolumeSyncStatus(ctx, &volume_server_pb.VolumeSyncStatusRequest{ - VolumdId: vid, + resp, err = client.VolumeSyncStatus(context.Background(), &volume_server_pb.VolumeSyncStatusRequest{ + VolumeId: vid, }) return nil }) return } - -func GetVolumeIdxEntries(server string, vid uint32, eachEntryFn func(key NeedleId, offset Offset, size uint32)) error { - - return WithVolumeServerClient(server, func(client 
volume_server_pb.VolumeServerClient) error { - stream, err := client.VolumeSyncIndex(context.Background(), &volume_server_pb.VolumeSyncIndexRequest{ - VolumdId: vid, - }) - if err != nil { - return err - } - - var indexFileContent []byte - - for { - resp, err := stream.Recv() - if err == io.EOF { - break - } - if err != nil { - return fmt.Errorf("read index entries: %v", err) - } - indexFileContent = append(indexFileContent, resp.IndexFileContent...) - } - - dataSize := len(indexFileContent) - - for idx := 0; idx+NeedleEntrySize <= dataSize; idx += NeedleEntrySize { - line := indexFileContent[idx : idx+NeedleEntrySize] - key := BytesToNeedleId(line[:NeedleIdSize]) - offset := BytesToOffset(line[NeedleIdSize : NeedleIdSize+OffsetSize]) - size := util.BytesToUint32(line[NeedleIdSize+OffsetSize : NeedleIdSize+OffsetSize+SizeSize]) - eachEntryFn(key, offset, size) - } - - return nil - }) -} diff --git a/weed/operation/tail_volume.go b/weed/operation/tail_volume.go new file mode 100644 index 000000000..3cd66b5da --- /dev/null +++ b/weed/operation/tail_volume.go @@ -0,0 +1,83 @@ +package operation + +import ( + "context" + "fmt" + "io" + + "google.golang.org/grpc" + + "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb" + "github.com/chrislusf/seaweedfs/weed/storage/needle" +) + +func TailVolume(master string, grpcDialOption grpc.DialOption, vid needle.VolumeId, sinceNs uint64, timeoutSeconds int, fn func(n *needle.Needle) error) error { + // find volume location, replication, ttl info + lookup, err := Lookup(master, vid.String()) + if err != nil { + return fmt.Errorf("look up volume %d: %v", vid, err) + } + if len(lookup.Locations) == 0 { + return fmt.Errorf("unable to locate volume %d", vid) + } + + volumeServer := lookup.Locations[0].Url + + return TailVolumeFromSource(volumeServer, grpcDialOption, vid, sinceNs, timeoutSeconds, fn) +} + +func TailVolumeFromSource(volumeServer string, grpcDialOption grpc.DialOption, vid needle.VolumeId, sinceNs uint64, 
idleTimeoutSeconds int, fn func(n *needle.Needle) error) error { + return WithVolumeServerClient(volumeServer, grpcDialOption, func(client volume_server_pb.VolumeServerClient) error { + + stream, err := client.VolumeTailSender(context.Background(), &volume_server_pb.VolumeTailSenderRequest{ + VolumeId: uint32(vid), + SinceNs: sinceNs, + IdleTimeoutSeconds: uint32(idleTimeoutSeconds), + }) + if err != nil { + return err + } + + for { + resp, recvErr := stream.Recv() + if recvErr != nil { + if recvErr == io.EOF { + break + } else { + return recvErr + } + } + + needleHeader := resp.NeedleHeader + needleBody := resp.NeedleBody + + if len(needleHeader) == 0 { + continue + } + + for !resp.IsLastChunk { + resp, recvErr = stream.Recv() + if recvErr != nil { + if recvErr == io.EOF { + break + } else { + return recvErr + } + } + needleBody = append(needleBody, resp.NeedleBody...) + } + + n := new(needle.Needle) + n.ParseNeedleHeader(needleHeader) + n.ReadNeedleBodyBytes(needleBody, needle.CurrentVersion) + + err = fn(n) + + if err != nil { + return err + } + + } + return nil + }) +} diff --git a/weed/operation/upload_content.go b/weed/operation/upload_content.go index 030bf5889..658588ec3 100644 --- a/weed/operation/upload_content.go +++ b/weed/operation/upload_content.go @@ -2,6 +2,7 @@ package operation import ( "bytes" + "crypto/md5" "encoding/json" "errors" "fmt" @@ -13,38 +14,166 @@ import ( "net/textproto" "path/filepath" "strings" + "time" "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" "github.com/chrislusf/seaweedfs/weed/security" + "github.com/chrislusf/seaweedfs/weed/util" ) type UploadResult struct { - Name string `json:"name,omitempty"` - Size uint32 `json:"size,omitempty"` - Error string `json:"error,omitempty"` - ETag string `json:"eTag,omitempty"` + Name string `json:"name,omitempty"` + Size uint32 `json:"size,omitempty"` + Error string `json:"error,omitempty"` + ETag string `json:"eTag,omitempty"` + CipherKey 
[]byte `json:"cipherKey,omitempty"` + Mime string `json:"mime,omitempty"` + Gzip uint32 `json:"gzip,omitempty"` + Md5 string `json:"md5,omitempty"` +} + +func (uploadResult *UploadResult) ToPbFileChunk(fileId string, offset int64) *filer_pb.FileChunk { + return &filer_pb.FileChunk{ + FileId: fileId, + Offset: offset, + Size: uint64(uploadResult.Size), + Mtime: time.Now().UnixNano(), + ETag: uploadResult.ETag, + CipherKey: uploadResult.CipherKey, + IsCompressed: uploadResult.Gzip > 0, + } +} + +// HTTPClient interface for testing +type HTTPClient interface { + Do(req *http.Request) (*http.Response, error) } var ( - client *http.Client + HttpClient HTTPClient ) func init() { - client = &http.Client{Transport: &http.Transport{ + HttpClient = &http.Client{Transport: &http.Transport{ MaxIdleConnsPerHost: 1024, }} } var fileNameEscaper = strings.NewReplacer("\\", "\\\\", "\"", "\\\"") -// Upload sends a POST request to a volume server to upload the content -func Upload(uploadUrl string, filename string, reader io.Reader, isGzipped bool, mtype string, pairMap map[string]string, jwt security.EncodedJwt) (*UploadResult, error) { - return upload_content(uploadUrl, func(w io.Writer) (err error) { - _, err = io.Copy(w, reader) +// Upload sends a POST request to a volume server to upload the content with adjustable compression level +func UploadData(uploadUrl string, filename string, cipher bool, data []byte, isInputCompressed bool, mtype string, pairMap map[string]string, jwt security.EncodedJwt) (uploadResult *UploadResult, err error) { + uploadResult, err = doUploadData(uploadUrl, filename, cipher, data, isInputCompressed, mtype, pairMap, jwt) + if uploadResult != nil { + uploadResult.Md5 = util.Md5(data) + } + return +} + +// Upload sends a POST request to a volume server to upload the content with fast compression +func Upload(uploadUrl string, filename string, cipher bool, reader io.Reader, isInputCompressed bool, mtype string, pairMap map[string]string, jwt 
security.EncodedJwt) (uploadResult *UploadResult, err error, data []byte) { + hash := md5.New() + reader = io.TeeReader(reader, hash) + uploadResult, err, data = doUpload(uploadUrl, filename, cipher, reader, isInputCompressed, mtype, pairMap, jwt) + if uploadResult != nil { + uploadResult.Md5 = fmt.Sprintf("%x", hash.Sum(nil)) + } + return +} + +func doUpload(uploadUrl string, filename string, cipher bool, reader io.Reader, isInputCompressed bool, mtype string, pairMap map[string]string, jwt security.EncodedJwt) (uploadResult *UploadResult, err error, data []byte) { + data, err = ioutil.ReadAll(reader) + if err != nil { + err = fmt.Errorf("read input: %v", err) + return + } + uploadResult, uploadErr := doUploadData(uploadUrl, filename, cipher, data, isInputCompressed, mtype, pairMap, jwt) + return uploadResult, uploadErr, data +} + +func doUploadData(uploadUrl string, filename string, cipher bool, data []byte, isInputCompressed bool, mtype string, pairMap map[string]string, jwt security.EncodedJwt) (uploadResult *UploadResult, err error) { + contentIsGzipped := isInputCompressed + shouldGzipNow := false + if !isInputCompressed { + if mtype == "" { + mtype = http.DetectContentType(data) + // println("detect1 mimetype to", mtype) + if mtype == "application/octet-stream" { + mtype = "" + } + } + if shouldBeCompressed, iAmSure := util.IsCompressableFileType(filepath.Base(filename), mtype); iAmSure && shouldBeCompressed { + shouldGzipNow = true + } else if !iAmSure && mtype == "" && len(data) > 128 { + var compressed []byte + compressed, err = util.GzipData(data[0:128]) + shouldGzipNow = len(compressed)*10 < 128*9 // can not compress to less than 90% + } + } + + var clearDataLen int + + // gzip if possible + // this could be double copying + clearDataLen = len(data) + if shouldGzipNow { + compressed, compressErr := util.GzipData(data) + // fmt.Printf("data is compressed from %d ==> %d\n", len(data), len(compressed)) + if compressErr == nil { + data = compressed + 
contentIsGzipped = true + } + } else if isInputCompressed { + // just to get the clear data length + clearData, err := util.DecompressData(data) + if err == nil { + clearDataLen = len(clearData) + } + } + + if cipher { + // encrypt(gzip(data)) + + // encrypt + cipherKey := util.GenCipherKey() + encryptedData, encryptionErr := util.Encrypt(data, cipherKey) + if encryptionErr != nil { + err = fmt.Errorf("encrypt input: %v", encryptionErr) + return + } + + // upload data + uploadResult, err = upload_content(uploadUrl, func(w io.Writer) (err error) { + _, err = w.Write(encryptedData) + return + }, "", false, len(encryptedData), "", nil, jwt) + if uploadResult != nil { + uploadResult.Name = filename + uploadResult.Mime = mtype + uploadResult.CipherKey = cipherKey + } + } else { + // upload data + uploadResult, err = upload_content(uploadUrl, func(w io.Writer) (err error) { + _, err = w.Write(data) + return + }, filename, contentIsGzipped, 0, mtype, pairMap, jwt) + } + + if uploadResult == nil { return - }, filename, isGzipped, mtype, pairMap, jwt) + } + + uploadResult.Size = uint32(clearDataLen) + if contentIsGzipped { + uploadResult.Gzip = 1 + } + + return uploadResult, err } -func upload_content(uploadUrl string, fillBufferFunction func(w io.Writer) error, filename string, isGzipped bool, mtype string, pairMap map[string]string, jwt security.EncodedJwt) (*UploadResult, error) { + +func upload_content(uploadUrl string, fillBufferFunction func(w io.Writer) error, filename string, isGzipped bool, originalDataSize int, mtype string, pairMap map[string]string, jwt security.EncodedJwt) (*UploadResult, error) { body_buf := bytes.NewBufferString("") body_writer := multipart.NewWriter(body_buf) h := make(textproto.MIMEHeader) @@ -58,9 +187,6 @@ func upload_content(uploadUrl string, fillBufferFunction func(w io.Writer) error if isGzipped { h.Set("Content-Encoding", "gzip") } - if jwt != "" { - h.Set("Authorization", "BEARER "+string(jwt)) - } file_writer, cp_err := 
body_writer.CreatePart(h) if cp_err != nil { @@ -86,24 +212,26 @@ func upload_content(uploadUrl string, fillBufferFunction func(w io.Writer) error for k, v := range pairMap { req.Header.Set(k, v) } - resp, post_err := client.Do(req) + if jwt != "" { + req.Header.Set("Authorization", "BEARER "+string(jwt)) + } + resp, post_err := HttpClient.Do(req) if post_err != nil { glog.V(0).Infoln("failing to upload to", uploadUrl, post_err.Error()) return nil, post_err } defer resp.Body.Close() - if resp.StatusCode < http.StatusOK || - resp.StatusCode > http.StatusIMUsed { - return nil, errors.New(http.StatusText(resp.StatusCode)) - } - + var ret UploadResult etag := getEtag(resp) + if resp.StatusCode == http.StatusNoContent { + ret.ETag = etag + return &ret, nil + } resp_body, ra_err := ioutil.ReadAll(resp.Body) if ra_err != nil { return nil, ra_err } - var ret UploadResult unmarshal_err := json.Unmarshal(resp_body, &ret) if unmarshal_err != nil { glog.V(0).Infoln("failing to read upload response", uploadUrl, string(resp_body)) diff --git a/weed/pb/Makefile b/weed/pb/Makefile index c50410574..d2618937b 100644 --- a/weed/pb/Makefile +++ b/weed/pb/Makefile @@ -3,8 +3,10 @@ all: gen .PHONY : gen gen: - protoc master.proto --go_out=plugins=grpc:./master_pb - protoc volume_server.proto --go_out=plugins=grpc:./volume_server_pb - protoc filer.proto --go_out=plugins=grpc:./filer_pb + protoc master.proto --go_out=plugins=grpc:./master_pb --go_opt=paths=source_relative + protoc volume_server.proto --go_out=plugins=grpc:./volume_server_pb --go_opt=paths=source_relative + protoc filer.proto --go_out=plugins=grpc:./filer_pb --go_opt=paths=source_relative + protoc iam.proto --go_out=plugins=grpc:./iam_pb --go_opt=paths=source_relative + protoc messaging.proto --go_out=plugins=grpc:./messaging_pb --go_opt=paths=source_relative # protoc filer.proto --java_out=../../other/java/client/src/main/java cp filer.proto ../../other/java/client/src/main/proto diff --git a/weed/pb/filer.proto 
b/weed/pb/filer.proto index 6cd4df6b4..37121f29c 100644 --- a/weed/pb/filer.proto +++ b/weed/pb/filer.proto @@ -2,6 +2,7 @@ syntax = "proto3"; package filer_pb; +option go_package = "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"; option java_package = "seaweedfs.client"; option java_outer_classname = "FilerProto"; @@ -12,7 +13,7 @@ service SeaweedFiler { rpc LookupDirectoryEntry (LookupDirectoryEntryRequest) returns (LookupDirectoryEntryResponse) { } - rpc ListEntries (ListEntriesRequest) returns (ListEntriesResponse) { + rpc ListEntries (ListEntriesRequest) returns (stream ListEntriesResponse) { } rpc CreateEntry (CreateEntryRequest) returns (CreateEntryResponse) { @@ -21,9 +22,15 @@ service SeaweedFiler { rpc UpdateEntry (UpdateEntryRequest) returns (UpdateEntryResponse) { } + rpc AppendToEntry (AppendToEntryRequest) returns (AppendToEntryResponse) { + } + rpc DeleteEntry (DeleteEntryRequest) returns (DeleteEntryResponse) { } + rpc AtomicRenameEntry (AtomicRenameEntryRequest) returns (AtomicRenameEntryResponse) { + } + rpc AssignVolume (AssignVolumeRequest) returns (AssignVolumeResponse) { } @@ -36,6 +43,21 @@ service SeaweedFiler { rpc Statistics (StatisticsRequest) returns (StatisticsResponse) { } + rpc GetFilerConfiguration (GetFilerConfigurationRequest) returns (GetFilerConfigurationResponse) { + } + + rpc SubscribeMetadata (SubscribeMetadataRequest) returns (stream SubscribeMetadataResponse) { + } + + rpc SubscribeLocalMetadata (SubscribeMetadataRequest) returns (stream SubscribeMetadataResponse) { + } + + rpc KeepConnected (stream KeepConnectedRequest) returns (stream KeepConnectedResponse) { + } + + rpc LocateBroker (LocateBrokerRequest) returns (LocateBrokerResponse) { + } + } ////////////////////////////////////////////////// @@ -58,7 +80,7 @@ message ListEntriesRequest { } message ListEntriesResponse { - repeated Entry entries = 1; + Entry entry = 1; } message Entry { @@ -69,19 +91,36 @@ message Entry { map extended = 5; } +message FullEntry { + 
string dir = 1; + Entry entry = 2; +} + message EventNotification { Entry old_entry = 1; Entry new_entry = 2; bool delete_chunks = 3; + string new_parent_path = 4; + bool is_from_other_cluster = 5; } message FileChunk { - string file_id = 1; + string file_id = 1; // to be deprecated int64 offset = 2; uint64 size = 3; int64 mtime = 4; string e_tag = 5; - string source_file_id = 6; + string source_file_id = 6; // to be deprecated + FileId fid = 7; + FileId source_fid = 8; + bytes cipher_key = 9; + bool is_compressed = 10; +} + +message FileId { + uint32 volume_id = 1; + uint64 file_key = 2; + fixed32 cookie = 3; } message FuseAttributes { @@ -98,32 +137,58 @@ message FuseAttributes { string user_name = 11; // for hdfs repeated string group_name = 12; // for hdfs string symlink_target = 13; + bytes md5 = 14; } message CreateEntryRequest { string directory = 1; Entry entry = 2; + bool o_excl = 3; + bool is_from_other_cluster = 4; } message CreateEntryResponse { + string error = 1; } message UpdateEntryRequest { string directory = 1; Entry entry = 2; + bool is_from_other_cluster = 3; } message UpdateEntryResponse { } +message AppendToEntryRequest { + string directory = 1; + string entry_name = 2; + repeated FileChunk chunks = 3; +} +message AppendToEntryResponse { +} + message DeleteEntryRequest { string directory = 1; string name = 2; // bool is_directory = 3; bool is_delete_data = 4; bool is_recursive = 5; + bool ignore_recursive_error = 6; + bool is_from_other_cluster = 7; } message DeleteEntryResponse { + string error = 1; +} + +message AtomicRenameEntryRequest { + string old_directory = 1; + string old_name = 2; + string new_directory = 3; + string new_name = 4; +} + +message AtomicRenameEntryResponse { } message AssignVolumeRequest { @@ -132,6 +197,7 @@ message AssignVolumeRequest { string replication = 3; int32 ttl_sec = 4; string data_center = 5; + string parent_path = 6; } message AssignVolumeResponse { @@ -139,6 +205,10 @@ message AssignVolumeResponse { string 
url = 2; string public_url = 3; int32 count = 4; + string auth = 5; + string collection = 6; + string replication = 7; + string error = 8; } message LookupVolumeRequest { @@ -177,3 +247,53 @@ message StatisticsResponse { uint64 used_size = 5; uint64 file_count = 6; } + +message GetFilerConfigurationRequest { +} +message GetFilerConfigurationResponse { + repeated string masters = 1; + string replication = 2; + string collection = 3; + uint32 max_mb = 4; + string dir_buckets = 5; + bool cipher = 7; +} + +message SubscribeMetadataRequest { + string client_name = 1; + string path_prefix = 2; + int64 since_ns = 3; +} +message SubscribeMetadataResponse { + string directory = 1; + EventNotification event_notification = 2; + int64 ts_ns = 3; +} + +message LogEntry { + int64 ts_ns = 1; + int32 partition_key_hash = 2; + bytes data = 3; +} + +message KeepConnectedRequest { + string name = 1; + uint32 grpc_port = 2; + repeated string resources = 3; +} +message KeepConnectedResponse { +} + +message LocateBrokerRequest { + string resource = 1; +} +message LocateBrokerResponse { + bool found = 1; + // if found, send the exact address + // if not found, send the full list of existing brokers + message Resource { + string grpc_addresses = 1; + int32 resource_count = 2; + } + repeated Resource resources = 2; +} diff --git a/weed/pb/filer_pb/filer.pb.go b/weed/pb/filer_pb/filer.pb.go index 6b4a27c0a..456a31a1d 100644 --- a/weed/pb/filer_pb/filer.pb.go +++ b/weed/pb/filer_pb/filer.pb.go @@ -1,876 +1,3686 @@ -// Code generated by protoc-gen-go. +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.24.0 +// protoc v3.12.3 // source: filer.proto -// DO NOT EDIT! - -/* -Package filer_pb is a generated protocol buffer package. 
- -It is generated from these files: - filer.proto - -It has these top-level messages: - LookupDirectoryEntryRequest - LookupDirectoryEntryResponse - ListEntriesRequest - ListEntriesResponse - Entry - EventNotification - FileChunk - FuseAttributes - CreateEntryRequest - CreateEntryResponse - UpdateEntryRequest - UpdateEntryResponse - DeleteEntryRequest - DeleteEntryResponse - AssignVolumeRequest - AssignVolumeResponse - LookupVolumeRequest - Locations - Location - LookupVolumeResponse - DeleteCollectionRequest - DeleteCollectionResponse - StatisticsRequest - StatisticsResponse -*/ -package filer_pb -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" +package filer_pb import ( - context "golang.org/x/net/context" + context "context" + proto "github.com/golang/protobuf/proto" grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" ) -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package +// This is a compile-time assertion that a sufficiently up-to-date version +// of the legacy proto package is being used. 
+const _ = proto.ProtoPackageIsVersion4 type LookupDirectoryEntryRequest struct { - Directory string `protobuf:"bytes,1,opt,name=directory" json:"directory,omitempty"` - Name string `protobuf:"bytes,2,opt,name=name" json:"name,omitempty"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Directory string `protobuf:"bytes,1,opt,name=directory,proto3" json:"directory,omitempty"` + Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` +} + +func (x *LookupDirectoryEntryRequest) Reset() { + *x = LookupDirectoryEntryRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_filer_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *LookupDirectoryEntryRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LookupDirectoryEntryRequest) ProtoMessage() {} + +func (x *LookupDirectoryEntryRequest) ProtoReflect() protoreflect.Message { + mi := &file_filer_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -func (m *LookupDirectoryEntryRequest) Reset() { *m = LookupDirectoryEntryRequest{} } -func (m *LookupDirectoryEntryRequest) String() string { return proto.CompactTextString(m) } -func (*LookupDirectoryEntryRequest) ProtoMessage() {} -func (*LookupDirectoryEntryRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } +// Deprecated: Use LookupDirectoryEntryRequest.ProtoReflect.Descriptor instead. 
+func (*LookupDirectoryEntryRequest) Descriptor() ([]byte, []int) { + return file_filer_proto_rawDescGZIP(), []int{0} +} -func (m *LookupDirectoryEntryRequest) GetDirectory() string { - if m != nil { - return m.Directory +func (x *LookupDirectoryEntryRequest) GetDirectory() string { + if x != nil { + return x.Directory } return "" } -func (m *LookupDirectoryEntryRequest) GetName() string { - if m != nil { - return m.Name +func (x *LookupDirectoryEntryRequest) GetName() string { + if x != nil { + return x.Name } return "" } type LookupDirectoryEntryResponse struct { - Entry *Entry `protobuf:"bytes,1,opt,name=entry" json:"entry,omitempty"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Entry *Entry `protobuf:"bytes,1,opt,name=entry,proto3" json:"entry,omitempty"` +} + +func (x *LookupDirectoryEntryResponse) Reset() { + *x = LookupDirectoryEntryResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_filer_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *LookupDirectoryEntryResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LookupDirectoryEntryResponse) ProtoMessage() {} + +func (x *LookupDirectoryEntryResponse) ProtoReflect() protoreflect.Message { + mi := &file_filer_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -func (m *LookupDirectoryEntryResponse) Reset() { *m = LookupDirectoryEntryResponse{} } -func (m *LookupDirectoryEntryResponse) String() string { return proto.CompactTextString(m) } -func (*LookupDirectoryEntryResponse) ProtoMessage() {} -func (*LookupDirectoryEntryResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } +// Deprecated: Use LookupDirectoryEntryResponse.ProtoReflect.Descriptor instead. 
+func (*LookupDirectoryEntryResponse) Descriptor() ([]byte, []int) { + return file_filer_proto_rawDescGZIP(), []int{1} +} -func (m *LookupDirectoryEntryResponse) GetEntry() *Entry { - if m != nil { - return m.Entry +func (x *LookupDirectoryEntryResponse) GetEntry() *Entry { + if x != nil { + return x.Entry } return nil } type ListEntriesRequest struct { - Directory string `protobuf:"bytes,1,opt,name=directory" json:"directory,omitempty"` - Prefix string `protobuf:"bytes,2,opt,name=prefix" json:"prefix,omitempty"` - StartFromFileName string `protobuf:"bytes,3,opt,name=startFromFileName" json:"startFromFileName,omitempty"` - InclusiveStartFrom bool `protobuf:"varint,4,opt,name=inclusiveStartFrom" json:"inclusiveStartFrom,omitempty"` - Limit uint32 `protobuf:"varint,5,opt,name=limit" json:"limit,omitempty"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Directory string `protobuf:"bytes,1,opt,name=directory,proto3" json:"directory,omitempty"` + Prefix string `protobuf:"bytes,2,opt,name=prefix,proto3" json:"prefix,omitempty"` + StartFromFileName string `protobuf:"bytes,3,opt,name=startFromFileName,proto3" json:"startFromFileName,omitempty"` + InclusiveStartFrom bool `protobuf:"varint,4,opt,name=inclusiveStartFrom,proto3" json:"inclusiveStartFrom,omitempty"` + Limit uint32 `protobuf:"varint,5,opt,name=limit,proto3" json:"limit,omitempty"` +} + +func (x *ListEntriesRequest) Reset() { + *x = ListEntriesRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_filer_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListEntriesRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *ListEntriesRequest) Reset() { *m = ListEntriesRequest{} } -func (m *ListEntriesRequest) String() string { return proto.CompactTextString(m) } -func (*ListEntriesRequest) ProtoMessage() {} -func (*ListEntriesRequest) Descriptor() ([]byte, []int) { 
return fileDescriptor0, []int{2} } +func (*ListEntriesRequest) ProtoMessage() {} -func (m *ListEntriesRequest) GetDirectory() string { - if m != nil { - return m.Directory +func (x *ListEntriesRequest) ProtoReflect() protoreflect.Message { + mi := &file_filer_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListEntriesRequest.ProtoReflect.Descriptor instead. +func (*ListEntriesRequest) Descriptor() ([]byte, []int) { + return file_filer_proto_rawDescGZIP(), []int{2} +} + +func (x *ListEntriesRequest) GetDirectory() string { + if x != nil { + return x.Directory } return "" } -func (m *ListEntriesRequest) GetPrefix() string { - if m != nil { - return m.Prefix +func (x *ListEntriesRequest) GetPrefix() string { + if x != nil { + return x.Prefix } return "" } -func (m *ListEntriesRequest) GetStartFromFileName() string { - if m != nil { - return m.StartFromFileName +func (x *ListEntriesRequest) GetStartFromFileName() string { + if x != nil { + return x.StartFromFileName } return "" } -func (m *ListEntriesRequest) GetInclusiveStartFrom() bool { - if m != nil { - return m.InclusiveStartFrom +func (x *ListEntriesRequest) GetInclusiveStartFrom() bool { + if x != nil { + return x.InclusiveStartFrom } return false } -func (m *ListEntriesRequest) GetLimit() uint32 { - if m != nil { - return m.Limit +func (x *ListEntriesRequest) GetLimit() uint32 { + if x != nil { + return x.Limit } return 0 } type ListEntriesResponse struct { - Entries []*Entry `protobuf:"bytes,1,rep,name=entries" json:"entries,omitempty"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Entry *Entry `protobuf:"bytes,1,opt,name=entry,proto3" json:"entry,omitempty"` +} + +func (x *ListEntriesResponse) Reset() { + *x = ListEntriesResponse{} + if 
protoimpl.UnsafeEnabled { + mi := &file_filer_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListEntriesResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListEntriesResponse) ProtoMessage() {} + +func (x *ListEntriesResponse) ProtoReflect() protoreflect.Message { + mi := &file_filer_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -func (m *ListEntriesResponse) Reset() { *m = ListEntriesResponse{} } -func (m *ListEntriesResponse) String() string { return proto.CompactTextString(m) } -func (*ListEntriesResponse) ProtoMessage() {} -func (*ListEntriesResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } +// Deprecated: Use ListEntriesResponse.ProtoReflect.Descriptor instead. +func (*ListEntriesResponse) Descriptor() ([]byte, []int) { + return file_filer_proto_rawDescGZIP(), []int{3} +} -func (m *ListEntriesResponse) GetEntries() []*Entry { - if m != nil { - return m.Entries +func (x *ListEntriesResponse) GetEntry() *Entry { + if x != nil { + return x.Entry } return nil } type Entry struct { - Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - IsDirectory bool `protobuf:"varint,2,opt,name=is_directory,json=isDirectory" json:"is_directory,omitempty"` - Chunks []*FileChunk `protobuf:"bytes,3,rep,name=chunks" json:"chunks,omitempty"` - Attributes *FuseAttributes `protobuf:"bytes,4,opt,name=attributes" json:"attributes,omitempty"` - Extended map[string][]byte `protobuf:"bytes,5,rep,name=extended" json:"extended,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value,proto3"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name string 
`protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + IsDirectory bool `protobuf:"varint,2,opt,name=is_directory,json=isDirectory,proto3" json:"is_directory,omitempty"` + Chunks []*FileChunk `protobuf:"bytes,3,rep,name=chunks,proto3" json:"chunks,omitempty"` + Attributes *FuseAttributes `protobuf:"bytes,4,opt,name=attributes,proto3" json:"attributes,omitempty"` + Extended map[string][]byte `protobuf:"bytes,5,rep,name=extended,proto3" json:"extended,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` } -func (m *Entry) Reset() { *m = Entry{} } -func (m *Entry) String() string { return proto.CompactTextString(m) } -func (*Entry) ProtoMessage() {} -func (*Entry) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } +func (x *Entry) Reset() { + *x = Entry{} + if protoimpl.UnsafeEnabled { + mi := &file_filer_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Entry) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Entry) ProtoMessage() {} + +func (x *Entry) ProtoReflect() protoreflect.Message { + mi := &file_filer_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} -func (m *Entry) GetName() string { - if m != nil { - return m.Name +// Deprecated: Use Entry.ProtoReflect.Descriptor instead. 
+func (*Entry) Descriptor() ([]byte, []int) { + return file_filer_proto_rawDescGZIP(), []int{4} +} + +func (x *Entry) GetName() string { + if x != nil { + return x.Name } return "" } -func (m *Entry) GetIsDirectory() bool { - if m != nil { - return m.IsDirectory +func (x *Entry) GetIsDirectory() bool { + if x != nil { + return x.IsDirectory } return false } -func (m *Entry) GetChunks() []*FileChunk { - if m != nil { - return m.Chunks +func (x *Entry) GetChunks() []*FileChunk { + if x != nil { + return x.Chunks + } + return nil +} + +func (x *Entry) GetAttributes() *FuseAttributes { + if x != nil { + return x.Attributes } return nil } -func (m *Entry) GetAttributes() *FuseAttributes { - if m != nil { - return m.Attributes +func (x *Entry) GetExtended() map[string][]byte { + if x != nil { + return x.Extended } return nil } -func (m *Entry) GetExtended() map[string][]byte { - if m != nil { - return m.Extended +type FullEntry struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Dir string `protobuf:"bytes,1,opt,name=dir,proto3" json:"dir,omitempty"` + Entry *Entry `protobuf:"bytes,2,opt,name=entry,proto3" json:"entry,omitempty"` +} + +func (x *FullEntry) Reset() { + *x = FullEntry{} + if protoimpl.UnsafeEnabled { + mi := &file_filer_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FullEntry) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FullEntry) ProtoMessage() {} + +func (x *FullEntry) ProtoReflect() protoreflect.Message { + mi := &file_filer_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FullEntry.ProtoReflect.Descriptor instead. 
+func (*FullEntry) Descriptor() ([]byte, []int) { + return file_filer_proto_rawDescGZIP(), []int{5} +} + +func (x *FullEntry) GetDir() string { + if x != nil { + return x.Dir + } + return "" +} + +func (x *FullEntry) GetEntry() *Entry { + if x != nil { + return x.Entry } return nil } type EventNotification struct { - OldEntry *Entry `protobuf:"bytes,1,opt,name=old_entry,json=oldEntry" json:"old_entry,omitempty"` - NewEntry *Entry `protobuf:"bytes,2,opt,name=new_entry,json=newEntry" json:"new_entry,omitempty"` - DeleteChunks bool `protobuf:"varint,3,opt,name=delete_chunks,json=deleteChunks" json:"delete_chunks,omitempty"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + OldEntry *Entry `protobuf:"bytes,1,opt,name=old_entry,json=oldEntry,proto3" json:"old_entry,omitempty"` + NewEntry *Entry `protobuf:"bytes,2,opt,name=new_entry,json=newEntry,proto3" json:"new_entry,omitempty"` + DeleteChunks bool `protobuf:"varint,3,opt,name=delete_chunks,json=deleteChunks,proto3" json:"delete_chunks,omitempty"` + NewParentPath string `protobuf:"bytes,4,opt,name=new_parent_path,json=newParentPath,proto3" json:"new_parent_path,omitempty"` + IsFromOtherCluster bool `protobuf:"varint,5,opt,name=is_from_other_cluster,json=isFromOtherCluster,proto3" json:"is_from_other_cluster,omitempty"` } -func (m *EventNotification) Reset() { *m = EventNotification{} } -func (m *EventNotification) String() string { return proto.CompactTextString(m) } -func (*EventNotification) ProtoMessage() {} -func (*EventNotification) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} } +func (x *EventNotification) Reset() { + *x = EventNotification{} + if protoimpl.UnsafeEnabled { + mi := &file_filer_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *EventNotification) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*EventNotification) ProtoMessage() {} + 
+func (x *EventNotification) ProtoReflect() protoreflect.Message { + mi := &file_filer_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} -func (m *EventNotification) GetOldEntry() *Entry { - if m != nil { - return m.OldEntry +// Deprecated: Use EventNotification.ProtoReflect.Descriptor instead. +func (*EventNotification) Descriptor() ([]byte, []int) { + return file_filer_proto_rawDescGZIP(), []int{6} +} + +func (x *EventNotification) GetOldEntry() *Entry { + if x != nil { + return x.OldEntry } return nil } -func (m *EventNotification) GetNewEntry() *Entry { - if m != nil { - return m.NewEntry +func (x *EventNotification) GetNewEntry() *Entry { + if x != nil { + return x.NewEntry } return nil } -func (m *EventNotification) GetDeleteChunks() bool { - if m != nil { - return m.DeleteChunks +func (x *EventNotification) GetDeleteChunks() bool { + if x != nil { + return x.DeleteChunks + } + return false +} + +func (x *EventNotification) GetNewParentPath() string { + if x != nil { + return x.NewParentPath + } + return "" +} + +func (x *EventNotification) GetIsFromOtherCluster() bool { + if x != nil { + return x.IsFromOtherCluster } return false } type FileChunk struct { - FileId string `protobuf:"bytes,1,opt,name=file_id,json=fileId" json:"file_id,omitempty"` - Offset int64 `protobuf:"varint,2,opt,name=offset" json:"offset,omitempty"` - Size uint64 `protobuf:"varint,3,opt,name=size" json:"size,omitempty"` - Mtime int64 `protobuf:"varint,4,opt,name=mtime" json:"mtime,omitempty"` - ETag string `protobuf:"bytes,5,opt,name=e_tag,json=eTag" json:"e_tag,omitempty"` - SourceFileId string `protobuf:"bytes,6,opt,name=source_file_id,json=sourceFileId" json:"source_file_id,omitempty"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + FileId string 
`protobuf:"bytes,1,opt,name=file_id,json=fileId,proto3" json:"file_id,omitempty"` // to be deprecated + Offset int64 `protobuf:"varint,2,opt,name=offset,proto3" json:"offset,omitempty"` + Size uint64 `protobuf:"varint,3,opt,name=size,proto3" json:"size,omitempty"` + Mtime int64 `protobuf:"varint,4,opt,name=mtime,proto3" json:"mtime,omitempty"` + ETag string `protobuf:"bytes,5,opt,name=e_tag,json=eTag,proto3" json:"e_tag,omitempty"` + SourceFileId string `protobuf:"bytes,6,opt,name=source_file_id,json=sourceFileId,proto3" json:"source_file_id,omitempty"` // to be deprecated + Fid *FileId `protobuf:"bytes,7,opt,name=fid,proto3" json:"fid,omitempty"` + SourceFid *FileId `protobuf:"bytes,8,opt,name=source_fid,json=sourceFid,proto3" json:"source_fid,omitempty"` + CipherKey []byte `protobuf:"bytes,9,opt,name=cipher_key,json=cipherKey,proto3" json:"cipher_key,omitempty"` + IsCompressed bool `protobuf:"varint,10,opt,name=is_compressed,json=isCompressed,proto3" json:"is_compressed,omitempty"` +} + +func (x *FileChunk) Reset() { + *x = FileChunk{} + if protoimpl.UnsafeEnabled { + mi := &file_filer_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FileChunk) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FileChunk) ProtoMessage() {} + +func (x *FileChunk) ProtoReflect() protoreflect.Message { + mi := &file_filer_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -func (m *FileChunk) Reset() { *m = FileChunk{} } -func (m *FileChunk) String() string { return proto.CompactTextString(m) } -func (*FileChunk) ProtoMessage() {} -func (*FileChunk) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} } +// Deprecated: Use FileChunk.ProtoReflect.Descriptor instead. 
+func (*FileChunk) Descriptor() ([]byte, []int) { + return file_filer_proto_rawDescGZIP(), []int{7} +} -func (m *FileChunk) GetFileId() string { - if m != nil { - return m.FileId +func (x *FileChunk) GetFileId() string { + if x != nil { + return x.FileId } return "" } -func (m *FileChunk) GetOffset() int64 { - if m != nil { - return m.Offset +func (x *FileChunk) GetOffset() int64 { + if x != nil { + return x.Offset } return 0 } -func (m *FileChunk) GetSize() uint64 { - if m != nil { - return m.Size +func (x *FileChunk) GetSize() uint64 { + if x != nil { + return x.Size } return 0 } -func (m *FileChunk) GetMtime() int64 { - if m != nil { - return m.Mtime +func (x *FileChunk) GetMtime() int64 { + if x != nil { + return x.Mtime } return 0 } -func (m *FileChunk) GetETag() string { - if m != nil { - return m.ETag +func (x *FileChunk) GetETag() string { + if x != nil { + return x.ETag } return "" } -func (m *FileChunk) GetSourceFileId() string { - if m != nil { - return m.SourceFileId +func (x *FileChunk) GetSourceFileId() string { + if x != nil { + return x.SourceFileId } return "" } +func (x *FileChunk) GetFid() *FileId { + if x != nil { + return x.Fid + } + return nil +} + +func (x *FileChunk) GetSourceFid() *FileId { + if x != nil { + return x.SourceFid + } + return nil +} + +func (x *FileChunk) GetCipherKey() []byte { + if x != nil { + return x.CipherKey + } + return nil +} + +func (x *FileChunk) GetIsCompressed() bool { + if x != nil { + return x.IsCompressed + } + return false +} + +type FileId struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` + FileKey uint64 `protobuf:"varint,2,opt,name=file_key,json=fileKey,proto3" json:"file_key,omitempty"` + Cookie uint32 `protobuf:"fixed32,3,opt,name=cookie,proto3" json:"cookie,omitempty"` +} + +func (x *FileId) Reset() { + *x = FileId{} + if 
protoimpl.UnsafeEnabled { + mi := &file_filer_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FileId) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FileId) ProtoMessage() {} + +func (x *FileId) ProtoReflect() protoreflect.Message { + mi := &file_filer_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FileId.ProtoReflect.Descriptor instead. +func (*FileId) Descriptor() ([]byte, []int) { + return file_filer_proto_rawDescGZIP(), []int{8} +} + +func (x *FileId) GetVolumeId() uint32 { + if x != nil { + return x.VolumeId + } + return 0 +} + +func (x *FileId) GetFileKey() uint64 { + if x != nil { + return x.FileKey + } + return 0 +} + +func (x *FileId) GetCookie() uint32 { + if x != nil { + return x.Cookie + } + return 0 +} + type FuseAttributes struct { - FileSize uint64 `protobuf:"varint,1,opt,name=file_size,json=fileSize" json:"file_size,omitempty"` - Mtime int64 `protobuf:"varint,2,opt,name=mtime" json:"mtime,omitempty"` - FileMode uint32 `protobuf:"varint,3,opt,name=file_mode,json=fileMode" json:"file_mode,omitempty"` - Uid uint32 `protobuf:"varint,4,opt,name=uid" json:"uid,omitempty"` - Gid uint32 `protobuf:"varint,5,opt,name=gid" json:"gid,omitempty"` - Crtime int64 `protobuf:"varint,6,opt,name=crtime" json:"crtime,omitempty"` - Mime string `protobuf:"bytes,7,opt,name=mime" json:"mime,omitempty"` - Replication string `protobuf:"bytes,8,opt,name=replication" json:"replication,omitempty"` - Collection string `protobuf:"bytes,9,opt,name=collection" json:"collection,omitempty"` - TtlSec int32 `protobuf:"varint,10,opt,name=ttl_sec,json=ttlSec" json:"ttl_sec,omitempty"` - UserName string `protobuf:"bytes,11,opt,name=user_name,json=userName" json:"user_name,omitempty"` - GroupName 
[]string `protobuf:"bytes,12,rep,name=group_name,json=groupName" json:"group_name,omitempty"` - SymlinkTarget string `protobuf:"bytes,13,opt,name=symlink_target,json=symlinkTarget" json:"symlink_target,omitempty"` -} - -func (m *FuseAttributes) Reset() { *m = FuseAttributes{} } -func (m *FuseAttributes) String() string { return proto.CompactTextString(m) } -func (*FuseAttributes) ProtoMessage() {} -func (*FuseAttributes) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} } - -func (m *FuseAttributes) GetFileSize() uint64 { - if m != nil { - return m.FileSize + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + FileSize uint64 `protobuf:"varint,1,opt,name=file_size,json=fileSize,proto3" json:"file_size,omitempty"` + Mtime int64 `protobuf:"varint,2,opt,name=mtime,proto3" json:"mtime,omitempty"` // unix time in seconds + FileMode uint32 `protobuf:"varint,3,opt,name=file_mode,json=fileMode,proto3" json:"file_mode,omitempty"` + Uid uint32 `protobuf:"varint,4,opt,name=uid,proto3" json:"uid,omitempty"` + Gid uint32 `protobuf:"varint,5,opt,name=gid,proto3" json:"gid,omitempty"` + Crtime int64 `protobuf:"varint,6,opt,name=crtime,proto3" json:"crtime,omitempty"` // unix time in seconds + Mime string `protobuf:"bytes,7,opt,name=mime,proto3" json:"mime,omitempty"` + Replication string `protobuf:"bytes,8,opt,name=replication,proto3" json:"replication,omitempty"` + Collection string `protobuf:"bytes,9,opt,name=collection,proto3" json:"collection,omitempty"` + TtlSec int32 `protobuf:"varint,10,opt,name=ttl_sec,json=ttlSec,proto3" json:"ttl_sec,omitempty"` + UserName string `protobuf:"bytes,11,opt,name=user_name,json=userName,proto3" json:"user_name,omitempty"` // for hdfs + GroupName []string `protobuf:"bytes,12,rep,name=group_name,json=groupName,proto3" json:"group_name,omitempty"` // for hdfs + SymlinkTarget string `protobuf:"bytes,13,opt,name=symlink_target,json=symlinkTarget,proto3" 
json:"symlink_target,omitempty"` + Md5 []byte `protobuf:"bytes,14,opt,name=md5,proto3" json:"md5,omitempty"` +} + +func (x *FuseAttributes) Reset() { + *x = FuseAttributes{} + if protoimpl.UnsafeEnabled { + mi := &file_filer_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FuseAttributes) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FuseAttributes) ProtoMessage() {} + +func (x *FuseAttributes) ProtoReflect() protoreflect.Message { + mi := &file_filer_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FuseAttributes.ProtoReflect.Descriptor instead. +func (*FuseAttributes) Descriptor() ([]byte, []int) { + return file_filer_proto_rawDescGZIP(), []int{9} +} + +func (x *FuseAttributes) GetFileSize() uint64 { + if x != nil { + return x.FileSize } return 0 } -func (m *FuseAttributes) GetMtime() int64 { - if m != nil { - return m.Mtime +func (x *FuseAttributes) GetMtime() int64 { + if x != nil { + return x.Mtime } return 0 } -func (m *FuseAttributes) GetFileMode() uint32 { - if m != nil { - return m.FileMode +func (x *FuseAttributes) GetFileMode() uint32 { + if x != nil { + return x.FileMode } return 0 } -func (m *FuseAttributes) GetUid() uint32 { - if m != nil { - return m.Uid +func (x *FuseAttributes) GetUid() uint32 { + if x != nil { + return x.Uid } return 0 } -func (m *FuseAttributes) GetGid() uint32 { - if m != nil { - return m.Gid +func (x *FuseAttributes) GetGid() uint32 { + if x != nil { + return x.Gid } return 0 } -func (m *FuseAttributes) GetCrtime() int64 { - if m != nil { - return m.Crtime +func (x *FuseAttributes) GetCrtime() int64 { + if x != nil { + return x.Crtime } return 0 } -func (m *FuseAttributes) GetMime() string { - if m != nil { - return m.Mime +func (x 
*FuseAttributes) GetMime() string { + if x != nil { + return x.Mime } return "" } -func (m *FuseAttributes) GetReplication() string { - if m != nil { - return m.Replication +func (x *FuseAttributes) GetReplication() string { + if x != nil { + return x.Replication } return "" } -func (m *FuseAttributes) GetCollection() string { - if m != nil { - return m.Collection +func (x *FuseAttributes) GetCollection() string { + if x != nil { + return x.Collection } return "" } -func (m *FuseAttributes) GetTtlSec() int32 { - if m != nil { - return m.TtlSec +func (x *FuseAttributes) GetTtlSec() int32 { + if x != nil { + return x.TtlSec } return 0 } -func (m *FuseAttributes) GetUserName() string { - if m != nil { - return m.UserName +func (x *FuseAttributes) GetUserName() string { + if x != nil { + return x.UserName } return "" } -func (m *FuseAttributes) GetGroupName() []string { - if m != nil { - return m.GroupName +func (x *FuseAttributes) GetGroupName() []string { + if x != nil { + return x.GroupName } return nil } -func (m *FuseAttributes) GetSymlinkTarget() string { - if m != nil { - return m.SymlinkTarget +func (x *FuseAttributes) GetSymlinkTarget() string { + if x != nil { + return x.SymlinkTarget } return "" } +func (x *FuseAttributes) GetMd5() []byte { + if x != nil { + return x.Md5 + } + return nil +} + type CreateEntryRequest struct { - Directory string `protobuf:"bytes,1,opt,name=directory" json:"directory,omitempty"` - Entry *Entry `protobuf:"bytes,2,opt,name=entry" json:"entry,omitempty"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Directory string `protobuf:"bytes,1,opt,name=directory,proto3" json:"directory,omitempty"` + Entry *Entry `protobuf:"bytes,2,opt,name=entry,proto3" json:"entry,omitempty"` + OExcl bool `protobuf:"varint,3,opt,name=o_excl,json=oExcl,proto3" json:"o_excl,omitempty"` + IsFromOtherCluster bool `protobuf:"varint,4,opt,name=is_from_other_cluster,json=isFromOtherCluster,proto3" 
json:"is_from_other_cluster,omitempty"` +} + +func (x *CreateEntryRequest) Reset() { + *x = CreateEntryRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_filer_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CreateEntryRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CreateEntryRequest) ProtoMessage() {} + +func (x *CreateEntryRequest) ProtoReflect() protoreflect.Message { + mi := &file_filer_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -func (m *CreateEntryRequest) Reset() { *m = CreateEntryRequest{} } -func (m *CreateEntryRequest) String() string { return proto.CompactTextString(m) } -func (*CreateEntryRequest) ProtoMessage() {} -func (*CreateEntryRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} } +// Deprecated: Use CreateEntryRequest.ProtoReflect.Descriptor instead. 
+func (*CreateEntryRequest) Descriptor() ([]byte, []int) { + return file_filer_proto_rawDescGZIP(), []int{10} +} -func (m *CreateEntryRequest) GetDirectory() string { - if m != nil { - return m.Directory +func (x *CreateEntryRequest) GetDirectory() string { + if x != nil { + return x.Directory } return "" } -func (m *CreateEntryRequest) GetEntry() *Entry { - if m != nil { - return m.Entry +func (x *CreateEntryRequest) GetEntry() *Entry { + if x != nil { + return x.Entry } return nil } +func (x *CreateEntryRequest) GetOExcl() bool { + if x != nil { + return x.OExcl + } + return false +} + +func (x *CreateEntryRequest) GetIsFromOtherCluster() bool { + if x != nil { + return x.IsFromOtherCluster + } + return false +} + type CreateEntryResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Error string `protobuf:"bytes,1,opt,name=error,proto3" json:"error,omitempty"` } -func (m *CreateEntryResponse) Reset() { *m = CreateEntryResponse{} } -func (m *CreateEntryResponse) String() string { return proto.CompactTextString(m) } -func (*CreateEntryResponse) ProtoMessage() {} -func (*CreateEntryResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} } +func (x *CreateEntryResponse) Reset() { + *x = CreateEntryResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_filer_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} -type UpdateEntryRequest struct { - Directory string `protobuf:"bytes,1,opt,name=directory" json:"directory,omitempty"` - Entry *Entry `protobuf:"bytes,2,opt,name=entry" json:"entry,omitempty"` +func (x *CreateEntryResponse) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *UpdateEntryRequest) Reset() { *m = UpdateEntryRequest{} } -func (m *UpdateEntryRequest) String() string { return proto.CompactTextString(m) } -func (*UpdateEntryRequest) ProtoMessage() {} -func (*UpdateEntryRequest) Descriptor() 
([]byte, []int) { return fileDescriptor0, []int{10} } +func (*CreateEntryResponse) ProtoMessage() {} -func (m *UpdateEntryRequest) GetDirectory() string { - if m != nil { - return m.Directory +func (x *CreateEntryResponse) ProtoReflect() protoreflect.Message { + mi := &file_filer_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return "" + return mi.MessageOf(x) +} + +// Deprecated: Use CreateEntryResponse.ProtoReflect.Descriptor instead. +func (*CreateEntryResponse) Descriptor() ([]byte, []int) { + return file_filer_proto_rawDescGZIP(), []int{11} } -func (m *UpdateEntryRequest) GetEntry() *Entry { - if m != nil { - return m.Entry +func (x *CreateEntryResponse) GetError() string { + if x != nil { + return x.Error } - return nil + return "" } -type UpdateEntryResponse struct { +type UpdateEntryRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Directory string `protobuf:"bytes,1,opt,name=directory,proto3" json:"directory,omitempty"` + Entry *Entry `protobuf:"bytes,2,opt,name=entry,proto3" json:"entry,omitempty"` + IsFromOtherCluster bool `protobuf:"varint,3,opt,name=is_from_other_cluster,json=isFromOtherCluster,proto3" json:"is_from_other_cluster,omitempty"` } -func (m *UpdateEntryResponse) Reset() { *m = UpdateEntryResponse{} } -func (m *UpdateEntryResponse) String() string { return proto.CompactTextString(m) } -func (*UpdateEntryResponse) ProtoMessage() {} -func (*UpdateEntryResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11} } +func (x *UpdateEntryRequest) Reset() { + *x = UpdateEntryRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_filer_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} -type DeleteEntryRequest struct { - Directory string 
`protobuf:"bytes,1,opt,name=directory" json:"directory,omitempty"` - Name string `protobuf:"bytes,2,opt,name=name" json:"name,omitempty"` - // bool is_directory = 3; - IsDeleteData bool `protobuf:"varint,4,opt,name=is_delete_data,json=isDeleteData" json:"is_delete_data,omitempty"` - IsRecursive bool `protobuf:"varint,5,opt,name=is_recursive,json=isRecursive" json:"is_recursive,omitempty"` +func (x *UpdateEntryRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *DeleteEntryRequest) Reset() { *m = DeleteEntryRequest{} } -func (m *DeleteEntryRequest) String() string { return proto.CompactTextString(m) } -func (*DeleteEntryRequest) ProtoMessage() {} -func (*DeleteEntryRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{12} } +func (*UpdateEntryRequest) ProtoMessage() {} -func (m *DeleteEntryRequest) GetDirectory() string { - if m != nil { - return m.Directory +func (x *UpdateEntryRequest) ProtoReflect() protoreflect.Message { + mi := &file_filer_proto_msgTypes[12] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return "" + return mi.MessageOf(x) +} + +// Deprecated: Use UpdateEntryRequest.ProtoReflect.Descriptor instead. 
+func (*UpdateEntryRequest) Descriptor() ([]byte, []int) { + return file_filer_proto_rawDescGZIP(), []int{12} } -func (m *DeleteEntryRequest) GetName() string { - if m != nil { - return m.Name +func (x *UpdateEntryRequest) GetDirectory() string { + if x != nil { + return x.Directory } return "" } -func (m *DeleteEntryRequest) GetIsDeleteData() bool { - if m != nil { - return m.IsDeleteData +func (x *UpdateEntryRequest) GetEntry() *Entry { + if x != nil { + return x.Entry } - return false + return nil } -func (m *DeleteEntryRequest) GetIsRecursive() bool { - if m != nil { - return m.IsRecursive +func (x *UpdateEntryRequest) GetIsFromOtherCluster() bool { + if x != nil { + return x.IsFromOtherCluster } return false } -type DeleteEntryResponse struct { +type UpdateEntryResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields } -func (m *DeleteEntryResponse) Reset() { *m = DeleteEntryResponse{} } -func (m *DeleteEntryResponse) String() string { return proto.CompactTextString(m) } -func (*DeleteEntryResponse) ProtoMessage() {} -func (*DeleteEntryResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{13} } +func (x *UpdateEntryResponse) Reset() { + *x = UpdateEntryResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_filer_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} -type AssignVolumeRequest struct { - Count int32 `protobuf:"varint,1,opt,name=count" json:"count,omitempty"` - Collection string `protobuf:"bytes,2,opt,name=collection" json:"collection,omitempty"` - Replication string `protobuf:"bytes,3,opt,name=replication" json:"replication,omitempty"` - TtlSec int32 `protobuf:"varint,4,opt,name=ttl_sec,json=ttlSec" json:"ttl_sec,omitempty"` - DataCenter string `protobuf:"bytes,5,opt,name=data_center,json=dataCenter" json:"data_center,omitempty"` +func (x *UpdateEntryResponse) String() string { + return 
protoimpl.X.MessageStringOf(x) } -func (m *AssignVolumeRequest) Reset() { *m = AssignVolumeRequest{} } -func (m *AssignVolumeRequest) String() string { return proto.CompactTextString(m) } -func (*AssignVolumeRequest) ProtoMessage() {} -func (*AssignVolumeRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{14} } +func (*UpdateEntryResponse) ProtoMessage() {} -func (m *AssignVolumeRequest) GetCount() int32 { - if m != nil { - return m.Count +func (x *UpdateEntryResponse) ProtoReflect() protoreflect.Message { + mi := &file_filer_proto_msgTypes[13] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return 0 + return mi.MessageOf(x) } -func (m *AssignVolumeRequest) GetCollection() string { - if m != nil { - return m.Collection - } - return "" +// Deprecated: Use UpdateEntryResponse.ProtoReflect.Descriptor instead. +func (*UpdateEntryResponse) Descriptor() ([]byte, []int) { + return file_filer_proto_rawDescGZIP(), []int{13} } -func (m *AssignVolumeRequest) GetReplication() string { - if m != nil { - return m.Replication - } - return "" +type AppendToEntryRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Directory string `protobuf:"bytes,1,opt,name=directory,proto3" json:"directory,omitempty"` + EntryName string `protobuf:"bytes,2,opt,name=entry_name,json=entryName,proto3" json:"entry_name,omitempty"` + Chunks []*FileChunk `protobuf:"bytes,3,rep,name=chunks,proto3" json:"chunks,omitempty"` } -func (m *AssignVolumeRequest) GetTtlSec() int32 { - if m != nil { - return m.TtlSec +func (x *AppendToEntryRequest) Reset() { + *x = AppendToEntryRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_filer_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return 0 } -func (m *AssignVolumeRequest) 
GetDataCenter() string { - if m != nil { - return m.DataCenter - } - return "" +func (x *AppendToEntryRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -type AssignVolumeResponse struct { - FileId string `protobuf:"bytes,1,opt,name=file_id,json=fileId" json:"file_id,omitempty"` - Url string `protobuf:"bytes,2,opt,name=url" json:"url,omitempty"` - PublicUrl string `protobuf:"bytes,3,opt,name=public_url,json=publicUrl" json:"public_url,omitempty"` - Count int32 `protobuf:"varint,4,opt,name=count" json:"count,omitempty"` +func (*AppendToEntryRequest) ProtoMessage() {} + +func (x *AppendToEntryRequest) ProtoReflect() protoreflect.Message { + mi := &file_filer_proto_msgTypes[14] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -func (m *AssignVolumeResponse) Reset() { *m = AssignVolumeResponse{} } -func (m *AssignVolumeResponse) String() string { return proto.CompactTextString(m) } -func (*AssignVolumeResponse) ProtoMessage() {} -func (*AssignVolumeResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{15} } +// Deprecated: Use AppendToEntryRequest.ProtoReflect.Descriptor instead. 
+func (*AppendToEntryRequest) Descriptor() ([]byte, []int) { + return file_filer_proto_rawDescGZIP(), []int{14} +} -func (m *AssignVolumeResponse) GetFileId() string { - if m != nil { - return m.FileId +func (x *AppendToEntryRequest) GetDirectory() string { + if x != nil { + return x.Directory } return "" } -func (m *AssignVolumeResponse) GetUrl() string { - if m != nil { - return m.Url +func (x *AppendToEntryRequest) GetEntryName() string { + if x != nil { + return x.EntryName } return "" } -func (m *AssignVolumeResponse) GetPublicUrl() string { - if m != nil { - return m.PublicUrl +func (x *AppendToEntryRequest) GetChunks() []*FileChunk { + if x != nil { + return x.Chunks } - return "" + return nil +} + +type AppendToEntryResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields } -func (m *AssignVolumeResponse) GetCount() int32 { - if m != nil { - return m.Count +func (x *AppendToEntryResponse) Reset() { + *x = AppendToEntryResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_filer_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return 0 } -type LookupVolumeRequest struct { - VolumeIds []string `protobuf:"bytes,1,rep,name=volume_ids,json=volumeIds" json:"volume_ids,omitempty"` +func (x *AppendToEntryResponse) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *LookupVolumeRequest) Reset() { *m = LookupVolumeRequest{} } -func (m *LookupVolumeRequest) String() string { return proto.CompactTextString(m) } -func (*LookupVolumeRequest) ProtoMessage() {} -func (*LookupVolumeRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{16} } +func (*AppendToEntryResponse) ProtoMessage() {} -func (m *LookupVolumeRequest) GetVolumeIds() []string { - if m != nil { - return m.VolumeIds +func (x *AppendToEntryResponse) ProtoReflect() protoreflect.Message { + mi := &file_filer_proto_msgTypes[15] + if protoimpl.UnsafeEnabled && 
x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return nil + return mi.MessageOf(x) } -type Locations struct { - Locations []*Location `protobuf:"bytes,1,rep,name=locations" json:"locations,omitempty"` +// Deprecated: Use AppendToEntryResponse.ProtoReflect.Descriptor instead. +func (*AppendToEntryResponse) Descriptor() ([]byte, []int) { + return file_filer_proto_rawDescGZIP(), []int{15} } -func (m *Locations) Reset() { *m = Locations{} } -func (m *Locations) String() string { return proto.CompactTextString(m) } -func (*Locations) ProtoMessage() {} -func (*Locations) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{17} } +type DeleteEntryRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Directory string `protobuf:"bytes,1,opt,name=directory,proto3" json:"directory,omitempty"` + Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` + // bool is_directory = 3; + IsDeleteData bool `protobuf:"varint,4,opt,name=is_delete_data,json=isDeleteData,proto3" json:"is_delete_data,omitempty"` + IsRecursive bool `protobuf:"varint,5,opt,name=is_recursive,json=isRecursive,proto3" json:"is_recursive,omitempty"` + IgnoreRecursiveError bool `protobuf:"varint,6,opt,name=ignore_recursive_error,json=ignoreRecursiveError,proto3" json:"ignore_recursive_error,omitempty"` + IsFromOtherCluster bool `protobuf:"varint,7,opt,name=is_from_other_cluster,json=isFromOtherCluster,proto3" json:"is_from_other_cluster,omitempty"` +} -func (m *Locations) GetLocations() []*Location { - if m != nil { - return m.Locations +func (x *DeleteEntryRequest) Reset() { + *x = DeleteEntryRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_filer_proto_msgTypes[16] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return nil } -type Location struct { - Url string 
`protobuf:"bytes,1,opt,name=url" json:"url,omitempty"` - PublicUrl string `protobuf:"bytes,2,opt,name=public_url,json=publicUrl" json:"public_url,omitempty"` +func (x *DeleteEntryRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DeleteEntryRequest) ProtoMessage() {} + +func (x *DeleteEntryRequest) ProtoReflect() protoreflect.Message { + mi := &file_filer_proto_msgTypes[16] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -func (m *Location) Reset() { *m = Location{} } -func (m *Location) String() string { return proto.CompactTextString(m) } -func (*Location) ProtoMessage() {} -func (*Location) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{18} } +// Deprecated: Use DeleteEntryRequest.ProtoReflect.Descriptor instead. +func (*DeleteEntryRequest) Descriptor() ([]byte, []int) { + return file_filer_proto_rawDescGZIP(), []int{16} +} -func (m *Location) GetUrl() string { - if m != nil { - return m.Url +func (x *DeleteEntryRequest) GetDirectory() string { + if x != nil { + return x.Directory } return "" } -func (m *Location) GetPublicUrl() string { - if m != nil { - return m.PublicUrl +func (x *DeleteEntryRequest) GetName() string { + if x != nil { + return x.Name } return "" } -type LookupVolumeResponse struct { - LocationsMap map[string]*Locations `protobuf:"bytes,1,rep,name=locations_map,json=locationsMap" json:"locations_map,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` +func (x *DeleteEntryRequest) GetIsDeleteData() bool { + if x != nil { + return x.IsDeleteData + } + return false +} + +func (x *DeleteEntryRequest) GetIsRecursive() bool { + if x != nil { + return x.IsRecursive + } + return false +} + +func (x *DeleteEntryRequest) GetIgnoreRecursiveError() bool { + if x != nil { + return x.IgnoreRecursiveError + } + return false } 
-func (m *LookupVolumeResponse) Reset() { *m = LookupVolumeResponse{} } -func (m *LookupVolumeResponse) String() string { return proto.CompactTextString(m) } -func (*LookupVolumeResponse) ProtoMessage() {} -func (*LookupVolumeResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{19} } +func (x *DeleteEntryRequest) GetIsFromOtherCluster() bool { + if x != nil { + return x.IsFromOtherCluster + } + return false +} + +type DeleteEntryResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Error string `protobuf:"bytes,1,opt,name=error,proto3" json:"error,omitempty"` +} + +func (x *DeleteEntryResponse) Reset() { + *x = DeleteEntryResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_filer_proto_msgTypes[17] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DeleteEntryResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DeleteEntryResponse) ProtoMessage() {} + +func (x *DeleteEntryResponse) ProtoReflect() protoreflect.Message { + mi := &file_filer_proto_msgTypes[17] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DeleteEntryResponse.ProtoReflect.Descriptor instead. 
+func (*DeleteEntryResponse) Descriptor() ([]byte, []int) { + return file_filer_proto_rawDescGZIP(), []int{17} +} + +func (x *DeleteEntryResponse) GetError() string { + if x != nil { + return x.Error + } + return "" +} + +type AtomicRenameEntryRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + OldDirectory string `protobuf:"bytes,1,opt,name=old_directory,json=oldDirectory,proto3" json:"old_directory,omitempty"` + OldName string `protobuf:"bytes,2,opt,name=old_name,json=oldName,proto3" json:"old_name,omitempty"` + NewDirectory string `protobuf:"bytes,3,opt,name=new_directory,json=newDirectory,proto3" json:"new_directory,omitempty"` + NewName string `protobuf:"bytes,4,opt,name=new_name,json=newName,proto3" json:"new_name,omitempty"` +} + +func (x *AtomicRenameEntryRequest) Reset() { + *x = AtomicRenameEntryRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_filer_proto_msgTypes[18] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AtomicRenameEntryRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AtomicRenameEntryRequest) ProtoMessage() {} + +func (x *AtomicRenameEntryRequest) ProtoReflect() protoreflect.Message { + mi := &file_filer_proto_msgTypes[18] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AtomicRenameEntryRequest.ProtoReflect.Descriptor instead. 
+func (*AtomicRenameEntryRequest) Descriptor() ([]byte, []int) { + return file_filer_proto_rawDescGZIP(), []int{18} +} + +func (x *AtomicRenameEntryRequest) GetOldDirectory() string { + if x != nil { + return x.OldDirectory + } + return "" +} + +func (x *AtomicRenameEntryRequest) GetOldName() string { + if x != nil { + return x.OldName + } + return "" +} + +func (x *AtomicRenameEntryRequest) GetNewDirectory() string { + if x != nil { + return x.NewDirectory + } + return "" +} + +func (x *AtomicRenameEntryRequest) GetNewName() string { + if x != nil { + return x.NewName + } + return "" +} + +type AtomicRenameEntryResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *AtomicRenameEntryResponse) Reset() { + *x = AtomicRenameEntryResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_filer_proto_msgTypes[19] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AtomicRenameEntryResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AtomicRenameEntryResponse) ProtoMessage() {} + +func (x *AtomicRenameEntryResponse) ProtoReflect() protoreflect.Message { + mi := &file_filer_proto_msgTypes[19] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AtomicRenameEntryResponse.ProtoReflect.Descriptor instead. 
+func (*AtomicRenameEntryResponse) Descriptor() ([]byte, []int) { + return file_filer_proto_rawDescGZIP(), []int{19} +} + +type AssignVolumeRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Count int32 `protobuf:"varint,1,opt,name=count,proto3" json:"count,omitempty"` + Collection string `protobuf:"bytes,2,opt,name=collection,proto3" json:"collection,omitempty"` + Replication string `protobuf:"bytes,3,opt,name=replication,proto3" json:"replication,omitempty"` + TtlSec int32 `protobuf:"varint,4,opt,name=ttl_sec,json=ttlSec,proto3" json:"ttl_sec,omitempty"` + DataCenter string `protobuf:"bytes,5,opt,name=data_center,json=dataCenter,proto3" json:"data_center,omitempty"` + ParentPath string `protobuf:"bytes,6,opt,name=parent_path,json=parentPath,proto3" json:"parent_path,omitempty"` +} + +func (x *AssignVolumeRequest) Reset() { + *x = AssignVolumeRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_filer_proto_msgTypes[20] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AssignVolumeRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AssignVolumeRequest) ProtoMessage() {} + +func (x *AssignVolumeRequest) ProtoReflect() protoreflect.Message { + mi := &file_filer_proto_msgTypes[20] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AssignVolumeRequest.ProtoReflect.Descriptor instead. 
+func (*AssignVolumeRequest) Descriptor() ([]byte, []int) { + return file_filer_proto_rawDescGZIP(), []int{20} +} + +func (x *AssignVolumeRequest) GetCount() int32 { + if x != nil { + return x.Count + } + return 0 +} + +func (x *AssignVolumeRequest) GetCollection() string { + if x != nil { + return x.Collection + } + return "" +} + +func (x *AssignVolumeRequest) GetReplication() string { + if x != nil { + return x.Replication + } + return "" +} + +func (x *AssignVolumeRequest) GetTtlSec() int32 { + if x != nil { + return x.TtlSec + } + return 0 +} + +func (x *AssignVolumeRequest) GetDataCenter() string { + if x != nil { + return x.DataCenter + } + return "" +} + +func (x *AssignVolumeRequest) GetParentPath() string { + if x != nil { + return x.ParentPath + } + return "" +} + +type AssignVolumeResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + FileId string `protobuf:"bytes,1,opt,name=file_id,json=fileId,proto3" json:"file_id,omitempty"` + Url string `protobuf:"bytes,2,opt,name=url,proto3" json:"url,omitempty"` + PublicUrl string `protobuf:"bytes,3,opt,name=public_url,json=publicUrl,proto3" json:"public_url,omitempty"` + Count int32 `protobuf:"varint,4,opt,name=count,proto3" json:"count,omitempty"` + Auth string `protobuf:"bytes,5,opt,name=auth,proto3" json:"auth,omitempty"` + Collection string `protobuf:"bytes,6,opt,name=collection,proto3" json:"collection,omitempty"` + Replication string `protobuf:"bytes,7,opt,name=replication,proto3" json:"replication,omitempty"` + Error string `protobuf:"bytes,8,opt,name=error,proto3" json:"error,omitempty"` +} + +func (x *AssignVolumeResponse) Reset() { + *x = AssignVolumeResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_filer_proto_msgTypes[21] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AssignVolumeResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func 
(*AssignVolumeResponse) ProtoMessage() {} + +func (x *AssignVolumeResponse) ProtoReflect() protoreflect.Message { + mi := &file_filer_proto_msgTypes[21] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AssignVolumeResponse.ProtoReflect.Descriptor instead. +func (*AssignVolumeResponse) Descriptor() ([]byte, []int) { + return file_filer_proto_rawDescGZIP(), []int{21} +} + +func (x *AssignVolumeResponse) GetFileId() string { + if x != nil { + return x.FileId + } + return "" +} + +func (x *AssignVolumeResponse) GetUrl() string { + if x != nil { + return x.Url + } + return "" +} + +func (x *AssignVolumeResponse) GetPublicUrl() string { + if x != nil { + return x.PublicUrl + } + return "" +} + +func (x *AssignVolumeResponse) GetCount() int32 { + if x != nil { + return x.Count + } + return 0 +} + +func (x *AssignVolumeResponse) GetAuth() string { + if x != nil { + return x.Auth + } + return "" +} + +func (x *AssignVolumeResponse) GetCollection() string { + if x != nil { + return x.Collection + } + return "" +} + +func (x *AssignVolumeResponse) GetReplication() string { + if x != nil { + return x.Replication + } + return "" +} + +func (x *AssignVolumeResponse) GetError() string { + if x != nil { + return x.Error + } + return "" +} + +type LookupVolumeRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + VolumeIds []string `protobuf:"bytes,1,rep,name=volume_ids,json=volumeIds,proto3" json:"volume_ids,omitempty"` +} + +func (x *LookupVolumeRequest) Reset() { + *x = LookupVolumeRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_filer_proto_msgTypes[22] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *LookupVolumeRequest) String() string { + return 
protoimpl.X.MessageStringOf(x) +} + +func (*LookupVolumeRequest) ProtoMessage() {} + +func (x *LookupVolumeRequest) ProtoReflect() protoreflect.Message { + mi := &file_filer_proto_msgTypes[22] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use LookupVolumeRequest.ProtoReflect.Descriptor instead. +func (*LookupVolumeRequest) Descriptor() ([]byte, []int) { + return file_filer_proto_rawDescGZIP(), []int{22} +} + +func (x *LookupVolumeRequest) GetVolumeIds() []string { + if x != nil { + return x.VolumeIds + } + return nil +} + +type Locations struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Locations []*Location `protobuf:"bytes,1,rep,name=locations,proto3" json:"locations,omitempty"` +} + +func (x *Locations) Reset() { + *x = Locations{} + if protoimpl.UnsafeEnabled { + mi := &file_filer_proto_msgTypes[23] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Locations) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Locations) ProtoMessage() {} + +func (x *Locations) ProtoReflect() protoreflect.Message { + mi := &file_filer_proto_msgTypes[23] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Locations.ProtoReflect.Descriptor instead. 
+func (*Locations) Descriptor() ([]byte, []int) { + return file_filer_proto_rawDescGZIP(), []int{23} +} + +func (x *Locations) GetLocations() []*Location { + if x != nil { + return x.Locations + } + return nil +} + +type Location struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Url string `protobuf:"bytes,1,opt,name=url,proto3" json:"url,omitempty"` + PublicUrl string `protobuf:"bytes,2,opt,name=public_url,json=publicUrl,proto3" json:"public_url,omitempty"` +} + +func (x *Location) Reset() { + *x = Location{} + if protoimpl.UnsafeEnabled { + mi := &file_filer_proto_msgTypes[24] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Location) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Location) ProtoMessage() {} + +func (x *Location) ProtoReflect() protoreflect.Message { + mi := &file_filer_proto_msgTypes[24] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Location.ProtoReflect.Descriptor instead. 
+func (*Location) Descriptor() ([]byte, []int) { + return file_filer_proto_rawDescGZIP(), []int{24} +} + +func (x *Location) GetUrl() string { + if x != nil { + return x.Url + } + return "" +} + +func (x *Location) GetPublicUrl() string { + if x != nil { + return x.PublicUrl + } + return "" +} + +type LookupVolumeResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + LocationsMap map[string]*Locations `protobuf:"bytes,1,rep,name=locations_map,json=locationsMap,proto3" json:"locations_map,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *LookupVolumeResponse) Reset() { + *x = LookupVolumeResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_filer_proto_msgTypes[25] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *LookupVolumeResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LookupVolumeResponse) ProtoMessage() {} + +func (x *LookupVolumeResponse) ProtoReflect() protoreflect.Message { + mi := &file_filer_proto_msgTypes[25] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use LookupVolumeResponse.ProtoReflect.Descriptor instead. 
+func (*LookupVolumeResponse) Descriptor() ([]byte, []int) { + return file_filer_proto_rawDescGZIP(), []int{25} +} + +func (x *LookupVolumeResponse) GetLocationsMap() map[string]*Locations { + if x != nil { + return x.LocationsMap + } + return nil +} + +type DeleteCollectionRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Collection string `protobuf:"bytes,1,opt,name=collection,proto3" json:"collection,omitempty"` +} + +func (x *DeleteCollectionRequest) Reset() { + *x = DeleteCollectionRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_filer_proto_msgTypes[26] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DeleteCollectionRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DeleteCollectionRequest) ProtoMessage() {} + +func (x *DeleteCollectionRequest) ProtoReflect() protoreflect.Message { + mi := &file_filer_proto_msgTypes[26] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DeleteCollectionRequest.ProtoReflect.Descriptor instead. 
+func (*DeleteCollectionRequest) Descriptor() ([]byte, []int) { + return file_filer_proto_rawDescGZIP(), []int{26} +} + +func (x *DeleteCollectionRequest) GetCollection() string { + if x != nil { + return x.Collection + } + return "" +} + +type DeleteCollectionResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *DeleteCollectionResponse) Reset() { + *x = DeleteCollectionResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_filer_proto_msgTypes[27] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DeleteCollectionResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DeleteCollectionResponse) ProtoMessage() {} + +func (x *DeleteCollectionResponse) ProtoReflect() protoreflect.Message { + mi := &file_filer_proto_msgTypes[27] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DeleteCollectionResponse.ProtoReflect.Descriptor instead. 
+func (*DeleteCollectionResponse) Descriptor() ([]byte, []int) { + return file_filer_proto_rawDescGZIP(), []int{27} +} + +type StatisticsRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Replication string `protobuf:"bytes,1,opt,name=replication,proto3" json:"replication,omitempty"` + Collection string `protobuf:"bytes,2,opt,name=collection,proto3" json:"collection,omitempty"` + Ttl string `protobuf:"bytes,3,opt,name=ttl,proto3" json:"ttl,omitempty"` +} + +func (x *StatisticsRequest) Reset() { + *x = StatisticsRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_filer_proto_msgTypes[28] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *StatisticsRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StatisticsRequest) ProtoMessage() {} + +func (x *StatisticsRequest) ProtoReflect() protoreflect.Message { + mi := &file_filer_proto_msgTypes[28] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StatisticsRequest.ProtoReflect.Descriptor instead. 
+func (*StatisticsRequest) Descriptor() ([]byte, []int) { + return file_filer_proto_rawDescGZIP(), []int{28} +} + +func (x *StatisticsRequest) GetReplication() string { + if x != nil { + return x.Replication + } + return "" +} + +func (x *StatisticsRequest) GetCollection() string { + if x != nil { + return x.Collection + } + return "" +} + +func (x *StatisticsRequest) GetTtl() string { + if x != nil { + return x.Ttl + } + return "" +} + +type StatisticsResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Replication string `protobuf:"bytes,1,opt,name=replication,proto3" json:"replication,omitempty"` + Collection string `protobuf:"bytes,2,opt,name=collection,proto3" json:"collection,omitempty"` + Ttl string `protobuf:"bytes,3,opt,name=ttl,proto3" json:"ttl,omitempty"` + TotalSize uint64 `protobuf:"varint,4,opt,name=total_size,json=totalSize,proto3" json:"total_size,omitempty"` + UsedSize uint64 `protobuf:"varint,5,opt,name=used_size,json=usedSize,proto3" json:"used_size,omitempty"` + FileCount uint64 `protobuf:"varint,6,opt,name=file_count,json=fileCount,proto3" json:"file_count,omitempty"` +} + +func (x *StatisticsResponse) Reset() { + *x = StatisticsResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_filer_proto_msgTypes[29] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *StatisticsResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StatisticsResponse) ProtoMessage() {} + +func (x *StatisticsResponse) ProtoReflect() protoreflect.Message { + mi := &file_filer_proto_msgTypes[29] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StatisticsResponse.ProtoReflect.Descriptor instead. 
+func (*StatisticsResponse) Descriptor() ([]byte, []int) { + return file_filer_proto_rawDescGZIP(), []int{29} +} + +func (x *StatisticsResponse) GetReplication() string { + if x != nil { + return x.Replication + } + return "" +} + +func (x *StatisticsResponse) GetCollection() string { + if x != nil { + return x.Collection + } + return "" +} + +func (x *StatisticsResponse) GetTtl() string { + if x != nil { + return x.Ttl + } + return "" +} + +func (x *StatisticsResponse) GetTotalSize() uint64 { + if x != nil { + return x.TotalSize + } + return 0 +} + +func (x *StatisticsResponse) GetUsedSize() uint64 { + if x != nil { + return x.UsedSize + } + return 0 +} + +func (x *StatisticsResponse) GetFileCount() uint64 { + if x != nil { + return x.FileCount + } + return 0 +} + +type GetFilerConfigurationRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *GetFilerConfigurationRequest) Reset() { + *x = GetFilerConfigurationRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_filer_proto_msgTypes[30] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetFilerConfigurationRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetFilerConfigurationRequest) ProtoMessage() {} + +func (x *GetFilerConfigurationRequest) ProtoReflect() protoreflect.Message { + mi := &file_filer_proto_msgTypes[30] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetFilerConfigurationRequest.ProtoReflect.Descriptor instead. 
+func (*GetFilerConfigurationRequest) Descriptor() ([]byte, []int) { + return file_filer_proto_rawDescGZIP(), []int{30} +} + +type GetFilerConfigurationResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Masters []string `protobuf:"bytes,1,rep,name=masters,proto3" json:"masters,omitempty"` + Replication string `protobuf:"bytes,2,opt,name=replication,proto3" json:"replication,omitempty"` + Collection string `protobuf:"bytes,3,opt,name=collection,proto3" json:"collection,omitempty"` + MaxMb uint32 `protobuf:"varint,4,opt,name=max_mb,json=maxMb,proto3" json:"max_mb,omitempty"` + DirBuckets string `protobuf:"bytes,5,opt,name=dir_buckets,json=dirBuckets,proto3" json:"dir_buckets,omitempty"` + Cipher bool `protobuf:"varint,7,opt,name=cipher,proto3" json:"cipher,omitempty"` +} + +func (x *GetFilerConfigurationResponse) Reset() { + *x = GetFilerConfigurationResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_filer_proto_msgTypes[31] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetFilerConfigurationResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetFilerConfigurationResponse) ProtoMessage() {} + +func (x *GetFilerConfigurationResponse) ProtoReflect() protoreflect.Message { + mi := &file_filer_proto_msgTypes[31] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetFilerConfigurationResponse.ProtoReflect.Descriptor instead. 
+func (*GetFilerConfigurationResponse) Descriptor() ([]byte, []int) { + return file_filer_proto_rawDescGZIP(), []int{31} +} + +func (x *GetFilerConfigurationResponse) GetMasters() []string { + if x != nil { + return x.Masters + } + return nil +} + +func (x *GetFilerConfigurationResponse) GetReplication() string { + if x != nil { + return x.Replication + } + return "" +} + +func (x *GetFilerConfigurationResponse) GetCollection() string { + if x != nil { + return x.Collection + } + return "" +} + +func (x *GetFilerConfigurationResponse) GetMaxMb() uint32 { + if x != nil { + return x.MaxMb + } + return 0 +} + +func (x *GetFilerConfigurationResponse) GetDirBuckets() string { + if x != nil { + return x.DirBuckets + } + return "" +} + +func (x *GetFilerConfigurationResponse) GetCipher() bool { + if x != nil { + return x.Cipher + } + return false +} + +type SubscribeMetadataRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ClientName string `protobuf:"bytes,1,opt,name=client_name,json=clientName,proto3" json:"client_name,omitempty"` + PathPrefix string `protobuf:"bytes,2,opt,name=path_prefix,json=pathPrefix,proto3" json:"path_prefix,omitempty"` + SinceNs int64 `protobuf:"varint,3,opt,name=since_ns,json=sinceNs,proto3" json:"since_ns,omitempty"` +} + +func (x *SubscribeMetadataRequest) Reset() { + *x = SubscribeMetadataRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_filer_proto_msgTypes[32] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SubscribeMetadataRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SubscribeMetadataRequest) ProtoMessage() {} + +func (x *SubscribeMetadataRequest) ProtoReflect() protoreflect.Message { + mi := &file_filer_proto_msgTypes[32] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + 
ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SubscribeMetadataRequest.ProtoReflect.Descriptor instead. +func (*SubscribeMetadataRequest) Descriptor() ([]byte, []int) { + return file_filer_proto_rawDescGZIP(), []int{32} +} + +func (x *SubscribeMetadataRequest) GetClientName() string { + if x != nil { + return x.ClientName + } + return "" +} + +func (x *SubscribeMetadataRequest) GetPathPrefix() string { + if x != nil { + return x.PathPrefix + } + return "" +} + +func (x *SubscribeMetadataRequest) GetSinceNs() int64 { + if x != nil { + return x.SinceNs + } + return 0 +} + +type SubscribeMetadataResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Directory string `protobuf:"bytes,1,opt,name=directory,proto3" json:"directory,omitempty"` + EventNotification *EventNotification `protobuf:"bytes,2,opt,name=event_notification,json=eventNotification,proto3" json:"event_notification,omitempty"` + TsNs int64 `protobuf:"varint,3,opt,name=ts_ns,json=tsNs,proto3" json:"ts_ns,omitempty"` +} + +func (x *SubscribeMetadataResponse) Reset() { + *x = SubscribeMetadataResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_filer_proto_msgTypes[33] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SubscribeMetadataResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SubscribeMetadataResponse) ProtoMessage() {} + +func (x *SubscribeMetadataResponse) ProtoReflect() protoreflect.Message { + mi := &file_filer_proto_msgTypes[33] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SubscribeMetadataResponse.ProtoReflect.Descriptor instead. 
+func (*SubscribeMetadataResponse) Descriptor() ([]byte, []int) { + return file_filer_proto_rawDescGZIP(), []int{33} +} + +func (x *SubscribeMetadataResponse) GetDirectory() string { + if x != nil { + return x.Directory + } + return "" +} + +func (x *SubscribeMetadataResponse) GetEventNotification() *EventNotification { + if x != nil { + return x.EventNotification + } + return nil +} + +func (x *SubscribeMetadataResponse) GetTsNs() int64 { + if x != nil { + return x.TsNs + } + return 0 +} + +type LogEntry struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + TsNs int64 `protobuf:"varint,1,opt,name=ts_ns,json=tsNs,proto3" json:"ts_ns,omitempty"` + PartitionKeyHash int32 `protobuf:"varint,2,opt,name=partition_key_hash,json=partitionKeyHash,proto3" json:"partition_key_hash,omitempty"` + Data []byte `protobuf:"bytes,3,opt,name=data,proto3" json:"data,omitempty"` +} + +func (x *LogEntry) Reset() { + *x = LogEntry{} + if protoimpl.UnsafeEnabled { + mi := &file_filer_proto_msgTypes[34] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *LogEntry) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LogEntry) ProtoMessage() {} + +func (x *LogEntry) ProtoReflect() protoreflect.Message { + mi := &file_filer_proto_msgTypes[34] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use LogEntry.ProtoReflect.Descriptor instead. 
+func (*LogEntry) Descriptor() ([]byte, []int) { + return file_filer_proto_rawDescGZIP(), []int{34} +} + +func (x *LogEntry) GetTsNs() int64 { + if x != nil { + return x.TsNs + } + return 0 +} + +func (x *LogEntry) GetPartitionKeyHash() int32 { + if x != nil { + return x.PartitionKeyHash + } + return 0 +} + +func (x *LogEntry) GetData() []byte { + if x != nil { + return x.Data + } + return nil +} + +type KeepConnectedRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + GrpcPort uint32 `protobuf:"varint,2,opt,name=grpc_port,json=grpcPort,proto3" json:"grpc_port,omitempty"` + Resources []string `protobuf:"bytes,3,rep,name=resources,proto3" json:"resources,omitempty"` +} + +func (x *KeepConnectedRequest) Reset() { + *x = KeepConnectedRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_filer_proto_msgTypes[35] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *KeepConnectedRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*KeepConnectedRequest) ProtoMessage() {} + +func (x *KeepConnectedRequest) ProtoReflect() protoreflect.Message { + mi := &file_filer_proto_msgTypes[35] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use KeepConnectedRequest.ProtoReflect.Descriptor instead. 
+func (*KeepConnectedRequest) Descriptor() ([]byte, []int) { + return file_filer_proto_rawDescGZIP(), []int{35} +} + +func (x *KeepConnectedRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *KeepConnectedRequest) GetGrpcPort() uint32 { + if x != nil { + return x.GrpcPort + } + return 0 +} + +func (x *KeepConnectedRequest) GetResources() []string { + if x != nil { + return x.Resources + } + return nil +} + +type KeepConnectedResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *KeepConnectedResponse) Reset() { + *x = KeepConnectedResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_filer_proto_msgTypes[36] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *KeepConnectedResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*KeepConnectedResponse) ProtoMessage() {} + +func (x *KeepConnectedResponse) ProtoReflect() protoreflect.Message { + mi := &file_filer_proto_msgTypes[36] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use KeepConnectedResponse.ProtoReflect.Descriptor instead. 
+func (*KeepConnectedResponse) Descriptor() ([]byte, []int) { + return file_filer_proto_rawDescGZIP(), []int{36} +} + +type LocateBrokerRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Resource string `protobuf:"bytes,1,opt,name=resource,proto3" json:"resource,omitempty"` +} + +func (x *LocateBrokerRequest) Reset() { + *x = LocateBrokerRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_filer_proto_msgTypes[37] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *LocateBrokerRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LocateBrokerRequest) ProtoMessage() {} + +func (x *LocateBrokerRequest) ProtoReflect() protoreflect.Message { + mi := &file_filer_proto_msgTypes[37] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use LocateBrokerRequest.ProtoReflect.Descriptor instead. 
+func (*LocateBrokerRequest) Descriptor() ([]byte, []int) { + return file_filer_proto_rawDescGZIP(), []int{37} +} + +func (x *LocateBrokerRequest) GetResource() string { + if x != nil { + return x.Resource + } + return "" +} + +type LocateBrokerResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Found bool `protobuf:"varint,1,opt,name=found,proto3" json:"found,omitempty"` + Resources []*LocateBrokerResponse_Resource `protobuf:"bytes,2,rep,name=resources,proto3" json:"resources,omitempty"` +} -func (m *LookupVolumeResponse) GetLocationsMap() map[string]*Locations { - if m != nil { - return m.LocationsMap +func (x *LocateBrokerResponse) Reset() { + *x = LocateBrokerResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_filer_proto_msgTypes[38] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return nil } -type DeleteCollectionRequest struct { - Collection string `protobuf:"bytes,1,opt,name=collection" json:"collection,omitempty"` +func (x *LocateBrokerResponse) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *DeleteCollectionRequest) Reset() { *m = DeleteCollectionRequest{} } -func (m *DeleteCollectionRequest) String() string { return proto.CompactTextString(m) } -func (*DeleteCollectionRequest) ProtoMessage() {} -func (*DeleteCollectionRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{20} } +func (*LocateBrokerResponse) ProtoMessage() {} -func (m *DeleteCollectionRequest) GetCollection() string { - if m != nil { - return m.Collection +func (x *LocateBrokerResponse) ProtoReflect() protoreflect.Message { + mi := &file_filer_proto_msgTypes[38] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return "" + return mi.MessageOf(x) } -type DeleteCollectionResponse struct { +// Deprecated: Use 
LocateBrokerResponse.ProtoReflect.Descriptor instead. +func (*LocateBrokerResponse) Descriptor() ([]byte, []int) { + return file_filer_proto_rawDescGZIP(), []int{38} } -func (m *DeleteCollectionResponse) Reset() { *m = DeleteCollectionResponse{} } -func (m *DeleteCollectionResponse) String() string { return proto.CompactTextString(m) } -func (*DeleteCollectionResponse) ProtoMessage() {} -func (*DeleteCollectionResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{21} } - -type StatisticsRequest struct { - Replication string `protobuf:"bytes,1,opt,name=replication" json:"replication,omitempty"` - Collection string `protobuf:"bytes,2,opt,name=collection" json:"collection,omitempty"` - Ttl string `protobuf:"bytes,3,opt,name=ttl" json:"ttl,omitempty"` +func (x *LocateBrokerResponse) GetFound() bool { + if x != nil { + return x.Found + } + return false } -func (m *StatisticsRequest) Reset() { *m = StatisticsRequest{} } -func (m *StatisticsRequest) String() string { return proto.CompactTextString(m) } -func (*StatisticsRequest) ProtoMessage() {} -func (*StatisticsRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{22} } - -func (m *StatisticsRequest) GetReplication() string { - if m != nil { - return m.Replication +func (x *LocateBrokerResponse) GetResources() []*LocateBrokerResponse_Resource { + if x != nil { + return x.Resources } - return "" + return nil } -func (m *StatisticsRequest) GetCollection() string { - if m != nil { - return m.Collection - } - return "" +// if found, send the exact address +// if not found, send the full list of existing brokers +type LocateBrokerResponse_Resource struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + GrpcAddresses string `protobuf:"bytes,1,opt,name=grpc_addresses,json=grpcAddresses,proto3" json:"grpc_addresses,omitempty"` + ResourceCount int32 `protobuf:"varint,2,opt,name=resource_count,json=resourceCount,proto3" 
json:"resource_count,omitempty"` } -func (m *StatisticsRequest) GetTtl() string { - if m != nil { - return m.Ttl +func (x *LocateBrokerResponse_Resource) Reset() { + *x = LocateBrokerResponse_Resource{} + if protoimpl.UnsafeEnabled { + mi := &file_filer_proto_msgTypes[41] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return "" } -type StatisticsResponse struct { - Replication string `protobuf:"bytes,1,opt,name=replication" json:"replication,omitempty"` - Collection string `protobuf:"bytes,2,opt,name=collection" json:"collection,omitempty"` - Ttl string `protobuf:"bytes,3,opt,name=ttl" json:"ttl,omitempty"` - TotalSize uint64 `protobuf:"varint,4,opt,name=total_size,json=totalSize" json:"total_size,omitempty"` - UsedSize uint64 `protobuf:"varint,5,opt,name=used_size,json=usedSize" json:"used_size,omitempty"` - FileCount uint64 `protobuf:"varint,6,opt,name=file_count,json=fileCount" json:"file_count,omitempty"` +func (x *LocateBrokerResponse_Resource) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *StatisticsResponse) Reset() { *m = StatisticsResponse{} } -func (m *StatisticsResponse) String() string { return proto.CompactTextString(m) } -func (*StatisticsResponse) ProtoMessage() {} -func (*StatisticsResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{23} } +func (*LocateBrokerResponse_Resource) ProtoMessage() {} -func (m *StatisticsResponse) GetReplication() string { - if m != nil { - return m.Replication +func (x *LocateBrokerResponse_Resource) ProtoReflect() protoreflect.Message { + mi := &file_filer_proto_msgTypes[41] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return "" + return mi.MessageOf(x) } -func (m *StatisticsResponse) GetCollection() string { - if m != nil { - return m.Collection - } - return "" +// Deprecated: Use 
LocateBrokerResponse_Resource.ProtoReflect.Descriptor instead. +func (*LocateBrokerResponse_Resource) Descriptor() ([]byte, []int) { + return file_filer_proto_rawDescGZIP(), []int{38, 0} } -func (m *StatisticsResponse) GetTtl() string { - if m != nil { - return m.Ttl +func (x *LocateBrokerResponse_Resource) GetGrpcAddresses() string { + if x != nil { + return x.GrpcAddresses } return "" } -func (m *StatisticsResponse) GetTotalSize() uint64 { - if m != nil { - return m.TotalSize +func (x *LocateBrokerResponse_Resource) GetResourceCount() int32 { + if x != nil { + return x.ResourceCount } return 0 } -func (m *StatisticsResponse) GetUsedSize() uint64 { - if m != nil { - return m.UsedSize - } - return 0 +var File_filer_proto protoreflect.FileDescriptor + +var file_filer_proto_rawDesc = []byte{ + 0x0a, 0x0b, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x08, 0x66, + 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x22, 0x4f, 0x0a, 0x1b, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, + 0x70, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, + 0x6f, 0x72, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x64, 0x69, 0x72, 0x65, 0x63, + 0x74, 0x6f, 0x72, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x45, 0x0a, 0x1c, 0x4c, 0x6f, 0x6f, 0x6b, + 0x75, 0x70, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x25, 0x0a, 0x05, 0x65, 0x6e, 0x74, 0x72, + 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, + 0x70, 0x62, 0x2e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x05, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x22, + 0xbe, 0x01, 0x0a, 0x12, 0x4c, 0x69, 0x73, 0x74, 0x45, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x52, + 0x65, 0x71, 0x75, 
0x65, 0x73, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, + 0x6f, 0x72, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x64, 0x69, 0x72, 0x65, 0x63, + 0x74, 0x6f, 0x72, 0x79, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x2c, 0x0a, 0x11, + 0x73, 0x74, 0x61, 0x72, 0x74, 0x46, 0x72, 0x6f, 0x6d, 0x46, 0x69, 0x6c, 0x65, 0x4e, 0x61, 0x6d, + 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x73, 0x74, 0x61, 0x72, 0x74, 0x46, 0x72, + 0x6f, 0x6d, 0x46, 0x69, 0x6c, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x2e, 0x0a, 0x12, 0x69, 0x6e, + 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x53, 0x74, 0x61, 0x72, 0x74, 0x46, 0x72, 0x6f, 0x6d, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x12, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, + 0x65, 0x53, 0x74, 0x61, 0x72, 0x74, 0x46, 0x72, 0x6f, 0x6d, 0x12, 0x14, 0x0a, 0x05, 0x6c, 0x69, + 0x6d, 0x69, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, + 0x22, 0x3c, 0x0a, 0x13, 0x4c, 0x69, 0x73, 0x74, 0x45, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x25, 0x0a, 0x05, 0x65, 0x6e, 0x74, 0x72, 0x79, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, + 0x62, 0x2e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x05, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x22, 0x9d, + 0x02, 0x0a, 0x05, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x21, 0x0a, 0x0c, + 0x69, 0x73, 0x5f, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x0b, 0x69, 0x73, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x12, + 0x2b, 0x0a, 0x06, 0x63, 0x68, 0x75, 0x6e, 0x6b, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x13, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 
0x62, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x43, + 0x68, 0x75, 0x6e, 0x6b, 0x52, 0x06, 0x63, 0x68, 0x75, 0x6e, 0x6b, 0x73, 0x12, 0x38, 0x0a, 0x0a, + 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x18, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x46, 0x75, 0x73, 0x65, + 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x52, 0x0a, 0x61, 0x74, 0x74, 0x72, + 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x12, 0x39, 0x0a, 0x08, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x64, + 0x65, 0x64, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, + 0x5f, 0x70, 0x62, 0x2e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x64, + 0x65, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x08, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x64, 0x65, + 0x64, 0x1a, 0x3b, 0x0a, 0x0d, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x64, 0x65, 0x64, 0x45, 0x6e, 0x74, + 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x44, + 0x0a, 0x09, 0x46, 0x75, 0x6c, 0x6c, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x64, + 0x69, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x64, 0x69, 0x72, 0x12, 0x25, 0x0a, + 0x05, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x66, + 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x05, 0x65, + 0x6e, 0x74, 0x72, 0x79, 0x22, 0xef, 0x01, 0x0a, 0x11, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x4e, 0x6f, + 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2c, 0x0a, 0x09, 0x6f, 0x6c, + 0x64, 0x5f, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, + 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 
0x08, + 0x6f, 0x6c, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x2c, 0x0a, 0x09, 0x6e, 0x65, 0x77, 0x5f, + 0x65, 0x6e, 0x74, 0x72, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x66, 0x69, + 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x08, 0x6e, 0x65, + 0x77, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x23, 0x0a, 0x0d, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, + 0x5f, 0x63, 0x68, 0x75, 0x6e, 0x6b, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x64, + 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, 0x68, 0x75, 0x6e, 0x6b, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e, + 0x65, 0x77, 0x5f, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65, 0x77, 0x50, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x50, + 0x61, 0x74, 0x68, 0x12, 0x31, 0x0a, 0x15, 0x69, 0x73, 0x5f, 0x66, 0x72, 0x6f, 0x6d, 0x5f, 0x6f, + 0x74, 0x68, 0x65, 0x72, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x12, 0x69, 0x73, 0x46, 0x72, 0x6f, 0x6d, 0x4f, 0x74, 0x68, 0x65, 0x72, 0x43, + 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x22, 0xba, 0x02, 0x0a, 0x09, 0x46, 0x69, 0x6c, 0x65, 0x43, + 0x68, 0x75, 0x6e, 0x6b, 0x12, 0x17, 0x0a, 0x07, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x69, 0x64, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x66, 0x69, 0x6c, 0x65, 0x49, 0x64, 0x12, 0x16, 0x0a, + 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x06, 0x6f, + 0x66, 0x66, 0x73, 0x65, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x04, 0x52, 0x04, 0x73, 0x69, 0x7a, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x6d, 0x74, 0x69, + 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x6d, 0x74, 0x69, 0x6d, 0x65, 0x12, + 0x13, 0x0a, 0x05, 0x65, 0x5f, 0x74, 0x61, 0x67, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, + 0x65, 0x54, 0x61, 0x67, 0x12, 0x24, 0x0a, 0x0e, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x66, + 0x69, 0x6c, 0x65, 0x5f, 0x69, 
0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x49, 0x64, 0x12, 0x22, 0x0a, 0x03, 0x66, 0x69, + 0x64, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, + 0x70, 0x62, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x49, 0x64, 0x52, 0x03, 0x66, 0x69, 0x64, 0x12, 0x2f, + 0x0a, 0x0a, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x66, 0x69, 0x64, 0x18, 0x08, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x46, 0x69, + 0x6c, 0x65, 0x49, 0x64, 0x52, 0x09, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x46, 0x69, 0x64, 0x12, + 0x1d, 0x0a, 0x0a, 0x63, 0x69, 0x70, 0x68, 0x65, 0x72, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x09, 0x20, + 0x01, 0x28, 0x0c, 0x52, 0x09, 0x63, 0x69, 0x70, 0x68, 0x65, 0x72, 0x4b, 0x65, 0x79, 0x12, 0x23, + 0x0a, 0x0d, 0x69, 0x73, 0x5f, 0x63, 0x6f, 0x6d, 0x70, 0x72, 0x65, 0x73, 0x73, 0x65, 0x64, 0x18, + 0x0a, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x69, 0x73, 0x43, 0x6f, 0x6d, 0x70, 0x72, 0x65, 0x73, + 0x73, 0x65, 0x64, 0x22, 0x58, 0x0a, 0x06, 0x46, 0x69, 0x6c, 0x65, 0x49, 0x64, 0x12, 0x1b, 0x0a, + 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, + 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x12, 0x19, 0x0a, 0x08, 0x66, 0x69, + 0x6c, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x07, 0x66, 0x69, + 0x6c, 0x65, 0x4b, 0x65, 0x79, 0x12, 0x16, 0x0a, 0x06, 0x63, 0x6f, 0x6f, 0x6b, 0x69, 0x65, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x07, 0x52, 0x06, 0x63, 0x6f, 0x6f, 0x6b, 0x69, 0x65, 0x22, 0x80, 0x03, + 0x0a, 0x0e, 0x46, 0x75, 0x73, 0x65, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, + 0x12, 0x1b, 0x0a, 0x09, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x04, 0x52, 0x08, 0x66, 0x69, 0x6c, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x14, 0x0a, + 0x05, 0x6d, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 
0x03, 0x52, 0x05, 0x6d, 0x74, + 0x69, 0x6d, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x6d, 0x6f, 0x64, 0x65, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x66, 0x69, 0x6c, 0x65, 0x4d, 0x6f, 0x64, 0x65, + 0x12, 0x10, 0x0a, 0x03, 0x75, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x03, 0x75, + 0x69, 0x64, 0x12, 0x10, 0x0a, 0x03, 0x67, 0x69, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0d, 0x52, + 0x03, 0x67, 0x69, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x63, 0x72, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x06, + 0x20, 0x01, 0x28, 0x03, 0x52, 0x06, 0x63, 0x72, 0x74, 0x69, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, + 0x6d, 0x69, 0x6d, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6d, 0x69, 0x6d, 0x65, + 0x12, 0x20, 0x0a, 0x0b, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, + 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x12, 0x17, 0x0a, 0x07, 0x74, 0x74, 0x6c, 0x5f, 0x73, 0x65, 0x63, 0x18, 0x0a, 0x20, + 0x01, 0x28, 0x05, 0x52, 0x06, 0x74, 0x74, 0x6c, 0x53, 0x65, 0x63, 0x12, 0x1b, 0x0a, 0x09, 0x75, + 0x73, 0x65, 0x72, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, + 0x75, 0x73, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x67, 0x72, 0x6f, 0x75, + 0x70, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x0c, 0x20, 0x03, 0x28, 0x09, 0x52, 0x09, 0x67, 0x72, + 0x6f, 0x75, 0x70, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x73, 0x79, 0x6d, 0x6c, 0x69, + 0x6e, 0x6b, 0x5f, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0d, 0x73, 0x79, 0x6d, 0x6c, 0x69, 0x6e, 0x6b, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, 0x10, + 0x0a, 0x03, 0x6d, 0x64, 0x35, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x03, 0x6d, 0x64, 0x35, + 0x22, 
0xa3, 0x01, 0x0a, 0x12, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x64, 0x69, 0x72, 0x65, 0x63, + 0x74, 0x6f, 0x72, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x64, 0x69, 0x72, 0x65, + 0x63, 0x74, 0x6f, 0x72, 0x79, 0x12, 0x25, 0x0a, 0x05, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x05, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x15, 0x0a, 0x06, + 0x6f, 0x5f, 0x65, 0x78, 0x63, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x6f, 0x45, + 0x78, 0x63, 0x6c, 0x12, 0x31, 0x0a, 0x15, 0x69, 0x73, 0x5f, 0x66, 0x72, 0x6f, 0x6d, 0x5f, 0x6f, + 0x74, 0x68, 0x65, 0x72, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x12, 0x69, 0x73, 0x46, 0x72, 0x6f, 0x6d, 0x4f, 0x74, 0x68, 0x65, 0x72, 0x43, + 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x22, 0x2b, 0x0a, 0x13, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x14, 0x0a, + 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, + 0x72, 0x6f, 0x72, 0x22, 0x8c, 0x01, 0x0a, 0x12, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x45, 0x6e, + 0x74, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x64, 0x69, + 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x64, + 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x12, 0x25, 0x0a, 0x05, 0x65, 0x6e, 0x74, 0x72, + 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, + 0x70, 0x62, 0x2e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x05, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x12, + 0x31, 0x0a, 0x15, 0x69, 0x73, 0x5f, 0x66, 0x72, 0x6f, 0x6d, 0x5f, 0x6f, 0x74, 0x68, 0x65, 0x72, + 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 
0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x12, + 0x69, 0x73, 0x46, 0x72, 0x6f, 0x6d, 0x4f, 0x74, 0x68, 0x65, 0x72, 0x43, 0x6c, 0x75, 0x73, 0x74, + 0x65, 0x72, 0x22, 0x15, 0x0a, 0x13, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x80, 0x01, 0x0a, 0x14, 0x41, 0x70, + 0x70, 0x65, 0x6e, 0x64, 0x54, 0x6f, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, + 0x12, 0x1d, 0x0a, 0x0a, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x12, + 0x2b, 0x0a, 0x06, 0x63, 0x68, 0x75, 0x6e, 0x6b, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x13, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x43, + 0x68, 0x75, 0x6e, 0x6b, 0x52, 0x06, 0x63, 0x68, 0x75, 0x6e, 0x6b, 0x73, 0x22, 0x17, 0x0a, 0x15, + 0x41, 0x70, 0x70, 0x65, 0x6e, 0x64, 0x54, 0x6f, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xf8, 0x01, 0x0a, 0x12, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1c, 0x0a, 0x09, + 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x09, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, + 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x24, + 0x0a, 0x0e, 0x69, 0x73, 0x5f, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x5f, 0x64, 0x61, 0x74, 0x61, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x69, 0x73, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, + 0x44, 0x61, 0x74, 0x61, 0x12, 0x21, 0x0a, 0x0c, 0x69, 0x73, 0x5f, 0x72, 0x65, 
0x63, 0x75, 0x72, + 0x73, 0x69, 0x76, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x69, 0x73, 0x52, 0x65, + 0x63, 0x75, 0x72, 0x73, 0x69, 0x76, 0x65, 0x12, 0x34, 0x0a, 0x16, 0x69, 0x67, 0x6e, 0x6f, 0x72, + 0x65, 0x5f, 0x72, 0x65, 0x63, 0x75, 0x72, 0x73, 0x69, 0x76, 0x65, 0x5f, 0x65, 0x72, 0x72, 0x6f, + 0x72, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x14, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x52, + 0x65, 0x63, 0x75, 0x72, 0x73, 0x69, 0x76, 0x65, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x31, 0x0a, + 0x15, 0x69, 0x73, 0x5f, 0x66, 0x72, 0x6f, 0x6d, 0x5f, 0x6f, 0x74, 0x68, 0x65, 0x72, 0x5f, 0x63, + 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x12, 0x69, 0x73, + 0x46, 0x72, 0x6f, 0x6d, 0x4f, 0x74, 0x68, 0x65, 0x72, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, + 0x22, 0x2b, 0x0a, 0x13, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x22, 0x9a, 0x01, + 0x0a, 0x18, 0x41, 0x74, 0x6f, 0x6d, 0x69, 0x63, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x45, 0x6e, + 0x74, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x23, 0x0a, 0x0d, 0x6f, 0x6c, + 0x64, 0x5f, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0c, 0x6f, 0x6c, 0x64, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x12, + 0x19, 0x0a, 0x08, 0x6f, 0x6c, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x07, 0x6f, 0x6c, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x6e, 0x65, + 0x77, 0x5f, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0c, 0x6e, 0x65, 0x77, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x12, + 0x19, 0x0a, 0x08, 0x6e, 0x65, 0x77, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x07, 
0x6e, 0x65, 0x77, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0x1b, 0x0a, 0x19, 0x41, 0x74, + 0x6f, 0x6d, 0x69, 0x63, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xc8, 0x01, 0x0a, 0x13, 0x41, 0x73, 0x73, 0x69, + 0x67, 0x6e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x14, 0x0a, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, + 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x20, 0x0a, 0x0b, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x72, 0x65, 0x70, 0x6c, + 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x17, 0x0a, 0x07, 0x74, 0x74, 0x6c, 0x5f, 0x73, + 0x65, 0x63, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x74, 0x74, 0x6c, 0x53, 0x65, 0x63, + 0x12, 0x1f, 0x0a, 0x0b, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x63, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x18, + 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x64, 0x61, 0x74, 0x61, 0x43, 0x65, 0x6e, 0x74, 0x65, + 0x72, 0x12, 0x1f, 0x0a, 0x0b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x70, 0x61, 0x74, 0x68, + 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x50, 0x61, + 0x74, 0x68, 0x22, 0xe2, 0x01, 0x0a, 0x14, 0x41, 0x73, 0x73, 0x69, 0x67, 0x6e, 0x56, 0x6f, 0x6c, + 0x75, 0x6d, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x17, 0x0a, 0x07, 0x66, + 0x69, 0x6c, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x66, 0x69, + 0x6c, 0x65, 0x49, 0x64, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x72, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x03, 0x75, 0x72, 0x6c, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, + 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 
0x09, 0x52, 0x09, 0x70, 0x75, 0x62, 0x6c, + 0x69, 0x63, 0x55, 0x72, 0x6c, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x61, + 0x75, 0x74, 0x68, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x61, 0x75, 0x74, 0x68, 0x12, + 0x1e, 0x0a, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, + 0x20, 0x0a, 0x0b, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x07, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x22, 0x34, 0x0a, 0x13, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, + 0x70, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, + 0x0a, 0x0a, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, + 0x28, 0x09, 0x52, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x73, 0x22, 0x3d, 0x0a, + 0x09, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x30, 0x0a, 0x09, 0x6c, 0x6f, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x12, 0x2e, + 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x09, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x3b, 0x0a, 0x08, + 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x72, 0x6c, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72, 0x6c, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x75, + 0x62, 0x6c, 0x69, 0x63, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, + 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x55, 0x72, 0x6c, 0x22, 0xc3, 0x01, 0x0a, 0x14, 0x4c, 
0x6f, + 0x6f, 0x6b, 0x75, 0x70, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x55, 0x0a, 0x0d, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x5f, + 0x6d, 0x61, 0x70, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x66, 0x69, 0x6c, 0x65, + 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x56, 0x6f, 0x6c, 0x75, 0x6d, + 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x4d, 0x61, 0x70, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0c, 0x6c, 0x6f, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x4d, 0x61, 0x70, 0x1a, 0x54, 0x0a, 0x11, 0x4c, 0x6f, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x4d, 0x61, 0x70, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, + 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, + 0x12, 0x29, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x13, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, + 0x39, 0x0a, 0x17, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, + 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, + 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x1a, 0x0a, 0x18, 0x44, 0x65, + 0x6c, 0x65, 0x74, 0x65, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x67, 0x0a, 0x11, 0x53, 0x74, 0x61, 0x74, 0x69, 0x73, + 0x74, 0x69, 0x63, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x20, 0x0a, 0x0b, 0x72, + 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0b, 0x72, 0x65, 0x70, 
0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1e, 0x0a, + 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x10, 0x0a, + 0x03, 0x74, 0x74, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x74, 0x74, 0x6c, 0x22, + 0xc3, 0x01, 0x0a, 0x12, 0x53, 0x74, 0x61, 0x74, 0x69, 0x73, 0x74, 0x69, 0x63, 0x73, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x72, 0x65, 0x70, + 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6f, + 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x74, 0x74, 0x6c, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x74, 0x74, 0x6c, 0x12, 0x1d, 0x0a, 0x0a, 0x74, 0x6f, + 0x74, 0x61, 0x6c, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, + 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x75, 0x73, 0x65, + 0x64, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x75, 0x73, + 0x65, 0x64, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x63, + 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x66, 0x69, 0x6c, 0x65, + 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0x1e, 0x0a, 0x1c, 0x47, 0x65, 0x74, 0x46, 0x69, 0x6c, 0x65, + 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0xcb, 0x01, 0x0a, 0x1d, 0x47, 0x65, 0x74, 0x46, 0x69, 0x6c, + 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 
0x6d, 0x61, 0x73, 0x74, 0x65, + 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, + 0x73, 0x12, 0x20, 0x0a, 0x0b, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x12, 0x15, 0x0a, 0x06, 0x6d, 0x61, 0x78, 0x5f, 0x6d, 0x62, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x0d, 0x52, 0x05, 0x6d, 0x61, 0x78, 0x4d, 0x62, 0x12, 0x1f, 0x0a, 0x0b, 0x64, 0x69, + 0x72, 0x5f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0a, 0x64, 0x69, 0x72, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x63, + 0x69, 0x70, 0x68, 0x65, 0x72, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x63, 0x69, 0x70, + 0x68, 0x65, 0x72, 0x22, 0x77, 0x0a, 0x18, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, + 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x1f, 0x0a, 0x0b, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4e, 0x61, 0x6d, 0x65, + 0x12, 0x1f, 0x0a, 0x0b, 0x70, 0x61, 0x74, 0x68, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x70, 0x61, 0x74, 0x68, 0x50, 0x72, 0x65, 0x66, 0x69, + 0x78, 0x12, 0x19, 0x0a, 0x08, 0x73, 0x69, 0x6e, 0x63, 0x65, 0x5f, 0x6e, 0x73, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x03, 0x52, 0x07, 0x73, 0x69, 0x6e, 0x63, 0x65, 0x4e, 0x73, 0x22, 0x9a, 0x01, 0x0a, + 0x19, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, + 0x74, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x64, 0x69, + 0x72, 
0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x64, + 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x12, 0x4a, 0x0a, 0x12, 0x65, 0x76, 0x65, 0x6e, + 0x74, 0x5f, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, + 0x45, 0x76, 0x65, 0x6e, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x11, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x13, 0x0a, 0x05, 0x74, 0x73, 0x5f, 0x6e, 0x73, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x03, 0x52, 0x04, 0x74, 0x73, 0x4e, 0x73, 0x22, 0x61, 0x0a, 0x08, 0x4c, 0x6f, 0x67, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x13, 0x0a, 0x05, 0x74, 0x73, 0x5f, 0x6e, 0x73, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x03, 0x52, 0x04, 0x74, 0x73, 0x4e, 0x73, 0x12, 0x2c, 0x0a, 0x12, 0x70, 0x61, + 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x68, 0x61, 0x73, 0x68, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x10, 0x70, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, + 0x6e, 0x4b, 0x65, 0x79, 0x48, 0x61, 0x73, 0x68, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x22, 0x65, 0x0a, 0x14, + 0x4b, 0x65, 0x65, 0x70, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x65, 0x64, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x67, 0x72, 0x70, 0x63, + 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x67, 0x72, 0x70, + 0x63, 0x50, 0x6f, 0x72, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x09, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x73, 0x22, 0x17, 0x0a, 0x15, 
0x4b, 0x65, 0x65, 0x70, 0x43, 0x6f, 0x6e, 0x6e, 0x65, + 0x63, 0x74, 0x65, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x31, 0x0a, 0x13, + 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x65, 0x42, 0x72, 0x6f, 0x6b, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x22, + 0xcd, 0x01, 0x0a, 0x14, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x65, 0x42, 0x72, 0x6f, 0x6b, 0x65, 0x72, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x66, 0x6f, 0x75, 0x6e, + 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x66, 0x6f, 0x75, 0x6e, 0x64, 0x12, 0x45, + 0x0a, 0x09, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x27, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4c, 0x6f, 0x63, + 0x61, 0x74, 0x65, 0x42, 0x72, 0x6f, 0x6b, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x09, 0x72, 0x65, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x73, 0x1a, 0x58, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, + 0x73, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x67, 0x72, 0x70, 0x63, 0x41, + 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, + 0x52, 0x0d, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x32, + 0x8d, 0x0b, 0x0a, 0x0c, 0x53, 0x65, 0x61, 0x77, 0x65, 0x65, 0x64, 0x46, 0x69, 0x6c, 0x65, 0x72, + 0x12, 0x67, 0x0a, 0x14, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, + 0x6f, 0x72, 0x79, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x25, 0x2e, 0x66, 0x69, 
0x6c, 0x65, 0x72, + 0x5f, 0x70, 0x62, 0x2e, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, + 0x6f, 0x72, 0x79, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x26, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, + 0x70, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x4e, 0x0a, 0x0b, 0x4c, 0x69, 0x73, + 0x74, 0x45, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x12, 0x1c, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, + 0x5f, 0x70, 0x62, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x45, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, + 0x62, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x45, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x30, 0x01, 0x12, 0x4c, 0x0a, 0x0b, 0x43, 0x72, 0x65, + 0x61, 0x74, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x1c, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, + 0x5f, 0x70, 0x62, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, + 0x62, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x4c, 0x0a, 0x0b, 0x55, 0x70, 0x64, 0x61, 0x74, + 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x1c, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, + 0x62, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, + 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x52, 0x0a, 0x0d, 0x41, 0x70, 0x70, 0x65, 0x6e, 0x64, 0x54, + 0x6f, 0x45, 0x6e, 
0x74, 0x72, 0x79, 0x12, 0x1e, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, + 0x62, 0x2e, 0x41, 0x70, 0x70, 0x65, 0x6e, 0x64, 0x54, 0x6f, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, + 0x62, 0x2e, 0x41, 0x70, 0x70, 0x65, 0x6e, 0x64, 0x54, 0x6f, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x4c, 0x0a, 0x0b, 0x44, 0x65, 0x6c, + 0x65, 0x74, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x1c, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, + 0x5f, 0x70, 0x62, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, + 0x62, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x5e, 0x0a, 0x11, 0x41, 0x74, 0x6f, 0x6d, 0x69, + 0x63, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x22, 0x2e, 0x66, + 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x41, 0x74, 0x6f, 0x6d, 0x69, 0x63, 0x52, 0x65, + 0x6e, 0x61, 0x6d, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x23, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x41, 0x74, 0x6f, 0x6d, + 0x69, 0x63, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x4f, 0x0a, 0x0c, 0x41, 0x73, 0x73, 0x69, 0x67, + 0x6e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x12, 0x1d, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, + 0x70, 0x62, 0x2e, 0x41, 0x73, 0x73, 0x69, 0x67, 0x6e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, + 0x62, 0x2e, 0x41, 0x73, 0x73, 0x69, 0x67, 0x6e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 
0x4f, 0x0a, 0x0c, 0x4c, 0x6f, 0x6f, 0x6b, + 0x75, 0x70, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x12, 0x1d, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, + 0x5f, 0x70, 0x62, 0x2e, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, + 0x70, 0x62, 0x2e, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x5b, 0x0a, 0x10, 0x44, 0x65, 0x6c, + 0x65, 0x74, 0x65, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x21, 0x2e, + 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, + 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x22, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x44, 0x65, 0x6c, 0x65, + 0x74, 0x65, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x49, 0x0a, 0x0a, 0x53, 0x74, 0x61, 0x74, 0x69, 0x73, + 0x74, 0x69, 0x63, 0x73, 0x12, 0x1b, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, + 0x53, 0x74, 0x61, 0x74, 0x69, 0x73, 0x74, 0x69, 0x63, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x1c, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x53, 0x74, 0x61, + 0x74, 0x69, 0x73, 0x74, 0x69, 0x63, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, + 0x00, 0x12, 0x6a, 0x0a, 0x15, 0x47, 0x65, 0x74, 0x46, 0x69, 0x6c, 0x65, 0x72, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x26, 0x2e, 0x66, 0x69, 0x6c, + 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x47, 0x65, 0x74, 0x46, 0x69, 0x6c, 0x65, 0x72, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x47, 
0x65, + 0x74, 0x46, 0x69, 0x6c, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x60, 0x0a, + 0x11, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, + 0x74, 0x61, 0x12, 0x22, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x53, 0x75, + 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, + 0x62, 0x2e, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64, + 0x61, 0x74, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x30, 0x01, 0x12, + 0x65, 0x0a, 0x16, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x4c, 0x6f, 0x63, 0x61, + 0x6c, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x22, 0x2e, 0x66, 0x69, 0x6c, 0x65, + 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x4d, 0x65, + 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, + 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, + 0x62, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x22, 0x00, 0x30, 0x01, 0x12, 0x56, 0x0a, 0x0d, 0x4b, 0x65, 0x65, 0x70, 0x43, 0x6f, + 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x65, 0x64, 0x12, 0x1e, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, + 0x70, 0x62, 0x2e, 0x4b, 0x65, 0x65, 0x70, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x65, 0x64, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, + 0x70, 0x62, 0x2e, 0x4b, 0x65, 0x65, 0x70, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x65, 0x64, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x28, 0x01, 0x30, 0x01, 0x12, 0x4f, + 0x0a, 0x0c, 0x4c, 0x6f, 0x63, 
0x61, 0x74, 0x65, 0x42, 0x72, 0x6f, 0x6b, 0x65, 0x72, 0x12, 0x1d, + 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x65, + 0x42, 0x72, 0x6f, 0x6b, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, + 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x65, 0x42, + 0x72, 0x6f, 0x6b, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x42, + 0x4f, 0x0a, 0x10, 0x73, 0x65, 0x61, 0x77, 0x65, 0x65, 0x64, 0x66, 0x73, 0x2e, 0x63, 0x6c, 0x69, + 0x65, 0x6e, 0x74, 0x42, 0x0a, 0x46, 0x69, 0x6c, 0x65, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x5a, + 0x2f, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x68, 0x72, 0x69, + 0x73, 0x6c, 0x75, 0x73, 0x66, 0x2f, 0x73, 0x65, 0x61, 0x77, 0x65, 0x65, 0x64, 0x66, 0x73, 0x2f, + 0x77, 0x65, 0x65, 0x64, 0x2f, 0x70, 0x62, 0x2f, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, + 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } -func (m *StatisticsResponse) GetFileCount() uint64 { - if m != nil { - return m.FileCount - } - return 0 +var ( + file_filer_proto_rawDescOnce sync.Once + file_filer_proto_rawDescData = file_filer_proto_rawDesc +) + +func file_filer_proto_rawDescGZIP() []byte { + file_filer_proto_rawDescOnce.Do(func() { + file_filer_proto_rawDescData = protoimpl.X.CompressGZIP(file_filer_proto_rawDescData) + }) + return file_filer_proto_rawDescData +} + +var file_filer_proto_msgTypes = make([]protoimpl.MessageInfo, 42) +var file_filer_proto_goTypes = []interface{}{ + (*LookupDirectoryEntryRequest)(nil), // 0: filer_pb.LookupDirectoryEntryRequest + (*LookupDirectoryEntryResponse)(nil), // 1: filer_pb.LookupDirectoryEntryResponse + (*ListEntriesRequest)(nil), // 2: filer_pb.ListEntriesRequest + (*ListEntriesResponse)(nil), // 3: filer_pb.ListEntriesResponse + (*Entry)(nil), // 4: filer_pb.Entry + (*FullEntry)(nil), // 5: filer_pb.FullEntry + (*EventNotification)(nil), // 6: filer_pb.EventNotification + 
(*FileChunk)(nil), // 7: filer_pb.FileChunk + (*FileId)(nil), // 8: filer_pb.FileId + (*FuseAttributes)(nil), // 9: filer_pb.FuseAttributes + (*CreateEntryRequest)(nil), // 10: filer_pb.CreateEntryRequest + (*CreateEntryResponse)(nil), // 11: filer_pb.CreateEntryResponse + (*UpdateEntryRequest)(nil), // 12: filer_pb.UpdateEntryRequest + (*UpdateEntryResponse)(nil), // 13: filer_pb.UpdateEntryResponse + (*AppendToEntryRequest)(nil), // 14: filer_pb.AppendToEntryRequest + (*AppendToEntryResponse)(nil), // 15: filer_pb.AppendToEntryResponse + (*DeleteEntryRequest)(nil), // 16: filer_pb.DeleteEntryRequest + (*DeleteEntryResponse)(nil), // 17: filer_pb.DeleteEntryResponse + (*AtomicRenameEntryRequest)(nil), // 18: filer_pb.AtomicRenameEntryRequest + (*AtomicRenameEntryResponse)(nil), // 19: filer_pb.AtomicRenameEntryResponse + (*AssignVolumeRequest)(nil), // 20: filer_pb.AssignVolumeRequest + (*AssignVolumeResponse)(nil), // 21: filer_pb.AssignVolumeResponse + (*LookupVolumeRequest)(nil), // 22: filer_pb.LookupVolumeRequest + (*Locations)(nil), // 23: filer_pb.Locations + (*Location)(nil), // 24: filer_pb.Location + (*LookupVolumeResponse)(nil), // 25: filer_pb.LookupVolumeResponse + (*DeleteCollectionRequest)(nil), // 26: filer_pb.DeleteCollectionRequest + (*DeleteCollectionResponse)(nil), // 27: filer_pb.DeleteCollectionResponse + (*StatisticsRequest)(nil), // 28: filer_pb.StatisticsRequest + (*StatisticsResponse)(nil), // 29: filer_pb.StatisticsResponse + (*GetFilerConfigurationRequest)(nil), // 30: filer_pb.GetFilerConfigurationRequest + (*GetFilerConfigurationResponse)(nil), // 31: filer_pb.GetFilerConfigurationResponse + (*SubscribeMetadataRequest)(nil), // 32: filer_pb.SubscribeMetadataRequest + (*SubscribeMetadataResponse)(nil), // 33: filer_pb.SubscribeMetadataResponse + (*LogEntry)(nil), // 34: filer_pb.LogEntry + (*KeepConnectedRequest)(nil), // 35: filer_pb.KeepConnectedRequest + (*KeepConnectedResponse)(nil), // 36: filer_pb.KeepConnectedResponse + 
(*LocateBrokerRequest)(nil), // 37: filer_pb.LocateBrokerRequest + (*LocateBrokerResponse)(nil), // 38: filer_pb.LocateBrokerResponse + nil, // 39: filer_pb.Entry.ExtendedEntry + nil, // 40: filer_pb.LookupVolumeResponse.LocationsMapEntry + (*LocateBrokerResponse_Resource)(nil), // 41: filer_pb.LocateBrokerResponse.Resource +} +var file_filer_proto_depIdxs = []int32{ + 4, // 0: filer_pb.LookupDirectoryEntryResponse.entry:type_name -> filer_pb.Entry + 4, // 1: filer_pb.ListEntriesResponse.entry:type_name -> filer_pb.Entry + 7, // 2: filer_pb.Entry.chunks:type_name -> filer_pb.FileChunk + 9, // 3: filer_pb.Entry.attributes:type_name -> filer_pb.FuseAttributes + 39, // 4: filer_pb.Entry.extended:type_name -> filer_pb.Entry.ExtendedEntry + 4, // 5: filer_pb.FullEntry.entry:type_name -> filer_pb.Entry + 4, // 6: filer_pb.EventNotification.old_entry:type_name -> filer_pb.Entry + 4, // 7: filer_pb.EventNotification.new_entry:type_name -> filer_pb.Entry + 8, // 8: filer_pb.FileChunk.fid:type_name -> filer_pb.FileId + 8, // 9: filer_pb.FileChunk.source_fid:type_name -> filer_pb.FileId + 4, // 10: filer_pb.CreateEntryRequest.entry:type_name -> filer_pb.Entry + 4, // 11: filer_pb.UpdateEntryRequest.entry:type_name -> filer_pb.Entry + 7, // 12: filer_pb.AppendToEntryRequest.chunks:type_name -> filer_pb.FileChunk + 24, // 13: filer_pb.Locations.locations:type_name -> filer_pb.Location + 40, // 14: filer_pb.LookupVolumeResponse.locations_map:type_name -> filer_pb.LookupVolumeResponse.LocationsMapEntry + 6, // 15: filer_pb.SubscribeMetadataResponse.event_notification:type_name -> filer_pb.EventNotification + 41, // 16: filer_pb.LocateBrokerResponse.resources:type_name -> filer_pb.LocateBrokerResponse.Resource + 23, // 17: filer_pb.LookupVolumeResponse.LocationsMapEntry.value:type_name -> filer_pb.Locations + 0, // 18: filer_pb.SeaweedFiler.LookupDirectoryEntry:input_type -> filer_pb.LookupDirectoryEntryRequest + 2, // 19: filer_pb.SeaweedFiler.ListEntries:input_type -> 
filer_pb.ListEntriesRequest + 10, // 20: filer_pb.SeaweedFiler.CreateEntry:input_type -> filer_pb.CreateEntryRequest + 12, // 21: filer_pb.SeaweedFiler.UpdateEntry:input_type -> filer_pb.UpdateEntryRequest + 14, // 22: filer_pb.SeaweedFiler.AppendToEntry:input_type -> filer_pb.AppendToEntryRequest + 16, // 23: filer_pb.SeaweedFiler.DeleteEntry:input_type -> filer_pb.DeleteEntryRequest + 18, // 24: filer_pb.SeaweedFiler.AtomicRenameEntry:input_type -> filer_pb.AtomicRenameEntryRequest + 20, // 25: filer_pb.SeaweedFiler.AssignVolume:input_type -> filer_pb.AssignVolumeRequest + 22, // 26: filer_pb.SeaweedFiler.LookupVolume:input_type -> filer_pb.LookupVolumeRequest + 26, // 27: filer_pb.SeaweedFiler.DeleteCollection:input_type -> filer_pb.DeleteCollectionRequest + 28, // 28: filer_pb.SeaweedFiler.Statistics:input_type -> filer_pb.StatisticsRequest + 30, // 29: filer_pb.SeaweedFiler.GetFilerConfiguration:input_type -> filer_pb.GetFilerConfigurationRequest + 32, // 30: filer_pb.SeaweedFiler.SubscribeMetadata:input_type -> filer_pb.SubscribeMetadataRequest + 32, // 31: filer_pb.SeaweedFiler.SubscribeLocalMetadata:input_type -> filer_pb.SubscribeMetadataRequest + 35, // 32: filer_pb.SeaweedFiler.KeepConnected:input_type -> filer_pb.KeepConnectedRequest + 37, // 33: filer_pb.SeaweedFiler.LocateBroker:input_type -> filer_pb.LocateBrokerRequest + 1, // 34: filer_pb.SeaweedFiler.LookupDirectoryEntry:output_type -> filer_pb.LookupDirectoryEntryResponse + 3, // 35: filer_pb.SeaweedFiler.ListEntries:output_type -> filer_pb.ListEntriesResponse + 11, // 36: filer_pb.SeaweedFiler.CreateEntry:output_type -> filer_pb.CreateEntryResponse + 13, // 37: filer_pb.SeaweedFiler.UpdateEntry:output_type -> filer_pb.UpdateEntryResponse + 15, // 38: filer_pb.SeaweedFiler.AppendToEntry:output_type -> filer_pb.AppendToEntryResponse + 17, // 39: filer_pb.SeaweedFiler.DeleteEntry:output_type -> filer_pb.DeleteEntryResponse + 19, // 40: filer_pb.SeaweedFiler.AtomicRenameEntry:output_type -> 
filer_pb.AtomicRenameEntryResponse + 21, // 41: filer_pb.SeaweedFiler.AssignVolume:output_type -> filer_pb.AssignVolumeResponse + 25, // 42: filer_pb.SeaweedFiler.LookupVolume:output_type -> filer_pb.LookupVolumeResponse + 27, // 43: filer_pb.SeaweedFiler.DeleteCollection:output_type -> filer_pb.DeleteCollectionResponse + 29, // 44: filer_pb.SeaweedFiler.Statistics:output_type -> filer_pb.StatisticsResponse + 31, // 45: filer_pb.SeaweedFiler.GetFilerConfiguration:output_type -> filer_pb.GetFilerConfigurationResponse + 33, // 46: filer_pb.SeaweedFiler.SubscribeMetadata:output_type -> filer_pb.SubscribeMetadataResponse + 33, // 47: filer_pb.SeaweedFiler.SubscribeLocalMetadata:output_type -> filer_pb.SubscribeMetadataResponse + 36, // 48: filer_pb.SeaweedFiler.KeepConnected:output_type -> filer_pb.KeepConnectedResponse + 38, // 49: filer_pb.SeaweedFiler.LocateBroker:output_type -> filer_pb.LocateBrokerResponse + 34, // [34:50] is the sub-list for method output_type + 18, // [18:34] is the sub-list for method input_type + 18, // [18:18] is the sub-list for extension type_name + 18, // [18:18] is the sub-list for extension extendee + 0, // [0:18] is the sub-list for field type_name } -func init() { - proto.RegisterType((*LookupDirectoryEntryRequest)(nil), "filer_pb.LookupDirectoryEntryRequest") - proto.RegisterType((*LookupDirectoryEntryResponse)(nil), "filer_pb.LookupDirectoryEntryResponse") - proto.RegisterType((*ListEntriesRequest)(nil), "filer_pb.ListEntriesRequest") - proto.RegisterType((*ListEntriesResponse)(nil), "filer_pb.ListEntriesResponse") - proto.RegisterType((*Entry)(nil), "filer_pb.Entry") - proto.RegisterType((*EventNotification)(nil), "filer_pb.EventNotification") - proto.RegisterType((*FileChunk)(nil), "filer_pb.FileChunk") - proto.RegisterType((*FuseAttributes)(nil), "filer_pb.FuseAttributes") - proto.RegisterType((*CreateEntryRequest)(nil), "filer_pb.CreateEntryRequest") - proto.RegisterType((*CreateEntryResponse)(nil), 
"filer_pb.CreateEntryResponse") - proto.RegisterType((*UpdateEntryRequest)(nil), "filer_pb.UpdateEntryRequest") - proto.RegisterType((*UpdateEntryResponse)(nil), "filer_pb.UpdateEntryResponse") - proto.RegisterType((*DeleteEntryRequest)(nil), "filer_pb.DeleteEntryRequest") - proto.RegisterType((*DeleteEntryResponse)(nil), "filer_pb.DeleteEntryResponse") - proto.RegisterType((*AssignVolumeRequest)(nil), "filer_pb.AssignVolumeRequest") - proto.RegisterType((*AssignVolumeResponse)(nil), "filer_pb.AssignVolumeResponse") - proto.RegisterType((*LookupVolumeRequest)(nil), "filer_pb.LookupVolumeRequest") - proto.RegisterType((*Locations)(nil), "filer_pb.Locations") - proto.RegisterType((*Location)(nil), "filer_pb.Location") - proto.RegisterType((*LookupVolumeResponse)(nil), "filer_pb.LookupVolumeResponse") - proto.RegisterType((*DeleteCollectionRequest)(nil), "filer_pb.DeleteCollectionRequest") - proto.RegisterType((*DeleteCollectionResponse)(nil), "filer_pb.DeleteCollectionResponse") - proto.RegisterType((*StatisticsRequest)(nil), "filer_pb.StatisticsRequest") - proto.RegisterType((*StatisticsResponse)(nil), "filer_pb.StatisticsResponse") +func init() { file_filer_proto_init() } +func file_filer_proto_init() { + if File_filer_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_filer_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LookupDirectoryEntryRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filer_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LookupDirectoryEntryResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filer_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListEntriesRequest); i { + case 0: + return &v.state + case 1: + return 
&v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filer_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListEntriesResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filer_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Entry); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filer_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FullEntry); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filer_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*EventNotification); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filer_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FileChunk); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filer_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FileId); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filer_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FuseAttributes); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filer_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CreateEntryRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache 
+ case 2: + return &v.unknownFields + default: + return nil + } + } + file_filer_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CreateEntryResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filer_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UpdateEntryRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filer_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UpdateEntryResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filer_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AppendToEntryRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filer_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AppendToEntryResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filer_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DeleteEntryRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filer_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DeleteEntryResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filer_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AtomicRenameEntryRequest); i { + 
case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filer_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AtomicRenameEntryResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filer_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AssignVolumeRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filer_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AssignVolumeResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filer_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LookupVolumeRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filer_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Locations); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filer_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Location); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filer_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LookupVolumeResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filer_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} { + switch v 
:= v.(*DeleteCollectionRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filer_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DeleteCollectionResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filer_proto_msgTypes[28].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*StatisticsRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filer_proto_msgTypes[29].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*StatisticsResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filer_proto_msgTypes[30].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetFilerConfigurationRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filer_proto_msgTypes[31].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetFilerConfigurationResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filer_proto_msgTypes[32].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SubscribeMetadataRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filer_proto_msgTypes[33].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SubscribeMetadataResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + 
file_filer_proto_msgTypes[34].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LogEntry); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filer_proto_msgTypes[35].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*KeepConnectedRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filer_proto_msgTypes[36].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*KeepConnectedResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filer_proto_msgTypes[37].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LocateBrokerRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filer_proto_msgTypes[38].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LocateBrokerResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filer_proto_msgTypes[41].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LocateBrokerResponse_Resource); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_filer_proto_rawDesc, + NumEnums: 0, + NumMessages: 42, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_filer_proto_goTypes, + DependencyIndexes: file_filer_proto_depIdxs, + MessageInfos: file_filer_proto_msgTypes, + }.Build() + File_filer_proto = out.File + file_filer_proto_rawDesc = nil + 
file_filer_proto_goTypes = nil + file_filer_proto_depIdxs = nil } // Reference imports to suppress errors if they are not otherwise used. var _ context.Context -var _ grpc.ClientConn +var _ grpc.ClientConnInterface // This is a compile-time assertion to ensure that this generated file // is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 - -// Client API for SeaweedFiler service +const _ = grpc.SupportPackageIsVersion6 +// SeaweedFilerClient is the client API for SeaweedFiler service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. type SeaweedFilerClient interface { LookupDirectoryEntry(ctx context.Context, in *LookupDirectoryEntryRequest, opts ...grpc.CallOption) (*LookupDirectoryEntryResponse, error) - ListEntries(ctx context.Context, in *ListEntriesRequest, opts ...grpc.CallOption) (*ListEntriesResponse, error) + ListEntries(ctx context.Context, in *ListEntriesRequest, opts ...grpc.CallOption) (SeaweedFiler_ListEntriesClient, error) CreateEntry(ctx context.Context, in *CreateEntryRequest, opts ...grpc.CallOption) (*CreateEntryResponse, error) UpdateEntry(ctx context.Context, in *UpdateEntryRequest, opts ...grpc.CallOption) (*UpdateEntryResponse, error) + AppendToEntry(ctx context.Context, in *AppendToEntryRequest, opts ...grpc.CallOption) (*AppendToEntryResponse, error) DeleteEntry(ctx context.Context, in *DeleteEntryRequest, opts ...grpc.CallOption) (*DeleteEntryResponse, error) + AtomicRenameEntry(ctx context.Context, in *AtomicRenameEntryRequest, opts ...grpc.CallOption) (*AtomicRenameEntryResponse, error) AssignVolume(ctx context.Context, in *AssignVolumeRequest, opts ...grpc.CallOption) (*AssignVolumeResponse, error) LookupVolume(ctx context.Context, in *LookupVolumeRequest, opts ...grpc.CallOption) (*LookupVolumeResponse, error) DeleteCollection(ctx context.Context, in *DeleteCollectionRequest, opts 
...grpc.CallOption) (*DeleteCollectionResponse, error) Statistics(ctx context.Context, in *StatisticsRequest, opts ...grpc.CallOption) (*StatisticsResponse, error) + GetFilerConfiguration(ctx context.Context, in *GetFilerConfigurationRequest, opts ...grpc.CallOption) (*GetFilerConfigurationResponse, error) + SubscribeMetadata(ctx context.Context, in *SubscribeMetadataRequest, opts ...grpc.CallOption) (SeaweedFiler_SubscribeMetadataClient, error) + SubscribeLocalMetadata(ctx context.Context, in *SubscribeMetadataRequest, opts ...grpc.CallOption) (SeaweedFiler_SubscribeLocalMetadataClient, error) + KeepConnected(ctx context.Context, opts ...grpc.CallOption) (SeaweedFiler_KeepConnectedClient, error) + LocateBroker(ctx context.Context, in *LocateBrokerRequest, opts ...grpc.CallOption) (*LocateBrokerResponse, error) } type seaweedFilerClient struct { - cc *grpc.ClientConn + cc grpc.ClientConnInterface } -func NewSeaweedFilerClient(cc *grpc.ClientConn) SeaweedFilerClient { +func NewSeaweedFilerClient(cc grpc.ClientConnInterface) SeaweedFilerClient { return &seaweedFilerClient{cc} } func (c *seaweedFilerClient) LookupDirectoryEntry(ctx context.Context, in *LookupDirectoryEntryRequest, opts ...grpc.CallOption) (*LookupDirectoryEntryResponse, error) { out := new(LookupDirectoryEntryResponse) - err := grpc.Invoke(ctx, "/filer_pb.SeaweedFiler/LookupDirectoryEntry", in, out, c.cc, opts...) + err := c.cc.Invoke(ctx, "/filer_pb.SeaweedFiler/LookupDirectoryEntry", in, out, opts...) if err != nil { return nil, err } return out, nil } -func (c *seaweedFilerClient) ListEntries(ctx context.Context, in *ListEntriesRequest, opts ...grpc.CallOption) (*ListEntriesResponse, error) { - out := new(ListEntriesResponse) - err := grpc.Invoke(ctx, "/filer_pb.SeaweedFiler/ListEntries", in, out, c.cc, opts...) 
+func (c *seaweedFilerClient) ListEntries(ctx context.Context, in *ListEntriesRequest, opts ...grpc.CallOption) (SeaweedFiler_ListEntriesClient, error) { + stream, err := c.cc.NewStream(ctx, &_SeaweedFiler_serviceDesc.Streams[0], "/filer_pb.SeaweedFiler/ListEntries", opts...) if err != nil { return nil, err } - return out, nil + x := &seaweedFilerListEntriesClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type SeaweedFiler_ListEntriesClient interface { + Recv() (*ListEntriesResponse, error) + grpc.ClientStream +} + +type seaweedFilerListEntriesClient struct { + grpc.ClientStream +} + +func (x *seaweedFilerListEntriesClient) Recv() (*ListEntriesResponse, error) { + m := new(ListEntriesResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil } func (c *seaweedFilerClient) CreateEntry(ctx context.Context, in *CreateEntryRequest, opts ...grpc.CallOption) (*CreateEntryResponse, error) { out := new(CreateEntryResponse) - err := grpc.Invoke(ctx, "/filer_pb.SeaweedFiler/CreateEntry", in, out, c.cc, opts...) + err := c.cc.Invoke(ctx, "/filer_pb.SeaweedFiler/CreateEntry", in, out, opts...) if err != nil { return nil, err } @@ -879,7 +3689,16 @@ func (c *seaweedFilerClient) CreateEntry(ctx context.Context, in *CreateEntryReq func (c *seaweedFilerClient) UpdateEntry(ctx context.Context, in *UpdateEntryRequest, opts ...grpc.CallOption) (*UpdateEntryResponse, error) { out := new(UpdateEntryResponse) - err := grpc.Invoke(ctx, "/filer_pb.SeaweedFiler/UpdateEntry", in, out, c.cc, opts...) + err := c.cc.Invoke(ctx, "/filer_pb.SeaweedFiler/UpdateEntry", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *seaweedFilerClient) AppendToEntry(ctx context.Context, in *AppendToEntryRequest, opts ...grpc.CallOption) (*AppendToEntryResponse, error) { + out := new(AppendToEntryResponse) + err := c.cc.Invoke(ctx, "/filer_pb.SeaweedFiler/AppendToEntry", in, out, opts...) if err != nil { return nil, err } @@ -888,7 +3707,16 @@ func (c *seaweedFilerClient) UpdateEntry(ctx context.Context, in *UpdateEntryReq func (c *seaweedFilerClient) DeleteEntry(ctx context.Context, in *DeleteEntryRequest, opts ...grpc.CallOption) (*DeleteEntryResponse, error) { out := new(DeleteEntryResponse) - err := grpc.Invoke(ctx, "/filer_pb.SeaweedFiler/DeleteEntry", in, out, c.cc, opts...) + err := c.cc.Invoke(ctx, "/filer_pb.SeaweedFiler/DeleteEntry", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *seaweedFilerClient) AtomicRenameEntry(ctx context.Context, in *AtomicRenameEntryRequest, opts ...grpc.CallOption) (*AtomicRenameEntryResponse, error) { + out := new(AtomicRenameEntryResponse) + err := c.cc.Invoke(ctx, "/filer_pb.SeaweedFiler/AtomicRenameEntry", in, out, opts...) if err != nil { return nil, err } @@ -897,7 +3725,7 @@ func (c *seaweedFilerClient) DeleteEntry(ctx context.Context, in *DeleteEntryReq func (c *seaweedFilerClient) AssignVolume(ctx context.Context, in *AssignVolumeRequest, opts ...grpc.CallOption) (*AssignVolumeResponse, error) { out := new(AssignVolumeResponse) - err := grpc.Invoke(ctx, "/filer_pb.SeaweedFiler/AssignVolume", in, out, c.cc, opts...) + err := c.cc.Invoke(ctx, "/filer_pb.SeaweedFiler/AssignVolume", in, out, opts...) 
if err != nil { return nil, err } @@ -906,7 +3734,7 @@ func (c *seaweedFilerClient) AssignVolume(ctx context.Context, in *AssignVolumeR func (c *seaweedFilerClient) LookupVolume(ctx context.Context, in *LookupVolumeRequest, opts ...grpc.CallOption) (*LookupVolumeResponse, error) { out := new(LookupVolumeResponse) - err := grpc.Invoke(ctx, "/filer_pb.SeaweedFiler/LookupVolume", in, out, c.cc, opts...) + err := c.cc.Invoke(ctx, "/filer_pb.SeaweedFiler/LookupVolume", in, out, opts...) if err != nil { return nil, err } @@ -915,7 +3743,7 @@ func (c *seaweedFilerClient) LookupVolume(ctx context.Context, in *LookupVolumeR func (c *seaweedFilerClient) DeleteCollection(ctx context.Context, in *DeleteCollectionRequest, opts ...grpc.CallOption) (*DeleteCollectionResponse, error) { out := new(DeleteCollectionResponse) - err := grpc.Invoke(ctx, "/filer_pb.SeaweedFiler/DeleteCollection", in, out, c.cc, opts...) + err := c.cc.Invoke(ctx, "/filer_pb.SeaweedFiler/DeleteCollection", in, out, opts...) if err != nil { return nil, err } @@ -924,25 +3752,197 @@ func (c *seaweedFilerClient) DeleteCollection(ctx context.Context, in *DeleteCol func (c *seaweedFilerClient) Statistics(ctx context.Context, in *StatisticsRequest, opts ...grpc.CallOption) (*StatisticsResponse, error) { out := new(StatisticsResponse) - err := grpc.Invoke(ctx, "/filer_pb.SeaweedFiler/Statistics", in, out, c.cc, opts...) + err := c.cc.Invoke(ctx, "/filer_pb.SeaweedFiler/Statistics", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *seaweedFilerClient) GetFilerConfiguration(ctx context.Context, in *GetFilerConfigurationRequest, opts ...grpc.CallOption) (*GetFilerConfigurationResponse, error) { + out := new(GetFilerConfigurationResponse) + err := c.cc.Invoke(ctx, "/filer_pb.SeaweedFiler/GetFilerConfiguration", in, out, opts...) 
if err != nil { return nil, err } return out, nil } -// Server API for SeaweedFiler service +func (c *seaweedFilerClient) SubscribeMetadata(ctx context.Context, in *SubscribeMetadataRequest, opts ...grpc.CallOption) (SeaweedFiler_SubscribeMetadataClient, error) { + stream, err := c.cc.NewStream(ctx, &_SeaweedFiler_serviceDesc.Streams[1], "/filer_pb.SeaweedFiler/SubscribeMetadata", opts...) + if err != nil { + return nil, err + } + x := &seaweedFilerSubscribeMetadataClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type SeaweedFiler_SubscribeMetadataClient interface { + Recv() (*SubscribeMetadataResponse, error) + grpc.ClientStream +} + +type seaweedFilerSubscribeMetadataClient struct { + grpc.ClientStream +} + +func (x *seaweedFilerSubscribeMetadataClient) Recv() (*SubscribeMetadataResponse, error) { + m := new(SubscribeMetadataResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *seaweedFilerClient) SubscribeLocalMetadata(ctx context.Context, in *SubscribeMetadataRequest, opts ...grpc.CallOption) (SeaweedFiler_SubscribeLocalMetadataClient, error) { + stream, err := c.cc.NewStream(ctx, &_SeaweedFiler_serviceDesc.Streams[2], "/filer_pb.SeaweedFiler/SubscribeLocalMetadata", opts...) 
+ if err != nil { + return nil, err + } + x := &seaweedFilerSubscribeLocalMetadataClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type SeaweedFiler_SubscribeLocalMetadataClient interface { + Recv() (*SubscribeMetadataResponse, error) + grpc.ClientStream +} + +type seaweedFilerSubscribeLocalMetadataClient struct { + grpc.ClientStream +} + +func (x *seaweedFilerSubscribeLocalMetadataClient) Recv() (*SubscribeMetadataResponse, error) { + m := new(SubscribeMetadataResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *seaweedFilerClient) KeepConnected(ctx context.Context, opts ...grpc.CallOption) (SeaweedFiler_KeepConnectedClient, error) { + stream, err := c.cc.NewStream(ctx, &_SeaweedFiler_serviceDesc.Streams[3], "/filer_pb.SeaweedFiler/KeepConnected", opts...) + if err != nil { + return nil, err + } + x := &seaweedFilerKeepConnectedClient{stream} + return x, nil +} + +type SeaweedFiler_KeepConnectedClient interface { + Send(*KeepConnectedRequest) error + Recv() (*KeepConnectedResponse, error) + grpc.ClientStream +} + +type seaweedFilerKeepConnectedClient struct { + grpc.ClientStream +} + +func (x *seaweedFilerKeepConnectedClient) Send(m *KeepConnectedRequest) error { + return x.ClientStream.SendMsg(m) +} + +func (x *seaweedFilerKeepConnectedClient) Recv() (*KeepConnectedResponse, error) { + m := new(KeepConnectedResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} +func (c *seaweedFilerClient) LocateBroker(ctx context.Context, in *LocateBrokerRequest, opts ...grpc.CallOption) (*LocateBrokerResponse, error) { + out := new(LocateBrokerResponse) + err := c.cc.Invoke(ctx, "/filer_pb.SeaweedFiler/LocateBroker", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +// SeaweedFilerServer is the server API for SeaweedFiler service. type SeaweedFilerServer interface { LookupDirectoryEntry(context.Context, *LookupDirectoryEntryRequest) (*LookupDirectoryEntryResponse, error) - ListEntries(context.Context, *ListEntriesRequest) (*ListEntriesResponse, error) + ListEntries(*ListEntriesRequest, SeaweedFiler_ListEntriesServer) error CreateEntry(context.Context, *CreateEntryRequest) (*CreateEntryResponse, error) UpdateEntry(context.Context, *UpdateEntryRequest) (*UpdateEntryResponse, error) + AppendToEntry(context.Context, *AppendToEntryRequest) (*AppendToEntryResponse, error) DeleteEntry(context.Context, *DeleteEntryRequest) (*DeleteEntryResponse, error) + AtomicRenameEntry(context.Context, *AtomicRenameEntryRequest) (*AtomicRenameEntryResponse, error) AssignVolume(context.Context, *AssignVolumeRequest) (*AssignVolumeResponse, error) LookupVolume(context.Context, *LookupVolumeRequest) (*LookupVolumeResponse, error) DeleteCollection(context.Context, *DeleteCollectionRequest) (*DeleteCollectionResponse, error) Statistics(context.Context, *StatisticsRequest) (*StatisticsResponse, error) + GetFilerConfiguration(context.Context, *GetFilerConfigurationRequest) (*GetFilerConfigurationResponse, error) + SubscribeMetadata(*SubscribeMetadataRequest, SeaweedFiler_SubscribeMetadataServer) error + SubscribeLocalMetadata(*SubscribeMetadataRequest, SeaweedFiler_SubscribeLocalMetadataServer) error + KeepConnected(SeaweedFiler_KeepConnectedServer) error + LocateBroker(context.Context, *LocateBrokerRequest) (*LocateBrokerResponse, error) +} + +// UnimplementedSeaweedFilerServer can be embedded to have forward compatible implementations. 
+type UnimplementedSeaweedFilerServer struct { +} + +func (*UnimplementedSeaweedFilerServer) LookupDirectoryEntry(context.Context, *LookupDirectoryEntryRequest) (*LookupDirectoryEntryResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method LookupDirectoryEntry not implemented") +} +func (*UnimplementedSeaweedFilerServer) ListEntries(*ListEntriesRequest, SeaweedFiler_ListEntriesServer) error { + return status.Errorf(codes.Unimplemented, "method ListEntries not implemented") +} +func (*UnimplementedSeaweedFilerServer) CreateEntry(context.Context, *CreateEntryRequest) (*CreateEntryResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method CreateEntry not implemented") +} +func (*UnimplementedSeaweedFilerServer) UpdateEntry(context.Context, *UpdateEntryRequest) (*UpdateEntryResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method UpdateEntry not implemented") +} +func (*UnimplementedSeaweedFilerServer) AppendToEntry(context.Context, *AppendToEntryRequest) (*AppendToEntryResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method AppendToEntry not implemented") +} +func (*UnimplementedSeaweedFilerServer) DeleteEntry(context.Context, *DeleteEntryRequest) (*DeleteEntryResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method DeleteEntry not implemented") +} +func (*UnimplementedSeaweedFilerServer) AtomicRenameEntry(context.Context, *AtomicRenameEntryRequest) (*AtomicRenameEntryResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method AtomicRenameEntry not implemented") +} +func (*UnimplementedSeaweedFilerServer) AssignVolume(context.Context, *AssignVolumeRequest) (*AssignVolumeResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method AssignVolume not implemented") +} +func (*UnimplementedSeaweedFilerServer) LookupVolume(context.Context, *LookupVolumeRequest) (*LookupVolumeResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method 
LookupVolume not implemented") +} +func (*UnimplementedSeaweedFilerServer) DeleteCollection(context.Context, *DeleteCollectionRequest) (*DeleteCollectionResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method DeleteCollection not implemented") +} +func (*UnimplementedSeaweedFilerServer) Statistics(context.Context, *StatisticsRequest) (*StatisticsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Statistics not implemented") +} +func (*UnimplementedSeaweedFilerServer) GetFilerConfiguration(context.Context, *GetFilerConfigurationRequest) (*GetFilerConfigurationResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetFilerConfiguration not implemented") +} +func (*UnimplementedSeaweedFilerServer) SubscribeMetadata(*SubscribeMetadataRequest, SeaweedFiler_SubscribeMetadataServer) error { + return status.Errorf(codes.Unimplemented, "method SubscribeMetadata not implemented") +} +func (*UnimplementedSeaweedFilerServer) SubscribeLocalMetadata(*SubscribeMetadataRequest, SeaweedFiler_SubscribeLocalMetadataServer) error { + return status.Errorf(codes.Unimplemented, "method SubscribeLocalMetadata not implemented") +} +func (*UnimplementedSeaweedFilerServer) KeepConnected(SeaweedFiler_KeepConnectedServer) error { + return status.Errorf(codes.Unimplemented, "method KeepConnected not implemented") +} +func (*UnimplementedSeaweedFilerServer) LocateBroker(context.Context, *LocateBrokerRequest) (*LocateBrokerResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method LocateBroker not implemented") } func RegisterSeaweedFilerServer(s *grpc.Server, srv SeaweedFilerServer) { @@ -967,22 +3967,25 @@ func _SeaweedFiler_LookupDirectoryEntry_Handler(srv interface{}, ctx context.Con return interceptor(ctx, in, info, handler) } -func _SeaweedFiler_ListEntries_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := 
new(ListEntriesRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(SeaweedFilerServer).ListEntries(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/filer_pb.SeaweedFiler/ListEntries", +func _SeaweedFiler_ListEntries_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(ListEntriesRequest) + if err := stream.RecvMsg(m); err != nil { + return err } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(SeaweedFilerServer).ListEntries(ctx, req.(*ListEntriesRequest)) - } - return interceptor(ctx, in, info, handler) + return srv.(SeaweedFilerServer).ListEntries(m, &seaweedFilerListEntriesServer{stream}) +} + +type SeaweedFiler_ListEntriesServer interface { + Send(*ListEntriesResponse) error + grpc.ServerStream +} + +type seaweedFilerListEntriesServer struct { + grpc.ServerStream +} + +func (x *seaweedFilerListEntriesServer) Send(m *ListEntriesResponse) error { + return x.ServerStream.SendMsg(m) } func _SeaweedFiler_CreateEntry_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { @@ -1021,6 +4024,24 @@ func _SeaweedFiler_UpdateEntry_Handler(srv interface{}, ctx context.Context, dec return interceptor(ctx, in, info, handler) } +func _SeaweedFiler_AppendToEntry_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(AppendToEntryRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SeaweedFilerServer).AppendToEntry(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/filer_pb.SeaweedFiler/AppendToEntry", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SeaweedFilerServer).AppendToEntry(ctx, req.(*AppendToEntryRequest)) + } + return 
interceptor(ctx, in, info, handler) +} + func _SeaweedFiler_DeleteEntry_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(DeleteEntryRequest) if err := dec(in); err != nil { @@ -1039,6 +4060,24 @@ func _SeaweedFiler_DeleteEntry_Handler(srv interface{}, ctx context.Context, dec return interceptor(ctx, in, info, handler) } +func _SeaweedFiler_AtomicRenameEntry_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(AtomicRenameEntryRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SeaweedFilerServer).AtomicRenameEntry(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/filer_pb.SeaweedFiler/AtomicRenameEntry", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SeaweedFilerServer).AtomicRenameEntry(ctx, req.(*AtomicRenameEntryRequest)) + } + return interceptor(ctx, in, info, handler) +} + func _SeaweedFiler_AssignVolume_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(AssignVolumeRequest) if err := dec(in); err != nil { @@ -1111,6 +4150,110 @@ func _SeaweedFiler_Statistics_Handler(srv interface{}, ctx context.Context, dec return interceptor(ctx, in, info, handler) } +func _SeaweedFiler_GetFilerConfiguration_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetFilerConfigurationRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SeaweedFilerServer).GetFilerConfiguration(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/filer_pb.SeaweedFiler/GetFilerConfiguration", + } + handler 
:= func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SeaweedFilerServer).GetFilerConfiguration(ctx, req.(*GetFilerConfigurationRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _SeaweedFiler_SubscribeMetadata_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(SubscribeMetadataRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(SeaweedFilerServer).SubscribeMetadata(m, &seaweedFilerSubscribeMetadataServer{stream}) +} + +type SeaweedFiler_SubscribeMetadataServer interface { + Send(*SubscribeMetadataResponse) error + grpc.ServerStream +} + +type seaweedFilerSubscribeMetadataServer struct { + grpc.ServerStream +} + +func (x *seaweedFilerSubscribeMetadataServer) Send(m *SubscribeMetadataResponse) error { + return x.ServerStream.SendMsg(m) +} + +func _SeaweedFiler_SubscribeLocalMetadata_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(SubscribeMetadataRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(SeaweedFilerServer).SubscribeLocalMetadata(m, &seaweedFilerSubscribeLocalMetadataServer{stream}) +} + +type SeaweedFiler_SubscribeLocalMetadataServer interface { + Send(*SubscribeMetadataResponse) error + grpc.ServerStream +} + +type seaweedFilerSubscribeLocalMetadataServer struct { + grpc.ServerStream +} + +func (x *seaweedFilerSubscribeLocalMetadataServer) Send(m *SubscribeMetadataResponse) error { + return x.ServerStream.SendMsg(m) +} + +func _SeaweedFiler_KeepConnected_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(SeaweedFilerServer).KeepConnected(&seaweedFilerKeepConnectedServer{stream}) +} + +type SeaweedFiler_KeepConnectedServer interface { + Send(*KeepConnectedResponse) error + Recv() (*KeepConnectedRequest, error) + grpc.ServerStream +} + +type seaweedFilerKeepConnectedServer struct { + grpc.ServerStream +} + +func (x *seaweedFilerKeepConnectedServer) Send(m *KeepConnectedResponse) error 
{ + return x.ServerStream.SendMsg(m) +} + +func (x *seaweedFilerKeepConnectedServer) Recv() (*KeepConnectedRequest, error) { + m := new(KeepConnectedRequest) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func _SeaweedFiler_LocateBroker_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(LocateBrokerRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SeaweedFilerServer).LocateBroker(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/filer_pb.SeaweedFiler/LocateBroker", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SeaweedFilerServer).LocateBroker(ctx, req.(*LocateBrokerRequest)) + } + return interceptor(ctx, in, info, handler) +} + var _SeaweedFiler_serviceDesc = grpc.ServiceDesc{ ServiceName: "filer_pb.SeaweedFiler", HandlerType: (*SeaweedFilerServer)(nil), @@ -1119,10 +4262,6 @@ var _SeaweedFiler_serviceDesc = grpc.ServiceDesc{ MethodName: "LookupDirectoryEntry", Handler: _SeaweedFiler_LookupDirectoryEntry_Handler, }, - { - MethodName: "ListEntries", - Handler: _SeaweedFiler_ListEntries_Handler, - }, { MethodName: "CreateEntry", Handler: _SeaweedFiler_CreateEntry_Handler, @@ -1131,10 +4270,18 @@ var _SeaweedFiler_serviceDesc = grpc.ServiceDesc{ MethodName: "UpdateEntry", Handler: _SeaweedFiler_UpdateEntry_Handler, }, + { + MethodName: "AppendToEntry", + Handler: _SeaweedFiler_AppendToEntry_Handler, + }, { MethodName: "DeleteEntry", Handler: _SeaweedFiler_DeleteEntry_Handler, }, + { + MethodName: "AtomicRenameEntry", + Handler: _SeaweedFiler_AtomicRenameEntry_Handler, + }, { MethodName: "AssignVolume", Handler: _SeaweedFiler_AssignVolume_Handler, @@ -1151,94 +4298,37 @@ var _SeaweedFiler_serviceDesc = grpc.ServiceDesc{ MethodName: "Statistics", Handler: _SeaweedFiler_Statistics_Handler, }, + 
{ + MethodName: "GetFilerConfiguration", + Handler: _SeaweedFiler_GetFilerConfiguration_Handler, + }, + { + MethodName: "LocateBroker", + Handler: _SeaweedFiler_LocateBroker_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "ListEntries", + Handler: _SeaweedFiler_ListEntries_Handler, + ServerStreams: true, + }, + { + StreamName: "SubscribeMetadata", + Handler: _SeaweedFiler_SubscribeMetadata_Handler, + ServerStreams: true, + }, + { + StreamName: "SubscribeLocalMetadata", + Handler: _SeaweedFiler_SubscribeLocalMetadata_Handler, + ServerStreams: true, + }, + { + StreamName: "KeepConnected", + Handler: _SeaweedFiler_KeepConnected_Handler, + ServerStreams: true, + ClientStreams: true, + }, }, - Streams: []grpc.StreamDesc{}, Metadata: "filer.proto", } - -func init() { proto.RegisterFile("filer.proto", fileDescriptor0) } - -var fileDescriptor0 = []byte{ - // 1291 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xb4, 0x57, 0x4d, 0x8f, 0xdc, 0x44, - 0x13, 0x8e, 0xe7, 0x2b, 0xe3, 0x9a, 0x99, 0xbc, 0xbb, 0x3d, 0xfb, 0x12, 0x6b, 0xb2, 0x1b, 0x26, - 0x86, 0xa0, 0x8d, 0x88, 0x46, 0x51, 0xe0, 0x90, 0x10, 0x21, 0x91, 0x6c, 0x36, 0x52, 0xa4, 0x4d, - 0x82, 0xbc, 0x09, 0x12, 0xe2, 0x60, 0x79, 0xed, 0x9e, 0xa1, 0xb5, 0x1e, 0x7b, 0x70, 0xb7, 0x37, - 0x09, 0x7f, 0x82, 0x0b, 0x57, 0x0e, 0x9c, 0xf8, 0x17, 0x5c, 0xf8, 0x3f, 0xdc, 0xb9, 0xa1, 0xae, - 0x6e, 0x7b, 0xda, 0x63, 0xef, 0x06, 0x84, 0x72, 0xeb, 0x7e, 0xaa, 0xba, 0xbe, 0xfa, 0xe9, 0x2a, - 0x1b, 0x06, 0x73, 0x16, 0xd3, 0x6c, 0xb6, 0xca, 0x52, 0x91, 0x92, 0x3e, 0x6e, 0xfc, 0xd5, 0x89, - 0xfb, 0x02, 0xae, 0x1d, 0xa5, 0xe9, 0x69, 0xbe, 0x7a, 0xcc, 0x32, 0x1a, 0x8a, 0x34, 0x7b, 0x7b, - 0x98, 0x88, 0xec, 0xad, 0x47, 0x7f, 0xc8, 0x29, 0x17, 0x64, 0x17, 0xec, 0xa8, 0x10, 0x38, 0xd6, - 0xd4, 0xda, 0xb7, 0xbd, 0x35, 0x40, 0x08, 0x74, 0x92, 0x60, 0x49, 0x9d, 0x16, 0x0a, 0x70, 0xed, - 0x1e, 0xc2, 0x6e, 0xb3, 0x41, 0xbe, 0x4a, 0x13, 0x4e, 0xc9, 0x4d, 0xe8, 0x52, 0x09, 0xa0, 
0xb5, - 0xc1, 0xdd, 0xff, 0xcd, 0x8a, 0x50, 0x66, 0x4a, 0x4f, 0x49, 0xdd, 0xdf, 0x2d, 0x20, 0x47, 0x8c, - 0x0b, 0x09, 0x32, 0xca, 0xff, 0x59, 0x3c, 0x1f, 0x40, 0x6f, 0x95, 0xd1, 0x39, 0x7b, 0xa3, 0x23, - 0xd2, 0x3b, 0x72, 0x1b, 0xb6, 0xb9, 0x08, 0x32, 0xf1, 0x24, 0x4b, 0x97, 0x4f, 0x58, 0x4c, 0x9f, - 0xcb, 0xa0, 0xdb, 0xa8, 0x52, 0x17, 0x90, 0x19, 0x10, 0x96, 0x84, 0x71, 0xce, 0xd9, 0x19, 0x3d, - 0x2e, 0xa4, 0x4e, 0x67, 0x6a, 0xed, 0xf7, 0xbd, 0x06, 0x09, 0xd9, 0x81, 0x6e, 0xcc, 0x96, 0x4c, - 0x38, 0xdd, 0xa9, 0xb5, 0x3f, 0xf2, 0xd4, 0xc6, 0xfd, 0x0a, 0xc6, 0x95, 0xf8, 0x75, 0xfa, 0xb7, - 0xe0, 0x32, 0x55, 0x90, 0x63, 0x4d, 0xdb, 0x4d, 0x05, 0x28, 0xe4, 0xee, 0x2f, 0x2d, 0xe8, 0x22, - 0x54, 0xd6, 0xd9, 0x5a, 0xd7, 0x99, 0xdc, 0x80, 0x21, 0xe3, 0xfe, 0xba, 0x18, 0x2d, 0x8c, 0x6f, - 0xc0, 0x78, 0x59, 0x77, 0xf2, 0x29, 0xf4, 0xc2, 0xef, 0xf3, 0xe4, 0x94, 0x3b, 0x6d, 0x74, 0x35, - 0x5e, 0xbb, 0x92, 0xc9, 0x1e, 0x48, 0x99, 0xa7, 0x55, 0xc8, 0x3d, 0x80, 0x40, 0x88, 0x8c, 0x9d, - 0xe4, 0x82, 0x72, 0xcc, 0x76, 0x70, 0xd7, 0x31, 0x0e, 0xe4, 0x9c, 0x3e, 0x2c, 0xe5, 0x9e, 0xa1, - 0x4b, 0xee, 0x43, 0x9f, 0xbe, 0x11, 0x34, 0x89, 0x68, 0xe4, 0x74, 0xd1, 0xd1, 0xde, 0x46, 0x4e, - 0xb3, 0x43, 0x2d, 0x57, 0x19, 0x96, 0xea, 0x93, 0x07, 0x30, 0xaa, 0x88, 0xc8, 0x16, 0xb4, 0x4f, - 0x69, 0x71, 0xb3, 0x72, 0x29, 0xab, 0x7b, 0x16, 0xc4, 0xb9, 0x22, 0xd9, 0xd0, 0x53, 0x9b, 0x2f, - 0x5a, 0xf7, 0x2c, 0xf7, 0x67, 0x0b, 0xb6, 0x0f, 0xcf, 0x68, 0x22, 0x9e, 0xa7, 0x82, 0xcd, 0x59, - 0x18, 0x08, 0x96, 0x26, 0xe4, 0x36, 0xd8, 0x69, 0x1c, 0xf9, 0x17, 0x72, 0xac, 0x9f, 0xc6, 0xda, - 0xdf, 0x6d, 0xb0, 0x13, 0xfa, 0x5a, 0x6b, 0xb7, 0xce, 0xd1, 0x4e, 0xe8, 0x6b, 0xa5, 0xfd, 0x11, - 0x8c, 0x22, 0x1a, 0x53, 0x41, 0xfd, 0xb2, 0xae, 0xb2, 0xe8, 0x43, 0x05, 0x62, 0x3d, 0xb9, 0xfb, - 0xab, 0x05, 0x76, 0x59, 0x5e, 0x72, 0x15, 0x2e, 0x4b, 0x73, 0x3e, 0x8b, 0x74, 0x52, 0x3d, 0xb9, - 0x7d, 0x1a, 0x49, 0xae, 0xa6, 0xf3, 0x39, 0xa7, 0x02, 0xdd, 0xb6, 0x3d, 0xbd, 0x93, 0x77, 0xcd, - 0xd9, 0x8f, 0x8a, 0x9e, 0x1d, 
0x0f, 0xd7, 0xb2, 0x06, 0x4b, 0xc1, 0x96, 0x14, 0xaf, 0xa5, 0xed, - 0xa9, 0x0d, 0x19, 0x43, 0x97, 0xfa, 0x22, 0x58, 0x20, 0xef, 0x6c, 0xaf, 0x43, 0x5f, 0x06, 0x0b, - 0xf2, 0x31, 0x5c, 0xe1, 0x69, 0x9e, 0x85, 0xd4, 0x2f, 0xdc, 0xf6, 0x50, 0x3a, 0x54, 0xe8, 0x13, - 0x74, 0xee, 0xfe, 0xd9, 0x82, 0x2b, 0xd5, 0x1b, 0x25, 0xd7, 0xc0, 0xc6, 0x13, 0xe8, 0xdc, 0x42, - 0xe7, 0xd8, 0x25, 0x8e, 0x2b, 0x01, 0xb4, 0xcc, 0x00, 0x8a, 0x23, 0xcb, 0x34, 0x52, 0xf1, 0x8e, - 0xd4, 0x91, 0x67, 0x69, 0x44, 0xe5, 0x4d, 0xe6, 0x2c, 0xc2, 0x88, 0x47, 0x9e, 0x5c, 0x4a, 0x64, - 0xc1, 0x22, 0xfd, 0x4a, 0xe4, 0x52, 0xd6, 0x20, 0xcc, 0xd0, 0x6e, 0x4f, 0xd5, 0x40, 0xed, 0x64, - 0x0d, 0x96, 0x12, 0xbd, 0xac, 0x12, 0x93, 0x6b, 0x32, 0x85, 0x41, 0x46, 0x57, 0xb1, 0xbe, 0x66, - 0xa7, 0x8f, 0x22, 0x13, 0x22, 0xd7, 0x01, 0xc2, 0x34, 0x8e, 0x69, 0x88, 0x0a, 0x36, 0x2a, 0x18, - 0x88, 0xbc, 0x0a, 0x21, 0x62, 0x9f, 0xd3, 0xd0, 0x81, 0xa9, 0xb5, 0xdf, 0xf5, 0x7a, 0x42, 0xc4, - 0xc7, 0x34, 0x94, 0x79, 0xe4, 0x9c, 0x66, 0x3e, 0xbe, 0xb1, 0x01, 0x9e, 0xeb, 0x4b, 0x00, 0xbb, - 0xc1, 0x1e, 0xc0, 0x22, 0x4b, 0xf3, 0x95, 0x92, 0x0e, 0xa7, 0x6d, 0xd9, 0x72, 0x10, 0x41, 0xf1, - 0x4d, 0xb8, 0xc2, 0xdf, 0x2e, 0x63, 0x96, 0x9c, 0xfa, 0x22, 0xc8, 0x16, 0x54, 0x38, 0x23, 0x34, - 0x30, 0xd2, 0xe8, 0x4b, 0x04, 0xdd, 0x6f, 0x81, 0x1c, 0x64, 0x34, 0x10, 0xf4, 0x5f, 0x74, 0xd7, - 0xb2, 0x53, 0xb6, 0x2e, 0xec, 0x94, 0xff, 0x87, 0x71, 0xc5, 0xb4, 0x6a, 0x34, 0xd2, 0xe3, 0xab, - 0x55, 0xf4, 0xbe, 0x3c, 0x56, 0x4c, 0x6b, 0x8f, 0x3f, 0x59, 0x40, 0x1e, 0xe3, 0x4b, 0xf8, 0x6f, - 0x23, 0x44, 0x72, 0x58, 0xb6, 0x36, 0xf5, 0xd2, 0xa2, 0x40, 0x04, 0xba, 0xf9, 0x0e, 0x19, 0x57, - 0xf6, 0x1f, 0x07, 0x22, 0xd0, 0x0d, 0x30, 0xa3, 0x61, 0x9e, 0xc9, 0x7e, 0x8c, 0xbc, 0xc2, 0x06, - 0xe8, 0x15, 0x90, 0x0c, 0xb4, 0x12, 0x90, 0x0e, 0xf4, 0x37, 0x0b, 0xc6, 0x0f, 0x39, 0x67, 0x8b, - 0xe4, 0x9b, 0x34, 0xce, 0x97, 0xb4, 0x88, 0x74, 0x07, 0xba, 0x61, 0x9a, 0x27, 0x02, 0xa3, 0xec, - 0x7a, 0x6a, 0xb3, 0x41, 0xab, 0x56, 0x8d, 0x56, 0x1b, 0xc4, 0x6c, 
0xd7, 0x89, 0x69, 0x10, 0xaf, - 0x53, 0x21, 0xde, 0x87, 0x30, 0x90, 0xe9, 0xf9, 0x21, 0x4d, 0x04, 0xcd, 0xf4, 0x3b, 0x06, 0x09, - 0x1d, 0x20, 0xe2, 0x9e, 0xc1, 0x4e, 0x35, 0x50, 0x3d, 0x45, 0xce, 0xed, 0x2a, 0xf2, 0xd5, 0x65, - 0xb1, 0x8e, 0x52, 0x2e, 0x25, 0x7f, 0x57, 0xf9, 0x49, 0xcc, 0x42, 0x5f, 0x0a, 0x54, 0x74, 0xb6, - 0x42, 0x5e, 0x65, 0xf1, 0x3a, 0xe7, 0x8e, 0x91, 0xb3, 0xfb, 0x39, 0x8c, 0xd5, 0x10, 0xaf, 0x16, - 0x68, 0x0f, 0xe0, 0x0c, 0x01, 0x9f, 0x45, 0x6a, 0x7e, 0xd9, 0x9e, 0xad, 0x90, 0xa7, 0x11, 0x77, - 0xbf, 0x04, 0xfb, 0x28, 0x55, 0x39, 0x73, 0x72, 0x07, 0xec, 0xb8, 0xd8, 0xe8, 0x51, 0x47, 0xd6, - 0x7c, 0x2a, 0xf4, 0xbc, 0xb5, 0x92, 0xfb, 0x00, 0xfa, 0x05, 0x5c, 0xe4, 0x61, 0x9d, 0x97, 0x47, - 0x6b, 0x23, 0x0f, 0xf7, 0x0f, 0x0b, 0x76, 0xaa, 0x21, 0xeb, 0x52, 0xbd, 0x82, 0x51, 0xe9, 0xc2, - 0x5f, 0x06, 0x2b, 0x1d, 0xcb, 0x1d, 0x33, 0x96, 0xfa, 0xb1, 0x32, 0x40, 0xfe, 0x2c, 0x58, 0x29, - 0xf6, 0x0c, 0x63, 0x03, 0x9a, 0xbc, 0x84, 0xed, 0x9a, 0x4a, 0xc3, 0xf4, 0xba, 0x65, 0x4e, 0xaf, - 0xca, 0x04, 0x2e, 0x4f, 0x9b, 0x23, 0xed, 0x3e, 0x5c, 0x55, 0x84, 0x3d, 0x28, 0xf9, 0x55, 0xd4, - 0xbe, 0x4a, 0x43, 0x6b, 0x93, 0x86, 0xee, 0x04, 0x9c, 0xfa, 0x51, 0x4d, 0xf8, 0x05, 0x6c, 0x1f, - 0x8b, 0x40, 0x30, 0x2e, 0x58, 0x58, 0x7e, 0x4a, 0x6d, 0xf0, 0xd6, 0x7a, 0x57, 0x43, 0xad, 0x33, - 0x7f, 0x0b, 0xda, 0x42, 0x14, 0x9c, 0x92, 0x4b, 0x79, 0x0b, 0xc4, 0xf4, 0xa4, 0xef, 0xe0, 0x3d, - 0xb8, 0x92, 0x7c, 0x10, 0xa9, 0x08, 0x62, 0x35, 0xb0, 0x3a, 0x38, 0xb0, 0x6c, 0x44, 0x70, 0x62, - 0xa9, 0x9e, 0x1e, 0x29, 0x69, 0x57, 0x8d, 0x33, 0x09, 0xa0, 0x70, 0x0f, 0x00, 0x9f, 0x8f, 0x62, - 0x7e, 0x4f, 0x9d, 0x95, 0xc8, 0x81, 0x04, 0xee, 0xfe, 0xd5, 0x85, 0xe1, 0x31, 0x0d, 0x5e, 0x53, - 0x1a, 0xc9, 0x79, 0x99, 0x91, 0x45, 0xc1, 0xad, 0xea, 0x37, 0x2d, 0xb9, 0xb9, 0x49, 0xa2, 0xc6, - 0x8f, 0xe8, 0xc9, 0x27, 0xef, 0x52, 0xd3, 0xd7, 0x74, 0x89, 0x1c, 0xc1, 0xc0, 0xf8, 0x68, 0x24, - 0xbb, 0xc6, 0xc1, 0xda, 0xb7, 0xf0, 0x64, 0xef, 0x1c, 0xa9, 0x69, 0xcd, 0x98, 0x0c, 0xa6, 0xb5, - 0xfa, 
0x2c, 0x32, 0xad, 0x35, 0x8d, 0x13, 0xb4, 0x66, 0x74, 0x7d, 0xd3, 0x5a, 0x7d, 0xce, 0x98, - 0xd6, 0x9a, 0x46, 0x05, 0x5a, 0x33, 0x5a, 0xb3, 0x69, 0xad, 0x3e, 0x42, 0x4c, 0x6b, 0x4d, 0xfd, - 0xfc, 0x12, 0x79, 0x01, 0x43, 0xb3, 0x4f, 0x12, 0xe3, 0x40, 0x43, 0xa3, 0x9f, 0x5c, 0x3f, 0x4f, - 0x6c, 0x1a, 0x34, 0xdb, 0x82, 0x69, 0xb0, 0xa1, 0x31, 0x9a, 0x06, 0x9b, 0xba, 0x89, 0x7b, 0x89, - 0x7c, 0x07, 0x5b, 0x9b, 0xcf, 0x93, 0xdc, 0xd8, 0x4c, 0xab, 0xf6, 0xea, 0x27, 0xee, 0x45, 0x2a, - 0xa5, 0xf1, 0xa7, 0x00, 0xeb, 0x57, 0x47, 0xae, 0xad, 0xcf, 0xd4, 0x5e, 0xfd, 0x64, 0xb7, 0x59, - 0x58, 0x98, 0x7a, 0x74, 0x1d, 0xb6, 0xb8, 0xa2, 0xfe, 0x9c, 0xcf, 0xc2, 0x98, 0xd1, 0x44, 0x3c, - 0x02, 0x7c, 0x05, 0x5f, 0xcb, 0x3f, 0xc7, 0x93, 0x1e, 0xfe, 0x40, 0x7e, 0xf6, 0x77, 0x00, 0x00, - 0x00, 0xff, 0xff, 0x8d, 0x38, 0xa9, 0x9f, 0x4f, 0x0e, 0x00, 0x00, -} diff --git a/weed/pb/filer_pb/filer_client.go b/weed/pb/filer_pb/filer_client.go new file mode 100644 index 000000000..535a3c247 --- /dev/null +++ b/weed/pb/filer_pb/filer_client.go @@ -0,0 +1,237 @@ +package filer_pb + +import ( + "context" + "errors" + "fmt" + "io" + "math" + "os" + "time" + + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/util" +) + +var ( + OS_UID = uint32(os.Getuid()) + OS_GID = uint32(os.Getgid()) +) + +type FilerClient interface { + WithFilerClient(fn func(SeaweedFilerClient) error) error + AdjustedUrl(hostAndPort string) string +} + +func GetEntry(filerClient FilerClient, fullFilePath util.FullPath) (entry *Entry, err error) { + + dir, name := fullFilePath.DirAndName() + + err = filerClient.WithFilerClient(func(client SeaweedFilerClient) error { + + request := &LookupDirectoryEntryRequest{ + Directory: dir, + Name: name, + } + + // glog.V(3).Infof("read %s request: %v", fullFilePath, request) + resp, err := LookupEntry(client, request) + if err != nil { + if err == ErrNotFound { + return nil + } + glog.V(3).Infof("read %s %v: %v", fullFilePath, resp, err) + return err + } + + 
if resp.Entry == nil { + // glog.V(3).Infof("read %s entry: %v", fullFilePath, entry) + return nil + } + + entry = resp.Entry + return nil + }) + + return +} + +type EachEntryFunciton func(entry *Entry, isLast bool) error + +func ReadDirAllEntries(filerClient FilerClient, fullDirPath util.FullPath, prefix string, fn EachEntryFunciton) (err error) { + + return doList(filerClient, fullDirPath, prefix, fn, "", false, math.MaxUint32) + +} + +func List(filerClient FilerClient, parentDirectoryPath, prefix string, fn EachEntryFunciton, startFrom string, inclusive bool, limit uint32) (err error) { + + return doList(filerClient, util.FullPath(parentDirectoryPath), prefix, fn, startFrom, inclusive, limit) + +} + +func doList(filerClient FilerClient, fullDirPath util.FullPath, prefix string, fn EachEntryFunciton, startFrom string, inclusive bool, limit uint32) (err error) { + + err = filerClient.WithFilerClient(func(client SeaweedFilerClient) error { + + request := &ListEntriesRequest{ + Directory: string(fullDirPath), + Prefix: prefix, + StartFromFileName: startFrom, + Limit: limit, + InclusiveStartFrom: inclusive, + } + + glog.V(3).Infof("read directory: %v", request) + ctx, cancel := context.WithCancel(context.Background()) + stream, err := client.ListEntries(ctx, request) + if err != nil { + return fmt.Errorf("list %s: %v", fullDirPath, err) + } + defer cancel() + + var prevEntry *Entry + for { + resp, recvErr := stream.Recv() + if recvErr != nil { + if recvErr == io.EOF { + if prevEntry != nil { + if err := fn(prevEntry, true); err != nil { + return err + } + } + break + } else { + return recvErr + } + } + if prevEntry != nil { + if err := fn(prevEntry, false); err != nil { + return err + } + } + prevEntry = resp.Entry + } + + return nil + + }) + + return +} + +func Exists(filerClient FilerClient, parentDirectoryPath string, entryName string, isDirectory bool) (exists bool, err error) { + + err = filerClient.WithFilerClient(func(client SeaweedFilerClient) error { + + 
request := &LookupDirectoryEntryRequest{ + Directory: parentDirectoryPath, + Name: entryName, + } + + glog.V(4).Infof("exists entry %v/%v: %v", parentDirectoryPath, entryName, request) + resp, err := LookupEntry(client, request) + if err != nil { + if err == ErrNotFound { + exists = false + return nil + } + glog.V(0).Infof("exists entry %v: %v", request, err) + return fmt.Errorf("exists entry %s/%s: %v", parentDirectoryPath, entryName, err) + } + + exists = resp.Entry.IsDirectory == isDirectory + + return nil + }) + + return +} + +func Mkdir(filerClient FilerClient, parentDirectoryPath string, dirName string, fn func(entry *Entry)) error { + return filerClient.WithFilerClient(func(client SeaweedFilerClient) error { + + entry := &Entry{ + Name: dirName, + IsDirectory: true, + Attributes: &FuseAttributes{ + Mtime: time.Now().Unix(), + Crtime: time.Now().Unix(), + FileMode: uint32(0777 | os.ModeDir), + Uid: OS_UID, + Gid: OS_GID, + }, + } + + if fn != nil { + fn(entry) + } + + request := &CreateEntryRequest{ + Directory: parentDirectoryPath, + Entry: entry, + } + + glog.V(1).Infof("mkdir: %v", request) + if err := CreateEntry(client, request); err != nil { + glog.V(0).Infof("mkdir %v: %v", request, err) + return fmt.Errorf("mkdir %s/%s: %v", parentDirectoryPath, dirName, err) + } + + return nil + }) +} + +func MkFile(filerClient FilerClient, parentDirectoryPath string, fileName string, chunks []*FileChunk) error { + return filerClient.WithFilerClient(func(client SeaweedFilerClient) error { + + entry := &Entry{ + Name: fileName, + IsDirectory: false, + Attributes: &FuseAttributes{ + Mtime: time.Now().Unix(), + Crtime: time.Now().Unix(), + FileMode: uint32(0770), + Uid: OS_UID, + Gid: OS_GID, + }, + Chunks: chunks, + } + + request := &CreateEntryRequest{ + Directory: parentDirectoryPath, + Entry: entry, + } + + glog.V(1).Infof("create file: %s/%s", parentDirectoryPath, fileName) + if err := CreateEntry(client, request); err != nil { + glog.V(0).Infof("create file 
%v:%v", request, err) + return fmt.Errorf("create file %s/%s: %v", parentDirectoryPath, fileName, err) + } + + return nil + }) +} + +func Remove(filerClient FilerClient, parentDirectoryPath, name string, isDeleteData, isRecursive, ignoreRecursiveErr, isFromOtherCluster bool) error { + return filerClient.WithFilerClient(func(client SeaweedFilerClient) error { + + if resp, err := client.DeleteEntry(context.Background(), &DeleteEntryRequest{ + Directory: parentDirectoryPath, + Name: name, + IsDeleteData: isDeleteData, + IsRecursive: isRecursive, + IgnoreRecursiveError: ignoreRecursiveErr, + IsFromOtherCluster: isFromOtherCluster, + }); err != nil { + return err + } else { + if resp.Error != "" { + return errors.New(resp.Error) + } + } + + return nil + + }) +} diff --git a/weed/pb/filer_pb/filer_client_bfs.go b/weed/pb/filer_pb/filer_client_bfs.go new file mode 100644 index 000000000..4e5b65f12 --- /dev/null +++ b/weed/pb/filer_pb/filer_client_bfs.go @@ -0,0 +1,63 @@ +package filer_pb + +import ( + "fmt" + "sync" + "time" + + "github.com/chrislusf/seaweedfs/weed/util" +) + +func TraverseBfs(filerClient FilerClient, parentPath util.FullPath, fn func(parentPath util.FullPath, entry *Entry)) (err error) { + + K := 5 + + var jobQueueWg sync.WaitGroup + queue := util.NewQueue() + jobQueueWg.Add(1) + queue.Enqueue(parentPath) + var isTerminating bool + + for i := 0; i < K; i++ { + go func() { + for { + if isTerminating { + break + } + t := queue.Dequeue() + if t == nil { + time.Sleep(329 * time.Millisecond) + continue + } + dir := t.(util.FullPath) + processErr := processOneDirectory(filerClient, dir, queue, &jobQueueWg, fn) + if processErr != nil { + err = processErr + } + jobQueueWg.Done() + } + }() + } + jobQueueWg.Wait() + isTerminating = true + return +} + +func processOneDirectory(filerClient FilerClient, parentPath util.FullPath, queue *util.Queue, jobQueueWg *sync.WaitGroup, fn func(parentPath util.FullPath, entry *Entry)) (err error) { + + return 
ReadDirAllEntries(filerClient, parentPath, "", func(entry *Entry, isLast bool) error { + + fn(parentPath, entry) + + if entry.IsDirectory { + subDir := fmt.Sprintf("%s/%s", parentPath, entry.Name) + if parentPath == "/" { + subDir = "/" + entry.Name + } + jobQueueWg.Add(1) + queue.Enqueue(util.FullPath(subDir)) + } + return nil + }) + +} diff --git a/weed/pb/filer_pb/filer_pb_helper.go b/weed/pb/filer_pb/filer_pb_helper.go new file mode 100644 index 000000000..96ab2154f --- /dev/null +++ b/weed/pb/filer_pb/filer_pb_helper.go @@ -0,0 +1,105 @@ +package filer_pb + +import ( + "context" + "errors" + "fmt" + "strings" + + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/storage/needle" +) + +func toFileIdObject(fileIdStr string) (*FileId, error) { + t, err := needle.ParseFileIdFromString(fileIdStr) + if err != nil { + return nil, err + } + return &FileId{ + VolumeId: uint32(t.VolumeId), + Cookie: uint32(t.Cookie), + FileKey: uint64(t.Key), + }, nil + +} + +func (fid *FileId) toFileIdString() string { + return needle.NewFileId(needle.VolumeId(fid.VolumeId), fid.FileKey, fid.Cookie).String() +} + +func (c *FileChunk) GetFileIdString() string { + if c.FileId != "" { + return c.FileId + } + if c.Fid != nil { + c.FileId = c.Fid.toFileIdString() + return c.FileId + } + return "" +} + +func BeforeEntrySerialization(chunks []*FileChunk) { + + for _, chunk := range chunks { + + if chunk.FileId != "" { + if fid, err := toFileIdObject(chunk.FileId); err == nil { + chunk.Fid = fid + chunk.FileId = "" + } + } + + if chunk.SourceFileId != "" { + if fid, err := toFileIdObject(chunk.SourceFileId); err == nil { + chunk.SourceFid = fid + chunk.SourceFileId = "" + } + } + + } +} + +func AfterEntryDeserialization(chunks []*FileChunk) { + + for _, chunk := range chunks { + + if chunk.Fid != nil && chunk.FileId == "" { + chunk.FileId = chunk.Fid.toFileIdString() + } + + if chunk.SourceFid != nil && chunk.SourceFileId == "" { + chunk.SourceFileId = 
chunk.SourceFid.toFileIdString() + } + + } +} + +func CreateEntry(client SeaweedFilerClient, request *CreateEntryRequest) error { + resp, err := client.CreateEntry(context.Background(), request) + if err != nil { + glog.V(1).Infof("create entry %s/%s %v: %v", request.Directory, request.Entry.Name, request.OExcl, err) + return fmt.Errorf("CreateEntry: %v", err) + } + if resp.Error != "" { + glog.V(1).Infof("create entry %s/%s %v: %v", request.Directory, request.Entry.Name, request.OExcl, err) + return fmt.Errorf("CreateEntry : %v", resp.Error) + } + return nil +} + +func LookupEntry(client SeaweedFilerClient, request *LookupDirectoryEntryRequest) (*LookupDirectoryEntryResponse, error) { + resp, err := client.LookupDirectoryEntry(context.Background(), request) + if err != nil { + if err == ErrNotFound || strings.Contains(err.Error(), ErrNotFound.Error()) { + return nil, ErrNotFound + } + glog.V(3).Infof("read %s/%v: %v", request.Directory, request.Name, err) + return nil, fmt.Errorf("LookupEntry1: %v", err) + } + if resp.Entry == nil { + return nil, ErrNotFound + } + return resp, nil +} + +var ErrNotFound = errors.New("filer: no entry is found in filer store") diff --git a/weed/pb/filer_pb/filer_pb_helper_test.go b/weed/pb/filer_pb/filer_pb_helper_test.go new file mode 100644 index 000000000..d4468c011 --- /dev/null +++ b/weed/pb/filer_pb/filer_pb_helper_test.go @@ -0,0 +1,17 @@ +package filer_pb + +import ( + "testing" + + "github.com/golang/protobuf/proto" +) + +func TestFileIdSize(t *testing.T) { + fileIdStr := "11745,0293434534cbb9892b" + + fid, _ := toFileIdObject(fileIdStr) + bytes, _ := proto.Marshal(fid) + + println(len(fileIdStr)) + println(len(bytes)) +} diff --git a/weed/pb/grpc_client_server.go b/weed/pb/grpc_client_server.go new file mode 100644 index 000000000..ce706e282 --- /dev/null +++ b/weed/pb/grpc_client_server.go @@ -0,0 +1,197 @@ +package pb + +import ( + "context" + "fmt" + "net/http" + "strconv" + "strings" + "sync" + "time" + + 
"google.golang.org/grpc" + "google.golang.org/grpc/keepalive" + + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/pb/master_pb" + "github.com/chrislusf/seaweedfs/weed/pb/messaging_pb" +) + +const ( + Max_Message_Size = 1 << 30 // 1 GB +) + +var ( + // cache grpc connections + grpcClients = make(map[string]*grpc.ClientConn) + grpcClientsLock sync.Mutex +) + +func init() { + http.DefaultTransport.(*http.Transport).MaxIdleConnsPerHost = 1024 +} + +func NewGrpcServer(opts ...grpc.ServerOption) *grpc.Server { + var options []grpc.ServerOption + options = append(options, + grpc.KeepaliveParams(keepalive.ServerParameters{ + Time: 10 * time.Second, // wait time before ping if no activity + Timeout: 20 * time.Second, // ping timeout + }), + grpc.KeepaliveEnforcementPolicy(keepalive.EnforcementPolicy{ + MinTime: 60 * time.Second, // min time a client should wait before sending a ping + PermitWithoutStream: false, + }), + grpc.MaxRecvMsgSize(Max_Message_Size), + grpc.MaxSendMsgSize(Max_Message_Size), + ) + for _, opt := range opts { + if opt != nil { + options = append(options, opt) + } + } + return grpc.NewServer(options...) +} + +func GrpcDial(ctx context.Context, address string, opts ...grpc.DialOption) (*grpc.ClientConn, error) { + // opts = append(opts, grpc.WithBlock()) + // opts = append(opts, grpc.WithTimeout(time.Duration(5*time.Second))) + var options []grpc.DialOption + options = append(options, + // grpc.WithInsecure(), + grpc.WithDefaultCallOptions( + grpc.MaxCallSendMsgSize(Max_Message_Size), + grpc.MaxCallRecvMsgSize(Max_Message_Size), + ), + grpc.WithKeepaliveParams(keepalive.ClientParameters{ + Time: 30 * time.Second, // client ping server if no activity for this long + Timeout: 20 * time.Second, + PermitWithoutStream: false, + })) + for _, opt := range opts { + if opt != nil { + options = append(options, opt) + } + } + return grpc.DialContext(ctx, address, options...) 
+} + +func getOrCreateConnection(address string, opts ...grpc.DialOption) (*grpc.ClientConn, error) { + + grpcClientsLock.Lock() + defer grpcClientsLock.Unlock() + + existingConnection, found := grpcClients[address] + if found { + return existingConnection, nil + } + + grpcConnection, err := GrpcDial(context.Background(), address, opts...) + if err != nil { + return nil, fmt.Errorf("fail to dial %s: %v", address, err) + } + + grpcClients[address] = grpcConnection + + return grpcConnection, nil +} + +func WithCachedGrpcClient(fn func(*grpc.ClientConn) error, address string, opts ...grpc.DialOption) error { + + grpcConnection, err := getOrCreateConnection(address, opts...) + if err != nil { + return fmt.Errorf("getOrCreateConnection %s: %v", address, err) + } + return fn(grpcConnection) +} + +func ParseServerToGrpcAddress(server string) (serverGrpcAddress string, err error) { + colonIndex := strings.LastIndex(server, ":") + if colonIndex < 0 { + return "", fmt.Errorf("server should have hostname:port format: %v", server) + } + + port, parseErr := strconv.ParseUint(server[colonIndex+1:], 10, 64) + if parseErr != nil { + return "", fmt.Errorf("server port parse error: %v", parseErr) + } + + grpcPort := int(port) + 10000 + + return fmt.Sprintf("%s:%d", server[:colonIndex], grpcPort), nil +} + +func ServerToGrpcAddress(server string) (serverGrpcAddress string) { + hostnameAndPort := strings.Split(server, ":") + if len(hostnameAndPort) != 2 { + return fmt.Sprintf("unexpected server address: %s", server) + } + + port, parseErr := strconv.ParseUint(hostnameAndPort[1], 10, 64) + if parseErr != nil { + return fmt.Sprintf("failed to parse port for %s:%s", hostnameAndPort[0], hostnameAndPort[1]) + } + + grpcPort := int(port) + 10000 + + return fmt.Sprintf("%s:%d", hostnameAndPort[0], grpcPort) +} + +func WithMasterClient(master string, grpcDialOption grpc.DialOption, fn func(client master_pb.SeaweedClient) error) error { + + masterGrpcAddress, parseErr := 
ParseServerToGrpcAddress(master) + if parseErr != nil { + return fmt.Errorf("failed to parse master grpc %v: %v", master, parseErr) + } + + return WithCachedGrpcClient(func(grpcConnection *grpc.ClientConn) error { + client := master_pb.NewSeaweedClient(grpcConnection) + return fn(client) + }, masterGrpcAddress, grpcDialOption) + +} + +func WithBrokerGrpcClient(brokerGrpcAddress string, grpcDialOption grpc.DialOption, fn func(client messaging_pb.SeaweedMessagingClient) error) error { + + return WithCachedGrpcClient(func(grpcConnection *grpc.ClientConn) error { + client := messaging_pb.NewSeaweedMessagingClient(grpcConnection) + return fn(client) + }, brokerGrpcAddress, grpcDialOption) + +} + +func WithFilerClient(filer string, grpcDialOption grpc.DialOption, fn func(client filer_pb.SeaweedFilerClient) error) error { + + filerGrpcAddress, parseErr := ParseServerToGrpcAddress(filer) + if parseErr != nil { + return fmt.Errorf("failed to parse filer grpc %v: %v", filer, parseErr) + } + + return WithGrpcFilerClient(filerGrpcAddress, grpcDialOption, fn) + +} + +func WithGrpcFilerClient(filerGrpcAddress string, grpcDialOption grpc.DialOption, fn func(client filer_pb.SeaweedFilerClient) error) error { + + return WithCachedGrpcClient(func(grpcConnection *grpc.ClientConn) error { + client := filer_pb.NewSeaweedFilerClient(grpcConnection) + return fn(client) + }, filerGrpcAddress, grpcDialOption) + +} + +func ParseFilerGrpcAddress(filer string) (filerGrpcAddress string, err error) { + hostnameAndPort := strings.Split(filer, ":") + if len(hostnameAndPort) != 2 { + return "", fmt.Errorf("filer should have hostname:port format: %v", hostnameAndPort) + } + + filerPort, parseErr := strconv.ParseUint(hostnameAndPort[1], 10, 64) + if parseErr != nil { + return "", fmt.Errorf("filer port parse error: %v", parseErr) + } + + filerGrpcPort := int(filerPort) + 10000 + + return fmt.Sprintf("%s:%d", hostnameAndPort[0], filerGrpcPort), nil +} diff --git a/weed/pb/iam.proto 
b/weed/pb/iam.proto new file mode 100644 index 000000000..558bd2b70 --- /dev/null +++ b/weed/pb/iam.proto @@ -0,0 +1,51 @@ +syntax = "proto3"; + +package iam_pb; + +option go_package = "github.com/chrislusf/seaweedfs/weed/pb/iam_pb"; +option java_package = "seaweedfs.client"; +option java_outer_classname = "IamProto"; + +////////////////////////////////////////////////// + +service SeaweedIdentityAccessManagement { + +} + +////////////////////////////////////////////////// + +message S3ApiConfiguration { + repeated Identity identities = 1; +} + +message Identity { + string name = 1; + repeated Credential credentials = 2; + repeated string actions = 3; +} + +message Credential { + string access_key = 1; + string secret_key = 2; + // uint64 expiration = 3; + // bool is_disabled = 4; +} + +/* +message Policy { + repeated Statement statements = 1; +} + +message Statement { + repeated Action action = 1; + repeated Resource resource = 2; +} + +message Action { + string action = 1; +} +message Resource { + string bucket = 1; + // string path = 2; +} +*/ \ No newline at end of file diff --git a/weed/pb/iam_pb/iam.pb.go b/weed/pb/iam_pb/iam.pb.go new file mode 100644 index 000000000..93bc854cc --- /dev/null +++ b/weed/pb/iam_pb/iam.pb.go @@ -0,0 +1,356 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.24.0 +// protoc v3.12.3 +// source: iam.proto + +package iam_pb + +import ( + context "context" + proto "github.com/golang/protobuf/proto" + grpc "google.golang.org/grpc" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// This is a compile-time assertion that a sufficiently up-to-date version +// of the legacy proto package is being used. +const _ = proto.ProtoPackageIsVersion4 + +type S3ApiConfiguration struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Identities []*Identity `protobuf:"bytes,1,rep,name=identities,proto3" json:"identities,omitempty"` +} + +func (x *S3ApiConfiguration) Reset() { + *x = S3ApiConfiguration{} + if protoimpl.UnsafeEnabled { + mi := &file_iam_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *S3ApiConfiguration) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*S3ApiConfiguration) ProtoMessage() {} + +func (x *S3ApiConfiguration) ProtoReflect() protoreflect.Message { + mi := &file_iam_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use S3ApiConfiguration.ProtoReflect.Descriptor instead. 
+func (*S3ApiConfiguration) Descriptor() ([]byte, []int) { + return file_iam_proto_rawDescGZIP(), []int{0} +} + +func (x *S3ApiConfiguration) GetIdentities() []*Identity { + if x != nil { + return x.Identities + } + return nil +} + +type Identity struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Credentials []*Credential `protobuf:"bytes,2,rep,name=credentials,proto3" json:"credentials,omitempty"` + Actions []string `protobuf:"bytes,3,rep,name=actions,proto3" json:"actions,omitempty"` +} + +func (x *Identity) Reset() { + *x = Identity{} + if protoimpl.UnsafeEnabled { + mi := &file_iam_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Identity) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Identity) ProtoMessage() {} + +func (x *Identity) ProtoReflect() protoreflect.Message { + mi := &file_iam_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Identity.ProtoReflect.Descriptor instead. 
+func (*Identity) Descriptor() ([]byte, []int) { + return file_iam_proto_rawDescGZIP(), []int{1} +} + +func (x *Identity) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *Identity) GetCredentials() []*Credential { + if x != nil { + return x.Credentials + } + return nil +} + +func (x *Identity) GetActions() []string { + if x != nil { + return x.Actions + } + return nil +} + +type Credential struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + AccessKey string `protobuf:"bytes,1,opt,name=access_key,json=accessKey,proto3" json:"access_key,omitempty"` + SecretKey string `protobuf:"bytes,2,opt,name=secret_key,json=secretKey,proto3" json:"secret_key,omitempty"` +} + +func (x *Credential) Reset() { + *x = Credential{} + if protoimpl.UnsafeEnabled { + mi := &file_iam_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Credential) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Credential) ProtoMessage() {} + +func (x *Credential) ProtoReflect() protoreflect.Message { + mi := &file_iam_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Credential.ProtoReflect.Descriptor instead. 
+func (*Credential) Descriptor() ([]byte, []int) { + return file_iam_proto_rawDescGZIP(), []int{2} +} + +func (x *Credential) GetAccessKey() string { + if x != nil { + return x.AccessKey + } + return "" +} + +func (x *Credential) GetSecretKey() string { + if x != nil { + return x.SecretKey + } + return "" +} + +var File_iam_proto protoreflect.FileDescriptor + +var file_iam_proto_rawDesc = []byte{ + 0x0a, 0x09, 0x69, 0x61, 0x6d, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x06, 0x69, 0x61, 0x6d, + 0x5f, 0x70, 0x62, 0x22, 0x46, 0x0a, 0x12, 0x53, 0x33, 0x41, 0x70, 0x69, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x30, 0x0a, 0x0a, 0x69, 0x64, 0x65, + 0x6e, 0x74, 0x69, 0x74, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x10, 0x2e, + 0x69, 0x61, 0x6d, 0x5f, 0x70, 0x62, 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, + 0x0a, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x69, 0x65, 0x73, 0x22, 0x6e, 0x0a, 0x08, 0x49, + 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x34, 0x0a, 0x0b, 0x63, + 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x12, 0x2e, 0x69, 0x61, 0x6d, 0x5f, 0x70, 0x62, 0x2e, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, + 0x74, 0x69, 0x61, 0x6c, 0x52, 0x0b, 0x63, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, + 0x73, 0x12, 0x18, 0x0a, 0x07, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x03, + 0x28, 0x09, 0x52, 0x07, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x4a, 0x0a, 0x0a, 0x43, + 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x12, 0x1d, 0x0a, 0x0a, 0x61, 0x63, 0x63, + 0x65, 0x73, 0x73, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x61, + 0x63, 0x63, 0x65, 0x73, 0x73, 0x4b, 0x65, 0x79, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x65, 0x63, 0x72, + 0x65, 0x74, 0x5f, 0x6b, 0x65, 
0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x65, + 0x63, 0x72, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x32, 0x21, 0x0a, 0x1f, 0x53, 0x65, 0x61, 0x77, 0x65, + 0x65, 0x64, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, + 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x42, 0x4b, 0x0a, 0x10, 0x73, 0x65, + 0x61, 0x77, 0x65, 0x65, 0x64, 0x66, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x42, 0x08, + 0x49, 0x61, 0x6d, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x5a, 0x2d, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, + 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x68, 0x72, 0x69, 0x73, 0x6c, 0x75, 0x73, 0x66, 0x2f, 0x73, + 0x65, 0x61, 0x77, 0x65, 0x65, 0x64, 0x66, 0x73, 0x2f, 0x77, 0x65, 0x65, 0x64, 0x2f, 0x70, 0x62, + 0x2f, 0x69, 0x61, 0x6d, 0x5f, 0x70, 0x62, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_iam_proto_rawDescOnce sync.Once + file_iam_proto_rawDescData = file_iam_proto_rawDesc +) + +func file_iam_proto_rawDescGZIP() []byte { + file_iam_proto_rawDescOnce.Do(func() { + file_iam_proto_rawDescData = protoimpl.X.CompressGZIP(file_iam_proto_rawDescData) + }) + return file_iam_proto_rawDescData +} + +var file_iam_proto_msgTypes = make([]protoimpl.MessageInfo, 3) +var file_iam_proto_goTypes = []interface{}{ + (*S3ApiConfiguration)(nil), // 0: iam_pb.S3ApiConfiguration + (*Identity)(nil), // 1: iam_pb.Identity + (*Credential)(nil), // 2: iam_pb.Credential +} +var file_iam_proto_depIdxs = []int32{ + 1, // 0: iam_pb.S3ApiConfiguration.identities:type_name -> iam_pb.Identity + 2, // 1: iam_pb.Identity.credentials:type_name -> iam_pb.Credential + 2, // [2:2] is the sub-list for method output_type + 2, // [2:2] is the sub-list for method input_type + 2, // [2:2] is the sub-list for extension type_name + 2, // [2:2] is the sub-list for extension extendee + 0, // [0:2] is the sub-list for field type_name +} + +func init() { file_iam_proto_init() } +func file_iam_proto_init() { + if File_iam_proto != nil { + return + } + if 
!protoimpl.UnsafeEnabled { + file_iam_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*S3ApiConfiguration); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_iam_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Identity); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_iam_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Credential); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_iam_proto_rawDesc, + NumEnums: 0, + NumMessages: 3, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_iam_proto_goTypes, + DependencyIndexes: file_iam_proto_depIdxs, + MessageInfos: file_iam_proto_msgTypes, + }.Build() + File_iam_proto = out.File + file_iam_proto_rawDesc = nil + file_iam_proto_goTypes = nil + file_iam_proto_depIdxs = nil +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConnInterface + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion6 + +// SeaweedIdentityAccessManagementClient is the client API for SeaweedIdentityAccessManagement service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. 
+type SeaweedIdentityAccessManagementClient interface { +} + +type seaweedIdentityAccessManagementClient struct { + cc grpc.ClientConnInterface +} + +func NewSeaweedIdentityAccessManagementClient(cc grpc.ClientConnInterface) SeaweedIdentityAccessManagementClient { + return &seaweedIdentityAccessManagementClient{cc} +} + +// SeaweedIdentityAccessManagementServer is the server API for SeaweedIdentityAccessManagement service. +type SeaweedIdentityAccessManagementServer interface { +} + +// UnimplementedSeaweedIdentityAccessManagementServer can be embedded to have forward compatible implementations. +type UnimplementedSeaweedIdentityAccessManagementServer struct { +} + +func RegisterSeaweedIdentityAccessManagementServer(s *grpc.Server, srv SeaweedIdentityAccessManagementServer) { + s.RegisterService(&_SeaweedIdentityAccessManagement_serviceDesc, srv) +} + +var _SeaweedIdentityAccessManagement_serviceDesc = grpc.ServiceDesc{ + ServiceName: "iam_pb.SeaweedIdentityAccessManagement", + HandlerType: (*SeaweedIdentityAccessManagementServer)(nil), + Methods: []grpc.MethodDesc{}, + Streams: []grpc.StreamDesc{}, + Metadata: "iam.proto", +} diff --git a/weed/pb/master.proto b/weed/pb/master.proto index 544160c06..4a612b8bc 100644 --- a/weed/pb/master.proto +++ b/weed/pb/master.proto @@ -2,12 +2,14 @@ syntax = "proto3"; package master_pb; +option go_package = "github.com/chrislusf/seaweedfs/weed/pb/master_pb"; + ////////////////////////////////////////////////// service Seaweed { rpc SendHeartbeat (stream Heartbeat) returns (stream HeartbeatResponse) { } - rpc KeepConnected (stream ClientListenRequest) returns (stream VolumeLocation) { + rpc KeepConnected (stream KeepConnectedRequest) returns (stream VolumeLocation) { } rpc LookupVolume (LookupVolumeRequest) returns (LookupVolumeResponse) { } @@ -15,6 +17,23 @@ service Seaweed { } rpc Statistics (StatisticsRequest) returns (StatisticsResponse) { } + rpc CollectionList (CollectionListRequest) returns (CollectionListResponse) { + } 
+ rpc CollectionDelete (CollectionDeleteRequest) returns (CollectionDeleteResponse) { + } + rpc VolumeList (VolumeListRequest) returns (VolumeListResponse) { + } + rpc LookupEcVolume (LookupEcVolumeRequest) returns (LookupEcVolumeResponse) { + } + rpc GetMasterConfiguration (GetMasterConfigurationRequest) returns (GetMasterConfigurationResponse) { + } + rpc ListMasterClients (ListMasterClientsRequest) returns (ListMasterClientsResponse) { + } + rpc LeaseAdminToken (LeaseAdminTokenRequest) returns (LeaseAdminTokenResponse) { + } + rpc ReleaseAdminToken (ReleaseAdminTokenRequest) returns (ReleaseAdminTokenResponse) { + } + } ////////////////////////////////////////////////// @@ -29,15 +48,26 @@ message Heartbeat { string rack = 7; uint32 admin_port = 8; repeated VolumeInformationMessage volumes = 9; - // delta volume ids - repeated uint32 new_vids = 10; - repeated uint32 deleted_vids = 11; + // delta volumes + repeated VolumeShortInformationMessage new_volumes = 10; + repeated VolumeShortInformationMessage deleted_volumes = 11; + bool has_no_volumes = 12; + + // erasure coding + repeated VolumeEcShardInformationMessage ec_shards = 16; + // delta erasure coding shards + repeated VolumeEcShardInformationMessage new_ec_shards = 17; + repeated VolumeEcShardInformationMessage deleted_ec_shards = 18; + bool has_no_ec_shards = 19; + } message HeartbeatResponse { - uint64 volumeSizeLimit = 1; - string secretKey = 2; - string leader = 3; + uint64 volume_size_limit = 1; + string leader = 2; + string metrics_address = 3; + uint32 metrics_interval_seconds = 4; + repeated StorageBackend storage_backends = 5; } message VolumeInformationMessage { @@ -51,6 +81,30 @@ message VolumeInformationMessage { uint32 replica_placement = 8; uint32 version = 9; uint32 ttl = 10; + uint32 compact_revision = 11; + int64 modified_at_second = 12; + string remote_storage_name = 13; + string remote_storage_key = 14; +} + +message VolumeShortInformationMessage { + uint32 id = 1; + string collection = 
3; + uint32 replica_placement = 8; + uint32 version = 9; + uint32 ttl = 10; +} + +message VolumeEcShardInformationMessage { + uint32 id = 1; + string collection = 2; + uint32 ec_index_bits = 3; +} + +message StorageBackend { + string type = 1; + string id = 2; + map properties = 3; } message Empty { @@ -65,8 +119,9 @@ message SuperBlockExtra { ErasureCoding erasure_coding = 1; } -message ClientListenRequest { +message KeepConnectedRequest { string name = 1; + uint32 grpc_port = 2; } message VolumeLocation { @@ -74,6 +129,7 @@ message VolumeLocation { string public_url = 2; repeated uint32 new_vids = 3; repeated uint32 deleted_vids = 4; + string leader = 5; // optional when leader is not itself } message LookupVolumeRequest { @@ -102,6 +158,8 @@ message AssignRequest { string data_center = 5; string rack = 6; string data_node = 7; + uint32 memory_map_max_size_mb = 8; + uint32 Writable_volume_count = 9; } message AssignResponse { string fid = 1; @@ -109,6 +167,7 @@ message AssignResponse { string public_url = 3; uint64 count = 4; string error = 5; + string auth = 6; } message StatisticsRequest { @@ -124,3 +183,119 @@ message StatisticsResponse { uint64 used_size = 5; uint64 file_count = 6; } + +// +// collection related +// + +message StorageType { + string replication = 1; + string ttl = 2; +} +message Collection { + string name = 1; +} +message CollectionListRequest { + bool include_normal_volumes = 1; + bool include_ec_volumes = 2; +} +message CollectionListResponse { + repeated Collection collections = 1; +} + +message CollectionDeleteRequest { + string name = 1; +} +message CollectionDeleteResponse { +} + +// +// volume related +// +message DataNodeInfo { + string id = 1; + uint64 volume_count = 2; + uint64 max_volume_count = 3; + uint64 free_volume_count = 4; + uint64 active_volume_count = 5; + repeated VolumeInformationMessage volume_infos = 6; + repeated VolumeEcShardInformationMessage ec_shard_infos = 7; + uint64 remote_volume_count = 8; +} +message RackInfo 
{ + string id = 1; + uint64 volume_count = 2; + uint64 max_volume_count = 3; + uint64 free_volume_count = 4; + uint64 active_volume_count = 5; + repeated DataNodeInfo data_node_infos = 6; + uint64 remote_volume_count = 7; +} +message DataCenterInfo { + string id = 1; + uint64 volume_count = 2; + uint64 max_volume_count = 3; + uint64 free_volume_count = 4; + uint64 active_volume_count = 5; + repeated RackInfo rack_infos = 6; + uint64 remote_volume_count = 7; +} +message TopologyInfo { + string id = 1; + uint64 volume_count = 2; + uint64 max_volume_count = 3; + uint64 free_volume_count = 4; + uint64 active_volume_count = 5; + repeated DataCenterInfo data_center_infos = 6; + uint64 remote_volume_count = 7; +} +message VolumeListRequest { +} +message VolumeListResponse { + TopologyInfo topology_info = 1; + uint64 volume_size_limit_mb = 2; +} + +message LookupEcVolumeRequest { + uint32 volume_id = 1; +} +message LookupEcVolumeResponse { + uint32 volume_id = 1; + message EcShardIdLocation { + uint32 shard_id = 1; + repeated Location locations = 2; + } + repeated EcShardIdLocation shard_id_locations = 2; +} + +message GetMasterConfigurationRequest { +} +message GetMasterConfigurationResponse { + string metrics_address = 1; + uint32 metrics_interval_seconds = 2; +} + +message ListMasterClientsRequest { + string client_type = 1; +} +message ListMasterClientsResponse { + repeated string grpc_addresses = 1; +} + +message LeaseAdminTokenRequest { + int64 previous_token = 1; + int64 previous_lock_time = 2; + string lock_name = 3; +} +message LeaseAdminTokenResponse { + int64 token = 1; + int64 lock_ts_ns = 2; +} + +message ReleaseAdminTokenRequest { + int64 previous_token = 1; + int64 previous_lock_time = 2; + string lock_name = 3; +} +message ReleaseAdminTokenResponse { +} diff --git a/weed/pb/master_pb/master.pb.go b/weed/pb/master_pb/master.pb.go index 894f08471..0d9782439 100644 --- a/weed/pb/master_pb/master.pb.go +++ b/weed/pb/master_pb/master.pb.go @@ -1,727 +1,3993 @@ 
-// Code generated by protoc-gen-go. +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.24.0 +// protoc v3.12.3 // source: master.proto -// DO NOT EDIT! - -/* -Package master_pb is a generated protocol buffer package. - -It is generated from these files: - master.proto - -It has these top-level messages: - Heartbeat - HeartbeatResponse - VolumeInformationMessage - Empty - SuperBlockExtra - ClientListenRequest - VolumeLocation - LookupVolumeRequest - LookupVolumeResponse - Location - AssignRequest - AssignResponse - StatisticsRequest - StatisticsResponse -*/ -package master_pb -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" +package master_pb import ( - context "golang.org/x/net/context" + context "context" + proto "github.com/golang/protobuf/proto" grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" ) -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package +// This is a compile-time assertion that a sufficiently up-to-date version +// of the legacy proto package is being used. 
+const _ = proto.ProtoPackageIsVersion4 type Heartbeat struct { - Ip string `protobuf:"bytes,1,opt,name=ip" json:"ip,omitempty"` - Port uint32 `protobuf:"varint,2,opt,name=port" json:"port,omitempty"` - PublicUrl string `protobuf:"bytes,3,opt,name=public_url,json=publicUrl" json:"public_url,omitempty"` - MaxVolumeCount uint32 `protobuf:"varint,4,opt,name=max_volume_count,json=maxVolumeCount" json:"max_volume_count,omitempty"` - MaxFileKey uint64 `protobuf:"varint,5,opt,name=max_file_key,json=maxFileKey" json:"max_file_key,omitempty"` - DataCenter string `protobuf:"bytes,6,opt,name=data_center,json=dataCenter" json:"data_center,omitempty"` - Rack string `protobuf:"bytes,7,opt,name=rack" json:"rack,omitempty"` - AdminPort uint32 `protobuf:"varint,8,opt,name=admin_port,json=adminPort" json:"admin_port,omitempty"` - Volumes []*VolumeInformationMessage `protobuf:"bytes,9,rep,name=volumes" json:"volumes,omitempty"` - // delta volume ids - NewVids []uint32 `protobuf:"varint,10,rep,packed,name=new_vids,json=newVids" json:"new_vids,omitempty"` - DeletedVids []uint32 `protobuf:"varint,11,rep,packed,name=deleted_vids,json=deletedVids" json:"deleted_vids,omitempty"` -} - -func (m *Heartbeat) Reset() { *m = Heartbeat{} } -func (m *Heartbeat) String() string { return proto.CompactTextString(m) } -func (*Heartbeat) ProtoMessage() {} -func (*Heartbeat) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } - -func (m *Heartbeat) GetIp() string { - if m != nil { - return m.Ip + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Ip string `protobuf:"bytes,1,opt,name=ip,proto3" json:"ip,omitempty"` + Port uint32 `protobuf:"varint,2,opt,name=port,proto3" json:"port,omitempty"` + PublicUrl string `protobuf:"bytes,3,opt,name=public_url,json=publicUrl,proto3" json:"public_url,omitempty"` + MaxVolumeCount uint32 `protobuf:"varint,4,opt,name=max_volume_count,json=maxVolumeCount,proto3" json:"max_volume_count,omitempty"` + 
MaxFileKey uint64 `protobuf:"varint,5,opt,name=max_file_key,json=maxFileKey,proto3" json:"max_file_key,omitempty"` + DataCenter string `protobuf:"bytes,6,opt,name=data_center,json=dataCenter,proto3" json:"data_center,omitempty"` + Rack string `protobuf:"bytes,7,opt,name=rack,proto3" json:"rack,omitempty"` + AdminPort uint32 `protobuf:"varint,8,opt,name=admin_port,json=adminPort,proto3" json:"admin_port,omitempty"` + Volumes []*VolumeInformationMessage `protobuf:"bytes,9,rep,name=volumes,proto3" json:"volumes,omitempty"` + // delta volumes + NewVolumes []*VolumeShortInformationMessage `protobuf:"bytes,10,rep,name=new_volumes,json=newVolumes,proto3" json:"new_volumes,omitempty"` + DeletedVolumes []*VolumeShortInformationMessage `protobuf:"bytes,11,rep,name=deleted_volumes,json=deletedVolumes,proto3" json:"deleted_volumes,omitempty"` + HasNoVolumes bool `protobuf:"varint,12,opt,name=has_no_volumes,json=hasNoVolumes,proto3" json:"has_no_volumes,omitempty"` + // erasure coding + EcShards []*VolumeEcShardInformationMessage `protobuf:"bytes,16,rep,name=ec_shards,json=ecShards,proto3" json:"ec_shards,omitempty"` + // delta erasure coding shards + NewEcShards []*VolumeEcShardInformationMessage `protobuf:"bytes,17,rep,name=new_ec_shards,json=newEcShards,proto3" json:"new_ec_shards,omitempty"` + DeletedEcShards []*VolumeEcShardInformationMessage `protobuf:"bytes,18,rep,name=deleted_ec_shards,json=deletedEcShards,proto3" json:"deleted_ec_shards,omitempty"` + HasNoEcShards bool `protobuf:"varint,19,opt,name=has_no_ec_shards,json=hasNoEcShards,proto3" json:"has_no_ec_shards,omitempty"` +} + +func (x *Heartbeat) Reset() { + *x = Heartbeat{} + if protoimpl.UnsafeEnabled { + mi := &file_master_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Heartbeat) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Heartbeat) ProtoMessage() {} + +func (x *Heartbeat) ProtoReflect() protoreflect.Message { + 
mi := &file_master_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Heartbeat.ProtoReflect.Descriptor instead. +func (*Heartbeat) Descriptor() ([]byte, []int) { + return file_master_proto_rawDescGZIP(), []int{0} +} + +func (x *Heartbeat) GetIp() string { + if x != nil { + return x.Ip } return "" } -func (m *Heartbeat) GetPort() uint32 { - if m != nil { - return m.Port +func (x *Heartbeat) GetPort() uint32 { + if x != nil { + return x.Port } return 0 } -func (m *Heartbeat) GetPublicUrl() string { - if m != nil { - return m.PublicUrl +func (x *Heartbeat) GetPublicUrl() string { + if x != nil { + return x.PublicUrl } return "" } -func (m *Heartbeat) GetMaxVolumeCount() uint32 { - if m != nil { - return m.MaxVolumeCount +func (x *Heartbeat) GetMaxVolumeCount() uint32 { + if x != nil { + return x.MaxVolumeCount } return 0 } -func (m *Heartbeat) GetMaxFileKey() uint64 { - if m != nil { - return m.MaxFileKey +func (x *Heartbeat) GetMaxFileKey() uint64 { + if x != nil { + return x.MaxFileKey } return 0 } -func (m *Heartbeat) GetDataCenter() string { - if m != nil { - return m.DataCenter +func (x *Heartbeat) GetDataCenter() string { + if x != nil { + return x.DataCenter } return "" } -func (m *Heartbeat) GetRack() string { - if m != nil { - return m.Rack +func (x *Heartbeat) GetRack() string { + if x != nil { + return x.Rack } return "" } -func (m *Heartbeat) GetAdminPort() uint32 { - if m != nil { - return m.AdminPort +func (x *Heartbeat) GetAdminPort() uint32 { + if x != nil { + return x.AdminPort } return 0 } -func (m *Heartbeat) GetVolumes() []*VolumeInformationMessage { - if m != nil { - return m.Volumes +func (x *Heartbeat) GetVolumes() []*VolumeInformationMessage { + if x != nil { + return x.Volumes + } + return nil +} + +func (x *Heartbeat) GetNewVolumes() 
[]*VolumeShortInformationMessage { + if x != nil { + return x.NewVolumes + } + return nil +} + +func (x *Heartbeat) GetDeletedVolumes() []*VolumeShortInformationMessage { + if x != nil { + return x.DeletedVolumes + } + return nil +} + +func (x *Heartbeat) GetHasNoVolumes() bool { + if x != nil { + return x.HasNoVolumes + } + return false +} + +func (x *Heartbeat) GetEcShards() []*VolumeEcShardInformationMessage { + if x != nil { + return x.EcShards } return nil } -func (m *Heartbeat) GetNewVids() []uint32 { - if m != nil { - return m.NewVids +func (x *Heartbeat) GetNewEcShards() []*VolumeEcShardInformationMessage { + if x != nil { + return x.NewEcShards } return nil } -func (m *Heartbeat) GetDeletedVids() []uint32 { - if m != nil { - return m.DeletedVids +func (x *Heartbeat) GetDeletedEcShards() []*VolumeEcShardInformationMessage { + if x != nil { + return x.DeletedEcShards } return nil } +func (x *Heartbeat) GetHasNoEcShards() bool { + if x != nil { + return x.HasNoEcShards + } + return false +} + type HeartbeatResponse struct { - VolumeSizeLimit uint64 `protobuf:"varint,1,opt,name=volumeSizeLimit" json:"volumeSizeLimit,omitempty"` - SecretKey string `protobuf:"bytes,2,opt,name=secretKey" json:"secretKey,omitempty"` - Leader string `protobuf:"bytes,3,opt,name=leader" json:"leader,omitempty"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + VolumeSizeLimit uint64 `protobuf:"varint,1,opt,name=volume_size_limit,json=volumeSizeLimit,proto3" json:"volume_size_limit,omitempty"` + Leader string `protobuf:"bytes,2,opt,name=leader,proto3" json:"leader,omitempty"` + MetricsAddress string `protobuf:"bytes,3,opt,name=metrics_address,json=metricsAddress,proto3" json:"metrics_address,omitempty"` + MetricsIntervalSeconds uint32 `protobuf:"varint,4,opt,name=metrics_interval_seconds,json=metricsIntervalSeconds,proto3" json:"metrics_interval_seconds,omitempty"` + StorageBackends []*StorageBackend 
`protobuf:"bytes,5,rep,name=storage_backends,json=storageBackends,proto3" json:"storage_backends,omitempty"` +} + +func (x *HeartbeatResponse) Reset() { + *x = HeartbeatResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_master_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HeartbeatResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HeartbeatResponse) ProtoMessage() {} + +func (x *HeartbeatResponse) ProtoReflect() protoreflect.Message { + mi := &file_master_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -func (m *HeartbeatResponse) Reset() { *m = HeartbeatResponse{} } -func (m *HeartbeatResponse) String() string { return proto.CompactTextString(m) } -func (*HeartbeatResponse) ProtoMessage() {} -func (*HeartbeatResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } +// Deprecated: Use HeartbeatResponse.ProtoReflect.Descriptor instead. 
+func (*HeartbeatResponse) Descriptor() ([]byte, []int) { + return file_master_proto_rawDescGZIP(), []int{1} +} -func (m *HeartbeatResponse) GetVolumeSizeLimit() uint64 { - if m != nil { - return m.VolumeSizeLimit +func (x *HeartbeatResponse) GetVolumeSizeLimit() uint64 { + if x != nil { + return x.VolumeSizeLimit } return 0 } -func (m *HeartbeatResponse) GetSecretKey() string { - if m != nil { - return m.SecretKey +func (x *HeartbeatResponse) GetLeader() string { + if x != nil { + return x.Leader } return "" } -func (m *HeartbeatResponse) GetLeader() string { - if m != nil { - return m.Leader +func (x *HeartbeatResponse) GetMetricsAddress() string { + if x != nil { + return x.MetricsAddress } return "" } +func (x *HeartbeatResponse) GetMetricsIntervalSeconds() uint32 { + if x != nil { + return x.MetricsIntervalSeconds + } + return 0 +} + +func (x *HeartbeatResponse) GetStorageBackends() []*StorageBackend { + if x != nil { + return x.StorageBackends + } + return nil +} + type VolumeInformationMessage struct { - Id uint32 `protobuf:"varint,1,opt,name=id" json:"id,omitempty"` - Size uint64 `protobuf:"varint,2,opt,name=size" json:"size,omitempty"` - Collection string `protobuf:"bytes,3,opt,name=collection" json:"collection,omitempty"` - FileCount uint64 `protobuf:"varint,4,opt,name=file_count,json=fileCount" json:"file_count,omitempty"` - DeleteCount uint64 `protobuf:"varint,5,opt,name=delete_count,json=deleteCount" json:"delete_count,omitempty"` - DeletedByteCount uint64 `protobuf:"varint,6,opt,name=deleted_byte_count,json=deletedByteCount" json:"deleted_byte_count,omitempty"` - ReadOnly bool `protobuf:"varint,7,opt,name=read_only,json=readOnly" json:"read_only,omitempty"` - ReplicaPlacement uint32 `protobuf:"varint,8,opt,name=replica_placement,json=replicaPlacement" json:"replica_placement,omitempty"` - Version uint32 `protobuf:"varint,9,opt,name=version" json:"version,omitempty"` - Ttl uint32 `protobuf:"varint,10,opt,name=ttl" json:"ttl,omitempty"` -} - -func (m 
*VolumeInformationMessage) Reset() { *m = VolumeInformationMessage{} } -func (m *VolumeInformationMessage) String() string { return proto.CompactTextString(m) } -func (*VolumeInformationMessage) ProtoMessage() {} -func (*VolumeInformationMessage) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } - -func (m *VolumeInformationMessage) GetId() uint32 { - if m != nil { - return m.Id + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Id uint32 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"` + Size uint64 `protobuf:"varint,2,opt,name=size,proto3" json:"size,omitempty"` + Collection string `protobuf:"bytes,3,opt,name=collection,proto3" json:"collection,omitempty"` + FileCount uint64 `protobuf:"varint,4,opt,name=file_count,json=fileCount,proto3" json:"file_count,omitempty"` + DeleteCount uint64 `protobuf:"varint,5,opt,name=delete_count,json=deleteCount,proto3" json:"delete_count,omitempty"` + DeletedByteCount uint64 `protobuf:"varint,6,opt,name=deleted_byte_count,json=deletedByteCount,proto3" json:"deleted_byte_count,omitempty"` + ReadOnly bool `protobuf:"varint,7,opt,name=read_only,json=readOnly,proto3" json:"read_only,omitempty"` + ReplicaPlacement uint32 `protobuf:"varint,8,opt,name=replica_placement,json=replicaPlacement,proto3" json:"replica_placement,omitempty"` + Version uint32 `protobuf:"varint,9,opt,name=version,proto3" json:"version,omitempty"` + Ttl uint32 `protobuf:"varint,10,opt,name=ttl,proto3" json:"ttl,omitempty"` + CompactRevision uint32 `protobuf:"varint,11,opt,name=compact_revision,json=compactRevision,proto3" json:"compact_revision,omitempty"` + ModifiedAtSecond int64 `protobuf:"varint,12,opt,name=modified_at_second,json=modifiedAtSecond,proto3" json:"modified_at_second,omitempty"` + RemoteStorageName string `protobuf:"bytes,13,opt,name=remote_storage_name,json=remoteStorageName,proto3" json:"remote_storage_name,omitempty"` + RemoteStorageKey string 
`protobuf:"bytes,14,opt,name=remote_storage_key,json=remoteStorageKey,proto3" json:"remote_storage_key,omitempty"` +} + +func (x *VolumeInformationMessage) Reset() { + *x = VolumeInformationMessage{} + if protoimpl.UnsafeEnabled { + mi := &file_master_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *VolumeInformationMessage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VolumeInformationMessage) ProtoMessage() {} + +func (x *VolumeInformationMessage) ProtoReflect() protoreflect.Message { + mi := &file_master_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use VolumeInformationMessage.ProtoReflect.Descriptor instead. +func (*VolumeInformationMessage) Descriptor() ([]byte, []int) { + return file_master_proto_rawDescGZIP(), []int{2} +} + +func (x *VolumeInformationMessage) GetId() uint32 { + if x != nil { + return x.Id } return 0 } -func (m *VolumeInformationMessage) GetSize() uint64 { - if m != nil { - return m.Size +func (x *VolumeInformationMessage) GetSize() uint64 { + if x != nil { + return x.Size } return 0 } -func (m *VolumeInformationMessage) GetCollection() string { - if m != nil { - return m.Collection +func (x *VolumeInformationMessage) GetCollection() string { + if x != nil { + return x.Collection } return "" } -func (m *VolumeInformationMessage) GetFileCount() uint64 { - if m != nil { - return m.FileCount +func (x *VolumeInformationMessage) GetFileCount() uint64 { + if x != nil { + return x.FileCount } return 0 } -func (m *VolumeInformationMessage) GetDeleteCount() uint64 { - if m != nil { - return m.DeleteCount +func (x *VolumeInformationMessage) GetDeleteCount() uint64 { + if x != nil { + return x.DeleteCount } return 0 } -func (m *VolumeInformationMessage) 
GetDeletedByteCount() uint64 { - if m != nil { - return m.DeletedByteCount +func (x *VolumeInformationMessage) GetDeletedByteCount() uint64 { + if x != nil { + return x.DeletedByteCount } return 0 } -func (m *VolumeInformationMessage) GetReadOnly() bool { - if m != nil { - return m.ReadOnly +func (x *VolumeInformationMessage) GetReadOnly() bool { + if x != nil { + return x.ReadOnly } return false } -func (m *VolumeInformationMessage) GetReplicaPlacement() uint32 { - if m != nil { - return m.ReplicaPlacement +func (x *VolumeInformationMessage) GetReplicaPlacement() uint32 { + if x != nil { + return x.ReplicaPlacement } return 0 } -func (m *VolumeInformationMessage) GetVersion() uint32 { - if m != nil { - return m.Version +func (x *VolumeInformationMessage) GetVersion() uint32 { + if x != nil { + return x.Version } return 0 } -func (m *VolumeInformationMessage) GetTtl() uint32 { - if m != nil { - return m.Ttl +func (x *VolumeInformationMessage) GetTtl() uint32 { + if x != nil { + return x.Ttl } return 0 } -type Empty struct { +func (x *VolumeInformationMessage) GetCompactRevision() uint32 { + if x != nil { + return x.CompactRevision + } + return 0 } -func (m *Empty) Reset() { *m = Empty{} } -func (m *Empty) String() string { return proto.CompactTextString(m) } -func (*Empty) ProtoMessage() {} -func (*Empty) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } +func (x *VolumeInformationMessage) GetModifiedAtSecond() int64 { + if x != nil { + return x.ModifiedAtSecond + } + return 0 +} -type SuperBlockExtra struct { - ErasureCoding *SuperBlockExtra_ErasureCoding `protobuf:"bytes,1,opt,name=erasure_coding,json=erasureCoding" json:"erasure_coding,omitempty"` +func (x *VolumeInformationMessage) GetRemoteStorageName() string { + if x != nil { + return x.RemoteStorageName + } + return "" +} + +func (x *VolumeInformationMessage) GetRemoteStorageKey() string { + if x != nil { + return x.RemoteStorageKey + } + return "" } -func (m *SuperBlockExtra) Reset() { *m = 
SuperBlockExtra{} } -func (m *SuperBlockExtra) String() string { return proto.CompactTextString(m) } -func (*SuperBlockExtra) ProtoMessage() {} -func (*SuperBlockExtra) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } +type VolumeShortInformationMessage struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Id uint32 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"` + Collection string `protobuf:"bytes,3,opt,name=collection,proto3" json:"collection,omitempty"` + ReplicaPlacement uint32 `protobuf:"varint,8,opt,name=replica_placement,json=replicaPlacement,proto3" json:"replica_placement,omitempty"` + Version uint32 `protobuf:"varint,9,opt,name=version,proto3" json:"version,omitempty"` + Ttl uint32 `protobuf:"varint,10,opt,name=ttl,proto3" json:"ttl,omitempty"` +} -func (m *SuperBlockExtra) GetErasureCoding() *SuperBlockExtra_ErasureCoding { - if m != nil { - return m.ErasureCoding +func (x *VolumeShortInformationMessage) Reset() { + *x = VolumeShortInformationMessage{} + if protoimpl.UnsafeEnabled { + mi := &file_master_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return nil } -type SuperBlockExtra_ErasureCoding struct { - Data uint32 `protobuf:"varint,1,opt,name=data" json:"data,omitempty"` - Parity uint32 `protobuf:"varint,2,opt,name=parity" json:"parity,omitempty"` - VolumeIds []uint32 `protobuf:"varint,3,rep,packed,name=volume_ids,json=volumeIds" json:"volume_ids,omitempty"` +func (x *VolumeShortInformationMessage) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *SuperBlockExtra_ErasureCoding) Reset() { *m = SuperBlockExtra_ErasureCoding{} } -func (m *SuperBlockExtra_ErasureCoding) String() string { return proto.CompactTextString(m) } -func (*SuperBlockExtra_ErasureCoding) ProtoMessage() {} -func (*SuperBlockExtra_ErasureCoding) Descriptor() ([]byte, []int) { - return fileDescriptor0, []int{4, 0} 
+func (*VolumeShortInformationMessage) ProtoMessage() {} + +func (x *VolumeShortInformationMessage) ProtoReflect() protoreflect.Message { + mi := &file_master_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use VolumeShortInformationMessage.ProtoReflect.Descriptor instead. +func (*VolumeShortInformationMessage) Descriptor() ([]byte, []int) { + return file_master_proto_rawDescGZIP(), []int{3} +} + +func (x *VolumeShortInformationMessage) GetId() uint32 { + if x != nil { + return x.Id + } + return 0 +} + +func (x *VolumeShortInformationMessage) GetCollection() string { + if x != nil { + return x.Collection + } + return "" +} + +func (x *VolumeShortInformationMessage) GetReplicaPlacement() uint32 { + if x != nil { + return x.ReplicaPlacement + } + return 0 +} + +func (x *VolumeShortInformationMessage) GetVersion() uint32 { + if x != nil { + return x.Version + } + return 0 +} + +func (x *VolumeShortInformationMessage) GetTtl() uint32 { + if x != nil { + return x.Ttl + } + return 0 +} + +type VolumeEcShardInformationMessage struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Id uint32 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"` + Collection string `protobuf:"bytes,2,opt,name=collection,proto3" json:"collection,omitempty"` + EcIndexBits uint32 `protobuf:"varint,3,opt,name=ec_index_bits,json=ecIndexBits,proto3" json:"ec_index_bits,omitempty"` +} + +func (x *VolumeEcShardInformationMessage) Reset() { + *x = VolumeEcShardInformationMessage{} + if protoimpl.UnsafeEnabled { + mi := &file_master_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *VolumeEcShardInformationMessage) String() string { + return protoimpl.X.MessageStringOf(x) 
+} + +func (*VolumeEcShardInformationMessage) ProtoMessage() {} + +func (x *VolumeEcShardInformationMessage) ProtoReflect() protoreflect.Message { + mi := &file_master_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use VolumeEcShardInformationMessage.ProtoReflect.Descriptor instead. +func (*VolumeEcShardInformationMessage) Descriptor() ([]byte, []int) { + return file_master_proto_rawDescGZIP(), []int{4} } -func (m *SuperBlockExtra_ErasureCoding) GetData() uint32 { - if m != nil { - return m.Data +func (x *VolumeEcShardInformationMessage) GetId() uint32 { + if x != nil { + return x.Id } return 0 } -func (m *SuperBlockExtra_ErasureCoding) GetParity() uint32 { - if m != nil { - return m.Parity +func (x *VolumeEcShardInformationMessage) GetCollection() string { + if x != nil { + return x.Collection + } + return "" +} + +func (x *VolumeEcShardInformationMessage) GetEcIndexBits() uint32 { + if x != nil { + return x.EcIndexBits } return 0 } -func (m *SuperBlockExtra_ErasureCoding) GetVolumeIds() []uint32 { - if m != nil { - return m.VolumeIds +type StorageBackend struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` + Id string `protobuf:"bytes,2,opt,name=id,proto3" json:"id,omitempty"` + Properties map[string]string `protobuf:"bytes,3,rep,name=properties,proto3" json:"properties,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *StorageBackend) Reset() { + *x = StorageBackend{} + if protoimpl.UnsafeEnabled { + mi := &file_master_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *StorageBackend) String() string 
{ + return protoimpl.X.MessageStringOf(x) +} + +func (*StorageBackend) ProtoMessage() {} + +func (x *StorageBackend) ProtoReflect() protoreflect.Message { + mi := &file_master_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StorageBackend.ProtoReflect.Descriptor instead. +func (*StorageBackend) Descriptor() ([]byte, []int) { + return file_master_proto_rawDescGZIP(), []int{5} +} + +func (x *StorageBackend) GetType() string { + if x != nil { + return x.Type + } + return "" +} + +func (x *StorageBackend) GetId() string { + if x != nil { + return x.Id + } + return "" +} + +func (x *StorageBackend) GetProperties() map[string]string { + if x != nil { + return x.Properties + } + return nil +} + +type Empty struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *Empty) Reset() { + *x = Empty{} + if protoimpl.UnsafeEnabled { + mi := &file_master_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Empty) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Empty) ProtoMessage() {} + +func (x *Empty) ProtoReflect() protoreflect.Message { + mi := &file_master_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Empty.ProtoReflect.Descriptor instead. 
+func (*Empty) Descriptor() ([]byte, []int) { + return file_master_proto_rawDescGZIP(), []int{6} +} + +type SuperBlockExtra struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ErasureCoding *SuperBlockExtra_ErasureCoding `protobuf:"bytes,1,opt,name=erasure_coding,json=erasureCoding,proto3" json:"erasure_coding,omitempty"` +} + +func (x *SuperBlockExtra) Reset() { + *x = SuperBlockExtra{} + if protoimpl.UnsafeEnabled { + mi := &file_master_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SuperBlockExtra) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SuperBlockExtra) ProtoMessage() {} + +func (x *SuperBlockExtra) ProtoReflect() protoreflect.Message { + mi := &file_master_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SuperBlockExtra.ProtoReflect.Descriptor instead. 
+func (*SuperBlockExtra) Descriptor() ([]byte, []int) { + return file_master_proto_rawDescGZIP(), []int{7} +} + +func (x *SuperBlockExtra) GetErasureCoding() *SuperBlockExtra_ErasureCoding { + if x != nil { + return x.ErasureCoding } return nil } -type ClientListenRequest struct { - Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` +type KeepConnectedRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + GrpcPort uint32 `protobuf:"varint,2,opt,name=grpc_port,json=grpcPort,proto3" json:"grpc_port,omitempty"` +} + +func (x *KeepConnectedRequest) Reset() { + *x = KeepConnectedRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_master_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *KeepConnectedRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*KeepConnectedRequest) ProtoMessage() {} + +func (x *KeepConnectedRequest) ProtoReflect() protoreflect.Message { + mi := &file_master_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -func (m *ClientListenRequest) Reset() { *m = ClientListenRequest{} } -func (m *ClientListenRequest) String() string { return proto.CompactTextString(m) } -func (*ClientListenRequest) ProtoMessage() {} -func (*ClientListenRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} } +// Deprecated: Use KeepConnectedRequest.ProtoReflect.Descriptor instead. 
+func (*KeepConnectedRequest) Descriptor() ([]byte, []int) { + return file_master_proto_rawDescGZIP(), []int{8} +} -func (m *ClientListenRequest) GetName() string { - if m != nil { - return m.Name +func (x *KeepConnectedRequest) GetName() string { + if x != nil { + return x.Name } return "" } +func (x *KeepConnectedRequest) GetGrpcPort() uint32 { + if x != nil { + return x.GrpcPort + } + return 0 +} + type VolumeLocation struct { - Url string `protobuf:"bytes,1,opt,name=url" json:"url,omitempty"` - PublicUrl string `protobuf:"bytes,2,opt,name=public_url,json=publicUrl" json:"public_url,omitempty"` - NewVids []uint32 `protobuf:"varint,3,rep,packed,name=new_vids,json=newVids" json:"new_vids,omitempty"` - DeletedVids []uint32 `protobuf:"varint,4,rep,packed,name=deleted_vids,json=deletedVids" json:"deleted_vids,omitempty"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Url string `protobuf:"bytes,1,opt,name=url,proto3" json:"url,omitempty"` + PublicUrl string `protobuf:"bytes,2,opt,name=public_url,json=publicUrl,proto3" json:"public_url,omitempty"` + NewVids []uint32 `protobuf:"varint,3,rep,packed,name=new_vids,json=newVids,proto3" json:"new_vids,omitempty"` + DeletedVids []uint32 `protobuf:"varint,4,rep,packed,name=deleted_vids,json=deletedVids,proto3" json:"deleted_vids,omitempty"` + Leader string `protobuf:"bytes,5,opt,name=leader,proto3" json:"leader,omitempty"` // optional when leader is not itself +} + +func (x *VolumeLocation) Reset() { + *x = VolumeLocation{} + if protoimpl.UnsafeEnabled { + mi := &file_master_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *VolumeLocation) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VolumeLocation) ProtoMessage() {} + +func (x *VolumeLocation) ProtoReflect() protoreflect.Message { + mi := &file_master_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -func (m *VolumeLocation) Reset() { *m = VolumeLocation{} } -func (m *VolumeLocation) String() string { return proto.CompactTextString(m) } -func (*VolumeLocation) ProtoMessage() {} -func (*VolumeLocation) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} } +// Deprecated: Use VolumeLocation.ProtoReflect.Descriptor instead. +func (*VolumeLocation) Descriptor() ([]byte, []int) { + return file_master_proto_rawDescGZIP(), []int{9} +} -func (m *VolumeLocation) GetUrl() string { - if m != nil { - return m.Url +func (x *VolumeLocation) GetUrl() string { + if x != nil { + return x.Url } return "" } -func (m *VolumeLocation) GetPublicUrl() string { - if m != nil { - return m.PublicUrl +func (x *VolumeLocation) GetPublicUrl() string { + if x != nil { + return x.PublicUrl } return "" } -func (m *VolumeLocation) GetNewVids() []uint32 { - if m != nil { - return m.NewVids +func (x *VolumeLocation) GetNewVids() []uint32 { + if x != nil { + return x.NewVids } return nil } -func (m *VolumeLocation) GetDeletedVids() []uint32 { - if m != nil { - return m.DeletedVids +func (x *VolumeLocation) GetDeletedVids() []uint32 { + if x != nil { + return x.DeletedVids } return nil } +func (x *VolumeLocation) GetLeader() string { + if x != nil { + return x.Leader + } + return "" +} + type LookupVolumeRequest struct { - VolumeIds []string `protobuf:"bytes,1,rep,name=volume_ids,json=volumeIds" json:"volume_ids,omitempty"` - Collection string `protobuf:"bytes,2,opt,name=collection" json:"collection,omitempty"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + VolumeIds []string `protobuf:"bytes,1,rep,name=volume_ids,json=volumeIds,proto3" json:"volume_ids,omitempty"` + Collection string `protobuf:"bytes,2,opt,name=collection,proto3" json:"collection,omitempty"` // optional, 
a bit faster if provided. +} + +func (x *LookupVolumeRequest) Reset() { + *x = LookupVolumeRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_master_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *LookupVolumeRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LookupVolumeRequest) ProtoMessage() {} + +func (x *LookupVolumeRequest) ProtoReflect() protoreflect.Message { + mi := &file_master_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -func (m *LookupVolumeRequest) Reset() { *m = LookupVolumeRequest{} } -func (m *LookupVolumeRequest) String() string { return proto.CompactTextString(m) } -func (*LookupVolumeRequest) ProtoMessage() {} -func (*LookupVolumeRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} } +// Deprecated: Use LookupVolumeRequest.ProtoReflect.Descriptor instead. 
+func (*LookupVolumeRequest) Descriptor() ([]byte, []int) { + return file_master_proto_rawDescGZIP(), []int{10} +} -func (m *LookupVolumeRequest) GetVolumeIds() []string { - if m != nil { - return m.VolumeIds +func (x *LookupVolumeRequest) GetVolumeIds() []string { + if x != nil { + return x.VolumeIds } return nil } -func (m *LookupVolumeRequest) GetCollection() string { - if m != nil { - return m.Collection +func (x *LookupVolumeRequest) GetCollection() string { + if x != nil { + return x.Collection } return "" } type LookupVolumeResponse struct { - VolumeIdLocations []*LookupVolumeResponse_VolumeIdLocation `protobuf:"bytes,1,rep,name=volume_id_locations,json=volumeIdLocations" json:"volume_id_locations,omitempty"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + VolumeIdLocations []*LookupVolumeResponse_VolumeIdLocation `protobuf:"bytes,1,rep,name=volume_id_locations,json=volumeIdLocations,proto3" json:"volume_id_locations,omitempty"` +} + +func (x *LookupVolumeResponse) Reset() { + *x = LookupVolumeResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_master_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *LookupVolumeResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LookupVolumeResponse) ProtoMessage() {} + +func (x *LookupVolumeResponse) ProtoReflect() protoreflect.Message { + mi := &file_master_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use LookupVolumeResponse.ProtoReflect.Descriptor instead. 
+func (*LookupVolumeResponse) Descriptor() ([]byte, []int) { + return file_master_proto_rawDescGZIP(), []int{11} +} + +func (x *LookupVolumeResponse) GetVolumeIdLocations() []*LookupVolumeResponse_VolumeIdLocation { + if x != nil { + return x.VolumeIdLocations + } + return nil +} + +type Location struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Url string `protobuf:"bytes,1,opt,name=url,proto3" json:"url,omitempty"` + PublicUrl string `protobuf:"bytes,2,opt,name=public_url,json=publicUrl,proto3" json:"public_url,omitempty"` +} + +func (x *Location) Reset() { + *x = Location{} + if protoimpl.UnsafeEnabled { + mi := &file_master_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Location) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Location) ProtoMessage() {} + +func (x *Location) ProtoReflect() protoreflect.Message { + mi := &file_master_proto_msgTypes[12] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Location.ProtoReflect.Descriptor instead. 
+func (*Location) Descriptor() ([]byte, []int) { + return file_master_proto_rawDescGZIP(), []int{12} +} + +func (x *Location) GetUrl() string { + if x != nil { + return x.Url + } + return "" +} + +func (x *Location) GetPublicUrl() string { + if x != nil { + return x.PublicUrl + } + return "" +} + +type AssignRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Count uint64 `protobuf:"varint,1,opt,name=count,proto3" json:"count,omitempty"` + Replication string `protobuf:"bytes,2,opt,name=replication,proto3" json:"replication,omitempty"` + Collection string `protobuf:"bytes,3,opt,name=collection,proto3" json:"collection,omitempty"` + Ttl string `protobuf:"bytes,4,opt,name=ttl,proto3" json:"ttl,omitempty"` + DataCenter string `protobuf:"bytes,5,opt,name=data_center,json=dataCenter,proto3" json:"data_center,omitempty"` + Rack string `protobuf:"bytes,6,opt,name=rack,proto3" json:"rack,omitempty"` + DataNode string `protobuf:"bytes,7,opt,name=data_node,json=dataNode,proto3" json:"data_node,omitempty"` + MemoryMapMaxSizeMb uint32 `protobuf:"varint,8,opt,name=memory_map_max_size_mb,json=memoryMapMaxSizeMb,proto3" json:"memory_map_max_size_mb,omitempty"` + WritableVolumeCount uint32 `protobuf:"varint,9,opt,name=Writable_volume_count,json=WritableVolumeCount,proto3" json:"Writable_volume_count,omitempty"` +} + +func (x *AssignRequest) Reset() { + *x = AssignRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_master_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AssignRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AssignRequest) ProtoMessage() {} + +func (x *AssignRequest) ProtoReflect() protoreflect.Message { + mi := &file_master_proto_msgTypes[13] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + 
ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AssignRequest.ProtoReflect.Descriptor instead. +func (*AssignRequest) Descriptor() ([]byte, []int) { + return file_master_proto_rawDescGZIP(), []int{13} +} + +func (x *AssignRequest) GetCount() uint64 { + if x != nil { + return x.Count + } + return 0 +} + +func (x *AssignRequest) GetReplication() string { + if x != nil { + return x.Replication + } + return "" +} + +func (x *AssignRequest) GetCollection() string { + if x != nil { + return x.Collection + } + return "" +} + +func (x *AssignRequest) GetTtl() string { + if x != nil { + return x.Ttl + } + return "" +} + +func (x *AssignRequest) GetDataCenter() string { + if x != nil { + return x.DataCenter + } + return "" +} + +func (x *AssignRequest) GetRack() string { + if x != nil { + return x.Rack + } + return "" +} + +func (x *AssignRequest) GetDataNode() string { + if x != nil { + return x.DataNode + } + return "" +} + +func (x *AssignRequest) GetMemoryMapMaxSizeMb() uint32 { + if x != nil { + return x.MemoryMapMaxSizeMb + } + return 0 +} + +func (x *AssignRequest) GetWritableVolumeCount() uint32 { + if x != nil { + return x.WritableVolumeCount + } + return 0 +} + +type AssignResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Fid string `protobuf:"bytes,1,opt,name=fid,proto3" json:"fid,omitempty"` + Url string `protobuf:"bytes,2,opt,name=url,proto3" json:"url,omitempty"` + PublicUrl string `protobuf:"bytes,3,opt,name=public_url,json=publicUrl,proto3" json:"public_url,omitempty"` + Count uint64 `protobuf:"varint,4,opt,name=count,proto3" json:"count,omitempty"` + Error string `protobuf:"bytes,5,opt,name=error,proto3" json:"error,omitempty"` + Auth string `protobuf:"bytes,6,opt,name=auth,proto3" json:"auth,omitempty"` +} + +func (x *AssignResponse) Reset() { + *x = AssignResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_master_proto_msgTypes[14] + 
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AssignResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AssignResponse) ProtoMessage() {} + +func (x *AssignResponse) ProtoReflect() protoreflect.Message { + mi := &file_master_proto_msgTypes[14] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AssignResponse.ProtoReflect.Descriptor instead. +func (*AssignResponse) Descriptor() ([]byte, []int) { + return file_master_proto_rawDescGZIP(), []int{14} +} + +func (x *AssignResponse) GetFid() string { + if x != nil { + return x.Fid + } + return "" +} + +func (x *AssignResponse) GetUrl() string { + if x != nil { + return x.Url + } + return "" +} + +func (x *AssignResponse) GetPublicUrl() string { + if x != nil { + return x.PublicUrl + } + return "" +} + +func (x *AssignResponse) GetCount() uint64 { + if x != nil { + return x.Count + } + return 0 +} + +func (x *AssignResponse) GetError() string { + if x != nil { + return x.Error + } + return "" +} + +func (x *AssignResponse) GetAuth() string { + if x != nil { + return x.Auth + } + return "" +} + +type StatisticsRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Replication string `protobuf:"bytes,1,opt,name=replication,proto3" json:"replication,omitempty"` + Collection string `protobuf:"bytes,2,opt,name=collection,proto3" json:"collection,omitempty"` + Ttl string `protobuf:"bytes,3,opt,name=ttl,proto3" json:"ttl,omitempty"` +} + +func (x *StatisticsRequest) Reset() { + *x = StatisticsRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_master_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *StatisticsRequest) String() 
string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StatisticsRequest) ProtoMessage() {} + +func (x *StatisticsRequest) ProtoReflect() protoreflect.Message { + mi := &file_master_proto_msgTypes[15] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StatisticsRequest.ProtoReflect.Descriptor instead. +func (*StatisticsRequest) Descriptor() ([]byte, []int) { + return file_master_proto_rawDescGZIP(), []int{15} +} + +func (x *StatisticsRequest) GetReplication() string { + if x != nil { + return x.Replication + } + return "" +} + +func (x *StatisticsRequest) GetCollection() string { + if x != nil { + return x.Collection + } + return "" +} + +func (x *StatisticsRequest) GetTtl() string { + if x != nil { + return x.Ttl + } + return "" +} + +type StatisticsResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Replication string `protobuf:"bytes,1,opt,name=replication,proto3" json:"replication,omitempty"` + Collection string `protobuf:"bytes,2,opt,name=collection,proto3" json:"collection,omitempty"` + Ttl string `protobuf:"bytes,3,opt,name=ttl,proto3" json:"ttl,omitempty"` + TotalSize uint64 `protobuf:"varint,4,opt,name=total_size,json=totalSize,proto3" json:"total_size,omitempty"` + UsedSize uint64 `protobuf:"varint,5,opt,name=used_size,json=usedSize,proto3" json:"used_size,omitempty"` + FileCount uint64 `protobuf:"varint,6,opt,name=file_count,json=fileCount,proto3" json:"file_count,omitempty"` +} + +func (x *StatisticsResponse) Reset() { + *x = StatisticsResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_master_proto_msgTypes[16] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *StatisticsResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + 
+func (*StatisticsResponse) ProtoMessage() {} + +func (x *StatisticsResponse) ProtoReflect() protoreflect.Message { + mi := &file_master_proto_msgTypes[16] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StatisticsResponse.ProtoReflect.Descriptor instead. +func (*StatisticsResponse) Descriptor() ([]byte, []int) { + return file_master_proto_rawDescGZIP(), []int{16} +} + +func (x *StatisticsResponse) GetReplication() string { + if x != nil { + return x.Replication + } + return "" +} + +func (x *StatisticsResponse) GetCollection() string { + if x != nil { + return x.Collection + } + return "" +} + +func (x *StatisticsResponse) GetTtl() string { + if x != nil { + return x.Ttl + } + return "" +} + +func (x *StatisticsResponse) GetTotalSize() uint64 { + if x != nil { + return x.TotalSize + } + return 0 +} + +func (x *StatisticsResponse) GetUsedSize() uint64 { + if x != nil { + return x.UsedSize + } + return 0 +} + +func (x *StatisticsResponse) GetFileCount() uint64 { + if x != nil { + return x.FileCount + } + return 0 +} + +type StorageType struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Replication string `protobuf:"bytes,1,opt,name=replication,proto3" json:"replication,omitempty"` + Ttl string `protobuf:"bytes,2,opt,name=ttl,proto3" json:"ttl,omitempty"` +} + +func (x *StorageType) Reset() { + *x = StorageType{} + if protoimpl.UnsafeEnabled { + mi := &file_master_proto_msgTypes[17] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *StorageType) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StorageType) ProtoMessage() {} + +func (x *StorageType) ProtoReflect() protoreflect.Message { + mi := &file_master_proto_msgTypes[17] + if 
protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StorageType.ProtoReflect.Descriptor instead. +func (*StorageType) Descriptor() ([]byte, []int) { + return file_master_proto_rawDescGZIP(), []int{17} +} + +func (x *StorageType) GetReplication() string { + if x != nil { + return x.Replication + } + return "" +} + +func (x *StorageType) GetTtl() string { + if x != nil { + return x.Ttl + } + return "" +} + +type Collection struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` +} + +func (x *Collection) Reset() { + *x = Collection{} + if protoimpl.UnsafeEnabled { + mi := &file_master_proto_msgTypes[18] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Collection) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Collection) ProtoMessage() {} + +func (x *Collection) ProtoReflect() protoreflect.Message { + mi := &file_master_proto_msgTypes[18] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Collection.ProtoReflect.Descriptor instead. 
+func (*Collection) Descriptor() ([]byte, []int) { + return file_master_proto_rawDescGZIP(), []int{18} +} + +func (x *Collection) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +type CollectionListRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + IncludeNormalVolumes bool `protobuf:"varint,1,opt,name=include_normal_volumes,json=includeNormalVolumes,proto3" json:"include_normal_volumes,omitempty"` + IncludeEcVolumes bool `protobuf:"varint,2,opt,name=include_ec_volumes,json=includeEcVolumes,proto3" json:"include_ec_volumes,omitempty"` +} + +func (x *CollectionListRequest) Reset() { + *x = CollectionListRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_master_proto_msgTypes[19] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CollectionListRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CollectionListRequest) ProtoMessage() {} + +func (x *CollectionListRequest) ProtoReflect() protoreflect.Message { + mi := &file_master_proto_msgTypes[19] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CollectionListRequest.ProtoReflect.Descriptor instead. 
+func (*CollectionListRequest) Descriptor() ([]byte, []int) { + return file_master_proto_rawDescGZIP(), []int{19} +} + +func (x *CollectionListRequest) GetIncludeNormalVolumes() bool { + if x != nil { + return x.IncludeNormalVolumes + } + return false +} + +func (x *CollectionListRequest) GetIncludeEcVolumes() bool { + if x != nil { + return x.IncludeEcVolumes + } + return false +} + +type CollectionListResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Collections []*Collection `protobuf:"bytes,1,rep,name=collections,proto3" json:"collections,omitempty"` +} + +func (x *CollectionListResponse) Reset() { + *x = CollectionListResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_master_proto_msgTypes[20] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CollectionListResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CollectionListResponse) ProtoMessage() {} + +func (x *CollectionListResponse) ProtoReflect() protoreflect.Message { + mi := &file_master_proto_msgTypes[20] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CollectionListResponse.ProtoReflect.Descriptor instead. 
+func (*CollectionListResponse) Descriptor() ([]byte, []int) { + return file_master_proto_rawDescGZIP(), []int{20} +} + +func (x *CollectionListResponse) GetCollections() []*Collection { + if x != nil { + return x.Collections + } + return nil +} + +type CollectionDeleteRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` +} + +func (x *CollectionDeleteRequest) Reset() { + *x = CollectionDeleteRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_master_proto_msgTypes[21] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CollectionDeleteRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CollectionDeleteRequest) ProtoMessage() {} + +func (x *CollectionDeleteRequest) ProtoReflect() protoreflect.Message { + mi := &file_master_proto_msgTypes[21] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CollectionDeleteRequest.ProtoReflect.Descriptor instead. 
+func (*CollectionDeleteRequest) Descriptor() ([]byte, []int) { + return file_master_proto_rawDescGZIP(), []int{21} +} + +func (x *CollectionDeleteRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +type CollectionDeleteResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *CollectionDeleteResponse) Reset() { + *x = CollectionDeleteResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_master_proto_msgTypes[22] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CollectionDeleteResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CollectionDeleteResponse) ProtoMessage() {} + +func (x *CollectionDeleteResponse) ProtoReflect() protoreflect.Message { + mi := &file_master_proto_msgTypes[22] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CollectionDeleteResponse.ProtoReflect.Descriptor instead. 
+func (*CollectionDeleteResponse) Descriptor() ([]byte, []int) { + return file_master_proto_rawDescGZIP(), []int{22} +} + +// +// volume related +// +type DataNodeInfo struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + VolumeCount uint64 `protobuf:"varint,2,opt,name=volume_count,json=volumeCount,proto3" json:"volume_count,omitempty"` + MaxVolumeCount uint64 `protobuf:"varint,3,opt,name=max_volume_count,json=maxVolumeCount,proto3" json:"max_volume_count,omitempty"` + FreeVolumeCount uint64 `protobuf:"varint,4,opt,name=free_volume_count,json=freeVolumeCount,proto3" json:"free_volume_count,omitempty"` + ActiveVolumeCount uint64 `protobuf:"varint,5,opt,name=active_volume_count,json=activeVolumeCount,proto3" json:"active_volume_count,omitempty"` + VolumeInfos []*VolumeInformationMessage `protobuf:"bytes,6,rep,name=volume_infos,json=volumeInfos,proto3" json:"volume_infos,omitempty"` + EcShardInfos []*VolumeEcShardInformationMessage `protobuf:"bytes,7,rep,name=ec_shard_infos,json=ecShardInfos,proto3" json:"ec_shard_infos,omitempty"` + RemoteVolumeCount uint64 `protobuf:"varint,8,opt,name=remote_volume_count,json=remoteVolumeCount,proto3" json:"remote_volume_count,omitempty"` +} + +func (x *DataNodeInfo) Reset() { + *x = DataNodeInfo{} + if protoimpl.UnsafeEnabled { + mi := &file_master_proto_msgTypes[23] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DataNodeInfo) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DataNodeInfo) ProtoMessage() {} + +func (x *DataNodeInfo) ProtoReflect() protoreflect.Message { + mi := &file_master_proto_msgTypes[23] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// 
Deprecated: Use DataNodeInfo.ProtoReflect.Descriptor instead. +func (*DataNodeInfo) Descriptor() ([]byte, []int) { + return file_master_proto_rawDescGZIP(), []int{23} +} + +func (x *DataNodeInfo) GetId() string { + if x != nil { + return x.Id + } + return "" +} + +func (x *DataNodeInfo) GetVolumeCount() uint64 { + if x != nil { + return x.VolumeCount + } + return 0 +} + +func (x *DataNodeInfo) GetMaxVolumeCount() uint64 { + if x != nil { + return x.MaxVolumeCount + } + return 0 +} + +func (x *DataNodeInfo) GetFreeVolumeCount() uint64 { + if x != nil { + return x.FreeVolumeCount + } + return 0 +} + +func (x *DataNodeInfo) GetActiveVolumeCount() uint64 { + if x != nil { + return x.ActiveVolumeCount + } + return 0 +} + +func (x *DataNodeInfo) GetVolumeInfos() []*VolumeInformationMessage { + if x != nil { + return x.VolumeInfos + } + return nil +} + +func (x *DataNodeInfo) GetEcShardInfos() []*VolumeEcShardInformationMessage { + if x != nil { + return x.EcShardInfos + } + return nil +} + +func (x *DataNodeInfo) GetRemoteVolumeCount() uint64 { + if x != nil { + return x.RemoteVolumeCount + } + return 0 +} + +type RackInfo struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + VolumeCount uint64 `protobuf:"varint,2,opt,name=volume_count,json=volumeCount,proto3" json:"volume_count,omitempty"` + MaxVolumeCount uint64 `protobuf:"varint,3,opt,name=max_volume_count,json=maxVolumeCount,proto3" json:"max_volume_count,omitempty"` + FreeVolumeCount uint64 `protobuf:"varint,4,opt,name=free_volume_count,json=freeVolumeCount,proto3" json:"free_volume_count,omitempty"` + ActiveVolumeCount uint64 `protobuf:"varint,5,opt,name=active_volume_count,json=activeVolumeCount,proto3" json:"active_volume_count,omitempty"` + DataNodeInfos []*DataNodeInfo `protobuf:"bytes,6,rep,name=data_node_infos,json=dataNodeInfos,proto3" json:"data_node_infos,omitempty"` + 
RemoteVolumeCount uint64 `protobuf:"varint,7,opt,name=remote_volume_count,json=remoteVolumeCount,proto3" json:"remote_volume_count,omitempty"` +} + +func (x *RackInfo) Reset() { + *x = RackInfo{} + if protoimpl.UnsafeEnabled { + mi := &file_master_proto_msgTypes[24] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RackInfo) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RackInfo) ProtoMessage() {} + +func (x *RackInfo) ProtoReflect() protoreflect.Message { + mi := &file_master_proto_msgTypes[24] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RackInfo.ProtoReflect.Descriptor instead. +func (*RackInfo) Descriptor() ([]byte, []int) { + return file_master_proto_rawDescGZIP(), []int{24} +} + +func (x *RackInfo) GetId() string { + if x != nil { + return x.Id + } + return "" +} + +func (x *RackInfo) GetVolumeCount() uint64 { + if x != nil { + return x.VolumeCount + } + return 0 +} + +func (x *RackInfo) GetMaxVolumeCount() uint64 { + if x != nil { + return x.MaxVolumeCount + } + return 0 +} + +func (x *RackInfo) GetFreeVolumeCount() uint64 { + if x != nil { + return x.FreeVolumeCount + } + return 0 +} + +func (x *RackInfo) GetActiveVolumeCount() uint64 { + if x != nil { + return x.ActiveVolumeCount + } + return 0 +} + +func (x *RackInfo) GetDataNodeInfos() []*DataNodeInfo { + if x != nil { + return x.DataNodeInfos + } + return nil +} + +func (x *RackInfo) GetRemoteVolumeCount() uint64 { + if x != nil { + return x.RemoteVolumeCount + } + return 0 +} + +type DataCenterInfo struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + VolumeCount uint64 
`protobuf:"varint,2,opt,name=volume_count,json=volumeCount,proto3" json:"volume_count,omitempty"` + MaxVolumeCount uint64 `protobuf:"varint,3,opt,name=max_volume_count,json=maxVolumeCount,proto3" json:"max_volume_count,omitempty"` + FreeVolumeCount uint64 `protobuf:"varint,4,opt,name=free_volume_count,json=freeVolumeCount,proto3" json:"free_volume_count,omitempty"` + ActiveVolumeCount uint64 `protobuf:"varint,5,opt,name=active_volume_count,json=activeVolumeCount,proto3" json:"active_volume_count,omitempty"` + RackInfos []*RackInfo `protobuf:"bytes,6,rep,name=rack_infos,json=rackInfos,proto3" json:"rack_infos,omitempty"` + RemoteVolumeCount uint64 `protobuf:"varint,7,opt,name=remote_volume_count,json=remoteVolumeCount,proto3" json:"remote_volume_count,omitempty"` +} + +func (x *DataCenterInfo) Reset() { + *x = DataCenterInfo{} + if protoimpl.UnsafeEnabled { + mi := &file_master_proto_msgTypes[25] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DataCenterInfo) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DataCenterInfo) ProtoMessage() {} + +func (x *DataCenterInfo) ProtoReflect() protoreflect.Message { + mi := &file_master_proto_msgTypes[25] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DataCenterInfo.ProtoReflect.Descriptor instead. 
+func (*DataCenterInfo) Descriptor() ([]byte, []int) { + return file_master_proto_rawDescGZIP(), []int{25} +} + +func (x *DataCenterInfo) GetId() string { + if x != nil { + return x.Id + } + return "" +} + +func (x *DataCenterInfo) GetVolumeCount() uint64 { + if x != nil { + return x.VolumeCount + } + return 0 +} + +func (x *DataCenterInfo) GetMaxVolumeCount() uint64 { + if x != nil { + return x.MaxVolumeCount + } + return 0 +} + +func (x *DataCenterInfo) GetFreeVolumeCount() uint64 { + if x != nil { + return x.FreeVolumeCount + } + return 0 +} + +func (x *DataCenterInfo) GetActiveVolumeCount() uint64 { + if x != nil { + return x.ActiveVolumeCount + } + return 0 +} + +func (x *DataCenterInfo) GetRackInfos() []*RackInfo { + if x != nil { + return x.RackInfos + } + return nil +} + +func (x *DataCenterInfo) GetRemoteVolumeCount() uint64 { + if x != nil { + return x.RemoteVolumeCount + } + return 0 +} + +type TopologyInfo struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + VolumeCount uint64 `protobuf:"varint,2,opt,name=volume_count,json=volumeCount,proto3" json:"volume_count,omitempty"` + MaxVolumeCount uint64 `protobuf:"varint,3,opt,name=max_volume_count,json=maxVolumeCount,proto3" json:"max_volume_count,omitempty"` + FreeVolumeCount uint64 `protobuf:"varint,4,opt,name=free_volume_count,json=freeVolumeCount,proto3" json:"free_volume_count,omitempty"` + ActiveVolumeCount uint64 `protobuf:"varint,5,opt,name=active_volume_count,json=activeVolumeCount,proto3" json:"active_volume_count,omitempty"` + DataCenterInfos []*DataCenterInfo `protobuf:"bytes,6,rep,name=data_center_infos,json=dataCenterInfos,proto3" json:"data_center_infos,omitempty"` + RemoteVolumeCount uint64 `protobuf:"varint,7,opt,name=remote_volume_count,json=remoteVolumeCount,proto3" json:"remote_volume_count,omitempty"` +} + +func (x *TopologyInfo) Reset() { + *x = 
TopologyInfo{} + if protoimpl.UnsafeEnabled { + mi := &file_master_proto_msgTypes[26] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TopologyInfo) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TopologyInfo) ProtoMessage() {} + +func (x *TopologyInfo) ProtoReflect() protoreflect.Message { + mi := &file_master_proto_msgTypes[26] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TopologyInfo.ProtoReflect.Descriptor instead. +func (*TopologyInfo) Descriptor() ([]byte, []int) { + return file_master_proto_rawDescGZIP(), []int{26} +} + +func (x *TopologyInfo) GetId() string { + if x != nil { + return x.Id + } + return "" +} + +func (x *TopologyInfo) GetVolumeCount() uint64 { + if x != nil { + return x.VolumeCount + } + return 0 +} + +func (x *TopologyInfo) GetMaxVolumeCount() uint64 { + if x != nil { + return x.MaxVolumeCount + } + return 0 +} + +func (x *TopologyInfo) GetFreeVolumeCount() uint64 { + if x != nil { + return x.FreeVolumeCount + } + return 0 +} + +func (x *TopologyInfo) GetActiveVolumeCount() uint64 { + if x != nil { + return x.ActiveVolumeCount + } + return 0 +} + +func (x *TopologyInfo) GetDataCenterInfos() []*DataCenterInfo { + if x != nil { + return x.DataCenterInfos + } + return nil +} + +func (x *TopologyInfo) GetRemoteVolumeCount() uint64 { + if x != nil { + return x.RemoteVolumeCount + } + return 0 +} + +type VolumeListRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *VolumeListRequest) Reset() { + *x = VolumeListRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_master_proto_msgTypes[27] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *VolumeListRequest) 
String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VolumeListRequest) ProtoMessage() {} + +func (x *VolumeListRequest) ProtoReflect() protoreflect.Message { + mi := &file_master_proto_msgTypes[27] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use VolumeListRequest.ProtoReflect.Descriptor instead. +func (*VolumeListRequest) Descriptor() ([]byte, []int) { + return file_master_proto_rawDescGZIP(), []int{27} +} + +type VolumeListResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + TopologyInfo *TopologyInfo `protobuf:"bytes,1,opt,name=topology_info,json=topologyInfo,proto3" json:"topology_info,omitempty"` + VolumeSizeLimitMb uint64 `protobuf:"varint,2,opt,name=volume_size_limit_mb,json=volumeSizeLimitMb,proto3" json:"volume_size_limit_mb,omitempty"` +} + +func (x *VolumeListResponse) Reset() { + *x = VolumeListResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_master_proto_msgTypes[28] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *VolumeListResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VolumeListResponse) ProtoMessage() {} + +func (x *VolumeListResponse) ProtoReflect() protoreflect.Message { + mi := &file_master_proto_msgTypes[28] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use VolumeListResponse.ProtoReflect.Descriptor instead. 
+func (*VolumeListResponse) Descriptor() ([]byte, []int) { + return file_master_proto_rawDescGZIP(), []int{28} +} + +func (x *VolumeListResponse) GetTopologyInfo() *TopologyInfo { + if x != nil { + return x.TopologyInfo + } + return nil +} + +func (x *VolumeListResponse) GetVolumeSizeLimitMb() uint64 { + if x != nil { + return x.VolumeSizeLimitMb + } + return 0 +} + +type LookupEcVolumeRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` +} + +func (x *LookupEcVolumeRequest) Reset() { + *x = LookupEcVolumeRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_master_proto_msgTypes[29] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *LookupEcVolumeRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LookupEcVolumeRequest) ProtoMessage() {} + +func (x *LookupEcVolumeRequest) ProtoReflect() protoreflect.Message { + mi := &file_master_proto_msgTypes[29] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use LookupEcVolumeRequest.ProtoReflect.Descriptor instead. 
+func (*LookupEcVolumeRequest) Descriptor() ([]byte, []int) { + return file_master_proto_rawDescGZIP(), []int{29} +} + +func (x *LookupEcVolumeRequest) GetVolumeId() uint32 { + if x != nil { + return x.VolumeId + } + return 0 +} + +type LookupEcVolumeResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` + ShardIdLocations []*LookupEcVolumeResponse_EcShardIdLocation `protobuf:"bytes,2,rep,name=shard_id_locations,json=shardIdLocations,proto3" json:"shard_id_locations,omitempty"` +} + +func (x *LookupEcVolumeResponse) Reset() { + *x = LookupEcVolumeResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_master_proto_msgTypes[30] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *LookupEcVolumeResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LookupEcVolumeResponse) ProtoMessage() {} + +func (x *LookupEcVolumeResponse) ProtoReflect() protoreflect.Message { + mi := &file_master_proto_msgTypes[30] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use LookupEcVolumeResponse.ProtoReflect.Descriptor instead. 
+func (*LookupEcVolumeResponse) Descriptor() ([]byte, []int) { + return file_master_proto_rawDescGZIP(), []int{30} +} + +func (x *LookupEcVolumeResponse) GetVolumeId() uint32 { + if x != nil { + return x.VolumeId + } + return 0 +} + +func (x *LookupEcVolumeResponse) GetShardIdLocations() []*LookupEcVolumeResponse_EcShardIdLocation { + if x != nil { + return x.ShardIdLocations + } + return nil +} + +type GetMasterConfigurationRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *GetMasterConfigurationRequest) Reset() { + *x = GetMasterConfigurationRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_master_proto_msgTypes[31] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetMasterConfigurationRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetMasterConfigurationRequest) ProtoMessage() {} + +func (x *GetMasterConfigurationRequest) ProtoReflect() protoreflect.Message { + mi := &file_master_proto_msgTypes[31] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetMasterConfigurationRequest.ProtoReflect.Descriptor instead. 
+func (*GetMasterConfigurationRequest) Descriptor() ([]byte, []int) { + return file_master_proto_rawDescGZIP(), []int{31} +} + +type GetMasterConfigurationResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + MetricsAddress string `protobuf:"bytes,1,opt,name=metrics_address,json=metricsAddress,proto3" json:"metrics_address,omitempty"` + MetricsIntervalSeconds uint32 `protobuf:"varint,2,opt,name=metrics_interval_seconds,json=metricsIntervalSeconds,proto3" json:"metrics_interval_seconds,omitempty"` +} + +func (x *GetMasterConfigurationResponse) Reset() { + *x = GetMasterConfigurationResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_master_proto_msgTypes[32] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetMasterConfigurationResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetMasterConfigurationResponse) ProtoMessage() {} + +func (x *GetMasterConfigurationResponse) ProtoReflect() protoreflect.Message { + mi := &file_master_proto_msgTypes[32] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetMasterConfigurationResponse.ProtoReflect.Descriptor instead. 
+func (*GetMasterConfigurationResponse) Descriptor() ([]byte, []int) { + return file_master_proto_rawDescGZIP(), []int{32} +} + +func (x *GetMasterConfigurationResponse) GetMetricsAddress() string { + if x != nil { + return x.MetricsAddress + } + return "" +} + +func (x *GetMasterConfigurationResponse) GetMetricsIntervalSeconds() uint32 { + if x != nil { + return x.MetricsIntervalSeconds + } + return 0 +} + +type ListMasterClientsRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ClientType string `protobuf:"bytes,1,opt,name=client_type,json=clientType,proto3" json:"client_type,omitempty"` +} + +func (x *ListMasterClientsRequest) Reset() { + *x = ListMasterClientsRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_master_proto_msgTypes[33] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListMasterClientsRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListMasterClientsRequest) ProtoMessage() {} + +func (x *ListMasterClientsRequest) ProtoReflect() protoreflect.Message { + mi := &file_master_proto_msgTypes[33] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListMasterClientsRequest.ProtoReflect.Descriptor instead. 
+func (*ListMasterClientsRequest) Descriptor() ([]byte, []int) { + return file_master_proto_rawDescGZIP(), []int{33} +} + +func (x *ListMasterClientsRequest) GetClientType() string { + if x != nil { + return x.ClientType + } + return "" +} + +type ListMasterClientsResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + GrpcAddresses []string `protobuf:"bytes,1,rep,name=grpc_addresses,json=grpcAddresses,proto3" json:"grpc_addresses,omitempty"` +} + +func (x *ListMasterClientsResponse) Reset() { + *x = ListMasterClientsResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_master_proto_msgTypes[34] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListMasterClientsResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListMasterClientsResponse) ProtoMessage() {} + +func (x *ListMasterClientsResponse) ProtoReflect() protoreflect.Message { + mi := &file_master_proto_msgTypes[34] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListMasterClientsResponse.ProtoReflect.Descriptor instead. 
+func (*ListMasterClientsResponse) Descriptor() ([]byte, []int) { + return file_master_proto_rawDescGZIP(), []int{34} +} + +func (x *ListMasterClientsResponse) GetGrpcAddresses() []string { + if x != nil { + return x.GrpcAddresses + } + return nil +} + +type LeaseAdminTokenRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + PreviousToken int64 `protobuf:"varint,1,opt,name=previous_token,json=previousToken,proto3" json:"previous_token,omitempty"` + PreviousLockTime int64 `protobuf:"varint,2,opt,name=previous_lock_time,json=previousLockTime,proto3" json:"previous_lock_time,omitempty"` + LockName string `protobuf:"bytes,3,opt,name=lock_name,json=lockName,proto3" json:"lock_name,omitempty"` +} + +func (x *LeaseAdminTokenRequest) Reset() { + *x = LeaseAdminTokenRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_master_proto_msgTypes[35] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *LeaseAdminTokenRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LeaseAdminTokenRequest) ProtoMessage() {} + +func (x *LeaseAdminTokenRequest) ProtoReflect() protoreflect.Message { + mi := &file_master_proto_msgTypes[35] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use LeaseAdminTokenRequest.ProtoReflect.Descriptor instead. 
+func (*LeaseAdminTokenRequest) Descriptor() ([]byte, []int) { + return file_master_proto_rawDescGZIP(), []int{35} +} + +func (x *LeaseAdminTokenRequest) GetPreviousToken() int64 { + if x != nil { + return x.PreviousToken + } + return 0 +} + +func (x *LeaseAdminTokenRequest) GetPreviousLockTime() int64 { + if x != nil { + return x.PreviousLockTime + } + return 0 +} + +func (x *LeaseAdminTokenRequest) GetLockName() string { + if x != nil { + return x.LockName + } + return "" +} + +type LeaseAdminTokenResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Token int64 `protobuf:"varint,1,opt,name=token,proto3" json:"token,omitempty"` + LockTsNs int64 `protobuf:"varint,2,opt,name=lock_ts_ns,json=lockTsNs,proto3" json:"lock_ts_ns,omitempty"` +} + +func (x *LeaseAdminTokenResponse) Reset() { + *x = LeaseAdminTokenResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_master_proto_msgTypes[36] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *LeaseAdminTokenResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LeaseAdminTokenResponse) ProtoMessage() {} + +func (x *LeaseAdminTokenResponse) ProtoReflect() protoreflect.Message { + mi := &file_master_proto_msgTypes[36] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use LeaseAdminTokenResponse.ProtoReflect.Descriptor instead. 
+func (*LeaseAdminTokenResponse) Descriptor() ([]byte, []int) { + return file_master_proto_rawDescGZIP(), []int{36} +} + +func (x *LeaseAdminTokenResponse) GetToken() int64 { + if x != nil { + return x.Token + } + return 0 +} + +func (x *LeaseAdminTokenResponse) GetLockTsNs() int64 { + if x != nil { + return x.LockTsNs + } + return 0 +} + +type ReleaseAdminTokenRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + PreviousToken int64 `protobuf:"varint,1,opt,name=previous_token,json=previousToken,proto3" json:"previous_token,omitempty"` + PreviousLockTime int64 `protobuf:"varint,2,opt,name=previous_lock_time,json=previousLockTime,proto3" json:"previous_lock_time,omitempty"` + LockName string `protobuf:"bytes,3,opt,name=lock_name,json=lockName,proto3" json:"lock_name,omitempty"` +} + +func (x *ReleaseAdminTokenRequest) Reset() { + *x = ReleaseAdminTokenRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_master_proto_msgTypes[37] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ReleaseAdminTokenRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ReleaseAdminTokenRequest) ProtoMessage() {} + +func (x *ReleaseAdminTokenRequest) ProtoReflect() protoreflect.Message { + mi := &file_master_proto_msgTypes[37] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ReleaseAdminTokenRequest.ProtoReflect.Descriptor instead. 
+func (*ReleaseAdminTokenRequest) Descriptor() ([]byte, []int) { + return file_master_proto_rawDescGZIP(), []int{37} } -func (m *LookupVolumeResponse) Reset() { *m = LookupVolumeResponse{} } -func (m *LookupVolumeResponse) String() string { return proto.CompactTextString(m) } -func (*LookupVolumeResponse) ProtoMessage() {} -func (*LookupVolumeResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} } +func (x *ReleaseAdminTokenRequest) GetPreviousToken() int64 { + if x != nil { + return x.PreviousToken + } + return 0 +} -func (m *LookupVolumeResponse) GetVolumeIdLocations() []*LookupVolumeResponse_VolumeIdLocation { - if m != nil { - return m.VolumeIdLocations +func (x *ReleaseAdminTokenRequest) GetPreviousLockTime() int64 { + if x != nil { + return x.PreviousLockTime } - return nil + return 0 } -type LookupVolumeResponse_VolumeIdLocation struct { - VolumeId string `protobuf:"bytes,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` - Locations []*Location `protobuf:"bytes,2,rep,name=locations" json:"locations,omitempty"` - Error string `protobuf:"bytes,3,opt,name=error" json:"error,omitempty"` +func (x *ReleaseAdminTokenRequest) GetLockName() string { + if x != nil { + return x.LockName + } + return "" } -func (m *LookupVolumeResponse_VolumeIdLocation) Reset() { *m = LookupVolumeResponse_VolumeIdLocation{} } -func (m *LookupVolumeResponse_VolumeIdLocation) String() string { return proto.CompactTextString(m) } -func (*LookupVolumeResponse_VolumeIdLocation) ProtoMessage() {} -func (*LookupVolumeResponse_VolumeIdLocation) Descriptor() ([]byte, []int) { - return fileDescriptor0, []int{8, 0} +type ReleaseAdminTokenResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields } -func (m *LookupVolumeResponse_VolumeIdLocation) GetVolumeId() string { - if m != nil { - return m.VolumeId +func (x *ReleaseAdminTokenResponse) Reset() { + *x = ReleaseAdminTokenResponse{} + if 
protoimpl.UnsafeEnabled { + mi := &file_master_proto_msgTypes[38] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return "" } -func (m *LookupVolumeResponse_VolumeIdLocation) GetLocations() []*Location { - if m != nil { - return m.Locations - } - return nil +func (x *ReleaseAdminTokenResponse) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *LookupVolumeResponse_VolumeIdLocation) GetError() string { - if m != nil { - return m.Error +func (*ReleaseAdminTokenResponse) ProtoMessage() {} + +func (x *ReleaseAdminTokenResponse) ProtoReflect() protoreflect.Message { + mi := &file_master_proto_msgTypes[38] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return "" + return mi.MessageOf(x) } -type Location struct { - Url string `protobuf:"bytes,1,opt,name=url" json:"url,omitempty"` - PublicUrl string `protobuf:"bytes,2,opt,name=public_url,json=publicUrl" json:"public_url,omitempty"` +// Deprecated: Use ReleaseAdminTokenResponse.ProtoReflect.Descriptor instead. 
+func (*ReleaseAdminTokenResponse) Descriptor() ([]byte, []int) { + return file_master_proto_rawDescGZIP(), []int{38} } -func (m *Location) Reset() { *m = Location{} } -func (m *Location) String() string { return proto.CompactTextString(m) } -func (*Location) ProtoMessage() {} -func (*Location) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} } +type SuperBlockExtra_ErasureCoding struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func (m *Location) GetUrl() string { - if m != nil { - return m.Url - } - return "" + Data uint32 `protobuf:"varint,1,opt,name=data,proto3" json:"data,omitempty"` + Parity uint32 `protobuf:"varint,2,opt,name=parity,proto3" json:"parity,omitempty"` + VolumeIds []uint32 `protobuf:"varint,3,rep,packed,name=volume_ids,json=volumeIds,proto3" json:"volume_ids,omitempty"` } -func (m *Location) GetPublicUrl() string { - if m != nil { - return m.PublicUrl +func (x *SuperBlockExtra_ErasureCoding) Reset() { + *x = SuperBlockExtra_ErasureCoding{} + if protoimpl.UnsafeEnabled { + mi := &file_master_proto_msgTypes[40] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return "" } -type AssignRequest struct { - Count uint64 `protobuf:"varint,1,opt,name=count" json:"count,omitempty"` - Replication string `protobuf:"bytes,2,opt,name=replication" json:"replication,omitempty"` - Collection string `protobuf:"bytes,3,opt,name=collection" json:"collection,omitempty"` - Ttl string `protobuf:"bytes,4,opt,name=ttl" json:"ttl,omitempty"` - DataCenter string `protobuf:"bytes,5,opt,name=data_center,json=dataCenter" json:"data_center,omitempty"` - Rack string `protobuf:"bytes,6,opt,name=rack" json:"rack,omitempty"` - DataNode string `protobuf:"bytes,7,opt,name=data_node,json=dataNode" json:"data_node,omitempty"` +func (x *SuperBlockExtra_ErasureCoding) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *AssignRequest) Reset() { *m = 
AssignRequest{} } -func (m *AssignRequest) String() string { return proto.CompactTextString(m) } -func (*AssignRequest) ProtoMessage() {} -func (*AssignRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10} } +func (*SuperBlockExtra_ErasureCoding) ProtoMessage() {} -func (m *AssignRequest) GetCount() uint64 { - if m != nil { - return m.Count +func (x *SuperBlockExtra_ErasureCoding) ProtoReflect() protoreflect.Message { + mi := &file_master_proto_msgTypes[40] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return 0 + return mi.MessageOf(x) } -func (m *AssignRequest) GetReplication() string { - if m != nil { - return m.Replication - } - return "" +// Deprecated: Use SuperBlockExtra_ErasureCoding.ProtoReflect.Descriptor instead. +func (*SuperBlockExtra_ErasureCoding) Descriptor() ([]byte, []int) { + return file_master_proto_rawDescGZIP(), []int{7, 0} } -func (m *AssignRequest) GetCollection() string { - if m != nil { - return m.Collection +func (x *SuperBlockExtra_ErasureCoding) GetData() uint32 { + if x != nil { + return x.Data } - return "" + return 0 } -func (m *AssignRequest) GetTtl() string { - if m != nil { - return m.Ttl +func (x *SuperBlockExtra_ErasureCoding) GetParity() uint32 { + if x != nil { + return x.Parity } - return "" + return 0 } -func (m *AssignRequest) GetDataCenter() string { - if m != nil { - return m.DataCenter +func (x *SuperBlockExtra_ErasureCoding) GetVolumeIds() []uint32 { + if x != nil { + return x.VolumeIds } - return "" + return nil } -func (m *AssignRequest) GetRack() string { - if m != nil { - return m.Rack - } - return "" +type LookupVolumeResponse_VolumeIdLocation struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + VolumeId string `protobuf:"bytes,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` + 
Locations []*Location `protobuf:"bytes,2,rep,name=locations,proto3" json:"locations,omitempty"` + Error string `protobuf:"bytes,3,opt,name=error,proto3" json:"error,omitempty"` } -func (m *AssignRequest) GetDataNode() string { - if m != nil { - return m.DataNode +func (x *LookupVolumeResponse_VolumeIdLocation) Reset() { + *x = LookupVolumeResponse_VolumeIdLocation{} + if protoimpl.UnsafeEnabled { + mi := &file_master_proto_msgTypes[41] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return "" } -type AssignResponse struct { - Fid string `protobuf:"bytes,1,opt,name=fid" json:"fid,omitempty"` - Url string `protobuf:"bytes,2,opt,name=url" json:"url,omitempty"` - PublicUrl string `protobuf:"bytes,3,opt,name=public_url,json=publicUrl" json:"public_url,omitempty"` - Count uint64 `protobuf:"varint,4,opt,name=count" json:"count,omitempty"` - Error string `protobuf:"bytes,5,opt,name=error" json:"error,omitempty"` +func (x *LookupVolumeResponse_VolumeIdLocation) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *AssignResponse) Reset() { *m = AssignResponse{} } -func (m *AssignResponse) String() string { return proto.CompactTextString(m) } -func (*AssignResponse) ProtoMessage() {} -func (*AssignResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11} } +func (*LookupVolumeResponse_VolumeIdLocation) ProtoMessage() {} -func (m *AssignResponse) GetFid() string { - if m != nil { - return m.Fid +func (x *LookupVolumeResponse_VolumeIdLocation) ProtoReflect() protoreflect.Message { + mi := &file_master_proto_msgTypes[41] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return "" + return mi.MessageOf(x) } -func (m *AssignResponse) GetUrl() string { - if m != nil { - return m.Url - } - return "" +// Deprecated: Use LookupVolumeResponse_VolumeIdLocation.ProtoReflect.Descriptor 
instead. +func (*LookupVolumeResponse_VolumeIdLocation) Descriptor() ([]byte, []int) { + return file_master_proto_rawDescGZIP(), []int{11, 0} } -func (m *AssignResponse) GetPublicUrl() string { - if m != nil { - return m.PublicUrl +func (x *LookupVolumeResponse_VolumeIdLocation) GetVolumeId() string { + if x != nil { + return x.VolumeId } return "" } -func (m *AssignResponse) GetCount() uint64 { - if m != nil { - return m.Count +func (x *LookupVolumeResponse_VolumeIdLocation) GetLocations() []*Location { + if x != nil { + return x.Locations } - return 0 + return nil } -func (m *AssignResponse) GetError() string { - if m != nil { - return m.Error +func (x *LookupVolumeResponse_VolumeIdLocation) GetError() string { + if x != nil { + return x.Error } return "" } -type StatisticsRequest struct { - Replication string `protobuf:"bytes,1,opt,name=replication" json:"replication,omitempty"` - Collection string `protobuf:"bytes,2,opt,name=collection" json:"collection,omitempty"` - Ttl string `protobuf:"bytes,3,opt,name=ttl" json:"ttl,omitempty"` -} +type LookupEcVolumeResponse_EcShardIdLocation struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func (m *StatisticsRequest) Reset() { *m = StatisticsRequest{} } -func (m *StatisticsRequest) String() string { return proto.CompactTextString(m) } -func (*StatisticsRequest) ProtoMessage() {} -func (*StatisticsRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{12} } + ShardId uint32 `protobuf:"varint,1,opt,name=shard_id,json=shardId,proto3" json:"shard_id,omitempty"` + Locations []*Location `protobuf:"bytes,2,rep,name=locations,proto3" json:"locations,omitempty"` +} -func (m *StatisticsRequest) GetReplication() string { - if m != nil { - return m.Replication +func (x *LookupEcVolumeResponse_EcShardIdLocation) Reset() { + *x = LookupEcVolumeResponse_EcShardIdLocation{} + if protoimpl.UnsafeEnabled { + mi := &file_master_proto_msgTypes[42] + ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return "" } -func (m *StatisticsRequest) GetCollection() string { - if m != nil { - return m.Collection - } - return "" +func (x *LookupEcVolumeResponse_EcShardIdLocation) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *StatisticsRequest) GetTtl() string { - if m != nil { - return m.Ttl +func (*LookupEcVolumeResponse_EcShardIdLocation) ProtoMessage() {} + +func (x *LookupEcVolumeResponse_EcShardIdLocation) ProtoReflect() protoreflect.Message { + mi := &file_master_proto_msgTypes[42] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return "" + return mi.MessageOf(x) } -type StatisticsResponse struct { - Replication string `protobuf:"bytes,1,opt,name=replication" json:"replication,omitempty"` - Collection string `protobuf:"bytes,2,opt,name=collection" json:"collection,omitempty"` - Ttl string `protobuf:"bytes,3,opt,name=ttl" json:"ttl,omitempty"` - TotalSize uint64 `protobuf:"varint,4,opt,name=total_size,json=totalSize" json:"total_size,omitempty"` - UsedSize uint64 `protobuf:"varint,5,opt,name=used_size,json=usedSize" json:"used_size,omitempty"` - FileCount uint64 `protobuf:"varint,6,opt,name=file_count,json=fileCount" json:"file_count,omitempty"` +// Deprecated: Use LookupEcVolumeResponse_EcShardIdLocation.ProtoReflect.Descriptor instead. 
+func (*LookupEcVolumeResponse_EcShardIdLocation) Descriptor() ([]byte, []int) { + return file_master_proto_rawDescGZIP(), []int{30, 0} } -func (m *StatisticsResponse) Reset() { *m = StatisticsResponse{} } -func (m *StatisticsResponse) String() string { return proto.CompactTextString(m) } -func (*StatisticsResponse) ProtoMessage() {} -func (*StatisticsResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{13} } - -func (m *StatisticsResponse) GetReplication() string { - if m != nil { - return m.Replication +func (x *LookupEcVolumeResponse_EcShardIdLocation) GetShardId() uint32 { + if x != nil { + return x.ShardId } - return "" + return 0 } -func (m *StatisticsResponse) GetCollection() string { - if m != nil { - return m.Collection +func (x *LookupEcVolumeResponse_EcShardIdLocation) GetLocations() []*Location { + if x != nil { + return x.Locations } - return "" + return nil } -func (m *StatisticsResponse) GetTtl() string { - if m != nil { - return m.Ttl - } - return "" +var File_master_proto protoreflect.FileDescriptor + +var file_master_proto_rawDesc = []byte{ + 0x0a, 0x0c, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x09, + 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x22, 0x8b, 0x06, 0x0a, 0x09, 0x48, 0x65, + 0x61, 0x72, 0x74, 0x62, 0x65, 0x61, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x70, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x70, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x6f, 0x72, 0x74, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x04, 0x70, 0x6f, 0x72, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x70, + 0x75, 0x62, 0x6c, 0x69, 0x63, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x09, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x55, 0x72, 0x6c, 0x12, 0x28, 0x0a, 0x10, 0x6d, 0x61, + 0x78, 0x5f, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0e, 0x6d, 0x61, 0x78, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, + 0x6f, 0x75, 0x6e, 
0x74, 0x12, 0x20, 0x0a, 0x0c, 0x6d, 0x61, 0x78, 0x5f, 0x66, 0x69, 0x6c, 0x65, + 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0a, 0x6d, 0x61, 0x78, 0x46, + 0x69, 0x6c, 0x65, 0x4b, 0x65, 0x79, 0x12, 0x1f, 0x0a, 0x0b, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x63, + 0x65, 0x6e, 0x74, 0x65, 0x72, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x64, 0x61, 0x74, + 0x61, 0x43, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x72, 0x61, 0x63, 0x6b, 0x18, + 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x72, 0x61, 0x63, 0x6b, 0x12, 0x1d, 0x0a, 0x0a, 0x61, + 0x64, 0x6d, 0x69, 0x6e, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0d, 0x52, + 0x09, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x50, 0x6f, 0x72, 0x74, 0x12, 0x3d, 0x0a, 0x07, 0x76, 0x6f, + 0x6c, 0x75, 0x6d, 0x65, 0x73, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x6d, 0x61, + 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x6e, + 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, + 0x52, 0x07, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x73, 0x12, 0x49, 0x0a, 0x0b, 0x6e, 0x65, 0x77, + 0x5f, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x73, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, + 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, + 0x65, 0x53, 0x68, 0x6f, 0x72, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x0a, 0x6e, 0x65, 0x77, 0x56, 0x6f, 0x6c, + 0x75, 0x6d, 0x65, 0x73, 0x12, 0x51, 0x0a, 0x0f, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x5f, + 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x73, 0x18, 0x0b, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, + 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, + 0x53, 0x68, 0x6f, 0x72, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x0e, 
0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, + 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x73, 0x12, 0x24, 0x0a, 0x0e, 0x68, 0x61, 0x73, 0x5f, 0x6e, + 0x6f, 0x5f, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x73, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x0c, 0x68, 0x61, 0x73, 0x4e, 0x6f, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x73, 0x12, 0x47, 0x0a, + 0x09, 0x65, 0x63, 0x5f, 0x73, 0x68, 0x61, 0x72, 0x64, 0x73, 0x18, 0x10, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x2a, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, + 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x49, 0x6e, 0x66, 0x6f, 0x72, 0x6d, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x08, 0x65, 0x63, + 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x12, 0x4e, 0x0a, 0x0d, 0x6e, 0x65, 0x77, 0x5f, 0x65, 0x63, + 0x5f, 0x73, 0x68, 0x61, 0x72, 0x64, 0x73, 0x18, 0x11, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2a, 0x2e, + 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, + 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x49, 0x6e, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x0b, 0x6e, 0x65, 0x77, 0x45, 0x63, + 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x12, 0x56, 0x0a, 0x11, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, + 0x64, 0x5f, 0x65, 0x63, 0x5f, 0x73, 0x68, 0x61, 0x72, 0x64, 0x73, 0x18, 0x12, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x2a, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, + 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x49, 0x6e, 0x66, 0x6f, 0x72, + 0x6d, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x0f, 0x64, + 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x12, 0x27, + 0x0a, 0x10, 0x68, 0x61, 0x73, 0x5f, 0x6e, 0x6f, 0x5f, 0x65, 0x63, 0x5f, 0x73, 0x68, 0x61, 0x72, + 0x64, 0x73, 0x18, 0x13, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x68, 0x61, 0x73, 0x4e, 0x6f, 
0x45, + 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x22, 0x80, 0x02, 0x0a, 0x11, 0x48, 0x65, 0x61, 0x72, + 0x74, 0x62, 0x65, 0x61, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2a, 0x0a, + 0x11, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x5f, 0x6c, 0x69, 0x6d, + 0x69, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0f, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, + 0x53, 0x69, 0x7a, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x6c, 0x65, 0x61, + 0x64, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x6c, 0x65, 0x61, 0x64, 0x65, + 0x72, 0x12, 0x27, 0x0a, 0x0f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x5f, 0x61, 0x64, 0x64, + 0x72, 0x65, 0x73, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x6d, 0x65, 0x74, 0x72, + 0x69, 0x63, 0x73, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x38, 0x0a, 0x18, 0x6d, 0x65, + 0x74, 0x72, 0x69, 0x63, 0x73, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x5f, 0x73, + 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x16, 0x6d, 0x65, + 0x74, 0x72, 0x69, 0x63, 0x73, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x53, 0x65, 0x63, + 0x6f, 0x6e, 0x64, 0x73, 0x12, 0x44, 0x0a, 0x10, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, + 0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x19, + 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x53, 0x74, 0x6f, 0x72, 0x61, + 0x67, 0x65, 0x42, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x52, 0x0f, 0x73, 0x74, 0x6f, 0x72, 0x61, + 0x67, 0x65, 0x42, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x73, 0x22, 0xfb, 0x03, 0x0a, 0x18, 0x56, + 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0d, 0x52, 0x02, 0x69, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x73, 0x69, 0x7a, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x04, 
0x52, 0x04, 0x73, 0x69, 0x7a, 0x65, 0x12, 0x1e, 0x0a, 0x0a, 0x63, + 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1d, 0x0a, 0x0a, 0x66, + 0x69, 0x6c, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, + 0x09, 0x66, 0x69, 0x6c, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x64, 0x65, + 0x6c, 0x65, 0x74, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x04, + 0x52, 0x0b, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x2c, 0x0a, + 0x12, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x5f, 0x63, 0x6f, + 0x75, 0x6e, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x04, 0x52, 0x10, 0x64, 0x65, 0x6c, 0x65, 0x74, + 0x65, 0x64, 0x42, 0x79, 0x74, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x72, + 0x65, 0x61, 0x64, 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, + 0x72, 0x65, 0x61, 0x64, 0x4f, 0x6e, 0x6c, 0x79, 0x12, 0x2b, 0x0a, 0x11, 0x72, 0x65, 0x70, 0x6c, + 0x69, 0x63, 0x61, 0x5f, 0x70, 0x6c, 0x61, 0x63, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x18, 0x08, 0x20, + 0x01, 0x28, 0x0d, 0x52, 0x10, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x50, 0x6c, 0x61, 0x63, + 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, + 0x18, 0x09, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, + 0x10, 0x0a, 0x03, 0x74, 0x74, 0x6c, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x03, 0x74, 0x74, + 0x6c, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x76, + 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0f, 0x63, 0x6f, 0x6d, + 0x70, 0x61, 0x63, 0x74, 0x52, 0x65, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x2c, 0x0a, 0x12, + 0x6d, 0x6f, 0x64, 0x69, 0x66, 0x69, 0x65, 0x64, 0x5f, 0x61, 0x74, 
0x5f, 0x73, 0x65, 0x63, 0x6f, + 0x6e, 0x64, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x03, 0x52, 0x10, 0x6d, 0x6f, 0x64, 0x69, 0x66, 0x69, + 0x65, 0x64, 0x41, 0x74, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x12, 0x2e, 0x0a, 0x13, 0x72, 0x65, + 0x6d, 0x6f, 0x74, 0x65, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x6e, 0x61, 0x6d, + 0x65, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x53, + 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x2c, 0x0a, 0x12, 0x72, 0x65, + 0x6d, 0x6f, 0x74, 0x65, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x6b, 0x65, 0x79, + 0x18, 0x0e, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x53, 0x74, + 0x6f, 0x72, 0x61, 0x67, 0x65, 0x4b, 0x65, 0x79, 0x22, 0xa8, 0x01, 0x0a, 0x1d, 0x56, 0x6f, 0x6c, + 0x75, 0x6d, 0x65, 0x53, 0x68, 0x6f, 0x72, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x02, 0x69, 0x64, 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, + 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, + 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2b, 0x0a, 0x11, 0x72, 0x65, + 0x70, 0x6c, 0x69, 0x63, 0x61, 0x5f, 0x70, 0x6c, 0x61, 0x63, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x18, + 0x08, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x10, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x50, 0x6c, + 0x61, 0x63, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, + 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x74, 0x74, 0x6c, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x03, + 0x74, 0x74, 0x6c, 0x22, 0x75, 0x0a, 0x1f, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, + 0x68, 0x61, 0x72, 0x64, 0x49, 0x6e, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, + 0x65, 
0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0d, 0x52, 0x02, 0x69, 0x64, 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x22, 0x0a, 0x0d, 0x65, 0x63, 0x5f, 0x69, 0x6e, 0x64, + 0x65, 0x78, 0x5f, 0x62, 0x69, 0x74, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0b, 0x65, + 0x63, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x42, 0x69, 0x74, 0x73, 0x22, 0xbe, 0x01, 0x0a, 0x0e, 0x53, + 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x42, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x12, 0x12, 0x0a, + 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, + 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, + 0x64, 0x12, 0x49, 0x0a, 0x0a, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, + 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, + 0x62, 0x2e, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x42, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, + 0x2e, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x52, 0x0a, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x1a, 0x3d, 0x0a, 0x0f, + 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, + 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, + 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x07, 0x0a, 0x05, 0x45, + 0x6d, 0x70, 0x74, 0x79, 0x22, 0xbe, 0x01, 0x0a, 0x0f, 0x53, 0x75, 0x70, 0x65, 0x72, 0x42, 0x6c, + 0x6f, 0x63, 0x6b, 0x45, 0x78, 0x74, 0x72, 0x61, 0x12, 0x4f, 0x0a, 0x0e, 0x65, 0x72, 0x61, 0x73, + 0x75, 0x72, 0x65, 0x5f, 0x63, 0x6f, 0x64, 
0x69, 0x6e, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x28, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x53, 0x75, 0x70, + 0x65, 0x72, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x45, 0x78, 0x74, 0x72, 0x61, 0x2e, 0x45, 0x72, 0x61, + 0x73, 0x75, 0x72, 0x65, 0x43, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x52, 0x0d, 0x65, 0x72, 0x61, 0x73, + 0x75, 0x72, 0x65, 0x43, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x1a, 0x5a, 0x0a, 0x0d, 0x45, 0x72, 0x61, + 0x73, 0x75, 0x72, 0x65, 0x43, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61, + 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x12, 0x16, + 0x0a, 0x06, 0x70, 0x61, 0x72, 0x69, 0x74, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, + 0x70, 0x61, 0x72, 0x69, 0x74, 0x79, 0x12, 0x1d, 0x0a, 0x0a, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, + 0x5f, 0x69, 0x64, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0d, 0x52, 0x09, 0x76, 0x6f, 0x6c, 0x75, + 0x6d, 0x65, 0x49, 0x64, 0x73, 0x22, 0x47, 0x0a, 0x14, 0x4b, 0x65, 0x65, 0x70, 0x43, 0x6f, 0x6e, + 0x6e, 0x65, 0x63, 0x74, 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, + 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x67, 0x72, 0x70, 0x63, 0x50, 0x6f, 0x72, 0x74, 0x22, 0x97, + 0x01, 0x0a, 0x0e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x72, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, + 0x75, 0x72, 0x6c, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x5f, 0x75, 0x72, + 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x55, + 0x72, 0x6c, 0x12, 0x19, 0x0a, 0x08, 0x6e, 0x65, 0x77, 0x5f, 0x76, 0x69, 0x64, 0x73, 0x18, 0x03, + 0x20, 0x03, 0x28, 0x0d, 0x52, 0x07, 0x6e, 0x65, 0x77, 0x56, 0x69, 0x64, 0x73, 
0x12, 0x21, 0x0a, + 0x0c, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x76, 0x69, 0x64, 0x73, 0x18, 0x04, 0x20, + 0x03, 0x28, 0x0d, 0x52, 0x0b, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x56, 0x69, 0x64, 0x73, + 0x12, 0x16, 0x0a, 0x06, 0x6c, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x06, 0x6c, 0x65, 0x61, 0x64, 0x65, 0x72, 0x22, 0x54, 0x0a, 0x13, 0x4c, 0x6f, 0x6f, 0x6b, + 0x75, 0x70, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x1d, 0x0a, 0x0a, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x01, 0x20, + 0x03, 0x28, 0x09, 0x52, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x73, 0x12, 0x1e, + 0x0a, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xf2, + 0x01, 0x0a, 0x14, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x60, 0x0a, 0x13, 0x76, 0x6f, 0x6c, 0x75, 0x6d, + 0x65, 0x5f, 0x69, 0x64, 0x5f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, + 0x2e, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x4c, 0x6f, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x11, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, + 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x78, 0x0a, 0x10, 0x56, 0x6f, 0x6c, + 0x75, 0x6d, 0x65, 0x49, 0x64, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1b, 0x0a, + 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x12, 0x31, 0x0a, 0x09, 0x6c, 0x6f, + 0x63, 0x61, 0x74, 
0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x13, 0x2e, + 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x09, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x14, 0x0a, + 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, + 0x72, 0x6f, 0x72, 0x22, 0x3b, 0x0a, 0x08, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, + 0x10, 0x0a, 0x03, 0x75, 0x72, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72, + 0x6c, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x5f, 0x75, 0x72, 0x6c, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x55, 0x72, 0x6c, + 0x22, 0xb3, 0x02, 0x0a, 0x0d, 0x41, 0x73, 0x73, 0x69, 0x67, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x04, 0x52, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x20, 0x0a, 0x0b, 0x72, 0x65, 0x70, 0x6c, + 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x72, + 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, + 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, + 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x74, 0x74, + 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x74, 0x74, 0x6c, 0x12, 0x1f, 0x0a, 0x0b, + 0x64, 0x61, 0x74, 0x61, 0x5f, 0x63, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0a, 0x64, 0x61, 0x74, 0x61, 0x43, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x12, 0x12, 0x0a, + 0x04, 0x72, 0x61, 0x63, 0x6b, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x72, 0x61, 0x63, + 0x6b, 0x12, 0x1b, 0x0a, 0x09, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x6e, 0x6f, 0x64, 0x65, 0x18, 0x07, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x64, 0x61, 0x74, 
0x61, 0x4e, 0x6f, 0x64, 0x65, 0x12, 0x32, + 0x0a, 0x16, 0x6d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x5f, 0x6d, 0x61, 0x70, 0x5f, 0x6d, 0x61, 0x78, + 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x5f, 0x6d, 0x62, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x12, + 0x6d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x4d, 0x61, 0x70, 0x4d, 0x61, 0x78, 0x53, 0x69, 0x7a, 0x65, + 0x4d, 0x62, 0x12, 0x32, 0x0a, 0x15, 0x57, 0x72, 0x69, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x76, + 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x09, 0x20, 0x01, 0x28, + 0x0d, 0x52, 0x13, 0x57, 0x72, 0x69, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x56, 0x6f, 0x6c, 0x75, 0x6d, + 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0x93, 0x01, 0x0a, 0x0e, 0x41, 0x73, 0x73, 0x69, 0x67, + 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x66, 0x69, 0x64, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x66, 0x69, 0x64, 0x12, 0x10, 0x0a, 0x03, 0x75, + 0x72, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72, 0x6c, 0x12, 0x1d, 0x0a, + 0x0a, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x09, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x55, 0x72, 0x6c, 0x12, 0x14, 0x0a, 0x05, + 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x63, 0x6f, 0x75, + 0x6e, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x61, 0x75, 0x74, 0x68, + 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x61, 0x75, 0x74, 0x68, 0x22, 0x67, 0x0a, 0x11, + 0x53, 0x74, 0x61, 0x74, 0x69, 0x73, 0x74, 0x69, 0x63, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x12, 0x20, 0x0a, 0x0b, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 
0x6f, + 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x74, 0x74, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x03, 0x74, 0x74, 0x6c, 0x22, 0xc3, 0x01, 0x0a, 0x12, 0x53, 0x74, 0x61, 0x74, 0x69, 0x73, + 0x74, 0x69, 0x63, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x20, 0x0a, 0x0b, + 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0b, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1e, + 0x0a, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x10, + 0x0a, 0x03, 0x74, 0x74, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x74, 0x74, 0x6c, + 0x12, 0x1d, 0x0a, 0x0a, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x53, 0x69, 0x7a, 0x65, 0x12, + 0x1b, 0x0a, 0x09, 0x75, 0x73, 0x65, 0x64, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x05, 0x20, 0x01, + 0x28, 0x04, 0x52, 0x08, 0x75, 0x73, 0x65, 0x64, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1d, 0x0a, 0x0a, + 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x04, + 0x52, 0x09, 0x66, 0x69, 0x6c, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0x41, 0x0a, 0x0b, 0x53, + 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x72, 0x65, + 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0b, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x10, 0x0a, 0x03, + 0x74, 0x74, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x74, 0x74, 0x6c, 0x22, 0x20, + 0x0a, 0x0a, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, + 0x6e, 0x61, 0x6d, 0x65, 0x18, 
0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x22, 0x7b, 0x0a, 0x15, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x69, + 0x73, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x34, 0x0a, 0x16, 0x69, 0x6e, 0x63, + 0x6c, 0x75, 0x64, 0x65, 0x5f, 0x6e, 0x6f, 0x72, 0x6d, 0x61, 0x6c, 0x5f, 0x76, 0x6f, 0x6c, 0x75, + 0x6d, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x14, 0x69, 0x6e, 0x63, 0x6c, 0x75, + 0x64, 0x65, 0x4e, 0x6f, 0x72, 0x6d, 0x61, 0x6c, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x73, 0x12, + 0x2c, 0x0a, 0x12, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, 0x65, 0x63, 0x5f, 0x76, 0x6f, + 0x6c, 0x75, 0x6d, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x69, 0x6e, 0x63, + 0x6c, 0x75, 0x64, 0x65, 0x45, 0x63, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x73, 0x22, 0x51, 0x0a, + 0x16, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x69, 0x73, 0x74, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x37, 0x0a, 0x0b, 0x63, 0x6f, 0x6c, 0x6c, 0x65, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x6d, + 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x52, 0x0b, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x22, 0x2d, 0x0a, 0x17, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, + 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, + 0x1a, 0x0a, 0x18, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x6c, + 0x65, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x91, 0x03, 0x0a, 0x0c, + 0x44, 0x61, 0x74, 0x61, 0x4e, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x0e, 0x0a, 0x02, + 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 
0x64, 0x12, 0x21, 0x0a, 0x0c, + 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x04, 0x52, 0x0b, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, + 0x28, 0x0a, 0x10, 0x6d, 0x61, 0x78, 0x5f, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x63, 0x6f, + 0x75, 0x6e, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0e, 0x6d, 0x61, 0x78, 0x56, 0x6f, + 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x2a, 0x0a, 0x11, 0x66, 0x72, 0x65, + 0x65, 0x5f, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x04, 0x52, 0x0f, 0x66, 0x72, 0x65, 0x65, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, + 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x2e, 0x0a, 0x13, 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, + 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x05, 0x20, 0x01, + 0x28, 0x04, 0x52, 0x11, 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, + 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x46, 0x0a, 0x0c, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, + 0x69, 0x6e, 0x66, 0x6f, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x6d, 0x61, + 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x6e, + 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, + 0x52, 0x0b, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x73, 0x12, 0x50, 0x0a, + 0x0e, 0x65, 0x63, 0x5f, 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x73, 0x18, + 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, + 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x49, + 0x6e, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, + 0x65, 0x52, 0x0c, 0x65, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x49, 0x6e, 0x66, 0x6f, 0x73, 0x12, + 0x2e, 
0x0a, 0x13, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x5f, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, + 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x08, 0x20, 0x01, 0x28, 0x04, 0x52, 0x11, 0x72, 0x65, + 0x6d, 0x6f, 0x74, 0x65, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x22, + 0xb4, 0x02, 0x0a, 0x08, 0x52, 0x61, 0x63, 0x6b, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x0e, 0x0a, 0x02, + 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x21, 0x0a, 0x0c, + 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x04, 0x52, 0x0b, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, + 0x28, 0x0a, 0x10, 0x6d, 0x61, 0x78, 0x5f, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x63, 0x6f, + 0x75, 0x6e, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0e, 0x6d, 0x61, 0x78, 0x56, 0x6f, + 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x2a, 0x0a, 0x11, 0x66, 0x72, 0x65, + 0x65, 0x5f, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x04, 0x52, 0x0f, 0x66, 0x72, 0x65, 0x65, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, + 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x2e, 0x0a, 0x13, 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, + 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x05, 0x20, 0x01, + 0x28, 0x04, 0x52, 0x11, 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, + 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x3f, 0x0a, 0x0f, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x6e, 0x6f, + 0x64, 0x65, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x17, + 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x4e, + 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x0d, 0x64, 0x61, 0x74, 0x61, 0x4e, 0x6f, 0x64, + 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x73, 0x12, 0x2e, 0x0a, 0x13, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, + 0x5f, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 
0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x07, 0x20, + 0x01, 0x28, 0x04, 0x52, 0x11, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x56, 0x6f, 0x6c, 0x75, 0x6d, + 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0xad, 0x02, 0x0a, 0x0e, 0x44, 0x61, 0x74, 0x61, 0x43, + 0x65, 0x6e, 0x74, 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x76, 0x6f, 0x6c, + 0x75, 0x6d, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, + 0x0b, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x28, 0x0a, 0x10, + 0x6d, 0x61, 0x78, 0x5f, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0e, 0x6d, 0x61, 0x78, 0x56, 0x6f, 0x6c, 0x75, 0x6d, + 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x2a, 0x0a, 0x11, 0x66, 0x72, 0x65, 0x65, 0x5f, 0x76, + 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x04, 0x52, 0x0f, 0x66, 0x72, 0x65, 0x65, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6f, 0x75, + 0x6e, 0x74, 0x12, 0x2e, 0x0a, 0x13, 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x76, 0x6f, 0x6c, + 0x75, 0x6d, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x04, 0x52, + 0x11, 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6f, 0x75, + 0x6e, 0x74, 0x12, 0x32, 0x0a, 0x0a, 0x72, 0x61, 0x63, 0x6b, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x73, + 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, + 0x70, 0x62, 0x2e, 0x52, 0x61, 0x63, 0x6b, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x09, 0x72, 0x61, 0x63, + 0x6b, 0x49, 0x6e, 0x66, 0x6f, 0x73, 0x12, 0x2e, 0x0a, 0x13, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, + 0x5f, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x07, 0x20, + 0x01, 0x28, 0x04, 0x52, 0x11, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x56, 0x6f, 
0x6c, 0x75, 0x6d, + 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0xbe, 0x02, 0x0a, 0x0c, 0x54, 0x6f, 0x70, 0x6f, 0x6c, + 0x6f, 0x67, 0x79, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x76, 0x6f, 0x6c, 0x75, 0x6d, + 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x76, + 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x28, 0x0a, 0x10, 0x6d, 0x61, + 0x78, 0x5f, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x04, 0x52, 0x0e, 0x6d, 0x61, 0x78, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, + 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x2a, 0x0a, 0x11, 0x66, 0x72, 0x65, 0x65, 0x5f, 0x76, 0x6f, 0x6c, + 0x75, 0x6d, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, + 0x0f, 0x66, 0x72, 0x65, 0x65, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, + 0x12, 0x2e, 0x0a, 0x13, 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x76, 0x6f, 0x6c, 0x75, 0x6d, + 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x04, 0x52, 0x11, 0x61, + 0x63, 0x74, 0x69, 0x76, 0x65, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, + 0x12, 0x45, 0x0a, 0x11, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x63, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x5f, + 0x69, 0x6e, 0x66, 0x6f, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x6d, 0x61, + 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x43, 0x65, 0x6e, 0x74, + 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x0f, 0x64, 0x61, 0x74, 0x61, 0x43, 0x65, 0x6e, 0x74, + 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x73, 0x12, 0x2e, 0x0a, 0x13, 0x72, 0x65, 0x6d, 0x6f, 0x74, + 0x65, 0x5f, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x07, + 0x20, 0x01, 0x28, 0x04, 0x52, 0x11, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x56, 0x6f, 0x6c, 0x75, + 0x6d, 0x65, 0x43, 
0x6f, 0x75, 0x6e, 0x74, 0x22, 0x13, 0x0a, 0x11, 0x56, 0x6f, 0x6c, 0x75, 0x6d, + 0x65, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x83, 0x01, 0x0a, + 0x12, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x3c, 0x0a, 0x0d, 0x74, 0x6f, 0x70, 0x6f, 0x6c, 0x6f, 0x67, 0x79, 0x5f, + 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x6d, 0x61, 0x73, + 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x54, 0x6f, 0x70, 0x6f, 0x6c, 0x6f, 0x67, 0x79, 0x49, + 0x6e, 0x66, 0x6f, 0x52, 0x0c, 0x74, 0x6f, 0x70, 0x6f, 0x6c, 0x6f, 0x67, 0x79, 0x49, 0x6e, 0x66, + 0x6f, 0x12, 0x2f, 0x0a, 0x14, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, + 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x5f, 0x6d, 0x62, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, + 0x11, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, + 0x4d, 0x62, 0x22, 0x34, 0x0a, 0x15, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x45, 0x63, 0x56, 0x6f, + 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x76, + 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, + 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x22, 0xfb, 0x01, 0x0a, 0x16, 0x4c, 0x6f, 0x6f, + 0x6b, 0x75, 0x70, 0x45, 0x63, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, + 0x12, 0x61, 0x0a, 0x12, 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x69, 0x64, 0x5f, 0x6c, 0x6f, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x6d, + 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x45, + 0x63, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 
0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, + 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x49, 0x64, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x10, 0x73, 0x68, 0x61, 0x72, 0x64, 0x49, 0x64, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x1a, 0x61, 0x0a, 0x11, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x49, 0x64, + 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x19, 0x0a, 0x08, 0x73, 0x68, 0x61, 0x72, + 0x64, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x07, 0x73, 0x68, 0x61, 0x72, + 0x64, 0x49, 0x64, 0x12, 0x31, 0x0a, 0x09, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, + 0x70, 0x62, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x6c, 0x6f, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x1f, 0x0a, 0x1d, 0x47, 0x65, 0x74, 0x4d, 0x61, 0x73, + 0x74, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x83, 0x01, 0x0a, 0x1e, 0x47, 0x65, 0x74, 0x4d, + 0x61, 0x73, 0x74, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x27, 0x0a, 0x0f, 0x6d, 0x65, + 0x74, 0x72, 0x69, 0x63, 0x73, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x0e, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x41, 0x64, 0x64, 0x72, + 0x65, 0x73, 0x73, 0x12, 0x38, 0x0a, 0x18, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x5f, 0x69, + 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x5f, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x16, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x49, 0x6e, + 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x22, 0x3b, 0x0a, + 0x18, 0x4c, 0x69, 0x73, 0x74, 0x4d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x43, 0x6c, 0x69, 0x65, 
0x6e, + 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x63, 0x6c, 0x69, + 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, + 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x22, 0x42, 0x0a, 0x19, 0x4c, 0x69, + 0x73, 0x74, 0x4d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x73, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x67, 0x72, 0x70, 0x63, 0x5f, + 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, + 0x0d, 0x67, 0x72, 0x70, 0x63, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x22, 0x8a, + 0x01, 0x0a, 0x16, 0x4c, 0x65, 0x61, 0x73, 0x65, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x54, 0x6f, 0x6b, + 0x65, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x25, 0x0a, 0x0e, 0x70, 0x72, 0x65, + 0x76, 0x69, 0x6f, 0x75, 0x73, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x03, 0x52, 0x0d, 0x70, 0x72, 0x65, 0x76, 0x69, 0x6f, 0x75, 0x73, 0x54, 0x6f, 0x6b, 0x65, 0x6e, + 0x12, 0x2c, 0x0a, 0x12, 0x70, 0x72, 0x65, 0x76, 0x69, 0x6f, 0x75, 0x73, 0x5f, 0x6c, 0x6f, 0x63, + 0x6b, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x10, 0x70, 0x72, + 0x65, 0x76, 0x69, 0x6f, 0x75, 0x73, 0x4c, 0x6f, 0x63, 0x6b, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x1b, + 0x0a, 0x09, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x6b, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0x4d, 0x0a, 0x17, 0x4c, + 0x65, 0x61, 0x73, 0x65, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x1c, 0x0a, 0x0a, + 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x74, 0x73, 0x5f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, + 0x52, 0x08, 0x6c, 0x6f, 0x63, 
0x6b, 0x54, 0x73, 0x4e, 0x73, 0x22, 0x8c, 0x01, 0x0a, 0x18, 0x52, + 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x54, 0x6f, 0x6b, 0x65, 0x6e, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x25, 0x0a, 0x0e, 0x70, 0x72, 0x65, 0x76, 0x69, + 0x6f, 0x75, 0x73, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, + 0x0d, 0x70, 0x72, 0x65, 0x76, 0x69, 0x6f, 0x75, 0x73, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x2c, + 0x0a, 0x12, 0x70, 0x72, 0x65, 0x76, 0x69, 0x6f, 0x75, 0x73, 0x5f, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, + 0x74, 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x10, 0x70, 0x72, 0x65, 0x76, + 0x69, 0x6f, 0x75, 0x73, 0x4c, 0x6f, 0x63, 0x6b, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x1b, 0x0a, 0x09, + 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x08, 0x6c, 0x6f, 0x63, 0x6b, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0x1b, 0x0a, 0x19, 0x52, 0x65, 0x6c, + 0x65, 0x61, 0x73, 0x65, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x32, 0xf7, 0x08, 0x0a, 0x07, 0x53, 0x65, 0x61, 0x77, 0x65, + 0x65, 0x64, 0x12, 0x49, 0x0a, 0x0d, 0x53, 0x65, 0x6e, 0x64, 0x48, 0x65, 0x61, 0x72, 0x74, 0x62, + 0x65, 0x61, 0x74, 0x12, 0x14, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, + 0x48, 0x65, 0x61, 0x72, 0x74, 0x62, 0x65, 0x61, 0x74, 0x1a, 0x1c, 0x2e, 0x6d, 0x61, 0x73, 0x74, + 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x48, 0x65, 0x61, 0x72, 0x74, 0x62, 0x65, 0x61, 0x74, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x28, 0x01, 0x30, 0x01, 0x12, 0x51, 0x0a, + 0x0d, 0x4b, 0x65, 0x65, 0x70, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x65, 0x64, 0x12, 0x1f, + 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4b, 0x65, 0x65, 0x70, 0x43, + 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x19, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 
0x2e, 0x56, 0x6f, 0x6c, 0x75, + 0x6d, 0x65, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x00, 0x28, 0x01, 0x30, 0x01, + 0x12, 0x51, 0x0a, 0x0c, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, + 0x12, 0x1e, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4c, 0x6f, 0x6f, + 0x6b, 0x75, 0x70, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x1f, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4c, 0x6f, 0x6f, + 0x6b, 0x75, 0x70, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x22, 0x00, 0x12, 0x3f, 0x0a, 0x06, 0x41, 0x73, 0x73, 0x69, 0x67, 0x6e, 0x12, 0x18, 0x2e, + 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x41, 0x73, 0x73, 0x69, 0x67, 0x6e, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, + 0x5f, 0x70, 0x62, 0x2e, 0x41, 0x73, 0x73, 0x69, 0x67, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x22, 0x00, 0x12, 0x4b, 0x0a, 0x0a, 0x53, 0x74, 0x61, 0x74, 0x69, 0x73, 0x74, 0x69, + 0x63, 0x73, 0x12, 0x1c, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x53, + 0x74, 0x61, 0x74, 0x69, 0x73, 0x74, 0x69, 0x63, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x1d, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x53, 0x74, 0x61, + 0x74, 0x69, 0x73, 0x74, 0x69, 0x63, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, + 0x00, 0x12, 0x57, 0x0a, 0x0e, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4c, + 0x69, 0x73, 0x74, 0x12, 0x20, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, + 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, + 0x62, 0x2e, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x69, 0x73, 0x74, + 0x52, 
0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x5d, 0x0a, 0x10, 0x43, 0x6f, + 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x12, 0x22, + 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x43, 0x6f, 0x6c, 0x6c, 0x65, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x43, + 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x4b, 0x0a, 0x0a, 0x56, 0x6f, 0x6c, + 0x75, 0x6d, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x1c, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, + 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, + 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x57, 0x0a, 0x0e, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, + 0x45, 0x63, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x12, 0x20, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, + 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x45, 0x63, 0x56, 0x6f, 0x6c, + 0x75, 0x6d, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x6d, 0x61, 0x73, + 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x45, 0x63, 0x56, + 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, + 0x6f, 0x0a, 0x16, 0x47, 0x65, 0x74, 0x4d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x28, 0x2e, 0x6d, 0x61, 0x73, 0x74, + 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x47, 0x65, 0x74, 0x4d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 
0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, + 0x47, 0x65, 0x74, 0x4d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, + 0x12, 0x60, 0x0a, 0x11, 0x4c, 0x69, 0x73, 0x74, 0x4d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x43, 0x6c, + 0x69, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x23, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, + 0x62, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x43, 0x6c, 0x69, 0x65, + 0x6e, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x24, 0x2e, 0x6d, 0x61, 0x73, + 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4d, 0x61, 0x73, 0x74, 0x65, + 0x72, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x22, 0x00, 0x12, 0x5a, 0x0a, 0x0f, 0x4c, 0x65, 0x61, 0x73, 0x65, 0x41, 0x64, 0x6d, 0x69, 0x6e, + 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x21, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, + 0x62, 0x2e, 0x4c, 0x65, 0x61, 0x73, 0x65, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x54, 0x6f, 0x6b, 0x65, + 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, + 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4c, 0x65, 0x61, 0x73, 0x65, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x54, + 0x6f, 0x6b, 0x65, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x60, + 0x0a, 0x11, 0x52, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x54, 0x6f, + 0x6b, 0x65, 0x6e, 0x12, 0x23, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, + 0x52, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x54, 0x6f, 0x6b, 0x65, + 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x24, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, + 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x52, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x41, 
0x64, 0x6d, 0x69, + 0x6e, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, + 0x42, 0x32, 0x5a, 0x30, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, + 0x68, 0x72, 0x69, 0x73, 0x6c, 0x75, 0x73, 0x66, 0x2f, 0x73, 0x65, 0x61, 0x77, 0x65, 0x65, 0x64, + 0x66, 0x73, 0x2f, 0x77, 0x65, 0x65, 0x64, 0x2f, 0x70, 0x62, 0x2f, 0x6d, 0x61, 0x73, 0x74, 0x65, + 0x72, 0x5f, 0x70, 0x62, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } -func (m *StatisticsResponse) GetTotalSize() uint64 { - if m != nil { - return m.TotalSize - } - return 0 -} +var ( + file_master_proto_rawDescOnce sync.Once + file_master_proto_rawDescData = file_master_proto_rawDesc +) -func (m *StatisticsResponse) GetUsedSize() uint64 { - if m != nil { - return m.UsedSize - } - return 0 +func file_master_proto_rawDescGZIP() []byte { + file_master_proto_rawDescOnce.Do(func() { + file_master_proto_rawDescData = protoimpl.X.CompressGZIP(file_master_proto_rawDescData) + }) + return file_master_proto_rawDescData } -func (m *StatisticsResponse) GetFileCount() uint64 { - if m != nil { - return m.FileCount - } - return 0 +var file_master_proto_msgTypes = make([]protoimpl.MessageInfo, 43) +var file_master_proto_goTypes = []interface{}{ + (*Heartbeat)(nil), // 0: master_pb.Heartbeat + (*HeartbeatResponse)(nil), // 1: master_pb.HeartbeatResponse + (*VolumeInformationMessage)(nil), // 2: master_pb.VolumeInformationMessage + (*VolumeShortInformationMessage)(nil), // 3: master_pb.VolumeShortInformationMessage + (*VolumeEcShardInformationMessage)(nil), // 4: master_pb.VolumeEcShardInformationMessage + (*StorageBackend)(nil), // 5: master_pb.StorageBackend + (*Empty)(nil), // 6: master_pb.Empty + (*SuperBlockExtra)(nil), // 7: master_pb.SuperBlockExtra + (*KeepConnectedRequest)(nil), // 8: master_pb.KeepConnectedRequest + (*VolumeLocation)(nil), // 9: master_pb.VolumeLocation + (*LookupVolumeRequest)(nil), // 10: master_pb.LookupVolumeRequest + 
(*LookupVolumeResponse)(nil), // 11: master_pb.LookupVolumeResponse + (*Location)(nil), // 12: master_pb.Location + (*AssignRequest)(nil), // 13: master_pb.AssignRequest + (*AssignResponse)(nil), // 14: master_pb.AssignResponse + (*StatisticsRequest)(nil), // 15: master_pb.StatisticsRequest + (*StatisticsResponse)(nil), // 16: master_pb.StatisticsResponse + (*StorageType)(nil), // 17: master_pb.StorageType + (*Collection)(nil), // 18: master_pb.Collection + (*CollectionListRequest)(nil), // 19: master_pb.CollectionListRequest + (*CollectionListResponse)(nil), // 20: master_pb.CollectionListResponse + (*CollectionDeleteRequest)(nil), // 21: master_pb.CollectionDeleteRequest + (*CollectionDeleteResponse)(nil), // 22: master_pb.CollectionDeleteResponse + (*DataNodeInfo)(nil), // 23: master_pb.DataNodeInfo + (*RackInfo)(nil), // 24: master_pb.RackInfo + (*DataCenterInfo)(nil), // 25: master_pb.DataCenterInfo + (*TopologyInfo)(nil), // 26: master_pb.TopologyInfo + (*VolumeListRequest)(nil), // 27: master_pb.VolumeListRequest + (*VolumeListResponse)(nil), // 28: master_pb.VolumeListResponse + (*LookupEcVolumeRequest)(nil), // 29: master_pb.LookupEcVolumeRequest + (*LookupEcVolumeResponse)(nil), // 30: master_pb.LookupEcVolumeResponse + (*GetMasterConfigurationRequest)(nil), // 31: master_pb.GetMasterConfigurationRequest + (*GetMasterConfigurationResponse)(nil), // 32: master_pb.GetMasterConfigurationResponse + (*ListMasterClientsRequest)(nil), // 33: master_pb.ListMasterClientsRequest + (*ListMasterClientsResponse)(nil), // 34: master_pb.ListMasterClientsResponse + (*LeaseAdminTokenRequest)(nil), // 35: master_pb.LeaseAdminTokenRequest + (*LeaseAdminTokenResponse)(nil), // 36: master_pb.LeaseAdminTokenResponse + (*ReleaseAdminTokenRequest)(nil), // 37: master_pb.ReleaseAdminTokenRequest + (*ReleaseAdminTokenResponse)(nil), // 38: master_pb.ReleaseAdminTokenResponse + nil, // 39: master_pb.StorageBackend.PropertiesEntry + (*SuperBlockExtra_ErasureCoding)(nil), // 40: 
master_pb.SuperBlockExtra.ErasureCoding + (*LookupVolumeResponse_VolumeIdLocation)(nil), // 41: master_pb.LookupVolumeResponse.VolumeIdLocation + (*LookupEcVolumeResponse_EcShardIdLocation)(nil), // 42: master_pb.LookupEcVolumeResponse.EcShardIdLocation +} +var file_master_proto_depIdxs = []int32{ + 2, // 0: master_pb.Heartbeat.volumes:type_name -> master_pb.VolumeInformationMessage + 3, // 1: master_pb.Heartbeat.new_volumes:type_name -> master_pb.VolumeShortInformationMessage + 3, // 2: master_pb.Heartbeat.deleted_volumes:type_name -> master_pb.VolumeShortInformationMessage + 4, // 3: master_pb.Heartbeat.ec_shards:type_name -> master_pb.VolumeEcShardInformationMessage + 4, // 4: master_pb.Heartbeat.new_ec_shards:type_name -> master_pb.VolumeEcShardInformationMessage + 4, // 5: master_pb.Heartbeat.deleted_ec_shards:type_name -> master_pb.VolumeEcShardInformationMessage + 5, // 6: master_pb.HeartbeatResponse.storage_backends:type_name -> master_pb.StorageBackend + 39, // 7: master_pb.StorageBackend.properties:type_name -> master_pb.StorageBackend.PropertiesEntry + 40, // 8: master_pb.SuperBlockExtra.erasure_coding:type_name -> master_pb.SuperBlockExtra.ErasureCoding + 41, // 9: master_pb.LookupVolumeResponse.volume_id_locations:type_name -> master_pb.LookupVolumeResponse.VolumeIdLocation + 18, // 10: master_pb.CollectionListResponse.collections:type_name -> master_pb.Collection + 2, // 11: master_pb.DataNodeInfo.volume_infos:type_name -> master_pb.VolumeInformationMessage + 4, // 12: master_pb.DataNodeInfo.ec_shard_infos:type_name -> master_pb.VolumeEcShardInformationMessage + 23, // 13: master_pb.RackInfo.data_node_infos:type_name -> master_pb.DataNodeInfo + 24, // 14: master_pb.DataCenterInfo.rack_infos:type_name -> master_pb.RackInfo + 25, // 15: master_pb.TopologyInfo.data_center_infos:type_name -> master_pb.DataCenterInfo + 26, // 16: master_pb.VolumeListResponse.topology_info:type_name -> master_pb.TopologyInfo + 42, // 17: 
master_pb.LookupEcVolumeResponse.shard_id_locations:type_name -> master_pb.LookupEcVolumeResponse.EcShardIdLocation + 12, // 18: master_pb.LookupVolumeResponse.VolumeIdLocation.locations:type_name -> master_pb.Location + 12, // 19: master_pb.LookupEcVolumeResponse.EcShardIdLocation.locations:type_name -> master_pb.Location + 0, // 20: master_pb.Seaweed.SendHeartbeat:input_type -> master_pb.Heartbeat + 8, // 21: master_pb.Seaweed.KeepConnected:input_type -> master_pb.KeepConnectedRequest + 10, // 22: master_pb.Seaweed.LookupVolume:input_type -> master_pb.LookupVolumeRequest + 13, // 23: master_pb.Seaweed.Assign:input_type -> master_pb.AssignRequest + 15, // 24: master_pb.Seaweed.Statistics:input_type -> master_pb.StatisticsRequest + 19, // 25: master_pb.Seaweed.CollectionList:input_type -> master_pb.CollectionListRequest + 21, // 26: master_pb.Seaweed.CollectionDelete:input_type -> master_pb.CollectionDeleteRequest + 27, // 27: master_pb.Seaweed.VolumeList:input_type -> master_pb.VolumeListRequest + 29, // 28: master_pb.Seaweed.LookupEcVolume:input_type -> master_pb.LookupEcVolumeRequest + 31, // 29: master_pb.Seaweed.GetMasterConfiguration:input_type -> master_pb.GetMasterConfigurationRequest + 33, // 30: master_pb.Seaweed.ListMasterClients:input_type -> master_pb.ListMasterClientsRequest + 35, // 31: master_pb.Seaweed.LeaseAdminToken:input_type -> master_pb.LeaseAdminTokenRequest + 37, // 32: master_pb.Seaweed.ReleaseAdminToken:input_type -> master_pb.ReleaseAdminTokenRequest + 1, // 33: master_pb.Seaweed.SendHeartbeat:output_type -> master_pb.HeartbeatResponse + 9, // 34: master_pb.Seaweed.KeepConnected:output_type -> master_pb.VolumeLocation + 11, // 35: master_pb.Seaweed.LookupVolume:output_type -> master_pb.LookupVolumeResponse + 14, // 36: master_pb.Seaweed.Assign:output_type -> master_pb.AssignResponse + 16, // 37: master_pb.Seaweed.Statistics:output_type -> master_pb.StatisticsResponse + 20, // 38: master_pb.Seaweed.CollectionList:output_type -> 
master_pb.CollectionListResponse + 22, // 39: master_pb.Seaweed.CollectionDelete:output_type -> master_pb.CollectionDeleteResponse + 28, // 40: master_pb.Seaweed.VolumeList:output_type -> master_pb.VolumeListResponse + 30, // 41: master_pb.Seaweed.LookupEcVolume:output_type -> master_pb.LookupEcVolumeResponse + 32, // 42: master_pb.Seaweed.GetMasterConfiguration:output_type -> master_pb.GetMasterConfigurationResponse + 34, // 43: master_pb.Seaweed.ListMasterClients:output_type -> master_pb.ListMasterClientsResponse + 36, // 44: master_pb.Seaweed.LeaseAdminToken:output_type -> master_pb.LeaseAdminTokenResponse + 38, // 45: master_pb.Seaweed.ReleaseAdminToken:output_type -> master_pb.ReleaseAdminTokenResponse + 33, // [33:46] is the sub-list for method output_type + 20, // [20:33] is the sub-list for method input_type + 20, // [20:20] is the sub-list for extension type_name + 20, // [20:20] is the sub-list for extension extendee + 0, // [0:20] is the sub-list for field type_name } -func init() { - proto.RegisterType((*Heartbeat)(nil), "master_pb.Heartbeat") - proto.RegisterType((*HeartbeatResponse)(nil), "master_pb.HeartbeatResponse") - proto.RegisterType((*VolumeInformationMessage)(nil), "master_pb.VolumeInformationMessage") - proto.RegisterType((*Empty)(nil), "master_pb.Empty") - proto.RegisterType((*SuperBlockExtra)(nil), "master_pb.SuperBlockExtra") - proto.RegisterType((*SuperBlockExtra_ErasureCoding)(nil), "master_pb.SuperBlockExtra.ErasureCoding") - proto.RegisterType((*ClientListenRequest)(nil), "master_pb.ClientListenRequest") - proto.RegisterType((*VolumeLocation)(nil), "master_pb.VolumeLocation") - proto.RegisterType((*LookupVolumeRequest)(nil), "master_pb.LookupVolumeRequest") - proto.RegisterType((*LookupVolumeResponse)(nil), "master_pb.LookupVolumeResponse") - proto.RegisterType((*LookupVolumeResponse_VolumeIdLocation)(nil), "master_pb.LookupVolumeResponse.VolumeIdLocation") - proto.RegisterType((*Location)(nil), "master_pb.Location") - 
proto.RegisterType((*AssignRequest)(nil), "master_pb.AssignRequest") - proto.RegisterType((*AssignResponse)(nil), "master_pb.AssignResponse") - proto.RegisterType((*StatisticsRequest)(nil), "master_pb.StatisticsRequest") - proto.RegisterType((*StatisticsResponse)(nil), "master_pb.StatisticsResponse") +func init() { file_master_proto_init() } +func file_master_proto_init() { + if File_master_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_master_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Heartbeat); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_master_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*HeartbeatResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_master_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeInformationMessage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_master_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeShortInformationMessage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_master_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeEcShardInformationMessage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_master_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*StorageBackend); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } 
+ file_master_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Empty); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_master_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SuperBlockExtra); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_master_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*KeepConnectedRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_master_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeLocation); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_master_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LookupVolumeRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_master_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LookupVolumeResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_master_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Location); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_master_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AssignRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + 
return nil + } + } + file_master_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AssignResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_master_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*StatisticsRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_master_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*StatisticsResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_master_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*StorageType); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_master_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Collection); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_master_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CollectionListRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_master_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CollectionListResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_master_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CollectionDeleteRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 
2: + return &v.unknownFields + default: + return nil + } + } + file_master_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CollectionDeleteResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_master_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DataNodeInfo); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_master_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RackInfo); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_master_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DataCenterInfo); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_master_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TopologyInfo); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_master_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeListRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_master_proto_msgTypes[28].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeListResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_master_proto_msgTypes[29].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LookupEcVolumeRequest); i { + case 0: + return &v.state + case 
1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_master_proto_msgTypes[30].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LookupEcVolumeResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_master_proto_msgTypes[31].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetMasterConfigurationRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_master_proto_msgTypes[32].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetMasterConfigurationResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_master_proto_msgTypes[33].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListMasterClientsRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_master_proto_msgTypes[34].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListMasterClientsResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_master_proto_msgTypes[35].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LeaseAdminTokenRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_master_proto_msgTypes[36].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LeaseAdminTokenResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_master_proto_msgTypes[37].Exporter = func(v interface{}, i 
int) interface{} { + switch v := v.(*ReleaseAdminTokenRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_master_proto_msgTypes[38].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ReleaseAdminTokenResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_master_proto_msgTypes[40].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SuperBlockExtra_ErasureCoding); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_master_proto_msgTypes[41].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LookupVolumeResponse_VolumeIdLocation); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_master_proto_msgTypes[42].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LookupEcVolumeResponse_EcShardIdLocation); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_master_proto_rawDesc, + NumEnums: 0, + NumMessages: 43, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_master_proto_goTypes, + DependencyIndexes: file_master_proto_depIdxs, + MessageInfos: file_master_proto_msgTypes, + }.Build() + File_master_proto = out.File + file_master_proto_rawDesc = nil + file_master_proto_goTypes = nil + file_master_proto_depIdxs = nil } // Reference imports to suppress errors if they are not otherwise used. 
var _ context.Context -var _ grpc.ClientConn +var _ grpc.ClientConnInterface // This is a compile-time assertion to ensure that this generated file // is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 - -// Client API for Seaweed service +const _ = grpc.SupportPackageIsVersion6 +// SeaweedClient is the client API for Seaweed service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. type SeaweedClient interface { SendHeartbeat(ctx context.Context, opts ...grpc.CallOption) (Seaweed_SendHeartbeatClient, error) KeepConnected(ctx context.Context, opts ...grpc.CallOption) (Seaweed_KeepConnectedClient, error) LookupVolume(ctx context.Context, in *LookupVolumeRequest, opts ...grpc.CallOption) (*LookupVolumeResponse, error) Assign(ctx context.Context, in *AssignRequest, opts ...grpc.CallOption) (*AssignResponse, error) Statistics(ctx context.Context, in *StatisticsRequest, opts ...grpc.CallOption) (*StatisticsResponse, error) + CollectionList(ctx context.Context, in *CollectionListRequest, opts ...grpc.CallOption) (*CollectionListResponse, error) + CollectionDelete(ctx context.Context, in *CollectionDeleteRequest, opts ...grpc.CallOption) (*CollectionDeleteResponse, error) + VolumeList(ctx context.Context, in *VolumeListRequest, opts ...grpc.CallOption) (*VolumeListResponse, error) + LookupEcVolume(ctx context.Context, in *LookupEcVolumeRequest, opts ...grpc.CallOption) (*LookupEcVolumeResponse, error) + GetMasterConfiguration(ctx context.Context, in *GetMasterConfigurationRequest, opts ...grpc.CallOption) (*GetMasterConfigurationResponse, error) + ListMasterClients(ctx context.Context, in *ListMasterClientsRequest, opts ...grpc.CallOption) (*ListMasterClientsResponse, error) + LeaseAdminToken(ctx context.Context, in *LeaseAdminTokenRequest, opts ...grpc.CallOption) (*LeaseAdminTokenResponse, error) + 
ReleaseAdminToken(ctx context.Context, in *ReleaseAdminTokenRequest, opts ...grpc.CallOption) (*ReleaseAdminTokenResponse, error) } type seaweedClient struct { - cc *grpc.ClientConn + cc grpc.ClientConnInterface } -func NewSeaweedClient(cc *grpc.ClientConn) SeaweedClient { +func NewSeaweedClient(cc grpc.ClientConnInterface) SeaweedClient { return &seaweedClient{cc} } func (c *seaweedClient) SendHeartbeat(ctx context.Context, opts ...grpc.CallOption) (Seaweed_SendHeartbeatClient, error) { - stream, err := grpc.NewClientStream(ctx, &_Seaweed_serviceDesc.Streams[0], c.cc, "/master_pb.Seaweed/SendHeartbeat", opts...) + stream, err := c.cc.NewStream(ctx, &_Seaweed_serviceDesc.Streams[0], "/master_pb.Seaweed/SendHeartbeat", opts...) if err != nil { return nil, err } @@ -752,7 +4018,7 @@ func (x *seaweedSendHeartbeatClient) Recv() (*HeartbeatResponse, error) { } func (c *seaweedClient) KeepConnected(ctx context.Context, opts ...grpc.CallOption) (Seaweed_KeepConnectedClient, error) { - stream, err := grpc.NewClientStream(ctx, &_Seaweed_serviceDesc.Streams[1], c.cc, "/master_pb.Seaweed/KeepConnected", opts...) + stream, err := c.cc.NewStream(ctx, &_Seaweed_serviceDesc.Streams[1], "/master_pb.Seaweed/KeepConnected", opts...) 
if err != nil { return nil, err } @@ -761,7 +4027,7 @@ func (c *seaweedClient) KeepConnected(ctx context.Context, opts ...grpc.CallOpti } type Seaweed_KeepConnectedClient interface { - Send(*ClientListenRequest) error + Send(*KeepConnectedRequest) error Recv() (*VolumeLocation, error) grpc.ClientStream } @@ -770,7 +4036,7 @@ type seaweedKeepConnectedClient struct { grpc.ClientStream } -func (x *seaweedKeepConnectedClient) Send(m *ClientListenRequest) error { +func (x *seaweedKeepConnectedClient) Send(m *KeepConnectedRequest) error { return x.ClientStream.SendMsg(m) } @@ -784,7 +4050,7 @@ func (x *seaweedKeepConnectedClient) Recv() (*VolumeLocation, error) { func (c *seaweedClient) LookupVolume(ctx context.Context, in *LookupVolumeRequest, opts ...grpc.CallOption) (*LookupVolumeResponse, error) { out := new(LookupVolumeResponse) - err := grpc.Invoke(ctx, "/master_pb.Seaweed/LookupVolume", in, out, c.cc, opts...) + err := c.cc.Invoke(ctx, "/master_pb.Seaweed/LookupVolume", in, out, opts...) if err != nil { return nil, err } @@ -793,7 +4059,7 @@ func (c *seaweedClient) LookupVolume(ctx context.Context, in *LookupVolumeReques func (c *seaweedClient) Assign(ctx context.Context, in *AssignRequest, opts ...grpc.CallOption) (*AssignResponse, error) { out := new(AssignResponse) - err := grpc.Invoke(ctx, "/master_pb.Seaweed/Assign", in, out, c.cc, opts...) + err := c.cc.Invoke(ctx, "/master_pb.Seaweed/Assign", in, out, opts...) if err != nil { return nil, err } @@ -802,21 +4068,144 @@ func (c *seaweedClient) Assign(ctx context.Context, in *AssignRequest, opts ...g func (c *seaweedClient) Statistics(ctx context.Context, in *StatisticsRequest, opts ...grpc.CallOption) (*StatisticsResponse, error) { out := new(StatisticsResponse) - err := grpc.Invoke(ctx, "/master_pb.Seaweed/Statistics", in, out, c.cc, opts...) + err := c.cc.Invoke(ctx, "/master_pb.Seaweed/Statistics", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *seaweedClient) CollectionList(ctx context.Context, in *CollectionListRequest, opts ...grpc.CallOption) (*CollectionListResponse, error) { + out := new(CollectionListResponse) + err := c.cc.Invoke(ctx, "/master_pb.Seaweed/CollectionList", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *seaweedClient) CollectionDelete(ctx context.Context, in *CollectionDeleteRequest, opts ...grpc.CallOption) (*CollectionDeleteResponse, error) { + out := new(CollectionDeleteResponse) + err := c.cc.Invoke(ctx, "/master_pb.Seaweed/CollectionDelete", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *seaweedClient) VolumeList(ctx context.Context, in *VolumeListRequest, opts ...grpc.CallOption) (*VolumeListResponse, error) { + out := new(VolumeListResponse) + err := c.cc.Invoke(ctx, "/master_pb.Seaweed/VolumeList", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *seaweedClient) LookupEcVolume(ctx context.Context, in *LookupEcVolumeRequest, opts ...grpc.CallOption) (*LookupEcVolumeResponse, error) { + out := new(LookupEcVolumeResponse) + err := c.cc.Invoke(ctx, "/master_pb.Seaweed/LookupEcVolume", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *seaweedClient) GetMasterConfiguration(ctx context.Context, in *GetMasterConfigurationRequest, opts ...grpc.CallOption) (*GetMasterConfigurationResponse, error) { + out := new(GetMasterConfigurationResponse) + err := c.cc.Invoke(ctx, "/master_pb.Seaweed/GetMasterConfiguration", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *seaweedClient) ListMasterClients(ctx context.Context, in *ListMasterClientsRequest, opts ...grpc.CallOption) (*ListMasterClientsResponse, error) { + out := new(ListMasterClientsResponse) + err := c.cc.Invoke(ctx, "/master_pb.Seaweed/ListMasterClients", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *seaweedClient) LeaseAdminToken(ctx context.Context, in *LeaseAdminTokenRequest, opts ...grpc.CallOption) (*LeaseAdminTokenResponse, error) { + out := new(LeaseAdminTokenResponse) + err := c.cc.Invoke(ctx, "/master_pb.Seaweed/LeaseAdminToken", in, out, opts...) if err != nil { return nil, err } return out, nil } -// Server API for Seaweed service +func (c *seaweedClient) ReleaseAdminToken(ctx context.Context, in *ReleaseAdminTokenRequest, opts ...grpc.CallOption) (*ReleaseAdminTokenResponse, error) { + out := new(ReleaseAdminTokenResponse) + err := c.cc.Invoke(ctx, "/master_pb.Seaweed/ReleaseAdminToken", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} +// SeaweedServer is the server API for Seaweed service. 
type SeaweedServer interface { SendHeartbeat(Seaweed_SendHeartbeatServer) error KeepConnected(Seaweed_KeepConnectedServer) error LookupVolume(context.Context, *LookupVolumeRequest) (*LookupVolumeResponse, error) Assign(context.Context, *AssignRequest) (*AssignResponse, error) Statistics(context.Context, *StatisticsRequest) (*StatisticsResponse, error) + CollectionList(context.Context, *CollectionListRequest) (*CollectionListResponse, error) + CollectionDelete(context.Context, *CollectionDeleteRequest) (*CollectionDeleteResponse, error) + VolumeList(context.Context, *VolumeListRequest) (*VolumeListResponse, error) + LookupEcVolume(context.Context, *LookupEcVolumeRequest) (*LookupEcVolumeResponse, error) + GetMasterConfiguration(context.Context, *GetMasterConfigurationRequest) (*GetMasterConfigurationResponse, error) + ListMasterClients(context.Context, *ListMasterClientsRequest) (*ListMasterClientsResponse, error) + LeaseAdminToken(context.Context, *LeaseAdminTokenRequest) (*LeaseAdminTokenResponse, error) + ReleaseAdminToken(context.Context, *ReleaseAdminTokenRequest) (*ReleaseAdminTokenResponse, error) +} + +// UnimplementedSeaweedServer can be embedded to have forward compatible implementations. 
+type UnimplementedSeaweedServer struct { +} + +func (*UnimplementedSeaweedServer) SendHeartbeat(Seaweed_SendHeartbeatServer) error { + return status.Errorf(codes.Unimplemented, "method SendHeartbeat not implemented") +} +func (*UnimplementedSeaweedServer) KeepConnected(Seaweed_KeepConnectedServer) error { + return status.Errorf(codes.Unimplemented, "method KeepConnected not implemented") +} +func (*UnimplementedSeaweedServer) LookupVolume(context.Context, *LookupVolumeRequest) (*LookupVolumeResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method LookupVolume not implemented") +} +func (*UnimplementedSeaweedServer) Assign(context.Context, *AssignRequest) (*AssignResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Assign not implemented") +} +func (*UnimplementedSeaweedServer) Statistics(context.Context, *StatisticsRequest) (*StatisticsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Statistics not implemented") +} +func (*UnimplementedSeaweedServer) CollectionList(context.Context, *CollectionListRequest) (*CollectionListResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method CollectionList not implemented") +} +func (*UnimplementedSeaweedServer) CollectionDelete(context.Context, *CollectionDeleteRequest) (*CollectionDeleteResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method CollectionDelete not implemented") +} +func (*UnimplementedSeaweedServer) VolumeList(context.Context, *VolumeListRequest) (*VolumeListResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method VolumeList not implemented") +} +func (*UnimplementedSeaweedServer) LookupEcVolume(context.Context, *LookupEcVolumeRequest) (*LookupEcVolumeResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method LookupEcVolume not implemented") +} +func (*UnimplementedSeaweedServer) GetMasterConfiguration(context.Context, *GetMasterConfigurationRequest) 
(*GetMasterConfigurationResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetMasterConfiguration not implemented") +} +func (*UnimplementedSeaweedServer) ListMasterClients(context.Context, *ListMasterClientsRequest) (*ListMasterClientsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ListMasterClients not implemented") +} +func (*UnimplementedSeaweedServer) LeaseAdminToken(context.Context, *LeaseAdminTokenRequest) (*LeaseAdminTokenResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method LeaseAdminToken not implemented") +} +func (*UnimplementedSeaweedServer) ReleaseAdminToken(context.Context, *ReleaseAdminTokenRequest) (*ReleaseAdminTokenResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ReleaseAdminToken not implemented") } func RegisterSeaweedServer(s *grpc.Server, srv SeaweedServer) { @@ -855,7 +4244,7 @@ func _Seaweed_KeepConnected_Handler(srv interface{}, stream grpc.ServerStream) e type Seaweed_KeepConnectedServer interface { Send(*VolumeLocation) error - Recv() (*ClientListenRequest, error) + Recv() (*KeepConnectedRequest, error) grpc.ServerStream } @@ -867,8 +4256,8 @@ func (x *seaweedKeepConnectedServer) Send(m *VolumeLocation) error { return x.ServerStream.SendMsg(m) } -func (x *seaweedKeepConnectedServer) Recv() (*ClientListenRequest, error) { - m := new(ClientListenRequest) +func (x *seaweedKeepConnectedServer) Recv() (*KeepConnectedRequest, error) { + m := new(KeepConnectedRequest) if err := x.ServerStream.RecvMsg(m); err != nil { return nil, err } @@ -929,6 +4318,150 @@ func _Seaweed_Statistics_Handler(srv interface{}, ctx context.Context, dec func( return interceptor(ctx, in, info, handler) } +func _Seaweed_CollectionList_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CollectionListRequest) + if err := dec(in); err != nil { + return nil, err + } + if 
interceptor == nil { + return srv.(SeaweedServer).CollectionList(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/master_pb.Seaweed/CollectionList", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SeaweedServer).CollectionList(ctx, req.(*CollectionListRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Seaweed_CollectionDelete_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CollectionDeleteRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SeaweedServer).CollectionDelete(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/master_pb.Seaweed/CollectionDelete", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SeaweedServer).CollectionDelete(ctx, req.(*CollectionDeleteRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Seaweed_VolumeList_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(VolumeListRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SeaweedServer).VolumeList(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/master_pb.Seaweed/VolumeList", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SeaweedServer).VolumeList(ctx, req.(*VolumeListRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Seaweed_LookupEcVolume_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(LookupEcVolumeRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return 
srv.(SeaweedServer).LookupEcVolume(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/master_pb.Seaweed/LookupEcVolume", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SeaweedServer).LookupEcVolume(ctx, req.(*LookupEcVolumeRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Seaweed_GetMasterConfiguration_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetMasterConfigurationRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SeaweedServer).GetMasterConfiguration(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/master_pb.Seaweed/GetMasterConfiguration", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SeaweedServer).GetMasterConfiguration(ctx, req.(*GetMasterConfigurationRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Seaweed_ListMasterClients_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListMasterClientsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SeaweedServer).ListMasterClients(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/master_pb.Seaweed/ListMasterClients", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SeaweedServer).ListMasterClients(ctx, req.(*ListMasterClientsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Seaweed_LeaseAdminToken_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(LeaseAdminTokenRequest) + if err := dec(in); err != nil { + return nil, 
err + } + if interceptor == nil { + return srv.(SeaweedServer).LeaseAdminToken(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/master_pb.Seaweed/LeaseAdminToken", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SeaweedServer).LeaseAdminToken(ctx, req.(*LeaseAdminTokenRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Seaweed_ReleaseAdminToken_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ReleaseAdminTokenRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SeaweedServer).ReleaseAdminToken(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/master_pb.Seaweed/ReleaseAdminToken", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SeaweedServer).ReleaseAdminToken(ctx, req.(*ReleaseAdminTokenRequest)) + } + return interceptor(ctx, in, info, handler) +} + var _Seaweed_serviceDesc = grpc.ServiceDesc{ ServiceName: "master_pb.Seaweed", HandlerType: (*SeaweedServer)(nil), @@ -945,6 +4478,38 @@ var _Seaweed_serviceDesc = grpc.ServiceDesc{ MethodName: "Statistics", Handler: _Seaweed_Statistics_Handler, }, + { + MethodName: "CollectionList", + Handler: _Seaweed_CollectionList_Handler, + }, + { + MethodName: "CollectionDelete", + Handler: _Seaweed_CollectionDelete_Handler, + }, + { + MethodName: "VolumeList", + Handler: _Seaweed_VolumeList_Handler, + }, + { + MethodName: "LookupEcVolume", + Handler: _Seaweed_LookupEcVolume_Handler, + }, + { + MethodName: "GetMasterConfiguration", + Handler: _Seaweed_GetMasterConfiguration_Handler, + }, + { + MethodName: "ListMasterClients", + Handler: _Seaweed_ListMasterClients_Handler, + }, + { + MethodName: "LeaseAdminToken", + Handler: _Seaweed_LeaseAdminToken_Handler, + }, + { + MethodName: "ReleaseAdminToken", + Handler: 
_Seaweed_ReleaseAdminToken_Handler, + }, }, Streams: []grpc.StreamDesc{ { @@ -962,75 +4527,3 @@ var _Seaweed_serviceDesc = grpc.ServiceDesc{ }, Metadata: "master.proto", } - -func init() { proto.RegisterFile("master.proto", fileDescriptor0) } - -var fileDescriptor0 = []byte{ - // 1055 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xb4, 0x56, 0x4b, 0x6f, 0xe4, 0x44, - 0x10, 0x5e, 0x7b, 0x9e, 0xae, 0xd9, 0xc9, 0x4e, 0x3a, 0x11, 0xf2, 0xce, 0xbe, 0x06, 0x73, 0x19, - 0x04, 0x8a, 0x96, 0x70, 0x44, 0x08, 0xb1, 0xd1, 0x22, 0xa2, 0x04, 0x36, 0x38, 0xb0, 0x07, 0x2e, - 0xa6, 0x63, 0x57, 0xa2, 0x56, 0xfc, 0xa2, 0xbb, 0x27, 0x99, 0xd9, 0x0b, 0x47, 0xfe, 0x15, 0x17, - 0xb8, 0xf1, 0x53, 0xb8, 0xf1, 0x0b, 0x50, 0x3f, 0xec, 0xf1, 0x38, 0x09, 0x91, 0x90, 0xb8, 0xb5, - 0xbf, 0xae, 0xee, 0xaa, 0xfe, 0xbe, 0x7a, 0x18, 0x1e, 0x66, 0x54, 0x48, 0xe4, 0x7b, 0x25, 0x2f, - 0x64, 0x41, 0x3c, 0xf3, 0x15, 0x95, 0x67, 0xc1, 0x5f, 0x2e, 0x78, 0x5f, 0x23, 0xe5, 0xf2, 0x0c, - 0xa9, 0x24, 0x5b, 0xe0, 0xb2, 0xd2, 0x77, 0x66, 0xce, 0xdc, 0x0b, 0x5d, 0x56, 0x12, 0x02, 0xdd, - 0xb2, 0xe0, 0xd2, 0x77, 0x67, 0xce, 0x7c, 0x1c, 0xea, 0x35, 0x79, 0x06, 0x50, 0x2e, 0xce, 0x52, - 0x16, 0x47, 0x0b, 0x9e, 0xfa, 0x1d, 0x6d, 0xeb, 0x19, 0xe4, 0x07, 0x9e, 0x92, 0x39, 0x4c, 0x32, - 0xba, 0x8c, 0xae, 0x8a, 0x74, 0x91, 0x61, 0x14, 0x17, 0x8b, 0x5c, 0xfa, 0x5d, 0x7d, 0x7c, 0x2b, - 0xa3, 0xcb, 0xb7, 0x1a, 0x3e, 0x50, 0x28, 0x99, 0xa9, 0xa8, 0x96, 0xd1, 0x39, 0x4b, 0x31, 0xba, - 0xc4, 0x95, 0xdf, 0x9b, 0x39, 0xf3, 0x6e, 0x08, 0x19, 0x5d, 0x7e, 0xc5, 0x52, 0x3c, 0xc2, 0x15, - 0x79, 0x01, 0xa3, 0x84, 0x4a, 0x1a, 0xc5, 0x98, 0x4b, 0xe4, 0x7e, 0x5f, 0xfb, 0x02, 0x05, 0x1d, - 0x68, 0x44, 0xc5, 0xc7, 0x69, 0x7c, 0xe9, 0x0f, 0xf4, 0x8e, 0x5e, 0xab, 0xf8, 0x68, 0x92, 0xb1, - 0x3c, 0xd2, 0x91, 0x0f, 0xb5, 0x6b, 0x4f, 0x23, 0x27, 0x2a, 0xfc, 0xcf, 0x61, 0x60, 0x62, 0x13, - 0xbe, 0x37, 0xeb, 0xcc, 0x47, 0xfb, 0x1f, 0xec, 0xd5, 0x6c, 0xec, 0x99, 0xf0, 0x0e, 0xf3, 0xf3, - 0x82, 
0x67, 0x54, 0xb2, 0x22, 0xff, 0x06, 0x85, 0xa0, 0x17, 0x18, 0x56, 0x67, 0xc8, 0x63, 0x18, - 0xe6, 0x78, 0x1d, 0x5d, 0xb1, 0x44, 0xf8, 0x30, 0xeb, 0xcc, 0xc7, 0xe1, 0x20, 0xc7, 0xeb, 0xb7, - 0x2c, 0x11, 0xe4, 0x7d, 0x78, 0x98, 0x60, 0x8a, 0x12, 0x13, 0xb3, 0x3d, 0xd2, 0xdb, 0x23, 0x8b, - 0x29, 0x93, 0x40, 0xc0, 0x76, 0x4d, 0x76, 0x88, 0xa2, 0x2c, 0x72, 0x81, 0x64, 0x0e, 0x8f, 0xcc, - 0xed, 0xa7, 0xec, 0x1d, 0x1e, 0xb3, 0x8c, 0x49, 0xad, 0x40, 0x37, 0x6c, 0xc3, 0xe4, 0x29, 0x78, - 0x02, 0x63, 0x8e, 0xf2, 0x08, 0x57, 0x5a, 0x13, 0x2f, 0x5c, 0x03, 0xe4, 0x3d, 0xe8, 0xa7, 0x48, - 0x13, 0xe4, 0x56, 0x14, 0xfb, 0x15, 0xfc, 0xe1, 0x82, 0x7f, 0xd7, 0xc3, 0xb4, 0xe2, 0x89, 0xf6, - 0x37, 0x0e, 0x5d, 0x96, 0x28, 0x46, 0x05, 0x7b, 0x87, 0xfa, 0xf6, 0x6e, 0xa8, 0xd7, 0xe4, 0x39, - 0x40, 0x5c, 0xa4, 0x29, 0xc6, 0xea, 0xa0, 0xbd, 0xbc, 0x81, 0x28, 0xc6, 0xb5, 0x88, 0x6b, 0xb1, - 0xbb, 0xa1, 0xa7, 0x10, 0xa3, 0x73, 0xcd, 0x8b, 0x35, 0x30, 0x3a, 0x5b, 0x5e, 0x8c, 0xc9, 0xc7, - 0x40, 0x2a, 0xea, 0xce, 0x56, 0xb5, 0x61, 0x5f, 0x1b, 0x4e, 0xec, 0xce, 0xab, 0x55, 0x65, 0xfd, - 0x04, 0x3c, 0x8e, 0x34, 0x89, 0x8a, 0x3c, 0x5d, 0x69, 0xe9, 0x87, 0xe1, 0x50, 0x01, 0x6f, 0xf2, - 0x74, 0x45, 0x3e, 0x82, 0x6d, 0x8e, 0x65, 0xca, 0x62, 0x1a, 0x95, 0x29, 0x8d, 0x31, 0xc3, 0xbc, - 0xca, 0x82, 0x89, 0xdd, 0x38, 0xa9, 0x70, 0xe2, 0xc3, 0xe0, 0x0a, 0xb9, 0x50, 0xcf, 0xf2, 0xb4, - 0x49, 0xf5, 0x49, 0x26, 0xd0, 0x91, 0x32, 0xf5, 0x41, 0xa3, 0x6a, 0x19, 0x0c, 0xa0, 0xf7, 0x3a, - 0x2b, 0xe5, 0x2a, 0xf8, 0xcd, 0x81, 0x47, 0xa7, 0x8b, 0x12, 0xf9, 0xab, 0xb4, 0x88, 0x2f, 0x5f, - 0x2f, 0x25, 0xa7, 0xe4, 0x0d, 0x6c, 0x21, 0xa7, 0x62, 0xc1, 0x55, 0xec, 0x09, 0xcb, 0x2f, 0x34, - 0xa5, 0xa3, 0xfd, 0x79, 0x23, 0xb9, 0x5a, 0x67, 0xf6, 0x5e, 0x9b, 0x03, 0x07, 0xda, 0x3e, 0x1c, - 0x63, 0xf3, 0x73, 0xfa, 0x23, 0x8c, 0x37, 0xf6, 0x95, 0x30, 0x2a, 0xf1, 0xad, 0x54, 0x7a, 0xad, - 0x14, 0x2f, 0x29, 0x67, 0x72, 0x65, 0x0b, 0xd4, 0x7e, 0x29, 0x41, 0x6c, 0xfd, 0xa9, 0x3c, 0xec, - 0xe8, 0x3c, 0xf4, 0x0c, 0x72, 0x98, 0x88, 
0xe0, 0x43, 0xd8, 0x39, 0x48, 0x19, 0xe6, 0xf2, 0x98, - 0x09, 0x89, 0x79, 0x88, 0x3f, 0x2f, 0x50, 0x48, 0xe5, 0x21, 0xa7, 0x19, 0xda, 0xf2, 0xd7, 0xeb, - 0xe0, 0x17, 0xd8, 0x32, 0xa9, 0x73, 0x5c, 0xc4, 0x3a, 0x6f, 0x14, 0x31, 0xaa, 0xee, 0x8d, 0x91, - 0x5a, 0xb6, 0x1a, 0x82, 0xdb, 0x6e, 0x08, 0xcd, 0x8a, 0xe9, 0xfc, 0x7b, 0xc5, 0x74, 0x6f, 0x56, - 0xcc, 0xf7, 0xb0, 0x73, 0x5c, 0x14, 0x97, 0x8b, 0xd2, 0x84, 0x51, 0xc5, 0xba, 0xf9, 0x42, 0x67, - 0xd6, 0x51, 0x3e, 0xeb, 0x17, 0xb6, 0x32, 0xd6, 0x6d, 0x67, 0x6c, 0xf0, 0xb7, 0x03, 0xbb, 0x9b, - 0xd7, 0xda, 0x5a, 0xfc, 0x09, 0x76, 0xea, 0x7b, 0xa3, 0xd4, 0xbe, 0xd9, 0x38, 0x18, 0xed, 0xbf, - 0x6c, 0x88, 0x79, 0xdb, 0xe9, 0xaa, 0x7d, 0x24, 0x15, 0x59, 0xe1, 0xf6, 0x55, 0x0b, 0x11, 0xd3, - 0x25, 0x4c, 0xda, 0x66, 0x2a, 0xa1, 0x6b, 0xaf, 0x96, 0xd9, 0x61, 0x75, 0x92, 0x7c, 0x02, 0xde, - 0x3a, 0x10, 0x57, 0x07, 0xb2, 0xb3, 0x11, 0x88, 0xf5, 0xb5, 0xb6, 0x22, 0xbb, 0xd0, 0x43, 0xce, - 0x8b, 0xaa, 0x11, 0x98, 0x8f, 0xe0, 0x33, 0x18, 0xfe, 0x67, 0x15, 0x83, 0x3f, 0x1d, 0x18, 0x7f, - 0x29, 0x04, 0xbb, 0xa8, 0xd3, 0x65, 0x17, 0x7a, 0xa6, 0x4c, 0x4d, 0xb3, 0x32, 0x1f, 0x64, 0x06, - 0x23, 0x5b, 0x65, 0x0d, 0xea, 0x9b, 0xd0, 0xbd, 0xdd, 0xc4, 0x56, 0x5e, 0xd7, 0x84, 0x26, 0x65, - 0xda, 0x1e, 0x03, 0xbd, 0x3b, 0xc7, 0x40, 0xbf, 0x31, 0x06, 0x9e, 0x80, 0xa7, 0x0f, 0xe5, 0x45, - 0x82, 0x76, 0x3e, 0x0c, 0x15, 0xf0, 0x6d, 0x91, 0xe8, 0xb4, 0xae, 0x1e, 0x63, 0x85, 0x9f, 0x40, - 0xe7, 0xbc, 0x26, 0x5f, 0x2d, 0x2b, 0x8a, 0xdc, 0xbb, 0x28, 0xba, 0x31, 0xf9, 0x6a, 0x42, 0xba, - 0x4d, 0x42, 0x6a, 0x2d, 0x7a, 0x4d, 0x2d, 0x2e, 0x60, 0xfb, 0x54, 0x52, 0xc9, 0x84, 0x64, 0xb1, - 0xa8, 0x18, 0x6d, 0x71, 0xe7, 0xdc, 0xc7, 0x9d, 0x7b, 0x17, 0x77, 0x9d, 0x9a, 0xbb, 0xe0, 0x77, - 0x07, 0x48, 0xd3, 0x93, 0x7d, 0xee, 0xff, 0xe0, 0x4a, 0xd1, 0x23, 0x0b, 0x49, 0xd3, 0x48, 0x0f, - 0x10, 0x3b, 0x06, 0x34, 0xa2, 0x26, 0x98, 0x12, 0x64, 0x21, 0x30, 0x31, 0xbb, 0x66, 0x06, 0x0c, - 0x15, 0xa0, 0x37, 0x37, 0x47, 0x48, 0xbf, 0x35, 0x42, 0xf6, 0x7f, 0xed, 0xc0, 
0xe0, 0x14, 0xe9, - 0x35, 0x62, 0x42, 0x0e, 0x61, 0x7c, 0x8a, 0x79, 0xb2, 0xfe, 0x69, 0xd9, 0x6d, 0x54, 0x43, 0x8d, - 0x4e, 0x9f, 0xde, 0x86, 0x56, 0xef, 0x0f, 0x1e, 0xcc, 0x9d, 0x97, 0x0e, 0x39, 0x81, 0xf1, 0x11, - 0x62, 0x79, 0x50, 0xe4, 0x39, 0xc6, 0x12, 0x13, 0xf2, 0xbc, 0x71, 0xe8, 0x96, 0x16, 0x39, 0x7d, - 0x7c, 0xe3, 0x5f, 0xa1, 0xaa, 0x28, 0x7b, 0xe3, 0x77, 0xf0, 0xb0, 0xd9, 0x19, 0x36, 0x2e, 0xbc, - 0xa5, 0x8f, 0x4d, 0x5f, 0xdc, 0xd3, 0x52, 0x82, 0x07, 0xe4, 0x0b, 0xe8, 0x9b, 0x5c, 0x25, 0x7e, - 0xc3, 0x78, 0xa3, 0x16, 0x37, 0xe2, 0xda, 0x4c, 0xec, 0xe0, 0x01, 0x39, 0x02, 0x58, 0x67, 0x00, - 0x69, 0xf2, 0x72, 0x23, 0x05, 0xa7, 0xcf, 0xee, 0xd8, 0xad, 0x2e, 0x3b, 0xeb, 0xeb, 0x3f, 0xc8, - 0x4f, 0xff, 0x09, 0x00, 0x00, 0xff, 0xff, 0xc7, 0x9f, 0x0a, 0x25, 0x51, 0x0a, 0x00, 0x00, -} diff --git a/weed/pb/messaging.proto b/weed/pb/messaging.proto new file mode 100644 index 000000000..04446ad16 --- /dev/null +++ b/weed/pb/messaging.proto @@ -0,0 +1,135 @@ +syntax = "proto3"; + +package messaging_pb; + +option go_package = "github.com/chrislusf/seaweedfs/weed/pb/messaging_pb"; +option java_package = "seaweedfs.client"; +option java_outer_classname = "MessagingProto"; + +////////////////////////////////////////////////// + +service SeaweedMessaging { + + rpc Subscribe (stream SubscriberMessage) returns (stream BrokerMessage) { + } + + rpc Publish (stream PublishRequest) returns (stream PublishResponse) { + } + + rpc DeleteTopic (DeleteTopicRequest) returns (DeleteTopicResponse) { + } + + rpc ConfigureTopic (ConfigureTopicRequest) returns (ConfigureTopicResponse) { + } + + rpc GetTopicConfiguration (GetTopicConfigurationRequest) returns (GetTopicConfigurationResponse) { + } + + rpc FindBroker (FindBrokerRequest) returns (FindBrokerResponse) { + } + +} + +////////////////////////////////////////////////// + +message SubscriberMessage { + message InitMessage { + string namespace = 1; + string topic = 2; + int32 partition = 3; + enum StartPosition { + LATEST = 0; // 
Start at the newest message + EARLIEST = 1; // Start at the oldest message + TIMESTAMP = 2; // Start after a specified timestamp, exclusive + } + StartPosition startPosition = 4; // Where to begin consuming from + int64 timestampNs = 5; // timestamp in nano seconds + string subscriber_id = 6; // uniquely identify a subscriber to track consumption + } + InitMessage init = 1; + message AckMessage { + int64 message_id = 1; + } + AckMessage ack = 2; + bool is_close = 3; +} + +message Message { + int64 event_time_ns = 1 [jstype = JS_STRING]; + bytes key = 2; // Message key + bytes value = 3; // Message payload + map headers = 4; // Message headers + bool is_close = 5; +} + +message BrokerMessage { + Message data = 1; +} + +message PublishRequest { + message InitMessage { + string namespace = 1; // only needed on the initial request + string topic = 2; // only needed on the initial request + int32 partition = 3; + } + InitMessage init = 1; + Message data = 2; +} + +message PublishResponse { + message ConfigMessage { + int32 partition_count = 1; + } + ConfigMessage config = 1; + message RedirectMessage { + string new_broker = 1; + } + RedirectMessage redirect = 2; + bool is_closed = 3; +} + +message DeleteTopicRequest { + string namespace = 1; + string topic = 2; +} +message DeleteTopicResponse { +} + +message ConfigureTopicRequest { + string namespace = 1; + string topic = 2; + TopicConfiguration configuration = 3; +} +message ConfigureTopicResponse { +} + +message GetTopicConfigurationRequest { + string namespace = 1; + string topic = 2; +} +message GetTopicConfigurationResponse { + TopicConfiguration configuration = 1; +} + +message FindBrokerRequest { + string namespace = 1; + string topic = 2; + int32 parition = 3; +} + +message FindBrokerResponse { + string broker = 1; +} + +message TopicConfiguration { + int32 partition_count = 1; + string collection = 2; + string replication = 3; + bool is_transient = 4; + enum Partitioning { + NonNullKeyHash = 0; // If not null, 
hash by key value. If null, round robin + KeyHash = 1; // hash by key value + RoundRobin = 2; // round robin pick one partition + } + Partitioning partitoning = 5; +} diff --git a/weed/pb/messaging_pb/messaging.pb.go b/weed/pb/messaging_pb/messaging.pb.go new file mode 100644 index 000000000..90b4b724a --- /dev/null +++ b/weed/pb/messaging_pb/messaging.pb.go @@ -0,0 +1,2053 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.24.0 +// protoc v3.12.3 +// source: messaging.proto + +package messaging_pb + +import ( + context "context" + proto "github.com/golang/protobuf/proto" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// This is a compile-time assertion that a sufficiently up-to-date version +// of the legacy proto package is being used. +const _ = proto.ProtoPackageIsVersion4 + +type SubscriberMessage_InitMessage_StartPosition int32 + +const ( + SubscriberMessage_InitMessage_LATEST SubscriberMessage_InitMessage_StartPosition = 0 // Start at the newest message + SubscriberMessage_InitMessage_EARLIEST SubscriberMessage_InitMessage_StartPosition = 1 // Start at the oldest message + SubscriberMessage_InitMessage_TIMESTAMP SubscriberMessage_InitMessage_StartPosition = 2 // Start after a specified timestamp, exclusive +) + +// Enum value maps for SubscriberMessage_InitMessage_StartPosition. 
+var ( + SubscriberMessage_InitMessage_StartPosition_name = map[int32]string{ + 0: "LATEST", + 1: "EARLIEST", + 2: "TIMESTAMP", + } + SubscriberMessage_InitMessage_StartPosition_value = map[string]int32{ + "LATEST": 0, + "EARLIEST": 1, + "TIMESTAMP": 2, + } +) + +func (x SubscriberMessage_InitMessage_StartPosition) Enum() *SubscriberMessage_InitMessage_StartPosition { + p := new(SubscriberMessage_InitMessage_StartPosition) + *p = x + return p +} + +func (x SubscriberMessage_InitMessage_StartPosition) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (SubscriberMessage_InitMessage_StartPosition) Descriptor() protoreflect.EnumDescriptor { + return file_messaging_proto_enumTypes[0].Descriptor() +} + +func (SubscriberMessage_InitMessage_StartPosition) Type() protoreflect.EnumType { + return &file_messaging_proto_enumTypes[0] +} + +func (x SubscriberMessage_InitMessage_StartPosition) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use SubscriberMessage_InitMessage_StartPosition.Descriptor instead. +func (SubscriberMessage_InitMessage_StartPosition) EnumDescriptor() ([]byte, []int) { + return file_messaging_proto_rawDescGZIP(), []int{0, 0, 0} +} + +type TopicConfiguration_Partitioning int32 + +const ( + TopicConfiguration_NonNullKeyHash TopicConfiguration_Partitioning = 0 // If not null, hash by key value. If null, round robin + TopicConfiguration_KeyHash TopicConfiguration_Partitioning = 1 // hash by key value + TopicConfiguration_RoundRobin TopicConfiguration_Partitioning = 2 // round robin pick one partition +) + +// Enum value maps for TopicConfiguration_Partitioning. 
+var ( + TopicConfiguration_Partitioning_name = map[int32]string{ + 0: "NonNullKeyHash", + 1: "KeyHash", + 2: "RoundRobin", + } + TopicConfiguration_Partitioning_value = map[string]int32{ + "NonNullKeyHash": 0, + "KeyHash": 1, + "RoundRobin": 2, + } +) + +func (x TopicConfiguration_Partitioning) Enum() *TopicConfiguration_Partitioning { + p := new(TopicConfiguration_Partitioning) + *p = x + return p +} + +func (x TopicConfiguration_Partitioning) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (TopicConfiguration_Partitioning) Descriptor() protoreflect.EnumDescriptor { + return file_messaging_proto_enumTypes[1].Descriptor() +} + +func (TopicConfiguration_Partitioning) Type() protoreflect.EnumType { + return &file_messaging_proto_enumTypes[1] +} + +func (x TopicConfiguration_Partitioning) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use TopicConfiguration_Partitioning.Descriptor instead. 
+func (TopicConfiguration_Partitioning) EnumDescriptor() ([]byte, []int) { + return file_messaging_proto_rawDescGZIP(), []int{13, 0} +} + +type SubscriberMessage struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Init *SubscriberMessage_InitMessage `protobuf:"bytes,1,opt,name=init,proto3" json:"init,omitempty"` + Ack *SubscriberMessage_AckMessage `protobuf:"bytes,2,opt,name=ack,proto3" json:"ack,omitempty"` + IsClose bool `protobuf:"varint,3,opt,name=is_close,json=isClose,proto3" json:"is_close,omitempty"` +} + +func (x *SubscriberMessage) Reset() { + *x = SubscriberMessage{} + if protoimpl.UnsafeEnabled { + mi := &file_messaging_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SubscriberMessage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SubscriberMessage) ProtoMessage() {} + +func (x *SubscriberMessage) ProtoReflect() protoreflect.Message { + mi := &file_messaging_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SubscriberMessage.ProtoReflect.Descriptor instead. 
+func (*SubscriberMessage) Descriptor() ([]byte, []int) { + return file_messaging_proto_rawDescGZIP(), []int{0} +} + +func (x *SubscriberMessage) GetInit() *SubscriberMessage_InitMessage { + if x != nil { + return x.Init + } + return nil +} + +func (x *SubscriberMessage) GetAck() *SubscriberMessage_AckMessage { + if x != nil { + return x.Ack + } + return nil +} + +func (x *SubscriberMessage) GetIsClose() bool { + if x != nil { + return x.IsClose + } + return false +} + +type Message struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + EventTimeNs int64 `protobuf:"varint,1,opt,name=event_time_ns,json=eventTimeNs,proto3" json:"event_time_ns,omitempty"` + Key []byte `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"` // Message key + Value []byte `protobuf:"bytes,3,opt,name=value,proto3" json:"value,omitempty"` // Message payload + Headers map[string][]byte `protobuf:"bytes,4,rep,name=headers,proto3" json:"headers,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` // Message headers + IsClose bool `protobuf:"varint,5,opt,name=is_close,json=isClose,proto3" json:"is_close,omitempty"` +} + +func (x *Message) Reset() { + *x = Message{} + if protoimpl.UnsafeEnabled { + mi := &file_messaging_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Message) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Message) ProtoMessage() {} + +func (x *Message) ProtoReflect() protoreflect.Message { + mi := &file_messaging_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Message.ProtoReflect.Descriptor instead. 
+func (*Message) Descriptor() ([]byte, []int) { + return file_messaging_proto_rawDescGZIP(), []int{1} +} + +func (x *Message) GetEventTimeNs() int64 { + if x != nil { + return x.EventTimeNs + } + return 0 +} + +func (x *Message) GetKey() []byte { + if x != nil { + return x.Key + } + return nil +} + +func (x *Message) GetValue() []byte { + if x != nil { + return x.Value + } + return nil +} + +func (x *Message) GetHeaders() map[string][]byte { + if x != nil { + return x.Headers + } + return nil +} + +func (x *Message) GetIsClose() bool { + if x != nil { + return x.IsClose + } + return false +} + +type BrokerMessage struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Data *Message `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"` +} + +func (x *BrokerMessage) Reset() { + *x = BrokerMessage{} + if protoimpl.UnsafeEnabled { + mi := &file_messaging_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *BrokerMessage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BrokerMessage) ProtoMessage() {} + +func (x *BrokerMessage) ProtoReflect() protoreflect.Message { + mi := &file_messaging_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BrokerMessage.ProtoReflect.Descriptor instead. 
+func (*BrokerMessage) Descriptor() ([]byte, []int) { + return file_messaging_proto_rawDescGZIP(), []int{2} +} + +func (x *BrokerMessage) GetData() *Message { + if x != nil { + return x.Data + } + return nil +} + +type PublishRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Init *PublishRequest_InitMessage `protobuf:"bytes,1,opt,name=init,proto3" json:"init,omitempty"` + Data *Message `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` +} + +func (x *PublishRequest) Reset() { + *x = PublishRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_messaging_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PublishRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PublishRequest) ProtoMessage() {} + +func (x *PublishRequest) ProtoReflect() protoreflect.Message { + mi := &file_messaging_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PublishRequest.ProtoReflect.Descriptor instead. 
+func (*PublishRequest) Descriptor() ([]byte, []int) { + return file_messaging_proto_rawDescGZIP(), []int{3} +} + +func (x *PublishRequest) GetInit() *PublishRequest_InitMessage { + if x != nil { + return x.Init + } + return nil +} + +func (x *PublishRequest) GetData() *Message { + if x != nil { + return x.Data + } + return nil +} + +type PublishResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Config *PublishResponse_ConfigMessage `protobuf:"bytes,1,opt,name=config,proto3" json:"config,omitempty"` + Redirect *PublishResponse_RedirectMessage `protobuf:"bytes,2,opt,name=redirect,proto3" json:"redirect,omitempty"` + IsClosed bool `protobuf:"varint,3,opt,name=is_closed,json=isClosed,proto3" json:"is_closed,omitempty"` +} + +func (x *PublishResponse) Reset() { + *x = PublishResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_messaging_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PublishResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PublishResponse) ProtoMessage() {} + +func (x *PublishResponse) ProtoReflect() protoreflect.Message { + mi := &file_messaging_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PublishResponse.ProtoReflect.Descriptor instead. 
+func (*PublishResponse) Descriptor() ([]byte, []int) { + return file_messaging_proto_rawDescGZIP(), []int{4} +} + +func (x *PublishResponse) GetConfig() *PublishResponse_ConfigMessage { + if x != nil { + return x.Config + } + return nil +} + +func (x *PublishResponse) GetRedirect() *PublishResponse_RedirectMessage { + if x != nil { + return x.Redirect + } + return nil +} + +func (x *PublishResponse) GetIsClosed() bool { + if x != nil { + return x.IsClosed + } + return false +} + +type DeleteTopicRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Namespace string `protobuf:"bytes,1,opt,name=namespace,proto3" json:"namespace,omitempty"` + Topic string `protobuf:"bytes,2,opt,name=topic,proto3" json:"topic,omitempty"` +} + +func (x *DeleteTopicRequest) Reset() { + *x = DeleteTopicRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_messaging_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DeleteTopicRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DeleteTopicRequest) ProtoMessage() {} + +func (x *DeleteTopicRequest) ProtoReflect() protoreflect.Message { + mi := &file_messaging_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DeleteTopicRequest.ProtoReflect.Descriptor instead. 
+func (*DeleteTopicRequest) Descriptor() ([]byte, []int) { + return file_messaging_proto_rawDescGZIP(), []int{5} +} + +func (x *DeleteTopicRequest) GetNamespace() string { + if x != nil { + return x.Namespace + } + return "" +} + +func (x *DeleteTopicRequest) GetTopic() string { + if x != nil { + return x.Topic + } + return "" +} + +type DeleteTopicResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *DeleteTopicResponse) Reset() { + *x = DeleteTopicResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_messaging_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DeleteTopicResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DeleteTopicResponse) ProtoMessage() {} + +func (x *DeleteTopicResponse) ProtoReflect() protoreflect.Message { + mi := &file_messaging_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DeleteTopicResponse.ProtoReflect.Descriptor instead. 
+func (*DeleteTopicResponse) Descriptor() ([]byte, []int) { + return file_messaging_proto_rawDescGZIP(), []int{6} +} + +type ConfigureTopicRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Namespace string `protobuf:"bytes,1,opt,name=namespace,proto3" json:"namespace,omitempty"` + Topic string `protobuf:"bytes,2,opt,name=topic,proto3" json:"topic,omitempty"` + Configuration *TopicConfiguration `protobuf:"bytes,3,opt,name=configuration,proto3" json:"configuration,omitempty"` +} + +func (x *ConfigureTopicRequest) Reset() { + *x = ConfigureTopicRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_messaging_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ConfigureTopicRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ConfigureTopicRequest) ProtoMessage() {} + +func (x *ConfigureTopicRequest) ProtoReflect() protoreflect.Message { + mi := &file_messaging_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ConfigureTopicRequest.ProtoReflect.Descriptor instead. 
+func (*ConfigureTopicRequest) Descriptor() ([]byte, []int) { + return file_messaging_proto_rawDescGZIP(), []int{7} +} + +func (x *ConfigureTopicRequest) GetNamespace() string { + if x != nil { + return x.Namespace + } + return "" +} + +func (x *ConfigureTopicRequest) GetTopic() string { + if x != nil { + return x.Topic + } + return "" +} + +func (x *ConfigureTopicRequest) GetConfiguration() *TopicConfiguration { + if x != nil { + return x.Configuration + } + return nil +} + +type ConfigureTopicResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *ConfigureTopicResponse) Reset() { + *x = ConfigureTopicResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_messaging_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ConfigureTopicResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ConfigureTopicResponse) ProtoMessage() {} + +func (x *ConfigureTopicResponse) ProtoReflect() protoreflect.Message { + mi := &file_messaging_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ConfigureTopicResponse.ProtoReflect.Descriptor instead. 
+func (*ConfigureTopicResponse) Descriptor() ([]byte, []int) { + return file_messaging_proto_rawDescGZIP(), []int{8} +} + +type GetTopicConfigurationRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Namespace string `protobuf:"bytes,1,opt,name=namespace,proto3" json:"namespace,omitempty"` + Topic string `protobuf:"bytes,2,opt,name=topic,proto3" json:"topic,omitempty"` +} + +func (x *GetTopicConfigurationRequest) Reset() { + *x = GetTopicConfigurationRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_messaging_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetTopicConfigurationRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetTopicConfigurationRequest) ProtoMessage() {} + +func (x *GetTopicConfigurationRequest) ProtoReflect() protoreflect.Message { + mi := &file_messaging_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetTopicConfigurationRequest.ProtoReflect.Descriptor instead. 
+func (*GetTopicConfigurationRequest) Descriptor() ([]byte, []int) { + return file_messaging_proto_rawDescGZIP(), []int{9} +} + +func (x *GetTopicConfigurationRequest) GetNamespace() string { + if x != nil { + return x.Namespace + } + return "" +} + +func (x *GetTopicConfigurationRequest) GetTopic() string { + if x != nil { + return x.Topic + } + return "" +} + +type GetTopicConfigurationResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Configuration *TopicConfiguration `protobuf:"bytes,1,opt,name=configuration,proto3" json:"configuration,omitempty"` +} + +func (x *GetTopicConfigurationResponse) Reset() { + *x = GetTopicConfigurationResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_messaging_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetTopicConfigurationResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetTopicConfigurationResponse) ProtoMessage() {} + +func (x *GetTopicConfigurationResponse) ProtoReflect() protoreflect.Message { + mi := &file_messaging_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetTopicConfigurationResponse.ProtoReflect.Descriptor instead. 
+func (*GetTopicConfigurationResponse) Descriptor() ([]byte, []int) { + return file_messaging_proto_rawDescGZIP(), []int{10} +} + +func (x *GetTopicConfigurationResponse) GetConfiguration() *TopicConfiguration { + if x != nil { + return x.Configuration + } + return nil +} + +type FindBrokerRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Namespace string `protobuf:"bytes,1,opt,name=namespace,proto3" json:"namespace,omitempty"` + Topic string `protobuf:"bytes,2,opt,name=topic,proto3" json:"topic,omitempty"` + Parition int32 `protobuf:"varint,3,opt,name=parition,proto3" json:"parition,omitempty"` +} + +func (x *FindBrokerRequest) Reset() { + *x = FindBrokerRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_messaging_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FindBrokerRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FindBrokerRequest) ProtoMessage() {} + +func (x *FindBrokerRequest) ProtoReflect() protoreflect.Message { + mi := &file_messaging_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FindBrokerRequest.ProtoReflect.Descriptor instead. 
+func (*FindBrokerRequest) Descriptor() ([]byte, []int) { + return file_messaging_proto_rawDescGZIP(), []int{11} +} + +func (x *FindBrokerRequest) GetNamespace() string { + if x != nil { + return x.Namespace + } + return "" +} + +func (x *FindBrokerRequest) GetTopic() string { + if x != nil { + return x.Topic + } + return "" +} + +func (x *FindBrokerRequest) GetParition() int32 { + if x != nil { + return x.Parition + } + return 0 +} + +type FindBrokerResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Broker string `protobuf:"bytes,1,opt,name=broker,proto3" json:"broker,omitempty"` +} + +func (x *FindBrokerResponse) Reset() { + *x = FindBrokerResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_messaging_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FindBrokerResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FindBrokerResponse) ProtoMessage() {} + +func (x *FindBrokerResponse) ProtoReflect() protoreflect.Message { + mi := &file_messaging_proto_msgTypes[12] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FindBrokerResponse.ProtoReflect.Descriptor instead. 
+func (*FindBrokerResponse) Descriptor() ([]byte, []int) { + return file_messaging_proto_rawDescGZIP(), []int{12} +} + +func (x *FindBrokerResponse) GetBroker() string { + if x != nil { + return x.Broker + } + return "" +} + +type TopicConfiguration struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + PartitionCount int32 `protobuf:"varint,1,opt,name=partition_count,json=partitionCount,proto3" json:"partition_count,omitempty"` + Collection string `protobuf:"bytes,2,opt,name=collection,proto3" json:"collection,omitempty"` + Replication string `protobuf:"bytes,3,opt,name=replication,proto3" json:"replication,omitempty"` + IsTransient bool `protobuf:"varint,4,opt,name=is_transient,json=isTransient,proto3" json:"is_transient,omitempty"` + Partitoning TopicConfiguration_Partitioning `protobuf:"varint,5,opt,name=partitoning,proto3,enum=messaging_pb.TopicConfiguration_Partitioning" json:"partitoning,omitempty"` +} + +func (x *TopicConfiguration) Reset() { + *x = TopicConfiguration{} + if protoimpl.UnsafeEnabled { + mi := &file_messaging_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TopicConfiguration) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TopicConfiguration) ProtoMessage() {} + +func (x *TopicConfiguration) ProtoReflect() protoreflect.Message { + mi := &file_messaging_proto_msgTypes[13] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TopicConfiguration.ProtoReflect.Descriptor instead. 
+func (*TopicConfiguration) Descriptor() ([]byte, []int) { + return file_messaging_proto_rawDescGZIP(), []int{13} +} + +func (x *TopicConfiguration) GetPartitionCount() int32 { + if x != nil { + return x.PartitionCount + } + return 0 +} + +func (x *TopicConfiguration) GetCollection() string { + if x != nil { + return x.Collection + } + return "" +} + +func (x *TopicConfiguration) GetReplication() string { + if x != nil { + return x.Replication + } + return "" +} + +func (x *TopicConfiguration) GetIsTransient() bool { + if x != nil { + return x.IsTransient + } + return false +} + +func (x *TopicConfiguration) GetPartitoning() TopicConfiguration_Partitioning { + if x != nil { + return x.Partitoning + } + return TopicConfiguration_NonNullKeyHash +} + +type SubscriberMessage_InitMessage struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Namespace string `protobuf:"bytes,1,opt,name=namespace,proto3" json:"namespace,omitempty"` + Topic string `protobuf:"bytes,2,opt,name=topic,proto3" json:"topic,omitempty"` + Partition int32 `protobuf:"varint,3,opt,name=partition,proto3" json:"partition,omitempty"` + StartPosition SubscriberMessage_InitMessage_StartPosition `protobuf:"varint,4,opt,name=startPosition,proto3,enum=messaging_pb.SubscriberMessage_InitMessage_StartPosition" json:"startPosition,omitempty"` // Where to begin consuming from + TimestampNs int64 `protobuf:"varint,5,opt,name=timestampNs,proto3" json:"timestampNs,omitempty"` // timestamp in nano seconds + SubscriberId string `protobuf:"bytes,6,opt,name=subscriber_id,json=subscriberId,proto3" json:"subscriber_id,omitempty"` // uniquely identify a subscriber to track consumption +} + +func (x *SubscriberMessage_InitMessage) Reset() { + *x = SubscriberMessage_InitMessage{} + if protoimpl.UnsafeEnabled { + mi := &file_messaging_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x 
*SubscriberMessage_InitMessage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SubscriberMessage_InitMessage) ProtoMessage() {} + +func (x *SubscriberMessage_InitMessage) ProtoReflect() protoreflect.Message { + mi := &file_messaging_proto_msgTypes[14] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SubscriberMessage_InitMessage.ProtoReflect.Descriptor instead. +func (*SubscriberMessage_InitMessage) Descriptor() ([]byte, []int) { + return file_messaging_proto_rawDescGZIP(), []int{0, 0} +} + +func (x *SubscriberMessage_InitMessage) GetNamespace() string { + if x != nil { + return x.Namespace + } + return "" +} + +func (x *SubscriberMessage_InitMessage) GetTopic() string { + if x != nil { + return x.Topic + } + return "" +} + +func (x *SubscriberMessage_InitMessage) GetPartition() int32 { + if x != nil { + return x.Partition + } + return 0 +} + +func (x *SubscriberMessage_InitMessage) GetStartPosition() SubscriberMessage_InitMessage_StartPosition { + if x != nil { + return x.StartPosition + } + return SubscriberMessage_InitMessage_LATEST +} + +func (x *SubscriberMessage_InitMessage) GetTimestampNs() int64 { + if x != nil { + return x.TimestampNs + } + return 0 +} + +func (x *SubscriberMessage_InitMessage) GetSubscriberId() string { + if x != nil { + return x.SubscriberId + } + return "" +} + +type SubscriberMessage_AckMessage struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + MessageId int64 `protobuf:"varint,1,opt,name=message_id,json=messageId,proto3" json:"message_id,omitempty"` +} + +func (x *SubscriberMessage_AckMessage) Reset() { + *x = SubscriberMessage_AckMessage{} + if protoimpl.UnsafeEnabled { + mi := &file_messaging_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) 
+ ms.StoreMessageInfo(mi) + } +} + +func (x *SubscriberMessage_AckMessage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SubscriberMessage_AckMessage) ProtoMessage() {} + +func (x *SubscriberMessage_AckMessage) ProtoReflect() protoreflect.Message { + mi := &file_messaging_proto_msgTypes[15] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SubscriberMessage_AckMessage.ProtoReflect.Descriptor instead. +func (*SubscriberMessage_AckMessage) Descriptor() ([]byte, []int) { + return file_messaging_proto_rawDescGZIP(), []int{0, 1} +} + +func (x *SubscriberMessage_AckMessage) GetMessageId() int64 { + if x != nil { + return x.MessageId + } + return 0 +} + +type PublishRequest_InitMessage struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Namespace string `protobuf:"bytes,1,opt,name=namespace,proto3" json:"namespace,omitempty"` // only needed on the initial request + Topic string `protobuf:"bytes,2,opt,name=topic,proto3" json:"topic,omitempty"` // only needed on the initial request + Partition int32 `protobuf:"varint,3,opt,name=partition,proto3" json:"partition,omitempty"` +} + +func (x *PublishRequest_InitMessage) Reset() { + *x = PublishRequest_InitMessage{} + if protoimpl.UnsafeEnabled { + mi := &file_messaging_proto_msgTypes[17] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PublishRequest_InitMessage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PublishRequest_InitMessage) ProtoMessage() {} + +func (x *PublishRequest_InitMessage) ProtoReflect() protoreflect.Message { + mi := &file_messaging_proto_msgTypes[17] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if 
ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PublishRequest_InitMessage.ProtoReflect.Descriptor instead. +func (*PublishRequest_InitMessage) Descriptor() ([]byte, []int) { + return file_messaging_proto_rawDescGZIP(), []int{3, 0} +} + +func (x *PublishRequest_InitMessage) GetNamespace() string { + if x != nil { + return x.Namespace + } + return "" +} + +func (x *PublishRequest_InitMessage) GetTopic() string { + if x != nil { + return x.Topic + } + return "" +} + +func (x *PublishRequest_InitMessage) GetPartition() int32 { + if x != nil { + return x.Partition + } + return 0 +} + +type PublishResponse_ConfigMessage struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + PartitionCount int32 `protobuf:"varint,1,opt,name=partition_count,json=partitionCount,proto3" json:"partition_count,omitempty"` +} + +func (x *PublishResponse_ConfigMessage) Reset() { + *x = PublishResponse_ConfigMessage{} + if protoimpl.UnsafeEnabled { + mi := &file_messaging_proto_msgTypes[18] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PublishResponse_ConfigMessage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PublishResponse_ConfigMessage) ProtoMessage() {} + +func (x *PublishResponse_ConfigMessage) ProtoReflect() protoreflect.Message { + mi := &file_messaging_proto_msgTypes[18] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PublishResponse_ConfigMessage.ProtoReflect.Descriptor instead. 
+func (*PublishResponse_ConfigMessage) Descriptor() ([]byte, []int) { + return file_messaging_proto_rawDescGZIP(), []int{4, 0} +} + +func (x *PublishResponse_ConfigMessage) GetPartitionCount() int32 { + if x != nil { + return x.PartitionCount + } + return 0 +} + +type PublishResponse_RedirectMessage struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + NewBroker string `protobuf:"bytes,1,opt,name=new_broker,json=newBroker,proto3" json:"new_broker,omitempty"` +} + +func (x *PublishResponse_RedirectMessage) Reset() { + *x = PublishResponse_RedirectMessage{} + if protoimpl.UnsafeEnabled { + mi := &file_messaging_proto_msgTypes[19] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PublishResponse_RedirectMessage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PublishResponse_RedirectMessage) ProtoMessage() {} + +func (x *PublishResponse_RedirectMessage) ProtoReflect() protoreflect.Message { + mi := &file_messaging_proto_msgTypes[19] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PublishResponse_RedirectMessage.ProtoReflect.Descriptor instead. 
+func (*PublishResponse_RedirectMessage) Descriptor() ([]byte, []int) { + return file_messaging_proto_rawDescGZIP(), []int{4, 1} +} + +func (x *PublishResponse_RedirectMessage) GetNewBroker() string { + if x != nil { + return x.NewBroker + } + return "" +} + +var File_messaging_proto protoreflect.FileDescriptor + +var file_messaging_proto_rawDesc = []byte{ + 0x0a, 0x0f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x12, 0x0c, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x62, 0x22, + 0x9e, 0x04, 0x0a, 0x11, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x72, 0x4d, 0x65, + 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x3f, 0x0a, 0x04, 0x69, 0x6e, 0x69, 0x74, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x5f, + 0x70, 0x62, 0x2e, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x72, 0x4d, 0x65, 0x73, + 0x73, 0x61, 0x67, 0x65, 0x2e, 0x49, 0x6e, 0x69, 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, + 0x52, 0x04, 0x69, 0x6e, 0x69, 0x74, 0x12, 0x3c, 0x0a, 0x03, 0x61, 0x63, 0x6b, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x5f, + 0x70, 0x62, 0x2e, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x72, 0x4d, 0x65, 0x73, + 0x73, 0x61, 0x67, 0x65, 0x2e, 0x41, 0x63, 0x6b, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, + 0x03, 0x61, 0x63, 0x6b, 0x12, 0x19, 0x0a, 0x08, 0x69, 0x73, 0x5f, 0x63, 0x6c, 0x6f, 0x73, 0x65, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x69, 0x73, 0x43, 0x6c, 0x6f, 0x73, 0x65, 0x1a, + 0xc1, 0x02, 0x0a, 0x0b, 0x49, 0x6e, 0x69, 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, + 0x1c, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, + 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x18, 0x02, 0x20, 0x01, 0x28, 
0x09, 0x52, 0x05, 0x74, 0x6f, + 0x70, 0x69, 0x63, 0x12, 0x1c, 0x0a, 0x09, 0x70, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x09, 0x70, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, + 0x6e, 0x12, 0x5f, 0x0a, 0x0d, 0x73, 0x74, 0x61, 0x72, 0x74, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, + 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x39, 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, + 0x67, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x62, 0x2e, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, + 0x65, 0x72, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x49, 0x6e, 0x69, 0x74, 0x4d, 0x65, + 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x50, 0x6f, 0x73, 0x69, 0x74, + 0x69, 0x6f, 0x6e, 0x52, 0x0d, 0x73, 0x74, 0x61, 0x72, 0x74, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, + 0x6f, 0x6e, 0x12, 0x20, 0x0a, 0x0b, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x4e, + 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0b, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, + 0x6d, 0x70, 0x4e, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, + 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x73, 0x75, 0x62, + 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x72, 0x49, 0x64, 0x22, 0x38, 0x0a, 0x0d, 0x53, 0x74, 0x61, + 0x72, 0x74, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x0a, 0x0a, 0x06, 0x4c, 0x41, + 0x54, 0x45, 0x53, 0x54, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x45, 0x41, 0x52, 0x4c, 0x49, 0x45, + 0x53, 0x54, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x54, 0x49, 0x4d, 0x45, 0x53, 0x54, 0x41, 0x4d, + 0x50, 0x10, 0x02, 0x1a, 0x2b, 0x0a, 0x0a, 0x41, 0x63, 0x6b, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, + 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x69, 0x64, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x49, 0x64, + 0x22, 0xee, 0x01, 0x0a, 0x07, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x26, 0x0a, 0x0d, + 0x65, 
0x76, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x6e, 0x73, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x03, 0x42, 0x02, 0x30, 0x01, 0x52, 0x0b, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x69, + 0x6d, 0x65, 0x4e, 0x73, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0c, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x3c, 0x0a, 0x07, + 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22, 0x2e, + 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x62, 0x2e, 0x4d, 0x65, 0x73, + 0x73, 0x61, 0x67, 0x65, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x52, 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x12, 0x19, 0x0a, 0x08, 0x69, 0x73, + 0x5f, 0x63, 0x6c, 0x6f, 0x73, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x69, 0x73, + 0x43, 0x6c, 0x6f, 0x73, 0x65, 0x1a, 0x3a, 0x0a, 0x0c, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, + 0x01, 0x22, 0x3a, 0x0a, 0x0d, 0x42, 0x72, 0x6f, 0x6b, 0x65, 0x72, 0x4d, 0x65, 0x73, 0x73, 0x61, + 0x67, 0x65, 0x12, 0x29, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x15, 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x62, 0x2e, + 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x22, 0xda, 0x01, + 0x0a, 0x0e, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x3c, 0x0a, 0x04, 0x69, 0x6e, 0x69, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x28, + 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 
0x69, 0x6e, 0x67, 0x5f, 0x70, 0x62, 0x2e, 0x50, 0x75, + 0x62, 0x6c, 0x69, 0x73, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x49, 0x6e, 0x69, + 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x04, 0x69, 0x6e, 0x69, 0x74, 0x12, 0x29, + 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x6d, + 0x65, 0x73, 0x73, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x62, 0x2e, 0x4d, 0x65, 0x73, 0x73, + 0x61, 0x67, 0x65, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x1a, 0x5f, 0x0a, 0x0b, 0x49, 0x6e, 0x69, + 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, + 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6e, 0x61, 0x6d, + 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x12, 0x1c, 0x0a, 0x09, + 0x70, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, + 0x09, 0x70, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xaa, 0x02, 0x0a, 0x0f, 0x50, + 0x75, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x43, + 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, + 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x62, 0x2e, 0x50, 0x75, + 0x62, 0x6c, 0x69, 0x73, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x06, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x12, 0x49, 0x0a, 0x08, 0x72, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x69, 0x6e, + 0x67, 0x5f, 0x70, 0x62, 0x2e, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x52, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 
0x4d, 0x65, 0x73, + 0x73, 0x61, 0x67, 0x65, 0x52, 0x08, 0x72, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x12, 0x1b, + 0x0a, 0x09, 0x69, 0x73, 0x5f, 0x63, 0x6c, 0x6f, 0x73, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x08, 0x69, 0x73, 0x43, 0x6c, 0x6f, 0x73, 0x65, 0x64, 0x1a, 0x38, 0x0a, 0x0d, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x27, 0x0a, 0x0f, + 0x70, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0e, 0x70, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, + 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x1a, 0x30, 0x0a, 0x0f, 0x52, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, + 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x6e, 0x65, 0x77, 0x5f, + 0x62, 0x72, 0x6f, 0x6b, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6e, 0x65, + 0x77, 0x42, 0x72, 0x6f, 0x6b, 0x65, 0x72, 0x22, 0x48, 0x0a, 0x12, 0x44, 0x65, 0x6c, 0x65, 0x74, + 0x65, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1c, 0x0a, + 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x74, + 0x6f, 0x70, 0x69, 0x63, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, 0x6f, 0x70, 0x69, + 0x63, 0x22, 0x15, 0x0a, 0x13, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x54, 0x6f, 0x70, 0x69, 0x63, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x93, 0x01, 0x0a, 0x15, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, + 0x12, 0x14, 0x0a, 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x05, 0x74, 0x6f, 
0x70, 0x69, 0x63, 0x12, 0x46, 0x0a, 0x0d, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, + 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x62, 0x2e, 0x54, 0x6f, 0x70, + 0x69, 0x63, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, + 0x0d, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x18, + 0x0a, 0x16, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x54, 0x6f, 0x70, 0x69, 0x63, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x52, 0x0a, 0x1c, 0x47, 0x65, 0x74, 0x54, + 0x6f, 0x70, 0x69, 0x63, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, + 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6e, 0x61, 0x6d, + 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x22, 0x67, 0x0a, 0x1d, + 0x47, 0x65, 0x74, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x46, 0x0a, + 0x0d, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x69, 0x6e, 0x67, + 0x5f, 0x70, 0x62, 0x2e, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0d, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x63, 0x0a, 0x11, 0x46, 0x69, 0x6e, 0x64, 0x42, 0x72, 0x6f, + 0x6b, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x6e, 0x61, + 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 
0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6e, + 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x6f, 0x70, 0x69, + 0x63, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x12, 0x1a, + 0x0a, 0x08, 0x70, 0x61, 0x72, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, + 0x52, 0x08, 0x70, 0x61, 0x72, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x2c, 0x0a, 0x12, 0x46, 0x69, + 0x6e, 0x64, 0x42, 0x72, 0x6f, 0x6b, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x16, 0x0a, 0x06, 0x62, 0x72, 0x6f, 0x6b, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x06, 0x62, 0x72, 0x6f, 0x6b, 0x65, 0x72, 0x22, 0xb4, 0x02, 0x0a, 0x12, 0x54, 0x6f, 0x70, + 0x69, 0x63, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, + 0x27, 0x0a, 0x0f, 0x70, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, 0x75, + 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0e, 0x70, 0x61, 0x72, 0x74, 0x69, 0x74, + 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6f, + 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x20, 0x0a, 0x0b, 0x72, 0x65, 0x70, 0x6c, + 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x72, + 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x21, 0x0a, 0x0c, 0x69, 0x73, + 0x5f, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x69, 0x65, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x0b, 0x69, 0x73, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x69, 0x65, 0x6e, 0x74, 0x12, 0x4f, 0x0a, + 0x0b, 0x70, 0x61, 0x72, 0x74, 0x69, 0x74, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x18, 0x05, 0x20, 0x01, + 0x28, 0x0e, 0x32, 0x2d, 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x5f, 0x70, + 0x62, 0x2e, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 
0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x69, 0x6e, + 0x67, 0x52, 0x0b, 0x70, 0x61, 0x72, 0x74, 0x69, 0x74, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x22, 0x3f, + 0x0a, 0x0c, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x12, 0x12, + 0x0a, 0x0e, 0x4e, 0x6f, 0x6e, 0x4e, 0x75, 0x6c, 0x6c, 0x4b, 0x65, 0x79, 0x48, 0x61, 0x73, 0x68, + 0x10, 0x00, 0x12, 0x0b, 0x0a, 0x07, 0x4b, 0x65, 0x79, 0x48, 0x61, 0x73, 0x68, 0x10, 0x01, 0x12, + 0x0e, 0x0a, 0x0a, 0x52, 0x6f, 0x75, 0x6e, 0x64, 0x52, 0x6f, 0x62, 0x69, 0x6e, 0x10, 0x02, 0x32, + 0xad, 0x04, 0x0a, 0x10, 0x53, 0x65, 0x61, 0x77, 0x65, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, + 0x67, 0x69, 0x6e, 0x67, 0x12, 0x4f, 0x0a, 0x09, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, + 0x65, 0x12, 0x1f, 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x62, + 0x2e, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x72, 0x4d, 0x65, 0x73, 0x73, 0x61, + 0x67, 0x65, 0x1a, 0x1b, 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x5f, 0x70, + 0x62, 0x2e, 0x42, 0x72, 0x6f, 0x6b, 0x65, 0x72, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, + 0x00, 0x28, 0x01, 0x30, 0x01, 0x12, 0x4c, 0x0a, 0x07, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x73, 0x68, + 0x12, 0x1c, 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x62, 0x2e, + 0x50, 0x75, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, + 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x62, 0x2e, 0x50, 0x75, + 0x62, 0x6c, 0x69, 0x73, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x28, + 0x01, 0x30, 0x01, 0x12, 0x54, 0x0a, 0x0b, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x54, 0x6f, 0x70, + 0x69, 0x63, 0x12, 0x20, 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x5f, 0x70, + 0x62, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 
0x21, 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x69, 0x6e, 0x67, + 0x5f, 0x70, 0x62, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x5d, 0x0a, 0x0e, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x12, 0x23, 0x2e, 0x6d, 0x65, + 0x73, 0x73, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x62, 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x75, 0x72, 0x65, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x24, 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x62, 0x2e, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x72, 0x0a, 0x15, 0x47, 0x65, 0x74, 0x54, + 0x6f, 0x70, 0x69, 0x63, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x12, 0x2a, 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x62, + 0x2e, 0x47, 0x65, 0x74, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2b, 0x2e, + 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x62, 0x2e, 0x47, 0x65, 0x74, + 0x54, 0x6f, 0x70, 0x69, 0x63, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x51, 0x0a, 0x0a, + 0x46, 0x69, 0x6e, 0x64, 0x42, 0x72, 0x6f, 0x6b, 0x65, 0x72, 0x12, 0x1f, 0x2e, 0x6d, 0x65, 0x73, + 0x73, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x62, 0x2e, 0x46, 0x69, 0x6e, 0x64, 0x42, 0x72, + 0x6f, 0x6b, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x6d, 0x65, + 0x73, 0x73, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x62, 0x2e, 0x46, 0x69, 0x6e, 0x64, 0x42, + 0x72, 0x6f, 0x6b, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 
0x73, 0x65, 0x22, 0x00, 0x42, + 0x57, 0x0a, 0x10, 0x73, 0x65, 0x61, 0x77, 0x65, 0x65, 0x64, 0x66, 0x73, 0x2e, 0x63, 0x6c, 0x69, + 0x65, 0x6e, 0x74, 0x42, 0x0e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x50, 0x72, + 0x6f, 0x74, 0x6f, 0x5a, 0x33, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, + 0x63, 0x68, 0x72, 0x69, 0x73, 0x6c, 0x75, 0x73, 0x66, 0x2f, 0x73, 0x65, 0x61, 0x77, 0x65, 0x65, + 0x64, 0x66, 0x73, 0x2f, 0x77, 0x65, 0x65, 0x64, 0x2f, 0x70, 0x62, 0x2f, 0x6d, 0x65, 0x73, 0x73, + 0x61, 0x67, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x62, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_messaging_proto_rawDescOnce sync.Once + file_messaging_proto_rawDescData = file_messaging_proto_rawDesc +) + +func file_messaging_proto_rawDescGZIP() []byte { + file_messaging_proto_rawDescOnce.Do(func() { + file_messaging_proto_rawDescData = protoimpl.X.CompressGZIP(file_messaging_proto_rawDescData) + }) + return file_messaging_proto_rawDescData +} + +var file_messaging_proto_enumTypes = make([]protoimpl.EnumInfo, 2) +var file_messaging_proto_msgTypes = make([]protoimpl.MessageInfo, 20) +var file_messaging_proto_goTypes = []interface{}{ + (SubscriberMessage_InitMessage_StartPosition)(0), // 0: messaging_pb.SubscriberMessage.InitMessage.StartPosition + (TopicConfiguration_Partitioning)(0), // 1: messaging_pb.TopicConfiguration.Partitioning + (*SubscriberMessage)(nil), // 2: messaging_pb.SubscriberMessage + (*Message)(nil), // 3: messaging_pb.Message + (*BrokerMessage)(nil), // 4: messaging_pb.BrokerMessage + (*PublishRequest)(nil), // 5: messaging_pb.PublishRequest + (*PublishResponse)(nil), // 6: messaging_pb.PublishResponse + (*DeleteTopicRequest)(nil), // 7: messaging_pb.DeleteTopicRequest + (*DeleteTopicResponse)(nil), // 8: messaging_pb.DeleteTopicResponse + (*ConfigureTopicRequest)(nil), // 9: messaging_pb.ConfigureTopicRequest + (*ConfigureTopicResponse)(nil), // 10: messaging_pb.ConfigureTopicResponse + 
(*GetTopicConfigurationRequest)(nil), // 11: messaging_pb.GetTopicConfigurationRequest + (*GetTopicConfigurationResponse)(nil), // 12: messaging_pb.GetTopicConfigurationResponse + (*FindBrokerRequest)(nil), // 13: messaging_pb.FindBrokerRequest + (*FindBrokerResponse)(nil), // 14: messaging_pb.FindBrokerResponse + (*TopicConfiguration)(nil), // 15: messaging_pb.TopicConfiguration + (*SubscriberMessage_InitMessage)(nil), // 16: messaging_pb.SubscriberMessage.InitMessage + (*SubscriberMessage_AckMessage)(nil), // 17: messaging_pb.SubscriberMessage.AckMessage + nil, // 18: messaging_pb.Message.HeadersEntry + (*PublishRequest_InitMessage)(nil), // 19: messaging_pb.PublishRequest.InitMessage + (*PublishResponse_ConfigMessage)(nil), // 20: messaging_pb.PublishResponse.ConfigMessage + (*PublishResponse_RedirectMessage)(nil), // 21: messaging_pb.PublishResponse.RedirectMessage +} +var file_messaging_proto_depIdxs = []int32{ + 16, // 0: messaging_pb.SubscriberMessage.init:type_name -> messaging_pb.SubscriberMessage.InitMessage + 17, // 1: messaging_pb.SubscriberMessage.ack:type_name -> messaging_pb.SubscriberMessage.AckMessage + 18, // 2: messaging_pb.Message.headers:type_name -> messaging_pb.Message.HeadersEntry + 3, // 3: messaging_pb.BrokerMessage.data:type_name -> messaging_pb.Message + 19, // 4: messaging_pb.PublishRequest.init:type_name -> messaging_pb.PublishRequest.InitMessage + 3, // 5: messaging_pb.PublishRequest.data:type_name -> messaging_pb.Message + 20, // 6: messaging_pb.PublishResponse.config:type_name -> messaging_pb.PublishResponse.ConfigMessage + 21, // 7: messaging_pb.PublishResponse.redirect:type_name -> messaging_pb.PublishResponse.RedirectMessage + 15, // 8: messaging_pb.ConfigureTopicRequest.configuration:type_name -> messaging_pb.TopicConfiguration + 15, // 9: messaging_pb.GetTopicConfigurationResponse.configuration:type_name -> messaging_pb.TopicConfiguration + 1, // 10: messaging_pb.TopicConfiguration.partitoning:type_name -> 
messaging_pb.TopicConfiguration.Partitioning + 0, // 11: messaging_pb.SubscriberMessage.InitMessage.startPosition:type_name -> messaging_pb.SubscriberMessage.InitMessage.StartPosition + 2, // 12: messaging_pb.SeaweedMessaging.Subscribe:input_type -> messaging_pb.SubscriberMessage + 5, // 13: messaging_pb.SeaweedMessaging.Publish:input_type -> messaging_pb.PublishRequest + 7, // 14: messaging_pb.SeaweedMessaging.DeleteTopic:input_type -> messaging_pb.DeleteTopicRequest + 9, // 15: messaging_pb.SeaweedMessaging.ConfigureTopic:input_type -> messaging_pb.ConfigureTopicRequest + 11, // 16: messaging_pb.SeaweedMessaging.GetTopicConfiguration:input_type -> messaging_pb.GetTopicConfigurationRequest + 13, // 17: messaging_pb.SeaweedMessaging.FindBroker:input_type -> messaging_pb.FindBrokerRequest + 4, // 18: messaging_pb.SeaweedMessaging.Subscribe:output_type -> messaging_pb.BrokerMessage + 6, // 19: messaging_pb.SeaweedMessaging.Publish:output_type -> messaging_pb.PublishResponse + 8, // 20: messaging_pb.SeaweedMessaging.DeleteTopic:output_type -> messaging_pb.DeleteTopicResponse + 10, // 21: messaging_pb.SeaweedMessaging.ConfigureTopic:output_type -> messaging_pb.ConfigureTopicResponse + 12, // 22: messaging_pb.SeaweedMessaging.GetTopicConfiguration:output_type -> messaging_pb.GetTopicConfigurationResponse + 14, // 23: messaging_pb.SeaweedMessaging.FindBroker:output_type -> messaging_pb.FindBrokerResponse + 18, // [18:24] is the sub-list for method output_type + 12, // [12:18] is the sub-list for method input_type + 12, // [12:12] is the sub-list for extension type_name + 12, // [12:12] is the sub-list for extension extendee + 0, // [0:12] is the sub-list for field type_name +} + +func init() { file_messaging_proto_init() } +func file_messaging_proto_init() { + if File_messaging_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_messaging_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SubscriberMessage); i { + case 
0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_messaging_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Message); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_messaging_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BrokerMessage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_messaging_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PublishRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_messaging_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PublishResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_messaging_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DeleteTopicRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_messaging_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DeleteTopicResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_messaging_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ConfigureTopicRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_messaging_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + 
switch v := v.(*ConfigureTopicResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_messaging_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetTopicConfigurationRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_messaging_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetTopicConfigurationResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_messaging_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FindBrokerRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_messaging_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FindBrokerResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_messaging_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TopicConfiguration); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_messaging_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SubscriberMessage_InitMessage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_messaging_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SubscriberMessage_AckMessage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + 
default: + return nil + } + } + file_messaging_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PublishRequest_InitMessage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_messaging_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PublishResponse_ConfigMessage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_messaging_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PublishResponse_RedirectMessage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_messaging_proto_rawDesc, + NumEnums: 2, + NumMessages: 20, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_messaging_proto_goTypes, + DependencyIndexes: file_messaging_proto_depIdxs, + EnumInfos: file_messaging_proto_enumTypes, + MessageInfos: file_messaging_proto_msgTypes, + }.Build() + File_messaging_proto = out.File + file_messaging_proto_rawDesc = nil + file_messaging_proto_goTypes = nil + file_messaging_proto_depIdxs = nil +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConnInterface + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion6 + +// SeaweedMessagingClient is the client API for SeaweedMessaging service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. 
+type SeaweedMessagingClient interface { + Subscribe(ctx context.Context, opts ...grpc.CallOption) (SeaweedMessaging_SubscribeClient, error) + Publish(ctx context.Context, opts ...grpc.CallOption) (SeaweedMessaging_PublishClient, error) + DeleteTopic(ctx context.Context, in *DeleteTopicRequest, opts ...grpc.CallOption) (*DeleteTopicResponse, error) + ConfigureTopic(ctx context.Context, in *ConfigureTopicRequest, opts ...grpc.CallOption) (*ConfigureTopicResponse, error) + GetTopicConfiguration(ctx context.Context, in *GetTopicConfigurationRequest, opts ...grpc.CallOption) (*GetTopicConfigurationResponse, error) + FindBroker(ctx context.Context, in *FindBrokerRequest, opts ...grpc.CallOption) (*FindBrokerResponse, error) +} + +type seaweedMessagingClient struct { + cc grpc.ClientConnInterface +} + +func NewSeaweedMessagingClient(cc grpc.ClientConnInterface) SeaweedMessagingClient { + return &seaweedMessagingClient{cc} +} + +func (c *seaweedMessagingClient) Subscribe(ctx context.Context, opts ...grpc.CallOption) (SeaweedMessaging_SubscribeClient, error) { + stream, err := c.cc.NewStream(ctx, &_SeaweedMessaging_serviceDesc.Streams[0], "/messaging_pb.SeaweedMessaging/Subscribe", opts...) 
+ if err != nil { + return nil, err + } + x := &seaweedMessagingSubscribeClient{stream} + return x, nil +} + +type SeaweedMessaging_SubscribeClient interface { + Send(*SubscriberMessage) error + Recv() (*BrokerMessage, error) + grpc.ClientStream +} + +type seaweedMessagingSubscribeClient struct { + grpc.ClientStream +} + +func (x *seaweedMessagingSubscribeClient) Send(m *SubscriberMessage) error { + return x.ClientStream.SendMsg(m) +} + +func (x *seaweedMessagingSubscribeClient) Recv() (*BrokerMessage, error) { + m := new(BrokerMessage) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *seaweedMessagingClient) Publish(ctx context.Context, opts ...grpc.CallOption) (SeaweedMessaging_PublishClient, error) { + stream, err := c.cc.NewStream(ctx, &_SeaweedMessaging_serviceDesc.Streams[1], "/messaging_pb.SeaweedMessaging/Publish", opts...) + if err != nil { + return nil, err + } + x := &seaweedMessagingPublishClient{stream} + return x, nil +} + +type SeaweedMessaging_PublishClient interface { + Send(*PublishRequest) error + Recv() (*PublishResponse, error) + grpc.ClientStream +} + +type seaweedMessagingPublishClient struct { + grpc.ClientStream +} + +func (x *seaweedMessagingPublishClient) Send(m *PublishRequest) error { + return x.ClientStream.SendMsg(m) +} + +func (x *seaweedMessagingPublishClient) Recv() (*PublishResponse, error) { + m := new(PublishResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *seaweedMessagingClient) DeleteTopic(ctx context.Context, in *DeleteTopicRequest, opts ...grpc.CallOption) (*DeleteTopicResponse, error) { + out := new(DeleteTopicResponse) + err := c.cc.Invoke(ctx, "/messaging_pb.SeaweedMessaging/DeleteTopic", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *seaweedMessagingClient) ConfigureTopic(ctx context.Context, in *ConfigureTopicRequest, opts ...grpc.CallOption) (*ConfigureTopicResponse, error) { + out := new(ConfigureTopicResponse) + err := c.cc.Invoke(ctx, "/messaging_pb.SeaweedMessaging/ConfigureTopic", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *seaweedMessagingClient) GetTopicConfiguration(ctx context.Context, in *GetTopicConfigurationRequest, opts ...grpc.CallOption) (*GetTopicConfigurationResponse, error) { + out := new(GetTopicConfigurationResponse) + err := c.cc.Invoke(ctx, "/messaging_pb.SeaweedMessaging/GetTopicConfiguration", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *seaweedMessagingClient) FindBroker(ctx context.Context, in *FindBrokerRequest, opts ...grpc.CallOption) (*FindBrokerResponse, error) { + out := new(FindBrokerResponse) + err := c.cc.Invoke(ctx, "/messaging_pb.SeaweedMessaging/FindBroker", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// SeaweedMessagingServer is the server API for SeaweedMessaging service. +type SeaweedMessagingServer interface { + Subscribe(SeaweedMessaging_SubscribeServer) error + Publish(SeaweedMessaging_PublishServer) error + DeleteTopic(context.Context, *DeleteTopicRequest) (*DeleteTopicResponse, error) + ConfigureTopic(context.Context, *ConfigureTopicRequest) (*ConfigureTopicResponse, error) + GetTopicConfiguration(context.Context, *GetTopicConfigurationRequest) (*GetTopicConfigurationResponse, error) + FindBroker(context.Context, *FindBrokerRequest) (*FindBrokerResponse, error) +} + +// UnimplementedSeaweedMessagingServer can be embedded to have forward compatible implementations. 
+type UnimplementedSeaweedMessagingServer struct { +} + +func (*UnimplementedSeaweedMessagingServer) Subscribe(SeaweedMessaging_SubscribeServer) error { + return status.Errorf(codes.Unimplemented, "method Subscribe not implemented") +} +func (*UnimplementedSeaweedMessagingServer) Publish(SeaweedMessaging_PublishServer) error { + return status.Errorf(codes.Unimplemented, "method Publish not implemented") +} +func (*UnimplementedSeaweedMessagingServer) DeleteTopic(context.Context, *DeleteTopicRequest) (*DeleteTopicResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method DeleteTopic not implemented") +} +func (*UnimplementedSeaweedMessagingServer) ConfigureTopic(context.Context, *ConfigureTopicRequest) (*ConfigureTopicResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ConfigureTopic not implemented") +} +func (*UnimplementedSeaweedMessagingServer) GetTopicConfiguration(context.Context, *GetTopicConfigurationRequest) (*GetTopicConfigurationResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetTopicConfiguration not implemented") +} +func (*UnimplementedSeaweedMessagingServer) FindBroker(context.Context, *FindBrokerRequest) (*FindBrokerResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method FindBroker not implemented") +} + +func RegisterSeaweedMessagingServer(s *grpc.Server, srv SeaweedMessagingServer) { + s.RegisterService(&_SeaweedMessaging_serviceDesc, srv) +} + +func _SeaweedMessaging_Subscribe_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(SeaweedMessagingServer).Subscribe(&seaweedMessagingSubscribeServer{stream}) +} + +type SeaweedMessaging_SubscribeServer interface { + Send(*BrokerMessage) error + Recv() (*SubscriberMessage, error) + grpc.ServerStream +} + +type seaweedMessagingSubscribeServer struct { + grpc.ServerStream +} + +func (x *seaweedMessagingSubscribeServer) Send(m *BrokerMessage) error { + return x.ServerStream.SendMsg(m) +} + +func (x 
*seaweedMessagingSubscribeServer) Recv() (*SubscriberMessage, error) { + m := new(SubscriberMessage) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func _SeaweedMessaging_Publish_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(SeaweedMessagingServer).Publish(&seaweedMessagingPublishServer{stream}) +} + +type SeaweedMessaging_PublishServer interface { + Send(*PublishResponse) error + Recv() (*PublishRequest, error) + grpc.ServerStream +} + +type seaweedMessagingPublishServer struct { + grpc.ServerStream +} + +func (x *seaweedMessagingPublishServer) Send(m *PublishResponse) error { + return x.ServerStream.SendMsg(m) +} + +func (x *seaweedMessagingPublishServer) Recv() (*PublishRequest, error) { + m := new(PublishRequest) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func _SeaweedMessaging_DeleteTopic_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteTopicRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SeaweedMessagingServer).DeleteTopic(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/messaging_pb.SeaweedMessaging/DeleteTopic", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SeaweedMessagingServer).DeleteTopic(ctx, req.(*DeleteTopicRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _SeaweedMessaging_ConfigureTopic_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ConfigureTopicRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SeaweedMessagingServer).ConfigureTopic(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: 
"/messaging_pb.SeaweedMessaging/ConfigureTopic", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SeaweedMessagingServer).ConfigureTopic(ctx, req.(*ConfigureTopicRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _SeaweedMessaging_GetTopicConfiguration_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetTopicConfigurationRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SeaweedMessagingServer).GetTopicConfiguration(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/messaging_pb.SeaweedMessaging/GetTopicConfiguration", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SeaweedMessagingServer).GetTopicConfiguration(ctx, req.(*GetTopicConfigurationRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _SeaweedMessaging_FindBroker_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(FindBrokerRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SeaweedMessagingServer).FindBroker(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/messaging_pb.SeaweedMessaging/FindBroker", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SeaweedMessagingServer).FindBroker(ctx, req.(*FindBrokerRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _SeaweedMessaging_serviceDesc = grpc.ServiceDesc{ + ServiceName: "messaging_pb.SeaweedMessaging", + HandlerType: (*SeaweedMessagingServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "DeleteTopic", + Handler: _SeaweedMessaging_DeleteTopic_Handler, + }, + { + MethodName: "ConfigureTopic", + 
Handler: _SeaweedMessaging_ConfigureTopic_Handler, + }, + { + MethodName: "GetTopicConfiguration", + Handler: _SeaweedMessaging_GetTopicConfiguration_Handler, + }, + { + MethodName: "FindBroker", + Handler: _SeaweedMessaging_FindBroker_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "Subscribe", + Handler: _SeaweedMessaging_Subscribe_Handler, + ServerStreams: true, + ClientStreams: true, + }, + { + StreamName: "Publish", + Handler: _SeaweedMessaging_Publish_Handler, + ServerStreams: true, + ClientStreams: true, + }, + }, + Metadata: "messaging.proto", +} diff --git a/weed/pb/proto_read_write_test.go b/weed/pb/proto_read_write_test.go new file mode 100644 index 000000000..7f6444ab5 --- /dev/null +++ b/weed/pb/proto_read_write_test.go @@ -0,0 +1,43 @@ +package pb + +import ( + "fmt" + "testing" + + "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb" + "github.com/golang/protobuf/jsonpb" +) + +func TestJsonpMarshalUnmarshal(t *testing.T) { + + tv := &volume_server_pb.RemoteFile{ + BackendType: "aws", + BackendId: "", + FileSize: 12, + } + + m := jsonpb.Marshaler{ + EmitDefaults: true, + Indent: " ", + } + + if text, err := m.MarshalToString(tv); err != nil { + fmt.Printf("marshal eror: %v\n", err) + } else { + fmt.Printf("marshalled: %s\n", text) + } + + rawJson := `{ + "backendType":"aws", + "backendId":"temp", + "FileSize":12 + }` + + tv1 := &volume_server_pb.RemoteFile{} + if err := jsonpb.UnmarshalString(rawJson, tv1); err != nil { + fmt.Printf("unmarshal error: %v\n", err) + } + + fmt.Printf("unmarshalled: %+v\n", tv1) + +} diff --git a/weed/pb/shared_values.go b/weed/pb/shared_values.go new file mode 100644 index 000000000..1af19e51a --- /dev/null +++ b/weed/pb/shared_values.go @@ -0,0 +1,5 @@ +package pb + +const ( + AdminShellClient = "adminShell" +) diff --git a/weed/pb/volume_info.go b/weed/pb/volume_info.go new file mode 100644 index 000000000..c4f733f5c --- /dev/null +++ b/weed/pb/volume_info.go @@ -0,0 +1,76 @@ +package pb + 
+import ( + "bytes" + "fmt" + "io/ioutil" + + _ "github.com/chrislusf/seaweedfs/weed/storage/backend/s3_backend" + "github.com/chrislusf/seaweedfs/weed/util" + + "github.com/golang/protobuf/jsonpb" + + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb" +) + +// MaybeLoadVolumeInfo load the file data as *volume_server_pb.VolumeInfo, the returned volumeInfo will not be nil +func MaybeLoadVolumeInfo(fileName string) (*volume_server_pb.VolumeInfo, bool, error) { + + volumeInfo := &volume_server_pb.VolumeInfo{} + + glog.V(1).Infof("maybeLoadVolumeInfo checks %s", fileName) + if exists, canRead, _, _, _ := util.CheckFile(fileName); !exists || !canRead { + if !exists { + return volumeInfo, false, nil + } + if !canRead { + glog.Warningf("can not read %s", fileName) + return volumeInfo, false, fmt.Errorf("can not read %s", fileName) + } + return volumeInfo, false, nil + } + + glog.V(1).Infof("maybeLoadVolumeInfo reads %s", fileName) + tierData, readErr := ioutil.ReadFile(fileName) + if readErr != nil { + glog.Warningf("fail to read %s : %v", fileName, readErr) + return volumeInfo, false, fmt.Errorf("fail to read %s : %v", fileName, readErr) + } + + glog.V(1).Infof("maybeLoadVolumeInfo Unmarshal volume info %v", fileName) + if err := jsonpb.Unmarshal(bytes.NewReader(tierData), volumeInfo); err != nil { + glog.Warningf("unmarshal error: %v", err) + return volumeInfo, false, fmt.Errorf("unmarshal error: %v", err) + } + + if len(volumeInfo.GetFiles()) == 0 { + return volumeInfo, false, nil + } + + return volumeInfo, true, nil +} + +func SaveVolumeInfo(fileName string, volumeInfo *volume_server_pb.VolumeInfo) error { + + if exists, _, canWrite, _, _ := util.CheckFile(fileName); exists && !canWrite { + return fmt.Errorf("%s not writable", fileName) + } + + m := jsonpb.Marshaler{ + EmitDefaults: true, + Indent: " ", + } + + text, marshalErr := m.MarshalToString(volumeInfo) + if marshalErr != nil { + return fmt.Errorf("marshal to 
%s: %v", fileName, marshalErr) + } + + writeErr := ioutil.WriteFile(fileName, []byte(text), 0755) + if writeErr != nil { + return fmt.Errorf("fail to write %s : %v", fileName, writeErr) + } + + return nil +} diff --git a/weed/pb/volume_server.proto b/weed/pb/volume_server.proto index 8ab67a1bf..c9727e8d3 100644 --- a/weed/pb/volume_server.proto +++ b/weed/pb/volume_server.proto @@ -1,6 +1,7 @@ syntax = "proto3"; package volume_server_pb; +option go_package = "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"; ////////////////////////////////////////////////// @@ -8,6 +9,7 @@ service VolumeServer { //Experts only: takes multiple fid parameters. This function does not propagate deletes to replicas. rpc BatchDelete (BatchDeleteRequest) returns (BatchDeleteResponse) { } + rpc VacuumVolumeCheck (VacuumVolumeCheckRequest) returns (VacuumVolumeCheckResponse) { } rpc VacuumVolumeCompact (VacuumVolumeCompactRequest) returns (VacuumVolumeCompactResponse) { @@ -19,14 +21,12 @@ service VolumeServer { rpc DeleteCollection (DeleteCollectionRequest) returns (DeleteCollectionResponse) { } - rpc AssignVolume (AssignVolumeRequest) returns (AssignVolumeResponse) { + rpc AllocateVolume (AllocateVolumeRequest) returns (AllocateVolumeResponse) { } rpc VolumeSyncStatus (VolumeSyncStatusRequest) returns (VolumeSyncStatusResponse) { } - rpc VolumeSyncIndex (VolumeSyncIndexRequest) returns (stream VolumeSyncIndexResponse) { - } - rpc VolumeSyncData (VolumeSyncDataRequest) returns (stream VolumeSyncDataResponse) { + rpc VolumeIncrementalCopy (VolumeIncrementalCopyRequest) returns (stream VolumeIncrementalCopyResponse) { } rpc VolumeMount (VolumeMountRequest) returns (VolumeMountResponse) { @@ -35,8 +35,56 @@ service VolumeServer { } rpc VolumeDelete (VolumeDeleteRequest) returns (VolumeDeleteResponse) { } + rpc VolumeMarkReadonly (VolumeMarkReadonlyRequest) returns (VolumeMarkReadonlyResponse) { + } + rpc VolumeConfigure (VolumeConfigureRequest) returns (VolumeConfigureResponse) { + } 
+ + // copy the .idx .dat files, and mount this volume + rpc VolumeCopy (VolumeCopyRequest) returns (VolumeCopyResponse) { + } + rpc ReadVolumeFileStatus (ReadVolumeFileStatusRequest) returns (ReadVolumeFileStatusResponse) { + } + rpc CopyFile (CopyFileRequest) returns (stream CopyFileResponse) { + } + + rpc VolumeTailSender (VolumeTailSenderRequest) returns (stream VolumeTailSenderResponse) { + } + rpc VolumeTailReceiver (VolumeTailReceiverRequest) returns (VolumeTailReceiverResponse) { + } - // rpc VolumeUiPage (VolumeUiPageRequest) returns (VolumeUiPageResponse) {} + // erasure coding + rpc VolumeEcShardsGenerate (VolumeEcShardsGenerateRequest) returns (VolumeEcShardsGenerateResponse) { + } + rpc VolumeEcShardsRebuild (VolumeEcShardsRebuildRequest) returns (VolumeEcShardsRebuildResponse) { + } + rpc VolumeEcShardsCopy (VolumeEcShardsCopyRequest) returns (VolumeEcShardsCopyResponse) { + } + rpc VolumeEcShardsDelete (VolumeEcShardsDeleteRequest) returns (VolumeEcShardsDeleteResponse) { + } + rpc VolumeEcShardsMount (VolumeEcShardsMountRequest) returns (VolumeEcShardsMountResponse) { + } + rpc VolumeEcShardsUnmount (VolumeEcShardsUnmountRequest) returns (VolumeEcShardsUnmountResponse) { + } + rpc VolumeEcShardRead (VolumeEcShardReadRequest) returns (stream VolumeEcShardReadResponse) { + } + rpc VolumeEcBlobDelete (VolumeEcBlobDeleteRequest) returns (VolumeEcBlobDeleteResponse) { + } + rpc VolumeEcShardsToVolume (VolumeEcShardsToVolumeRequest) returns (VolumeEcShardsToVolumeResponse) { + } + + // tiered storage + rpc VolumeTierMoveDatToRemote (VolumeTierMoveDatToRemoteRequest) returns (stream VolumeTierMoveDatToRemoteResponse) { + } + rpc VolumeTierMoveDatFromRemote (VolumeTierMoveDatFromRemoteRequest) returns (stream VolumeTierMoveDatFromRemoteResponse) { + } + + rpc VolumeServerStatus (VolumeServerStatusRequest) returns (VolumeServerStatusResponse) { + } + + // query + rpc Query (QueryRequest) returns (stream QueriedStripe) { + } } @@ -44,6 +92,7 @@ service 
VolumeServer { message BatchDeleteRequest { repeated string file_ids = 1; + bool skip_cookie_check = 2; } message BatchDeleteResponse { @@ -54,33 +103,35 @@ message DeleteResult { int32 status = 2; string error = 3; uint32 size = 4; + uint32 version = 5; } message Empty { } message VacuumVolumeCheckRequest { - uint32 volumd_id = 1; + uint32 volume_id = 1; } message VacuumVolumeCheckResponse { double garbage_ratio = 1; } message VacuumVolumeCompactRequest { - uint32 volumd_id = 1; + uint32 volume_id = 1; int64 preallocate = 2; } message VacuumVolumeCompactResponse { } message VacuumVolumeCommitRequest { - uint32 volumd_id = 1; + uint32 volume_id = 1; } message VacuumVolumeCommitResponse { + bool is_read_only = 1; } message VacuumVolumeCleanupRequest { - uint32 volumd_id = 1; + uint32 volume_id = 1; } message VacuumVolumeCleanupResponse { } @@ -91,21 +142,22 @@ message DeleteCollectionRequest { message DeleteCollectionResponse { } -message AssignVolumeRequest { - uint32 volumd_id = 1; +message AllocateVolumeRequest { + uint32 volume_id = 1; string collection = 2; int64 preallocate = 3; string replication = 4; string ttl = 5; + uint32 memory_map_max_size_mb = 6; } -message AssignVolumeResponse { +message AllocateVolumeResponse { } message VolumeSyncStatusRequest { - uint32 volumd_id = 1; + uint32 volume_id = 1; } message VolumeSyncStatusResponse { - uint32 volumd_id = 1; + uint32 volume_id = 1; string collection = 2; string replication = 4; string ttl = 5; @@ -114,45 +166,180 @@ message VolumeSyncStatusResponse { uint64 idx_file_size = 8; } -message VolumeSyncIndexRequest { - uint32 volumd_id = 1; +message VolumeIncrementalCopyRequest { + uint32 volume_id = 1; + uint64 since_ns = 2; } -message VolumeSyncIndexResponse { - bytes index_file_content = 1; -} - -message VolumeSyncDataRequest { - uint32 volumd_id = 1; - uint32 revision = 2; - uint32 offset = 3; - uint32 size = 4; - string needle_id = 5; -} -message VolumeSyncDataResponse { +message 
VolumeIncrementalCopyResponse { bytes file_content = 1; } message VolumeMountRequest { - uint32 volumd_id = 1; + uint32 volume_id = 1; } message VolumeMountResponse { } message VolumeUnmountRequest { - uint32 volumd_id = 1; + uint32 volume_id = 1; } message VolumeUnmountResponse { } message VolumeDeleteRequest { - uint32 volumd_id = 1; + uint32 volume_id = 1; } message VolumeDeleteResponse { } -message VolumeUiPageRequest { +message VolumeMarkReadonlyRequest { + uint32 volume_id = 1; +} +message VolumeMarkReadonlyResponse { +} + +message VolumeConfigureRequest { + uint32 volume_id = 1; + string replication = 2; +} +message VolumeConfigureResponse { + string error = 1; +} + +message VolumeCopyRequest { + uint32 volume_id = 1; + string collection = 2; + string replication = 3; + string ttl = 4; + string source_data_node = 5; +} +message VolumeCopyResponse { + uint64 last_append_at_ns = 1; +} + +message CopyFileRequest { + uint32 volume_id = 1; + string ext = 2; + uint32 compaction_revision = 3; + uint64 stop_offset = 4; + string collection = 5; + bool is_ec_volume = 6; + bool ignore_source_file_not_found = 7; +} +message CopyFileResponse { + bytes file_content = 1; +} + +message VolumeTailSenderRequest { + uint32 volume_id = 1; + uint64 since_ns = 2; + uint32 idle_timeout_seconds = 3; +} +message VolumeTailSenderResponse { + bytes needle_header = 1; + bytes needle_body = 2; + bool is_last_chunk = 3; +} + +message VolumeTailReceiverRequest { + uint32 volume_id = 1; + uint64 since_ns = 2; + uint32 idle_timeout_seconds = 3; + string source_volume_server = 4; +} +message VolumeTailReceiverResponse { +} + +message VolumeEcShardsGenerateRequest { + uint32 volume_id = 1; + string collection = 2; +} +message VolumeEcShardsGenerateResponse { +} + +message VolumeEcShardsRebuildRequest { + uint32 volume_id = 1; + string collection = 2; +} +message VolumeEcShardsRebuildResponse { + repeated uint32 rebuilt_shard_ids = 1; +} + +message VolumeEcShardsCopyRequest { + uint32 
volume_id = 1; + string collection = 2; + repeated uint32 shard_ids = 3; + bool copy_ecx_file = 4; + string source_data_node = 5; + bool copy_ecj_file = 6; + bool copy_vif_file = 7; +} +message VolumeEcShardsCopyResponse { +} + +message VolumeEcShardsDeleteRequest { + uint32 volume_id = 1; + string collection = 2; + repeated uint32 shard_ids = 3; +} +message VolumeEcShardsDeleteResponse { +} + +message VolumeEcShardsMountRequest { + uint32 volume_id = 1; + string collection = 2; + repeated uint32 shard_ids = 3; +} +message VolumeEcShardsMountResponse { +} + +message VolumeEcShardsUnmountRequest { + uint32 volume_id = 1; + repeated uint32 shard_ids = 3; +} +message VolumeEcShardsUnmountResponse { +} + +message VolumeEcShardReadRequest { + uint32 volume_id = 1; + uint32 shard_id = 2; + int64 offset = 3; + int64 size = 4; + uint64 file_key = 5; +} +message VolumeEcShardReadResponse { + bytes data = 1; + bool is_deleted = 2; +} + +message VolumeEcBlobDeleteRequest { + uint32 volume_id = 1; + string collection = 2; + uint64 file_key = 3; + uint32 version = 4; +} +message VolumeEcBlobDeleteResponse { +} + +message VolumeEcShardsToVolumeRequest { + uint32 volume_id = 1; + string collection = 2; } -message VolumeUiPageResponse { +message VolumeEcShardsToVolumeResponse { +} + +message ReadVolumeFileStatusRequest { + uint32 volume_id = 1; +} +message ReadVolumeFileStatusResponse { + uint32 volume_id = 1; + uint64 idx_file_timestamp_seconds = 2; + uint64 idx_file_size = 3; + uint64 dat_file_timestamp_seconds = 4; + uint64 dat_file_size = 5; + uint64 file_count = 6; + uint32 compaction_revision = 7; + string collection = 8; } message DiskStatus { @@ -160,6 +347,8 @@ message DiskStatus { uint64 all = 2; uint64 used = 3; uint64 free = 4; + float percent_free = 5; + float percent_used = 6; } message MemStatus { @@ -171,3 +360,106 @@ message MemStatus { uint64 heap = 6; uint64 stack = 7; } + +// tired storage on volume servers +message RemoteFile { + string backend_type = 1; + 
string backend_id = 2; + string key = 3; + uint64 offset = 4; + uint64 file_size = 5; + uint64 modified_time = 6; + string extension = 7; +} +message VolumeInfo { + repeated RemoteFile files = 1; + uint32 version = 2; + string replication = 3; +} + +message VolumeTierMoveDatToRemoteRequest { + uint32 volume_id = 1; + string collection = 2; + string destination_backend_name = 3; + bool keep_local_dat_file = 4; +} +message VolumeTierMoveDatToRemoteResponse { + int64 processed = 1; + float processedPercentage = 2; +} + +message VolumeTierMoveDatFromRemoteRequest { + uint32 volume_id = 1; + string collection = 2; + bool keep_remote_dat_file = 3; +} +message VolumeTierMoveDatFromRemoteResponse { + int64 processed = 1; + float processedPercentage = 2; +} + +message VolumeServerStatusRequest { + +} +message VolumeServerStatusResponse { + repeated DiskStatus disk_statuses = 1; + MemStatus memory_status = 2; +} + +// select on volume servers +message QueryRequest { + repeated string selections = 1; + repeated string from_file_ids = 2; + message Filter { + string field = 1; + string operand = 2; + string value = 3; + } + Filter filter = 3; + + message InputSerialization { + // NONE | GZIP | BZIP2 + string compression_type = 1; + message CSVInput { + string file_header_info = 1; // Valid values: NONE | USE | IGNORE + string record_delimiter = 2; // Default: \n + string field_delimiter = 3; // Default: , + string quote_charactoer = 4; // Default: " + string quote_escape_character = 5; // Default: " + string comments = 6; // Default: # + // If true, records might contain record delimiters within quote characters + bool allow_quoted_record_delimiter = 7; // default False. 
+ } + message JSONInput { + string type = 1; // Valid values: DOCUMENT | LINES + } + message ParquetInput { + } + + CSVInput csv_input = 2; + JSONInput json_input = 3; + ParquetInput parquet_input = 4; + } + InputSerialization input_serialization = 4; + + message OutputSerialization { + message CSVOutput { + string quote_fields = 1; // Valid values: ALWAYS | ASNEEDED + string record_delimiter = 2; // Default: \n + string field_delimiter = 3; // Default: , + string quote_charactoer = 4; // Default: " + string quote_escape_character = 5; // Default: " + } + message JSONOutput { + string record_delimiter = 1; + } + + CSVOutput csv_output = 2; + JSONOutput json_output = 3; + } + + OutputSerialization output_serialization = 5; +} +message QueriedStripe { + bytes records = 1; +} diff --git a/weed/pb/volume_server_pb/volume_server.pb.go b/weed/pb/volume_server_pb/volume_server.pb.go index fa700e2e5..85d248258 100644 --- a/weed/pb/volume_server_pb/volume_server.pb.go +++ b/weed/pb/volume_server_pb/volume_server.pb.go @@ -1,1205 +1,7573 @@ -// Code generated by protoc-gen-go. +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.24.0 +// protoc v3.12.3 // source: volume_server.proto -// DO NOT EDIT! - -/* -Package volume_server_pb is a generated protocol buffer package. 
- -It is generated from these files: - volume_server.proto - -It has these top-level messages: - BatchDeleteRequest - BatchDeleteResponse - DeleteResult - Empty - VacuumVolumeCheckRequest - VacuumVolumeCheckResponse - VacuumVolumeCompactRequest - VacuumVolumeCompactResponse - VacuumVolumeCommitRequest - VacuumVolumeCommitResponse - VacuumVolumeCleanupRequest - VacuumVolumeCleanupResponse - DeleteCollectionRequest - DeleteCollectionResponse - AssignVolumeRequest - AssignVolumeResponse - VolumeSyncStatusRequest - VolumeSyncStatusResponse - VolumeSyncIndexRequest - VolumeSyncIndexResponse - VolumeSyncDataRequest - VolumeSyncDataResponse - VolumeMountRequest - VolumeMountResponse - VolumeUnmountRequest - VolumeUnmountResponse - VolumeDeleteRequest - VolumeDeleteResponse - VolumeUiPageRequest - VolumeUiPageResponse - DiskStatus - MemStatus -*/ -package volume_server_pb -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" +package volume_server_pb import ( - context "golang.org/x/net/context" + context "context" + proto "github.com/golang/protobuf/proto" grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" ) -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. 
-// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package +// This is a compile-time assertion that a sufficiently up-to-date version +// of the legacy proto package is being used. +const _ = proto.ProtoPackageIsVersion4 type BatchDeleteRequest struct { - FileIds []string `protobuf:"bytes,1,rep,name=file_ids,json=fileIds" json:"file_ids,omitempty"` -} + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func (m *BatchDeleteRequest) Reset() { *m = BatchDeleteRequest{} } -func (m *BatchDeleteRequest) String() string { return proto.CompactTextString(m) } -func (*BatchDeleteRequest) ProtoMessage() {} -func (*BatchDeleteRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + FileIds []string `protobuf:"bytes,1,rep,name=file_ids,json=fileIds,proto3" json:"file_ids,omitempty"` + SkipCookieCheck bool `protobuf:"varint,2,opt,name=skip_cookie_check,json=skipCookieCheck,proto3" json:"skip_cookie_check,omitempty"` +} -func (m *BatchDeleteRequest) GetFileIds() []string { - if m != nil { - return m.FileIds +func (x *BatchDeleteRequest) Reset() { + *x = BatchDeleteRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return nil } -type BatchDeleteResponse struct { - Results []*DeleteResult `protobuf:"bytes,1,rep,name=results" json:"results,omitempty"` +func (x *BatchDeleteRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *BatchDeleteResponse) Reset() { *m = BatchDeleteResponse{} } -func (m *BatchDeleteResponse) String() string { return proto.CompactTextString(m) } -func (*BatchDeleteResponse) ProtoMessage() {} -func (*BatchDeleteResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } +func (*BatchDeleteRequest) 
ProtoMessage() {} -func (m *BatchDeleteResponse) GetResults() []*DeleteResult { - if m != nil { - return m.Results +func (x *BatchDeleteRequest) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return nil + return mi.MessageOf(x) } -type DeleteResult struct { - FileId string `protobuf:"bytes,1,opt,name=file_id,json=fileId" json:"file_id,omitempty"` - Status int32 `protobuf:"varint,2,opt,name=status" json:"status,omitempty"` - Error string `protobuf:"bytes,3,opt,name=error" json:"error,omitempty"` - Size uint32 `protobuf:"varint,4,opt,name=size" json:"size,omitempty"` +// Deprecated: Use BatchDeleteRequest.ProtoReflect.Descriptor instead. +func (*BatchDeleteRequest) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{0} } -func (m *DeleteResult) Reset() { *m = DeleteResult{} } -func (m *DeleteResult) String() string { return proto.CompactTextString(m) } -func (*DeleteResult) ProtoMessage() {} -func (*DeleteResult) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } - -func (m *DeleteResult) GetFileId() string { - if m != nil { - return m.FileId +func (x *BatchDeleteRequest) GetFileIds() []string { + if x != nil { + return x.FileIds } - return "" + return nil } -func (m *DeleteResult) GetStatus() int32 { - if m != nil { - return m.Status +func (x *BatchDeleteRequest) GetSkipCookieCheck() bool { + if x != nil { + return x.SkipCookieCheck } - return 0 + return false } -func (m *DeleteResult) GetError() string { - if m != nil { - return m.Error - } - return "" +type BatchDeleteResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Results []*DeleteResult `protobuf:"bytes,1,rep,name=results,proto3" json:"results,omitempty"` } -func (m 
*DeleteResult) GetSize() uint32 { - if m != nil { - return m.Size +func (x *BatchDeleteResponse) Reset() { + *x = BatchDeleteResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return 0 } -type Empty struct { +func (x *BatchDeleteResponse) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *Empty) Reset() { *m = Empty{} } -func (m *Empty) String() string { return proto.CompactTextString(m) } -func (*Empty) ProtoMessage() {} -func (*Empty) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } +func (*BatchDeleteResponse) ProtoMessage() {} -type VacuumVolumeCheckRequest struct { - VolumdId uint32 `protobuf:"varint,1,opt,name=volumd_id,json=volumdId" json:"volumd_id,omitempty"` +func (x *BatchDeleteResponse) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -func (m *VacuumVolumeCheckRequest) Reset() { *m = VacuumVolumeCheckRequest{} } -func (m *VacuumVolumeCheckRequest) String() string { return proto.CompactTextString(m) } -func (*VacuumVolumeCheckRequest) ProtoMessage() {} -func (*VacuumVolumeCheckRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } +// Deprecated: Use BatchDeleteResponse.ProtoReflect.Descriptor instead. 
+func (*BatchDeleteResponse) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{1} +} -func (m *VacuumVolumeCheckRequest) GetVolumdId() uint32 { - if m != nil { - return m.VolumdId +func (x *BatchDeleteResponse) GetResults() []*DeleteResult { + if x != nil { + return x.Results } - return 0 + return nil } -type VacuumVolumeCheckResponse struct { - GarbageRatio float64 `protobuf:"fixed64,1,opt,name=garbage_ratio,json=garbageRatio" json:"garbage_ratio,omitempty"` +type DeleteResult struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + FileId string `protobuf:"bytes,1,opt,name=file_id,json=fileId,proto3" json:"file_id,omitempty"` + Status int32 `protobuf:"varint,2,opt,name=status,proto3" json:"status,omitempty"` + Error string `protobuf:"bytes,3,opt,name=error,proto3" json:"error,omitempty"` + Size uint32 `protobuf:"varint,4,opt,name=size,proto3" json:"size,omitempty"` + Version uint32 `protobuf:"varint,5,opt,name=version,proto3" json:"version,omitempty"` } -func (m *VacuumVolumeCheckResponse) Reset() { *m = VacuumVolumeCheckResponse{} } -func (m *VacuumVolumeCheckResponse) String() string { return proto.CompactTextString(m) } -func (*VacuumVolumeCheckResponse) ProtoMessage() {} -func (*VacuumVolumeCheckResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} } - -func (m *VacuumVolumeCheckResponse) GetGarbageRatio() float64 { - if m != nil { - return m.GarbageRatio +func (x *DeleteResult) Reset() { + *x = DeleteResult{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return 0 } -type VacuumVolumeCompactRequest struct { - VolumdId uint32 `protobuf:"varint,1,opt,name=volumd_id,json=volumdId" json:"volumd_id,omitempty"` - Preallocate int64 `protobuf:"varint,2,opt,name=preallocate" json:"preallocate,omitempty"` +func (x *DeleteResult) String() 
string { + return protoimpl.X.MessageStringOf(x) } -func (m *VacuumVolumeCompactRequest) Reset() { *m = VacuumVolumeCompactRequest{} } -func (m *VacuumVolumeCompactRequest) String() string { return proto.CompactTextString(m) } -func (*VacuumVolumeCompactRequest) ProtoMessage() {} -func (*VacuumVolumeCompactRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} } +func (*DeleteResult) ProtoMessage() {} -func (m *VacuumVolumeCompactRequest) GetVolumdId() uint32 { - if m != nil { - return m.VolumdId +func (x *DeleteResult) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return 0 + return mi.MessageOf(x) } -func (m *VacuumVolumeCompactRequest) GetPreallocate() int64 { - if m != nil { - return m.Preallocate - } - return 0 +// Deprecated: Use DeleteResult.ProtoReflect.Descriptor instead. 
+func (*DeleteResult) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{2} } -type VacuumVolumeCompactResponse struct { +func (x *DeleteResult) GetFileId() string { + if x != nil { + return x.FileId + } + return "" } -func (m *VacuumVolumeCompactResponse) Reset() { *m = VacuumVolumeCompactResponse{} } -func (m *VacuumVolumeCompactResponse) String() string { return proto.CompactTextString(m) } -func (*VacuumVolumeCompactResponse) ProtoMessage() {} -func (*VacuumVolumeCompactResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} } +func (x *DeleteResult) GetStatus() int32 { + if x != nil { + return x.Status + } + return 0 +} -type VacuumVolumeCommitRequest struct { - VolumdId uint32 `protobuf:"varint,1,opt,name=volumd_id,json=volumdId" json:"volumd_id,omitempty"` +func (x *DeleteResult) GetError() string { + if x != nil { + return x.Error + } + return "" } -func (m *VacuumVolumeCommitRequest) Reset() { *m = VacuumVolumeCommitRequest{} } -func (m *VacuumVolumeCommitRequest) String() string { return proto.CompactTextString(m) } -func (*VacuumVolumeCommitRequest) ProtoMessage() {} -func (*VacuumVolumeCommitRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} } +func (x *DeleteResult) GetSize() uint32 { + if x != nil { + return x.Size + } + return 0 +} -func (m *VacuumVolumeCommitRequest) GetVolumdId() uint32 { - if m != nil { - return m.VolumdId +func (x *DeleteResult) GetVersion() uint32 { + if x != nil { + return x.Version } return 0 } -type VacuumVolumeCommitResponse struct { +type Empty struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields } -func (m *VacuumVolumeCommitResponse) Reset() { *m = VacuumVolumeCommitResponse{} } -func (m *VacuumVolumeCommitResponse) String() string { return proto.CompactTextString(m) } -func (*VacuumVolumeCommitResponse) ProtoMessage() {} -func (*VacuumVolumeCommitResponse) Descriptor() ([]byte, []int) { 
return fileDescriptor0, []int{9} } +func (x *Empty) Reset() { + *x = Empty{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} -type VacuumVolumeCleanupRequest struct { - VolumdId uint32 `protobuf:"varint,1,opt,name=volumd_id,json=volumdId" json:"volumd_id,omitempty"` +func (x *Empty) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *VacuumVolumeCleanupRequest) Reset() { *m = VacuumVolumeCleanupRequest{} } -func (m *VacuumVolumeCleanupRequest) String() string { return proto.CompactTextString(m) } -func (*VacuumVolumeCleanupRequest) ProtoMessage() {} -func (*VacuumVolumeCleanupRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10} } +func (*Empty) ProtoMessage() {} -func (m *VacuumVolumeCleanupRequest) GetVolumdId() uint32 { - if m != nil { - return m.VolumdId +func (x *Empty) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return 0 + return mi.MessageOf(x) } -type VacuumVolumeCleanupResponse struct { +// Deprecated: Use Empty.ProtoReflect.Descriptor instead. 
+func (*Empty) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{3} } -func (m *VacuumVolumeCleanupResponse) Reset() { *m = VacuumVolumeCleanupResponse{} } -func (m *VacuumVolumeCleanupResponse) String() string { return proto.CompactTextString(m) } -func (*VacuumVolumeCleanupResponse) ProtoMessage() {} -func (*VacuumVolumeCleanupResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11} } +type VacuumVolumeCheckRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -type DeleteCollectionRequest struct { - Collection string `protobuf:"bytes,1,opt,name=collection" json:"collection,omitempty"` + VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` } -func (m *DeleteCollectionRequest) Reset() { *m = DeleteCollectionRequest{} } -func (m *DeleteCollectionRequest) String() string { return proto.CompactTextString(m) } -func (*DeleteCollectionRequest) ProtoMessage() {} -func (*DeleteCollectionRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{12} } - -func (m *DeleteCollectionRequest) GetCollection() string { - if m != nil { - return m.Collection +func (x *VacuumVolumeCheckRequest) Reset() { + *x = VacuumVolumeCheckRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return "" -} - -type DeleteCollectionResponse struct { } -func (m *DeleteCollectionResponse) Reset() { *m = DeleteCollectionResponse{} } -func (m *DeleteCollectionResponse) String() string { return proto.CompactTextString(m) } -func (*DeleteCollectionResponse) ProtoMessage() {} -func (*DeleteCollectionResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{13} } - -type AssignVolumeRequest struct { - VolumdId uint32 `protobuf:"varint,1,opt,name=volumd_id,json=volumdId" 
json:"volumd_id,omitempty"` - Collection string `protobuf:"bytes,2,opt,name=collection" json:"collection,omitempty"` - Preallocate int64 `protobuf:"varint,3,opt,name=preallocate" json:"preallocate,omitempty"` - Replication string `protobuf:"bytes,4,opt,name=replication" json:"replication,omitempty"` - Ttl string `protobuf:"bytes,5,opt,name=ttl" json:"ttl,omitempty"` +func (x *VacuumVolumeCheckRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *AssignVolumeRequest) Reset() { *m = AssignVolumeRequest{} } -func (m *AssignVolumeRequest) String() string { return proto.CompactTextString(m) } -func (*AssignVolumeRequest) ProtoMessage() {} -func (*AssignVolumeRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{14} } +func (*VacuumVolumeCheckRequest) ProtoMessage() {} -func (m *AssignVolumeRequest) GetVolumdId() uint32 { - if m != nil { - return m.VolumdId +func (x *VacuumVolumeCheckRequest) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return 0 + return mi.MessageOf(x) } -func (m *AssignVolumeRequest) GetCollection() string { - if m != nil { - return m.Collection - } - return "" +// Deprecated: Use VacuumVolumeCheckRequest.ProtoReflect.Descriptor instead. 
+func (*VacuumVolumeCheckRequest) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{4} } -func (m *AssignVolumeRequest) GetPreallocate() int64 { - if m != nil { - return m.Preallocate +func (x *VacuumVolumeCheckRequest) GetVolumeId() uint32 { + if x != nil { + return x.VolumeId } return 0 } -func (m *AssignVolumeRequest) GetReplication() string { - if m != nil { - return m.Replication - } - return "" +type VacuumVolumeCheckResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + GarbageRatio float64 `protobuf:"fixed64,1,opt,name=garbage_ratio,json=garbageRatio,proto3" json:"garbage_ratio,omitempty"` } -func (m *AssignVolumeRequest) GetTtl() string { - if m != nil { - return m.Ttl +func (x *VacuumVolumeCheckResponse) Reset() { + *x = VacuumVolumeCheckResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return "" } -type AssignVolumeResponse struct { +func (x *VacuumVolumeCheckResponse) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *AssignVolumeResponse) Reset() { *m = AssignVolumeResponse{} } -func (m *AssignVolumeResponse) String() string { return proto.CompactTextString(m) } -func (*AssignVolumeResponse) ProtoMessage() {} -func (*AssignVolumeResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{15} } +func (*VacuumVolumeCheckResponse) ProtoMessage() {} -type VolumeSyncStatusRequest struct { - VolumdId uint32 `protobuf:"varint,1,opt,name=volumd_id,json=volumdId" json:"volumd_id,omitempty"` +func (x *VacuumVolumeCheckResponse) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return 
mi.MessageOf(x) } -func (m *VolumeSyncStatusRequest) Reset() { *m = VolumeSyncStatusRequest{} } -func (m *VolumeSyncStatusRequest) String() string { return proto.CompactTextString(m) } -func (*VolumeSyncStatusRequest) ProtoMessage() {} -func (*VolumeSyncStatusRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{16} } +// Deprecated: Use VacuumVolumeCheckResponse.ProtoReflect.Descriptor instead. +func (*VacuumVolumeCheckResponse) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{5} +} -func (m *VolumeSyncStatusRequest) GetVolumdId() uint32 { - if m != nil { - return m.VolumdId +func (x *VacuumVolumeCheckResponse) GetGarbageRatio() float64 { + if x != nil { + return x.GarbageRatio } return 0 } -type VolumeSyncStatusResponse struct { - VolumdId uint32 `protobuf:"varint,1,opt,name=volumd_id,json=volumdId" json:"volumd_id,omitempty"` - Collection string `protobuf:"bytes,2,opt,name=collection" json:"collection,omitempty"` - Replication string `protobuf:"bytes,4,opt,name=replication" json:"replication,omitempty"` - Ttl string `protobuf:"bytes,5,opt,name=ttl" json:"ttl,omitempty"` - TailOffset uint64 `protobuf:"varint,6,opt,name=tail_offset,json=tailOffset" json:"tail_offset,omitempty"` - CompactRevision uint32 `protobuf:"varint,7,opt,name=compact_revision,json=compactRevision" json:"compact_revision,omitempty"` - IdxFileSize uint64 `protobuf:"varint,8,opt,name=idx_file_size,json=idxFileSize" json:"idx_file_size,omitempty"` -} +type VacuumVolumeCompactRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func (m *VolumeSyncStatusResponse) Reset() { *m = VolumeSyncStatusResponse{} } -func (m *VolumeSyncStatusResponse) String() string { return proto.CompactTextString(m) } -func (*VolumeSyncStatusResponse) ProtoMessage() {} -func (*VolumeSyncStatusResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{17} } + VolumeId uint32 
`protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` + Preallocate int64 `protobuf:"varint,2,opt,name=preallocate,proto3" json:"preallocate,omitempty"` +} -func (m *VolumeSyncStatusResponse) GetVolumdId() uint32 { - if m != nil { - return m.VolumdId +func (x *VacuumVolumeCompactRequest) Reset() { + *x = VacuumVolumeCompactRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return 0 } -func (m *VolumeSyncStatusResponse) GetCollection() string { - if m != nil { - return m.Collection - } - return "" +func (x *VacuumVolumeCompactRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *VolumeSyncStatusResponse) GetReplication() string { - if m != nil { - return m.Replication +func (*VacuumVolumeCompactRequest) ProtoMessage() {} + +func (x *VacuumVolumeCompactRequest) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return "" + return mi.MessageOf(x) } -func (m *VolumeSyncStatusResponse) GetTtl() string { - if m != nil { - return m.Ttl - } - return "" +// Deprecated: Use VacuumVolumeCompactRequest.ProtoReflect.Descriptor instead. 
+func (*VacuumVolumeCompactRequest) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{6} } -func (m *VolumeSyncStatusResponse) GetTailOffset() uint64 { - if m != nil { - return m.TailOffset +func (x *VacuumVolumeCompactRequest) GetVolumeId() uint32 { + if x != nil { + return x.VolumeId } return 0 } -func (m *VolumeSyncStatusResponse) GetCompactRevision() uint32 { - if m != nil { - return m.CompactRevision +func (x *VacuumVolumeCompactRequest) GetPreallocate() int64 { + if x != nil { + return x.Preallocate } return 0 } -func (m *VolumeSyncStatusResponse) GetIdxFileSize() uint64 { - if m != nil { - return m.IdxFileSize +type VacuumVolumeCompactResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *VacuumVolumeCompactResponse) Reset() { + *x = VacuumVolumeCompactResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return 0 } -type VolumeSyncIndexRequest struct { - VolumdId uint32 `protobuf:"varint,1,opt,name=volumd_id,json=volumdId" json:"volumd_id,omitempty"` +func (x *VacuumVolumeCompactResponse) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *VolumeSyncIndexRequest) Reset() { *m = VolumeSyncIndexRequest{} } -func (m *VolumeSyncIndexRequest) String() string { return proto.CompactTextString(m) } -func (*VolumeSyncIndexRequest) ProtoMessage() {} -func (*VolumeSyncIndexRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{18} } +func (*VacuumVolumeCompactResponse) ProtoMessage() {} -func (m *VolumeSyncIndexRequest) GetVolumdId() uint32 { - if m != nil { - return m.VolumdId +func (x *VacuumVolumeCompactResponse) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if 
ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return 0 + return mi.MessageOf(x) } -type VolumeSyncIndexResponse struct { - IndexFileContent []byte `protobuf:"bytes,1,opt,name=index_file_content,json=indexFileContent,proto3" json:"index_file_content,omitempty"` +// Deprecated: Use VacuumVolumeCompactResponse.ProtoReflect.Descriptor instead. +func (*VacuumVolumeCompactResponse) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{7} } -func (m *VolumeSyncIndexResponse) Reset() { *m = VolumeSyncIndexResponse{} } -func (m *VolumeSyncIndexResponse) String() string { return proto.CompactTextString(m) } -func (*VolumeSyncIndexResponse) ProtoMessage() {} -func (*VolumeSyncIndexResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{19} } +type VacuumVolumeCommitRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` +} -func (m *VolumeSyncIndexResponse) GetIndexFileContent() []byte { - if m != nil { - return m.IndexFileContent +func (x *VacuumVolumeCommitRequest) Reset() { + *x = VacuumVolumeCommitRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return nil } -type VolumeSyncDataRequest struct { - VolumdId uint32 `protobuf:"varint,1,opt,name=volumd_id,json=volumdId" json:"volumd_id,omitempty"` - Revision uint32 `protobuf:"varint,2,opt,name=revision" json:"revision,omitempty"` - Offset uint32 `protobuf:"varint,3,opt,name=offset" json:"offset,omitempty"` - Size uint32 `protobuf:"varint,4,opt,name=size" json:"size,omitempty"` - NeedleId string `protobuf:"bytes,5,opt,name=needle_id,json=needleId" json:"needle_id,omitempty"` +func (x *VacuumVolumeCommitRequest) String() string { + return 
protoimpl.X.MessageStringOf(x) } -func (m *VolumeSyncDataRequest) Reset() { *m = VolumeSyncDataRequest{} } -func (m *VolumeSyncDataRequest) String() string { return proto.CompactTextString(m) } -func (*VolumeSyncDataRequest) ProtoMessage() {} -func (*VolumeSyncDataRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{20} } +func (*VacuumVolumeCommitRequest) ProtoMessage() {} -func (m *VolumeSyncDataRequest) GetVolumdId() uint32 { - if m != nil { - return m.VolumdId +func (x *VacuumVolumeCommitRequest) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return 0 + return mi.MessageOf(x) } -func (m *VolumeSyncDataRequest) GetRevision() uint32 { - if m != nil { - return m.Revision - } - return 0 +// Deprecated: Use VacuumVolumeCommitRequest.ProtoReflect.Descriptor instead. 
+func (*VacuumVolumeCommitRequest) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{8} } -func (m *VolumeSyncDataRequest) GetOffset() uint32 { - if m != nil { - return m.Offset +func (x *VacuumVolumeCommitRequest) GetVolumeId() uint32 { + if x != nil { + return x.VolumeId } return 0 } -func (m *VolumeSyncDataRequest) GetSize() uint32 { - if m != nil { - return m.Size - } - return 0 +type VacuumVolumeCommitResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + IsReadOnly bool `protobuf:"varint,1,opt,name=is_read_only,json=isReadOnly,proto3" json:"is_read_only,omitempty"` } -func (m *VolumeSyncDataRequest) GetNeedleId() string { - if m != nil { - return m.NeedleId +func (x *VacuumVolumeCommitResponse) Reset() { + *x = VacuumVolumeCommitResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return "" } -type VolumeSyncDataResponse struct { - FileContent []byte `protobuf:"bytes,1,opt,name=file_content,json=fileContent,proto3" json:"file_content,omitempty"` +func (x *VacuumVolumeCommitResponse) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *VolumeSyncDataResponse) Reset() { *m = VolumeSyncDataResponse{} } -func (m *VolumeSyncDataResponse) String() string { return proto.CompactTextString(m) } -func (*VolumeSyncDataResponse) ProtoMessage() {} -func (*VolumeSyncDataResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{21} } +func (*VacuumVolumeCommitResponse) ProtoMessage() {} -func (m *VolumeSyncDataResponse) GetFileContent() []byte { - if m != nil { - return m.FileContent +func (x *VacuumVolumeCommitResponse) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if 
ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return nil + return mi.MessageOf(x) } -type VolumeMountRequest struct { - VolumdId uint32 `protobuf:"varint,1,opt,name=volumd_id,json=volumdId" json:"volumd_id,omitempty"` +// Deprecated: Use VacuumVolumeCommitResponse.ProtoReflect.Descriptor instead. +func (*VacuumVolumeCommitResponse) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{9} +} + +func (x *VacuumVolumeCommitResponse) GetIsReadOnly() bool { + if x != nil { + return x.IsReadOnly + } + return false } -func (m *VolumeMountRequest) Reset() { *m = VolumeMountRequest{} } -func (m *VolumeMountRequest) String() string { return proto.CompactTextString(m) } -func (*VolumeMountRequest) ProtoMessage() {} -func (*VolumeMountRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{22} } +type VacuumVolumeCleanupRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` +} -func (m *VolumeMountRequest) GetVolumdId() uint32 { - if m != nil { - return m.VolumdId +func (x *VacuumVolumeCleanupRequest) Reset() { + *x = VacuumVolumeCleanupRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return 0 } -type VolumeMountResponse struct { +func (x *VacuumVolumeCleanupRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *VolumeMountResponse) Reset() { *m = VolumeMountResponse{} } -func (m *VolumeMountResponse) String() string { return proto.CompactTextString(m) } -func (*VolumeMountResponse) ProtoMessage() {} -func (*VolumeMountResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{23} } +func (*VacuumVolumeCleanupRequest) ProtoMessage() {} -type VolumeUnmountRequest struct { - 
VolumdId uint32 `protobuf:"varint,1,opt,name=volumd_id,json=volumdId" json:"volumd_id,omitempty"` +func (x *VacuumVolumeCleanupRequest) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -func (m *VolumeUnmountRequest) Reset() { *m = VolumeUnmountRequest{} } -func (m *VolumeUnmountRequest) String() string { return proto.CompactTextString(m) } -func (*VolumeUnmountRequest) ProtoMessage() {} -func (*VolumeUnmountRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{24} } +// Deprecated: Use VacuumVolumeCleanupRequest.ProtoReflect.Descriptor instead. +func (*VacuumVolumeCleanupRequest) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{10} +} -func (m *VolumeUnmountRequest) GetVolumdId() uint32 { - if m != nil { - return m.VolumdId +func (x *VacuumVolumeCleanupRequest) GetVolumeId() uint32 { + if x != nil { + return x.VolumeId } return 0 } -type VolumeUnmountResponse struct { +type VacuumVolumeCleanupResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields } -func (m *VolumeUnmountResponse) Reset() { *m = VolumeUnmountResponse{} } -func (m *VolumeUnmountResponse) String() string { return proto.CompactTextString(m) } -func (*VolumeUnmountResponse) ProtoMessage() {} -func (*VolumeUnmountResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{25} } +func (x *VacuumVolumeCleanupResponse) Reset() { + *x = VacuumVolumeCleanupResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} -type VolumeDeleteRequest struct { - VolumdId uint32 `protobuf:"varint,1,opt,name=volumd_id,json=volumdId" 
json:"volumd_id,omitempty"` +func (x *VacuumVolumeCleanupResponse) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *VolumeDeleteRequest) Reset() { *m = VolumeDeleteRequest{} } -func (m *VolumeDeleteRequest) String() string { return proto.CompactTextString(m) } -func (*VolumeDeleteRequest) ProtoMessage() {} -func (*VolumeDeleteRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{26} } +func (*VacuumVolumeCleanupResponse) ProtoMessage() {} -func (m *VolumeDeleteRequest) GetVolumdId() uint32 { - if m != nil { - return m.VolumdId +func (x *VacuumVolumeCleanupResponse) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return 0 + return mi.MessageOf(x) } -type VolumeDeleteResponse struct { +// Deprecated: Use VacuumVolumeCleanupResponse.ProtoReflect.Descriptor instead. 
+func (*VacuumVolumeCleanupResponse) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{11} } -func (m *VolumeDeleteResponse) Reset() { *m = VolumeDeleteResponse{} } -func (m *VolumeDeleteResponse) String() string { return proto.CompactTextString(m) } -func (*VolumeDeleteResponse) ProtoMessage() {} -func (*VolumeDeleteResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{27} } +type DeleteCollectionRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -type VolumeUiPageRequest struct { + Collection string `protobuf:"bytes,1,opt,name=collection,proto3" json:"collection,omitempty"` } -func (m *VolumeUiPageRequest) Reset() { *m = VolumeUiPageRequest{} } -func (m *VolumeUiPageRequest) String() string { return proto.CompactTextString(m) } -func (*VolumeUiPageRequest) ProtoMessage() {} -func (*VolumeUiPageRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{28} } +func (x *DeleteCollectionRequest) Reset() { + *x = DeleteCollectionRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} -type VolumeUiPageResponse struct { +func (x *DeleteCollectionRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *VolumeUiPageResponse) Reset() { *m = VolumeUiPageResponse{} } -func (m *VolumeUiPageResponse) String() string { return proto.CompactTextString(m) } -func (*VolumeUiPageResponse) ProtoMessage() {} -func (*VolumeUiPageResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{29} } +func (*DeleteCollectionRequest) ProtoMessage() {} -type DiskStatus struct { - Dir string `protobuf:"bytes,1,opt,name=dir" json:"dir,omitempty"` - All uint64 `protobuf:"varint,2,opt,name=all" json:"all,omitempty"` - Used uint64 `protobuf:"varint,3,opt,name=used" json:"used,omitempty"` - Free uint64 
`protobuf:"varint,4,opt,name=free" json:"free,omitempty"` +func (x *DeleteCollectionRequest) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[12] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -func (m *DiskStatus) Reset() { *m = DiskStatus{} } -func (m *DiskStatus) String() string { return proto.CompactTextString(m) } -func (*DiskStatus) ProtoMessage() {} -func (*DiskStatus) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{30} } +// Deprecated: Use DeleteCollectionRequest.ProtoReflect.Descriptor instead. +func (*DeleteCollectionRequest) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{12} +} -func (m *DiskStatus) GetDir() string { - if m != nil { - return m.Dir +func (x *DeleteCollectionRequest) GetCollection() string { + if x != nil { + return x.Collection } return "" } -func (m *DiskStatus) GetAll() uint64 { - if m != nil { - return m.All +type DeleteCollectionResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *DeleteCollectionResponse) Reset() { + *x = DeleteCollectionResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return 0 } -func (m *DiskStatus) GetUsed() uint64 { - if m != nil { - return m.Used +func (x *DeleteCollectionResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DeleteCollectionResponse) ProtoMessage() {} + +func (x *DeleteCollectionResponse) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[13] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + 
ms.StoreMessageInfo(mi) + } + return ms } - return 0 + return mi.MessageOf(x) +} + +// Deprecated: Use DeleteCollectionResponse.ProtoReflect.Descriptor instead. +func (*DeleteCollectionResponse) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{13} +} + +type AllocateVolumeRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` + Collection string `protobuf:"bytes,2,opt,name=collection,proto3" json:"collection,omitempty"` + Preallocate int64 `protobuf:"varint,3,opt,name=preallocate,proto3" json:"preallocate,omitempty"` + Replication string `protobuf:"bytes,4,opt,name=replication,proto3" json:"replication,omitempty"` + Ttl string `protobuf:"bytes,5,opt,name=ttl,proto3" json:"ttl,omitempty"` + MemoryMapMaxSizeMb uint32 `protobuf:"varint,6,opt,name=memory_map_max_size_mb,json=memoryMapMaxSizeMb,proto3" json:"memory_map_max_size_mb,omitempty"` } -func (m *DiskStatus) GetFree() uint64 { - if m != nil { - return m.Free +func (x *AllocateVolumeRequest) Reset() { + *x = AllocateVolumeRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return 0 } -type MemStatus struct { - Goroutines int32 `protobuf:"varint,1,opt,name=goroutines" json:"goroutines,omitempty"` - All uint64 `protobuf:"varint,2,opt,name=all" json:"all,omitempty"` - Used uint64 `protobuf:"varint,3,opt,name=used" json:"used,omitempty"` - Free uint64 `protobuf:"varint,4,opt,name=free" json:"free,omitempty"` - Self uint64 `protobuf:"varint,5,opt,name=self" json:"self,omitempty"` - Heap uint64 `protobuf:"varint,6,opt,name=heap" json:"heap,omitempty"` - Stack uint64 `protobuf:"varint,7,opt,name=stack" json:"stack,omitempty"` +func (x *AllocateVolumeRequest) String() string { + return 
protoimpl.X.MessageStringOf(x) } -func (m *MemStatus) Reset() { *m = MemStatus{} } -func (m *MemStatus) String() string { return proto.CompactTextString(m) } -func (*MemStatus) ProtoMessage() {} -func (*MemStatus) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{31} } +func (*AllocateVolumeRequest) ProtoMessage() {} -func (m *MemStatus) GetGoroutines() int32 { - if m != nil { - return m.Goroutines +func (x *AllocateVolumeRequest) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[14] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return 0 + return mi.MessageOf(x) +} + +// Deprecated: Use AllocateVolumeRequest.ProtoReflect.Descriptor instead. +func (*AllocateVolumeRequest) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{14} } -func (m *MemStatus) GetAll() uint64 { - if m != nil { - return m.All +func (x *AllocateVolumeRequest) GetVolumeId() uint32 { + if x != nil { + return x.VolumeId } return 0 } -func (m *MemStatus) GetUsed() uint64 { - if m != nil { - return m.Used +func (x *AllocateVolumeRequest) GetCollection() string { + if x != nil { + return x.Collection } - return 0 + return "" } -func (m *MemStatus) GetFree() uint64 { - if m != nil { - return m.Free +func (x *AllocateVolumeRequest) GetPreallocate() int64 { + if x != nil { + return x.Preallocate } return 0 } -func (m *MemStatus) GetSelf() uint64 { - if m != nil { - return m.Self +func (x *AllocateVolumeRequest) GetReplication() string { + if x != nil { + return x.Replication } - return 0 + return "" } -func (m *MemStatus) GetHeap() uint64 { - if m != nil { - return m.Heap +func (x *AllocateVolumeRequest) GetTtl() string { + if x != nil { + return x.Ttl } - return 0 + return "" } -func (m *MemStatus) GetStack() uint64 { - if m != nil { - return m.Stack +func (x *AllocateVolumeRequest) 
GetMemoryMapMaxSizeMb() uint32 { + if x != nil { + return x.MemoryMapMaxSizeMb } return 0 } -func init() { - proto.RegisterType((*BatchDeleteRequest)(nil), "volume_server_pb.BatchDeleteRequest") - proto.RegisterType((*BatchDeleteResponse)(nil), "volume_server_pb.BatchDeleteResponse") - proto.RegisterType((*DeleteResult)(nil), "volume_server_pb.DeleteResult") - proto.RegisterType((*Empty)(nil), "volume_server_pb.Empty") - proto.RegisterType((*VacuumVolumeCheckRequest)(nil), "volume_server_pb.VacuumVolumeCheckRequest") - proto.RegisterType((*VacuumVolumeCheckResponse)(nil), "volume_server_pb.VacuumVolumeCheckResponse") - proto.RegisterType((*VacuumVolumeCompactRequest)(nil), "volume_server_pb.VacuumVolumeCompactRequest") - proto.RegisterType((*VacuumVolumeCompactResponse)(nil), "volume_server_pb.VacuumVolumeCompactResponse") - proto.RegisterType((*VacuumVolumeCommitRequest)(nil), "volume_server_pb.VacuumVolumeCommitRequest") - proto.RegisterType((*VacuumVolumeCommitResponse)(nil), "volume_server_pb.VacuumVolumeCommitResponse") - proto.RegisterType((*VacuumVolumeCleanupRequest)(nil), "volume_server_pb.VacuumVolumeCleanupRequest") - proto.RegisterType((*VacuumVolumeCleanupResponse)(nil), "volume_server_pb.VacuumVolumeCleanupResponse") - proto.RegisterType((*DeleteCollectionRequest)(nil), "volume_server_pb.DeleteCollectionRequest") - proto.RegisterType((*DeleteCollectionResponse)(nil), "volume_server_pb.DeleteCollectionResponse") - proto.RegisterType((*AssignVolumeRequest)(nil), "volume_server_pb.AssignVolumeRequest") - proto.RegisterType((*AssignVolumeResponse)(nil), "volume_server_pb.AssignVolumeResponse") - proto.RegisterType((*VolumeSyncStatusRequest)(nil), "volume_server_pb.VolumeSyncStatusRequest") - proto.RegisterType((*VolumeSyncStatusResponse)(nil), "volume_server_pb.VolumeSyncStatusResponse") - proto.RegisterType((*VolumeSyncIndexRequest)(nil), "volume_server_pb.VolumeSyncIndexRequest") - proto.RegisterType((*VolumeSyncIndexResponse)(nil), 
"volume_server_pb.VolumeSyncIndexResponse") - proto.RegisterType((*VolumeSyncDataRequest)(nil), "volume_server_pb.VolumeSyncDataRequest") - proto.RegisterType((*VolumeSyncDataResponse)(nil), "volume_server_pb.VolumeSyncDataResponse") - proto.RegisterType((*VolumeMountRequest)(nil), "volume_server_pb.VolumeMountRequest") - proto.RegisterType((*VolumeMountResponse)(nil), "volume_server_pb.VolumeMountResponse") - proto.RegisterType((*VolumeUnmountRequest)(nil), "volume_server_pb.VolumeUnmountRequest") - proto.RegisterType((*VolumeUnmountResponse)(nil), "volume_server_pb.VolumeUnmountResponse") - proto.RegisterType((*VolumeDeleteRequest)(nil), "volume_server_pb.VolumeDeleteRequest") - proto.RegisterType((*VolumeDeleteResponse)(nil), "volume_server_pb.VolumeDeleteResponse") - proto.RegisterType((*VolumeUiPageRequest)(nil), "volume_server_pb.VolumeUiPageRequest") - proto.RegisterType((*VolumeUiPageResponse)(nil), "volume_server_pb.VolumeUiPageResponse") - proto.RegisterType((*DiskStatus)(nil), "volume_server_pb.DiskStatus") - proto.RegisterType((*MemStatus)(nil), "volume_server_pb.MemStatus") +type AllocateVolumeResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields } -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConn +func (x *AllocateVolumeResponse) Reset() { + *x = AllocateVolumeResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. 
-const _ = grpc.SupportPackageIsVersion4 +func (x *AllocateVolumeResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} -// Client API for VolumeServer service +func (*AllocateVolumeResponse) ProtoMessage() {} -type VolumeServerClient interface { - // Experts only: takes multiple fid parameters. This function does not propagate deletes to replicas. - BatchDelete(ctx context.Context, in *BatchDeleteRequest, opts ...grpc.CallOption) (*BatchDeleteResponse, error) - VacuumVolumeCheck(ctx context.Context, in *VacuumVolumeCheckRequest, opts ...grpc.CallOption) (*VacuumVolumeCheckResponse, error) - VacuumVolumeCompact(ctx context.Context, in *VacuumVolumeCompactRequest, opts ...grpc.CallOption) (*VacuumVolumeCompactResponse, error) - VacuumVolumeCommit(ctx context.Context, in *VacuumVolumeCommitRequest, opts ...grpc.CallOption) (*VacuumVolumeCommitResponse, error) - VacuumVolumeCleanup(ctx context.Context, in *VacuumVolumeCleanupRequest, opts ...grpc.CallOption) (*VacuumVolumeCleanupResponse, error) - DeleteCollection(ctx context.Context, in *DeleteCollectionRequest, opts ...grpc.CallOption) (*DeleteCollectionResponse, error) - AssignVolume(ctx context.Context, in *AssignVolumeRequest, opts ...grpc.CallOption) (*AssignVolumeResponse, error) - VolumeSyncStatus(ctx context.Context, in *VolumeSyncStatusRequest, opts ...grpc.CallOption) (*VolumeSyncStatusResponse, error) - VolumeSyncIndex(ctx context.Context, in *VolumeSyncIndexRequest, opts ...grpc.CallOption) (VolumeServer_VolumeSyncIndexClient, error) - VolumeSyncData(ctx context.Context, in *VolumeSyncDataRequest, opts ...grpc.CallOption) (VolumeServer_VolumeSyncDataClient, error) - VolumeMount(ctx context.Context, in *VolumeMountRequest, opts ...grpc.CallOption) (*VolumeMountResponse, error) - VolumeUnmount(ctx context.Context, in *VolumeUnmountRequest, opts ...grpc.CallOption) (*VolumeUnmountResponse, error) - VolumeDelete(ctx context.Context, in *VolumeDeleteRequest, opts ...grpc.CallOption) 
(*VolumeDeleteResponse, error) +func (x *AllocateVolumeResponse) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[15] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -type volumeServerClient struct { - cc *grpc.ClientConn +// Deprecated: Use AllocateVolumeResponse.ProtoReflect.Descriptor instead. +func (*AllocateVolumeResponse) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{15} } -func NewVolumeServerClient(cc *grpc.ClientConn) VolumeServerClient { - return &volumeServerClient{cc} +type VolumeSyncStatusRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` } -func (c *volumeServerClient) BatchDelete(ctx context.Context, in *BatchDeleteRequest, opts ...grpc.CallOption) (*BatchDeleteResponse, error) { - out := new(BatchDeleteResponse) - err := grpc.Invoke(ctx, "/volume_server_pb.VolumeServer/BatchDelete", in, out, c.cc, opts...) - if err != nil { - return nil, err +func (x *VolumeSyncStatusRequest) Reset() { + *x = VolumeSyncStatusRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[16] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return out, nil } -func (c *volumeServerClient) VacuumVolumeCheck(ctx context.Context, in *VacuumVolumeCheckRequest, opts ...grpc.CallOption) (*VacuumVolumeCheckResponse, error) { - out := new(VacuumVolumeCheckResponse) - err := grpc.Invoke(ctx, "/volume_server_pb.VolumeServer/VacuumVolumeCheck", in, out, c.cc, opts...) 
- if err != nil { - return nil, err - } - return out, nil +func (x *VolumeSyncStatusRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -func (c *volumeServerClient) VacuumVolumeCompact(ctx context.Context, in *VacuumVolumeCompactRequest, opts ...grpc.CallOption) (*VacuumVolumeCompactResponse, error) { - out := new(VacuumVolumeCompactResponse) - err := grpc.Invoke(ctx, "/volume_server_pb.VolumeServer/VacuumVolumeCompact", in, out, c.cc, opts...) - if err != nil { - return nil, err +func (*VolumeSyncStatusRequest) ProtoMessage() {} + +func (x *VolumeSyncStatusRequest) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[16] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return out, nil + return mi.MessageOf(x) } -func (c *volumeServerClient) VacuumVolumeCommit(ctx context.Context, in *VacuumVolumeCommitRequest, opts ...grpc.CallOption) (*VacuumVolumeCommitResponse, error) { - out := new(VacuumVolumeCommitResponse) - err := grpc.Invoke(ctx, "/volume_server_pb.VolumeServer/VacuumVolumeCommit", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil +// Deprecated: Use VolumeSyncStatusRequest.ProtoReflect.Descriptor instead. +func (*VolumeSyncStatusRequest) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{16} } -func (c *volumeServerClient) VacuumVolumeCleanup(ctx context.Context, in *VacuumVolumeCleanupRequest, opts ...grpc.CallOption) (*VacuumVolumeCleanupResponse, error) { - out := new(VacuumVolumeCleanupResponse) - err := grpc.Invoke(ctx, "/volume_server_pb.VolumeServer/VacuumVolumeCleanup", in, out, c.cc, opts...) 
- if err != nil { - return nil, err +func (x *VolumeSyncStatusRequest) GetVolumeId() uint32 { + if x != nil { + return x.VolumeId } - return out, nil + return 0 } -func (c *volumeServerClient) DeleteCollection(ctx context.Context, in *DeleteCollectionRequest, opts ...grpc.CallOption) (*DeleteCollectionResponse, error) { - out := new(DeleteCollectionResponse) - err := grpc.Invoke(ctx, "/volume_server_pb.VolumeServer/DeleteCollection", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil +type VolumeSyncStatusResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` + Collection string `protobuf:"bytes,2,opt,name=collection,proto3" json:"collection,omitempty"` + Replication string `protobuf:"bytes,4,opt,name=replication,proto3" json:"replication,omitempty"` + Ttl string `protobuf:"bytes,5,opt,name=ttl,proto3" json:"ttl,omitempty"` + TailOffset uint64 `protobuf:"varint,6,opt,name=tail_offset,json=tailOffset,proto3" json:"tail_offset,omitempty"` + CompactRevision uint32 `protobuf:"varint,7,opt,name=compact_revision,json=compactRevision,proto3" json:"compact_revision,omitempty"` + IdxFileSize uint64 `protobuf:"varint,8,opt,name=idx_file_size,json=idxFileSize,proto3" json:"idx_file_size,omitempty"` } -func (c *volumeServerClient) AssignVolume(ctx context.Context, in *AssignVolumeRequest, opts ...grpc.CallOption) (*AssignVolumeResponse, error) { - out := new(AssignVolumeResponse) - err := grpc.Invoke(ctx, "/volume_server_pb.VolumeServer/AssignVolume", in, out, c.cc, opts...) 
- if err != nil { - return nil, err +func (x *VolumeSyncStatusResponse) Reset() { + *x = VolumeSyncStatusResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[17] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return out, nil } -func (c *volumeServerClient) VolumeSyncStatus(ctx context.Context, in *VolumeSyncStatusRequest, opts ...grpc.CallOption) (*VolumeSyncStatusResponse, error) { - out := new(VolumeSyncStatusResponse) - err := grpc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeSyncStatus", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil +func (x *VolumeSyncStatusResponse) String() string { + return protoimpl.X.MessageStringOf(x) } -func (c *volumeServerClient) VolumeSyncIndex(ctx context.Context, in *VolumeSyncIndexRequest, opts ...grpc.CallOption) (VolumeServer_VolumeSyncIndexClient, error) { - stream, err := grpc.NewClientStream(ctx, &_VolumeServer_serviceDesc.Streams[0], c.cc, "/volume_server_pb.VolumeServer/VolumeSyncIndex", opts...) - if err != nil { - return nil, err - } - x := &volumeServerVolumeSyncIndexClient{stream} - if err := x.ClientStream.SendMsg(in); err != nil { - return nil, err - } - if err := x.ClientStream.CloseSend(); err != nil { - return nil, err +func (*VolumeSyncStatusResponse) ProtoMessage() {} + +func (x *VolumeSyncStatusResponse) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[17] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return x, nil + return mi.MessageOf(x) } -type VolumeServer_VolumeSyncIndexClient interface { - Recv() (*VolumeSyncIndexResponse, error) - grpc.ClientStream +// Deprecated: Use VolumeSyncStatusResponse.ProtoReflect.Descriptor instead. 
+func (*VolumeSyncStatusResponse) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{17} } -type volumeServerVolumeSyncIndexClient struct { - grpc.ClientStream +func (x *VolumeSyncStatusResponse) GetVolumeId() uint32 { + if x != nil { + return x.VolumeId + } + return 0 } -func (x *volumeServerVolumeSyncIndexClient) Recv() (*VolumeSyncIndexResponse, error) { - m := new(VolumeSyncIndexResponse) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err +func (x *VolumeSyncStatusResponse) GetCollection() string { + if x != nil { + return x.Collection } - return m, nil + return "" } -func (c *volumeServerClient) VolumeSyncData(ctx context.Context, in *VolumeSyncDataRequest, opts ...grpc.CallOption) (VolumeServer_VolumeSyncDataClient, error) { - stream, err := grpc.NewClientStream(ctx, &_VolumeServer_serviceDesc.Streams[1], c.cc, "/volume_server_pb.VolumeServer/VolumeSyncData", opts...) - if err != nil { - return nil, err - } - x := &volumeServerVolumeSyncDataClient{stream} - if err := x.ClientStream.SendMsg(in); err != nil { - return nil, err - } - if err := x.ClientStream.CloseSend(); err != nil { - return nil, err +func (x *VolumeSyncStatusResponse) GetReplication() string { + if x != nil { + return x.Replication } - return x, nil + return "" } -type VolumeServer_VolumeSyncDataClient interface { - Recv() (*VolumeSyncDataResponse, error) - grpc.ClientStream +func (x *VolumeSyncStatusResponse) GetTtl() string { + if x != nil { + return x.Ttl + } + return "" } -type volumeServerVolumeSyncDataClient struct { - grpc.ClientStream +func (x *VolumeSyncStatusResponse) GetTailOffset() uint64 { + if x != nil { + return x.TailOffset + } + return 0 } -func (x *volumeServerVolumeSyncDataClient) Recv() (*VolumeSyncDataResponse, error) { - m := new(VolumeSyncDataResponse) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err +func (x *VolumeSyncStatusResponse) GetCompactRevision() uint32 { + if x != nil { + return 
x.CompactRevision } - return m, nil + return 0 +} + +func (x *VolumeSyncStatusResponse) GetIdxFileSize() uint64 { + if x != nil { + return x.IdxFileSize + } + return 0 +} + +type VolumeIncrementalCopyRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` + SinceNs uint64 `protobuf:"varint,2,opt,name=since_ns,json=sinceNs,proto3" json:"since_ns,omitempty"` +} + +func (x *VolumeIncrementalCopyRequest) Reset() { + *x = VolumeIncrementalCopyRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[18] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *VolumeIncrementalCopyRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VolumeIncrementalCopyRequest) ProtoMessage() {} + +func (x *VolumeIncrementalCopyRequest) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[18] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use VolumeIncrementalCopyRequest.ProtoReflect.Descriptor instead. 
+func (*VolumeIncrementalCopyRequest) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{18} +} + +func (x *VolumeIncrementalCopyRequest) GetVolumeId() uint32 { + if x != nil { + return x.VolumeId + } + return 0 +} + +func (x *VolumeIncrementalCopyRequest) GetSinceNs() uint64 { + if x != nil { + return x.SinceNs + } + return 0 +} + +type VolumeIncrementalCopyResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + FileContent []byte `protobuf:"bytes,1,opt,name=file_content,json=fileContent,proto3" json:"file_content,omitempty"` +} + +func (x *VolumeIncrementalCopyResponse) Reset() { + *x = VolumeIncrementalCopyResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[19] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *VolumeIncrementalCopyResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VolumeIncrementalCopyResponse) ProtoMessage() {} + +func (x *VolumeIncrementalCopyResponse) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[19] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use VolumeIncrementalCopyResponse.ProtoReflect.Descriptor instead. 
+func (*VolumeIncrementalCopyResponse) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{19} +} + +func (x *VolumeIncrementalCopyResponse) GetFileContent() []byte { + if x != nil { + return x.FileContent + } + return nil +} + +type VolumeMountRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` +} + +func (x *VolumeMountRequest) Reset() { + *x = VolumeMountRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[20] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *VolumeMountRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VolumeMountRequest) ProtoMessage() {} + +func (x *VolumeMountRequest) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[20] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use VolumeMountRequest.ProtoReflect.Descriptor instead. 
+func (*VolumeMountRequest) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{20} +} + +func (x *VolumeMountRequest) GetVolumeId() uint32 { + if x != nil { + return x.VolumeId + } + return 0 +} + +type VolumeMountResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *VolumeMountResponse) Reset() { + *x = VolumeMountResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[21] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *VolumeMountResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VolumeMountResponse) ProtoMessage() {} + +func (x *VolumeMountResponse) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[21] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use VolumeMountResponse.ProtoReflect.Descriptor instead. 
+func (*VolumeMountResponse) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{21} +} + +type VolumeUnmountRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` +} + +func (x *VolumeUnmountRequest) Reset() { + *x = VolumeUnmountRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[22] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *VolumeUnmountRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VolumeUnmountRequest) ProtoMessage() {} + +func (x *VolumeUnmountRequest) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[22] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use VolumeUnmountRequest.ProtoReflect.Descriptor instead. 
+func (*VolumeUnmountRequest) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{22} +} + +func (x *VolumeUnmountRequest) GetVolumeId() uint32 { + if x != nil { + return x.VolumeId + } + return 0 +} + +type VolumeUnmountResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *VolumeUnmountResponse) Reset() { + *x = VolumeUnmountResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[23] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *VolumeUnmountResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VolumeUnmountResponse) ProtoMessage() {} + +func (x *VolumeUnmountResponse) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[23] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use VolumeUnmountResponse.ProtoReflect.Descriptor instead. 
+func (*VolumeUnmountResponse) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{23} +} + +type VolumeDeleteRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` +} + +func (x *VolumeDeleteRequest) Reset() { + *x = VolumeDeleteRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[24] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *VolumeDeleteRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VolumeDeleteRequest) ProtoMessage() {} + +func (x *VolumeDeleteRequest) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[24] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use VolumeDeleteRequest.ProtoReflect.Descriptor instead. 
+func (*VolumeDeleteRequest) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{24} +} + +func (x *VolumeDeleteRequest) GetVolumeId() uint32 { + if x != nil { + return x.VolumeId + } + return 0 +} + +type VolumeDeleteResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *VolumeDeleteResponse) Reset() { + *x = VolumeDeleteResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[25] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *VolumeDeleteResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VolumeDeleteResponse) ProtoMessage() {} + +func (x *VolumeDeleteResponse) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[25] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use VolumeDeleteResponse.ProtoReflect.Descriptor instead. 
+func (*VolumeDeleteResponse) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{25} +} + +type VolumeMarkReadonlyRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` +} + +func (x *VolumeMarkReadonlyRequest) Reset() { + *x = VolumeMarkReadonlyRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[26] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *VolumeMarkReadonlyRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VolumeMarkReadonlyRequest) ProtoMessage() {} + +func (x *VolumeMarkReadonlyRequest) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[26] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use VolumeMarkReadonlyRequest.ProtoReflect.Descriptor instead. 
+func (*VolumeMarkReadonlyRequest) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{26} +} + +func (x *VolumeMarkReadonlyRequest) GetVolumeId() uint32 { + if x != nil { + return x.VolumeId + } + return 0 +} + +type VolumeMarkReadonlyResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *VolumeMarkReadonlyResponse) Reset() { + *x = VolumeMarkReadonlyResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[27] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *VolumeMarkReadonlyResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VolumeMarkReadonlyResponse) ProtoMessage() {} + +func (x *VolumeMarkReadonlyResponse) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[27] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use VolumeMarkReadonlyResponse.ProtoReflect.Descriptor instead. 
+func (*VolumeMarkReadonlyResponse) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{27} +} + +type VolumeConfigureRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` + Replication string `protobuf:"bytes,2,opt,name=replication,proto3" json:"replication,omitempty"` +} + +func (x *VolumeConfigureRequest) Reset() { + *x = VolumeConfigureRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[28] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *VolumeConfigureRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VolumeConfigureRequest) ProtoMessage() {} + +func (x *VolumeConfigureRequest) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[28] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use VolumeConfigureRequest.ProtoReflect.Descriptor instead. 
+func (*VolumeConfigureRequest) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{28} +} + +func (x *VolumeConfigureRequest) GetVolumeId() uint32 { + if x != nil { + return x.VolumeId + } + return 0 +} + +func (x *VolumeConfigureRequest) GetReplication() string { + if x != nil { + return x.Replication + } + return "" +} + +type VolumeConfigureResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Error string `protobuf:"bytes,1,opt,name=error,proto3" json:"error,omitempty"` +} + +func (x *VolumeConfigureResponse) Reset() { + *x = VolumeConfigureResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[29] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *VolumeConfigureResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VolumeConfigureResponse) ProtoMessage() {} + +func (x *VolumeConfigureResponse) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[29] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use VolumeConfigureResponse.ProtoReflect.Descriptor instead. 
+func (*VolumeConfigureResponse) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{29} +} + +func (x *VolumeConfigureResponse) GetError() string { + if x != nil { + return x.Error + } + return "" +} + +type VolumeCopyRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` + Collection string `protobuf:"bytes,2,opt,name=collection,proto3" json:"collection,omitempty"` + Replication string `protobuf:"bytes,3,opt,name=replication,proto3" json:"replication,omitempty"` + Ttl string `protobuf:"bytes,4,opt,name=ttl,proto3" json:"ttl,omitempty"` + SourceDataNode string `protobuf:"bytes,5,opt,name=source_data_node,json=sourceDataNode,proto3" json:"source_data_node,omitempty"` +} + +func (x *VolumeCopyRequest) Reset() { + *x = VolumeCopyRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[30] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *VolumeCopyRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VolumeCopyRequest) ProtoMessage() {} + +func (x *VolumeCopyRequest) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[30] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use VolumeCopyRequest.ProtoReflect.Descriptor instead. 
+func (*VolumeCopyRequest) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{30} +} + +func (x *VolumeCopyRequest) GetVolumeId() uint32 { + if x != nil { + return x.VolumeId + } + return 0 +} + +func (x *VolumeCopyRequest) GetCollection() string { + if x != nil { + return x.Collection + } + return "" +} + +func (x *VolumeCopyRequest) GetReplication() string { + if x != nil { + return x.Replication + } + return "" +} + +func (x *VolumeCopyRequest) GetTtl() string { + if x != nil { + return x.Ttl + } + return "" +} + +func (x *VolumeCopyRequest) GetSourceDataNode() string { + if x != nil { + return x.SourceDataNode + } + return "" +} + +type VolumeCopyResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + LastAppendAtNs uint64 `protobuf:"varint,1,opt,name=last_append_at_ns,json=lastAppendAtNs,proto3" json:"last_append_at_ns,omitempty"` +} + +func (x *VolumeCopyResponse) Reset() { + *x = VolumeCopyResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[31] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *VolumeCopyResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VolumeCopyResponse) ProtoMessage() {} + +func (x *VolumeCopyResponse) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[31] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use VolumeCopyResponse.ProtoReflect.Descriptor instead. 
+func (*VolumeCopyResponse) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{31} +} + +func (x *VolumeCopyResponse) GetLastAppendAtNs() uint64 { + if x != nil { + return x.LastAppendAtNs + } + return 0 +} + +type CopyFileRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` + Ext string `protobuf:"bytes,2,opt,name=ext,proto3" json:"ext,omitempty"` + CompactionRevision uint32 `protobuf:"varint,3,opt,name=compaction_revision,json=compactionRevision,proto3" json:"compaction_revision,omitempty"` + StopOffset uint64 `protobuf:"varint,4,opt,name=stop_offset,json=stopOffset,proto3" json:"stop_offset,omitempty"` + Collection string `protobuf:"bytes,5,opt,name=collection,proto3" json:"collection,omitempty"` + IsEcVolume bool `protobuf:"varint,6,opt,name=is_ec_volume,json=isEcVolume,proto3" json:"is_ec_volume,omitempty"` + IgnoreSourceFileNotFound bool `protobuf:"varint,7,opt,name=ignore_source_file_not_found,json=ignoreSourceFileNotFound,proto3" json:"ignore_source_file_not_found,omitempty"` +} + +func (x *CopyFileRequest) Reset() { + *x = CopyFileRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[32] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CopyFileRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CopyFileRequest) ProtoMessage() {} + +func (x *CopyFileRequest) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[32] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CopyFileRequest.ProtoReflect.Descriptor instead. 
+func (*CopyFileRequest) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{32} +} + +func (x *CopyFileRequest) GetVolumeId() uint32 { + if x != nil { + return x.VolumeId + } + return 0 +} + +func (x *CopyFileRequest) GetExt() string { + if x != nil { + return x.Ext + } + return "" +} + +func (x *CopyFileRequest) GetCompactionRevision() uint32 { + if x != nil { + return x.CompactionRevision + } + return 0 +} + +func (x *CopyFileRequest) GetStopOffset() uint64 { + if x != nil { + return x.StopOffset + } + return 0 +} + +func (x *CopyFileRequest) GetCollection() string { + if x != nil { + return x.Collection + } + return "" +} + +func (x *CopyFileRequest) GetIsEcVolume() bool { + if x != nil { + return x.IsEcVolume + } + return false +} + +func (x *CopyFileRequest) GetIgnoreSourceFileNotFound() bool { + if x != nil { + return x.IgnoreSourceFileNotFound + } + return false +} + +type CopyFileResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + FileContent []byte `protobuf:"bytes,1,opt,name=file_content,json=fileContent,proto3" json:"file_content,omitempty"` +} + +func (x *CopyFileResponse) Reset() { + *x = CopyFileResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[33] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CopyFileResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CopyFileResponse) ProtoMessage() {} + +func (x *CopyFileResponse) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[33] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CopyFileResponse.ProtoReflect.Descriptor instead. 
+func (*CopyFileResponse) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{33} +} + +func (x *CopyFileResponse) GetFileContent() []byte { + if x != nil { + return x.FileContent + } + return nil +} + +type VolumeTailSenderRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` + SinceNs uint64 `protobuf:"varint,2,opt,name=since_ns,json=sinceNs,proto3" json:"since_ns,omitempty"` + IdleTimeoutSeconds uint32 `protobuf:"varint,3,opt,name=idle_timeout_seconds,json=idleTimeoutSeconds,proto3" json:"idle_timeout_seconds,omitempty"` +} + +func (x *VolumeTailSenderRequest) Reset() { + *x = VolumeTailSenderRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[34] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *VolumeTailSenderRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VolumeTailSenderRequest) ProtoMessage() {} + +func (x *VolumeTailSenderRequest) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[34] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use VolumeTailSenderRequest.ProtoReflect.Descriptor instead. 
+func (*VolumeTailSenderRequest) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{34} +} + +func (x *VolumeTailSenderRequest) GetVolumeId() uint32 { + if x != nil { + return x.VolumeId + } + return 0 +} + +func (x *VolumeTailSenderRequest) GetSinceNs() uint64 { + if x != nil { + return x.SinceNs + } + return 0 +} + +func (x *VolumeTailSenderRequest) GetIdleTimeoutSeconds() uint32 { + if x != nil { + return x.IdleTimeoutSeconds + } + return 0 +} + +type VolumeTailSenderResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + NeedleHeader []byte `protobuf:"bytes,1,opt,name=needle_header,json=needleHeader,proto3" json:"needle_header,omitempty"` + NeedleBody []byte `protobuf:"bytes,2,opt,name=needle_body,json=needleBody,proto3" json:"needle_body,omitempty"` + IsLastChunk bool `protobuf:"varint,3,opt,name=is_last_chunk,json=isLastChunk,proto3" json:"is_last_chunk,omitempty"` +} + +func (x *VolumeTailSenderResponse) Reset() { + *x = VolumeTailSenderResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[35] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *VolumeTailSenderResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VolumeTailSenderResponse) ProtoMessage() {} + +func (x *VolumeTailSenderResponse) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[35] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use VolumeTailSenderResponse.ProtoReflect.Descriptor instead. 
+func (*VolumeTailSenderResponse) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{35} +} + +func (x *VolumeTailSenderResponse) GetNeedleHeader() []byte { + if x != nil { + return x.NeedleHeader + } + return nil +} + +func (x *VolumeTailSenderResponse) GetNeedleBody() []byte { + if x != nil { + return x.NeedleBody + } + return nil +} + +func (x *VolumeTailSenderResponse) GetIsLastChunk() bool { + if x != nil { + return x.IsLastChunk + } + return false +} + +type VolumeTailReceiverRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` + SinceNs uint64 `protobuf:"varint,2,opt,name=since_ns,json=sinceNs,proto3" json:"since_ns,omitempty"` + IdleTimeoutSeconds uint32 `protobuf:"varint,3,opt,name=idle_timeout_seconds,json=idleTimeoutSeconds,proto3" json:"idle_timeout_seconds,omitempty"` + SourceVolumeServer string `protobuf:"bytes,4,opt,name=source_volume_server,json=sourceVolumeServer,proto3" json:"source_volume_server,omitempty"` +} + +func (x *VolumeTailReceiverRequest) Reset() { + *x = VolumeTailReceiverRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[36] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *VolumeTailReceiverRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VolumeTailReceiverRequest) ProtoMessage() {} + +func (x *VolumeTailReceiverRequest) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[36] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use VolumeTailReceiverRequest.ProtoReflect.Descriptor instead. 
+func (*VolumeTailReceiverRequest) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{36} +} + +func (x *VolumeTailReceiverRequest) GetVolumeId() uint32 { + if x != nil { + return x.VolumeId + } + return 0 +} + +func (x *VolumeTailReceiverRequest) GetSinceNs() uint64 { + if x != nil { + return x.SinceNs + } + return 0 +} + +func (x *VolumeTailReceiverRequest) GetIdleTimeoutSeconds() uint32 { + if x != nil { + return x.IdleTimeoutSeconds + } + return 0 +} + +func (x *VolumeTailReceiverRequest) GetSourceVolumeServer() string { + if x != nil { + return x.SourceVolumeServer + } + return "" +} + +type VolumeTailReceiverResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *VolumeTailReceiverResponse) Reset() { + *x = VolumeTailReceiverResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[37] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *VolumeTailReceiverResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VolumeTailReceiverResponse) ProtoMessage() {} + +func (x *VolumeTailReceiverResponse) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[37] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use VolumeTailReceiverResponse.ProtoReflect.Descriptor instead. 
+func (*VolumeTailReceiverResponse) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{37} +} + +type VolumeEcShardsGenerateRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` + Collection string `protobuf:"bytes,2,opt,name=collection,proto3" json:"collection,omitempty"` +} + +func (x *VolumeEcShardsGenerateRequest) Reset() { + *x = VolumeEcShardsGenerateRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[38] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *VolumeEcShardsGenerateRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VolumeEcShardsGenerateRequest) ProtoMessage() {} + +func (x *VolumeEcShardsGenerateRequest) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[38] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use VolumeEcShardsGenerateRequest.ProtoReflect.Descriptor instead. 
+func (*VolumeEcShardsGenerateRequest) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{38} +} + +func (x *VolumeEcShardsGenerateRequest) GetVolumeId() uint32 { + if x != nil { + return x.VolumeId + } + return 0 +} + +func (x *VolumeEcShardsGenerateRequest) GetCollection() string { + if x != nil { + return x.Collection + } + return "" +} + +type VolumeEcShardsGenerateResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *VolumeEcShardsGenerateResponse) Reset() { + *x = VolumeEcShardsGenerateResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[39] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *VolumeEcShardsGenerateResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VolumeEcShardsGenerateResponse) ProtoMessage() {} + +func (x *VolumeEcShardsGenerateResponse) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[39] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use VolumeEcShardsGenerateResponse.ProtoReflect.Descriptor instead. 
+func (*VolumeEcShardsGenerateResponse) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{39} +} + +type VolumeEcShardsRebuildRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` + Collection string `protobuf:"bytes,2,opt,name=collection,proto3" json:"collection,omitempty"` +} + +func (x *VolumeEcShardsRebuildRequest) Reset() { + *x = VolumeEcShardsRebuildRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[40] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *VolumeEcShardsRebuildRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VolumeEcShardsRebuildRequest) ProtoMessage() {} + +func (x *VolumeEcShardsRebuildRequest) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[40] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use VolumeEcShardsRebuildRequest.ProtoReflect.Descriptor instead. 
+func (*VolumeEcShardsRebuildRequest) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{40} +} + +func (x *VolumeEcShardsRebuildRequest) GetVolumeId() uint32 { + if x != nil { + return x.VolumeId + } + return 0 +} + +func (x *VolumeEcShardsRebuildRequest) GetCollection() string { + if x != nil { + return x.Collection + } + return "" +} + +type VolumeEcShardsRebuildResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + RebuiltShardIds []uint32 `protobuf:"varint,1,rep,packed,name=rebuilt_shard_ids,json=rebuiltShardIds,proto3" json:"rebuilt_shard_ids,omitempty"` +} + +func (x *VolumeEcShardsRebuildResponse) Reset() { + *x = VolumeEcShardsRebuildResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[41] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *VolumeEcShardsRebuildResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VolumeEcShardsRebuildResponse) ProtoMessage() {} + +func (x *VolumeEcShardsRebuildResponse) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[41] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use VolumeEcShardsRebuildResponse.ProtoReflect.Descriptor instead. 
+func (*VolumeEcShardsRebuildResponse) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{41} +} + +func (x *VolumeEcShardsRebuildResponse) GetRebuiltShardIds() []uint32 { + if x != nil { + return x.RebuiltShardIds + } + return nil +} + +type VolumeEcShardsCopyRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` + Collection string `protobuf:"bytes,2,opt,name=collection,proto3" json:"collection,omitempty"` + ShardIds []uint32 `protobuf:"varint,3,rep,packed,name=shard_ids,json=shardIds,proto3" json:"shard_ids,omitempty"` + CopyEcxFile bool `protobuf:"varint,4,opt,name=copy_ecx_file,json=copyEcxFile,proto3" json:"copy_ecx_file,omitempty"` + SourceDataNode string `protobuf:"bytes,5,opt,name=source_data_node,json=sourceDataNode,proto3" json:"source_data_node,omitempty"` + CopyEcjFile bool `protobuf:"varint,6,opt,name=copy_ecj_file,json=copyEcjFile,proto3" json:"copy_ecj_file,omitempty"` + CopyVifFile bool `protobuf:"varint,7,opt,name=copy_vif_file,json=copyVifFile,proto3" json:"copy_vif_file,omitempty"` +} + +func (x *VolumeEcShardsCopyRequest) Reset() { + *x = VolumeEcShardsCopyRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[42] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *VolumeEcShardsCopyRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VolumeEcShardsCopyRequest) ProtoMessage() {} + +func (x *VolumeEcShardsCopyRequest) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[42] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use 
VolumeEcShardsCopyRequest.ProtoReflect.Descriptor instead. +func (*VolumeEcShardsCopyRequest) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{42} +} + +func (x *VolumeEcShardsCopyRequest) GetVolumeId() uint32 { + if x != nil { + return x.VolumeId + } + return 0 +} + +func (x *VolumeEcShardsCopyRequest) GetCollection() string { + if x != nil { + return x.Collection + } + return "" +} + +func (x *VolumeEcShardsCopyRequest) GetShardIds() []uint32 { + if x != nil { + return x.ShardIds + } + return nil +} + +func (x *VolumeEcShardsCopyRequest) GetCopyEcxFile() bool { + if x != nil { + return x.CopyEcxFile + } + return false +} + +func (x *VolumeEcShardsCopyRequest) GetSourceDataNode() string { + if x != nil { + return x.SourceDataNode + } + return "" +} + +func (x *VolumeEcShardsCopyRequest) GetCopyEcjFile() bool { + if x != nil { + return x.CopyEcjFile + } + return false +} + +func (x *VolumeEcShardsCopyRequest) GetCopyVifFile() bool { + if x != nil { + return x.CopyVifFile + } + return false +} + +type VolumeEcShardsCopyResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *VolumeEcShardsCopyResponse) Reset() { + *x = VolumeEcShardsCopyResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[43] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *VolumeEcShardsCopyResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VolumeEcShardsCopyResponse) ProtoMessage() {} + +func (x *VolumeEcShardsCopyResponse) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[43] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use 
VolumeEcShardsCopyResponse.ProtoReflect.Descriptor instead. +func (*VolumeEcShardsCopyResponse) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{43} +} + +type VolumeEcShardsDeleteRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` + Collection string `protobuf:"bytes,2,opt,name=collection,proto3" json:"collection,omitempty"` + ShardIds []uint32 `protobuf:"varint,3,rep,packed,name=shard_ids,json=shardIds,proto3" json:"shard_ids,omitempty"` +} + +func (x *VolumeEcShardsDeleteRequest) Reset() { + *x = VolumeEcShardsDeleteRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[44] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *VolumeEcShardsDeleteRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VolumeEcShardsDeleteRequest) ProtoMessage() {} + +func (x *VolumeEcShardsDeleteRequest) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[44] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use VolumeEcShardsDeleteRequest.ProtoReflect.Descriptor instead. 
+func (*VolumeEcShardsDeleteRequest) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{44} +} + +func (x *VolumeEcShardsDeleteRequest) GetVolumeId() uint32 { + if x != nil { + return x.VolumeId + } + return 0 +} + +func (x *VolumeEcShardsDeleteRequest) GetCollection() string { + if x != nil { + return x.Collection + } + return "" +} + +func (x *VolumeEcShardsDeleteRequest) GetShardIds() []uint32 { + if x != nil { + return x.ShardIds + } + return nil +} + +type VolumeEcShardsDeleteResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *VolumeEcShardsDeleteResponse) Reset() { + *x = VolumeEcShardsDeleteResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[45] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *VolumeEcShardsDeleteResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VolumeEcShardsDeleteResponse) ProtoMessage() {} + +func (x *VolumeEcShardsDeleteResponse) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[45] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use VolumeEcShardsDeleteResponse.ProtoReflect.Descriptor instead. 
+func (*VolumeEcShardsDeleteResponse) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{45} +} + +type VolumeEcShardsMountRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` + Collection string `protobuf:"bytes,2,opt,name=collection,proto3" json:"collection,omitempty"` + ShardIds []uint32 `protobuf:"varint,3,rep,packed,name=shard_ids,json=shardIds,proto3" json:"shard_ids,omitempty"` +} + +func (x *VolumeEcShardsMountRequest) Reset() { + *x = VolumeEcShardsMountRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[46] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *VolumeEcShardsMountRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VolumeEcShardsMountRequest) ProtoMessage() {} + +func (x *VolumeEcShardsMountRequest) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[46] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use VolumeEcShardsMountRequest.ProtoReflect.Descriptor instead. 
+func (*VolumeEcShardsMountRequest) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{46} +} + +func (x *VolumeEcShardsMountRequest) GetVolumeId() uint32 { + if x != nil { + return x.VolumeId + } + return 0 +} + +func (x *VolumeEcShardsMountRequest) GetCollection() string { + if x != nil { + return x.Collection + } + return "" +} + +func (x *VolumeEcShardsMountRequest) GetShardIds() []uint32 { + if x != nil { + return x.ShardIds + } + return nil +} + +type VolumeEcShardsMountResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *VolumeEcShardsMountResponse) Reset() { + *x = VolumeEcShardsMountResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[47] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *VolumeEcShardsMountResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VolumeEcShardsMountResponse) ProtoMessage() {} + +func (x *VolumeEcShardsMountResponse) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[47] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use VolumeEcShardsMountResponse.ProtoReflect.Descriptor instead. 
+func (*VolumeEcShardsMountResponse) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{47} +} + +type VolumeEcShardsUnmountRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` + ShardIds []uint32 `protobuf:"varint,3,rep,packed,name=shard_ids,json=shardIds,proto3" json:"shard_ids,omitempty"` +} + +func (x *VolumeEcShardsUnmountRequest) Reset() { + *x = VolumeEcShardsUnmountRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[48] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *VolumeEcShardsUnmountRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VolumeEcShardsUnmountRequest) ProtoMessage() {} + +func (x *VolumeEcShardsUnmountRequest) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[48] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use VolumeEcShardsUnmountRequest.ProtoReflect.Descriptor instead. 
+func (*VolumeEcShardsUnmountRequest) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{48} +} + +func (x *VolumeEcShardsUnmountRequest) GetVolumeId() uint32 { + if x != nil { + return x.VolumeId + } + return 0 +} + +func (x *VolumeEcShardsUnmountRequest) GetShardIds() []uint32 { + if x != nil { + return x.ShardIds + } + return nil +} + +type VolumeEcShardsUnmountResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *VolumeEcShardsUnmountResponse) Reset() { + *x = VolumeEcShardsUnmountResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[49] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *VolumeEcShardsUnmountResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VolumeEcShardsUnmountResponse) ProtoMessage() {} + +func (x *VolumeEcShardsUnmountResponse) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[49] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use VolumeEcShardsUnmountResponse.ProtoReflect.Descriptor instead. 
+func (*VolumeEcShardsUnmountResponse) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{49} +} + +type VolumeEcShardReadRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` + ShardId uint32 `protobuf:"varint,2,opt,name=shard_id,json=shardId,proto3" json:"shard_id,omitempty"` + Offset int64 `protobuf:"varint,3,opt,name=offset,proto3" json:"offset,omitempty"` + Size int64 `protobuf:"varint,4,opt,name=size,proto3" json:"size,omitempty"` + FileKey uint64 `protobuf:"varint,5,opt,name=file_key,json=fileKey,proto3" json:"file_key,omitempty"` +} + +func (x *VolumeEcShardReadRequest) Reset() { + *x = VolumeEcShardReadRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[50] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *VolumeEcShardReadRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VolumeEcShardReadRequest) ProtoMessage() {} + +func (x *VolumeEcShardReadRequest) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[50] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use VolumeEcShardReadRequest.ProtoReflect.Descriptor instead. 
+func (*VolumeEcShardReadRequest) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{50} +} + +func (x *VolumeEcShardReadRequest) GetVolumeId() uint32 { + if x != nil { + return x.VolumeId + } + return 0 +} + +func (x *VolumeEcShardReadRequest) GetShardId() uint32 { + if x != nil { + return x.ShardId + } + return 0 +} + +func (x *VolumeEcShardReadRequest) GetOffset() int64 { + if x != nil { + return x.Offset + } + return 0 +} + +func (x *VolumeEcShardReadRequest) GetSize() int64 { + if x != nil { + return x.Size + } + return 0 +} + +func (x *VolumeEcShardReadRequest) GetFileKey() uint64 { + if x != nil { + return x.FileKey + } + return 0 +} + +type VolumeEcShardReadResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Data []byte `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"` + IsDeleted bool `protobuf:"varint,2,opt,name=is_deleted,json=isDeleted,proto3" json:"is_deleted,omitempty"` +} + +func (x *VolumeEcShardReadResponse) Reset() { + *x = VolumeEcShardReadResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[51] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *VolumeEcShardReadResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VolumeEcShardReadResponse) ProtoMessage() {} + +func (x *VolumeEcShardReadResponse) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[51] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use VolumeEcShardReadResponse.ProtoReflect.Descriptor instead. 
+func (*VolumeEcShardReadResponse) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{51} +} + +func (x *VolumeEcShardReadResponse) GetData() []byte { + if x != nil { + return x.Data + } + return nil +} + +func (x *VolumeEcShardReadResponse) GetIsDeleted() bool { + if x != nil { + return x.IsDeleted + } + return false +} + +type VolumeEcBlobDeleteRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` + Collection string `protobuf:"bytes,2,opt,name=collection,proto3" json:"collection,omitempty"` + FileKey uint64 `protobuf:"varint,3,opt,name=file_key,json=fileKey,proto3" json:"file_key,omitempty"` + Version uint32 `protobuf:"varint,4,opt,name=version,proto3" json:"version,omitempty"` +} + +func (x *VolumeEcBlobDeleteRequest) Reset() { + *x = VolumeEcBlobDeleteRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[52] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *VolumeEcBlobDeleteRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VolumeEcBlobDeleteRequest) ProtoMessage() {} + +func (x *VolumeEcBlobDeleteRequest) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[52] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use VolumeEcBlobDeleteRequest.ProtoReflect.Descriptor instead. 
+func (*VolumeEcBlobDeleteRequest) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{52} +} + +func (x *VolumeEcBlobDeleteRequest) GetVolumeId() uint32 { + if x != nil { + return x.VolumeId + } + return 0 +} + +func (x *VolumeEcBlobDeleteRequest) GetCollection() string { + if x != nil { + return x.Collection + } + return "" +} + +func (x *VolumeEcBlobDeleteRequest) GetFileKey() uint64 { + if x != nil { + return x.FileKey + } + return 0 +} + +func (x *VolumeEcBlobDeleteRequest) GetVersion() uint32 { + if x != nil { + return x.Version + } + return 0 +} + +type VolumeEcBlobDeleteResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *VolumeEcBlobDeleteResponse) Reset() { + *x = VolumeEcBlobDeleteResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[53] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *VolumeEcBlobDeleteResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VolumeEcBlobDeleteResponse) ProtoMessage() {} + +func (x *VolumeEcBlobDeleteResponse) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[53] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use VolumeEcBlobDeleteResponse.ProtoReflect.Descriptor instead. 
+func (*VolumeEcBlobDeleteResponse) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{53} +} + +type VolumeEcShardsToVolumeRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` + Collection string `protobuf:"bytes,2,opt,name=collection,proto3" json:"collection,omitempty"` +} + +func (x *VolumeEcShardsToVolumeRequest) Reset() { + *x = VolumeEcShardsToVolumeRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[54] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *VolumeEcShardsToVolumeRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VolumeEcShardsToVolumeRequest) ProtoMessage() {} + +func (x *VolumeEcShardsToVolumeRequest) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[54] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use VolumeEcShardsToVolumeRequest.ProtoReflect.Descriptor instead. 
+func (*VolumeEcShardsToVolumeRequest) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{54} +} + +func (x *VolumeEcShardsToVolumeRequest) GetVolumeId() uint32 { + if x != nil { + return x.VolumeId + } + return 0 +} + +func (x *VolumeEcShardsToVolumeRequest) GetCollection() string { + if x != nil { + return x.Collection + } + return "" +} + +type VolumeEcShardsToVolumeResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *VolumeEcShardsToVolumeResponse) Reset() { + *x = VolumeEcShardsToVolumeResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[55] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *VolumeEcShardsToVolumeResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VolumeEcShardsToVolumeResponse) ProtoMessage() {} + +func (x *VolumeEcShardsToVolumeResponse) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[55] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use VolumeEcShardsToVolumeResponse.ProtoReflect.Descriptor instead. 
+func (*VolumeEcShardsToVolumeResponse) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{55} +} + +type ReadVolumeFileStatusRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` +} + +func (x *ReadVolumeFileStatusRequest) Reset() { + *x = ReadVolumeFileStatusRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[56] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ReadVolumeFileStatusRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ReadVolumeFileStatusRequest) ProtoMessage() {} + +func (x *ReadVolumeFileStatusRequest) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[56] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ReadVolumeFileStatusRequest.ProtoReflect.Descriptor instead. 
+func (*ReadVolumeFileStatusRequest) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{56} +} + +func (x *ReadVolumeFileStatusRequest) GetVolumeId() uint32 { + if x != nil { + return x.VolumeId + } + return 0 +} + +type ReadVolumeFileStatusResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` + IdxFileTimestampSeconds uint64 `protobuf:"varint,2,opt,name=idx_file_timestamp_seconds,json=idxFileTimestampSeconds,proto3" json:"idx_file_timestamp_seconds,omitempty"` + IdxFileSize uint64 `protobuf:"varint,3,opt,name=idx_file_size,json=idxFileSize,proto3" json:"idx_file_size,omitempty"` + DatFileTimestampSeconds uint64 `protobuf:"varint,4,opt,name=dat_file_timestamp_seconds,json=datFileTimestampSeconds,proto3" json:"dat_file_timestamp_seconds,omitempty"` + DatFileSize uint64 `protobuf:"varint,5,opt,name=dat_file_size,json=datFileSize,proto3" json:"dat_file_size,omitempty"` + FileCount uint64 `protobuf:"varint,6,opt,name=file_count,json=fileCount,proto3" json:"file_count,omitempty"` + CompactionRevision uint32 `protobuf:"varint,7,opt,name=compaction_revision,json=compactionRevision,proto3" json:"compaction_revision,omitempty"` + Collection string `protobuf:"bytes,8,opt,name=collection,proto3" json:"collection,omitempty"` +} + +func (x *ReadVolumeFileStatusResponse) Reset() { + *x = ReadVolumeFileStatusResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[57] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ReadVolumeFileStatusResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ReadVolumeFileStatusResponse) ProtoMessage() {} + +func (x *ReadVolumeFileStatusResponse) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[57] + if 
protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ReadVolumeFileStatusResponse.ProtoReflect.Descriptor instead. +func (*ReadVolumeFileStatusResponse) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{57} +} + +func (x *ReadVolumeFileStatusResponse) GetVolumeId() uint32 { + if x != nil { + return x.VolumeId + } + return 0 +} + +func (x *ReadVolumeFileStatusResponse) GetIdxFileTimestampSeconds() uint64 { + if x != nil { + return x.IdxFileTimestampSeconds + } + return 0 +} + +func (x *ReadVolumeFileStatusResponse) GetIdxFileSize() uint64 { + if x != nil { + return x.IdxFileSize + } + return 0 +} + +func (x *ReadVolumeFileStatusResponse) GetDatFileTimestampSeconds() uint64 { + if x != nil { + return x.DatFileTimestampSeconds + } + return 0 +} + +func (x *ReadVolumeFileStatusResponse) GetDatFileSize() uint64 { + if x != nil { + return x.DatFileSize + } + return 0 +} + +func (x *ReadVolumeFileStatusResponse) GetFileCount() uint64 { + if x != nil { + return x.FileCount + } + return 0 +} + +func (x *ReadVolumeFileStatusResponse) GetCompactionRevision() uint32 { + if x != nil { + return x.CompactionRevision + } + return 0 +} + +func (x *ReadVolumeFileStatusResponse) GetCollection() string { + if x != nil { + return x.Collection + } + return "" +} + +type DiskStatus struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Dir string `protobuf:"bytes,1,opt,name=dir,proto3" json:"dir,omitempty"` + All uint64 `protobuf:"varint,2,opt,name=all,proto3" json:"all,omitempty"` + Used uint64 `protobuf:"varint,3,opt,name=used,proto3" json:"used,omitempty"` + Free uint64 `protobuf:"varint,4,opt,name=free,proto3" json:"free,omitempty"` + PercentFree float32 
`protobuf:"fixed32,5,opt,name=percent_free,json=percentFree,proto3" json:"percent_free,omitempty"` + PercentUsed float32 `protobuf:"fixed32,6,opt,name=percent_used,json=percentUsed,proto3" json:"percent_used,omitempty"` +} + +func (x *DiskStatus) Reset() { + *x = DiskStatus{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[58] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DiskStatus) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DiskStatus) ProtoMessage() {} + +func (x *DiskStatus) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[58] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DiskStatus.ProtoReflect.Descriptor instead. +func (*DiskStatus) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{58} +} + +func (x *DiskStatus) GetDir() string { + if x != nil { + return x.Dir + } + return "" +} + +func (x *DiskStatus) GetAll() uint64 { + if x != nil { + return x.All + } + return 0 +} + +func (x *DiskStatus) GetUsed() uint64 { + if x != nil { + return x.Used + } + return 0 +} + +func (x *DiskStatus) GetFree() uint64 { + if x != nil { + return x.Free + } + return 0 +} + +func (x *DiskStatus) GetPercentFree() float32 { + if x != nil { + return x.PercentFree + } + return 0 +} + +func (x *DiskStatus) GetPercentUsed() float32 { + if x != nil { + return x.PercentUsed + } + return 0 +} + +type MemStatus struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Goroutines int32 `protobuf:"varint,1,opt,name=goroutines,proto3" json:"goroutines,omitempty"` + All uint64 `protobuf:"varint,2,opt,name=all,proto3" json:"all,omitempty"` + Used uint64 
`protobuf:"varint,3,opt,name=used,proto3" json:"used,omitempty"` + Free uint64 `protobuf:"varint,4,opt,name=free,proto3" json:"free,omitempty"` + Self uint64 `protobuf:"varint,5,opt,name=self,proto3" json:"self,omitempty"` + Heap uint64 `protobuf:"varint,6,opt,name=heap,proto3" json:"heap,omitempty"` + Stack uint64 `protobuf:"varint,7,opt,name=stack,proto3" json:"stack,omitempty"` +} + +func (x *MemStatus) Reset() { + *x = MemStatus{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[59] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MemStatus) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MemStatus) ProtoMessage() {} + +func (x *MemStatus) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[59] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MemStatus.ProtoReflect.Descriptor instead. 
+func (*MemStatus) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{59} +} + +func (x *MemStatus) GetGoroutines() int32 { + if x != nil { + return x.Goroutines + } + return 0 +} + +func (x *MemStatus) GetAll() uint64 { + if x != nil { + return x.All + } + return 0 +} + +func (x *MemStatus) GetUsed() uint64 { + if x != nil { + return x.Used + } + return 0 +} + +func (x *MemStatus) GetFree() uint64 { + if x != nil { + return x.Free + } + return 0 +} + +func (x *MemStatus) GetSelf() uint64 { + if x != nil { + return x.Self + } + return 0 +} + +func (x *MemStatus) GetHeap() uint64 { + if x != nil { + return x.Heap + } + return 0 +} + +func (x *MemStatus) GetStack() uint64 { + if x != nil { + return x.Stack + } + return 0 +} + +// tired storage on volume servers +type RemoteFile struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + BackendType string `protobuf:"bytes,1,opt,name=backend_type,json=backendType,proto3" json:"backend_type,omitempty"` + BackendId string `protobuf:"bytes,2,opt,name=backend_id,json=backendId,proto3" json:"backend_id,omitempty"` + Key string `protobuf:"bytes,3,opt,name=key,proto3" json:"key,omitempty"` + Offset uint64 `protobuf:"varint,4,opt,name=offset,proto3" json:"offset,omitempty"` + FileSize uint64 `protobuf:"varint,5,opt,name=file_size,json=fileSize,proto3" json:"file_size,omitempty"` + ModifiedTime uint64 `protobuf:"varint,6,opt,name=modified_time,json=modifiedTime,proto3" json:"modified_time,omitempty"` + Extension string `protobuf:"bytes,7,opt,name=extension,proto3" json:"extension,omitempty"` +} + +func (x *RemoteFile) Reset() { + *x = RemoteFile{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[60] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RemoteFile) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RemoteFile) ProtoMessage() {} + 
+func (x *RemoteFile) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[60] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RemoteFile.ProtoReflect.Descriptor instead. +func (*RemoteFile) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{60} +} + +func (x *RemoteFile) GetBackendType() string { + if x != nil { + return x.BackendType + } + return "" +} + +func (x *RemoteFile) GetBackendId() string { + if x != nil { + return x.BackendId + } + return "" +} + +func (x *RemoteFile) GetKey() string { + if x != nil { + return x.Key + } + return "" +} + +func (x *RemoteFile) GetOffset() uint64 { + if x != nil { + return x.Offset + } + return 0 +} + +func (x *RemoteFile) GetFileSize() uint64 { + if x != nil { + return x.FileSize + } + return 0 +} + +func (x *RemoteFile) GetModifiedTime() uint64 { + if x != nil { + return x.ModifiedTime + } + return 0 +} + +func (x *RemoteFile) GetExtension() string { + if x != nil { + return x.Extension + } + return "" +} + +type VolumeInfo struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Files []*RemoteFile `protobuf:"bytes,1,rep,name=files,proto3" json:"files,omitempty"` + Version uint32 `protobuf:"varint,2,opt,name=version,proto3" json:"version,omitempty"` + Replication string `protobuf:"bytes,3,opt,name=replication,proto3" json:"replication,omitempty"` +} + +func (x *VolumeInfo) Reset() { + *x = VolumeInfo{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[61] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *VolumeInfo) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VolumeInfo) ProtoMessage() {} + +func (x *VolumeInfo) 
ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[61] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use VolumeInfo.ProtoReflect.Descriptor instead. +func (*VolumeInfo) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{61} +} + +func (x *VolumeInfo) GetFiles() []*RemoteFile { + if x != nil { + return x.Files + } + return nil +} + +func (x *VolumeInfo) GetVersion() uint32 { + if x != nil { + return x.Version + } + return 0 +} + +func (x *VolumeInfo) GetReplication() string { + if x != nil { + return x.Replication + } + return "" +} + +type VolumeTierMoveDatToRemoteRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` + Collection string `protobuf:"bytes,2,opt,name=collection,proto3" json:"collection,omitempty"` + DestinationBackendName string `protobuf:"bytes,3,opt,name=destination_backend_name,json=destinationBackendName,proto3" json:"destination_backend_name,omitempty"` + KeepLocalDatFile bool `protobuf:"varint,4,opt,name=keep_local_dat_file,json=keepLocalDatFile,proto3" json:"keep_local_dat_file,omitempty"` +} + +func (x *VolumeTierMoveDatToRemoteRequest) Reset() { + *x = VolumeTierMoveDatToRemoteRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[62] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *VolumeTierMoveDatToRemoteRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VolumeTierMoveDatToRemoteRequest) ProtoMessage() {} + +func (x *VolumeTierMoveDatToRemoteRequest) ProtoReflect() protoreflect.Message { + mi := 
&file_volume_server_proto_msgTypes[62] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use VolumeTierMoveDatToRemoteRequest.ProtoReflect.Descriptor instead. +func (*VolumeTierMoveDatToRemoteRequest) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{62} +} + +func (x *VolumeTierMoveDatToRemoteRequest) GetVolumeId() uint32 { + if x != nil { + return x.VolumeId + } + return 0 +} + +func (x *VolumeTierMoveDatToRemoteRequest) GetCollection() string { + if x != nil { + return x.Collection + } + return "" +} + +func (x *VolumeTierMoveDatToRemoteRequest) GetDestinationBackendName() string { + if x != nil { + return x.DestinationBackendName + } + return "" +} + +func (x *VolumeTierMoveDatToRemoteRequest) GetKeepLocalDatFile() bool { + if x != nil { + return x.KeepLocalDatFile + } + return false +} + +type VolumeTierMoveDatToRemoteResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Processed int64 `protobuf:"varint,1,opt,name=processed,proto3" json:"processed,omitempty"` + ProcessedPercentage float32 `protobuf:"fixed32,2,opt,name=processedPercentage,proto3" json:"processedPercentage,omitempty"` +} + +func (x *VolumeTierMoveDatToRemoteResponse) Reset() { + *x = VolumeTierMoveDatToRemoteResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[63] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *VolumeTierMoveDatToRemoteResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VolumeTierMoveDatToRemoteResponse) ProtoMessage() {} + +func (x *VolumeTierMoveDatToRemoteResponse) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[63] + if protoimpl.UnsafeEnabled && x 
!= nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use VolumeTierMoveDatToRemoteResponse.ProtoReflect.Descriptor instead. +func (*VolumeTierMoveDatToRemoteResponse) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{63} +} + +func (x *VolumeTierMoveDatToRemoteResponse) GetProcessed() int64 { + if x != nil { + return x.Processed + } + return 0 +} + +func (x *VolumeTierMoveDatToRemoteResponse) GetProcessedPercentage() float32 { + if x != nil { + return x.ProcessedPercentage + } + return 0 +} + +type VolumeTierMoveDatFromRemoteRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` + Collection string `protobuf:"bytes,2,opt,name=collection,proto3" json:"collection,omitempty"` + KeepRemoteDatFile bool `protobuf:"varint,3,opt,name=keep_remote_dat_file,json=keepRemoteDatFile,proto3" json:"keep_remote_dat_file,omitempty"` +} + +func (x *VolumeTierMoveDatFromRemoteRequest) Reset() { + *x = VolumeTierMoveDatFromRemoteRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[64] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *VolumeTierMoveDatFromRemoteRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VolumeTierMoveDatFromRemoteRequest) ProtoMessage() {} + +func (x *VolumeTierMoveDatFromRemoteRequest) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[64] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use 
VolumeTierMoveDatFromRemoteRequest.ProtoReflect.Descriptor instead. +func (*VolumeTierMoveDatFromRemoteRequest) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{64} +} + +func (x *VolumeTierMoveDatFromRemoteRequest) GetVolumeId() uint32 { + if x != nil { + return x.VolumeId + } + return 0 +} + +func (x *VolumeTierMoveDatFromRemoteRequest) GetCollection() string { + if x != nil { + return x.Collection + } + return "" +} + +func (x *VolumeTierMoveDatFromRemoteRequest) GetKeepRemoteDatFile() bool { + if x != nil { + return x.KeepRemoteDatFile + } + return false +} + +type VolumeTierMoveDatFromRemoteResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Processed int64 `protobuf:"varint,1,opt,name=processed,proto3" json:"processed,omitempty"` + ProcessedPercentage float32 `protobuf:"fixed32,2,opt,name=processedPercentage,proto3" json:"processedPercentage,omitempty"` +} + +func (x *VolumeTierMoveDatFromRemoteResponse) Reset() { + *x = VolumeTierMoveDatFromRemoteResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[65] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *VolumeTierMoveDatFromRemoteResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VolumeTierMoveDatFromRemoteResponse) ProtoMessage() {} + +func (x *VolumeTierMoveDatFromRemoteResponse) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[65] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use VolumeTierMoveDatFromRemoteResponse.ProtoReflect.Descriptor instead. 
+func (*VolumeTierMoveDatFromRemoteResponse) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{65} +} + +func (x *VolumeTierMoveDatFromRemoteResponse) GetProcessed() int64 { + if x != nil { + return x.Processed + } + return 0 +} + +func (x *VolumeTierMoveDatFromRemoteResponse) GetProcessedPercentage() float32 { + if x != nil { + return x.ProcessedPercentage + } + return 0 +} + +type VolumeServerStatusRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *VolumeServerStatusRequest) Reset() { + *x = VolumeServerStatusRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[66] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *VolumeServerStatusRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VolumeServerStatusRequest) ProtoMessage() {} + +func (x *VolumeServerStatusRequest) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[66] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use VolumeServerStatusRequest.ProtoReflect.Descriptor instead. 
+func (*VolumeServerStatusRequest) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{66} +} + +type VolumeServerStatusResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + DiskStatuses []*DiskStatus `protobuf:"bytes,1,rep,name=disk_statuses,json=diskStatuses,proto3" json:"disk_statuses,omitempty"` + MemoryStatus *MemStatus `protobuf:"bytes,2,opt,name=memory_status,json=memoryStatus,proto3" json:"memory_status,omitempty"` +} + +func (x *VolumeServerStatusResponse) Reset() { + *x = VolumeServerStatusResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[67] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *VolumeServerStatusResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VolumeServerStatusResponse) ProtoMessage() {} + +func (x *VolumeServerStatusResponse) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[67] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use VolumeServerStatusResponse.ProtoReflect.Descriptor instead. 
+func (*VolumeServerStatusResponse) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{67} +} + +func (x *VolumeServerStatusResponse) GetDiskStatuses() []*DiskStatus { + if x != nil { + return x.DiskStatuses + } + return nil +} + +func (x *VolumeServerStatusResponse) GetMemoryStatus() *MemStatus { + if x != nil { + return x.MemoryStatus + } + return nil +} + +// select on volume servers +type QueryRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Selections []string `protobuf:"bytes,1,rep,name=selections,proto3" json:"selections,omitempty"` + FromFileIds []string `protobuf:"bytes,2,rep,name=from_file_ids,json=fromFileIds,proto3" json:"from_file_ids,omitempty"` + Filter *QueryRequest_Filter `protobuf:"bytes,3,opt,name=filter,proto3" json:"filter,omitempty"` + InputSerialization *QueryRequest_InputSerialization `protobuf:"bytes,4,opt,name=input_serialization,json=inputSerialization,proto3" json:"input_serialization,omitempty"` + OutputSerialization *QueryRequest_OutputSerialization `protobuf:"bytes,5,opt,name=output_serialization,json=outputSerialization,proto3" json:"output_serialization,omitempty"` +} + +func (x *QueryRequest) Reset() { + *x = QueryRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[68] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *QueryRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*QueryRequest) ProtoMessage() {} + +func (x *QueryRequest) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[68] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use QueryRequest.ProtoReflect.Descriptor instead. 
+func (*QueryRequest) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{68} +} + +func (x *QueryRequest) GetSelections() []string { + if x != nil { + return x.Selections + } + return nil +} + +func (x *QueryRequest) GetFromFileIds() []string { + if x != nil { + return x.FromFileIds + } + return nil +} + +func (x *QueryRequest) GetFilter() *QueryRequest_Filter { + if x != nil { + return x.Filter + } + return nil +} + +func (x *QueryRequest) GetInputSerialization() *QueryRequest_InputSerialization { + if x != nil { + return x.InputSerialization + } + return nil +} + +func (x *QueryRequest) GetOutputSerialization() *QueryRequest_OutputSerialization { + if x != nil { + return x.OutputSerialization + } + return nil +} + +type QueriedStripe struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Records []byte `protobuf:"bytes,1,opt,name=records,proto3" json:"records,omitempty"` +} + +func (x *QueriedStripe) Reset() { + *x = QueriedStripe{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[69] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *QueriedStripe) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*QueriedStripe) ProtoMessage() {} + +func (x *QueriedStripe) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[69] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use QueriedStripe.ProtoReflect.Descriptor instead. 
+func (*QueriedStripe) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{69} +} + +func (x *QueriedStripe) GetRecords() []byte { + if x != nil { + return x.Records + } + return nil +} + +type QueryRequest_Filter struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Field string `protobuf:"bytes,1,opt,name=field,proto3" json:"field,omitempty"` + Operand string `protobuf:"bytes,2,opt,name=operand,proto3" json:"operand,omitempty"` + Value string `protobuf:"bytes,3,opt,name=value,proto3" json:"value,omitempty"` +} + +func (x *QueryRequest_Filter) Reset() { + *x = QueryRequest_Filter{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[70] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *QueryRequest_Filter) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*QueryRequest_Filter) ProtoMessage() {} + +func (x *QueryRequest_Filter) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[70] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use QueryRequest_Filter.ProtoReflect.Descriptor instead. 
+func (*QueryRequest_Filter) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{68, 0} +} + +func (x *QueryRequest_Filter) GetField() string { + if x != nil { + return x.Field + } + return "" +} + +func (x *QueryRequest_Filter) GetOperand() string { + if x != nil { + return x.Operand + } + return "" +} + +func (x *QueryRequest_Filter) GetValue() string { + if x != nil { + return x.Value + } + return "" +} + +type QueryRequest_InputSerialization struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // NONE | GZIP | BZIP2 + CompressionType string `protobuf:"bytes,1,opt,name=compression_type,json=compressionType,proto3" json:"compression_type,omitempty"` + CsvInput *QueryRequest_InputSerialization_CSVInput `protobuf:"bytes,2,opt,name=csv_input,json=csvInput,proto3" json:"csv_input,omitempty"` + JsonInput *QueryRequest_InputSerialization_JSONInput `protobuf:"bytes,3,opt,name=json_input,json=jsonInput,proto3" json:"json_input,omitempty"` + ParquetInput *QueryRequest_InputSerialization_ParquetInput `protobuf:"bytes,4,opt,name=parquet_input,json=parquetInput,proto3" json:"parquet_input,omitempty"` +} + +func (x *QueryRequest_InputSerialization) Reset() { + *x = QueryRequest_InputSerialization{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[71] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *QueryRequest_InputSerialization) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*QueryRequest_InputSerialization) ProtoMessage() {} + +func (x *QueryRequest_InputSerialization) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[71] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use 
QueryRequest_InputSerialization.ProtoReflect.Descriptor instead. +func (*QueryRequest_InputSerialization) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{68, 1} +} + +func (x *QueryRequest_InputSerialization) GetCompressionType() string { + if x != nil { + return x.CompressionType + } + return "" +} + +func (x *QueryRequest_InputSerialization) GetCsvInput() *QueryRequest_InputSerialization_CSVInput { + if x != nil { + return x.CsvInput + } + return nil +} + +func (x *QueryRequest_InputSerialization) GetJsonInput() *QueryRequest_InputSerialization_JSONInput { + if x != nil { + return x.JsonInput + } + return nil +} + +func (x *QueryRequest_InputSerialization) GetParquetInput() *QueryRequest_InputSerialization_ParquetInput { + if x != nil { + return x.ParquetInput + } + return nil +} + +type QueryRequest_OutputSerialization struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + CsvOutput *QueryRequest_OutputSerialization_CSVOutput `protobuf:"bytes,2,opt,name=csv_output,json=csvOutput,proto3" json:"csv_output,omitempty"` + JsonOutput *QueryRequest_OutputSerialization_JSONOutput `protobuf:"bytes,3,opt,name=json_output,json=jsonOutput,proto3" json:"json_output,omitempty"` +} + +func (x *QueryRequest_OutputSerialization) Reset() { + *x = QueryRequest_OutputSerialization{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[72] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *QueryRequest_OutputSerialization) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*QueryRequest_OutputSerialization) ProtoMessage() {} + +func (x *QueryRequest_OutputSerialization) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[72] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + 
ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use QueryRequest_OutputSerialization.ProtoReflect.Descriptor instead. +func (*QueryRequest_OutputSerialization) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{68, 2} +} + +func (x *QueryRequest_OutputSerialization) GetCsvOutput() *QueryRequest_OutputSerialization_CSVOutput { + if x != nil { + return x.CsvOutput + } + return nil +} + +func (x *QueryRequest_OutputSerialization) GetJsonOutput() *QueryRequest_OutputSerialization_JSONOutput { + if x != nil { + return x.JsonOutput + } + return nil +} + +type QueryRequest_InputSerialization_CSVInput struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + FileHeaderInfo string `protobuf:"bytes,1,opt,name=file_header_info,json=fileHeaderInfo,proto3" json:"file_header_info,omitempty"` // Valid values: NONE | USE | IGNORE + RecordDelimiter string `protobuf:"bytes,2,opt,name=record_delimiter,json=recordDelimiter,proto3" json:"record_delimiter,omitempty"` // Default: \n + FieldDelimiter string `protobuf:"bytes,3,opt,name=field_delimiter,json=fieldDelimiter,proto3" json:"field_delimiter,omitempty"` // Default: , + QuoteCharactoer string `protobuf:"bytes,4,opt,name=quote_charactoer,json=quoteCharactoer,proto3" json:"quote_charactoer,omitempty"` // Default: " + QuoteEscapeCharacter string `protobuf:"bytes,5,opt,name=quote_escape_character,json=quoteEscapeCharacter,proto3" json:"quote_escape_character,omitempty"` // Default: " + Comments string `protobuf:"bytes,6,opt,name=comments,proto3" json:"comments,omitempty"` // Default: # + // If true, records might contain record delimiters within quote characters + AllowQuotedRecordDelimiter bool `protobuf:"varint,7,opt,name=allow_quoted_record_delimiter,json=allowQuotedRecordDelimiter,proto3" json:"allow_quoted_record_delimiter,omitempty"` // default False. 
+} + +func (x *QueryRequest_InputSerialization_CSVInput) Reset() { + *x = QueryRequest_InputSerialization_CSVInput{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[73] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *QueryRequest_InputSerialization_CSVInput) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*QueryRequest_InputSerialization_CSVInput) ProtoMessage() {} + +func (x *QueryRequest_InputSerialization_CSVInput) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[73] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use QueryRequest_InputSerialization_CSVInput.ProtoReflect.Descriptor instead. +func (*QueryRequest_InputSerialization_CSVInput) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{68, 1, 0} +} + +func (x *QueryRequest_InputSerialization_CSVInput) GetFileHeaderInfo() string { + if x != nil { + return x.FileHeaderInfo + } + return "" +} + +func (x *QueryRequest_InputSerialization_CSVInput) GetRecordDelimiter() string { + if x != nil { + return x.RecordDelimiter + } + return "" +} + +func (x *QueryRequest_InputSerialization_CSVInput) GetFieldDelimiter() string { + if x != nil { + return x.FieldDelimiter + } + return "" +} + +func (x *QueryRequest_InputSerialization_CSVInput) GetQuoteCharactoer() string { + if x != nil { + return x.QuoteCharactoer + } + return "" +} + +func (x *QueryRequest_InputSerialization_CSVInput) GetQuoteEscapeCharacter() string { + if x != nil { + return x.QuoteEscapeCharacter + } + return "" +} + +func (x *QueryRequest_InputSerialization_CSVInput) GetComments() string { + if x != nil { + return x.Comments + } + return "" +} + +func (x *QueryRequest_InputSerialization_CSVInput) 
GetAllowQuotedRecordDelimiter() bool { + if x != nil { + return x.AllowQuotedRecordDelimiter + } + return false +} + +type QueryRequest_InputSerialization_JSONInput struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` // Valid values: DOCUMENT | LINES +} + +func (x *QueryRequest_InputSerialization_JSONInput) Reset() { + *x = QueryRequest_InputSerialization_JSONInput{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[74] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *QueryRequest_InputSerialization_JSONInput) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*QueryRequest_InputSerialization_JSONInput) ProtoMessage() {} + +func (x *QueryRequest_InputSerialization_JSONInput) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[74] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use QueryRequest_InputSerialization_JSONInput.ProtoReflect.Descriptor instead. 
+func (*QueryRequest_InputSerialization_JSONInput) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{68, 1, 1} +} + +func (x *QueryRequest_InputSerialization_JSONInput) GetType() string { + if x != nil { + return x.Type + } + return "" +} + +type QueryRequest_InputSerialization_ParquetInput struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *QueryRequest_InputSerialization_ParquetInput) Reset() { + *x = QueryRequest_InputSerialization_ParquetInput{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[75] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *QueryRequest_InputSerialization_ParquetInput) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*QueryRequest_InputSerialization_ParquetInput) ProtoMessage() {} + +func (x *QueryRequest_InputSerialization_ParquetInput) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[75] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use QueryRequest_InputSerialization_ParquetInput.ProtoReflect.Descriptor instead. 
+func (*QueryRequest_InputSerialization_ParquetInput) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{68, 1, 2} +} + +type QueryRequest_OutputSerialization_CSVOutput struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + QuoteFields string `protobuf:"bytes,1,opt,name=quote_fields,json=quoteFields,proto3" json:"quote_fields,omitempty"` // Valid values: ALWAYS | ASNEEDED + RecordDelimiter string `protobuf:"bytes,2,opt,name=record_delimiter,json=recordDelimiter,proto3" json:"record_delimiter,omitempty"` // Default: \n + FieldDelimiter string `protobuf:"bytes,3,opt,name=field_delimiter,json=fieldDelimiter,proto3" json:"field_delimiter,omitempty"` // Default: , + QuoteCharactoer string `protobuf:"bytes,4,opt,name=quote_charactoer,json=quoteCharactoer,proto3" json:"quote_charactoer,omitempty"` // Default: " + QuoteEscapeCharacter string `protobuf:"bytes,5,opt,name=quote_escape_character,json=quoteEscapeCharacter,proto3" json:"quote_escape_character,omitempty"` // Default: " +} + +func (x *QueryRequest_OutputSerialization_CSVOutput) Reset() { + *x = QueryRequest_OutputSerialization_CSVOutput{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[76] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *QueryRequest_OutputSerialization_CSVOutput) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*QueryRequest_OutputSerialization_CSVOutput) ProtoMessage() {} + +func (x *QueryRequest_OutputSerialization_CSVOutput) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[76] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use QueryRequest_OutputSerialization_CSVOutput.ProtoReflect.Descriptor 
instead. +func (*QueryRequest_OutputSerialization_CSVOutput) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{68, 2, 0} +} + +func (x *QueryRequest_OutputSerialization_CSVOutput) GetQuoteFields() string { + if x != nil { + return x.QuoteFields + } + return "" +} + +func (x *QueryRequest_OutputSerialization_CSVOutput) GetRecordDelimiter() string { + if x != nil { + return x.RecordDelimiter + } + return "" +} + +func (x *QueryRequest_OutputSerialization_CSVOutput) GetFieldDelimiter() string { + if x != nil { + return x.FieldDelimiter + } + return "" +} + +func (x *QueryRequest_OutputSerialization_CSVOutput) GetQuoteCharactoer() string { + if x != nil { + return x.QuoteCharactoer + } + return "" +} + +func (x *QueryRequest_OutputSerialization_CSVOutput) GetQuoteEscapeCharacter() string { + if x != nil { + return x.QuoteEscapeCharacter + } + return "" +} + +type QueryRequest_OutputSerialization_JSONOutput struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + RecordDelimiter string `protobuf:"bytes,1,opt,name=record_delimiter,json=recordDelimiter,proto3" json:"record_delimiter,omitempty"` +} + +func (x *QueryRequest_OutputSerialization_JSONOutput) Reset() { + *x = QueryRequest_OutputSerialization_JSONOutput{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[77] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *QueryRequest_OutputSerialization_JSONOutput) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*QueryRequest_OutputSerialization_JSONOutput) ProtoMessage() {} + +func (x *QueryRequest_OutputSerialization_JSONOutput) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[77] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return 
ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use QueryRequest_OutputSerialization_JSONOutput.ProtoReflect.Descriptor instead. +func (*QueryRequest_OutputSerialization_JSONOutput) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{68, 2, 1} +} + +func (x *QueryRequest_OutputSerialization_JSONOutput) GetRecordDelimiter() string { + if x != nil { + return x.RecordDelimiter + } + return "" +} + +var File_volume_server_proto protoreflect.FileDescriptor + +var file_volume_server_proto_rawDesc = []byte{ + 0x0a, 0x13, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x10, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, + 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x22, 0x5b, 0x0a, 0x12, 0x42, 0x61, 0x74, 0x63, 0x68, + 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x19, 0x0a, + 0x08, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, + 0x07, 0x66, 0x69, 0x6c, 0x65, 0x49, 0x64, 0x73, 0x12, 0x2a, 0x0a, 0x11, 0x73, 0x6b, 0x69, 0x70, + 0x5f, 0x63, 0x6f, 0x6f, 0x6b, 0x69, 0x65, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x0f, 0x73, 0x6b, 0x69, 0x70, 0x43, 0x6f, 0x6f, 0x6b, 0x69, 0x65, 0x43, + 0x68, 0x65, 0x63, 0x6b, 0x22, 0x4f, 0x0a, 0x13, 0x42, 0x61, 0x74, 0x63, 0x68, 0x44, 0x65, 0x6c, + 0x65, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x38, 0x0a, 0x07, 0x72, + 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x76, + 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, + 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x07, 0x72, 0x65, + 0x73, 0x75, 0x6c, 0x74, 0x73, 0x22, 0x83, 0x01, 0x0a, 0x0c, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, + 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x17, 0x0a, 0x07, 0x66, 0x69, 0x6c, 
0x65, 0x5f, 0x69, + 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x66, 0x69, 0x6c, 0x65, 0x49, 0x64, 0x12, + 0x16, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, + 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x12, 0x0a, + 0x04, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x04, 0x73, 0x69, 0x7a, + 0x65, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, + 0x28, 0x0d, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x07, 0x0a, 0x05, 0x45, + 0x6d, 0x70, 0x74, 0x79, 0x22, 0x37, 0x0a, 0x18, 0x56, 0x61, 0x63, 0x75, 0x75, 0x6d, 0x56, 0x6f, + 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x22, 0x40, 0x0a, + 0x19, 0x56, 0x61, 0x63, 0x75, 0x75, 0x6d, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x68, 0x65, + 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x67, 0x61, + 0x72, 0x62, 0x61, 0x67, 0x65, 0x5f, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x01, 0x52, 0x0c, 0x67, 0x61, 0x72, 0x62, 0x61, 0x67, 0x65, 0x52, 0x61, 0x74, 0x69, 0x6f, 0x22, + 0x5b, 0x0a, 0x1a, 0x56, 0x61, 0x63, 0x75, 0x75, 0x6d, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, + 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, + 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, + 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x12, 0x20, 0x0a, 0x0b, 0x70, 0x72, + 0x65, 0x61, 0x6c, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, + 0x0b, 0x70, 0x72, 
0x65, 0x61, 0x6c, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x65, 0x22, 0x1d, 0x0a, 0x1b, + 0x56, 0x61, 0x63, 0x75, 0x75, 0x6d, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6f, 0x6d, 0x70, + 0x61, 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x38, 0x0a, 0x19, 0x56, + 0x61, 0x63, 0x75, 0x75, 0x6d, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6f, 0x6d, 0x6d, 0x69, + 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, + 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, + 0x75, 0x6d, 0x65, 0x49, 0x64, 0x22, 0x3e, 0x0a, 0x1a, 0x56, 0x61, 0x63, 0x75, 0x75, 0x6d, 0x56, + 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x20, 0x0a, 0x0c, 0x69, 0x73, 0x5f, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6f, + 0x6e, 0x6c, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x69, 0x73, 0x52, 0x65, 0x61, + 0x64, 0x4f, 0x6e, 0x6c, 0x79, 0x22, 0x39, 0x0a, 0x1a, 0x56, 0x61, 0x63, 0x75, 0x75, 0x6d, 0x56, + 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6c, 0x65, 0x61, 0x6e, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, + 0x22, 0x1d, 0x0a, 0x1b, 0x56, 0x61, 0x63, 0x75, 0x75, 0x6d, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, + 0x43, 0x6c, 0x65, 0x61, 0x6e, 0x75, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, + 0x39, 0x0a, 0x17, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, + 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, + 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x1a, 0x0a, 0x18, 0x44, 0x65, + 0x6c, 0x65, 0x74, 0x65, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 
0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xde, 0x01, 0x0a, 0x15, 0x41, 0x6c, 0x6c, 0x6f, 0x63, + 0x61, 0x74, 0x65, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x12, 0x1e, 0x0a, + 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x20, 0x0a, + 0x0b, 0x70, 0x72, 0x65, 0x61, 0x6c, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x03, 0x52, 0x0b, 0x70, 0x72, 0x65, 0x61, 0x6c, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x65, 0x12, + 0x20, 0x0a, 0x0b, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x74, 0x74, 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, + 0x74, 0x74, 0x6c, 0x12, 0x32, 0x0a, 0x16, 0x6d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x5f, 0x6d, 0x61, + 0x70, 0x5f, 0x6d, 0x61, 0x78, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x5f, 0x6d, 0x62, 0x18, 0x06, 0x20, + 0x01, 0x28, 0x0d, 0x52, 0x12, 0x6d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x4d, 0x61, 0x70, 0x4d, 0x61, + 0x78, 0x53, 0x69, 0x7a, 0x65, 0x4d, 0x62, 0x22, 0x18, 0x0a, 0x16, 0x41, 0x6c, 0x6c, 0x6f, 0x63, + 0x61, 0x74, 0x65, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x22, 0x36, 0x0a, 0x17, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x53, 0x79, 0x6e, 0x63, 0x53, + 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, + 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, + 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x22, 0xfb, 0x01, 0x0a, 0x18, 0x56, 
0x6f, + 0x6c, 0x75, 0x6d, 0x65, 0x53, 0x79, 0x6e, 0x63, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, + 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, + 0x65, 0x49, 0x64, 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x12, 0x20, 0x0a, 0x0b, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x74, 0x74, 0x6c, 0x18, 0x05, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x03, 0x74, 0x74, 0x6c, 0x12, 0x1f, 0x0a, 0x0b, 0x74, 0x61, 0x69, 0x6c, 0x5f, + 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0a, 0x74, 0x61, + 0x69, 0x6c, 0x4f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x6f, 0x6d, 0x70, + 0x61, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x07, 0x20, 0x01, + 0x28, 0x0d, 0x52, 0x0f, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x52, 0x65, 0x76, 0x69, 0x73, + 0x69, 0x6f, 0x6e, 0x12, 0x22, 0x0a, 0x0d, 0x69, 0x64, 0x78, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x5f, + 0x73, 0x69, 0x7a, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x69, 0x64, 0x78, 0x46, + 0x69, 0x6c, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x22, 0x56, 0x0a, 0x1c, 0x56, 0x6f, 0x6c, 0x75, 0x6d, + 0x65, 0x49, 0x6e, 0x63, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x43, 0x6f, 0x70, 0x79, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, + 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, + 0x6d, 0x65, 0x49, 0x64, 0x12, 0x19, 0x0a, 0x08, 0x73, 0x69, 0x6e, 0x63, 0x65, 0x5f, 0x6e, 0x73, + 0x18, 0x02, 0x20, 0x01, 0x28, 
0x04, 0x52, 0x07, 0x73, 0x69, 0x6e, 0x63, 0x65, 0x4e, 0x73, 0x22, + 0x42, 0x0a, 0x1d, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x6e, 0x63, 0x72, 0x65, 0x6d, 0x65, + 0x6e, 0x74, 0x61, 0x6c, 0x43, 0x6f, 0x70, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x21, 0x0a, 0x0c, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x66, 0x69, 0x6c, 0x65, 0x43, 0x6f, 0x6e, 0x74, + 0x65, 0x6e, 0x74, 0x22, 0x31, 0x0a, 0x12, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x4d, 0x6f, 0x75, + 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, + 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, + 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x22, 0x15, 0x0a, 0x13, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, + 0x4d, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x33, 0x0a, + 0x14, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x55, 0x6e, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, + 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, + 0x49, 0x64, 0x22, 0x17, 0x0a, 0x15, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x55, 0x6e, 0x6d, 0x6f, + 0x75, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x32, 0x0a, 0x13, 0x56, + 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x22, + 0x16, 0x0a, 0x14, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x38, 0x0a, 0x19, 0x56, 0x6f, 0x6c, 0x75, 0x6d, + 0x65, 0x4d, 0x61, 0x72, 0x6b, 0x52, 0x65, 0x61, 0x64, 0x6f, 0x6e, 
0x6c, 0x79, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, + 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, + 0x64, 0x22, 0x1c, 0x0a, 0x1a, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x4d, 0x61, 0x72, 0x6b, 0x52, + 0x65, 0x61, 0x64, 0x6f, 0x6e, 0x6c, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, + 0x57, 0x0a, 0x16, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, + 0x72, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, + 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, + 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x12, 0x20, 0x0a, 0x0b, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x72, 0x65, 0x70, + 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x2f, 0x0a, 0x17, 0x56, 0x6f, 0x6c, 0x75, + 0x6d, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x22, 0xae, 0x01, 0x0a, 0x11, 0x56, 0x6f, + 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6f, 0x70, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x12, 0x1e, 0x0a, 0x0a, + 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x20, 0x0a, 0x0b, + 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0b, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x10, + 0x0a, 
0x03, 0x74, 0x74, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x74, 0x74, 0x6c, + 0x12, 0x28, 0x0a, 0x10, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x5f, + 0x6e, 0x6f, 0x64, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x44, 0x61, 0x74, 0x61, 0x4e, 0x6f, 0x64, 0x65, 0x22, 0x3f, 0x0a, 0x12, 0x56, 0x6f, + 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6f, 0x70, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x29, 0x0a, 0x11, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x61, 0x70, 0x70, 0x65, 0x6e, 0x64, 0x5f, + 0x61, 0x74, 0x5f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0e, 0x6c, 0x61, 0x73, + 0x74, 0x41, 0x70, 0x70, 0x65, 0x6e, 0x64, 0x41, 0x74, 0x4e, 0x73, 0x22, 0x94, 0x02, 0x0a, 0x0f, + 0x43, 0x6f, 0x70, 0x79, 0x46, 0x69, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x12, 0x10, 0x0a, 0x03, + 0x65, 0x78, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x65, 0x78, 0x74, 0x12, 0x2f, + 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x76, + 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x12, 0x63, 0x6f, 0x6d, + 0x70, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x12, + 0x1f, 0x0a, 0x0b, 0x73, 0x74, 0x6f, 0x70, 0x5f, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x04, 0x52, 0x0a, 0x73, 0x74, 0x6f, 0x70, 0x4f, 0x66, 0x66, 0x73, 0x65, 0x74, + 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x12, 0x20, 0x0a, 0x0c, 0x69, 0x73, 0x5f, 0x65, 0x63, 0x5f, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, + 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 
0x0a, 0x69, 0x73, 0x45, 0x63, 0x56, 0x6f, 0x6c, 0x75, + 0x6d, 0x65, 0x12, 0x3e, 0x0a, 0x1c, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x5f, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x66, 0x6f, 0x75, + 0x6e, 0x64, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x18, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, + 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x4e, 0x6f, 0x74, 0x46, 0x6f, 0x75, + 0x6e, 0x64, 0x22, 0x35, 0x0a, 0x10, 0x43, 0x6f, 0x70, 0x79, 0x46, 0x69, 0x6c, 0x65, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x63, + 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x66, 0x69, + 0x6c, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x22, 0x83, 0x01, 0x0a, 0x17, 0x56, 0x6f, + 0x6c, 0x75, 0x6d, 0x65, 0x54, 0x61, 0x69, 0x6c, 0x53, 0x65, 0x6e, 0x64, 0x65, 0x72, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, + 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, + 0x49, 0x64, 0x12, 0x19, 0x0a, 0x08, 0x73, 0x69, 0x6e, 0x63, 0x65, 0x5f, 0x6e, 0x73, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x04, 0x52, 0x07, 0x73, 0x69, 0x6e, 0x63, 0x65, 0x4e, 0x73, 0x12, 0x30, 0x0a, + 0x14, 0x69, 0x64, 0x6c, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x5f, 0x73, 0x65, + 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x12, 0x69, 0x64, 0x6c, + 0x65, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x22, + 0x84, 0x01, 0x0a, 0x18, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x54, 0x61, 0x69, 0x6c, 0x53, 0x65, + 0x6e, 0x64, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x23, 0x0a, 0x0d, + 0x6e, 0x65, 0x65, 0x64, 0x6c, 0x65, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0c, 0x52, 0x0c, 0x6e, 0x65, 0x65, 0x64, 0x6c, 0x65, 0x48, 0x65, 
0x61, 0x64, 0x65, + 0x72, 0x12, 0x1f, 0x0a, 0x0b, 0x6e, 0x65, 0x65, 0x64, 0x6c, 0x65, 0x5f, 0x62, 0x6f, 0x64, 0x79, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0a, 0x6e, 0x65, 0x65, 0x64, 0x6c, 0x65, 0x42, 0x6f, + 0x64, 0x79, 0x12, 0x22, 0x0a, 0x0d, 0x69, 0x73, 0x5f, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x63, 0x68, + 0x75, 0x6e, 0x6b, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x69, 0x73, 0x4c, 0x61, 0x73, + 0x74, 0x43, 0x68, 0x75, 0x6e, 0x6b, 0x22, 0xb7, 0x01, 0x0a, 0x19, 0x56, 0x6f, 0x6c, 0x75, 0x6d, + 0x65, 0x54, 0x61, 0x69, 0x6c, 0x52, 0x65, 0x63, 0x65, 0x69, 0x76, 0x65, 0x72, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, + 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, + 0x64, 0x12, 0x19, 0x0a, 0x08, 0x73, 0x69, 0x6e, 0x63, 0x65, 0x5f, 0x6e, 0x73, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x04, 0x52, 0x07, 0x73, 0x69, 0x6e, 0x63, 0x65, 0x4e, 0x73, 0x12, 0x30, 0x0a, 0x14, + 0x69, 0x64, 0x6c, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x5f, 0x73, 0x65, 0x63, + 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x12, 0x69, 0x64, 0x6c, 0x65, + 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x12, 0x30, + 0x0a, 0x14, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, + 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, + 0x22, 0x1c, 0x0a, 0x1a, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x54, 0x61, 0x69, 0x6c, 0x52, 0x65, + 0x63, 0x65, 0x69, 0x76, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x5c, + 0x0a, 0x1d, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, + 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x1b, 0x0a, 0x09, 
0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x12, 0x1e, 0x0a, 0x0a, + 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x20, 0x0a, 0x1e, + 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x47, 0x65, + 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x5b, + 0x0a, 0x1c, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, + 0x52, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, + 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x12, 0x1e, 0x0a, 0x0a, 0x63, + 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x4b, 0x0a, 0x1d, 0x56, + 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x52, 0x65, 0x62, + 0x75, 0x69, 0x6c, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2a, 0x0a, 0x11, + 0x72, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x74, 0x5f, 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x69, 0x64, + 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0d, 0x52, 0x0f, 0x72, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x74, + 0x53, 0x68, 0x61, 0x72, 0x64, 0x49, 0x64, 0x73, 0x22, 0x8b, 0x02, 0x0a, 0x19, 0x56, 0x6f, 0x6c, + 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x43, 0x6f, 0x70, 0x79, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, + 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, + 0x65, 0x49, 0x64, 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, 
0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x12, 0x1b, 0x0a, 0x09, 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x69, 0x64, 0x73, + 0x18, 0x03, 0x20, 0x03, 0x28, 0x0d, 0x52, 0x08, 0x73, 0x68, 0x61, 0x72, 0x64, 0x49, 0x64, 0x73, + 0x12, 0x22, 0x0a, 0x0d, 0x63, 0x6f, 0x70, 0x79, 0x5f, 0x65, 0x63, 0x78, 0x5f, 0x66, 0x69, 0x6c, + 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x63, 0x6f, 0x70, 0x79, 0x45, 0x63, 0x78, + 0x46, 0x69, 0x6c, 0x65, 0x12, 0x28, 0x0a, 0x10, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x64, + 0x61, 0x74, 0x61, 0x5f, 0x6e, 0x6f, 0x64, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x44, 0x61, 0x74, 0x61, 0x4e, 0x6f, 0x64, 0x65, 0x12, 0x22, + 0x0a, 0x0d, 0x63, 0x6f, 0x70, 0x79, 0x5f, 0x65, 0x63, 0x6a, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x18, + 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x63, 0x6f, 0x70, 0x79, 0x45, 0x63, 0x6a, 0x46, 0x69, + 0x6c, 0x65, 0x12, 0x22, 0x0a, 0x0d, 0x63, 0x6f, 0x70, 0x79, 0x5f, 0x76, 0x69, 0x66, 0x5f, 0x66, + 0x69, 0x6c, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x63, 0x6f, 0x70, 0x79, 0x56, + 0x69, 0x66, 0x46, 0x69, 0x6c, 0x65, 0x22, 0x1c, 0x0a, 0x1a, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, + 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x43, 0x6f, 0x70, 0x79, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x77, 0x0a, 0x1b, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, + 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, + 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 
0x6e, + 0x12, 0x1b, 0x0a, 0x09, 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x03, 0x20, + 0x03, 0x28, 0x0d, 0x52, 0x08, 0x73, 0x68, 0x61, 0x72, 0x64, 0x49, 0x64, 0x73, 0x22, 0x1e, 0x0a, + 0x1c, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x44, + 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x76, 0x0a, + 0x1a, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x4d, + 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x76, + 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, + 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6f, + 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1b, 0x0a, 0x09, 0x73, 0x68, 0x61, 0x72, + 0x64, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0d, 0x52, 0x08, 0x73, 0x68, 0x61, + 0x72, 0x64, 0x49, 0x64, 0x73, 0x22, 0x1d, 0x0a, 0x1b, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, + 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x4d, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x58, 0x0a, 0x1c, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, + 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x55, 0x6e, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, + 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, + 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x03, + 0x20, 0x03, 0x28, 0x0d, 0x52, 0x08, 0x73, 0x68, 0x61, 0x72, 0x64, 0x49, 0x64, 0x73, 0x22, 0x1f, + 0x0a, 0x1d, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, + 0x55, 0x6e, 0x6d, 0x6f, 0x75, 
0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, + 0x99, 0x01, 0x0a, 0x18, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, + 0x64, 0x52, 0x65, 0x61, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, + 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, + 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x12, 0x19, 0x0a, 0x08, 0x73, 0x68, 0x61, + 0x72, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x07, 0x73, 0x68, 0x61, + 0x72, 0x64, 0x49, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x03, 0x52, 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x12, 0x12, 0x0a, 0x04, + 0x73, 0x69, 0x7a, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x04, 0x73, 0x69, 0x7a, 0x65, + 0x12, 0x19, 0x0a, 0x08, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x05, 0x20, 0x01, + 0x28, 0x04, 0x52, 0x07, 0x66, 0x69, 0x6c, 0x65, 0x4b, 0x65, 0x79, 0x22, 0x4e, 0x0a, 0x19, 0x56, + 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x61, 0x64, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x12, 0x1d, 0x0a, 0x0a, + 0x69, 0x73, 0x5f, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x09, 0x69, 0x73, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x22, 0x8d, 0x01, 0x0a, 0x19, + 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x42, 0x6c, 0x6f, 0x62, 0x44, 0x65, 0x6c, 0x65, + 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, + 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, + 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 
0x0a, 0x63, 0x6f, 0x6c, 0x6c, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x19, 0x0a, 0x08, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x6b, + 0x65, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x07, 0x66, 0x69, 0x6c, 0x65, 0x4b, 0x65, + 0x79, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x0d, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x1c, 0x0a, 0x1a, 0x56, + 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x42, 0x6c, 0x6f, 0x62, 0x44, 0x65, 0x6c, 0x65, 0x74, + 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x5c, 0x0a, 0x1d, 0x56, 0x6f, 0x6c, + 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x54, 0x6f, 0x56, 0x6f, 0x6c, + 0x75, 0x6d, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, + 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, + 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6f, 0x6c, + 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x20, 0x0a, 0x1e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, + 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x54, 0x6f, 0x56, 0x6f, 0x6c, 0x75, 0x6d, + 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x3a, 0x0a, 0x1b, 0x52, 0x65, 0x61, + 0x64, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, + 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, + 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, + 0x75, 0x6d, 0x65, 0x49, 0x64, 0x22, 0xed, 0x02, 0x0a, 0x1c, 0x52, 0x65, 0x61, 0x64, 0x56, 0x6f, + 0x6c, 0x75, 0x6d, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, + 0x5f, 
0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, + 0x65, 0x49, 0x64, 0x12, 0x3b, 0x0a, 0x1a, 0x69, 0x64, 0x78, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x5f, + 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x5f, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, + 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x17, 0x69, 0x64, 0x78, 0x46, 0x69, 0x6c, 0x65, + 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, + 0x12, 0x22, 0x0a, 0x0d, 0x69, 0x64, 0x78, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x73, 0x69, 0x7a, + 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x69, 0x64, 0x78, 0x46, 0x69, 0x6c, 0x65, + 0x53, 0x69, 0x7a, 0x65, 0x12, 0x3b, 0x0a, 0x1a, 0x64, 0x61, 0x74, 0x5f, 0x66, 0x69, 0x6c, 0x65, + 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x5f, 0x73, 0x65, 0x63, 0x6f, 0x6e, + 0x64, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x17, 0x64, 0x61, 0x74, 0x46, 0x69, 0x6c, + 0x65, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, + 0x73, 0x12, 0x22, 0x0a, 0x0d, 0x64, 0x61, 0x74, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x73, 0x69, + 0x7a, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x64, 0x61, 0x74, 0x46, 0x69, 0x6c, + 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x63, 0x6f, + 0x75, 0x6e, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x66, 0x69, 0x6c, 0x65, 0x43, + 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x2f, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x07, 0x20, 0x01, 0x28, + 0x0d, 0x52, 0x12, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x76, + 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x9e, 
0x01, 0x0a, 0x0a, 0x44, 0x69, 0x73, 0x6b, 0x53, 0x74, + 0x61, 0x74, 0x75, 0x73, 0x12, 0x10, 0x0a, 0x03, 0x64, 0x69, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x03, 0x64, 0x69, 0x72, 0x12, 0x10, 0x0a, 0x03, 0x61, 0x6c, 0x6c, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x04, 0x52, 0x03, 0x61, 0x6c, 0x6c, 0x12, 0x12, 0x0a, 0x04, 0x75, 0x73, 0x65, 0x64, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x04, 0x75, 0x73, 0x65, 0x64, 0x12, 0x12, 0x0a, 0x04, + 0x66, 0x72, 0x65, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x04, 0x66, 0x72, 0x65, 0x65, + 0x12, 0x21, 0x0a, 0x0c, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x5f, 0x66, 0x72, 0x65, 0x65, + 0x18, 0x05, 0x20, 0x01, 0x28, 0x02, 0x52, 0x0b, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x46, + 0x72, 0x65, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x5f, 0x75, + 0x73, 0x65, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x02, 0x52, 0x0b, 0x70, 0x65, 0x72, 0x63, 0x65, + 0x6e, 0x74, 0x55, 0x73, 0x65, 0x64, 0x22, 0xa3, 0x01, 0x0a, 0x09, 0x4d, 0x65, 0x6d, 0x53, 0x74, + 0x61, 0x74, 0x75, 0x73, 0x12, 0x1e, 0x0a, 0x0a, 0x67, 0x6f, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, + 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, 0x67, 0x6f, 0x72, 0x6f, 0x75, 0x74, + 0x69, 0x6e, 0x65, 0x73, 0x12, 0x10, 0x0a, 0x03, 0x61, 0x6c, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x04, 0x52, 0x03, 0x61, 0x6c, 0x6c, 0x12, 0x12, 0x0a, 0x04, 0x75, 0x73, 0x65, 0x64, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x04, 0x52, 0x04, 0x75, 0x73, 0x65, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x66, 0x72, + 0x65, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x04, 0x66, 0x72, 0x65, 0x65, 0x12, 0x12, + 0x0a, 0x04, 0x73, 0x65, 0x6c, 0x66, 0x18, 0x05, 0x20, 0x01, 0x28, 0x04, 0x52, 0x04, 0x73, 0x65, + 0x6c, 0x66, 0x12, 0x12, 0x0a, 0x04, 0x68, 0x65, 0x61, 0x70, 0x18, 0x06, 0x20, 0x01, 0x28, 0x04, + 0x52, 0x04, 0x68, 0x65, 0x61, 0x70, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x63, 0x6b, 0x18, + 0x07, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x73, 0x74, 0x61, 0x63, 0x6b, 0x22, 
0xd8, 0x01, 0x0a, + 0x0a, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x62, + 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0b, 0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1d, + 0x0a, 0x0a, 0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x09, 0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x49, 0x64, 0x12, 0x10, 0x0a, + 0x03, 0x6b, 0x65, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, + 0x16, 0x0a, 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, + 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x66, 0x69, 0x6c, 0x65, 0x5f, + 0x73, 0x69, 0x7a, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x66, 0x69, 0x6c, 0x65, + 0x53, 0x69, 0x7a, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x6d, 0x6f, 0x64, 0x69, 0x66, 0x69, 0x65, 0x64, + 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0c, 0x6d, 0x6f, 0x64, + 0x69, 0x66, 0x69, 0x65, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x65, 0x78, 0x74, + 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x65, 0x78, + 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x7c, 0x0a, 0x0a, 0x56, 0x6f, 0x6c, 0x75, 0x6d, + 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x32, 0x0a, 0x05, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x18, 0x01, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, + 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x46, 0x69, + 0x6c, 0x65, 0x52, 0x05, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, + 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, + 0x69, 0x6f, 0x6e, 0x12, 0x20, 0x0a, 0x0b, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x18, 
0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xc8, 0x01, 0x0a, 0x20, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, + 0x54, 0x69, 0x65, 0x72, 0x4d, 0x6f, 0x76, 0x65, 0x44, 0x61, 0x74, 0x54, 0x6f, 0x52, 0x65, 0x6d, + 0x6f, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, + 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, + 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6f, 0x6c, + 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x38, 0x0a, 0x18, 0x64, 0x65, 0x73, 0x74, 0x69, + 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x5f, 0x6e, + 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x16, 0x64, 0x65, 0x73, 0x74, 0x69, + 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x4e, 0x61, 0x6d, + 0x65, 0x12, 0x2d, 0x0a, 0x13, 0x6b, 0x65, 0x65, 0x70, 0x5f, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, + 0x64, 0x61, 0x74, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, + 0x6b, 0x65, 0x65, 0x70, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x44, 0x61, 0x74, 0x46, 0x69, 0x6c, 0x65, + 0x22, 0x73, 0x0a, 0x21, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x54, 0x69, 0x65, 0x72, 0x4d, 0x6f, + 0x76, 0x65, 0x44, 0x61, 0x74, 0x54, 0x6f, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x70, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, + 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x70, 0x72, 0x6f, 0x63, 0x65, 0x73, + 0x73, 0x65, 0x64, 0x12, 0x30, 0x0a, 0x13, 0x70, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x65, 0x64, + 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x02, + 0x52, 0x13, 0x70, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 
0x65, 0x64, 0x50, 0x65, 0x72, 0x63, 0x65, + 0x6e, 0x74, 0x61, 0x67, 0x65, 0x22, 0x92, 0x01, 0x0a, 0x22, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, + 0x54, 0x69, 0x65, 0x72, 0x4d, 0x6f, 0x76, 0x65, 0x44, 0x61, 0x74, 0x46, 0x72, 0x6f, 0x6d, 0x52, + 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, + 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, + 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, 0x6c, + 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, + 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2f, 0x0a, 0x14, 0x6b, 0x65, 0x65, + 0x70, 0x5f, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x5f, 0x64, 0x61, 0x74, 0x5f, 0x66, 0x69, 0x6c, + 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x11, 0x6b, 0x65, 0x65, 0x70, 0x52, 0x65, 0x6d, + 0x6f, 0x74, 0x65, 0x44, 0x61, 0x74, 0x46, 0x69, 0x6c, 0x65, 0x22, 0x75, 0x0a, 0x23, 0x56, 0x6f, + 0x6c, 0x75, 0x6d, 0x65, 0x54, 0x69, 0x65, 0x72, 0x4d, 0x6f, 0x76, 0x65, 0x44, 0x61, 0x74, 0x46, + 0x72, 0x6f, 0x6d, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x70, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x65, 0x64, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x70, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x65, 0x64, 0x12, + 0x30, 0x0a, 0x13, 0x70, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x65, 0x64, 0x50, 0x65, 0x72, 0x63, + 0x65, 0x6e, 0x74, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x02, 0x52, 0x13, 0x70, 0x72, + 0x6f, 0x63, 0x65, 0x73, 0x73, 0x65, 0x64, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x61, 0x67, + 0x65, 0x22, 0x1b, 0x0a, 0x19, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x76, 0x65, + 0x72, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0xa1, + 0x01, 0x0a, 0x1a, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 
0x53, + 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x41, 0x0a, + 0x0d, 0x64, 0x69, 0x73, 0x6b, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x65, 0x73, 0x18, 0x01, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, + 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x44, 0x69, 0x73, 0x6b, 0x53, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x52, 0x0c, 0x64, 0x69, 0x73, 0x6b, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x65, 0x73, + 0x12, 0x40, 0x0a, 0x0d, 0x6d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, + 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, + 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4d, 0x65, 0x6d, 0x53, 0x74, + 0x61, 0x74, 0x75, 0x73, 0x52, 0x0c, 0x6d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x53, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x22, 0xf8, 0x0c, 0x0a, 0x0c, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x1e, 0x0a, 0x0a, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x12, 0x22, 0x0a, 0x0d, 0x66, 0x72, 0x6f, 0x6d, 0x5f, 0x66, 0x69, 0x6c, 0x65, + 0x5f, 0x69, 0x64, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0b, 0x66, 0x72, 0x6f, 0x6d, + 0x46, 0x69, 0x6c, 0x65, 0x49, 0x64, 0x73, 0x12, 0x3d, 0x0a, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, + 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, + 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x52, 0x06, + 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x62, 0x0a, 0x13, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x5f, + 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x31, 
0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, + 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x2e, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, + 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x12, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x53, 0x65, 0x72, + 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x65, 0x0a, 0x14, 0x6f, 0x75, + 0x74, 0x70, 0x75, 0x74, 0x5f, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, + 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x51, 0x75, 0x65, 0x72, + 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x53, + 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x6f, 0x75, + 0x74, 0x70, 0x75, 0x74, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x1a, 0x4e, 0x0a, 0x06, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x14, 0x0a, 0x05, 0x66, + 0x69, 0x65, 0x6c, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x66, 0x69, 0x65, 0x6c, + 0x64, 0x12, 0x18, 0x0a, 0x07, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x07, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x6e, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x1a, 0xd5, 0x05, 0x0a, 0x12, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x53, 0x65, 0x72, 0x69, 0x61, + 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x6f, 0x6d, 0x70, + 0x72, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0f, 0x63, 0x6f, 0x6d, 0x70, 0x72, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x54, + 0x79, 0x70, 0x65, 0x12, 0x57, 0x0a, 0x09, 0x63, 0x73, 0x76, 0x5f, 
0x69, 0x6e, 0x70, 0x75, 0x74, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3a, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, + 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x53, 0x65, 0x72, 0x69, + 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x43, 0x53, 0x56, 0x49, 0x6e, 0x70, + 0x75, 0x74, 0x52, 0x08, 0x63, 0x73, 0x76, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x12, 0x5a, 0x0a, 0x0a, + 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x3b, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, + 0x5f, 0x70, 0x62, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x2e, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x2e, 0x4a, 0x53, 0x4f, 0x4e, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x52, 0x09, 0x6a, + 0x73, 0x6f, 0x6e, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x12, 0x63, 0x0a, 0x0d, 0x70, 0x61, 0x72, 0x71, + 0x75, 0x65, 0x74, 0x5f, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x3e, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, + 0x70, 0x62, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, + 0x49, 0x6e, 0x70, 0x75, 0x74, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x2e, 0x50, 0x61, 0x72, 0x71, 0x75, 0x65, 0x74, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x52, + 0x0c, 0x70, 0x61, 0x72, 0x71, 0x75, 0x65, 0x74, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x1a, 0xc8, 0x02, + 0x0a, 0x08, 0x43, 0x53, 0x56, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x12, 0x28, 0x0a, 0x10, 0x66, 0x69, + 0x6c, 0x65, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x66, 0x69, 0x6c, 0x65, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, + 0x49, 
0x6e, 0x66, 0x6f, 0x12, 0x29, 0x0a, 0x10, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x5f, 0x64, + 0x65, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, + 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x44, 0x65, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x65, 0x72, 0x12, + 0x27, 0x0a, 0x0f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x64, 0x65, 0x6c, 0x69, 0x6d, 0x69, 0x74, + 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x44, + 0x65, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x65, 0x72, 0x12, 0x29, 0x0a, 0x10, 0x71, 0x75, 0x6f, 0x74, + 0x65, 0x5f, 0x63, 0x68, 0x61, 0x72, 0x61, 0x63, 0x74, 0x6f, 0x65, 0x72, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0f, 0x71, 0x75, 0x6f, 0x74, 0x65, 0x43, 0x68, 0x61, 0x72, 0x61, 0x63, 0x74, + 0x6f, 0x65, 0x72, 0x12, 0x34, 0x0a, 0x16, 0x71, 0x75, 0x6f, 0x74, 0x65, 0x5f, 0x65, 0x73, 0x63, + 0x61, 0x70, 0x65, 0x5f, 0x63, 0x68, 0x61, 0x72, 0x61, 0x63, 0x74, 0x65, 0x72, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x14, 0x71, 0x75, 0x6f, 0x74, 0x65, 0x45, 0x73, 0x63, 0x61, 0x70, 0x65, + 0x43, 0x68, 0x61, 0x72, 0x61, 0x63, 0x74, 0x65, 0x72, 0x12, 0x1a, 0x0a, 0x08, 0x63, 0x6f, 0x6d, + 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x63, 0x6f, 0x6d, + 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x41, 0x0a, 0x1d, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x71, + 0x75, 0x6f, 0x74, 0x65, 0x64, 0x5f, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x5f, 0x64, 0x65, 0x6c, + 0x69, 0x6d, 0x69, 0x74, 0x65, 0x72, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x1a, 0x61, 0x6c, + 0x6c, 0x6f, 0x77, 0x51, 0x75, 0x6f, 0x74, 0x65, 0x64, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x44, + 0x65, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x65, 0x72, 0x1a, 0x1f, 0x0a, 0x09, 0x4a, 0x53, 0x4f, 0x4e, + 0x49, 0x6e, 0x70, 0x75, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x1a, 0x0e, 0x0a, 0x0c, 0x50, 0x61, 0x72, + 0x71, 0x75, 0x65, 0x74, 0x49, 0x6e, 0x70, 
0x75, 0x74, 0x1a, 0xf1, 0x03, 0x0a, 0x13, 0x4f, 0x75, + 0x74, 0x70, 0x75, 0x74, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x12, 0x5b, 0x0a, 0x0a, 0x63, 0x73, 0x76, 0x5f, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, + 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x53, 0x65, 0x72, 0x69, + 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x43, 0x53, 0x56, 0x4f, 0x75, 0x74, + 0x70, 0x75, 0x74, 0x52, 0x09, 0x63, 0x73, 0x76, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x12, 0x5e, + 0x0a, 0x0b, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x3d, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, + 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x2e, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, + 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4a, 0x53, 0x4f, 0x4e, 0x4f, 0x75, 0x74, 0x70, + 0x75, 0x74, 0x52, 0x0a, 0x6a, 0x73, 0x6f, 0x6e, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x1a, 0xe3, + 0x01, 0x0a, 0x09, 0x43, 0x53, 0x56, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x12, 0x21, 0x0a, 0x0c, + 0x71, 0x75, 0x6f, 0x74, 0x65, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0b, 0x71, 0x75, 0x6f, 0x74, 0x65, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x12, + 0x29, 0x0a, 0x10, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x5f, 0x64, 0x65, 0x6c, 0x69, 0x6d, 0x69, + 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x72, 0x65, 0x63, 0x6f, 0x72, + 0x64, 0x44, 0x65, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x65, 0x72, 0x12, 0x27, 0x0a, 0x0f, 0x66, 0x69, + 0x65, 0x6c, 0x64, 0x5f, 0x64, 0x65, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x65, 0x72, 
0x18, 0x03, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x0e, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x6c, 0x69, 0x6d, 0x69, + 0x74, 0x65, 0x72, 0x12, 0x29, 0x0a, 0x10, 0x71, 0x75, 0x6f, 0x74, 0x65, 0x5f, 0x63, 0x68, 0x61, + 0x72, 0x61, 0x63, 0x74, 0x6f, 0x65, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x71, + 0x75, 0x6f, 0x74, 0x65, 0x43, 0x68, 0x61, 0x72, 0x61, 0x63, 0x74, 0x6f, 0x65, 0x72, 0x12, 0x34, + 0x0a, 0x16, 0x71, 0x75, 0x6f, 0x74, 0x65, 0x5f, 0x65, 0x73, 0x63, 0x61, 0x70, 0x65, 0x5f, 0x63, + 0x68, 0x61, 0x72, 0x61, 0x63, 0x74, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x14, + 0x71, 0x75, 0x6f, 0x74, 0x65, 0x45, 0x73, 0x63, 0x61, 0x70, 0x65, 0x43, 0x68, 0x61, 0x72, 0x61, + 0x63, 0x74, 0x65, 0x72, 0x1a, 0x37, 0x0a, 0x0a, 0x4a, 0x53, 0x4f, 0x4e, 0x4f, 0x75, 0x74, 0x70, + 0x75, 0x74, 0x12, 0x29, 0x0a, 0x10, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x5f, 0x64, 0x65, 0x6c, + 0x69, 0x6d, 0x69, 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x72, 0x65, + 0x63, 0x6f, 0x72, 0x64, 0x44, 0x65, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x65, 0x72, 0x22, 0x29, 0x0a, + 0x0d, 0x51, 0x75, 0x65, 0x72, 0x69, 0x65, 0x64, 0x53, 0x74, 0x72, 0x69, 0x70, 0x65, 0x12, 0x18, + 0x0a, 0x07, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, + 0x07, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x73, 0x32, 0xa1, 0x1c, 0x0a, 0x0c, 0x56, 0x6f, 0x6c, + 0x75, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x12, 0x5c, 0x0a, 0x0b, 0x42, 0x61, 0x74, + 0x63, 0x68, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x12, 0x24, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, + 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x42, 0x61, 0x74, 0x63, + 0x68, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, + 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, + 0x62, 0x2e, 0x42, 0x61, 0x74, 0x63, 0x68, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 
0x73, 0x65, 0x22, 0x00, 0x12, 0x6e, 0x0a, 0x11, 0x56, 0x61, 0x63, 0x75, 0x75, + 0x6d, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x2a, 0x2e, 0x76, + 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, + 0x56, 0x61, 0x63, 0x75, 0x75, 0x6d, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, + 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2b, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, + 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x61, 0x63, 0x75, + 0x75, 0x6d, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x74, 0x0a, 0x13, 0x56, 0x61, 0x63, 0x75, 0x75, + 0x6d, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x12, 0x2c, + 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, + 0x62, 0x2e, 0x56, 0x61, 0x63, 0x75, 0x75, 0x6d, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6f, + 0x6d, 0x70, 0x61, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2d, 0x2e, 0x76, + 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, + 0x56, 0x61, 0x63, 0x75, 0x75, 0x6d, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6f, 0x6d, 0x70, + 0x61, 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x71, 0x0a, + 0x12, 0x56, 0x61, 0x63, 0x75, 0x75, 0x6d, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6f, 0x6d, + 0x6d, 0x69, 0x74, 0x12, 0x2b, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, + 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x61, 0x63, 0x75, 0x75, 0x6d, 0x56, 0x6f, 0x6c, + 0x75, 0x6d, 0x65, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x2c, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, + 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x61, 0x63, 0x75, 0x75, 
0x6d, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, + 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, + 0x12, 0x74, 0x0a, 0x13, 0x56, 0x61, 0x63, 0x75, 0x75, 0x6d, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, + 0x43, 0x6c, 0x65, 0x61, 0x6e, 0x75, 0x70, 0x12, 0x2c, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, + 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x61, 0x63, 0x75, 0x75, + 0x6d, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6c, 0x65, 0x61, 0x6e, 0x75, 0x70, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2d, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, + 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x61, 0x63, 0x75, 0x75, 0x6d, 0x56, + 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6c, 0x65, 0x61, 0x6e, 0x75, 0x70, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x6b, 0x0a, 0x10, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, + 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x29, 0x2e, 0x76, 0x6f, 0x6c, + 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x44, 0x65, + 0x6c, 0x65, 0x74, 0x65, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2a, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, + 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, + 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x22, 0x00, 0x12, 0x65, 0x0a, 0x0e, 0x41, 0x6c, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x65, 0x56, + 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x12, 0x27, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, + 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x41, 0x6c, 0x6c, 0x6f, 0x63, 0x61, 0x74, + 0x65, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x28, + 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 
0x70, + 0x62, 0x2e, 0x41, 0x6c, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x65, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x6b, 0x0a, 0x10, 0x56, 0x6f, + 0x6c, 0x75, 0x6d, 0x65, 0x53, 0x79, 0x6e, 0x63, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x29, + 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, + 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x53, 0x79, 0x6e, 0x63, 0x53, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2a, 0x2e, 0x76, 0x6f, 0x6c, 0x75, + 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, + 0x75, 0x6d, 0x65, 0x53, 0x79, 0x6e, 0x63, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x7c, 0x0a, 0x15, 0x56, 0x6f, 0x6c, 0x75, 0x6d, + 0x65, 0x49, 0x6e, 0x63, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x43, 0x6f, 0x70, 0x79, + 0x12, 0x2e, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, + 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x6e, 0x63, 0x72, 0x65, 0x6d, + 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x43, 0x6f, 0x70, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x2f, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, + 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x6e, 0x63, 0x72, 0x65, 0x6d, + 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x43, 0x6f, 0x70, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x22, 0x00, 0x30, 0x01, 0x12, 0x5c, 0x0a, 0x0b, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x4d, + 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x24, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, + 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x4d, 0x6f, + 0x75, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x76, 0x6f, 0x6c, + 0x75, 0x6d, 0x65, 0x5f, 0x73, 
0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, + 0x6c, 0x75, 0x6d, 0x65, 0x4d, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x22, 0x00, 0x12, 0x62, 0x0a, 0x0d, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x55, 0x6e, 0x6d, + 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x26, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, + 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x55, 0x6e, + 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x76, + 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, + 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x55, 0x6e, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x5f, 0x0a, 0x0c, 0x56, 0x6f, 0x6c, 0x75, 0x6d, + 0x65, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x12, 0x25, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, + 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, + 0x65, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, + 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, + 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x71, 0x0a, 0x12, 0x56, 0x6f, 0x6c, 0x75, + 0x6d, 0x65, 0x4d, 0x61, 0x72, 0x6b, 0x52, 0x65, 0x61, 0x64, 0x6f, 0x6e, 0x6c, 0x79, 0x12, 0x2b, + 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, + 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x4d, 0x61, 0x72, 0x6b, 0x52, 0x65, 0x61, 0x64, + 0x6f, 0x6e, 0x6c, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2c, 0x2e, 0x76, 0x6f, + 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, + 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x4d, 0x61, 0x72, 0x6b, 0x52, 0x65, 
0x61, 0x64, 0x6f, 0x6e, 0x6c, + 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x68, 0x0a, 0x0f, 0x56, + 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x12, 0x28, + 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, + 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, + 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, + 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, + 0x6d, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x59, 0x0a, 0x0a, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, + 0x6f, 0x70, 0x79, 0x12, 0x23, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, + 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6f, 0x70, + 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x24, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, + 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, + 0x6d, 0x65, 0x43, 0x6f, 0x70, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, + 0x12, 0x77, 0x0a, 0x14, 0x52, 0x65, 0x61, 0x64, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x46, 0x69, + 0x6c, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x2d, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, + 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x52, 0x65, 0x61, 0x64, + 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2e, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, + 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x56, + 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, + 0x65, 
0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x55, 0x0a, 0x08, 0x43, 0x6f, 0x70, + 0x79, 0x46, 0x69, 0x6c, 0x65, 0x12, 0x21, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, + 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x43, 0x6f, 0x70, 0x79, 0x46, 0x69, 0x6c, + 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, + 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x43, 0x6f, 0x70, 0x79, + 0x46, 0x69, 0x6c, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x30, 0x01, + 0x12, 0x6d, 0x0a, 0x10, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x54, 0x61, 0x69, 0x6c, 0x53, 0x65, + 0x6e, 0x64, 0x65, 0x72, 0x12, 0x29, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, + 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x54, 0x61, + 0x69, 0x6c, 0x53, 0x65, 0x6e, 0x64, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x2a, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, + 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x54, 0x61, 0x69, 0x6c, 0x53, 0x65, 0x6e, + 0x64, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x30, 0x01, 0x12, + 0x71, 0x0a, 0x12, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x54, 0x61, 0x69, 0x6c, 0x52, 0x65, 0x63, + 0x65, 0x69, 0x76, 0x65, 0x72, 0x12, 0x2b, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, + 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x54, + 0x61, 0x69, 0x6c, 0x52, 0x65, 0x63, 0x65, 0x69, 0x76, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x2c, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, + 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x54, 0x61, 0x69, 0x6c, + 0x52, 0x65, 0x63, 0x65, 0x69, 0x76, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x22, 0x00, 0x12, 0x7d, 0x0a, 0x16, 0x56, 
0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, + 0x61, 0x72, 0x64, 0x73, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x12, 0x2f, 0x2e, 0x76, + 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, + 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x47, 0x65, + 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x30, 0x2e, + 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, + 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x47, + 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, + 0x00, 0x12, 0x7a, 0x0a, 0x15, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, + 0x72, 0x64, 0x73, 0x52, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x12, 0x2e, 0x2e, 0x76, 0x6f, 0x6c, + 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, + 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x52, 0x65, 0x62, 0x75, + 0x69, 0x6c, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2f, 0x2e, 0x76, 0x6f, 0x6c, + 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, + 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x52, 0x65, 0x62, 0x75, + 0x69, 0x6c, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x71, 0x0a, + 0x12, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x43, + 0x6f, 0x70, 0x79, 0x12, 0x2b, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, + 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, + 0x68, 0x61, 0x72, 0x64, 0x73, 0x43, 0x6f, 0x70, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x2c, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 
0x76, 0x65, 0x72, + 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, + 0x64, 0x73, 0x43, 0x6f, 0x70, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, + 0x12, 0x77, 0x0a, 0x14, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, + 0x64, 0x73, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x12, 0x2d, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, + 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, + 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2e, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, + 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, + 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x74, 0x0a, 0x13, 0x56, 0x6f, 0x6c, + 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x4d, 0x6f, 0x75, 0x6e, 0x74, + 0x12, 0x2c, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, + 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, + 0x64, 0x73, 0x4d, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2d, + 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, + 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, + 0x4d, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, + 0x7a, 0x0a, 0x15, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, + 0x73, 0x55, 0x6e, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x2e, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, + 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, + 0x6d, 0x65, 0x45, 
0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x55, 0x6e, 0x6d, 0x6f, 0x75, 0x6e, + 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2f, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, + 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, + 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x55, 0x6e, 0x6d, 0x6f, 0x75, 0x6e, + 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x70, 0x0a, 0x11, 0x56, + 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x61, 0x64, + 0x12, 0x2a, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, + 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, + 0x64, 0x52, 0x65, 0x61, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2b, 0x2e, 0x76, + 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, + 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x61, + 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x30, 0x01, 0x12, 0x71, 0x0a, + 0x12, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x42, 0x6c, 0x6f, 0x62, 0x44, 0x65, 0x6c, + 0x65, 0x74, 0x65, 0x12, 0x2b, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, + 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x42, + 0x6c, 0x6f, 0x62, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x2c, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, + 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x42, 0x6c, 0x6f, 0x62, + 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, + 0x12, 0x7d, 0x0a, 0x16, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, + 0x64, 0x73, 0x54, 0x6f, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 
0x65, 0x12, 0x2f, 0x2e, 0x76, 0x6f, 0x6c, + 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, + 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x54, 0x6f, 0x56, 0x6f, + 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x30, 0x2e, 0x76, 0x6f, + 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, + 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x54, 0x6f, 0x56, + 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, + 0x88, 0x01, 0x0a, 0x19, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x54, 0x69, 0x65, 0x72, 0x4d, 0x6f, + 0x76, 0x65, 0x44, 0x61, 0x74, 0x54, 0x6f, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x12, 0x32, 0x2e, + 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, + 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x54, 0x69, 0x65, 0x72, 0x4d, 0x6f, 0x76, 0x65, 0x44, + 0x61, 0x74, 0x54, 0x6f, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x33, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, + 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x54, 0x69, 0x65, 0x72, 0x4d, + 0x6f, 0x76, 0x65, 0x44, 0x61, 0x74, 0x54, 0x6f, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x30, 0x01, 0x12, 0x8e, 0x01, 0x0a, 0x1b, 0x56, + 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x54, 0x69, 0x65, 0x72, 0x4d, 0x6f, 0x76, 0x65, 0x44, 0x61, 0x74, + 0x46, 0x72, 0x6f, 0x6d, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x12, 0x34, 0x2e, 0x76, 0x6f, 0x6c, + 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, + 0x6c, 0x75, 0x6d, 0x65, 0x54, 0x69, 0x65, 0x72, 0x4d, 0x6f, 0x76, 0x65, 0x44, 0x61, 0x74, 0x46, + 0x72, 0x6f, 0x6d, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 
0x74, + 0x1a, 0x35, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, + 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x54, 0x69, 0x65, 0x72, 0x4d, 0x6f, + 0x76, 0x65, 0x44, 0x61, 0x74, 0x46, 0x72, 0x6f, 0x6d, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x30, 0x01, 0x12, 0x71, 0x0a, 0x12, 0x56, + 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x75, + 0x73, 0x12, 0x2b, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, + 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x76, 0x65, + 0x72, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2c, + 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, + 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, 0x74, + 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x4c, + 0x0a, 0x05, 0x51, 0x75, 0x65, 0x72, 0x79, 0x12, 0x1e, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, + 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, + 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x69, + 0x65, 0x64, 0x53, 0x74, 0x72, 0x69, 0x70, 0x65, 0x22, 0x00, 0x30, 0x01, 0x42, 0x39, 0x5a, 0x37, + 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x68, 0x72, 0x69, 0x73, + 0x6c, 0x75, 0x73, 0x66, 0x2f, 0x73, 0x65, 0x61, 0x77, 0x65, 0x65, 0x64, 0x66, 0x73, 0x2f, 0x77, + 0x65, 0x65, 0x64, 0x2f, 0x70, 0x62, 0x2f, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, + 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + 
file_volume_server_proto_rawDescOnce sync.Once + file_volume_server_proto_rawDescData = file_volume_server_proto_rawDesc +) + +func file_volume_server_proto_rawDescGZIP() []byte { + file_volume_server_proto_rawDescOnce.Do(func() { + file_volume_server_proto_rawDescData = protoimpl.X.CompressGZIP(file_volume_server_proto_rawDescData) + }) + return file_volume_server_proto_rawDescData +} + +var file_volume_server_proto_msgTypes = make([]protoimpl.MessageInfo, 78) +var file_volume_server_proto_goTypes = []interface{}{ + (*BatchDeleteRequest)(nil), // 0: volume_server_pb.BatchDeleteRequest + (*BatchDeleteResponse)(nil), // 1: volume_server_pb.BatchDeleteResponse + (*DeleteResult)(nil), // 2: volume_server_pb.DeleteResult + (*Empty)(nil), // 3: volume_server_pb.Empty + (*VacuumVolumeCheckRequest)(nil), // 4: volume_server_pb.VacuumVolumeCheckRequest + (*VacuumVolumeCheckResponse)(nil), // 5: volume_server_pb.VacuumVolumeCheckResponse + (*VacuumVolumeCompactRequest)(nil), // 6: volume_server_pb.VacuumVolumeCompactRequest + (*VacuumVolumeCompactResponse)(nil), // 7: volume_server_pb.VacuumVolumeCompactResponse + (*VacuumVolumeCommitRequest)(nil), // 8: volume_server_pb.VacuumVolumeCommitRequest + (*VacuumVolumeCommitResponse)(nil), // 9: volume_server_pb.VacuumVolumeCommitResponse + (*VacuumVolumeCleanupRequest)(nil), // 10: volume_server_pb.VacuumVolumeCleanupRequest + (*VacuumVolumeCleanupResponse)(nil), // 11: volume_server_pb.VacuumVolumeCleanupResponse + (*DeleteCollectionRequest)(nil), // 12: volume_server_pb.DeleteCollectionRequest + (*DeleteCollectionResponse)(nil), // 13: volume_server_pb.DeleteCollectionResponse + (*AllocateVolumeRequest)(nil), // 14: volume_server_pb.AllocateVolumeRequest + (*AllocateVolumeResponse)(nil), // 15: volume_server_pb.AllocateVolumeResponse + (*VolumeSyncStatusRequest)(nil), // 16: volume_server_pb.VolumeSyncStatusRequest + (*VolumeSyncStatusResponse)(nil), // 17: volume_server_pb.VolumeSyncStatusResponse + 
(*VolumeIncrementalCopyRequest)(nil), // 18: volume_server_pb.VolumeIncrementalCopyRequest + (*VolumeIncrementalCopyResponse)(nil), // 19: volume_server_pb.VolumeIncrementalCopyResponse + (*VolumeMountRequest)(nil), // 20: volume_server_pb.VolumeMountRequest + (*VolumeMountResponse)(nil), // 21: volume_server_pb.VolumeMountResponse + (*VolumeUnmountRequest)(nil), // 22: volume_server_pb.VolumeUnmountRequest + (*VolumeUnmountResponse)(nil), // 23: volume_server_pb.VolumeUnmountResponse + (*VolumeDeleteRequest)(nil), // 24: volume_server_pb.VolumeDeleteRequest + (*VolumeDeleteResponse)(nil), // 25: volume_server_pb.VolumeDeleteResponse + (*VolumeMarkReadonlyRequest)(nil), // 26: volume_server_pb.VolumeMarkReadonlyRequest + (*VolumeMarkReadonlyResponse)(nil), // 27: volume_server_pb.VolumeMarkReadonlyResponse + (*VolumeConfigureRequest)(nil), // 28: volume_server_pb.VolumeConfigureRequest + (*VolumeConfigureResponse)(nil), // 29: volume_server_pb.VolumeConfigureResponse + (*VolumeCopyRequest)(nil), // 30: volume_server_pb.VolumeCopyRequest + (*VolumeCopyResponse)(nil), // 31: volume_server_pb.VolumeCopyResponse + (*CopyFileRequest)(nil), // 32: volume_server_pb.CopyFileRequest + (*CopyFileResponse)(nil), // 33: volume_server_pb.CopyFileResponse + (*VolumeTailSenderRequest)(nil), // 34: volume_server_pb.VolumeTailSenderRequest + (*VolumeTailSenderResponse)(nil), // 35: volume_server_pb.VolumeTailSenderResponse + (*VolumeTailReceiverRequest)(nil), // 36: volume_server_pb.VolumeTailReceiverRequest + (*VolumeTailReceiverResponse)(nil), // 37: volume_server_pb.VolumeTailReceiverResponse + (*VolumeEcShardsGenerateRequest)(nil), // 38: volume_server_pb.VolumeEcShardsGenerateRequest + (*VolumeEcShardsGenerateResponse)(nil), // 39: volume_server_pb.VolumeEcShardsGenerateResponse + (*VolumeEcShardsRebuildRequest)(nil), // 40: volume_server_pb.VolumeEcShardsRebuildRequest + (*VolumeEcShardsRebuildResponse)(nil), // 41: volume_server_pb.VolumeEcShardsRebuildResponse + 
(*VolumeEcShardsCopyRequest)(nil), // 42: volume_server_pb.VolumeEcShardsCopyRequest + (*VolumeEcShardsCopyResponse)(nil), // 43: volume_server_pb.VolumeEcShardsCopyResponse + (*VolumeEcShardsDeleteRequest)(nil), // 44: volume_server_pb.VolumeEcShardsDeleteRequest + (*VolumeEcShardsDeleteResponse)(nil), // 45: volume_server_pb.VolumeEcShardsDeleteResponse + (*VolumeEcShardsMountRequest)(nil), // 46: volume_server_pb.VolumeEcShardsMountRequest + (*VolumeEcShardsMountResponse)(nil), // 47: volume_server_pb.VolumeEcShardsMountResponse + (*VolumeEcShardsUnmountRequest)(nil), // 48: volume_server_pb.VolumeEcShardsUnmountRequest + (*VolumeEcShardsUnmountResponse)(nil), // 49: volume_server_pb.VolumeEcShardsUnmountResponse + (*VolumeEcShardReadRequest)(nil), // 50: volume_server_pb.VolumeEcShardReadRequest + (*VolumeEcShardReadResponse)(nil), // 51: volume_server_pb.VolumeEcShardReadResponse + (*VolumeEcBlobDeleteRequest)(nil), // 52: volume_server_pb.VolumeEcBlobDeleteRequest + (*VolumeEcBlobDeleteResponse)(nil), // 53: volume_server_pb.VolumeEcBlobDeleteResponse + (*VolumeEcShardsToVolumeRequest)(nil), // 54: volume_server_pb.VolumeEcShardsToVolumeRequest + (*VolumeEcShardsToVolumeResponse)(nil), // 55: volume_server_pb.VolumeEcShardsToVolumeResponse + (*ReadVolumeFileStatusRequest)(nil), // 56: volume_server_pb.ReadVolumeFileStatusRequest + (*ReadVolumeFileStatusResponse)(nil), // 57: volume_server_pb.ReadVolumeFileStatusResponse + (*DiskStatus)(nil), // 58: volume_server_pb.DiskStatus + (*MemStatus)(nil), // 59: volume_server_pb.MemStatus + (*RemoteFile)(nil), // 60: volume_server_pb.RemoteFile + (*VolumeInfo)(nil), // 61: volume_server_pb.VolumeInfo + (*VolumeTierMoveDatToRemoteRequest)(nil), // 62: volume_server_pb.VolumeTierMoveDatToRemoteRequest + (*VolumeTierMoveDatToRemoteResponse)(nil), // 63: volume_server_pb.VolumeTierMoveDatToRemoteResponse + (*VolumeTierMoveDatFromRemoteRequest)(nil), // 64: volume_server_pb.VolumeTierMoveDatFromRemoteRequest + 
(*VolumeTierMoveDatFromRemoteResponse)(nil), // 65: volume_server_pb.VolumeTierMoveDatFromRemoteResponse + (*VolumeServerStatusRequest)(nil), // 66: volume_server_pb.VolumeServerStatusRequest + (*VolumeServerStatusResponse)(nil), // 67: volume_server_pb.VolumeServerStatusResponse + (*QueryRequest)(nil), // 68: volume_server_pb.QueryRequest + (*QueriedStripe)(nil), // 69: volume_server_pb.QueriedStripe + (*QueryRequest_Filter)(nil), // 70: volume_server_pb.QueryRequest.Filter + (*QueryRequest_InputSerialization)(nil), // 71: volume_server_pb.QueryRequest.InputSerialization + (*QueryRequest_OutputSerialization)(nil), // 72: volume_server_pb.QueryRequest.OutputSerialization + (*QueryRequest_InputSerialization_CSVInput)(nil), // 73: volume_server_pb.QueryRequest.InputSerialization.CSVInput + (*QueryRequest_InputSerialization_JSONInput)(nil), // 74: volume_server_pb.QueryRequest.InputSerialization.JSONInput + (*QueryRequest_InputSerialization_ParquetInput)(nil), // 75: volume_server_pb.QueryRequest.InputSerialization.ParquetInput + (*QueryRequest_OutputSerialization_CSVOutput)(nil), // 76: volume_server_pb.QueryRequest.OutputSerialization.CSVOutput + (*QueryRequest_OutputSerialization_JSONOutput)(nil), // 77: volume_server_pb.QueryRequest.OutputSerialization.JSONOutput +} +var file_volume_server_proto_depIdxs = []int32{ + 2, // 0: volume_server_pb.BatchDeleteResponse.results:type_name -> volume_server_pb.DeleteResult + 60, // 1: volume_server_pb.VolumeInfo.files:type_name -> volume_server_pb.RemoteFile + 58, // 2: volume_server_pb.VolumeServerStatusResponse.disk_statuses:type_name -> volume_server_pb.DiskStatus + 59, // 3: volume_server_pb.VolumeServerStatusResponse.memory_status:type_name -> volume_server_pb.MemStatus + 70, // 4: volume_server_pb.QueryRequest.filter:type_name -> volume_server_pb.QueryRequest.Filter + 71, // 5: volume_server_pb.QueryRequest.input_serialization:type_name -> volume_server_pb.QueryRequest.InputSerialization + 72, // 6: 
volume_server_pb.QueryRequest.output_serialization:type_name -> volume_server_pb.QueryRequest.OutputSerialization + 73, // 7: volume_server_pb.QueryRequest.InputSerialization.csv_input:type_name -> volume_server_pb.QueryRequest.InputSerialization.CSVInput + 74, // 8: volume_server_pb.QueryRequest.InputSerialization.json_input:type_name -> volume_server_pb.QueryRequest.InputSerialization.JSONInput + 75, // 9: volume_server_pb.QueryRequest.InputSerialization.parquet_input:type_name -> volume_server_pb.QueryRequest.InputSerialization.ParquetInput + 76, // 10: volume_server_pb.QueryRequest.OutputSerialization.csv_output:type_name -> volume_server_pb.QueryRequest.OutputSerialization.CSVOutput + 77, // 11: volume_server_pb.QueryRequest.OutputSerialization.json_output:type_name -> volume_server_pb.QueryRequest.OutputSerialization.JSONOutput + 0, // 12: volume_server_pb.VolumeServer.BatchDelete:input_type -> volume_server_pb.BatchDeleteRequest + 4, // 13: volume_server_pb.VolumeServer.VacuumVolumeCheck:input_type -> volume_server_pb.VacuumVolumeCheckRequest + 6, // 14: volume_server_pb.VolumeServer.VacuumVolumeCompact:input_type -> volume_server_pb.VacuumVolumeCompactRequest + 8, // 15: volume_server_pb.VolumeServer.VacuumVolumeCommit:input_type -> volume_server_pb.VacuumVolumeCommitRequest + 10, // 16: volume_server_pb.VolumeServer.VacuumVolumeCleanup:input_type -> volume_server_pb.VacuumVolumeCleanupRequest + 12, // 17: volume_server_pb.VolumeServer.DeleteCollection:input_type -> volume_server_pb.DeleteCollectionRequest + 14, // 18: volume_server_pb.VolumeServer.AllocateVolume:input_type -> volume_server_pb.AllocateVolumeRequest + 16, // 19: volume_server_pb.VolumeServer.VolumeSyncStatus:input_type -> volume_server_pb.VolumeSyncStatusRequest + 18, // 20: volume_server_pb.VolumeServer.VolumeIncrementalCopy:input_type -> volume_server_pb.VolumeIncrementalCopyRequest + 20, // 21: volume_server_pb.VolumeServer.VolumeMount:input_type -> volume_server_pb.VolumeMountRequest + 
22, // 22: volume_server_pb.VolumeServer.VolumeUnmount:input_type -> volume_server_pb.VolumeUnmountRequest + 24, // 23: volume_server_pb.VolumeServer.VolumeDelete:input_type -> volume_server_pb.VolumeDeleteRequest + 26, // 24: volume_server_pb.VolumeServer.VolumeMarkReadonly:input_type -> volume_server_pb.VolumeMarkReadonlyRequest + 28, // 25: volume_server_pb.VolumeServer.VolumeConfigure:input_type -> volume_server_pb.VolumeConfigureRequest + 30, // 26: volume_server_pb.VolumeServer.VolumeCopy:input_type -> volume_server_pb.VolumeCopyRequest + 56, // 27: volume_server_pb.VolumeServer.ReadVolumeFileStatus:input_type -> volume_server_pb.ReadVolumeFileStatusRequest + 32, // 28: volume_server_pb.VolumeServer.CopyFile:input_type -> volume_server_pb.CopyFileRequest + 34, // 29: volume_server_pb.VolumeServer.VolumeTailSender:input_type -> volume_server_pb.VolumeTailSenderRequest + 36, // 30: volume_server_pb.VolumeServer.VolumeTailReceiver:input_type -> volume_server_pb.VolumeTailReceiverRequest + 38, // 31: volume_server_pb.VolumeServer.VolumeEcShardsGenerate:input_type -> volume_server_pb.VolumeEcShardsGenerateRequest + 40, // 32: volume_server_pb.VolumeServer.VolumeEcShardsRebuild:input_type -> volume_server_pb.VolumeEcShardsRebuildRequest + 42, // 33: volume_server_pb.VolumeServer.VolumeEcShardsCopy:input_type -> volume_server_pb.VolumeEcShardsCopyRequest + 44, // 34: volume_server_pb.VolumeServer.VolumeEcShardsDelete:input_type -> volume_server_pb.VolumeEcShardsDeleteRequest + 46, // 35: volume_server_pb.VolumeServer.VolumeEcShardsMount:input_type -> volume_server_pb.VolumeEcShardsMountRequest + 48, // 36: volume_server_pb.VolumeServer.VolumeEcShardsUnmount:input_type -> volume_server_pb.VolumeEcShardsUnmountRequest + 50, // 37: volume_server_pb.VolumeServer.VolumeEcShardRead:input_type -> volume_server_pb.VolumeEcShardReadRequest + 52, // 38: volume_server_pb.VolumeServer.VolumeEcBlobDelete:input_type -> volume_server_pb.VolumeEcBlobDeleteRequest + 54, // 39: 
volume_server_pb.VolumeServer.VolumeEcShardsToVolume:input_type -> volume_server_pb.VolumeEcShardsToVolumeRequest + 62, // 40: volume_server_pb.VolumeServer.VolumeTierMoveDatToRemote:input_type -> volume_server_pb.VolumeTierMoveDatToRemoteRequest + 64, // 41: volume_server_pb.VolumeServer.VolumeTierMoveDatFromRemote:input_type -> volume_server_pb.VolumeTierMoveDatFromRemoteRequest + 66, // 42: volume_server_pb.VolumeServer.VolumeServerStatus:input_type -> volume_server_pb.VolumeServerStatusRequest + 68, // 43: volume_server_pb.VolumeServer.Query:input_type -> volume_server_pb.QueryRequest + 1, // 44: volume_server_pb.VolumeServer.BatchDelete:output_type -> volume_server_pb.BatchDeleteResponse + 5, // 45: volume_server_pb.VolumeServer.VacuumVolumeCheck:output_type -> volume_server_pb.VacuumVolumeCheckResponse + 7, // 46: volume_server_pb.VolumeServer.VacuumVolumeCompact:output_type -> volume_server_pb.VacuumVolumeCompactResponse + 9, // 47: volume_server_pb.VolumeServer.VacuumVolumeCommit:output_type -> volume_server_pb.VacuumVolumeCommitResponse + 11, // 48: volume_server_pb.VolumeServer.VacuumVolumeCleanup:output_type -> volume_server_pb.VacuumVolumeCleanupResponse + 13, // 49: volume_server_pb.VolumeServer.DeleteCollection:output_type -> volume_server_pb.DeleteCollectionResponse + 15, // 50: volume_server_pb.VolumeServer.AllocateVolume:output_type -> volume_server_pb.AllocateVolumeResponse + 17, // 51: volume_server_pb.VolumeServer.VolumeSyncStatus:output_type -> volume_server_pb.VolumeSyncStatusResponse + 19, // 52: volume_server_pb.VolumeServer.VolumeIncrementalCopy:output_type -> volume_server_pb.VolumeIncrementalCopyResponse + 21, // 53: volume_server_pb.VolumeServer.VolumeMount:output_type -> volume_server_pb.VolumeMountResponse + 23, // 54: volume_server_pb.VolumeServer.VolumeUnmount:output_type -> volume_server_pb.VolumeUnmountResponse + 25, // 55: volume_server_pb.VolumeServer.VolumeDelete:output_type -> volume_server_pb.VolumeDeleteResponse + 27, // 56: 
volume_server_pb.VolumeServer.VolumeMarkReadonly:output_type -> volume_server_pb.VolumeMarkReadonlyResponse + 29, // 57: volume_server_pb.VolumeServer.VolumeConfigure:output_type -> volume_server_pb.VolumeConfigureResponse + 31, // 58: volume_server_pb.VolumeServer.VolumeCopy:output_type -> volume_server_pb.VolumeCopyResponse + 57, // 59: volume_server_pb.VolumeServer.ReadVolumeFileStatus:output_type -> volume_server_pb.ReadVolumeFileStatusResponse + 33, // 60: volume_server_pb.VolumeServer.CopyFile:output_type -> volume_server_pb.CopyFileResponse + 35, // 61: volume_server_pb.VolumeServer.VolumeTailSender:output_type -> volume_server_pb.VolumeTailSenderResponse + 37, // 62: volume_server_pb.VolumeServer.VolumeTailReceiver:output_type -> volume_server_pb.VolumeTailReceiverResponse + 39, // 63: volume_server_pb.VolumeServer.VolumeEcShardsGenerate:output_type -> volume_server_pb.VolumeEcShardsGenerateResponse + 41, // 64: volume_server_pb.VolumeServer.VolumeEcShardsRebuild:output_type -> volume_server_pb.VolumeEcShardsRebuildResponse + 43, // 65: volume_server_pb.VolumeServer.VolumeEcShardsCopy:output_type -> volume_server_pb.VolumeEcShardsCopyResponse + 45, // 66: volume_server_pb.VolumeServer.VolumeEcShardsDelete:output_type -> volume_server_pb.VolumeEcShardsDeleteResponse + 47, // 67: volume_server_pb.VolumeServer.VolumeEcShardsMount:output_type -> volume_server_pb.VolumeEcShardsMountResponse + 49, // 68: volume_server_pb.VolumeServer.VolumeEcShardsUnmount:output_type -> volume_server_pb.VolumeEcShardsUnmountResponse + 51, // 69: volume_server_pb.VolumeServer.VolumeEcShardRead:output_type -> volume_server_pb.VolumeEcShardReadResponse + 53, // 70: volume_server_pb.VolumeServer.VolumeEcBlobDelete:output_type -> volume_server_pb.VolumeEcBlobDeleteResponse + 55, // 71: volume_server_pb.VolumeServer.VolumeEcShardsToVolume:output_type -> volume_server_pb.VolumeEcShardsToVolumeResponse + 63, // 72: volume_server_pb.VolumeServer.VolumeTierMoveDatToRemote:output_type -> 
volume_server_pb.VolumeTierMoveDatToRemoteResponse + 65, // 73: volume_server_pb.VolumeServer.VolumeTierMoveDatFromRemote:output_type -> volume_server_pb.VolumeTierMoveDatFromRemoteResponse + 67, // 74: volume_server_pb.VolumeServer.VolumeServerStatus:output_type -> volume_server_pb.VolumeServerStatusResponse + 69, // 75: volume_server_pb.VolumeServer.Query:output_type -> volume_server_pb.QueriedStripe + 44, // [44:76] is the sub-list for method output_type + 12, // [12:44] is the sub-list for method input_type + 12, // [12:12] is the sub-list for extension type_name + 12, // [12:12] is the sub-list for extension extendee + 0, // [0:12] is the sub-list for field type_name +} + +func init() { file_volume_server_proto_init() } +func file_volume_server_proto_init() { + if File_volume_server_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_volume_server_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BatchDeleteRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BatchDeleteResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DeleteResult); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Empty); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := 
v.(*VacuumVolumeCheckRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VacuumVolumeCheckResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VacuumVolumeCompactRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VacuumVolumeCompactResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VacuumVolumeCommitRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VacuumVolumeCommitResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VacuumVolumeCleanupRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VacuumVolumeCleanupResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + 
return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DeleteCollectionRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DeleteCollectionResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AllocateVolumeRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AllocateVolumeResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeSyncStatusRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeSyncStatusResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeIncrementalCopyRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[19].Exporter = func(v 
interface{}, i int) interface{} { + switch v := v.(*VolumeIncrementalCopyResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeMountRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeMountResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeUnmountRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeUnmountResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeDeleteRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeDeleteResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeMarkReadonlyRequest); i { + case 0: + return &v.state + case 1: + return 
&v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeMarkReadonlyResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[28].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeConfigureRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[29].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeConfigureResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[30].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeCopyRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[31].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeCopyResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[32].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CopyFileRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[33].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CopyFileResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[34].Exporter = func(v 
interface{}, i int) interface{} { + switch v := v.(*VolumeTailSenderRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[35].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeTailSenderResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[36].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeTailReceiverRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[37].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeTailReceiverResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[38].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeEcShardsGenerateRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[39].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeEcShardsGenerateResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[40].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeEcShardsRebuildRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[41].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeEcShardsRebuildResponse); i { + case 0: + 
return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[42].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeEcShardsCopyRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[43].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeEcShardsCopyResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[44].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeEcShardsDeleteRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[45].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeEcShardsDeleteResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[46].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeEcShardsMountRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[47].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeEcShardsMountResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[48].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeEcShardsUnmountRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + 
return nil + } + } + file_volume_server_proto_msgTypes[49].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeEcShardsUnmountResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[50].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeEcShardReadRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[51].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeEcShardReadResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[52].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeEcBlobDeleteRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[53].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeEcBlobDeleteResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[54].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeEcShardsToVolumeRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[55].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeEcShardsToVolumeResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[56].Exporter = func(v interface{}, i int) 
interface{} { + switch v := v.(*ReadVolumeFileStatusRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[57].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ReadVolumeFileStatusResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[58].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DiskStatus); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[59].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MemStatus); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[60].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RemoteFile); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[61].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeInfo); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[62].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeTierMoveDatToRemoteRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[63].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeTierMoveDatToRemoteResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return 
&v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[64].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeTierMoveDatFromRemoteRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[65].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeTierMoveDatFromRemoteResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[66].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeServerStatusRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[67].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeServerStatusResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[68].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*QueryRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[69].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*QueriedStripe); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[70].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*QueryRequest_Filter); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[71].Exporter = func(v interface{}, 
i int) interface{} { + switch v := v.(*QueryRequest_InputSerialization); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[72].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*QueryRequest_OutputSerialization); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[73].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*QueryRequest_InputSerialization_CSVInput); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[74].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*QueryRequest_InputSerialization_JSONInput); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[75].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*QueryRequest_InputSerialization_ParquetInput); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[76].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*QueryRequest_OutputSerialization_CSVOutput); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[77].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*QueryRequest_OutputSerialization_JSONOutput); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: 
protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_volume_server_proto_rawDesc, + NumEnums: 0, + NumMessages: 78, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_volume_server_proto_goTypes, + DependencyIndexes: file_volume_server_proto_depIdxs, + MessageInfos: file_volume_server_proto_msgTypes, + }.Build() + File_volume_server_proto = out.File + file_volume_server_proto_rawDesc = nil + file_volume_server_proto_goTypes = nil + file_volume_server_proto_depIdxs = nil +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConnInterface + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion6 + +// VolumeServerClient is the client API for VolumeServer service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type VolumeServerClient interface { + //Experts only: takes multiple fid parameters. This function does not propagate deletes to replicas. 
+ BatchDelete(ctx context.Context, in *BatchDeleteRequest, opts ...grpc.CallOption) (*BatchDeleteResponse, error) + VacuumVolumeCheck(ctx context.Context, in *VacuumVolumeCheckRequest, opts ...grpc.CallOption) (*VacuumVolumeCheckResponse, error) + VacuumVolumeCompact(ctx context.Context, in *VacuumVolumeCompactRequest, opts ...grpc.CallOption) (*VacuumVolumeCompactResponse, error) + VacuumVolumeCommit(ctx context.Context, in *VacuumVolumeCommitRequest, opts ...grpc.CallOption) (*VacuumVolumeCommitResponse, error) + VacuumVolumeCleanup(ctx context.Context, in *VacuumVolumeCleanupRequest, opts ...grpc.CallOption) (*VacuumVolumeCleanupResponse, error) + DeleteCollection(ctx context.Context, in *DeleteCollectionRequest, opts ...grpc.CallOption) (*DeleteCollectionResponse, error) + AllocateVolume(ctx context.Context, in *AllocateVolumeRequest, opts ...grpc.CallOption) (*AllocateVolumeResponse, error) + VolumeSyncStatus(ctx context.Context, in *VolumeSyncStatusRequest, opts ...grpc.CallOption) (*VolumeSyncStatusResponse, error) + VolumeIncrementalCopy(ctx context.Context, in *VolumeIncrementalCopyRequest, opts ...grpc.CallOption) (VolumeServer_VolumeIncrementalCopyClient, error) + VolumeMount(ctx context.Context, in *VolumeMountRequest, opts ...grpc.CallOption) (*VolumeMountResponse, error) + VolumeUnmount(ctx context.Context, in *VolumeUnmountRequest, opts ...grpc.CallOption) (*VolumeUnmountResponse, error) + VolumeDelete(ctx context.Context, in *VolumeDeleteRequest, opts ...grpc.CallOption) (*VolumeDeleteResponse, error) + VolumeMarkReadonly(ctx context.Context, in *VolumeMarkReadonlyRequest, opts ...grpc.CallOption) (*VolumeMarkReadonlyResponse, error) + VolumeConfigure(ctx context.Context, in *VolumeConfigureRequest, opts ...grpc.CallOption) (*VolumeConfigureResponse, error) + // copy the .idx .dat files, and mount this volume + VolumeCopy(ctx context.Context, in *VolumeCopyRequest, opts ...grpc.CallOption) (*VolumeCopyResponse, error) + ReadVolumeFileStatus(ctx 
context.Context, in *ReadVolumeFileStatusRequest, opts ...grpc.CallOption) (*ReadVolumeFileStatusResponse, error) + CopyFile(ctx context.Context, in *CopyFileRequest, opts ...grpc.CallOption) (VolumeServer_CopyFileClient, error) + VolumeTailSender(ctx context.Context, in *VolumeTailSenderRequest, opts ...grpc.CallOption) (VolumeServer_VolumeTailSenderClient, error) + VolumeTailReceiver(ctx context.Context, in *VolumeTailReceiverRequest, opts ...grpc.CallOption) (*VolumeTailReceiverResponse, error) + // erasure coding + VolumeEcShardsGenerate(ctx context.Context, in *VolumeEcShardsGenerateRequest, opts ...grpc.CallOption) (*VolumeEcShardsGenerateResponse, error) + VolumeEcShardsRebuild(ctx context.Context, in *VolumeEcShardsRebuildRequest, opts ...grpc.CallOption) (*VolumeEcShardsRebuildResponse, error) + VolumeEcShardsCopy(ctx context.Context, in *VolumeEcShardsCopyRequest, opts ...grpc.CallOption) (*VolumeEcShardsCopyResponse, error) + VolumeEcShardsDelete(ctx context.Context, in *VolumeEcShardsDeleteRequest, opts ...grpc.CallOption) (*VolumeEcShardsDeleteResponse, error) + VolumeEcShardsMount(ctx context.Context, in *VolumeEcShardsMountRequest, opts ...grpc.CallOption) (*VolumeEcShardsMountResponse, error) + VolumeEcShardsUnmount(ctx context.Context, in *VolumeEcShardsUnmountRequest, opts ...grpc.CallOption) (*VolumeEcShardsUnmountResponse, error) + VolumeEcShardRead(ctx context.Context, in *VolumeEcShardReadRequest, opts ...grpc.CallOption) (VolumeServer_VolumeEcShardReadClient, error) + VolumeEcBlobDelete(ctx context.Context, in *VolumeEcBlobDeleteRequest, opts ...grpc.CallOption) (*VolumeEcBlobDeleteResponse, error) + VolumeEcShardsToVolume(ctx context.Context, in *VolumeEcShardsToVolumeRequest, opts ...grpc.CallOption) (*VolumeEcShardsToVolumeResponse, error) + // tiered storage + VolumeTierMoveDatToRemote(ctx context.Context, in *VolumeTierMoveDatToRemoteRequest, opts ...grpc.CallOption) (VolumeServer_VolumeTierMoveDatToRemoteClient, error) + 
VolumeTierMoveDatFromRemote(ctx context.Context, in *VolumeTierMoveDatFromRemoteRequest, opts ...grpc.CallOption) (VolumeServer_VolumeTierMoveDatFromRemoteClient, error) + VolumeServerStatus(ctx context.Context, in *VolumeServerStatusRequest, opts ...grpc.CallOption) (*VolumeServerStatusResponse, error) + // query + Query(ctx context.Context, in *QueryRequest, opts ...grpc.CallOption) (VolumeServer_QueryClient, error) +} + +type volumeServerClient struct { + cc grpc.ClientConnInterface +} + +func NewVolumeServerClient(cc grpc.ClientConnInterface) VolumeServerClient { + return &volumeServerClient{cc} +} + +func (c *volumeServerClient) BatchDelete(ctx context.Context, in *BatchDeleteRequest, opts ...grpc.CallOption) (*BatchDeleteResponse, error) { + out := new(BatchDeleteResponse) + err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/BatchDelete", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *volumeServerClient) VacuumVolumeCheck(ctx context.Context, in *VacuumVolumeCheckRequest, opts ...grpc.CallOption) (*VacuumVolumeCheckResponse, error) { + out := new(VacuumVolumeCheckResponse) + err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/VacuumVolumeCheck", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *volumeServerClient) VacuumVolumeCompact(ctx context.Context, in *VacuumVolumeCompactRequest, opts ...grpc.CallOption) (*VacuumVolumeCompactResponse, error) { + out := new(VacuumVolumeCompactResponse) + err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/VacuumVolumeCompact", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *volumeServerClient) VacuumVolumeCommit(ctx context.Context, in *VacuumVolumeCommitRequest, opts ...grpc.CallOption) (*VacuumVolumeCommitResponse, error) { + out := new(VacuumVolumeCommitResponse) + err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/VacuumVolumeCommit", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *volumeServerClient) VacuumVolumeCleanup(ctx context.Context, in *VacuumVolumeCleanupRequest, opts ...grpc.CallOption) (*VacuumVolumeCleanupResponse, error) { + out := new(VacuumVolumeCleanupResponse) + err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/VacuumVolumeCleanup", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *volumeServerClient) DeleteCollection(ctx context.Context, in *DeleteCollectionRequest, opts ...grpc.CallOption) (*DeleteCollectionResponse, error) { + out := new(DeleteCollectionResponse) + err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/DeleteCollection", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *volumeServerClient) AllocateVolume(ctx context.Context, in *AllocateVolumeRequest, opts ...grpc.CallOption) (*AllocateVolumeResponse, error) { + out := new(AllocateVolumeResponse) + err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/AllocateVolume", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *volumeServerClient) VolumeSyncStatus(ctx context.Context, in *VolumeSyncStatusRequest, opts ...grpc.CallOption) (*VolumeSyncStatusResponse, error) { + out := new(VolumeSyncStatusResponse) + err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeSyncStatus", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *volumeServerClient) VolumeIncrementalCopy(ctx context.Context, in *VolumeIncrementalCopyRequest, opts ...grpc.CallOption) (VolumeServer_VolumeIncrementalCopyClient, error) { + stream, err := c.cc.NewStream(ctx, &_VolumeServer_serviceDesc.Streams[0], "/volume_server_pb.VolumeServer/VolumeIncrementalCopy", opts...) 
+ if err != nil { + return nil, err + } + x := &volumeServerVolumeIncrementalCopyClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type VolumeServer_VolumeIncrementalCopyClient interface { + Recv() (*VolumeIncrementalCopyResponse, error) + grpc.ClientStream +} + +type volumeServerVolumeIncrementalCopyClient struct { + grpc.ClientStream +} + +func (x *volumeServerVolumeIncrementalCopyClient) Recv() (*VolumeIncrementalCopyResponse, error) { + m := new(VolumeIncrementalCopyResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil } func (c *volumeServerClient) VolumeMount(ctx context.Context, in *VolumeMountRequest, opts ...grpc.CallOption) (*VolumeMountResponse, error) { out := new(VolumeMountResponse) - err := grpc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeMount", in, out, c.cc, opts...) + err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeMount", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *volumeServerClient) VolumeUnmount(ctx context.Context, in *VolumeUnmountRequest, opts ...grpc.CallOption) (*VolumeUnmountResponse, error) { + out := new(VolumeUnmountResponse) + err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeUnmount", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *volumeServerClient) VolumeDelete(ctx context.Context, in *VolumeDeleteRequest, opts ...grpc.CallOption) (*VolumeDeleteResponse, error) { + out := new(VolumeDeleteResponse) + err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeDelete", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *volumeServerClient) VolumeMarkReadonly(ctx context.Context, in *VolumeMarkReadonlyRequest, opts ...grpc.CallOption) (*VolumeMarkReadonlyResponse, error) { + out := new(VolumeMarkReadonlyResponse) + err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeMarkReadonly", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *volumeServerClient) VolumeConfigure(ctx context.Context, in *VolumeConfigureRequest, opts ...grpc.CallOption) (*VolumeConfigureResponse, error) { + out := new(VolumeConfigureResponse) + err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeConfigure", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *volumeServerClient) VolumeCopy(ctx context.Context, in *VolumeCopyRequest, opts ...grpc.CallOption) (*VolumeCopyResponse, error) { + out := new(VolumeCopyResponse) + err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeCopy", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *volumeServerClient) ReadVolumeFileStatus(ctx context.Context, in *ReadVolumeFileStatusRequest, opts ...grpc.CallOption) (*ReadVolumeFileStatusResponse, error) { + out := new(ReadVolumeFileStatusResponse) + err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/ReadVolumeFileStatus", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *volumeServerClient) CopyFile(ctx context.Context, in *CopyFileRequest, opts ...grpc.CallOption) (VolumeServer_CopyFileClient, error) { + stream, err := c.cc.NewStream(ctx, &_VolumeServer_serviceDesc.Streams[1], "/volume_server_pb.VolumeServer/CopyFile", opts...) 
+ if err != nil { + return nil, err + } + x := &volumeServerCopyFileClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type VolumeServer_CopyFileClient interface { + Recv() (*CopyFileResponse, error) + grpc.ClientStream +} + +type volumeServerCopyFileClient struct { + grpc.ClientStream +} + +func (x *volumeServerCopyFileClient) Recv() (*CopyFileResponse, error) { + m := new(CopyFileResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *volumeServerClient) VolumeTailSender(ctx context.Context, in *VolumeTailSenderRequest, opts ...grpc.CallOption) (VolumeServer_VolumeTailSenderClient, error) { + stream, err := c.cc.NewStream(ctx, &_VolumeServer_serviceDesc.Streams[2], "/volume_server_pb.VolumeServer/VolumeTailSender", opts...) + if err != nil { + return nil, err + } + x := &volumeServerVolumeTailSenderClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type VolumeServer_VolumeTailSenderClient interface { + Recv() (*VolumeTailSenderResponse, error) + grpc.ClientStream +} + +type volumeServerVolumeTailSenderClient struct { + grpc.ClientStream +} + +func (x *volumeServerVolumeTailSenderClient) Recv() (*VolumeTailSenderResponse, error) { + m := new(VolumeTailSenderResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *volumeServerClient) VolumeTailReceiver(ctx context.Context, in *VolumeTailReceiverRequest, opts ...grpc.CallOption) (*VolumeTailReceiverResponse, error) { + out := new(VolumeTailReceiverResponse) + err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeTailReceiver", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *volumeServerClient) VolumeEcShardsGenerate(ctx context.Context, in *VolumeEcShardsGenerateRequest, opts ...grpc.CallOption) (*VolumeEcShardsGenerateResponse, error) { + out := new(VolumeEcShardsGenerateResponse) + err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeEcShardsGenerate", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *volumeServerClient) VolumeEcShardsRebuild(ctx context.Context, in *VolumeEcShardsRebuildRequest, opts ...grpc.CallOption) (*VolumeEcShardsRebuildResponse, error) { + out := new(VolumeEcShardsRebuildResponse) + err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeEcShardsRebuild", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *volumeServerClient) VolumeEcShardsCopy(ctx context.Context, in *VolumeEcShardsCopyRequest, opts ...grpc.CallOption) (*VolumeEcShardsCopyResponse, error) { + out := new(VolumeEcShardsCopyResponse) + err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeEcShardsCopy", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *volumeServerClient) VolumeEcShardsDelete(ctx context.Context, in *VolumeEcShardsDeleteRequest, opts ...grpc.CallOption) (*VolumeEcShardsDeleteResponse, error) { + out := new(VolumeEcShardsDeleteResponse) + err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeEcShardsDelete", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *volumeServerClient) VolumeEcShardsMount(ctx context.Context, in *VolumeEcShardsMountRequest, opts ...grpc.CallOption) (*VolumeEcShardsMountResponse, error) { + out := new(VolumeEcShardsMountResponse) + err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeEcShardsMount", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *volumeServerClient) VolumeEcShardsUnmount(ctx context.Context, in *VolumeEcShardsUnmountRequest, opts ...grpc.CallOption) (*VolumeEcShardsUnmountResponse, error) { + out := new(VolumeEcShardsUnmountResponse) + err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeEcShardsUnmount", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *volumeServerClient) VolumeEcShardRead(ctx context.Context, in *VolumeEcShardReadRequest, opts ...grpc.CallOption) (VolumeServer_VolumeEcShardReadClient, error) { + stream, err := c.cc.NewStream(ctx, &_VolumeServer_serviceDesc.Streams[3], "/volume_server_pb.VolumeServer/VolumeEcShardRead", opts...) + if err != nil { + return nil, err + } + x := &volumeServerVolumeEcShardReadClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type VolumeServer_VolumeEcShardReadClient interface { + Recv() (*VolumeEcShardReadResponse, error) + grpc.ClientStream +} + +type volumeServerVolumeEcShardReadClient struct { + grpc.ClientStream +} + +func (x *volumeServerVolumeEcShardReadClient) Recv() (*VolumeEcShardReadResponse, error) { + m := new(VolumeEcShardReadResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *volumeServerClient) VolumeEcBlobDelete(ctx context.Context, in *VolumeEcBlobDeleteRequest, opts ...grpc.CallOption) (*VolumeEcBlobDeleteResponse, error) { + out := new(VolumeEcBlobDeleteResponse) + err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeEcBlobDelete", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *volumeServerClient) VolumeEcShardsToVolume(ctx context.Context, in *VolumeEcShardsToVolumeRequest, opts ...grpc.CallOption) (*VolumeEcShardsToVolumeResponse, error) { + out := new(VolumeEcShardsToVolumeResponse) + err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeEcShardsToVolume", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *volumeServerClient) VolumeTierMoveDatToRemote(ctx context.Context, in *VolumeTierMoveDatToRemoteRequest, opts ...grpc.CallOption) (VolumeServer_VolumeTierMoveDatToRemoteClient, error) { + stream, err := c.cc.NewStream(ctx, &_VolumeServer_serviceDesc.Streams[4], "/volume_server_pb.VolumeServer/VolumeTierMoveDatToRemote", opts...) + if err != nil { + return nil, err + } + x := &volumeServerVolumeTierMoveDatToRemoteClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type VolumeServer_VolumeTierMoveDatToRemoteClient interface { + Recv() (*VolumeTierMoveDatToRemoteResponse, error) + grpc.ClientStream +} + +type volumeServerVolumeTierMoveDatToRemoteClient struct { + grpc.ClientStream +} + +func (x *volumeServerVolumeTierMoveDatToRemoteClient) Recv() (*VolumeTierMoveDatToRemoteResponse, error) { + m := new(VolumeTierMoveDatToRemoteResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *volumeServerClient) VolumeTierMoveDatFromRemote(ctx context.Context, in *VolumeTierMoveDatFromRemoteRequest, opts ...grpc.CallOption) (VolumeServer_VolumeTierMoveDatFromRemoteClient, error) { + stream, err := c.cc.NewStream(ctx, &_VolumeServer_serviceDesc.Streams[5], "/volume_server_pb.VolumeServer/VolumeTierMoveDatFromRemote", opts...) 
+ if err != nil { + return nil, err + } + x := &volumeServerVolumeTierMoveDatFromRemoteClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type VolumeServer_VolumeTierMoveDatFromRemoteClient interface { + Recv() (*VolumeTierMoveDatFromRemoteResponse, error) + grpc.ClientStream +} + +type volumeServerVolumeTierMoveDatFromRemoteClient struct { + grpc.ClientStream +} + +func (x *volumeServerVolumeTierMoveDatFromRemoteClient) Recv() (*VolumeTierMoveDatFromRemoteResponse, error) { + m := new(VolumeTierMoveDatFromRemoteResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *volumeServerClient) VolumeServerStatus(ctx context.Context, in *VolumeServerStatusRequest, opts ...grpc.CallOption) (*VolumeServerStatusResponse, error) { + out := new(VolumeServerStatusResponse) + err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeServerStatus", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *volumeServerClient) Query(ctx context.Context, in *QueryRequest, opts ...grpc.CallOption) (VolumeServer_QueryClient, error) { + stream, err := c.cc.NewStream(ctx, &_VolumeServer_serviceDesc.Streams[6], "/volume_server_pb.VolumeServer/Query", opts...) 
if err != nil { return nil, err } - return out, nil + x := &volumeServerQueryClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type VolumeServer_QueryClient interface { + Recv() (*QueriedStripe, error) + grpc.ClientStream +} + +type volumeServerQueryClient struct { + grpc.ClientStream +} + +func (x *volumeServerQueryClient) Recv() (*QueriedStripe, error) { + m := new(QueriedStripe) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// VolumeServerServer is the server API for VolumeServer service. +type VolumeServerServer interface { + //Experts only: takes multiple fid parameters. This function does not propagate deletes to replicas. + BatchDelete(context.Context, *BatchDeleteRequest) (*BatchDeleteResponse, error) + VacuumVolumeCheck(context.Context, *VacuumVolumeCheckRequest) (*VacuumVolumeCheckResponse, error) + VacuumVolumeCompact(context.Context, *VacuumVolumeCompactRequest) (*VacuumVolumeCompactResponse, error) + VacuumVolumeCommit(context.Context, *VacuumVolumeCommitRequest) (*VacuumVolumeCommitResponse, error) + VacuumVolumeCleanup(context.Context, *VacuumVolumeCleanupRequest) (*VacuumVolumeCleanupResponse, error) + DeleteCollection(context.Context, *DeleteCollectionRequest) (*DeleteCollectionResponse, error) + AllocateVolume(context.Context, *AllocateVolumeRequest) (*AllocateVolumeResponse, error) + VolumeSyncStatus(context.Context, *VolumeSyncStatusRequest) (*VolumeSyncStatusResponse, error) + VolumeIncrementalCopy(*VolumeIncrementalCopyRequest, VolumeServer_VolumeIncrementalCopyServer) error + VolumeMount(context.Context, *VolumeMountRequest) (*VolumeMountResponse, error) + VolumeUnmount(context.Context, *VolumeUnmountRequest) (*VolumeUnmountResponse, error) + VolumeDelete(context.Context, *VolumeDeleteRequest) (*VolumeDeleteResponse, error) + 
VolumeMarkReadonly(context.Context, *VolumeMarkReadonlyRequest) (*VolumeMarkReadonlyResponse, error) + VolumeConfigure(context.Context, *VolumeConfigureRequest) (*VolumeConfigureResponse, error) + // copy the .idx .dat files, and mount this volume + VolumeCopy(context.Context, *VolumeCopyRequest) (*VolumeCopyResponse, error) + ReadVolumeFileStatus(context.Context, *ReadVolumeFileStatusRequest) (*ReadVolumeFileStatusResponse, error) + CopyFile(*CopyFileRequest, VolumeServer_CopyFileServer) error + VolumeTailSender(*VolumeTailSenderRequest, VolumeServer_VolumeTailSenderServer) error + VolumeTailReceiver(context.Context, *VolumeTailReceiverRequest) (*VolumeTailReceiverResponse, error) + // erasure coding + VolumeEcShardsGenerate(context.Context, *VolumeEcShardsGenerateRequest) (*VolumeEcShardsGenerateResponse, error) + VolumeEcShardsRebuild(context.Context, *VolumeEcShardsRebuildRequest) (*VolumeEcShardsRebuildResponse, error) + VolumeEcShardsCopy(context.Context, *VolumeEcShardsCopyRequest) (*VolumeEcShardsCopyResponse, error) + VolumeEcShardsDelete(context.Context, *VolumeEcShardsDeleteRequest) (*VolumeEcShardsDeleteResponse, error) + VolumeEcShardsMount(context.Context, *VolumeEcShardsMountRequest) (*VolumeEcShardsMountResponse, error) + VolumeEcShardsUnmount(context.Context, *VolumeEcShardsUnmountRequest) (*VolumeEcShardsUnmountResponse, error) + VolumeEcShardRead(*VolumeEcShardReadRequest, VolumeServer_VolumeEcShardReadServer) error + VolumeEcBlobDelete(context.Context, *VolumeEcBlobDeleteRequest) (*VolumeEcBlobDeleteResponse, error) + VolumeEcShardsToVolume(context.Context, *VolumeEcShardsToVolumeRequest) (*VolumeEcShardsToVolumeResponse, error) + // tiered storage + VolumeTierMoveDatToRemote(*VolumeTierMoveDatToRemoteRequest, VolumeServer_VolumeTierMoveDatToRemoteServer) error + VolumeTierMoveDatFromRemote(*VolumeTierMoveDatFromRemoteRequest, VolumeServer_VolumeTierMoveDatFromRemoteServer) error + VolumeServerStatus(context.Context, *VolumeServerStatusRequest) 
(*VolumeServerStatusResponse, error) + // query + Query(*QueryRequest, VolumeServer_QueryServer) error +} + +// UnimplementedVolumeServerServer can be embedded to have forward compatible implementations. +type UnimplementedVolumeServerServer struct { +} + +func (*UnimplementedVolumeServerServer) BatchDelete(context.Context, *BatchDeleteRequest) (*BatchDeleteResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method BatchDelete not implemented") +} +func (*UnimplementedVolumeServerServer) VacuumVolumeCheck(context.Context, *VacuumVolumeCheckRequest) (*VacuumVolumeCheckResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method VacuumVolumeCheck not implemented") +} +func (*UnimplementedVolumeServerServer) VacuumVolumeCompact(context.Context, *VacuumVolumeCompactRequest) (*VacuumVolumeCompactResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method VacuumVolumeCompact not implemented") +} +func (*UnimplementedVolumeServerServer) VacuumVolumeCommit(context.Context, *VacuumVolumeCommitRequest) (*VacuumVolumeCommitResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method VacuumVolumeCommit not implemented") +} +func (*UnimplementedVolumeServerServer) VacuumVolumeCleanup(context.Context, *VacuumVolumeCleanupRequest) (*VacuumVolumeCleanupResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method VacuumVolumeCleanup not implemented") +} +func (*UnimplementedVolumeServerServer) DeleteCollection(context.Context, *DeleteCollectionRequest) (*DeleteCollectionResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method DeleteCollection not implemented") +} +func (*UnimplementedVolumeServerServer) AllocateVolume(context.Context, *AllocateVolumeRequest) (*AllocateVolumeResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method AllocateVolume not implemented") +} +func (*UnimplementedVolumeServerServer) VolumeSyncStatus(context.Context, *VolumeSyncStatusRequest) 
(*VolumeSyncStatusResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method VolumeSyncStatus not implemented") +} +func (*UnimplementedVolumeServerServer) VolumeIncrementalCopy(*VolumeIncrementalCopyRequest, VolumeServer_VolumeIncrementalCopyServer) error { + return status.Errorf(codes.Unimplemented, "method VolumeIncrementalCopy not implemented") +} +func (*UnimplementedVolumeServerServer) VolumeMount(context.Context, *VolumeMountRequest) (*VolumeMountResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method VolumeMount not implemented") +} +func (*UnimplementedVolumeServerServer) VolumeUnmount(context.Context, *VolumeUnmountRequest) (*VolumeUnmountResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method VolumeUnmount not implemented") +} +func (*UnimplementedVolumeServerServer) VolumeDelete(context.Context, *VolumeDeleteRequest) (*VolumeDeleteResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method VolumeDelete not implemented") +} +func (*UnimplementedVolumeServerServer) VolumeMarkReadonly(context.Context, *VolumeMarkReadonlyRequest) (*VolumeMarkReadonlyResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method VolumeMarkReadonly not implemented") +} +func (*UnimplementedVolumeServerServer) VolumeConfigure(context.Context, *VolumeConfigureRequest) (*VolumeConfigureResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method VolumeConfigure not implemented") +} +func (*UnimplementedVolumeServerServer) VolumeCopy(context.Context, *VolumeCopyRequest) (*VolumeCopyResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method VolumeCopy not implemented") +} +func (*UnimplementedVolumeServerServer) ReadVolumeFileStatus(context.Context, *ReadVolumeFileStatusRequest) (*ReadVolumeFileStatusResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ReadVolumeFileStatus not implemented") +} +func (*UnimplementedVolumeServerServer) 
CopyFile(*CopyFileRequest, VolumeServer_CopyFileServer) error { + return status.Errorf(codes.Unimplemented, "method CopyFile not implemented") +} +func (*UnimplementedVolumeServerServer) VolumeTailSender(*VolumeTailSenderRequest, VolumeServer_VolumeTailSenderServer) error { + return status.Errorf(codes.Unimplemented, "method VolumeTailSender not implemented") +} +func (*UnimplementedVolumeServerServer) VolumeTailReceiver(context.Context, *VolumeTailReceiverRequest) (*VolumeTailReceiverResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method VolumeTailReceiver not implemented") +} +func (*UnimplementedVolumeServerServer) VolumeEcShardsGenerate(context.Context, *VolumeEcShardsGenerateRequest) (*VolumeEcShardsGenerateResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method VolumeEcShardsGenerate not implemented") +} +func (*UnimplementedVolumeServerServer) VolumeEcShardsRebuild(context.Context, *VolumeEcShardsRebuildRequest) (*VolumeEcShardsRebuildResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method VolumeEcShardsRebuild not implemented") +} +func (*UnimplementedVolumeServerServer) VolumeEcShardsCopy(context.Context, *VolumeEcShardsCopyRequest) (*VolumeEcShardsCopyResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method VolumeEcShardsCopy not implemented") +} +func (*UnimplementedVolumeServerServer) VolumeEcShardsDelete(context.Context, *VolumeEcShardsDeleteRequest) (*VolumeEcShardsDeleteResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method VolumeEcShardsDelete not implemented") +} +func (*UnimplementedVolumeServerServer) VolumeEcShardsMount(context.Context, *VolumeEcShardsMountRequest) (*VolumeEcShardsMountResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method VolumeEcShardsMount not implemented") +} +func (*UnimplementedVolumeServerServer) VolumeEcShardsUnmount(context.Context, *VolumeEcShardsUnmountRequest) (*VolumeEcShardsUnmountResponse, 
error) { + return nil, status.Errorf(codes.Unimplemented, "method VolumeEcShardsUnmount not implemented") +} +func (*UnimplementedVolumeServerServer) VolumeEcShardRead(*VolumeEcShardReadRequest, VolumeServer_VolumeEcShardReadServer) error { + return status.Errorf(codes.Unimplemented, "method VolumeEcShardRead not implemented") +} +func (*UnimplementedVolumeServerServer) VolumeEcBlobDelete(context.Context, *VolumeEcBlobDeleteRequest) (*VolumeEcBlobDeleteResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method VolumeEcBlobDelete not implemented") +} +func (*UnimplementedVolumeServerServer) VolumeEcShardsToVolume(context.Context, *VolumeEcShardsToVolumeRequest) (*VolumeEcShardsToVolumeResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method VolumeEcShardsToVolume not implemented") +} +func (*UnimplementedVolumeServerServer) VolumeTierMoveDatToRemote(*VolumeTierMoveDatToRemoteRequest, VolumeServer_VolumeTierMoveDatToRemoteServer) error { + return status.Errorf(codes.Unimplemented, "method VolumeTierMoveDatToRemote not implemented") +} +func (*UnimplementedVolumeServerServer) VolumeTierMoveDatFromRemote(*VolumeTierMoveDatFromRemoteRequest, VolumeServer_VolumeTierMoveDatFromRemoteServer) error { + return status.Errorf(codes.Unimplemented, "method VolumeTierMoveDatFromRemote not implemented") +} +func (*UnimplementedVolumeServerServer) VolumeServerStatus(context.Context, *VolumeServerStatusRequest) (*VolumeServerStatusResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method VolumeServerStatus not implemented") +} +func (*UnimplementedVolumeServerServer) Query(*QueryRequest, VolumeServer_QueryServer) error { + return status.Errorf(codes.Unimplemented, "method Query not implemented") +} + +func RegisterVolumeServerServer(s *grpc.Server, srv VolumeServerServer) { + s.RegisterService(&_VolumeServer_serviceDesc, srv) +} + +func _VolumeServer_BatchDelete_Handler(srv interface{}, ctx context.Context, dec func(interface{}) 
error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(BatchDeleteRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(VolumeServerServer).BatchDelete(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/volume_server_pb.VolumeServer/BatchDelete", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(VolumeServerServer).BatchDelete(ctx, req.(*BatchDeleteRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _VolumeServer_VacuumVolumeCheck_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(VacuumVolumeCheckRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(VolumeServerServer).VacuumVolumeCheck(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/volume_server_pb.VolumeServer/VacuumVolumeCheck", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(VolumeServerServer).VacuumVolumeCheck(ctx, req.(*VacuumVolumeCheckRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _VolumeServer_VacuumVolumeCompact_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(VacuumVolumeCompactRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(VolumeServerServer).VacuumVolumeCompact(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/volume_server_pb.VolumeServer/VacuumVolumeCompact", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(VolumeServerServer).VacuumVolumeCompact(ctx, req.(*VacuumVolumeCompactRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func 
_VolumeServer_VacuumVolumeCommit_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(VacuumVolumeCommitRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(VolumeServerServer).VacuumVolumeCommit(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/volume_server_pb.VolumeServer/VacuumVolumeCommit", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(VolumeServerServer).VacuumVolumeCommit(ctx, req.(*VacuumVolumeCommitRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _VolumeServer_VacuumVolumeCleanup_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(VacuumVolumeCleanupRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(VolumeServerServer).VacuumVolumeCleanup(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/volume_server_pb.VolumeServer/VacuumVolumeCleanup", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(VolumeServerServer).VacuumVolumeCleanup(ctx, req.(*VacuumVolumeCleanupRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _VolumeServer_DeleteCollection_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteCollectionRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(VolumeServerServer).DeleteCollection(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/volume_server_pb.VolumeServer/DeleteCollection", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return 
srv.(VolumeServerServer).DeleteCollection(ctx, req.(*DeleteCollectionRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _VolumeServer_AllocateVolume_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(AllocateVolumeRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(VolumeServerServer).AllocateVolume(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/volume_server_pb.VolumeServer/AllocateVolume", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(VolumeServerServer).AllocateVolume(ctx, req.(*AllocateVolumeRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _VolumeServer_VolumeSyncStatus_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(VolumeSyncStatusRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(VolumeServerServer).VolumeSyncStatus(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/volume_server_pb.VolumeServer/VolumeSyncStatus", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(VolumeServerServer).VolumeSyncStatus(ctx, req.(*VolumeSyncStatusRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _VolumeServer_VolumeIncrementalCopy_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(VolumeIncrementalCopyRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(VolumeServerServer).VolumeIncrementalCopy(m, &volumeServerVolumeIncrementalCopyServer{stream}) +} + +type VolumeServer_VolumeIncrementalCopyServer interface { + Send(*VolumeIncrementalCopyResponse) error + grpc.ServerStream +} + +type 
volumeServerVolumeIncrementalCopyServer struct { + grpc.ServerStream +} + +func (x *volumeServerVolumeIncrementalCopyServer) Send(m *VolumeIncrementalCopyResponse) error { + return x.ServerStream.SendMsg(m) +} + +func _VolumeServer_VolumeMount_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(VolumeMountRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(VolumeServerServer).VolumeMount(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/volume_server_pb.VolumeServer/VolumeMount", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(VolumeServerServer).VolumeMount(ctx, req.(*VolumeMountRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _VolumeServer_VolumeUnmount_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(VolumeUnmountRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(VolumeServerServer).VolumeUnmount(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/volume_server_pb.VolumeServer/VolumeUnmount", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(VolumeServerServer).VolumeUnmount(ctx, req.(*VolumeUnmountRequest)) + } + return interceptor(ctx, in, info, handler) } -func (c *volumeServerClient) VolumeUnmount(ctx context.Context, in *VolumeUnmountRequest, opts ...grpc.CallOption) (*VolumeUnmountResponse, error) { - out := new(VolumeUnmountResponse) - err := grpc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeUnmount", in, out, c.cc, opts...) 
- if err != nil { +func _VolumeServer_VolumeDelete_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(VolumeDeleteRequest) + if err := dec(in); err != nil { return nil, err } - return out, nil + if interceptor == nil { + return srv.(VolumeServerServer).VolumeDelete(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/volume_server_pb.VolumeServer/VolumeDelete", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(VolumeServerServer).VolumeDelete(ctx, req.(*VolumeDeleteRequest)) + } + return interceptor(ctx, in, info, handler) } -func (c *volumeServerClient) VolumeDelete(ctx context.Context, in *VolumeDeleteRequest, opts ...grpc.CallOption) (*VolumeDeleteResponse, error) { - out := new(VolumeDeleteResponse) - err := grpc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeDelete", in, out, c.cc, opts...) - if err != nil { +func _VolumeServer_VolumeMarkReadonly_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(VolumeMarkReadonlyRequest) + if err := dec(in); err != nil { return nil, err } - return out, nil -} - -// Server API for VolumeServer service - -type VolumeServerServer interface { - // Experts only: takes multiple fid parameters. This function does not propagate deletes to replicas. 
- BatchDelete(context.Context, *BatchDeleteRequest) (*BatchDeleteResponse, error) - VacuumVolumeCheck(context.Context, *VacuumVolumeCheckRequest) (*VacuumVolumeCheckResponse, error) - VacuumVolumeCompact(context.Context, *VacuumVolumeCompactRequest) (*VacuumVolumeCompactResponse, error) - VacuumVolumeCommit(context.Context, *VacuumVolumeCommitRequest) (*VacuumVolumeCommitResponse, error) - VacuumVolumeCleanup(context.Context, *VacuumVolumeCleanupRequest) (*VacuumVolumeCleanupResponse, error) - DeleteCollection(context.Context, *DeleteCollectionRequest) (*DeleteCollectionResponse, error) - AssignVolume(context.Context, *AssignVolumeRequest) (*AssignVolumeResponse, error) - VolumeSyncStatus(context.Context, *VolumeSyncStatusRequest) (*VolumeSyncStatusResponse, error) - VolumeSyncIndex(*VolumeSyncIndexRequest, VolumeServer_VolumeSyncIndexServer) error - VolumeSyncData(*VolumeSyncDataRequest, VolumeServer_VolumeSyncDataServer) error - VolumeMount(context.Context, *VolumeMountRequest) (*VolumeMountResponse, error) - VolumeUnmount(context.Context, *VolumeUnmountRequest) (*VolumeUnmountResponse, error) - VolumeDelete(context.Context, *VolumeDeleteRequest) (*VolumeDeleteResponse, error) -} - -func RegisterVolumeServerServer(s *grpc.Server, srv VolumeServerServer) { - s.RegisterService(&_VolumeServer_serviceDesc, srv) + if interceptor == nil { + return srv.(VolumeServerServer).VolumeMarkReadonly(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/volume_server_pb.VolumeServer/VolumeMarkReadonly", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(VolumeServerServer).VolumeMarkReadonly(ctx, req.(*VolumeMarkReadonlyRequest)) + } + return interceptor(ctx, in, info, handler) } -func _VolumeServer_BatchDelete_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(BatchDeleteRequest) +func 
_VolumeServer_VolumeConfigure_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(VolumeConfigureRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { - return srv.(VolumeServerServer).BatchDelete(ctx, in) + return srv.(VolumeServerServer).VolumeConfigure(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/volume_server_pb.VolumeServer/BatchDelete", + FullMethod: "/volume_server_pb.VolumeServer/VolumeConfigure", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(VolumeServerServer).BatchDelete(ctx, req.(*BatchDeleteRequest)) + return srv.(VolumeServerServer).VolumeConfigure(ctx, req.(*VolumeConfigureRequest)) } return interceptor(ctx, in, info, handler) } -func _VolumeServer_VacuumVolumeCheck_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(VacuumVolumeCheckRequest) +func _VolumeServer_VolumeCopy_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(VolumeCopyRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { - return srv.(VolumeServerServer).VacuumVolumeCheck(ctx, in) + return srv.(VolumeServerServer).VolumeCopy(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/volume_server_pb.VolumeServer/VacuumVolumeCheck", + FullMethod: "/volume_server_pb.VolumeServer/VolumeCopy", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(VolumeServerServer).VacuumVolumeCheck(ctx, req.(*VacuumVolumeCheckRequest)) + return srv.(VolumeServerServer).VolumeCopy(ctx, req.(*VolumeCopyRequest)) } return interceptor(ctx, in, info, handler) } -func _VolumeServer_VacuumVolumeCompact_Handler(srv interface{}, ctx 
context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(VacuumVolumeCompactRequest) +func _VolumeServer_ReadVolumeFileStatus_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ReadVolumeFileStatusRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { - return srv.(VolumeServerServer).VacuumVolumeCompact(ctx, in) + return srv.(VolumeServerServer).ReadVolumeFileStatus(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/volume_server_pb.VolumeServer/VacuumVolumeCompact", + FullMethod: "/volume_server_pb.VolumeServer/ReadVolumeFileStatus", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(VolumeServerServer).VacuumVolumeCompact(ctx, req.(*VacuumVolumeCompactRequest)) + return srv.(VolumeServerServer).ReadVolumeFileStatus(ctx, req.(*ReadVolumeFileStatusRequest)) } return interceptor(ctx, in, info, handler) } -func _VolumeServer_VacuumVolumeCommit_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(VacuumVolumeCommitRequest) +func _VolumeServer_CopyFile_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(CopyFileRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(VolumeServerServer).CopyFile(m, &volumeServerCopyFileServer{stream}) +} + +type VolumeServer_CopyFileServer interface { + Send(*CopyFileResponse) error + grpc.ServerStream +} + +type volumeServerCopyFileServer struct { + grpc.ServerStream +} + +func (x *volumeServerCopyFileServer) Send(m *CopyFileResponse) error { + return x.ServerStream.SendMsg(m) +} + +func _VolumeServer_VolumeTailSender_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(VolumeTailSenderRequest) + if err := stream.RecvMsg(m); 
err != nil { + return err + } + return srv.(VolumeServerServer).VolumeTailSender(m, &volumeServerVolumeTailSenderServer{stream}) +} + +type VolumeServer_VolumeTailSenderServer interface { + Send(*VolumeTailSenderResponse) error + grpc.ServerStream +} + +type volumeServerVolumeTailSenderServer struct { + grpc.ServerStream +} + +func (x *volumeServerVolumeTailSenderServer) Send(m *VolumeTailSenderResponse) error { + return x.ServerStream.SendMsg(m) +} + +func _VolumeServer_VolumeTailReceiver_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(VolumeTailReceiverRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { - return srv.(VolumeServerServer).VacuumVolumeCommit(ctx, in) + return srv.(VolumeServerServer).VolumeTailReceiver(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/volume_server_pb.VolumeServer/VacuumVolumeCommit", + FullMethod: "/volume_server_pb.VolumeServer/VolumeTailReceiver", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(VolumeServerServer).VacuumVolumeCommit(ctx, req.(*VacuumVolumeCommitRequest)) + return srv.(VolumeServerServer).VolumeTailReceiver(ctx, req.(*VolumeTailReceiverRequest)) } return interceptor(ctx, in, info, handler) } -func _VolumeServer_VacuumVolumeCleanup_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(VacuumVolumeCleanupRequest) +func _VolumeServer_VolumeEcShardsGenerate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(VolumeEcShardsGenerateRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { - return srv.(VolumeServerServer).VacuumVolumeCleanup(ctx, in) + return 
srv.(VolumeServerServer).VolumeEcShardsGenerate(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/volume_server_pb.VolumeServer/VacuumVolumeCleanup", + FullMethod: "/volume_server_pb.VolumeServer/VolumeEcShardsGenerate", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(VolumeServerServer).VacuumVolumeCleanup(ctx, req.(*VacuumVolumeCleanupRequest)) + return srv.(VolumeServerServer).VolumeEcShardsGenerate(ctx, req.(*VolumeEcShardsGenerateRequest)) } return interceptor(ctx, in, info, handler) } -func _VolumeServer_DeleteCollection_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(DeleteCollectionRequest) +func _VolumeServer_VolumeEcShardsRebuild_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(VolumeEcShardsRebuildRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { - return srv.(VolumeServerServer).DeleteCollection(ctx, in) + return srv.(VolumeServerServer).VolumeEcShardsRebuild(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/volume_server_pb.VolumeServer/DeleteCollection", + FullMethod: "/volume_server_pb.VolumeServer/VolumeEcShardsRebuild", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(VolumeServerServer).DeleteCollection(ctx, req.(*DeleteCollectionRequest)) + return srv.(VolumeServerServer).VolumeEcShardsRebuild(ctx, req.(*VolumeEcShardsRebuildRequest)) } return interceptor(ctx, in, info, handler) } -func _VolumeServer_AssignVolume_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(AssignVolumeRequest) +func _VolumeServer_VolumeEcShardsCopy_Handler(srv interface{}, ctx context.Context, dec 
func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(VolumeEcShardsCopyRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { - return srv.(VolumeServerServer).AssignVolume(ctx, in) + return srv.(VolumeServerServer).VolumeEcShardsCopy(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/volume_server_pb.VolumeServer/AssignVolume", + FullMethod: "/volume_server_pb.VolumeServer/VolumeEcShardsCopy", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(VolumeServerServer).AssignVolume(ctx, req.(*AssignVolumeRequest)) + return srv.(VolumeServerServer).VolumeEcShardsCopy(ctx, req.(*VolumeEcShardsCopyRequest)) } return interceptor(ctx, in, info, handler) } -func _VolumeServer_VolumeSyncStatus_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(VolumeSyncStatusRequest) +func _VolumeServer_VolumeEcShardsDelete_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(VolumeEcShardsDeleteRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { - return srv.(VolumeServerServer).VolumeSyncStatus(ctx, in) + return srv.(VolumeServerServer).VolumeEcShardsDelete(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/volume_server_pb.VolumeServer/VolumeSyncStatus", + FullMethod: "/volume_server_pb.VolumeServer/VolumeEcShardsDelete", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(VolumeServerServer).VolumeSyncStatus(ctx, req.(*VolumeSyncStatusRequest)) + return srv.(VolumeServerServer).VolumeEcShardsDelete(ctx, req.(*VolumeEcShardsDeleteRequest)) } return interceptor(ctx, in, info, handler) } -func _VolumeServer_VolumeSyncIndex_Handler(srv interface{}, stream 
grpc.ServerStream) error { - m := new(VolumeSyncIndexRequest) - if err := stream.RecvMsg(m); err != nil { - return err +func _VolumeServer_VolumeEcShardsMount_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(VolumeEcShardsMountRequest) + if err := dec(in); err != nil { + return nil, err } - return srv.(VolumeServerServer).VolumeSyncIndex(m, &volumeServerVolumeSyncIndexServer{stream}) -} - -type VolumeServer_VolumeSyncIndexServer interface { - Send(*VolumeSyncIndexResponse) error - grpc.ServerStream -} - -type volumeServerVolumeSyncIndexServer struct { - grpc.ServerStream + if interceptor == nil { + return srv.(VolumeServerServer).VolumeEcShardsMount(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/volume_server_pb.VolumeServer/VolumeEcShardsMount", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(VolumeServerServer).VolumeEcShardsMount(ctx, req.(*VolumeEcShardsMountRequest)) + } + return interceptor(ctx, in, info, handler) } -func (x *volumeServerVolumeSyncIndexServer) Send(m *VolumeSyncIndexResponse) error { - return x.ServerStream.SendMsg(m) +func _VolumeServer_VolumeEcShardsUnmount_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(VolumeEcShardsUnmountRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(VolumeServerServer).VolumeEcShardsUnmount(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/volume_server_pb.VolumeServer/VolumeEcShardsUnmount", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(VolumeServerServer).VolumeEcShardsUnmount(ctx, req.(*VolumeEcShardsUnmountRequest)) + } + return interceptor(ctx, in, info, handler) } -func 
_VolumeServer_VolumeSyncData_Handler(srv interface{}, stream grpc.ServerStream) error { - m := new(VolumeSyncDataRequest) +func _VolumeServer_VolumeEcShardRead_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(VolumeEcShardReadRequest) if err := stream.RecvMsg(m); err != nil { return err } - return srv.(VolumeServerServer).VolumeSyncData(m, &volumeServerVolumeSyncDataServer{stream}) + return srv.(VolumeServerServer).VolumeEcShardRead(m, &volumeServerVolumeEcShardReadServer{stream}) } -type VolumeServer_VolumeSyncDataServer interface { - Send(*VolumeSyncDataResponse) error +type VolumeServer_VolumeEcShardReadServer interface { + Send(*VolumeEcShardReadResponse) error grpc.ServerStream } -type volumeServerVolumeSyncDataServer struct { +type volumeServerVolumeEcShardReadServer struct { grpc.ServerStream } -func (x *volumeServerVolumeSyncDataServer) Send(m *VolumeSyncDataResponse) error { +func (x *volumeServerVolumeEcShardReadServer) Send(m *VolumeEcShardReadResponse) error { return x.ServerStream.SendMsg(m) } -func _VolumeServer_VolumeMount_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(VolumeMountRequest) +func _VolumeServer_VolumeEcBlobDelete_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(VolumeEcBlobDeleteRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { - return srv.(VolumeServerServer).VolumeMount(ctx, in) + return srv.(VolumeServerServer).VolumeEcBlobDelete(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/volume_server_pb.VolumeServer/VolumeMount", + FullMethod: "/volume_server_pb.VolumeServer/VolumeEcBlobDelete", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(VolumeServerServer).VolumeMount(ctx, req.(*VolumeMountRequest)) + return 
srv.(VolumeServerServer).VolumeEcBlobDelete(ctx, req.(*VolumeEcBlobDeleteRequest)) } return interceptor(ctx, in, info, handler) } -func _VolumeServer_VolumeUnmount_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(VolumeUnmountRequest) +func _VolumeServer_VolumeEcShardsToVolume_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(VolumeEcShardsToVolumeRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { - return srv.(VolumeServerServer).VolumeUnmount(ctx, in) + return srv.(VolumeServerServer).VolumeEcShardsToVolume(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/volume_server_pb.VolumeServer/VolumeUnmount", + FullMethod: "/volume_server_pb.VolumeServer/VolumeEcShardsToVolume", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(VolumeServerServer).VolumeUnmount(ctx, req.(*VolumeUnmountRequest)) + return srv.(VolumeServerServer).VolumeEcShardsToVolume(ctx, req.(*VolumeEcShardsToVolumeRequest)) } return interceptor(ctx, in, info, handler) } -func _VolumeServer_VolumeDelete_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(VolumeDeleteRequest) +func _VolumeServer_VolumeTierMoveDatToRemote_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(VolumeTierMoveDatToRemoteRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(VolumeServerServer).VolumeTierMoveDatToRemote(m, &volumeServerVolumeTierMoveDatToRemoteServer{stream}) +} + +type VolumeServer_VolumeTierMoveDatToRemoteServer interface { + Send(*VolumeTierMoveDatToRemoteResponse) error + grpc.ServerStream +} + +type volumeServerVolumeTierMoveDatToRemoteServer struct { + grpc.ServerStream 
+} + +func (x *volumeServerVolumeTierMoveDatToRemoteServer) Send(m *VolumeTierMoveDatToRemoteResponse) error { + return x.ServerStream.SendMsg(m) +} + +func _VolumeServer_VolumeTierMoveDatFromRemote_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(VolumeTierMoveDatFromRemoteRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(VolumeServerServer).VolumeTierMoveDatFromRemote(m, &volumeServerVolumeTierMoveDatFromRemoteServer{stream}) +} + +type VolumeServer_VolumeTierMoveDatFromRemoteServer interface { + Send(*VolumeTierMoveDatFromRemoteResponse) error + grpc.ServerStream +} + +type volumeServerVolumeTierMoveDatFromRemoteServer struct { + grpc.ServerStream +} + +func (x *volumeServerVolumeTierMoveDatFromRemoteServer) Send(m *VolumeTierMoveDatFromRemoteResponse) error { + return x.ServerStream.SendMsg(m) +} + +func _VolumeServer_VolumeServerStatus_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(VolumeServerStatusRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { - return srv.(VolumeServerServer).VolumeDelete(ctx, in) + return srv.(VolumeServerServer).VolumeServerStatus(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/volume_server_pb.VolumeServer/VolumeDelete", + FullMethod: "/volume_server_pb.VolumeServer/VolumeServerStatus", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(VolumeServerServer).VolumeDelete(ctx, req.(*VolumeDeleteRequest)) + return srv.(VolumeServerServer).VolumeServerStatus(ctx, req.(*VolumeServerStatusRequest)) } return interceptor(ctx, in, info, handler) } +func _VolumeServer_Query_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(QueryRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(VolumeServerServer).Query(m, &volumeServerQueryServer{stream}) 
+} + +type VolumeServer_QueryServer interface { + Send(*QueriedStripe) error + grpc.ServerStream +} + +type volumeServerQueryServer struct { + grpc.ServerStream +} + +func (x *volumeServerQueryServer) Send(m *QueriedStripe) error { + return x.ServerStream.SendMsg(m) +} + var _VolumeServer_serviceDesc = grpc.ServiceDesc{ ServiceName: "volume_server_pb.VolumeServer", HandlerType: (*VolumeServerServer)(nil), @@ -1229,8 +7597,8 @@ var _VolumeServer_serviceDesc = grpc.ServiceDesc{ Handler: _VolumeServer_DeleteCollection_Handler, }, { - MethodName: "AssignVolume", - Handler: _VolumeServer_AssignVolume_Handler, + MethodName: "AllocateVolume", + Handler: _VolumeServer_AllocateVolume_Handler, }, { MethodName: "VolumeSyncStatus", @@ -1248,90 +7616,99 @@ var _VolumeServer_serviceDesc = grpc.ServiceDesc{ MethodName: "VolumeDelete", Handler: _VolumeServer_VolumeDelete_Handler, }, + { + MethodName: "VolumeMarkReadonly", + Handler: _VolumeServer_VolumeMarkReadonly_Handler, + }, + { + MethodName: "VolumeConfigure", + Handler: _VolumeServer_VolumeConfigure_Handler, + }, + { + MethodName: "VolumeCopy", + Handler: _VolumeServer_VolumeCopy_Handler, + }, + { + MethodName: "ReadVolumeFileStatus", + Handler: _VolumeServer_ReadVolumeFileStatus_Handler, + }, + { + MethodName: "VolumeTailReceiver", + Handler: _VolumeServer_VolumeTailReceiver_Handler, + }, + { + MethodName: "VolumeEcShardsGenerate", + Handler: _VolumeServer_VolumeEcShardsGenerate_Handler, + }, + { + MethodName: "VolumeEcShardsRebuild", + Handler: _VolumeServer_VolumeEcShardsRebuild_Handler, + }, + { + MethodName: "VolumeEcShardsCopy", + Handler: _VolumeServer_VolumeEcShardsCopy_Handler, + }, + { + MethodName: "VolumeEcShardsDelete", + Handler: _VolumeServer_VolumeEcShardsDelete_Handler, + }, + { + MethodName: "VolumeEcShardsMount", + Handler: _VolumeServer_VolumeEcShardsMount_Handler, + }, + { + MethodName: "VolumeEcShardsUnmount", + Handler: _VolumeServer_VolumeEcShardsUnmount_Handler, + }, + { + MethodName: 
"VolumeEcBlobDelete", + Handler: _VolumeServer_VolumeEcBlobDelete_Handler, + }, + { + MethodName: "VolumeEcShardsToVolume", + Handler: _VolumeServer_VolumeEcShardsToVolume_Handler, + }, + { + MethodName: "VolumeServerStatus", + Handler: _VolumeServer_VolumeServerStatus_Handler, + }, }, Streams: []grpc.StreamDesc{ { - StreamName: "VolumeSyncIndex", - Handler: _VolumeServer_VolumeSyncIndex_Handler, + StreamName: "VolumeIncrementalCopy", + Handler: _VolumeServer_VolumeIncrementalCopy_Handler, + ServerStreams: true, + }, + { + StreamName: "CopyFile", + Handler: _VolumeServer_CopyFile_Handler, + ServerStreams: true, + }, + { + StreamName: "VolumeTailSender", + Handler: _VolumeServer_VolumeTailSender_Handler, + ServerStreams: true, + }, + { + StreamName: "VolumeEcShardRead", + Handler: _VolumeServer_VolumeEcShardRead_Handler, ServerStreams: true, }, { - StreamName: "VolumeSyncData", - Handler: _VolumeServer_VolumeSyncData_Handler, + StreamName: "VolumeTierMoveDatToRemote", + Handler: _VolumeServer_VolumeTierMoveDatToRemote_Handler, + ServerStreams: true, + }, + { + StreamName: "VolumeTierMoveDatFromRemote", + Handler: _VolumeServer_VolumeTierMoveDatFromRemote_Handler, + ServerStreams: true, + }, + { + StreamName: "Query", + Handler: _VolumeServer_Query_Handler, ServerStreams: true, }, }, Metadata: "volume_server.proto", } - -func init() { proto.RegisterFile("volume_server.proto", fileDescriptor0) } - -var fileDescriptor0 = []byte{ - // 1044 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xa4, 0x57, 0xdd, 0x72, 0xdb, 0x44, - 0x14, 0x8e, 0x6a, 0x3b, 0x76, 0x8e, 0x6d, 0x6a, 0xd6, 0x69, 0xa2, 0xaa, 0x10, 0x8c, 0x80, 0xd4, - 0x69, 0x43, 0x80, 0x74, 0x80, 0x32, 0xdc, 0x00, 0x09, 0x30, 0xb9, 0xe8, 0x94, 0xd9, 0x4c, 0x3b, - 0xcc, 0xd0, 0x19, 0x8f, 0x22, 0xad, 0x9d, 0x25, 0xb2, 0xe4, 0x6a, 0x57, 0x99, 0x94, 0x37, 0xe1, - 0x9a, 0x1b, 0x9e, 0x8e, 0x17, 0xe0, 0x86, 0xd9, 0x1f, 0xd9, 0xfa, 0x73, 0x24, 0xe0, 0x6e, 0xf7, - 0xec, 
0x39, 0xdf, 0xf9, 0xd9, 0xa3, 0xf3, 0xad, 0x60, 0x78, 0x1d, 0xfa, 0xf1, 0x9c, 0x4c, 0x18, - 0x89, 0xae, 0x49, 0x74, 0xb4, 0x88, 0x42, 0x1e, 0xa2, 0x41, 0x46, 0x38, 0x59, 0x5c, 0xd8, 0x9f, - 0x00, 0xfa, 0xce, 0xe1, 0xee, 0xe5, 0x29, 0xf1, 0x09, 0x27, 0x98, 0xbc, 0x8e, 0x09, 0xe3, 0xe8, - 0x3e, 0x74, 0xa6, 0xd4, 0x27, 0x13, 0xea, 0x31, 0xd3, 0x18, 0x35, 0xc6, 0x5b, 0xb8, 0x2d, 0xf6, - 0x67, 0x1e, 0xb3, 0x9f, 0xc3, 0x30, 0x63, 0xc0, 0x16, 0x61, 0xc0, 0x08, 0x7a, 0x0a, 0xed, 0x88, - 0xb0, 0xd8, 0xe7, 0xca, 0xa0, 0x7b, 0xbc, 0x77, 0x94, 0xf7, 0x75, 0xb4, 0x34, 0x89, 0x7d, 0x8e, - 0x13, 0x75, 0x9b, 0x42, 0x2f, 0x7d, 0x80, 0x76, 0xa1, 0xad, 0x7d, 0x9b, 0xc6, 0xc8, 0x18, 0x6f, - 0xe1, 0x4d, 0xe5, 0x1a, 0xed, 0xc0, 0x26, 0xe3, 0x0e, 0x8f, 0x99, 0x79, 0x67, 0x64, 0x8c, 0x5b, - 0x58, 0xef, 0xd0, 0x36, 0xb4, 0x48, 0x14, 0x85, 0x91, 0xd9, 0x90, 0xea, 0x6a, 0x83, 0x10, 0x34, - 0x19, 0xfd, 0x8d, 0x98, 0xcd, 0x91, 0x31, 0xee, 0x63, 0xb9, 0xb6, 0xdb, 0xd0, 0xfa, 0x7e, 0xbe, - 0xe0, 0x6f, 0xec, 0x2f, 0xc1, 0x7c, 0xe9, 0xb8, 0x71, 0x3c, 0x7f, 0x29, 0x63, 0x3c, 0xb9, 0x24, - 0xee, 0x55, 0x92, 0xfb, 0x03, 0xd8, 0x92, 0x91, 0x7b, 0x49, 0x04, 0x7d, 0xdc, 0x51, 0x82, 0x33, - 0xcf, 0xfe, 0x06, 0xee, 0x97, 0x18, 0xea, 0x1a, 0x7c, 0x00, 0xfd, 0x99, 0x13, 0x5d, 0x38, 0x33, - 0x32, 0x89, 0x1c, 0x4e, 0x43, 0x69, 0x6d, 0xe0, 0x9e, 0x16, 0x62, 0x21, 0xb3, 0x7f, 0x01, 0x2b, - 0x83, 0x10, 0xce, 0x17, 0x8e, 0xcb, 0xeb, 0x38, 0x47, 0x23, 0xe8, 0x2e, 0x22, 0xe2, 0xf8, 0x7e, - 0xe8, 0x3a, 0x9c, 0xc8, 0x2a, 0x34, 0x70, 0x5a, 0x64, 0xbf, 0x0b, 0x0f, 0x4a, 0xc1, 0x55, 0x80, - 0xf6, 0xd3, 0x5c, 0xf4, 0xe1, 0x7c, 0x4e, 0x6b, 0xb9, 0xb6, 0xdf, 0x29, 0x44, 0x2d, 0x2d, 0x35, - 0xee, 0x57, 0xb9, 0x53, 0x9f, 0x38, 0x41, 0xbc, 0xa8, 0x05, 0x9c, 0x8f, 0x38, 0x31, 0x5d, 0x22, - 0xef, 0xaa, 0xe6, 0x38, 0x09, 0x7d, 0x9f, 0xb8, 0x9c, 0x86, 0x41, 0x02, 0xbb, 0x07, 0xe0, 0x2e, - 0x85, 0xba, 0x55, 0x52, 0x12, 0xdb, 0x02, 0xb3, 0x68, 0xaa, 0x61, 0xff, 0x34, 0x60, 0xf8, 0x2d, - 0x63, 0x74, 0x16, 0x28, 0xb7, 0xb5, 0xca, 
0x9f, 0x75, 0x78, 0x27, 0xef, 0x30, 0x7f, 0x3d, 0x8d, - 0xc2, 0xf5, 0x08, 0x8d, 0x88, 0x2c, 0x7c, 0xea, 0x3a, 0x12, 0xa2, 0x29, 0x21, 0xd2, 0x22, 0x34, - 0x80, 0x06, 0xe7, 0xbe, 0xd9, 0x92, 0x27, 0x62, 0x69, 0xef, 0xc0, 0x76, 0x36, 0x52, 0x9d, 0xc2, - 0x17, 0xb0, 0xab, 0x24, 0xe7, 0x6f, 0x02, 0xf7, 0x5c, 0x7e, 0x09, 0xb5, 0x0a, 0xfe, 0xb7, 0x01, - 0x66, 0xd1, 0x50, 0x77, 0xf0, 0xff, 0xcd, 0xff, 0xdf, 0x66, 0x87, 0xde, 0x83, 0x2e, 0x77, 0xa8, - 0x3f, 0x09, 0xa7, 0x53, 0x46, 0xb8, 0xb9, 0x39, 0x32, 0xc6, 0x4d, 0x0c, 0x42, 0xf4, 0x5c, 0x4a, - 0xd0, 0x01, 0x0c, 0x5c, 0xd5, 0xc5, 0x93, 0x88, 0x5c, 0x53, 0x26, 0x90, 0xdb, 0x32, 0xb0, 0xbb, - 0x6e, 0xd2, 0xdd, 0x4a, 0x8c, 0x6c, 0xe8, 0x53, 0xef, 0x66, 0x22, 0x87, 0x87, 0xfc, 0xf4, 0x3b, - 0x12, 0xad, 0x4b, 0xbd, 0x9b, 0x1f, 0xa8, 0x4f, 0xce, 0xc5, 0x04, 0xf8, 0x1c, 0x76, 0x56, 0xc9, - 0x9f, 0x05, 0x1e, 0xb9, 0xa9, 0x55, 0xb4, 0x1f, 0xd3, 0xc5, 0xd6, 0x66, 0xba, 0x64, 0x87, 0x80, - 0xa8, 0x10, 0x28, 0xbf, 0x6e, 0x18, 0x70, 0x12, 0x70, 0x09, 0xd0, 0xc3, 0x03, 0x79, 0x22, 0x9c, - 0x9f, 0x28, 0xb9, 0xfd, 0xbb, 0x01, 0xf7, 0x56, 0x48, 0xa7, 0x0e, 0x77, 0x6a, 0xb5, 0x9e, 0x05, - 0x9d, 0x65, 0xf6, 0x77, 0xd4, 0x59, 0xb2, 0x17, 0x63, 0x51, 0x57, 0xaf, 0x21, 0x4f, 0xf4, 0xae, - 0x6c, 0x00, 0x0a, 0x27, 0x01, 0x21, 0x9e, 0x9a, 0xae, 0xea, 0x1a, 0x3a, 0x4a, 0x70, 0xe6, 0xd9, - 0x5f, 0xa7, 0x6b, 0xa3, 0x42, 0xd3, 0x39, 0xbe, 0x0f, 0xbd, 0x92, 0xec, 0xba, 0xd3, 0x54, 0x62, - 0x9f, 0x01, 0x52, 0xc6, 0xcf, 0xc2, 0x38, 0xa8, 0x37, 0x53, 0xee, 0xc1, 0x30, 0x63, 0xa2, 0x1b, - 0xfb, 0x09, 0x6c, 0x2b, 0xf1, 0x8b, 0x60, 0x5e, 0x1b, 0x6b, 0x37, 0x29, 0xeb, 0xd2, 0x48, 0xa3, - 0x1d, 0x27, 0x4e, 0xb2, 0x04, 0x77, 0x2b, 0xd8, 0x4e, 0x12, 0x41, 0x96, 0xe3, 0x56, 0x01, 0xbf, - 0xa0, 0x3f, 0x89, 0x79, 0xae, 0xb0, 0x56, 0xea, 0x89, 0x58, 0xab, 0xff, 0x0c, 0x70, 0x4a, 0xd9, - 0x95, 0xfa, 0xc4, 0x44, 0xef, 0x7b, 0x34, 0xd2, 0x73, 0x4a, 0x2c, 0x85, 0xc4, 0xf1, 0x7d, 0x79, - 0x9f, 0x4d, 0x2c, 0x96, 0xe2, 0xca, 0x62, 0x46, 0x3c, 0x79, 0x91, 0x4d, 0x2c, 
0xd7, 0x42, 0x36, - 0x8d, 0x88, 0xba, 0xc6, 0x26, 0x96, 0x6b, 0xfb, 0x0f, 0x03, 0xb6, 0x9e, 0x91, 0xb9, 0x46, 0xde, - 0x03, 0x98, 0x85, 0x51, 0x18, 0x73, 0x1a, 0x10, 0x26, 0x1d, 0xb4, 0x70, 0x4a, 0xf2, 0xdf, 0xfd, - 0xc8, 0x16, 0x22, 0xfe, 0x54, 0x76, 0x4a, 0x13, 0xcb, 0xb5, 0x90, 0x5d, 0x12, 0x67, 0xa1, 0x3f, - 0x55, 0xb9, 0x16, 0x0c, 0xcc, 0xb8, 0xe3, 0x5e, 0xc9, 0x2f, 0xb3, 0x89, 0xd5, 0xe6, 0xf8, 0x2f, - 0x80, 0x9e, 0x6e, 0x28, 0xf9, 0x04, 0x40, 0xaf, 0xa0, 0x9b, 0x7a, 0x3a, 0xa0, 0x0f, 0x8b, 0x2f, - 0x84, 0xe2, 0x53, 0xc4, 0xfa, 0xa8, 0x42, 0x4b, 0x17, 0x7b, 0x03, 0x05, 0xf0, 0x76, 0x81, 0x9a, - 0xd1, 0xa3, 0xa2, 0xf5, 0x3a, 0xe2, 0xb7, 0x1e, 0xd7, 0xd2, 0x5d, 0xfa, 0xe3, 0x30, 0x2c, 0xe1, - 0x5a, 0x74, 0x58, 0x81, 0x92, 0xe1, 0x7b, 0xeb, 0xe3, 0x9a, 0xda, 0x4b, 0xaf, 0xaf, 0x01, 0x15, - 0x89, 0x18, 0x3d, 0xae, 0x84, 0x59, 0x11, 0xbd, 0x75, 0x58, 0x4f, 0x79, 0x6d, 0xa2, 0x8a, 0xa2, - 0x2b, 0x13, 0xcd, 0x3c, 0x02, 0x2a, 0x13, 0xcd, 0xf1, 0xfe, 0x06, 0xba, 0x82, 0x41, 0x9e, 0xbe, - 0xd1, 0xc1, 0xba, 0x37, 0x65, 0xe1, 0x75, 0x60, 0x3d, 0xaa, 0xa3, 0xba, 0x74, 0x36, 0x81, 0x5e, - 0x9a, 0x64, 0x51, 0x49, 0xd3, 0x95, 0x3c, 0x17, 0xac, 0xfd, 0x2a, 0xb5, 0x74, 0x36, 0x79, 0xd2, - 0x2d, 0xcb, 0x66, 0x0d, 0xa3, 0x97, 0x65, 0xb3, 0x8e, 0xc3, 0xed, 0x0d, 0xf4, 0x2b, 0xdc, 0xcd, - 0xb1, 0x15, 0x1a, 0xdf, 0x06, 0x90, 0xe6, 0x41, 0xeb, 0xa0, 0x86, 0x66, 0xe2, 0xe9, 0x53, 0x03, - 0xcd, 0xe0, 0xad, 0x2c, 0x69, 0xa0, 0x87, 0xb7, 0x01, 0xa4, 0x18, 0xcf, 0x1a, 0x57, 0x2b, 0xa6, - 0x1c, 0xbd, 0x82, 0x6e, 0x8a, 0x2d, 0xca, 0x86, 0x47, 0x91, 0x7f, 0xca, 0x86, 0x47, 0x19, 0xe5, - 0x6c, 0xa0, 0x0b, 0xe8, 0x67, 0xf8, 0x03, 0xed, 0xaf, 0xb3, 0xcc, 0xb2, 0x92, 0xf5, 0xb0, 0x52, - 0x2f, 0xdd, 0x64, 0x69, 0x5a, 0x41, 0x6b, 0x83, 0xcb, 0x0e, 0xc0, 0xfd, 0x2a, 0xb5, 0xc4, 0xc1, - 0xc5, 0xa6, 0xfc, 0xc9, 0x7b, 0xf2, 0x4f, 0x00, 0x00, 0x00, 0xff, 0xff, 0xdb, 0x3c, 0x6d, 0xd7, - 0xfb, 0x0d, 0x00, 0x00, -} diff --git a/weed/pb/volume_server_pb/volume_server_helper.go 
b/weed/pb/volume_server_pb/volume_server_helper.go new file mode 100644 index 000000000..356be27ff --- /dev/null +++ b/weed/pb/volume_server_pb/volume_server_helper.go @@ -0,0 +1,5 @@ +package volume_server_pb + +func (m *RemoteFile) BackendName() string { + return m.BackendType + "." + m.BackendId +} diff --git a/weed/query/json/query_json.go b/weed/query/json/query_json.go new file mode 100644 index 000000000..46f3b1b56 --- /dev/null +++ b/weed/query/json/query_json.go @@ -0,0 +1,107 @@ +package json + +import ( + "strconv" + + "github.com/chrislusf/seaweedfs/weed/query/sqltypes" + "github.com/tidwall/gjson" + "github.com/tidwall/match" +) + +type Query struct { + Field string + Op string + Value string +} + +func QueryJson(jsonLine string, projections []string, query Query) (passedFilter bool, values []sqltypes.Value) { + if filterJson(jsonLine, query) { + passedFilter = true + fields := gjson.GetMany(jsonLine, projections...) + for _, f := range fields { + values = append(values, sqltypes.MakeTrusted(sqltypes.Type(f.Type), sqltypes.StringToBytes(f.Raw))) + } + return + } + return false, nil +} + +func filterJson(jsonLine string, query Query) bool { + + value := gjson.Get(jsonLine, query.Field) + + // copied from gjson.go queryMatches() function + rpv := query.Value + + if !value.Exists() { + return false + } + if query.Op == "" { + // the query is only looking for existence, such as: + // friends.#(name) + // which makes sure that the array "friends" has an element of + // "name" that exists + return true + } + switch value.Type { + case gjson.String: + switch query.Op { + case "=": + return value.Str == rpv + case "!=": + return value.Str != rpv + case "<": + return value.Str < rpv + case "<=": + return value.Str <= rpv + case ">": + return value.Str > rpv + case ">=": + return value.Str >= rpv + case "%": + return match.Match(value.Str, rpv) + case "!%": + return !match.Match(value.Str, rpv) + } + case gjson.Number: + rpvn, _ := strconv.ParseFloat(rpv, 64) + 
switch query.Op { + case "=": + return value.Num == rpvn + case "!=": + return value.Num != rpvn + case "<": + return value.Num < rpvn + case "<=": + return value.Num <= rpvn + case ">": + return value.Num > rpvn + case ">=": + return value.Num >= rpvn + } + case gjson.True: + switch query.Op { + case "=": + return rpv == "true" + case "!=": + return rpv != "true" + case ">": + return rpv == "false" + case ">=": + return true + } + case gjson.False: + switch query.Op { + case "=": + return rpv == "false" + case "!=": + return rpv != "false" + case "<": + return rpv == "true" + case "<=": + return true + } + } + return false + +} diff --git a/weed/query/json/query_json_test.go b/weed/query/json/query_json_test.go new file mode 100644 index 000000000..1794bb333 --- /dev/null +++ b/weed/query/json/query_json_test.go @@ -0,0 +1,133 @@ +package json + +import ( + "testing" + + "github.com/tidwall/gjson" +) + +func TestGjson(t *testing.T) { + data := ` + { + "quiz": { + "sport": { + "q1": { + "question": "Which one is correct team name in NBA?", + "options": [ + "New York Bulls", + "Los Angeles Kings", + "Golden State Warriros", + "Huston Rocket" + ], + "answer": "Huston Rocket" + } + }, + "maths": { + "q1": { + "question": "5 + 7 = ?", + "options": [ + "10", + "11", + "12", + "13" + ], + "answer": "12" + }, + "q2": { + "question": "12 - 8 = ?", + "options": [ + "1", + "2", + "3", + "4" + ], + "answer": "4" + } + } + } + } + + { + "fruit": "Apple", + "size": "Large", + "quiz": "Red" + } + +` + + projections := []string{"quiz", "fruit"} + + gjson.ForEachLine(data, func(line gjson.Result) bool { + println(line.Raw) + println("+++++++++++") + results := gjson.GetMany(line.Raw, projections...) 
+ for _, result := range results { + println(result.Index, result.Type, result.String()) + } + println("-----------") + return true + }) + +} + +func TestJsonQueryRow(t *testing.T) { + + data := ` + { + "fruit": "Bl\"ue", + "size": 6, + "quiz": "green" + } + +` + selections := []string{"fruit", "size"} + + isFiltered, values := QueryJson(data, selections, Query{ + Field: "quiz", + Op: "=", + Value: "green", + }) + + if !isFiltered { + t.Errorf("should have been filtered") + } + + if values == nil { + t.Errorf("values should have been returned") + } + + buf := ToJson(nil, selections, values) + println(string(buf)) + +} + +func TestJsonQueryNumber(t *testing.T) { + + data := ` + { + "fruit": "Bl\"ue", + "size": 6, + "quiz": "green" + } + +` + selections := []string{"fruit", "quiz"} + + isFiltered, values := QueryJson(data, selections, Query{ + Field: "size", + Op: ">=", + Value: "6", + }) + + if !isFiltered { + t.Errorf("should have been filtered") + } + + if values == nil { + t.Errorf("values should have been returned") + } + + buf := ToJson(nil, selections, values) + println(string(buf)) + +} diff --git a/weed/query/json/seralize.go b/weed/query/json/seralize.go new file mode 100644 index 000000000..9bbddc2ff --- /dev/null +++ b/weed/query/json/seralize.go @@ -0,0 +1,17 @@ +package json + +import "github.com/chrislusf/seaweedfs/weed/query/sqltypes" + +func ToJson(buf []byte, selections []string, values []sqltypes.Value) []byte { + buf = append(buf, '{') + for i, value := range values { + if i > 0 { + buf = append(buf, ',') + } + buf = append(buf, selections[i]...) + buf = append(buf, ':') + buf = append(buf, value.Raw()...) + } + buf = append(buf, '}') + return buf +} diff --git a/weed/query/sqltypes/const.go b/weed/query/sqltypes/const.go new file mode 100644 index 000000000..f1d0f540e --- /dev/null +++ b/weed/query/sqltypes/const.go @@ -0,0 +1,124 @@ +package sqltypes + +// copied from vitness + +// Flag allows us to qualify types by their common properties. 
+type Flag int32 + +const ( + Flag_NONE Flag = 0 + Flag_ISINTEGRAL Flag = 256 + Flag_ISUNSIGNED Flag = 512 + Flag_ISFLOAT Flag = 1024 + Flag_ISQUOTED Flag = 2048 + Flag_ISTEXT Flag = 4096 + Flag_ISBINARY Flag = 8192 +) + +var Flag_name = map[int32]string{ + 0: "NONE", + 256: "ISINTEGRAL", + 512: "ISUNSIGNED", + 1024: "ISFLOAT", + 2048: "ISQUOTED", + 4096: "ISTEXT", + 8192: "ISBINARY", +} +var Flag_value = map[string]int32{ + "NONE": 0, + "ISINTEGRAL": 256, + "ISUNSIGNED": 512, + "ISFLOAT": 1024, + "ISQUOTED": 2048, + "ISTEXT": 4096, + "ISBINARY": 8192, +} + +// Type defines the various supported data types in bind vars +// and query results. +type Type int32 + +const ( + // NULL_TYPE specifies a NULL type. + Type_NULL_TYPE Type = 0 + // INT8 specifies a TINYINT type. + // Properties: 1, IsNumber. + Type_INT8 Type = 257 + // UINT8 specifies a TINYINT UNSIGNED type. + // Properties: 2, IsNumber, IsUnsigned. + Type_UINT8 Type = 770 + // INT16 specifies a SMALLINT type. + // Properties: 3, IsNumber. + Type_INT16 Type = 259 + // UINT16 specifies a SMALLINT UNSIGNED type. + // Properties: 4, IsNumber, IsUnsigned. + Type_UINT16 Type = 772 + // INT24 specifies a MEDIUMINT type. + // Properties: 5, IsNumber. + Type_INT32 Type = 263 + // UINT32 specifies a INTEGER UNSIGNED type. + // Properties: 8, IsNumber, IsUnsigned. + Type_UINT32 Type = 776 + // INT64 specifies a BIGINT type. + // Properties: 9, IsNumber. + Type_INT64 Type = 265 + // UINT64 specifies a BIGINT UNSIGNED type. + // Properties: 10, IsNumber, IsUnsigned. + Type_UINT64 Type = 778 + // FLOAT32 specifies a FLOAT type. + // Properties: 11, IsFloat. + Type_FLOAT32 Type = 1035 + // FLOAT64 specifies a DOUBLE or REAL type. + // Properties: 12, IsFloat. + Type_FLOAT64 Type = 1036 + // TIMESTAMP specifies a TIMESTAMP type. + // Properties: 13, IsQuoted. + Type_TIMESTAMP Type = 2061 + // DATE specifies a DATE type. + // Properties: 14, IsQuoted. + Type_DATE Type = 2062 + // TIME specifies a TIME type. 
+ // Properties: 15, IsQuoted. + Type_TIME Type = 2063 + // DATETIME specifies a DATETIME type. + // Properties: 16, IsQuoted. + Type_DATETIME Type = 2064 + // YEAR specifies a YEAR type. + // Properties: 17, IsNumber, IsUnsigned. + Type_YEAR Type = 785 + // DECIMAL specifies a DECIMAL or NUMERIC type. + // Properties: 18, None. + Type_DECIMAL Type = 18 + // TEXT specifies a TEXT type. + // Properties: 19, IsQuoted, IsText. + Type_TEXT Type = 6163 + // BLOB specifies a BLOB type. + // Properties: 20, IsQuoted, IsBinary. + Type_BLOB Type = 10260 + // VARCHAR specifies a VARCHAR type. + // Properties: 21, IsQuoted, IsText. + Type_VARCHAR Type = 6165 + // VARBINARY specifies a VARBINARY type. + // Properties: 22, IsQuoted, IsBinary. + Type_VARBINARY Type = 10262 + // CHAR specifies a CHAR type. + // Properties: 23, IsQuoted, IsText. + Type_CHAR Type = 6167 + // BINARY specifies a BINARY type. + // Properties: 24, IsQuoted, IsBinary. + Type_BINARY Type = 10264 + // BIT specifies a BIT type. + // Properties: 25, IsQuoted. + Type_BIT Type = 2073 + // JSON specifies a JSON type. + // Properties: 30, IsQuoted. + Type_JSON Type = 2078 +) + +// BindVariable represents a single bind variable in a Query. +type BindVariable struct { + Type Type `protobuf:"varint,1,opt,name=type,enum=query.Type" json:"type,omitempty"` + Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` + // values are set if type is TUPLE. + Values []*Value `protobuf:"bytes,3,rep,name=values" json:"values,omitempty"` +} diff --git a/weed/query/sqltypes/type.go b/weed/query/sqltypes/type.go new file mode 100644 index 000000000..f4f3dd471 --- /dev/null +++ b/weed/query/sqltypes/type.go @@ -0,0 +1,101 @@ +package sqltypes + +// These bit flags can be used to query on the +// common properties of types. 
+const ( + flagIsIntegral = int(Flag_ISINTEGRAL) + flagIsUnsigned = int(Flag_ISUNSIGNED) + flagIsFloat = int(Flag_ISFLOAT) + flagIsQuoted = int(Flag_ISQUOTED) + flagIsText = int(Flag_ISTEXT) + flagIsBinary = int(Flag_ISBINARY) +) + +// IsIntegral returns true if Type is an integral +// (signed/unsigned) that can be represented using +// up to 64 binary bits. +// If you have a Value object, use its member function. +func IsIntegral(t Type) bool { + return int(t)&flagIsIntegral == flagIsIntegral +} + +// IsSigned returns true if Type is a signed integral. +// If you have a Value object, use its member function. +func IsSigned(t Type) bool { + return int(t)&(flagIsIntegral|flagIsUnsigned) == flagIsIntegral +} + +// IsUnsigned returns true if Type is an unsigned integral. +// Caution: this is not the same as !IsSigned. +// If you have a Value object, use its member function. +func IsUnsigned(t Type) bool { + return int(t)&(flagIsIntegral|flagIsUnsigned) == flagIsIntegral|flagIsUnsigned +} + +// IsFloat returns true is Type is a floating point. +// If you have a Value object, use its member function. +func IsFloat(t Type) bool { + return int(t)&flagIsFloat == flagIsFloat +} + +// IsQuoted returns true if Type is a quoted text or binary. +// If you have a Value object, use its member function. +func IsQuoted(t Type) bool { + return (int(t)&flagIsQuoted == flagIsQuoted) && t != Bit +} + +// IsText returns true if Type is a text. +// If you have a Value object, use its member function. +func IsText(t Type) bool { + return int(t)&flagIsText == flagIsText +} + +// IsBinary returns true if Type is a binary. +// If you have a Value object, use its member function. +func IsBinary(t Type) bool { + return int(t)&flagIsBinary == flagIsBinary +} + +// isNumber returns true if the type is any type of number. +func isNumber(t Type) bool { + return IsIntegral(t) || IsFloat(t) || t == Decimal +} + +// IsTemporal returns true if Value is time type. 
+func IsTemporal(t Type) bool { + switch t { + case Timestamp, Date, Time, Datetime: + return true + } + return false +} + +// Vitess data types. These are idiomatically +// named synonyms for the Type values. +const ( + Null = Type_NULL_TYPE + Int8 = Type_INT8 + Uint8 = Type_UINT8 + Int16 = Type_INT16 + Uint16 = Type_UINT16 + Int32 = Type_INT32 + Uint32 = Type_UINT32 + Int64 = Type_INT64 + Uint64 = Type_UINT64 + Float32 = Type_FLOAT32 + Float64 = Type_FLOAT64 + Timestamp = Type_TIMESTAMP + Date = Type_DATE + Time = Type_TIME + Datetime = Type_DATETIME + Year = Type_YEAR + Decimal = Type_DECIMAL + Text = Type_TEXT + Blob = Type_BLOB + VarChar = Type_VARCHAR + VarBinary = Type_VARBINARY + Char = Type_CHAR + Binary = Type_BINARY + Bit = Type_BIT + TypeJSON = Type_JSON +) diff --git a/weed/query/sqltypes/unsafe.go b/weed/query/sqltypes/unsafe.go new file mode 100644 index 000000000..e322c92ce --- /dev/null +++ b/weed/query/sqltypes/unsafe.go @@ -0,0 +1,30 @@ +package sqltypes + +import ( + "reflect" + "unsafe" +) + +// BytesToString casts slice to string without copy +func BytesToString(b []byte) (s string) { + if len(b) == 0 { + return "" + } + + bh := (*reflect.SliceHeader)(unsafe.Pointer(&b)) + sh := reflect.StringHeader{Data: bh.Data, Len: bh.Len} + + return *(*string)(unsafe.Pointer(&sh)) +} + +// StringToBytes casts string to slice without copy +func StringToBytes(s string) []byte { + if len(s) == 0 { + return []byte{} + } + + sh := (*reflect.StringHeader)(unsafe.Pointer(&s)) + bh := reflect.SliceHeader{Data: sh.Data, Len: sh.Len, Cap: sh.Len} + + return *(*[]byte)(unsafe.Pointer(&bh)) +} diff --git a/weed/query/sqltypes/value.go b/weed/query/sqltypes/value.go new file mode 100644 index 000000000..012de2b45 --- /dev/null +++ b/weed/query/sqltypes/value.go @@ -0,0 +1,355 @@ +package sqltypes + +import ( + "fmt" + "strconv" + "time" +) + +var ( + // NULL represents the NULL value. + NULL = Value{} + // DontEscape tells you if a character should not be escaped. 
+ DontEscape = byte(255) + nullstr = []byte("null") +) + +type Value struct { + typ Type + val []byte +} + +// NewValue builds a Value using typ and val. If the value and typ +// don't match, it returns an error. +func NewValue(typ Type, val []byte) (v Value, err error) { + switch { + case IsSigned(typ): + if _, err := strconv.ParseInt(string(val), 0, 64); err != nil { + return NULL, err + } + return MakeTrusted(typ, val), nil + case IsUnsigned(typ): + if _, err := strconv.ParseUint(string(val), 0, 64); err != nil { + return NULL, err + } + return MakeTrusted(typ, val), nil + case IsFloat(typ) || typ == Decimal: + if _, err := strconv.ParseFloat(string(val), 64); err != nil { + return NULL, err + } + return MakeTrusted(typ, val), nil + case IsQuoted(typ) || typ == Bit || typ == Null: + return MakeTrusted(typ, val), nil + } + // All other types are unsafe or invalid. + return NULL, fmt.Errorf("invalid type specified for MakeValue: %v", typ) +} + +// MakeTrusted makes a new Value based on the type. +// This function should only be used if you know the value +// and type conform to the rules. Every place this function is +// called, a comment is needed that explains why it's justified. +// Exceptions: The current package and mysql package do not need +// comments. Other packages can also use the function to create +// VarBinary or VarChar values. +func MakeTrusted(typ Type, val []byte) Value { + + if typ == Null { + return NULL + } + + return Value{typ: typ, val: val} +} + +// NewInt64 builds an Int64 Value. +func NewInt64(v int64) Value { + return MakeTrusted(Int64, strconv.AppendInt(nil, v, 10)) +} + +// NewInt32 builds an Int64 Value. +func NewInt32(v int32) Value { + return MakeTrusted(Int32, strconv.AppendInt(nil, int64(v), 10)) +} + +// NewUint64 builds an Uint64 Value. +func NewUint64(v uint64) Value { + return MakeTrusted(Uint64, strconv.AppendUint(nil, v, 10)) +} + +// NewFloat32 builds an Float64 Value. 
+func NewFloat32(v float32) Value { + return MakeTrusted(Float32, strconv.AppendFloat(nil, float64(v), 'f', -1, 64)) +} + +// NewFloat64 builds an Float64 Value. +func NewFloat64(v float64) Value { + return MakeTrusted(Float64, strconv.AppendFloat(nil, v, 'g', -1, 64)) +} + +// NewVarChar builds a VarChar Value. +func NewVarChar(v string) Value { + return MakeTrusted(VarChar, []byte(v)) +} + +// NewVarBinary builds a VarBinary Value. +// The input is a string because it's the most common use case. +func NewVarBinary(v string) Value { + return MakeTrusted(VarBinary, []byte(v)) +} + +// NewIntegral builds an integral type from a string representation. +// The type will be Int64 or Uint64. Int64 will be preferred where possible. +func NewIntegral(val string) (n Value, err error) { + signed, err := strconv.ParseInt(val, 0, 64) + if err == nil { + return MakeTrusted(Int64, strconv.AppendInt(nil, signed, 10)), nil + } + unsigned, err := strconv.ParseUint(val, 0, 64) + if err != nil { + return Value{}, err + } + return MakeTrusted(Uint64, strconv.AppendUint(nil, unsigned, 10)), nil +} + +// MakeString makes a VarBinary Value. +func MakeString(val []byte) Value { + return MakeTrusted(VarBinary, val) +} + +// BuildValue builds a value from any go type. sqltype.Value is +// also allowed. +func BuildValue(goval interface{}) (v Value, err error) { + // Look for the most common types first. 
+ switch goval := goval.(type) { + case nil: + // no op + case []byte: + v = MakeTrusted(VarBinary, goval) + case int64: + v = MakeTrusted(Int64, strconv.AppendInt(nil, int64(goval), 10)) + case uint64: + v = MakeTrusted(Uint64, strconv.AppendUint(nil, uint64(goval), 10)) + case float64: + v = MakeTrusted(Float64, strconv.AppendFloat(nil, goval, 'f', -1, 64)) + case int: + v = MakeTrusted(Int64, strconv.AppendInt(nil, int64(goval), 10)) + case int8: + v = MakeTrusted(Int8, strconv.AppendInt(nil, int64(goval), 10)) + case int16: + v = MakeTrusted(Int16, strconv.AppendInt(nil, int64(goval), 10)) + case int32: + v = MakeTrusted(Int32, strconv.AppendInt(nil, int64(goval), 10)) + case uint: + v = MakeTrusted(Uint64, strconv.AppendUint(nil, uint64(goval), 10)) + case uint8: + v = MakeTrusted(Uint8, strconv.AppendUint(nil, uint64(goval), 10)) + case uint16: + v = MakeTrusted(Uint16, strconv.AppendUint(nil, uint64(goval), 10)) + case uint32: + v = MakeTrusted(Uint32, strconv.AppendUint(nil, uint64(goval), 10)) + case float32: + v = MakeTrusted(Float32, strconv.AppendFloat(nil, float64(goval), 'f', -1, 64)) + case string: + v = MakeTrusted(VarBinary, []byte(goval)) + case time.Time: + v = MakeTrusted(Datetime, []byte(goval.Format("2006-01-02 15:04:05"))) + case Value: + v = goval + case *BindVariable: + return ValueFromBytes(goval.Type, goval.Value) + default: + return v, fmt.Errorf("unexpected type %T: %v", goval, goval) + } + return v, nil +} + +// BuildConverted is like BuildValue except that it tries to +// convert a string or []byte to an integral if the target type +// is an integral. We don't perform other implicit conversions +// because they're unsafe. 
+func BuildConverted(typ Type, goval interface{}) (v Value, err error) { + if IsIntegral(typ) { + switch goval := goval.(type) { + case []byte: + return ValueFromBytes(typ, goval) + case string: + return ValueFromBytes(typ, []byte(goval)) + case Value: + if goval.IsQuoted() { + return ValueFromBytes(typ, goval.Raw()) + } + } + } + return BuildValue(goval) +} + +// ValueFromBytes builds a Value using typ and val. It ensures that val +// matches the requested type. If type is an integral it's converted to +// a canonical form. Otherwise, the original representation is preserved. +func ValueFromBytes(typ Type, val []byte) (v Value, err error) { + switch { + case IsSigned(typ): + signed, err := strconv.ParseInt(string(val), 0, 64) + if err != nil { + return NULL, err + } + v = MakeTrusted(typ, strconv.AppendInt(nil, signed, 10)) + case IsUnsigned(typ): + unsigned, err := strconv.ParseUint(string(val), 0, 64) + if err != nil { + return NULL, err + } + v = MakeTrusted(typ, strconv.AppendUint(nil, unsigned, 10)) + case IsFloat(typ) || typ == Decimal: + _, err := strconv.ParseFloat(string(val), 64) + if err != nil { + return NULL, err + } + // After verification, we preserve the original representation. + fallthrough + default: + v = MakeTrusted(typ, val) + } + return v, nil +} + +// BuildIntegral builds an integral type from a string representation. +// The type will be Int64 or Uint64. Int64 will be preferred where possible. +func BuildIntegral(val string) (n Value, err error) { + signed, err := strconv.ParseInt(val, 0, 64) + if err == nil { + return MakeTrusted(Int64, strconv.AppendInt(nil, signed, 10)), nil + } + unsigned, err := strconv.ParseUint(val, 0, 64) + if err != nil { + return Value{}, err + } + return MakeTrusted(Uint64, strconv.AppendUint(nil, unsigned, 10)), nil +} + +// Type returns the type of Value. +func (v Value) Type() Type { + return v.typ +} + +// Raw returns the raw bytes. All types are currently implemented as []byte. 
+// You should avoid using this function. If you do, you should treat the +// bytes as read-only. +func (v Value) Raw() []byte { + return v.val +} + +// Len returns the length. +func (v Value) Len() int { + return len(v.val) +} + +// Values represents the array of Value. +type Values []Value + +// Len implements the interface. +func (vs Values) Len() int { + len := 0 + for _, v := range vs { + len += v.Len() + } + return len +} + +// String returns the raw value as a string. +func (v Value) String() string { + return BytesToString(v.val) +} + +// ToNative converts Value to a native go type. +// This does not work for sqltypes.Tuple. The function +// panics if there are inconsistencies. +func (v Value) ToNative() interface{} { + var out interface{} + var err error + switch { + case v.typ == Null: + // no-op + case IsSigned(v.typ): + out, err = v.ParseInt64() + case IsUnsigned(v.typ): + out, err = v.ParseUint64() + case IsFloat(v.typ): + out, err = v.ParseFloat64() + default: + out = v.val + } + if err != nil { + panic(err) + } + return out +} + +// ParseInt64 will parse a Value into an int64. It does +// not check the type. +func (v Value) ParseInt64() (val int64, err error) { + return strconv.ParseInt(v.String(), 10, 64) +} + +// ParseUint64 will parse a Value into a uint64. It does +// not check the type. +func (v Value) ParseUint64() (val uint64, err error) { + return strconv.ParseUint(v.String(), 10, 64) +} + +// ParseFloat64 will parse a Value into an float64. It does +// not check the type. +func (v Value) ParseFloat64() (val float64, err error) { + return strconv.ParseFloat(v.String(), 64) +} + +// IsNull returns true if Value is null. +func (v Value) IsNull() bool { + return v.typ == Null +} + +// IsIntegral returns true if Value is an integral. +func (v Value) IsIntegral() bool { + return IsIntegral(v.typ) +} + +// IsSigned returns true if Value is a signed integral. 
+func (v Value) IsSigned() bool { + return IsSigned(v.typ) +} + +// IsUnsigned returns true if Value is an unsigned integral. +func (v Value) IsUnsigned() bool { + return IsUnsigned(v.typ) +} + +// IsFloat returns true if Value is a float. +func (v Value) IsFloat() bool { + return IsFloat(v.typ) +} + +// IsQuoted returns true if Value must be SQL-quoted. +func (v Value) IsQuoted() bool { + return IsQuoted(v.typ) +} + +// IsText returns true if Value is a collatable text. +func (v Value) IsText() bool { + return IsText(v.typ) +} + +// IsBinary returns true if Value is binary. +func (v Value) IsBinary() bool { + return IsBinary(v.typ) +} + +// IsTemporal returns true if Value is time type. +func (v Value) IsTemporal() bool { + return IsTemporal(v.typ) +} + +// ToString returns the value as MySQL would return it as string. +// If the value is not convertible like in the case of Expression, it returns nil. +func (v Value) ToString() string { + return BytesToString(v.val) +} diff --git a/weed/replication/replicator.go b/weed/replication/replicator.go index ac8235fd5..051199adb 100644 --- a/weed/replication/replicator.go +++ b/weed/replication/replicator.go @@ -1,7 +1,8 @@ package replication import ( - "path/filepath" + "context" + "fmt" "strings" "github.com/chrislusf/seaweedfs/weed/glog" @@ -16,10 +17,10 @@ type Replicator struct { source *source.FilerSource } -func NewReplicator(sourceConfig util.Configuration, dataSink sink.ReplicationSink) *Replicator { +func NewReplicator(sourceConfig util.Configuration, configPrefix string, dataSink sink.ReplicationSink) *Replicator { source := &source.FilerSource{} - source.Initialize(sourceConfig) + source.Initialize(sourceConfig, configPrefix) dataSink.SetSourceFiler(source) @@ -29,12 +30,15 @@ func NewReplicator(sourceConfig util.Configuration, dataSink sink.ReplicationSin } } -func (r *Replicator) Replicate(key string, message *filer_pb.EventNotification) error { +func (r *Replicator) Replicate(ctx context.Context, key 
string, message *filer_pb.EventNotification) error { + if message.IsFromOtherCluster && r.sink.GetName() == "filer" { + return nil + } if !strings.HasPrefix(key, r.source.Dir) { glog.V(4).Infof("skipping %v outside of %v", key, r.source.Dir) return nil } - newKey := filepath.Join(r.sink.GetSinkToDirectory(), key[len(r.source.Dir):]) + newKey := util.Join(r.sink.GetSinkToDirectory(), key[len(r.source.Dir):]) glog.V(3).Infof("replicate %s => %s", key, newKey) key = newKey if message.OldEntry != nil && message.NewEntry == nil { @@ -50,12 +54,17 @@ func (r *Replicator) Replicate(key string, message *filer_pb.EventNotification) return nil } - foundExisting, err := r.sink.UpdateEntry(key, message.OldEntry, message.NewEntry, message.DeleteChunks) + foundExisting, err := r.sink.UpdateEntry(key, message.OldEntry, message.NewParentPath, message.NewEntry, message.DeleteChunks) if foundExisting { glog.V(4).Infof("updated %v", key) return err } + err = r.sink.DeleteEntry(key, message.OldEntry.IsDirectory, false) + if err != nil { + return fmt.Errorf("delete old entry %v: %v", key, err) + } + glog.V(4).Infof("creating missing %v", key) return r.sink.CreateEntry(key, message.NewEntry) } diff --git a/weed/replication/sink/azuresink/azure_sink.go b/weed/replication/sink/azuresink/azure_sink.go index 7acf37fa5..aef97c06e 100644 --- a/weed/replication/sink/azuresink/azure_sink.go +++ b/weed/replication/sink/azuresink/azure_sink.go @@ -35,12 +35,12 @@ func (g *AzureSink) GetSinkToDirectory() string { return g.dir } -func (g *AzureSink) Initialize(configuration util.Configuration) error { +func (g *AzureSink) Initialize(configuration util.Configuration, prefix string) error { return g.initialize( - configuration.GetString("account_name"), - configuration.GetString("account_key"), - configuration.GetString("container"), - configuration.GetString("directory"), + configuration.GetString(prefix+"account_name"), + configuration.GetString(prefix+"account_key"), + 
configuration.GetString(prefix+"container"), + configuration.GetString(prefix+"directory"), ) } @@ -78,9 +78,7 @@ func (g *AzureSink) DeleteEntry(key string, isDirectory, deleteIncludeChunks boo key = key + "/" } - ctx := context.Background() - - if _, err := g.containerURL.NewBlobURL(key).Delete(ctx, + if _, err := g.containerURL.NewBlobURL(key).Delete(context.Background(), azblob.DeleteSnapshotsOptionInclude, azblob.BlobAccessConditions{}); err != nil { return fmt.Errorf("azure delete %s/%s: %v", g.container, key, err) } @@ -98,15 +96,13 @@ func (g *AzureSink) CreateEntry(key string, entry *filer_pb.Entry) error { } totalSize := filer2.TotalSize(entry.Chunks) - chunkViews := filer2.ViewFromChunks(entry.Chunks, 0, int(totalSize)) - - ctx := context.Background() + chunkViews := filer2.ViewFromChunks(entry.Chunks, 0, int64(totalSize)) // Create a URL that references a to-be-created blob in your // Azure Storage account's container. appendBlobURL := g.containerURL.NewAppendBlobURL(key) - _, err := appendBlobURL.Create(ctx, azblob.BlobHTTPHeaders{}, azblob.Metadata{}, azblob.BlobAccessConditions{}) + _, err := appendBlobURL.Create(context.Background(), azblob.BlobHTTPHeaders{}, azblob.Metadata{}, azblob.BlobAccessConditions{}) if err != nil { return err } @@ -119,8 +115,8 @@ func (g *AzureSink) CreateEntry(key string, entry *filer_pb.Entry) error { } var writeErr error - _, readErr := util.ReadUrlAsStream(fileUrl, chunk.Offset, int(chunk.Size), func(data []byte) { - _, writeErr = appendBlobURL.AppendBlock(ctx, bytes.NewReader(data), azblob.AppendBlobAccessConditions{}, nil) + readErr := util.ReadUrlAsStream(fileUrl, nil, false, chunk.IsFullChunk(), chunk.Offset, int(chunk.Size), func(data []byte) { + _, writeErr = appendBlobURL.AppendBlock(context.Background(), bytes.NewReader(data), azblob.AppendBlobAccessConditions{}, nil) }) if readErr != nil { @@ -136,7 +132,7 @@ func (g *AzureSink) CreateEntry(key string, entry *filer_pb.Entry) error { } -func (g *AzureSink) 
UpdateEntry(key string, oldEntry, newEntry *filer_pb.Entry, deleteIncludeChunks bool) (foundExistingEntry bool, err error) { +func (g *AzureSink) UpdateEntry(key string, oldEntry *filer_pb.Entry, newParentPath string, newEntry *filer_pb.Entry, deleteIncludeChunks bool) (foundExistingEntry bool, err error) { key = cleanKey(key) // TODO improve efficiency return false, nil diff --git a/weed/replication/sink/b2sink/b2_sink.go b/weed/replication/sink/b2sink/b2_sink.go index 17f5e39b2..1e7d82ed4 100644 --- a/weed/replication/sink/b2sink/b2_sink.go +++ b/weed/replication/sink/b2sink/b2_sink.go @@ -31,12 +31,12 @@ func (g *B2Sink) GetSinkToDirectory() string { return g.dir } -func (g *B2Sink) Initialize(configuration util.Configuration) error { +func (g *B2Sink) Initialize(configuration util.Configuration, prefix string) error { return g.initialize( - configuration.GetString("b2_account_id"), - configuration.GetString("b2_master_application_key"), - configuration.GetString("bucket"), - configuration.GetString("directory"), + configuration.GetString(prefix+"b2_account_id"), + configuration.GetString(prefix+"b2_master_application_key"), + configuration.GetString(prefix+"bucket"), + configuration.GetString(prefix+"directory"), ) } @@ -45,8 +45,7 @@ func (g *B2Sink) SetSourceFiler(s *source.FilerSource) { } func (g *B2Sink) initialize(accountId, accountKey, bucket, dir string) error { - ctx := context.Background() - client, err := b2.NewClient(ctx, accountId, accountKey) + client, err := b2.NewClient(context.Background(), accountId, accountKey) if err != nil { return err } @@ -66,16 +65,14 @@ func (g *B2Sink) DeleteEntry(key string, isDirectory, deleteIncludeChunks bool) key = key + "/" } - ctx := context.Background() - - bucket, err := g.client.Bucket(ctx, g.bucket) + bucket, err := g.client.Bucket(context.Background(), g.bucket) if err != nil { return err } targetObject := bucket.Object(key) - return targetObject.Delete(ctx) + return 
targetObject.Delete(context.Background()) } @@ -88,17 +85,15 @@ func (g *B2Sink) CreateEntry(key string, entry *filer_pb.Entry) error { } totalSize := filer2.TotalSize(entry.Chunks) - chunkViews := filer2.ViewFromChunks(entry.Chunks, 0, int(totalSize)) - - ctx := context.Background() + chunkViews := filer2.ViewFromChunks(entry.Chunks, 0, int64(totalSize)) - bucket, err := g.client.Bucket(ctx, g.bucket) + bucket, err := g.client.Bucket(context.Background(), g.bucket) if err != nil { return err } targetObject := bucket.Object(key) - writer := targetObject.NewWriter(ctx) + writer := targetObject.NewWriter(context.Background()) for _, chunk := range chunkViews { @@ -108,7 +103,7 @@ func (g *B2Sink) CreateEntry(key string, entry *filer_pb.Entry) error { } var writeErr error - _, readErr := util.ReadUrlAsStream(fileUrl, chunk.Offset, int(chunk.Size), func(data []byte) { + readErr := util.ReadUrlAsStream(fileUrl, nil, false, chunk.IsFullChunk(), chunk.Offset, int(chunk.Size), func(data []byte) { _, err := writer.Write(data) if err != nil { writeErr = err @@ -128,7 +123,7 @@ func (g *B2Sink) CreateEntry(key string, entry *filer_pb.Entry) error { } -func (g *B2Sink) UpdateEntry(key string, oldEntry, newEntry *filer_pb.Entry, deleteIncludeChunks bool) (foundExistingEntry bool, err error) { +func (g *B2Sink) UpdateEntry(key string, oldEntry *filer_pb.Entry, newParentPath string, newEntry *filer_pb.Entry, deleteIncludeChunks bool) (foundExistingEntry bool, err error) { key = cleanKey(key) diff --git a/weed/replication/sink/filersink/README.txt b/weed/replication/sink/filersink/README.txt new file mode 100644 index 000000000..4ba0fc752 --- /dev/null +++ b/weed/replication/sink/filersink/README.txt @@ -0,0 +1,12 @@ +How replication works +====== + +All metadata changes within current cluster would be notified to a message queue. + +If the meta data change is from other clusters, this metadata would change would not be notified to the message queue. 
+ +So active<=>active replication is possible. + + +All metadata changes would be published as metadata changes. +So all mounts listening for metadata changes will get updated. \ No newline at end of file diff --git a/weed/replication/sink/filersink/fetch_write.go b/weed/replication/sink/filersink/fetch_write.go index c14566723..bde29176c 100644 --- a/weed/replication/sink/filersink/fetch_write.go +++ b/weed/replication/sink/filersink/fetch_write.go @@ -3,41 +3,46 @@ package filersink import ( "context" "fmt" - "strings" "sync" + "google.golang.org/grpc" + "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/operation" + "github.com/chrislusf/seaweedfs/weed/pb" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" - "github.com/chrislusf/seaweedfs/weed/util" + "github.com/chrislusf/seaweedfs/weed/security" ) -func (fs *FilerSink) replicateChunks(sourceChunks []*filer_pb.FileChunk) (replicatedChunks []*filer_pb.FileChunk, err error) { +func (fs *FilerSink) replicateChunks(sourceChunks []*filer_pb.FileChunk, dir string) (replicatedChunks []*filer_pb.FileChunk, err error) { if len(sourceChunks) == 0 { return } + + replicatedChunks = make([]*filer_pb.FileChunk, len(sourceChunks)) + var wg sync.WaitGroup - for _, sourceChunk := range sourceChunks { + for chunkIndex, sourceChunk := range sourceChunks { wg.Add(1) - go func(chunk *filer_pb.FileChunk) { + go func(chunk *filer_pb.FileChunk, index int) { defer wg.Done() - replicatedChunk, e := fs.replicateOneChunk(chunk) + replicatedChunk, e := fs.replicateOneChunk(chunk, dir) if e != nil { err = e } - replicatedChunks = append(replicatedChunks, replicatedChunk) - }(sourceChunk) + replicatedChunks[index] = replicatedChunk + }(sourceChunk, chunkIndex) } wg.Wait() return } -func (fs *FilerSink) replicateOneChunk(sourceChunk *filer_pb.FileChunk) (*filer_pb.FileChunk, error) { +func (fs *FilerSink) replicateOneChunk(sourceChunk *filer_pb.FileChunk, dir string) (*filer_pb.FileChunk, error) { - fileId, err := 
fs.fetchAndWrite(sourceChunk) + fileId, err := fs.fetchAndWrite(sourceChunk, dir) if err != nil { - return nil, fmt.Errorf("copy %s: %v", sourceChunk.FileId, err) + return nil, fmt.Errorf("copy %s: %v", sourceChunk.GetFileIdString(), err) } return &filer_pb.FileChunk{ @@ -46,21 +51,24 @@ func (fs *FilerSink) replicateOneChunk(sourceChunk *filer_pb.FileChunk) (*filer_ Size: sourceChunk.Size, Mtime: sourceChunk.Mtime, ETag: sourceChunk.ETag, - SourceFileId: sourceChunk.FileId, + SourceFileId: sourceChunk.GetFileIdString(), + CipherKey: sourceChunk.CipherKey, + IsCompressed: sourceChunk.IsCompressed, }, nil } -func (fs *FilerSink) fetchAndWrite(sourceChunk *filer_pb.FileChunk) (fileId string, err error) { +func (fs *FilerSink) fetchAndWrite(sourceChunk *filer_pb.FileChunk, dir string) (fileId string, err error) { - filename, header, readCloser, err := fs.filerSource.ReadPart(sourceChunk.FileId) + filename, header, readCloser, err := fs.filerSource.ReadPart(sourceChunk.GetFileIdString()) if err != nil { - return "", fmt.Errorf("read part %s: %v", sourceChunk.FileId, err) + return "", fmt.Errorf("read part %s: %v", sourceChunk.GetFileIdString(), err) } defer readCloser.Close() var host string + var auth security.EncodedJwt - if err := fs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error { + if err := fs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { request := &filer_pb.AssignVolumeRequest{ Count: 1, @@ -68,6 +76,7 @@ func (fs *FilerSink) fetchAndWrite(sourceChunk *filer_pb.FileChunk) (fileId stri Collection: fs.collection, TtlSec: fs.ttlSec, DataCenter: fs.dataCenter, + ParentPath: dir, } resp, err := client.AssignVolume(context.Background(), request) @@ -75,8 +84,11 @@ func (fs *FilerSink) fetchAndWrite(sourceChunk *filer_pb.FileChunk) (fileId stri glog.V(0).Infof("assign volume failure %v: %v", request, err) return err } + if resp.Error != "" { + return fmt.Errorf("assign volume failure %v: %v", request, resp.Error) + } - fileId, host = 
resp.FileId, resp.Url + fileId, host, auth = resp.FileId, resp.Url, security.EncodedJwt(resp.Auth) return nil }); err != nil { @@ -87,8 +99,8 @@ func (fs *FilerSink) fetchAndWrite(sourceChunk *filer_pb.FileChunk) (fileId stri glog.V(4).Infof("replicating %s to %s header:%+v", filename, fileUrl, header) - uploadResult, err := operation.Upload(fileUrl, filename, readCloser, - "gzip" == header.Get("Content-Encoding"), header.Get("Content-Type"), nil, "") + // fetch data as is, regardless whether it is encrypted or not + uploadResult, err, _ := operation.Upload(fileUrl, filename, false, readCloser, "gzip" == header.Get("Content-Encoding"), header.Get("Content-Type"), nil, auth) if err != nil { glog.V(0).Infof("upload data %v to %s: %v", filename, fileUrl, err) return "", fmt.Errorf("upload data: %v", err) @@ -101,23 +113,16 @@ func (fs *FilerSink) fetchAndWrite(sourceChunk *filer_pb.FileChunk) (fileId stri return } -func (fs *FilerSink) withFilerClient(fn func(filer_pb.SeaweedFilerClient) error) error { +var _ = filer_pb.FilerClient(&FilerSink{}) - grpcConnection, err := util.GrpcDial(fs.grpcAddress) - if err != nil { - return fmt.Errorf("fail to dial %s: %v", fs.grpcAddress, err) - } - defer grpcConnection.Close() +func (fs *FilerSink) WithFilerClient(fn func(filer_pb.SeaweedFilerClient) error) error { - client := filer_pb.NewSeaweedFilerClient(grpcConnection) + return pb.WithCachedGrpcClient(func(grpcConnection *grpc.ClientConn) error { + client := filer_pb.NewSeaweedFilerClient(grpcConnection) + return fn(client) + }, fs.grpcAddress, fs.grpcDialOption) - return fn(client) } - -func volumeId(fileId string) string { - lastCommaIndex := strings.LastIndex(fileId, ",") - if lastCommaIndex > 0 { - return fileId[:lastCommaIndex] - } - return fileId +func (fs *FilerSink) AdjustedUrl(hostAndPort string) string { + return hostAndPort } diff --git a/weed/replication/sink/filersink/filer_sink.go b/weed/replication/sink/filersink/filer_sink.go index 2e9cc86d1..50721a8f3 100644 
--- a/weed/replication/sink/filersink/filer_sink.go +++ b/weed/replication/sink/filersink/filer_sink.go @@ -4,6 +4,10 @@ import ( "context" "fmt" + "google.golang.org/grpc" + + "github.com/chrislusf/seaweedfs/weed/security" + "github.com/chrislusf/seaweedfs/weed/filer2" "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" @@ -13,13 +17,14 @@ import ( ) type FilerSink struct { - filerSource *source.FilerSource - grpcAddress string - dir string - replication string - collection string - ttlSec int32 - dataCenter string + filerSource *source.FilerSource + grpcAddress string + dir string + replication string + collection string + ttlSec int32 + dataCenter string + grpcDialOption grpc.DialOption } func init() { @@ -34,13 +39,13 @@ func (fs *FilerSink) GetSinkToDirectory() string { return fs.dir } -func (fs *FilerSink) Initialize(configuration util.Configuration) error { +func (fs *FilerSink) Initialize(configuration util.Configuration, prefix string) error { return fs.initialize( - configuration.GetString("grpcAddress"), - configuration.GetString("directory"), - configuration.GetString("replication"), - configuration.GetString("collection"), - configuration.GetInt("ttlSec"), + configuration.GetString(prefix+"grpcAddress"), + configuration.GetString(prefix+"directory"), + configuration.GetString(prefix+"replication"), + configuration.GetString(prefix+"collection"), + configuration.GetInt(prefix+"ttlSec"), ) } @@ -55,37 +60,28 @@ func (fs *FilerSink) initialize(grpcAddress string, dir string, fs.replication = replication fs.collection = collection fs.ttlSec = int32(ttlSec) + fs.grpcDialOption = security.LoadClientTLS(util.GetViper(), "grpc.client") return nil } func (fs *FilerSink) DeleteEntry(key string, isDirectory, deleteIncludeChunks bool) error { - return fs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error { - - dir, name := filer2.FullPath(key).DirAndName() - request := &filer_pb.DeleteEntryRequest{ - Directory: 
dir, - Name: name, - IsDeleteData: deleteIncludeChunks, - } + dir, name := util.FullPath(key).DirAndName() - glog.V(1).Infof("delete entry: %v", request) - _, err := client.DeleteEntry(context.Background(), request) - if err != nil { - glog.V(0).Infof("delete entry %s: %v", key, err) - return fmt.Errorf("delete entry %s: %v", key, err) - } - - return nil - }) + glog.V(1).Infof("delete entry: %v", key) + err := filer_pb.Remove(fs, dir, name, deleteIncludeChunks, false, false, true) + if err != nil { + glog.V(0).Infof("delete entry %s: %v", key, err) + return fmt.Errorf("delete entry %s: %v", key, err) + } + return nil } func (fs *FilerSink) CreateEntry(key string, entry *filer_pb.Entry) error { - return fs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error { + return fs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { - dir, name := filer2.FullPath(key).DirAndName() - ctx := context.Background() + dir, name := util.FullPath(key).DirAndName() // look up existing entry lookupRequest := &filer_pb.LookupDirectoryEntryRequest{ @@ -93,14 +89,14 @@ func (fs *FilerSink) CreateEntry(key string, entry *filer_pb.Entry) error { Name: name, } glog.V(1).Infof("lookup: %v", lookupRequest) - if resp, err := client.LookupDirectoryEntry(ctx, lookupRequest); err == nil { - if filer2.ETag(resp.Entry.Chunks) == filer2.ETag(entry.Chunks) { + if resp, err := filer_pb.LookupEntry(client, lookupRequest); err == nil { + if filer2.ETag(resp.Entry) == filer2.ETag(entry) { glog.V(0).Infof("already replicated %s", key) return nil } } - replicatedChunks, err := fs.replicateChunks(entry.Chunks) + replicatedChunks, err := fs.replicateChunks(entry.Chunks, dir) if err != nil { glog.V(0).Infof("replicate entry chunks %s: %v", key, err) @@ -117,10 +113,11 @@ func (fs *FilerSink) CreateEntry(key string, entry *filer_pb.Entry) error { Attributes: entry.Attributes, Chunks: replicatedChunks, }, + IsFromOtherCluster: true, } glog.V(1).Infof("create: %v", request) - if _, err := 
client.CreateEntry(ctx, request); err != nil { + if err := filer_pb.CreateEntry(client, request); err != nil { glog.V(0).Infof("create entry %s: %v", key, err) return fmt.Errorf("create entry %s: %v", key, err) } @@ -129,15 +126,13 @@ func (fs *FilerSink) CreateEntry(key string, entry *filer_pb.Entry) error { }) } -func (fs *FilerSink) UpdateEntry(key string, oldEntry, newEntry *filer_pb.Entry, deleteIncludeChunks bool) (foundExistingEntry bool, err error) { - - ctx := context.Background() +func (fs *FilerSink) UpdateEntry(key string, oldEntry *filer_pb.Entry, newParentPath string, newEntry *filer_pb.Entry, deleteIncludeChunks bool) (foundExistingEntry bool, err error) { - dir, name := filer2.FullPath(key).DirAndName() + dir, name := util.FullPath(key).DirAndName() // read existing entry var existingEntry *filer_pb.Entry - err = fs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error { + err = fs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { request := &filer_pb.LookupDirectoryEntryRequest{ Directory: dir, @@ -145,7 +140,7 @@ func (fs *FilerSink) UpdateEntry(key string, oldEntry, newEntry *filer_pb.Entry, } glog.V(4).Infof("lookup entry: %v", request) - resp, err := client.LookupDirectoryEntry(ctx, request) + resp, err := filer_pb.LookupEntry(client, request) if err != nil { glog.V(0).Infof("lookup %s: %v", key, err) return err @@ -166,7 +161,7 @@ func (fs *FilerSink) UpdateEntry(key string, oldEntry, newEntry *filer_pb.Entry, // skip if already changed // this usually happens when the messages are not ordered glog.V(0).Infof("late updates %s", key) - } else if filer2.ETag(newEntry.Chunks) == filer2.ETag(existingEntry.Chunks) { + } else if filer2.ETag(newEntry) == filer2.ETag(existingEntry) { // skip if no change // this usually happens when retrying the replication glog.V(0).Infof("already replicated %s", key) @@ -177,11 +172,11 @@ func (fs *FilerSink) UpdateEntry(key string, oldEntry, newEntry *filer_pb.Entry, // delete the chunks 
that are deleted from the source if deleteIncludeChunks { // remove the deleted chunks. Actual data deletion happens in filer UpdateEntry FindUnusedFileChunks - existingEntry.Chunks = minusChunks(existingEntry.Chunks, deletedChunks) + existingEntry.Chunks = filer2.MinusChunks(existingEntry.Chunks, deletedChunks) } // replicate the chunks that are new in the source - replicatedChunks, err := fs.replicateChunks(newChunks) + replicatedChunks, err := fs.replicateChunks(newChunks, newParentPath) if err != nil { return true, fmt.Errorf("replicte %s chunks error: %v", key, err) } @@ -189,14 +184,15 @@ func (fs *FilerSink) UpdateEntry(key string, oldEntry, newEntry *filer_pb.Entry, } // save updated meta data - return true, fs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error { + return true, fs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { request := &filer_pb.UpdateEntryRequest{ - Directory: dir, - Entry: existingEntry, + Directory: newParentPath, + Entry: existingEntry, + IsFromOtherCluster: true, } - if _, err := client.UpdateEntry(ctx, request); err != nil { + if _, err := client.UpdateEntry(context.Background(), request); err != nil { return fmt.Errorf("update existingEntry %s: %v", key, err) } @@ -205,23 +201,7 @@ func (fs *FilerSink) UpdateEntry(key string, oldEntry, newEntry *filer_pb.Entry, } func compareChunks(oldEntry, newEntry *filer_pb.Entry) (deletedChunks, newChunks []*filer_pb.FileChunk) { - deletedChunks = minusChunks(oldEntry.Chunks, newEntry.Chunks) - newChunks = minusChunks(newEntry.Chunks, oldEntry.Chunks) - return -} - -func minusChunks(as, bs []*filer_pb.FileChunk) (delta []*filer_pb.FileChunk) { - for _, a := range as { - found := false - for _, b := range bs { - if a.FileId == b.FileId { - found = true - break - } - } - if !found { - delta = append(delta, a) - } - } + deletedChunks = filer2.MinusChunks(oldEntry.Chunks, newEntry.Chunks) + newChunks = filer2.MinusChunks(newEntry.Chunks, oldEntry.Chunks) return } diff 
--git a/weed/replication/sink/gcssink/gcs_sink.go b/weed/replication/sink/gcssink/gcs_sink.go index c1beefc33..bb5a54272 100644 --- a/weed/replication/sink/gcssink/gcs_sink.go +++ b/weed/replication/sink/gcssink/gcs_sink.go @@ -6,13 +6,14 @@ import ( "os" "cloud.google.com/go/storage" + "google.golang.org/api/option" + "github.com/chrislusf/seaweedfs/weed/filer2" "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" "github.com/chrislusf/seaweedfs/weed/replication/sink" "github.com/chrislusf/seaweedfs/weed/replication/source" "github.com/chrislusf/seaweedfs/weed/util" - "google.golang.org/api/option" ) type GcsSink struct { @@ -34,11 +35,11 @@ func (g *GcsSink) GetSinkToDirectory() string { return g.dir } -func (g *GcsSink) Initialize(configuration util.Configuration) error { +func (g *GcsSink) Initialize(configuration util.Configuration, prefix string) error { return g.initialize( - configuration.GetString("google_application_credentials"), - configuration.GetString("bucket"), - configuration.GetString("directory"), + configuration.GetString(prefix+"google_application_credentials"), + configuration.GetString(prefix+"bucket"), + configuration.GetString(prefix+"directory"), ) } @@ -50,7 +51,6 @@ func (g *GcsSink) initialize(google_application_credentials, bucketName, dir str g.bucket = bucketName g.dir = dir - ctx := context.Background() // Creates a client. 
if google_application_credentials == "" { var found bool @@ -59,7 +59,7 @@ func (g *GcsSink) initialize(google_application_credentials, bucketName, dir str glog.Fatalf("need to specific GOOGLE_APPLICATION_CREDENTIALS env variable or google_application_credentials in replication.toml") } } - client, err := storage.NewClient(ctx, option.WithCredentialsFile(google_application_credentials)) + client, err := storage.NewClient(context.Background(), option.WithCredentialsFile(google_application_credentials)) if err != nil { glog.Fatalf("Failed to create client: %v", err) } @@ -90,11 +90,9 @@ func (g *GcsSink) CreateEntry(key string, entry *filer_pb.Entry) error { } totalSize := filer2.TotalSize(entry.Chunks) - chunkViews := filer2.ViewFromChunks(entry.Chunks, 0, int(totalSize)) - - ctx := context.Background() + chunkViews := filer2.ViewFromChunks(entry.Chunks, 0, int64(totalSize)) - wc := g.client.Bucket(g.bucket).Object(key).NewWriter(ctx) + wc := g.client.Bucket(g.bucket).Object(key).NewWriter(context.Background()) for _, chunk := range chunkViews { @@ -103,7 +101,7 @@ func (g *GcsSink) CreateEntry(key string, entry *filer_pb.Entry) error { return err } - _, err = util.ReadUrlAsStream(fileUrl, chunk.Offset, int(chunk.Size), func(data []byte) { + err = util.ReadUrlAsStream(fileUrl, nil, false, chunk.IsFullChunk(), chunk.Offset, int(chunk.Size), func(data []byte) { wc.Write(data) }) @@ -121,7 +119,7 @@ func (g *GcsSink) CreateEntry(key string, entry *filer_pb.Entry) error { } -func (g *GcsSink) UpdateEntry(key string, oldEntry, newEntry *filer_pb.Entry, deleteIncludeChunks bool) (foundExistingEntry bool, err error) { +func (g *GcsSink) UpdateEntry(key string, oldEntry *filer_pb.Entry, newParentPath string, newEntry *filer_pb.Entry, deleteIncludeChunks bool) (foundExistingEntry bool, err error) { // TODO improve efficiency return false, nil } diff --git a/weed/replication/sink/replication_sink.go b/weed/replication/sink/replication_sink.go index 0a86139d3..6d85f660a 100644 
--- a/weed/replication/sink/replication_sink.go +++ b/weed/replication/sink/replication_sink.go @@ -8,10 +8,10 @@ import ( type ReplicationSink interface { GetName() string - Initialize(configuration util.Configuration) error + Initialize(configuration util.Configuration, prefix string) error DeleteEntry(key string, isDirectory, deleteIncludeChunks bool) error CreateEntry(key string, entry *filer_pb.Entry) error - UpdateEntry(key string, oldEntry, newEntry *filer_pb.Entry, deleteIncludeChunks bool) (foundExistingEntry bool, err error) + UpdateEntry(key string, oldEntry *filer_pb.Entry, newParentPath string, newEntry *filer_pb.Entry, deleteIncludeChunks bool) (foundExistingEntry bool, err error) GetSinkToDirectory() string SetSourceFiler(s *source.FilerSource) } diff --git a/weed/replication/sink/s3sink/s3_sink.go b/weed/replication/sink/s3sink/s3_sink.go index 0a4e78318..d7af105b8 100644 --- a/weed/replication/sink/s3sink/s3_sink.go +++ b/weed/replication/sink/s3sink/s3_sink.go @@ -1,6 +1,7 @@ package S3Sink import ( + "context" "fmt" "strings" "sync" @@ -10,6 +11,7 @@ import ( "github.com/aws/aws-sdk-go/aws/session" "github.com/aws/aws-sdk-go/service/s3" "github.com/aws/aws-sdk-go/service/s3/s3iface" + "github.com/chrislusf/seaweedfs/weed/filer2" "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" @@ -23,6 +25,7 @@ type S3Sink struct { region string bucket string dir string + endpoint string filerSource *source.FilerSource } @@ -38,16 +41,18 @@ func (s3sink *S3Sink) GetSinkToDirectory() string { return s3sink.dir } -func (s3sink *S3Sink) Initialize(configuration util.Configuration) error { - glog.V(0).Infof("sink.s3.region: %v", configuration.GetString("region")) - glog.V(0).Infof("sink.s3.bucket: %v", configuration.GetString("bucket")) - glog.V(0).Infof("sink.s3.directory: %v", configuration.GetString("directory")) +func (s3sink *S3Sink) Initialize(configuration util.Configuration, prefix string) error { + 
glog.V(0).Infof("sink.s3.region: %v", configuration.GetString(prefix+"region")) + glog.V(0).Infof("sink.s3.bucket: %v", configuration.GetString(prefix+"bucket")) + glog.V(0).Infof("sink.s3.directory: %v", configuration.GetString(prefix+"directory")) + glog.V(0).Infof("sink.s3.endpoint: %v", configuration.GetString(prefix+"endpoint")) return s3sink.initialize( - configuration.GetString("aws_access_key_id"), - configuration.GetString("aws_secret_access_key"), - configuration.GetString("region"), - configuration.GetString("bucket"), - configuration.GetString("directory"), + configuration.GetString(prefix+"aws_access_key_id"), + configuration.GetString(prefix+"aws_secret_access_key"), + configuration.GetString(prefix+"region"), + configuration.GetString(prefix+"bucket"), + configuration.GetString(prefix+"directory"), + configuration.GetString(prefix+"endpoint"), ) } @@ -55,16 +60,18 @@ func (s3sink *S3Sink) SetSourceFiler(s *source.FilerSource) { s3sink.filerSource = s } -func (s3sink *S3Sink) initialize(awsAccessKeyId, aswSecretAccessKey, region, bucket, dir string) error { +func (s3sink *S3Sink) initialize(awsAccessKeyId, awsSecretAccessKey, region, bucket, dir, endpoint string) error { s3sink.region = region s3sink.bucket = bucket s3sink.dir = dir + s3sink.endpoint = endpoint config := &aws.Config{ - Region: aws.String(s3sink.region), + Region: aws.String(s3sink.region), + Endpoint: aws.String(s3sink.endpoint), } - if awsAccessKeyId != "" && aswSecretAccessKey != "" { - config.Credentials = credentials.NewStaticCredentials(awsAccessKeyId, aswSecretAccessKey, "") + if awsAccessKeyId != "" && awsSecretAccessKey != "" { + config.Credentials = credentials.NewStaticCredentials(awsAccessKeyId, awsSecretAccessKey, "") } sess, err := session.NewSession(config) @@ -89,7 +96,6 @@ func (s3sink *S3Sink) DeleteEntry(key string, isDirectory, deleteIncludeChunks b } func (s3sink *S3Sink) CreateEntry(key string, entry *filer_pb.Entry) error { - key = cleanKey(key) if 
entry.IsDirectory { @@ -102,21 +108,22 @@ func (s3sink *S3Sink) CreateEntry(key string, entry *filer_pb.Entry) error { } totalSize := filer2.TotalSize(entry.Chunks) - chunkViews := filer2.ViewFromChunks(entry.Chunks, 0, int(totalSize)) + chunkViews := filer2.ViewFromChunks(entry.Chunks, 0, int64(totalSize)) + + parts := make([]*s3.CompletedPart, len(chunkViews)) - var parts []*s3.CompletedPart var wg sync.WaitGroup for chunkIndex, chunk := range chunkViews { partId := chunkIndex + 1 wg.Add(1) - go func(chunk *filer2.ChunkView) { + go func(chunk *filer2.ChunkView, index int) { defer wg.Done() if part, uploadErr := s3sink.uploadPart(key, uploadId, partId, chunk); uploadErr != nil { err = uploadErr } else { - parts = append(parts, part) + parts[index] = part } - }(chunk) + }(chunk, chunkIndex) } wg.Wait() @@ -125,11 +132,11 @@ func (s3sink *S3Sink) CreateEntry(key string, entry *filer_pb.Entry) error { return err } - return s3sink.completeMultipartUpload(key, uploadId, parts) + return s3sink.completeMultipartUpload(context.Background(), key, uploadId, parts) } -func (s3sink *S3Sink) UpdateEntry(key string, oldEntry, newEntry *filer_pb.Entry, deleteIncludeChunks bool) (foundExistingEntry bool, err error) { +func (s3sink *S3Sink) UpdateEntry(key string, oldEntry *filer_pb.Entry, newParentPath string, newEntry *filer_pb.Entry, deleteIncludeChunks bool) (foundExistingEntry bool, err error) { key = cleanKey(key) // TODO improve efficiency return false, nil diff --git a/weed/replication/sink/s3sink/s3_write.go b/weed/replication/sink/s3sink/s3_write.go index 5c4be7aee..c5c65ed5c 100644 --- a/weed/replication/sink/s3sink/s3_write.go +++ b/weed/replication/sink/s3sink/s3_write.go @@ -2,6 +2,7 @@ package S3Sink import ( "bytes" + "context" "fmt" "io" @@ -81,7 +82,7 @@ func (s3sink *S3Sink) abortMultipartUpload(key, uploadId string) error { } // To complete multipart upload -func (s3sink *S3Sink) completeMultipartUpload(key, uploadId string, parts []*s3.CompletedPart) error { 
+func (s3sink *S3Sink) completeMultipartUpload(ctx context.Context, key, uploadId string, parts []*s3.CompletedPart) error { input := &s3.CompleteMultipartUploadInput{ Bucket: aws.String(s3sink.bucket), Key: aws.String(key), @@ -161,6 +162,6 @@ func (s3sink *S3Sink) buildReadSeeker(chunk *filer2.ChunkView) (io.ReadSeeker, e return nil, err } buf := make([]byte, chunk.Size) - util.ReadUrl(fileUrl, chunk.Offset, int(chunk.Size), buf, true) + util.ReadUrl(fileUrl, nil, false, false, chunk.Offset, int(chunk.Size), buf) return bytes.NewReader(buf), nil } diff --git a/weed/replication/source/filer_source.go b/weed/replication/source/filer_source.go index efe71e706..69c23fe82 100644 --- a/weed/replication/source/filer_source.go +++ b/weed/replication/source/filer_source.go @@ -7,6 +7,11 @@ import ( "net/http" "strings" + "google.golang.org/grpc" + + "github.com/chrislusf/seaweedfs/weed/pb" + "github.com/chrislusf/seaweedfs/weed/security" + "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" "github.com/chrislusf/seaweedfs/weed/util" @@ -17,20 +22,22 @@ type ReplicationSource interface { } type FilerSource struct { - grpcAddress string - Dir string + grpcAddress string + grpcDialOption grpc.DialOption + Dir string } -func (fs *FilerSource) Initialize(configuration util.Configuration) error { +func (fs *FilerSource) Initialize(configuration util.Configuration, prefix string) error { return fs.initialize( - configuration.GetString("grpcAddress"), - configuration.GetString("directory"), + configuration.GetString(prefix+"grpcAddress"), + configuration.GetString(prefix+"directory"), ) } func (fs *FilerSource) initialize(grpcAddress string, dir string) (err error) { fs.grpcAddress = grpcAddress fs.Dir = dir + fs.grpcDialOption = security.LoadClientTLS(util.GetViper(), "grpc.client") return nil } @@ -40,7 +47,7 @@ func (fs *FilerSource) LookupFileId(part string) (fileUrl string, err error) { vid := volumeId(part) - err = 
fs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error { + err = fs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { glog.V(4).Infof("read lookup volume id locations: %v", vid) resp, err := client.LookupVolume(context.Background(), &filer_pb.LookupVolumeRequest{ @@ -84,17 +91,19 @@ func (fs *FilerSource) ReadPart(part string) (filename string, header http.Heade return filename, header, readCloser, err } -func (fs *FilerSource) withFilerClient(fn func(filer_pb.SeaweedFilerClient) error) error { +var _ = filer_pb.FilerClient(&FilerSource{}) - grpcConnection, err := util.GrpcDial(fs.grpcAddress) - if err != nil { - return fmt.Errorf("fail to dial %s: %v", fs.grpcAddress, err) - } - defer grpcConnection.Close() +func (fs *FilerSource) WithFilerClient(fn func(filer_pb.SeaweedFilerClient) error) error { + + return pb.WithCachedGrpcClient(func(grpcConnection *grpc.ClientConn) error { + client := filer_pb.NewSeaweedFilerClient(grpcConnection) + return fn(client) + }, fs.grpcAddress, fs.grpcDialOption) - client := filer_pb.NewSeaweedFilerClient(grpcConnection) +} - return fn(client) +func (fs *FilerSource) AdjustedUrl(hostAndPort string) string { + return hostAndPort } func volumeId(fileId string) string { diff --git a/weed/replication/sub/notification_aws_sqs.go b/weed/replication/sub/notification_aws_sqs.go index f0100f4de..1dd386ba7 100644 --- a/weed/replication/sub/notification_aws_sqs.go +++ b/weed/replication/sub/notification_aws_sqs.go @@ -27,24 +27,24 @@ func (k *AwsSqsInput) GetName() string { return "aws_sqs" } -func (k *AwsSqsInput) Initialize(configuration util.Configuration) error { - glog.V(0).Infof("replication.notification.aws_sqs.region: %v", configuration.GetString("region")) - glog.V(0).Infof("replication.notification.aws_sqs.sqs_queue_name: %v", configuration.GetString("sqs_queue_name")) +func (k *AwsSqsInput) Initialize(configuration util.Configuration, prefix string) error { + 
glog.V(0).Infof("replication.notification.aws_sqs.region: %v", configuration.GetString(prefix+"region")) + glog.V(0).Infof("replication.notification.aws_sqs.sqs_queue_name: %v", configuration.GetString(prefix+"sqs_queue_name")) return k.initialize( - configuration.GetString("aws_access_key_id"), - configuration.GetString("aws_secret_access_key"), - configuration.GetString("region"), - configuration.GetString("sqs_queue_name"), + configuration.GetString(prefix+"aws_access_key_id"), + configuration.GetString(prefix+"aws_secret_access_key"), + configuration.GetString(prefix+"region"), + configuration.GetString(prefix+"sqs_queue_name"), ) } -func (k *AwsSqsInput) initialize(awsAccessKeyId, aswSecretAccessKey, region, queueName string) (err error) { +func (k *AwsSqsInput) initialize(awsAccessKeyId, awsSecretAccessKey, region, queueName string) (err error) { config := &aws.Config{ Region: aws.String(region), } - if awsAccessKeyId != "" && aswSecretAccessKey != "" { - config.Credentials = credentials.NewStaticCredentials(awsAccessKeyId, aswSecretAccessKey, "") + if awsAccessKeyId != "" && awsSecretAccessKey != "" { + config.Credentials = credentials.NewStaticCredentials(awsAccessKeyId, awsSecretAccessKey, "") } sess, err := session.NewSession(config) @@ -92,7 +92,9 @@ func (k *AwsSqsInput) ReceiveMessage() (key string, message *filer_pb.EventNotif } // process the message - key = *result.Messages[0].Attributes["key"] + // fmt.Printf("messages: %+v\n", result.Messages[0]) + keyValue := result.Messages[0].MessageAttributes["key"] + key = *keyValue.StringValue text := *result.Messages[0].Body message = &filer_pb.EventNotification{} err = proto.UnmarshalText(text, message) diff --git a/weed/replication/sub/notification_gocdk_pub_sub.go b/weed/replication/sub/notification_gocdk_pub_sub.go new file mode 100644 index 000000000..9726096e5 --- /dev/null +++ b/weed/replication/sub/notification_gocdk_pub_sub.go @@ -0,0 +1,50 @@ +package sub + +import ( + "context" + + 
"github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/util" + "github.com/golang/protobuf/proto" + "gocloud.dev/pubsub" + _ "gocloud.dev/pubsub/awssnssqs" + // _ "gocloud.dev/pubsub/azuresb" + _ "gocloud.dev/pubsub/gcppubsub" + _ "gocloud.dev/pubsub/natspubsub" + _ "gocloud.dev/pubsub/rabbitpubsub" +) + +func init() { + NotificationInputs = append(NotificationInputs, &GoCDKPubSubInput{}) +} + +type GoCDKPubSubInput struct { + sub *pubsub.Subscription +} + +func (k *GoCDKPubSubInput) GetName() string { + return "gocdk_pub_sub" +} + +func (k *GoCDKPubSubInput) Initialize(configuration util.Configuration, prefix string) error { + subURL := configuration.GetString(prefix + "sub_url") + glog.V(0).Infof("notification.gocdk_pub_sub.sub_url: %v", subURL) + sub, err := pubsub.OpenSubscription(context.Background(), subURL) + if err != nil { + return err + } + k.sub = sub + return nil +} + +func (k *GoCDKPubSubInput) ReceiveMessage() (key string, message *filer_pb.EventNotification, err error) { + msg, err := k.sub.Receive(context.Background()) + key = msg.Metadata["key"] + message = &filer_pb.EventNotification{} + err = proto.Unmarshal(msg.Body, message) + if err != nil { + return "", nil, err + } + return key, message, nil +} diff --git a/weed/replication/sub/notification_google_pub_sub.go b/weed/replication/sub/notification_google_pub_sub.go index ad6b42a2e..a950bb42b 100644 --- a/weed/replication/sub/notification_google_pub_sub.go +++ b/weed/replication/sub/notification_google_pub_sub.go @@ -27,13 +27,13 @@ func (k *GooglePubSubInput) GetName() string { return "google_pub_sub" } -func (k *GooglePubSubInput) Initialize(configuration util.Configuration) error { - glog.V(0).Infof("notification.google_pub_sub.project_id: %v", configuration.GetString("project_id")) - glog.V(0).Infof("notification.google_pub_sub.topic: %v", configuration.GetString("topic")) +func (k *GooglePubSubInput) 
Initialize(configuration util.Configuration, prefix string) error { + glog.V(0).Infof("notification.google_pub_sub.project_id: %v", configuration.GetString(prefix+"project_id")) + glog.V(0).Infof("notification.google_pub_sub.topic: %v", configuration.GetString(prefix+"topic")) return k.initialize( - configuration.GetString("google_application_credentials"), - configuration.GetString("project_id"), - configuration.GetString("topic"), + configuration.GetString(prefix+"google_application_credentials"), + configuration.GetString(prefix+"project_id"), + configuration.GetString(prefix+"topic"), ) } diff --git a/weed/replication/sub/notification_kafka.go b/weed/replication/sub/notification_kafka.go index 1a86a8307..fa9cfad9b 100644 --- a/weed/replication/sub/notification_kafka.go +++ b/weed/replication/sub/notification_kafka.go @@ -28,14 +28,14 @@ func (k *KafkaInput) GetName() string { return "kafka" } -func (k *KafkaInput) Initialize(configuration util.Configuration) error { - glog.V(0).Infof("replication.notification.kafka.hosts: %v\n", configuration.GetStringSlice("hosts")) - glog.V(0).Infof("replication.notification.kafka.topic: %v\n", configuration.GetString("topic")) +func (k *KafkaInput) Initialize(configuration util.Configuration, prefix string) error { + glog.V(0).Infof("replication.notification.kafka.hosts: %v\n", configuration.GetStringSlice(prefix+"hosts")) + glog.V(0).Infof("replication.notification.kafka.topic: %v\n", configuration.GetString(prefix+"topic")) return k.initialize( - configuration.GetStringSlice("hosts"), - configuration.GetString("topic"), - configuration.GetString("offsetFile"), - configuration.GetInt("offsetSaveIntervalSeconds"), + configuration.GetStringSlice(prefix+"hosts"), + configuration.GetString(prefix+"topic"), + configuration.GetString(prefix+"offsetFile"), + configuration.GetInt(prefix+"offsetSaveIntervalSeconds"), ) } diff --git a/weed/replication/sub/notifications.go b/weed/replication/sub/notifications.go index 
66fbef824..8a2668f98 100644 --- a/weed/replication/sub/notifications.go +++ b/weed/replication/sub/notifications.go @@ -9,7 +9,7 @@ type NotificationInput interface { // GetName gets the name to locate the configuration in sync.toml file GetName() string // Initialize initializes the file store - Initialize(configuration util.Configuration) error + Initialize(configuration util.Configuration, prefix string) error ReceiveMessage() (key string, message *filer_pb.EventNotification, err error) } diff --git a/weed/s3api/auth_credentials.go b/weed/s3api/auth_credentials.go new file mode 100644 index 000000000..c1e8dff1e --- /dev/null +++ b/weed/s3api/auth_credentials.go @@ -0,0 +1,188 @@ +package s3api + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + + "github.com/golang/protobuf/jsonpb" + "github.com/gorilla/mux" + + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/iam_pb" +) + +type Action string + +const ( + ACTION_READ = "Read" + ACTION_WRITE = "Write" + ACTION_ADMIN = "Admin" +) + +type Iam interface { + Check(f http.HandlerFunc, actions ...Action) http.HandlerFunc +} + +type IdentityAccessManagement struct { + identities []*Identity + domain string +} + +type Identity struct { + Name string + Credentials []*Credential + Actions []Action +} + +type Credential struct { + AccessKey string + SecretKey string +} + +func NewIdentityAccessManagement(fileName string, domain string) *IdentityAccessManagement { + iam := &IdentityAccessManagement{ + domain: domain, + } + if fileName == "" { + return iam + } + if err := iam.loadS3ApiConfiguration(fileName); err != nil { + glog.Fatalf("fail to load config file %s: %v", fileName, err) + } + return iam +} + +func (iam *IdentityAccessManagement) loadS3ApiConfiguration(fileName string) error { + + s3ApiConfiguration := &iam_pb.S3ApiConfiguration{} + + rawData, readErr := ioutil.ReadFile(fileName) + if readErr != nil { + glog.Warningf("fail to read %s : %v", fileName, readErr) + return 
fmt.Errorf("fail to read %s : %v", fileName, readErr) + } + + glog.V(1).Infof("maybeLoadVolumeInfo Unmarshal volume info %v", fileName) + if err := jsonpb.Unmarshal(bytes.NewReader(rawData), s3ApiConfiguration); err != nil { + glog.Warningf("unmarshal error: %v", err) + return fmt.Errorf("unmarshal %s error: %v", fileName, err) + } + + for _, ident := range s3ApiConfiguration.Identities { + t := &Identity{ + Name: ident.Name, + Credentials: nil, + Actions: nil, + } + for _, action := range ident.Actions { + t.Actions = append(t.Actions, Action(action)) + } + for _, cred := range ident.Credentials { + t.Credentials = append(t.Credentials, &Credential{ + AccessKey: cred.AccessKey, + SecretKey: cred.SecretKey, + }) + } + iam.identities = append(iam.identities, t) + } + + return nil +} + +func (iam *IdentityAccessManagement) lookupByAccessKey(accessKey string) (identity *Identity, cred *Credential, found bool) { + for _, ident := range iam.identities { + for _, cred := range ident.Credentials { + if cred.AccessKey == accessKey { + return ident, cred, true + } + } + } + return nil, nil, false +} + +func (iam *IdentityAccessManagement) Auth(f http.HandlerFunc, action Action) http.HandlerFunc { + + if len(iam.identities) == 0 { + return f + } + + return func(w http.ResponseWriter, r *http.Request) { + errCode := iam.authRequest(r, action) + if errCode == ErrNone { + f(w, r) + return + } + writeErrorResponse(w, errCode, r.URL) + } +} + +// check whether the request has valid access keys +func (iam *IdentityAccessManagement) authRequest(r *http.Request, action Action) ErrorCode { + var identity *Identity + var s3Err ErrorCode + switch getRequestAuthType(r) { + case authTypeStreamingSigned: + return ErrNone + case authTypeUnknown: + glog.V(3).Infof("unknown auth type") + return ErrAccessDenied + case authTypePresignedV2, authTypeSignedV2: + glog.V(3).Infof("v2 auth type") + identity, s3Err = iam.isReqAuthenticatedV2(r) + case authTypeSigned, authTypePresigned: + 
glog.V(3).Infof("v4 auth type") + identity, s3Err = iam.reqSignatureV4Verify(r) + case authTypePostPolicy: + glog.V(3).Infof("post policy auth type") + return ErrNotImplemented + case authTypeJWT: + glog.V(3).Infof("jwt auth type") + return ErrNotImplemented + case authTypeAnonymous: + return ErrAccessDenied + default: + return ErrNotImplemented + } + + glog.V(3).Infof("auth error: %v", s3Err) + if s3Err != ErrNone { + return s3Err + } + + glog.V(3).Infof("user name: %v actions: %v", identity.Name, identity.Actions) + + vars := mux.Vars(r) + bucket := vars["bucket"] + + if !identity.canDo(action, bucket) { + return ErrAccessDenied + } + + return ErrNone + +} + +func (identity *Identity) canDo(action Action, bucket string) bool { + for _, a := range identity.Actions { + if a == "Admin" { + return true + } + } + for _, a := range identity.Actions { + if a == action { + return true + } + } + if bucket == "" { + return false + } + limitedByBucket := string(action) + ":" + bucket + for _, a := range identity.Actions { + if string(a) == limitedByBucket { + return true + } + } + return false +} diff --git a/weed/s3api/auth_credentials_test.go b/weed/s3api/auth_credentials_test.go new file mode 100644 index 000000000..c6f76560c --- /dev/null +++ b/weed/s3api/auth_credentials_test.go @@ -0,0 +1,68 @@ +package s3api + +import ( + "testing" + + "github.com/golang/protobuf/jsonpb" + + "github.com/chrislusf/seaweedfs/weed/pb/iam_pb" +) + +func TestIdentityListFileFormat(t *testing.T) { + + s3ApiConfiguration := &iam_pb.S3ApiConfiguration{} + + identity1 := &iam_pb.Identity{ + Name: "some_name", + Credentials: []*iam_pb.Credential{ + { + AccessKey: "some_access_key1", + SecretKey: "some_secret_key2", + }, + }, + Actions: []string{ + ACTION_ADMIN, + ACTION_READ, + ACTION_WRITE, + }, + } + identity2 := &iam_pb.Identity{ + Name: "some_read_only_user", + Credentials: []*iam_pb.Credential{ + { + AccessKey: "some_access_key1", + SecretKey: "some_secret_key1", + }, + }, + Actions: 
[]string{ + ACTION_READ, + }, + } + identity3 := &iam_pb.Identity{ + Name: "some_normal_user", + Credentials: []*iam_pb.Credential{ + { + AccessKey: "some_access_key2", + SecretKey: "some_secret_key2", + }, + }, + Actions: []string{ + ACTION_READ, + ACTION_WRITE, + }, + } + + s3ApiConfiguration.Identities = append(s3ApiConfiguration.Identities, identity1) + s3ApiConfiguration.Identities = append(s3ApiConfiguration.Identities, identity2) + s3ApiConfiguration.Identities = append(s3ApiConfiguration.Identities, identity3) + + m := jsonpb.Marshaler{ + EmitDefaults: true, + Indent: " ", + } + + text, _ := m.MarshalToString(s3ApiConfiguration) + + println(text) + +} diff --git a/weed/s3api/auth_signature_v2.go b/weed/s3api/auth_signature_v2.go new file mode 100644 index 000000000..151a9ec26 --- /dev/null +++ b/weed/s3api/auth_signature_v2.go @@ -0,0 +1,412 @@ +/* + * The following code tries to reverse engineer the Amazon S3 APIs, + * and is mostly copied from minio implementation. + */ + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +// implied. See the License for the specific language governing +// permissions and limitations under the License. + +package s3api + +import ( + "crypto/hmac" + "crypto/sha1" + "crypto/subtle" + "encoding/base64" + "fmt" + "net" + "net/http" + "net/url" + "path" + "sort" + "strconv" + "strings" + "time" +) + +// Whitelist resource list that will be used in query string for signature-V2 calculation. 
+// The list should be alphabetically sorted +var resourceList = []string{ + "acl", + "delete", + "lifecycle", + "location", + "logging", + "notification", + "partNumber", + "policy", + "requestPayment", + "response-cache-control", + "response-content-disposition", + "response-content-encoding", + "response-content-language", + "response-content-type", + "response-expires", + "torrent", + "uploadId", + "uploads", + "versionId", + "versioning", + "versions", + "website", +} + +// Verify if request has valid AWS Signature Version '2'. +func (iam *IdentityAccessManagement) isReqAuthenticatedV2(r *http.Request) (*Identity, ErrorCode) { + if isRequestSignatureV2(r) { + return iam.doesSignV2Match(r) + } + return iam.doesPresignV2SignatureMatch(r) +} + +// Authorization = "AWS" + " " + AWSAccessKeyId + ":" + Signature; +// Signature = Base64( HMAC-SHA1( YourSecretKey, UTF-8-Encoding-Of( StringToSign ) ) ); +// +// StringToSign = HTTP-Verb + "\n" + +// Content-Md5 + "\n" + +// Content-Type + "\n" + +// Date + "\n" + +// CanonicalizedProtocolHeaders + +// CanonicalizedResource; +// +// CanonicalizedResource = [ "/" + Bucket ] + +// + +// [ subresource, if present. For example "?acl", "?location", "?logging", or "?torrent"]; +// +// CanonicalizedProtocolHeaders = + +// doesSignV2Match - Verify authorization header with calculated header in accordance with +// - http://docs.aws.amazon.com/AmazonS3/latest/dev/auth-request-sig-v2.html +// returns true if matches, false otherwise. if error is not nil then it is always false + +func validateV2AuthHeader(v2Auth string) (accessKey string, errCode ErrorCode) { + if v2Auth == "" { + return "", ErrAuthHeaderEmpty + } + // Verify if the header algorithm is supported or not. + if !strings.HasPrefix(v2Auth, signV2Algorithm) { + return "", ErrSignatureVersionNotSupported + } + + // below is V2 Signed Auth header format, splitting on `space` (after the `AWS` string). 
+ // Authorization = "AWS" + " " + AWSAccessKeyId + ":" + Signature + authFields := strings.Split(v2Auth, " ") + if len(authFields) != 2 { + return "", ErrMissingFields + } + + // Then will be splitting on ":", this will seprate `AWSAccessKeyId` and `Signature` string. + keySignFields := strings.Split(strings.TrimSpace(authFields[1]), ":") + if len(keySignFields) != 2 { + return "", ErrMissingFields + } + + return keySignFields[0], ErrNone +} + +func (iam *IdentityAccessManagement) doesSignV2Match(r *http.Request) (*Identity, ErrorCode) { + v2Auth := r.Header.Get("Authorization") + + accessKey, apiError := validateV2AuthHeader(v2Auth) + if apiError != ErrNone { + return nil, apiError + } + + // Access credentials. + // Validate if access key id same. + ident, cred, found := iam.lookupByAccessKey(accessKey) + if !found { + return nil, ErrInvalidAccessKeyID + } + + // r.RequestURI will have raw encoded URI as sent by the client. + tokens := strings.SplitN(r.RequestURI, "?", 2) + encodedResource := tokens[0] + encodedQuery := "" + if len(tokens) == 2 { + encodedQuery = tokens[1] + } + + unescapedQueries, err := unescapeQueries(encodedQuery) + if err != nil { + return nil, ErrInvalidQueryParams + } + + encodedResource, err = getResource(encodedResource, r.Host, iam.domain) + if err != nil { + return nil, ErrInvalidRequest + } + + prefix := fmt.Sprintf("%s %s:", signV2Algorithm, cred.AccessKey) + if !strings.HasPrefix(v2Auth, prefix) { + return nil, ErrSignatureDoesNotMatch + } + v2Auth = v2Auth[len(prefix):] + expectedAuth := signatureV2(cred, r.Method, encodedResource, strings.Join(unescapedQueries, "&"), r.Header) + if !compareSignatureV2(v2Auth, expectedAuth) { + return nil, ErrSignatureDoesNotMatch + } + return ident, ErrNone +} + +// doesPresignV2SignatureMatch - Verify query headers with presigned signature +// - http://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html#RESTAuthenticationQueryStringAuth +// returns ErrNone if matches. 
S3 errors otherwise. +func (iam *IdentityAccessManagement) doesPresignV2SignatureMatch(r *http.Request) (*Identity, ErrorCode) { + + // r.RequestURI will have raw encoded URI as sent by the client. + tokens := strings.SplitN(r.RequestURI, "?", 2) + encodedResource := tokens[0] + encodedQuery := "" + if len(tokens) == 2 { + encodedQuery = tokens[1] + } + + var ( + filteredQueries []string + gotSignature string + expires string + accessKey string + err error + ) + + var unescapedQueries []string + unescapedQueries, err = unescapeQueries(encodedQuery) + if err != nil { + return nil, ErrInvalidQueryParams + } + + // Extract the necessary values from presigned query, construct a list of new filtered queries. + for _, query := range unescapedQueries { + keyval := strings.SplitN(query, "=", 2) + if len(keyval) != 2 { + return nil, ErrInvalidQueryParams + } + switch keyval[0] { + case "AWSAccessKeyId": + accessKey = keyval[1] + case "Signature": + gotSignature = keyval[1] + case "Expires": + expires = keyval[1] + default: + filteredQueries = append(filteredQueries, query) + } + } + + // Invalid values returns error. + if accessKey == "" || gotSignature == "" || expires == "" { + return nil, ErrInvalidQueryParams + } + + // Validate if access key id same. + ident, cred, found := iam.lookupByAccessKey(accessKey) + if !found { + return nil, ErrInvalidAccessKeyID + } + + // Make sure the request has not expired. + expiresInt, err := strconv.ParseInt(expires, 10, 64) + if err != nil { + return nil, ErrMalformedExpires + } + + // Check if the presigned URL has expired. 
+ if expiresInt < time.Now().UTC().Unix() { + return nil, ErrExpiredPresignRequest + } + + encodedResource, err = getResource(encodedResource, r.Host, iam.domain) + if err != nil { + return nil, ErrInvalidRequest + } + + expectedSignature := preSignatureV2(cred, r.Method, encodedResource, strings.Join(filteredQueries, "&"), r.Header, expires) + if !compareSignatureV2(gotSignature, expectedSignature) { + return nil, ErrSignatureDoesNotMatch + } + + return ident, ErrNone +} + +// Escape encodedQuery string into unescaped list of query params, returns error +// if any while unescaping the values. +func unescapeQueries(encodedQuery string) (unescapedQueries []string, err error) { + for _, query := range strings.Split(encodedQuery, "&") { + var unescapedQuery string + unescapedQuery, err = url.QueryUnescape(query) + if err != nil { + return nil, err + } + unescapedQueries = append(unescapedQueries, unescapedQuery) + } + return unescapedQueries, nil +} + +// Returns "/bucketName/objectName" for path-style or virtual-host-style requests. +func getResource(path string, host string, domain string) (string, error) { + if domain == "" { + return path, nil + } + // If virtual-host-style is enabled construct the "resource" properly. + if strings.Contains(host, ":") { + // In bucket.mydomain.com:9000, strip out :9000 + var err error + if host, _, err = net.SplitHostPort(host); err != nil { + return "", err + } + } + if !strings.HasSuffix(host, "."+domain) { + return path, nil + } + bucket := strings.TrimSuffix(host, "."+domain) + return "/" + pathJoin(bucket, path), nil +} + +// pathJoin - like path.Join() but retains trailing "/" of the last element +func pathJoin(elem ...string) string { + trailingSlash := "" + if len(elem) > 0 { + if strings.HasSuffix(elem[len(elem)-1], "/") { + trailingSlash = "/" + } + } + return path.Join(elem...) + trailingSlash +} + +// Return the signature v2 of a given request. 
+func signatureV2(cred *Credential, method string, encodedResource string, encodedQuery string, headers http.Header) string { + stringToSign := getStringToSignV2(method, encodedResource, encodedQuery, headers, "") + signature := calculateSignatureV2(stringToSign, cred.SecretKey) + return signature +} + +// Return string to sign under two different conditions. +// - if expires string is set then string to sign includes date instead of the Date header. +// - if expires string is empty then string to sign includes date header instead. +func getStringToSignV2(method string, encodedResource, encodedQuery string, headers http.Header, expires string) string { + canonicalHeaders := canonicalizedAmzHeadersV2(headers) + if len(canonicalHeaders) > 0 { + canonicalHeaders += "\n" + } + + date := expires // Date is set to expires date for presign operations. + if date == "" { + // If expires date is empty then request header Date is used. + date = headers.Get("Date") + } + + // From the Amazon docs: + // + // StringToSign = HTTP-Verb + "\n" + + // Content-Md5 + "\n" + + // Content-Type + "\n" + + // Date/Expires + "\n" + + // CanonicalizedProtocolHeaders + + // CanonicalizedResource; + stringToSign := strings.Join([]string{ + method, + headers.Get("Content-MD5"), + headers.Get("Content-Type"), + date, + canonicalHeaders, + }, "\n") + + return stringToSign + canonicalizedResourceV2(encodedResource, encodedQuery) +} + +// Return canonical resource string. 
+func canonicalizedResourceV2(encodedResource, encodedQuery string) string { + queries := strings.Split(encodedQuery, "&") + keyval := make(map[string]string) + for _, query := range queries { + key := query + val := "" + index := strings.Index(query, "=") + if index != -1 { + key = query[:index] + val = query[index+1:] + } + keyval[key] = val + } + + var canonicalQueries []string + for _, key := range resourceList { + val, ok := keyval[key] + if !ok { + continue + } + if val == "" { + canonicalQueries = append(canonicalQueries, key) + continue + } + canonicalQueries = append(canonicalQueries, key+"="+val) + } + + // The queries will be already sorted as resourceList is sorted, if canonicalQueries + // is empty strings.Join returns empty. + canonicalQuery := strings.Join(canonicalQueries, "&") + if canonicalQuery != "" { + return encodedResource + "?" + canonicalQuery + } + return encodedResource +} + +// Return canonical headers. +func canonicalizedAmzHeadersV2(headers http.Header) string { + var keys []string + keyval := make(map[string]string) + for key := range headers { + lkey := strings.ToLower(key) + if !strings.HasPrefix(lkey, "x-amz-") { + continue + } + keys = append(keys, lkey) + keyval[lkey] = strings.Join(headers[key], ",") + } + sort.Strings(keys) + var canonicalHeaders []string + for _, key := range keys { + canonicalHeaders = append(canonicalHeaders, key+":"+keyval[key]) + } + return strings.Join(canonicalHeaders, "\n") +} + +func calculateSignatureV2(stringToSign string, secret string) string { + hm := hmac.New(sha1.New, []byte(secret)) + hm.Write([]byte(stringToSign)) + return base64.StdEncoding.EncodeToString(hm.Sum(nil)) +} + +// compareSignatureV2 returns true if and only if both signatures +// are equal. The signatures are expected to be base64 encoded strings +// according to the AWS S3 signature V2 spec. 
+func compareSignatureV2(sig1, sig2 string) bool { + // Decode signature string to binary byte-sequence representation is required + // as Base64 encoding of a value is not unique: + // For example "aGVsbG8=" and "aGVsbG8=\r" will result in the same byte slice. + signature1, err := base64.StdEncoding.DecodeString(sig1) + if err != nil { + return false + } + signature2, err := base64.StdEncoding.DecodeString(sig2) + if err != nil { + return false + } + return subtle.ConstantTimeCompare(signature1, signature2) == 1 +} + +// Return signature-v2 for the presigned request. +func preSignatureV2(cred *Credential, method string, encodedResource string, encodedQuery string, headers http.Header, expires string) string { + stringToSign := getStringToSignV2(method, encodedResource, encodedQuery, headers, expires) + return calculateSignatureV2(stringToSign, cred.SecretKey) +} diff --git a/weed/s3api/auth_signature_v4.go b/weed/s3api/auth_signature_v4.go new file mode 100644 index 000000000..cdfd8be1d --- /dev/null +++ b/weed/s3api/auth_signature_v4.go @@ -0,0 +1,720 @@ +/* + * The following code tries to reverse engineer the Amazon S3 APIs, + * and is mostly copied from minio implementation. + */ + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +// implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package s3api + +import ( + "bytes" + "crypto/hmac" + "crypto/sha256" + "crypto/subtle" + "encoding/hex" + "net/http" + "net/url" + "regexp" + "sort" + "strconv" + "strings" + "time" + "unicode/utf8" +) + +func (iam *IdentityAccessManagement) reqSignatureV4Verify(r *http.Request) (*Identity, ErrorCode) { + sha256sum := getContentSha256Cksum(r) + switch { + case isRequestSignatureV4(r): + return iam.doesSignatureMatch(sha256sum, r) + case isRequestPresignedSignatureV4(r): + return iam.doesPresignedSignatureMatch(sha256sum, r) + } + return nil, ErrAccessDenied +} + +// Streaming AWS Signature Version '4' constants. +const ( + emptySHA256 = "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" + streamingContentSHA256 = "STREAMING-AWS4-HMAC-SHA256-PAYLOAD" + signV4ChunkedAlgorithm = "AWS4-HMAC-SHA256-PAYLOAD" + + // http Header "x-amz-content-sha256" == "UNSIGNED-PAYLOAD" indicates that the + // client did not calculate sha256 of the payload. + unsignedPayload = "UNSIGNED-PAYLOAD" +) + +// Returns SHA256 for calculating canonical-request. +func getContentSha256Cksum(r *http.Request) string { + var ( + defaultSha256Cksum string + v []string + ok bool + ) + + // For a presigned request we look at the query param for sha256. + if isRequestPresignedSignatureV4(r) { + // X-Amz-Content-Sha256, if not set in presigned requests, checksum + // will default to 'UNSIGNED-PAYLOAD'. + defaultSha256Cksum = unsignedPayload + v, ok = r.URL.Query()["X-Amz-Content-Sha256"] + if !ok { + v, ok = r.Header["X-Amz-Content-Sha256"] + } + } else { + // X-Amz-Content-Sha256, if not set in signed requests, checksum + // will default to sha256([]byte("")). + defaultSha256Cksum = emptySHA256 + v, ok = r.Header["X-Amz-Content-Sha256"] + } + + // We found 'X-Amz-Content-Sha256' return the captured value. + if ok { + return v[0] + } + + // We couldn't find 'X-Amz-Content-Sha256'. 
+ return defaultSha256Cksum +} + +// Verify authorization header - http://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html +func (iam *IdentityAccessManagement) doesSignatureMatch(hashedPayload string, r *http.Request) (*Identity, ErrorCode) { + + // Copy request. + req := *r + + // Save authorization header. + v4Auth := req.Header.Get("Authorization") + + // Parse signature version '4' header. + signV4Values, err := parseSignV4(v4Auth) + if err != ErrNone { + return nil, err + } + + // Extract all the signed headers along with its values. + extractedSignedHeaders, errCode := extractSignedHeaders(signV4Values.SignedHeaders, r) + if errCode != ErrNone { + return nil, errCode + } + + // Verify if the access key id matches. + identity, cred, found := iam.lookupByAccessKey(signV4Values.Credential.accessKey) + if !found { + return nil, ErrInvalidAccessKeyID + } + + // Extract date, if not present throw error. + var date string + if date = req.Header.Get(http.CanonicalHeaderKey("X-Amz-Date")); date == "" { + if date = r.Header.Get("Date"); date == "" { + return nil, ErrMissingDateHeader + } + } + // Parse date header. + t, e := time.Parse(iso8601Format, date) + if e != nil { + return nil, ErrMalformedDate + } + + // Query string. + queryStr := req.URL.Query().Encode() + + // Get canonical request. + canonicalRequest := getCanonicalRequest(extractedSignedHeaders, hashedPayload, queryStr, req.URL.Path, req.Method) + + // Get string to sign from canonical request. + stringToSign := getStringToSign(canonicalRequest, t, signV4Values.Credential.getScope()) + + // Get hmac signing key. + signingKey := getSigningKey(cred.SecretKey, signV4Values.Credential.scope.date, signV4Values.Credential.scope.region) + + // Calculate signature. + newSignature := getSignature(signingKey, stringToSign) + + // Verify if signature match. + if !compareSignatureV4(newSignature, signV4Values.Signature) { + return nil, ErrSignatureDoesNotMatch + } + + // Return error none. 
+ return identity, ErrNone
+}
+
+// credentialHeader data type represents structured form of Credential
+// string from authorization header.
+type credentialHeader struct {
+ accessKey string
+ scope struct {
+ date time.Time
+ region string
+ service string
+ request string
+ }
+}
+
+// signValues data type represents structured form of AWS Signature V4 header.
+type signValues struct {
+ Credential credentialHeader
+ SignedHeaders []string
+ Signature string
+}
+
+// Return scope string.
+func (c credentialHeader) getScope() string {
+ return strings.Join([]string{
+ c.scope.date.Format(yyyymmdd),
+ c.scope.region,
+ c.scope.service,
+ c.scope.request,
+ }, "/")
+}
+
+// Authorization: algorithm Credential=accessKeyID/credScope, \
+// SignedHeaders=signedHeaders, Signature=signature
+//
+func parseSignV4(v4Auth string) (sv signValues, aec ErrorCode) {
+ // Replace all spaced strings, some clients can send spaced
+ // parameters and some won't. So we pro-actively remove any spaces
+ // to make parsing easier.
+ v4Auth = strings.Replace(v4Auth, " ", "", -1)
+ if v4Auth == "" {
+ return sv, ErrAuthHeaderEmpty
+ }
+
+ // Verify if the header algorithm is supported or not.
+ if !strings.HasPrefix(v4Auth, signV4Algorithm) {
+ return sv, ErrSignatureVersionNotSupported
+ }
+
+ // Strip off the Algorithm prefix.
+ v4Auth = strings.TrimPrefix(v4Auth, signV4Algorithm)
+ authFields := strings.Split(strings.TrimSpace(v4Auth), ",")
+ if len(authFields) != 3 {
+ return sv, ErrMissingFields
+ }
+
+ // Initialize signature version '4' structured header.
+ signV4Values := signValues{}
+
+ var err ErrorCode
+ // Save credential values.
+ signV4Values.Credential, err = parseCredentialHeader(authFields[0])
+ if err != ErrNone {
+ return sv, err
+ }
+
+ // Save signed headers.
+ signV4Values.SignedHeaders, err = parseSignedHeader(authFields[1])
+ if err != ErrNone {
+ return sv, err
+ }
+
+ // Save signature. 
+ signV4Values.Signature, err = parseSignature(authFields[2]) + if err != ErrNone { + return sv, err + } + + // Return the structure here. + return signV4Values, ErrNone +} + +// parse credentialHeader string into its structured form. +func parseCredentialHeader(credElement string) (ch credentialHeader, aec ErrorCode) { + creds := strings.Split(strings.TrimSpace(credElement), "=") + if len(creds) != 2 { + return ch, ErrMissingFields + } + if creds[0] != "Credential" { + return ch, ErrMissingCredTag + } + credElements := strings.Split(strings.TrimSpace(creds[1]), "/") + if len(credElements) != 5 { + return ch, ErrCredMalformed + } + // Save access key id. + cred := credentialHeader{ + accessKey: credElements[0], + } + var e error + cred.scope.date, e = time.Parse(yyyymmdd, credElements[1]) + if e != nil { + return ch, ErrMalformedCredentialDate + } + + cred.scope.region = credElements[2] + cred.scope.service = credElements[3] // "s3" + cred.scope.request = credElements[4] // "aws4_request" + return cred, ErrNone +} + +// Parse slice of signed headers from signed headers tag. +func parseSignedHeader(signedHdrElement string) ([]string, ErrorCode) { + signedHdrFields := strings.Split(strings.TrimSpace(signedHdrElement), "=") + if len(signedHdrFields) != 2 { + return nil, ErrMissingFields + } + if signedHdrFields[0] != "SignedHeaders" { + return nil, ErrMissingSignHeadersTag + } + if signedHdrFields[1] == "" { + return nil, ErrMissingFields + } + signedHeaders := strings.Split(signedHdrFields[1], ";") + return signedHeaders, ErrNone +} + +// Parse signature from signature tag. 
+func parseSignature(signElement string) (string, ErrorCode) { + signFields := strings.Split(strings.TrimSpace(signElement), "=") + if len(signFields) != 2 { + return "", ErrMissingFields + } + if signFields[0] != "Signature" { + return "", ErrMissingSignTag + } + if signFields[1] == "" { + return "", ErrMissingFields + } + signature := signFields[1] + return signature, ErrNone +} + +// check query headers with presigned signature +// - http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-query-string-auth.html +func (iam *IdentityAccessManagement) doesPresignedSignatureMatch(hashedPayload string, r *http.Request) (*Identity, ErrorCode) { + + // Copy request + req := *r + + // Parse request query string. + pSignValues, err := parsePreSignV4(req.URL.Query()) + if err != ErrNone { + return nil, err + } + + // Verify if the access key id matches. + identity, cred, found := iam.lookupByAccessKey(pSignValues.Credential.accessKey) + if !found { + return nil, ErrInvalidAccessKeyID + } + + // Extract all the signed headers along with its values. + extractedSignedHeaders, errCode := extractSignedHeaders(pSignValues.SignedHeaders, r) + if errCode != ErrNone { + return nil, errCode + } + // Construct new query. + query := make(url.Values) + if req.URL.Query().Get("X-Amz-Content-Sha256") != "" { + query.Set("X-Amz-Content-Sha256", hashedPayload) + } + + query.Set("X-Amz-Algorithm", signV4Algorithm) + + now := time.Now().UTC() + + // If the host which signed the request is slightly ahead in time (by less than globalMaxSkewTime) the + // request should still be allowed. + if pSignValues.Date.After(now.Add(15 * time.Minute)) { + return nil, ErrRequestNotReadyYet + } + + if now.Sub(pSignValues.Date) > pSignValues.Expires { + return nil, ErrExpiredPresignRequest + } + + // Save the date and expires. + t := pSignValues.Date + expireSeconds := int(pSignValues.Expires / time.Second) + + // Construct the query. 
+ query.Set("X-Amz-Date", t.Format(iso8601Format)) + query.Set("X-Amz-Expires", strconv.Itoa(expireSeconds)) + query.Set("X-Amz-SignedHeaders", getSignedHeaders(extractedSignedHeaders)) + query.Set("X-Amz-Credential", cred.AccessKey+"/"+getScope(t, pSignValues.Credential.scope.region)) + + // Save other headers available in the request parameters. + for k, v := range req.URL.Query() { + + // Handle the metadata in presigned put query string + if strings.Contains(strings.ToLower(k), "x-amz-meta-") { + query.Set(k, v[0]) + } + + if strings.HasPrefix(strings.ToLower(k), "x-amz") { + continue + } + query[k] = v + } + + // Get the encoded query. + encodedQuery := query.Encode() + + // Verify if date query is same. + if req.URL.Query().Get("X-Amz-Date") != query.Get("X-Amz-Date") { + return nil, ErrSignatureDoesNotMatch + } + // Verify if expires query is same. + if req.URL.Query().Get("X-Amz-Expires") != query.Get("X-Amz-Expires") { + return nil, ErrSignatureDoesNotMatch + } + // Verify if signed headers query is same. + if req.URL.Query().Get("X-Amz-SignedHeaders") != query.Get("X-Amz-SignedHeaders") { + return nil, ErrSignatureDoesNotMatch + } + // Verify if credential query is same. + if req.URL.Query().Get("X-Amz-Credential") != query.Get("X-Amz-Credential") { + return nil, ErrSignatureDoesNotMatch + } + // Verify if sha256 payload query is same. + if req.URL.Query().Get("X-Amz-Content-Sha256") != "" { + if req.URL.Query().Get("X-Amz-Content-Sha256") != query.Get("X-Amz-Content-Sha256") { + return nil, ErrContentSHA256Mismatch + } + } + + /// Verify finally if signature is same. + + // Get canonical request. + presignedCanonicalReq := getCanonicalRequest(extractedSignedHeaders, hashedPayload, encodedQuery, req.URL.Path, req.Method) + + // Get string to sign from canonical request. + presignedStringToSign := getStringToSign(presignedCanonicalReq, t, pSignValues.Credential.getScope()) + + // Get hmac presigned signing key. 
+ presignedSigningKey := getSigningKey(cred.SecretKey, pSignValues.Credential.scope.date, pSignValues.Credential.scope.region) + + // Get new signature. + newSignature := getSignature(presignedSigningKey, presignedStringToSign) + + // Verify signature. + if !compareSignatureV4(req.URL.Query().Get("X-Amz-Signature"), newSignature) { + return nil, ErrSignatureDoesNotMatch + } + return identity, ErrNone +} + +func contains(list []string, elem string) bool { + for _, t := range list { + if t == elem { + return true + } + } + return false +} + +// preSignValues data type represents structued form of AWS Signature V4 query string. +type preSignValues struct { + signValues + Date time.Time + Expires time.Duration +} + +// Parses signature version '4' query string of the following form. +// +// querystring = X-Amz-Algorithm=algorithm +// querystring += &X-Amz-Credential= urlencode(accessKey + '/' + credential_scope) +// querystring += &X-Amz-Date=date +// querystring += &X-Amz-Expires=timeout interval +// querystring += &X-Amz-SignedHeaders=signed_headers +// querystring += &X-Amz-Signature=signature +// +// verifies if any of the necessary query params are missing in the presigned request. +func doesV4PresignParamsExist(query url.Values) ErrorCode { + v4PresignQueryParams := []string{"X-Amz-Algorithm", "X-Amz-Credential", "X-Amz-Signature", "X-Amz-Date", "X-Amz-SignedHeaders", "X-Amz-Expires"} + for _, v4PresignQueryParam := range v4PresignQueryParams { + if _, ok := query[v4PresignQueryParam]; !ok { + return ErrInvalidQueryParams + } + } + return ErrNone +} + +// Parses all the presigned signature values into separate elements. +func parsePreSignV4(query url.Values) (psv preSignValues, aec ErrorCode) { + var err ErrorCode + // verify whether the required query params exist. + err = doesV4PresignParamsExist(query) + if err != ErrNone { + return psv, err + } + + // Verify if the query algorithm is supported or not. 
+	if query.Get("X-Amz-Algorithm") != signV4Algorithm {
+		return psv, ErrInvalidQuerySignatureAlgo
+	}
+
+	// Initialize signature version '4' structured header.
+	preSignV4Values := preSignValues{}
+
+	// Save credential.
+	preSignV4Values.Credential, err = parseCredentialHeader("Credential=" + query.Get("X-Amz-Credential"))
+	if err != ErrNone {
+		return psv, err
+	}
+
+	var e error
+	// Save date in native time.Time.
+	preSignV4Values.Date, e = time.Parse(iso8601Format, query.Get("X-Amz-Date"))
+	if e != nil {
+		return psv, ErrMalformedPresignedDate
+	}
+
+	// Save expires in native time.Duration.
+	preSignV4Values.Expires, e = time.ParseDuration(query.Get("X-Amz-Expires") + "s")
+	if e != nil {
+		return psv, ErrMalformedExpires
+	}
+
+	if preSignV4Values.Expires < 0 {
+		return psv, ErrNegativeExpires
+	}
+
+	// Check if Expiry time is less than 7 days (value in seconds).
+	if preSignV4Values.Expires.Seconds() > 604800 {
+		return psv, ErrMaximumExpires
+	}
+
+	// Save signed headers.
+	preSignV4Values.SignedHeaders, err = parseSignedHeader("SignedHeaders=" + query.Get("X-Amz-SignedHeaders"))
+	if err != ErrNone {
+		return psv, err
+	}
+
+	// Save signature.
+	preSignV4Values.Signature, err = parseSignature("Signature=" + query.Get("X-Amz-Signature"))
+	if err != ErrNone {
+		return psv, err
+	}
+
+	// Return structured form of signature query string.
+	return preSignV4Values, ErrNone
+}
+
+// extractSignedHeaders extract signed headers from Authorization header
+func extractSignedHeaders(signedHeaders []string, r *http.Request) (http.Header, ErrorCode) {
+	reqHeaders := r.Header
+	// find whether "host" is part of list of signed headers.
+	// if not return ErrUnsignedHeaders. "host" is mandatory.
+	if !contains(signedHeaders, "host") {
+		return nil, ErrUnsignedHeaders
+	}
+	extractedSignedHeaders := make(http.Header)
+	for _, header := range signedHeaders {
+		// `host` will not be found in the headers, can be found in r.Host.
+		// but it is always necessary that the list of signed headers contains host in it.
+		val, ok := reqHeaders[http.CanonicalHeaderKey(header)]
+		if ok {
+			for _, enc := range val {
+				extractedSignedHeaders.Add(header, enc)
+			}
+			continue
+		}
+		switch header {
+		case "expect":
+			// Golang http server strips off 'Expect' header, if the
+			// client sent this as part of signed headers we need to
+			// handle otherwise we would see a signature mismatch.
+			// `aws-cli` sets this as part of signed headers.
+			//
+			// According to
+			// http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.20
+			// Expect header is always of form:
+			//
+			// Expect = "Expect" ":" 1#expectation
+			// expectation = "100-continue" | expectation-extension
+			//
+			// So it is safe to assume that '100-continue' is what would
+			// be sent, for the time being keep this work around.
+			// Adding a *TODO* to remove this later when Golang server
+			// doesn't filter out the 'Expect' header.
+			extractedSignedHeaders.Set(header, "100-continue")
+		case "host":
+			// Go http server removes "host" from Request.Header
+			extractedSignedHeaders.Set(header, r.Host)
+		case "transfer-encoding":
+			for _, enc := range r.TransferEncoding {
+				extractedSignedHeaders.Add(header, enc)
+			}
+		case "content-length":
+			// Signature-V4 spec excludes Content-Length from signed headers list for signature calculation.
+			// But some clients deviate from this rule. Hence we consider Content-Length for signature
+			// calculation to be compatible with such clients.
+ extractedSignedHeaders.Set(header, strconv.FormatInt(r.ContentLength, 10)) + default: + return nil, ErrUnsignedHeaders + } + } + return extractedSignedHeaders, ErrNone +} + +// getSignedHeaders generate a string i.e alphabetically sorted, semicolon-separated list of lowercase request header names +func getSignedHeaders(signedHeaders http.Header) string { + var headers []string + for k := range signedHeaders { + headers = append(headers, strings.ToLower(k)) + } + sort.Strings(headers) + return strings.Join(headers, ";") +} + +// getScope generate a string of a specific date, an AWS region, and a service. +func getScope(t time.Time, region string) string { + scope := strings.Join([]string{ + t.Format(yyyymmdd), + region, + "s3", + "aws4_request", + }, "/") + return scope +} + +// getCanonicalRequest generate a canonical request of style +// +// canonicalRequest = +// \n +// \n +// \n +// \n +// \n +// +// +func getCanonicalRequest(extractedSignedHeaders http.Header, payload, queryStr, urlPath, method string) string { + rawQuery := strings.Replace(queryStr, "+", "%20", -1) + encodedPath := encodePath(urlPath) + canonicalRequest := strings.Join([]string{ + method, + encodedPath, + rawQuery, + getCanonicalHeaders(extractedSignedHeaders), + getSignedHeaders(extractedSignedHeaders), + payload, + }, "\n") + return canonicalRequest +} + +// getStringToSign a string based on selected query values. +func getStringToSign(canonicalRequest string, t time.Time, scope string) string { + stringToSign := signV4Algorithm + "\n" + t.Format(iso8601Format) + "\n" + stringToSign = stringToSign + scope + "\n" + canonicalRequestBytes := sha256.Sum256([]byte(canonicalRequest)) + stringToSign = stringToSign + hex.EncodeToString(canonicalRequestBytes[:]) + return stringToSign +} + +// sumHMAC calculate hmac between two input byte array. 
+func sumHMAC(key []byte, data []byte) []byte { + hash := hmac.New(sha256.New, key) + hash.Write(data) + return hash.Sum(nil) +} + +// getSigningKey hmac seed to calculate final signature. +func getSigningKey(secretKey string, t time.Time, region string) []byte { + date := sumHMAC([]byte("AWS4"+secretKey), []byte(t.Format(yyyymmdd))) + regionBytes := sumHMAC(date, []byte(region)) + service := sumHMAC(regionBytes, []byte("s3")) + signingKey := sumHMAC(service, []byte("aws4_request")) + return signingKey +} + +// getSignature final signature in hexadecimal form. +func getSignature(signingKey []byte, stringToSign string) string { + return hex.EncodeToString(sumHMAC(signingKey, []byte(stringToSign))) +} + +// getCanonicalHeaders generate a list of request headers with their values +func getCanonicalHeaders(signedHeaders http.Header) string { + var headers []string + vals := make(http.Header) + for k, vv := range signedHeaders { + headers = append(headers, strings.ToLower(k)) + vals[strings.ToLower(k)] = vv + } + sort.Strings(headers) + + var buf bytes.Buffer + for _, k := range headers { + buf.WriteString(k) + buf.WriteByte(':') + for idx, v := range vals[k] { + if idx > 0 { + buf.WriteByte(',') + } + buf.WriteString(signV4TrimAll(v)) + } + buf.WriteByte('\n') + } + return buf.String() +} + +// Trim leading and trailing spaces and replace sequential spaces with one space, following Trimall() +// in http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html +func signV4TrimAll(input string) string { + // Compress adjacent spaces (a space is determined by + // unicode.IsSpace() internally here) to one space and return + return strings.Join(strings.Fields(input), " ") +} + +// if object matches reserved string, no need to encode them +var reservedObjectNames = regexp.MustCompile("^[a-zA-Z0-9-_.~/]+$") + +// EncodePath encode the strings from UTF-8 byte representations to HTML hex escape sequences +// +// This is necessary since regular url.Parse() 
and url.Encode() functions do not support UTF-8 +// non english characters cannot be parsed due to the nature in which url.Encode() is written +// +// This function on the other hand is a direct replacement for url.Encode() technique to support +// pretty much every UTF-8 character. +func encodePath(pathName string) string { + if reservedObjectNames.MatchString(pathName) { + return pathName + } + var encodedPathname string + for _, s := range pathName { + if 'A' <= s && s <= 'Z' || 'a' <= s && s <= 'z' || '0' <= s && s <= '9' { // §2.3 Unreserved characters (mark) + encodedPathname = encodedPathname + string(s) + continue + } + switch s { + case '-', '_', '.', '~', '/': // §2.3 Unreserved characters (mark) + encodedPathname = encodedPathname + string(s) + continue + default: + len := utf8.RuneLen(s) + if len < 0 { + // if utf8 cannot convert return the same string as is + return pathName + } + u := make([]byte, len) + utf8.EncodeRune(u, s) + for _, r := range u { + hex := hex.EncodeToString([]byte{r}) + encodedPathname = encodedPathname + "%" + strings.ToUpper(hex) + } + } + } + return encodedPathname +} + +// compareSignatureV4 returns true if and only if both signatures +// are equal. The signatures are expected to be HEX encoded strings +// according to the AWS S3 signature V4 spec. +func compareSignatureV4(sig1, sig2 string) bool { + // The CTC using []byte(str) works because the hex encoding + // is unique for a sequence of bytes. See also compareSignatureV2. 
+ return subtle.ConstantTimeCompare([]byte(sig1), []byte(sig2)) == 1 +} diff --git a/weed/s3api/auto_signature_v4_test.go b/weed/s3api/auto_signature_v4_test.go new file mode 100644 index 000000000..036b5c052 --- /dev/null +++ b/weed/s3api/auto_signature_v4_test.go @@ -0,0 +1,418 @@ +package s3api + +import ( + "bytes" + "crypto/md5" + "crypto/sha256" + "encoding/base64" + "encoding/hex" + "errors" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "sort" + "strconv" + "strings" + "testing" + "time" + "unicode/utf8" +) + +// TestIsRequestPresignedSignatureV4 - Test validates the logic for presign signature verision v4 detection. +func TestIsRequestPresignedSignatureV4(t *testing.T) { + testCases := []struct { + inputQueryKey string + inputQueryValue string + expectedResult bool + }{ + // Test case - 1. + // Test case with query key ""X-Amz-Credential" set. + {"", "", false}, + // Test case - 2. + {"X-Amz-Credential", "", true}, + // Test case - 3. + {"X-Amz-Content-Sha256", "", false}, + } + + for i, testCase := range testCases { + // creating an input HTTP request. + // Only the query parameters are relevant for this particular test. + inputReq, err := http.NewRequest("GET", "http://example.com", nil) + if err != nil { + t.Fatalf("Error initializing input HTTP request: %v", err) + } + q := inputReq.URL.Query() + q.Add(testCase.inputQueryKey, testCase.inputQueryValue) + inputReq.URL.RawQuery = q.Encode() + + actualResult := isRequestPresignedSignatureV4(inputReq) + if testCase.expectedResult != actualResult { + t.Errorf("Test %d: Expected the result to `%v`, but instead got `%v`", i+1, testCase.expectedResult, actualResult) + } + } +} + +// Tests is requested authenticated function, tests replies for s3 errors. 
+func TestIsReqAuthenticated(t *testing.T) { + iam := NewIdentityAccessManagement("", "") + iam.identities = []*Identity{ + { + Name: "someone", + Credentials: []*Credential{ + { + AccessKey: "access_key_1", + SecretKey: "secret_key_1", + }, + }, + Actions: nil, + }, + } + + // List of test cases for validating http request authentication. + testCases := []struct { + req *http.Request + s3Error ErrorCode + }{ + // When request is unsigned, access denied is returned. + {mustNewRequest("GET", "http://127.0.0.1:9000", 0, nil, t), ErrAccessDenied}, + // When request is properly signed, error is none. + {mustNewSignedRequest("GET", "http://127.0.0.1:9000", 0, nil, t), ErrNone}, + } + + // Validates all testcases. + for i, testCase := range testCases { + if _, s3Error := iam.reqSignatureV4Verify(testCase.req); s3Error != testCase.s3Error { + ioutil.ReadAll(testCase.req.Body) + t.Fatalf("Test %d: Unexpected S3 error: want %d - got %d", i, testCase.s3Error, s3Error) + } + } +} + +func TestCheckAdminRequestAuthType(t *testing.T) { + iam := NewIdentityAccessManagement("", "") + iam.identities = []*Identity{ + { + Name: "someone", + Credentials: []*Credential{ + { + AccessKey: "access_key_1", + SecretKey: "secret_key_1", + }, + }, + Actions: nil, + }, + } + + testCases := []struct { + Request *http.Request + ErrCode ErrorCode + }{ + {Request: mustNewRequest("GET", "http://127.0.0.1:9000", 0, nil, t), ErrCode: ErrAccessDenied}, + {Request: mustNewSignedRequest("GET", "http://127.0.0.1:9000", 0, nil, t), ErrCode: ErrNone}, + {Request: mustNewPresignedRequest("GET", "http://127.0.0.1:9000", 0, nil, t), ErrCode: ErrNone}, + } + for i, testCase := range testCases { + if _, s3Error := iam.reqSignatureV4Verify(testCase.Request); s3Error != testCase.ErrCode { + t.Errorf("Test %d: Unexpected s3error returned wanted %d, got %d", i, testCase.ErrCode, s3Error) + } + } +} + +// Provides a fully populated http request instance, fails otherwise. 
+func mustNewRequest(method string, urlStr string, contentLength int64, body io.ReadSeeker, t *testing.T) *http.Request { + req, err := newTestRequest(method, urlStr, contentLength, body) + if err != nil { + t.Fatalf("Unable to initialize new http request %s", err) + } + return req +} + +// This is similar to mustNewRequest but additionally the request +// is signed with AWS Signature V4, fails if not able to do so. +func mustNewSignedRequest(method string, urlStr string, contentLength int64, body io.ReadSeeker, t *testing.T) *http.Request { + req := mustNewRequest(method, urlStr, contentLength, body, t) + cred := &Credential{"access_key_1", "secret_key_1"} + if err := signRequestV4(req, cred.AccessKey, cred.SecretKey); err != nil { + t.Fatalf("Unable to inititalized new signed http request %s", err) + } + return req +} + +// This is similar to mustNewRequest but additionally the request +// is presigned with AWS Signature V4, fails if not able to do so. +func mustNewPresignedRequest(method string, urlStr string, contentLength int64, body io.ReadSeeker, t *testing.T) *http.Request { + req := mustNewRequest(method, urlStr, contentLength, body, t) + cred := &Credential{"access_key_1", "secret_key_1"} + if err := preSignV4(req, cred.AccessKey, cred.SecretKey, int64(10*time.Minute.Seconds())); err != nil { + t.Fatalf("Unable to inititalized new signed http request %s", err) + } + return req +} + +// Returns new HTTP request object. +func newTestRequest(method, urlStr string, contentLength int64, body io.ReadSeeker) (*http.Request, error) { + if method == "" { + method = "POST" + } + + // Save for subsequent use + var hashedPayload string + var md5Base64 string + switch { + case body == nil: + hashedPayload = getSHA256Hash([]byte{}) + default: + payloadBytes, err := ioutil.ReadAll(body) + if err != nil { + return nil, err + } + hashedPayload = getSHA256Hash(payloadBytes) + md5Base64 = getMD5HashBase64(payloadBytes) + } + // Seek back to beginning. 
+ if body != nil { + body.Seek(0, 0) + } else { + body = bytes.NewReader([]byte("")) + } + req, err := http.NewRequest(method, urlStr, body) + if err != nil { + return nil, err + } + if md5Base64 != "" { + req.Header.Set("Content-Md5", md5Base64) + } + req.Header.Set("x-amz-content-sha256", hashedPayload) + + // Add Content-Length + req.ContentLength = contentLength + + return req, nil +} + +// getSHA256Hash returns SHA-256 hash in hex encoding of given data. +func getSHA256Hash(data []byte) string { + return hex.EncodeToString(getSHA256Sum(data)) +} + +// getMD5HashBase64 returns MD5 hash in base64 encoding of given data. +func getMD5HashBase64(data []byte) string { + return base64.StdEncoding.EncodeToString(getMD5Sum(data)) +} + +// getSHA256Hash returns SHA-256 sum of given data. +func getSHA256Sum(data []byte) []byte { + hash := sha256.New() + hash.Write(data) + return hash.Sum(nil) +} + +// getMD5Sum returns MD5 sum of given data. +func getMD5Sum(data []byte) []byte { + hash := md5.New() + hash.Write(data) + return hash.Sum(nil) +} + +// getMD5Hash returns MD5 hash in hex encoding of given data. +func getMD5Hash(data []byte) string { + return hex.EncodeToString(getMD5Sum(data)) +} + +var ignoredHeaders = map[string]bool{ + "Authorization": true, + "Content-Type": true, + "Content-Length": true, + "User-Agent": true, +} + +// Sign given request using Signature V4. +func signRequestV4(req *http.Request, accessKey, secretKey string) error { + // Get hashed payload. + hashedPayload := req.Header.Get("x-amz-content-sha256") + if hashedPayload == "" { + return fmt.Errorf("Invalid hashed payload") + } + + currTime := time.Now() + + // Set x-amz-date. + req.Header.Set("x-amz-date", currTime.Format(iso8601Format)) + + // Get header map. + headerMap := make(map[string][]string) + for k, vv := range req.Header { + // If request header key is not in ignored headers, then add it. 
+ if _, ok := ignoredHeaders[http.CanonicalHeaderKey(k)]; !ok { + headerMap[strings.ToLower(k)] = vv + } + } + + // Get header keys. + headers := []string{"host"} + for k := range headerMap { + headers = append(headers, k) + } + sort.Strings(headers) + + region := "us-east-1" + + // Get canonical headers. + var buf bytes.Buffer + for _, k := range headers { + buf.WriteString(k) + buf.WriteByte(':') + switch { + case k == "host": + buf.WriteString(req.URL.Host) + fallthrough + default: + for idx, v := range headerMap[k] { + if idx > 0 { + buf.WriteByte(',') + } + buf.WriteString(v) + } + buf.WriteByte('\n') + } + } + canonicalHeaders := buf.String() + + // Get signed headers. + signedHeaders := strings.Join(headers, ";") + + // Get canonical query string. + req.URL.RawQuery = strings.Replace(req.URL.Query().Encode(), "+", "%20", -1) + + // Get canonical URI. + canonicalURI := EncodePath(req.URL.Path) + + // Get canonical request. + // canonicalRequest = + // \n + // \n + // \n + // \n + // \n + // + // + canonicalRequest := strings.Join([]string{ + req.Method, + canonicalURI, + req.URL.RawQuery, + canonicalHeaders, + signedHeaders, + hashedPayload, + }, "\n") + + // Get scope. 
+ scope := strings.Join([]string{ + currTime.Format(yyyymmdd), + region, + "s3", + "aws4_request", + }, "/") + + stringToSign := "AWS4-HMAC-SHA256" + "\n" + currTime.Format(iso8601Format) + "\n" + stringToSign = stringToSign + scope + "\n" + stringToSign = stringToSign + getSHA256Hash([]byte(canonicalRequest)) + + date := sumHMAC([]byte("AWS4"+secretKey), []byte(currTime.Format(yyyymmdd))) + regionHMAC := sumHMAC(date, []byte(region)) + service := sumHMAC(regionHMAC, []byte("s3")) + signingKey := sumHMAC(service, []byte("aws4_request")) + + signature := hex.EncodeToString(sumHMAC(signingKey, []byte(stringToSign))) + + // final Authorization header + parts := []string{ + "AWS4-HMAC-SHA256" + " Credential=" + accessKey + "/" + scope, + "SignedHeaders=" + signedHeaders, + "Signature=" + signature, + } + auth := strings.Join(parts, ", ") + req.Header.Set("Authorization", auth) + + return nil +} + +// preSignV4 presign the request, in accordance with +// http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-query-string-auth.html. +func preSignV4(req *http.Request, accessKeyID, secretAccessKey string, expires int64) error { + // Presign is not needed for anonymous credentials. + if accessKeyID == "" || secretAccessKey == "" { + return errors.New("Presign cannot be generated without access and secret keys") + } + + region := "us-east-1" + date := time.Now().UTC() + scope := getScope(date, region) + credential := fmt.Sprintf("%s/%s", accessKeyID, scope) + + // Set URL query. + query := req.URL.Query() + query.Set("X-Amz-Algorithm", signV4Algorithm) + query.Set("X-Amz-Date", date.Format(iso8601Format)) + query.Set("X-Amz-Expires", strconv.FormatInt(expires, 10)) + query.Set("X-Amz-SignedHeaders", "host") + query.Set("X-Amz-Credential", credential) + query.Set("X-Amz-Content-Sha256", unsignedPayload) + + // "host" is the only header required to be signed for Presigned URLs. 
+ extractedSignedHeaders := make(http.Header) + extractedSignedHeaders.Set("host", req.Host) + + queryStr := strings.Replace(query.Encode(), "+", "%20", -1) + canonicalRequest := getCanonicalRequest(extractedSignedHeaders, unsignedPayload, queryStr, req.URL.Path, req.Method) + stringToSign := getStringToSign(canonicalRequest, date, scope) + signingKey := getSigningKey(secretAccessKey, date, region) + signature := getSignature(signingKey, stringToSign) + + req.URL.RawQuery = query.Encode() + + // Add signature header to RawQuery. + req.URL.RawQuery += "&X-Amz-Signature=" + url.QueryEscape(signature) + + // Construct the final presigned URL. + return nil +} + +// EncodePath encode the strings from UTF-8 byte representations to HTML hex escape sequences +// +// This is necessary since regular url.Parse() and url.Encode() functions do not support UTF-8 +// non english characters cannot be parsed due to the nature in which url.Encode() is written +// +// This function on the other hand is a direct replacement for url.Encode() technique to support +// pretty much every UTF-8 character. 
+func EncodePath(pathName string) string { + if reservedObjectNames.MatchString(pathName) { + return pathName + } + var encodedPathname string + for _, s := range pathName { + if 'A' <= s && s <= 'Z' || 'a' <= s && s <= 'z' || '0' <= s && s <= '9' { // §2.3 Unreserved characters (mark) + encodedPathname = encodedPathname + string(s) + continue + } + switch s { + case '-', '_', '.', '~', '/': // §2.3 Unreserved characters (mark) + encodedPathname = encodedPathname + string(s) + continue + default: + len := utf8.RuneLen(s) + if len < 0 { + // if utf8 cannot convert return the same string as is + return pathName + } + u := make([]byte, len) + utf8.EncodeRune(u, s) + for _, r := range u { + hex := hex.EncodeToString([]byte{r}) + encodedPathname = encodedPathname + "%" + strings.ToUpper(hex) + } + } + } + return encodedPathname +} diff --git a/weed/s3api/chunked_reader_v4.go b/weed/s3api/chunked_reader_v4.go index 061fd4a92..76c4394c2 100644 --- a/weed/s3api/chunked_reader_v4.go +++ b/weed/s3api/chunked_reader_v4.go @@ -21,17 +21,115 @@ package s3api import ( "bufio" "bytes" + "crypto/sha256" + "encoding/hex" "errors" - "github.com/dustin/go-humanize" + "hash" "io" "net/http" -) + "time" -// Streaming AWS Signature Version '4' constants. -const ( - streamingContentSHA256 = "STREAMING-AWS4-HMAC-SHA256-PAYLOAD" + "github.com/dustin/go-humanize" ) +// getChunkSignature - get chunk signature. +func getChunkSignature(secretKey string, seedSignature string, region string, date time.Time, hashedChunk string) string { + + // Calculate string to sign. + stringToSign := signV4ChunkedAlgorithm + "\n" + + date.Format(iso8601Format) + "\n" + + getScope(date, region) + "\n" + + seedSignature + "\n" + + emptySHA256 + "\n" + + hashedChunk + + // Get hmac signing key. + signingKey := getSigningKey(secretKey, date, region) + + // Calculate signature. 
+ newSignature := getSignature(signingKey, stringToSign) + + return newSignature +} + +// calculateSeedSignature - Calculate seed signature in accordance with +// - http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-streaming.html +// returns signature, error otherwise if the signature mismatches or any other +// error while parsing and validating. +func (iam *IdentityAccessManagement) calculateSeedSignature(r *http.Request) (cred *Credential, signature string, region string, date time.Time, errCode ErrorCode) { + + // Copy request. + req := *r + + // Save authorization header. + v4Auth := req.Header.Get("Authorization") + + // Parse signature version '4' header. + signV4Values, errCode := parseSignV4(v4Auth) + if errCode != ErrNone { + return nil, "", "", time.Time{}, errCode + } + + // Payload streaming. + payload := streamingContentSHA256 + + // Payload for STREAMING signature should be 'STREAMING-AWS4-HMAC-SHA256-PAYLOAD' + if payload != req.Header.Get("X-Amz-Content-Sha256") { + return nil, "", "", time.Time{}, ErrContentSHA256Mismatch + } + + // Extract all the signed headers along with its values. + extractedSignedHeaders, errCode := extractSignedHeaders(signV4Values.SignedHeaders, r) + if errCode != ErrNone { + return nil, "", "", time.Time{}, errCode + } + // Verify if the access key id matches. + _, cred, found := iam.lookupByAccessKey(signV4Values.Credential.accessKey) + if !found { + return nil, "", "", time.Time{}, ErrInvalidAccessKeyID + } + + // Verify if region is valid. + region = signV4Values.Credential.scope.region + + // Extract date, if not present throw error. + var dateStr string + if dateStr = req.Header.Get(http.CanonicalHeaderKey("x-amz-date")); dateStr == "" { + if dateStr = r.Header.Get("Date"); dateStr == "" { + return nil, "", "", time.Time{}, ErrMissingDateHeader + } + } + // Parse date header. 
+	var err error
+	date, err = time.Parse(iso8601Format, dateStr)
+	if err != nil {
+		return nil, "", "", time.Time{}, ErrMalformedDate
+	}
+
+	// Query string.
+	queryStr := req.URL.Query().Encode()
+
+	// Get canonical request.
+	canonicalRequest := getCanonicalRequest(extractedSignedHeaders, payload, queryStr, req.URL.Path, req.Method)
+
+	// Get string to sign from canonical request.
+	stringToSign := getStringToSign(canonicalRequest, date, signV4Values.Credential.getScope())
+
+	// Get hmac signing key.
+	signingKey := getSigningKey(cred.SecretKey, signV4Values.Credential.scope.date, region)
+
+	// Calculate signature.
+	newSignature := getSignature(signingKey, stringToSign)
+
+	// Verify if signatures match.
+	if !compareSignatureV4(newSignature, signV4Values.Signature) {
+		return nil, "", "", time.Time{}, ErrSignatureDoesNotMatch
+	}
+
+	// Return calculated signature.
+	return cred, newSignature, region, date, ErrNone
+}
+
 const maxLineLength = 4 * humanize.KiByte // assumed <= bufio.defaultBufSize 4KiB
 
 // lineTooLong is generated as chunk header is bigger than 4KiB.
@@ -43,22 +141,36 @@ var errMalformedEncoding = errors.New("malformed chunked encoding")
 // newSignV4ChunkedReader returns a new s3ChunkedReader that translates the data read from r
 // out of HTTP "chunked" format before returning it.
 // The s3ChunkedReader returns io.EOF when the final 0-length chunk is read.
-func newSignV4ChunkedReader(req *http.Request) io.ReadCloser { - return &s3ChunkedReader{ - reader: bufio.NewReader(req.Body), - state: readChunkHeader, +func (iam *IdentityAccessManagement) newSignV4ChunkedReader(req *http.Request) (io.ReadCloser, ErrorCode) { + ident, seedSignature, region, seedDate, errCode := iam.calculateSeedSignature(req) + if errCode != ErrNone { + return nil, errCode } + return &s3ChunkedReader{ + cred: ident, + reader: bufio.NewReader(req.Body), + seedSignature: seedSignature, + seedDate: seedDate, + region: region, + chunkSHA256Writer: sha256.New(), + state: readChunkHeader, + }, ErrNone } // Represents the overall state that is required for decoding a // AWS Signature V4 chunked reader. type s3ChunkedReader struct { - reader *bufio.Reader - state chunkState - lastChunk bool - chunkSignature string - n uint64 // Unread bytes in chunk - err error + cred *Credential + reader *bufio.Reader + seedSignature string + seedDate time.Time + region string + state chunkState + lastChunk bool + chunkSignature string + chunkSHA256Writer hash.Hash // Calculates sha256 of chunk data. + n uint64 // Unread bytes in chunk + err error } // Read chunk reads the chunk token signature portion. @@ -157,6 +269,9 @@ func (cr *s3ChunkedReader) Read(buf []byte) (n int, err error) { return 0, cr.err } + // Calculate sha256. + cr.chunkSHA256Writer.Write(rbuf[:n0]) + // Update the bytes read into request buffer so far. n += n0 buf = buf[n0:] @@ -169,6 +284,19 @@ func (cr *s3ChunkedReader) Read(buf []byte) (n int, err error) { continue } case verifyChunk: + // Calculate the hashed chunk. + hashedChunk := hex.EncodeToString(cr.chunkSHA256Writer.Sum(nil)) + // Calculate the chunk signature. + newSignature := getChunkSignature(cr.cred.SecretKey, cr.seedSignature, cr.region, cr.seedDate, hashedChunk) + if !compareSignatureV4(cr.chunkSignature, newSignature) { + // Chunk signature doesn't match we return signature does not match. 
+ cr.err = errors.New("chunk signature does not match") + return 0, cr.err + } + // Newly calculated signature becomes the seed for the next chunk + // this follows the chaining. + cr.seedSignature = newSignature + cr.chunkSHA256Writer.Reset() if cr.lastChunk { cr.state = eofChunk } else { diff --git a/weed/s3api/custom_types.go b/weed/s3api/custom_types.go new file mode 100644 index 000000000..569dfc3ac --- /dev/null +++ b/weed/s3api/custom_types.go @@ -0,0 +1,3 @@ +package s3api + +const s3TimeFormat = "2006-01-02T15:04:05.999Z07:00" diff --git a/weed/s3api/filer_multipart.go b/weed/s3api/filer_multipart.go index 73be496d9..31ac850b1 100644 --- a/weed/s3api/filer_multipart.go +++ b/weed/s3api/filer_multipart.go @@ -1,6 +1,7 @@ package s3api import ( + "encoding/xml" "fmt" "path/filepath" "strconv" @@ -9,18 +10,20 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/s3" + "github.com/google/uuid" + "github.com/chrislusf/seaweedfs/weed/filer2" "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" - "github.com/satori/go.uuid" ) type InitiateMultipartUploadResult struct { + XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ InitiateMultipartUploadResult"` s3.CreateMultipartUploadOutput } func (s3a *S3ApiServer) createMultipartUpload(input *s3.CreateMultipartUploadInput) (output *InitiateMultipartUploadResult, code ErrorCode) { - uploadId, _ := uuid.NewV4() + uploadId, _ := uuid.NewRandom() uploadIdString := uploadId.String() if err := s3a.mkdir(s3a.genUploadsFolder(*input.Bucket), uploadIdString, func(entry *filer_pb.Entry) { @@ -34,9 +37,9 @@ func (s3a *S3ApiServer) createMultipartUpload(input *s3.CreateMultipartUploadInp } output = &InitiateMultipartUploadResult{ - s3.CreateMultipartUploadOutput{ + CreateMultipartUploadOutput: s3.CreateMultipartUploadOutput{ Bucket: input.Bucket, - Key: input.Key, + Key: objectKey(input.Key), UploadId: aws.String(uploadIdString), }, } @@ -45,6 +48,7 @@ 
func (s3a *S3ApiServer) createMultipartUpload(input *s3.CreateMultipartUploadInp } type CompleteMultipartUploadResult struct { + XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ CompleteMultipartUploadResult"` s3.CompleteMultipartUploadOutput } @@ -65,11 +69,12 @@ func (s3a *S3ApiServer) completeMultipartUpload(input *s3.CompleteMultipartUploa if strings.HasSuffix(entry.Name, ".part") && !entry.IsDirectory { for _, chunk := range entry.Chunks { p := &filer_pb.FileChunk{ - FileId: chunk.FileId, - Offset: offset, - Size: chunk.Size, - Mtime: chunk.Mtime, - ETag: chunk.ETag, + FileId: chunk.GetFileIdString(), + Offset: offset, + Size: chunk.Size, + Mtime: chunk.Mtime, + CipherKey: chunk.CipherKey, + ETag: chunk.ETag, } finalParts = append(finalParts, p) offset += int64(chunk.Size) @@ -87,6 +92,11 @@ func (s3a *S3ApiServer) completeMultipartUpload(input *s3.CompleteMultipartUploa } dirName = fmt.Sprintf("%s/%s/%s", s3a.option.BucketsPath, *input.Bucket, dirName) + // remove suffix '/' + if strings.HasSuffix(dirName, "/") { + dirName = dirName[:len(dirName)-1] + } + err = s3a.mkFile(dirName, entryName, finalParts) if err != nil { @@ -95,14 +105,15 @@ func (s3a *S3ApiServer) completeMultipartUpload(input *s3.CompleteMultipartUploa } output = &CompleteMultipartUploadResult{ - s3.CompleteMultipartUploadOutput{ - Bucket: input.Bucket, - ETag: aws.String("\"" + filer2.ETag(finalParts) + "\""), - Key: input.Key, + CompleteMultipartUploadOutput: s3.CompleteMultipartUploadOutput{ + Location: aws.String(fmt.Sprintf("http://%s%s/%s", s3a.option.Filer, dirName, entryName)), + Bucket: input.Bucket, + ETag: aws.String("\"" + filer2.ETagChunks(finalParts) + "\""), + Key: objectKey(input.Key), }, } - if err = s3a.rm(s3a.genUploadsFolder(*input.Bucket), *input.UploadId, true, false, true); err != nil { + if err = s3a.rm(s3a.genUploadsFolder(*input.Bucket), *input.UploadId, false, true); err != nil { glog.V(1).Infof("completeMultipartUpload cleanup %s upload %s: %v", 
*input.Bucket, *input.UploadId, err) } @@ -117,7 +128,7 @@ func (s3a *S3ApiServer) abortMultipartUpload(input *s3.AbortMultipartUploadInput return nil, ErrNoSuchUpload } if exists { - err = s3a.rm(s3a.genUploadsFolder(*input.Bucket), *input.UploadId, true, true, true) + err = s3a.rm(s3a.genUploadsFolder(*input.Bucket), *input.UploadId, true, true) } if err != nil { glog.V(1).Infof("bucket %s remove upload %s: %v", *input.Bucket, *input.UploadId, err) @@ -128,13 +139,14 @@ func (s3a *S3ApiServer) abortMultipartUpload(input *s3.AbortMultipartUploadInput } type ListMultipartUploadsResult struct { + XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ListMultipartUploadsResult"` s3.ListMultipartUploadsOutput } func (s3a *S3ApiServer) listMultipartUploads(input *s3.ListMultipartUploadsInput) (output *ListMultipartUploadsResult, code ErrorCode) { output = &ListMultipartUploadsResult{ - s3.ListMultipartUploadsOutput{ + ListMultipartUploadsOutput: s3.ListMultipartUploadsOutput{ Bucket: input.Bucket, Delimiter: input.Delimiter, EncodingType: input.EncodingType, @@ -144,7 +156,7 @@ func (s3a *S3ApiServer) listMultipartUploads(input *s3.ListMultipartUploadsInput }, } - entries, err := s3a.list(s3a.genUploadsFolder(*input.Bucket), *input.Prefix, *input.KeyMarker, true, int(*input.MaxUploads)) + entries, err := s3a.list(s3a.genUploadsFolder(*input.Bucket), *input.Prefix, *input.KeyMarker, true, uint32(*input.MaxUploads)) if err != nil { glog.Errorf("listMultipartUploads %s error: %v", *input.Bucket, err) return @@ -154,7 +166,7 @@ func (s3a *S3ApiServer) listMultipartUploads(input *s3.ListMultipartUploadsInput if entry.Extended != nil { key := entry.Extended["key"] output.Uploads = append(output.Uploads, &s3.MultipartUpload{ - Key: aws.String(string(key)), + Key: objectKey(aws.String(string(key))), UploadId: aws.String(entry.Name), }) } @@ -164,22 +176,22 @@ func (s3a *S3ApiServer) listMultipartUploads(input *s3.ListMultipartUploadsInput } type ListPartsResult struct 
{ + XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ListPartsResult"` s3.ListPartsOutput } func (s3a *S3ApiServer) listObjectParts(input *s3.ListPartsInput) (output *ListPartsResult, code ErrorCode) { output = &ListPartsResult{ - s3.ListPartsOutput{ + ListPartsOutput: s3.ListPartsOutput{ Bucket: input.Bucket, - Key: input.Key, + Key: objectKey(input.Key), UploadId: input.UploadId, MaxParts: input.MaxParts, // the maximum number of parts to return. PartNumberMarker: input.PartNumberMarker, // the part number starts after this, exclusive }, } - entries, err := s3a.list(s3a.genUploadsFolder(*input.Bucket)+"/"+*input.UploadId, - "", fmt.Sprintf("%04d.part", *input.PartNumberMarker), false, int(*input.MaxParts)) + entries, err := s3a.list(s3a.genUploadsFolder(*input.Bucket)+"/"+*input.UploadId, "", fmt.Sprintf("%04d.part", *input.PartNumberMarker), false, uint32(*input.MaxParts)) if err != nil { glog.Errorf("listObjectParts %s %s error: %v", *input.Bucket, *input.UploadId, err) return nil, ErrNoSuchUpload @@ -195,9 +207,9 @@ func (s3a *S3ApiServer) listObjectParts(input *s3.ListPartsInput) (output *ListP } output.Parts = append(output.Parts, &s3.Part{ PartNumber: aws.Int64(int64(partNumber)), - LastModified: aws.Time(time.Unix(entry.Attributes.Mtime, 0)), + LastModified: aws.Time(time.Unix(entry.Attributes.Mtime, 0).UTC()), Size: aws.Int64(int64(filer2.TotalSize(entry.Chunks))), - ETag: aws.String("\"" + filer2.ETag(entry.Chunks) + "\""), + ETag: aws.String("\"" + filer2.ETag(entry) + "\""), }) } } diff --git a/weed/s3api/filer_multipart_test.go b/weed/s3api/filer_multipart_test.go new file mode 100644 index 000000000..835665dd6 --- /dev/null +++ b/weed/s3api/filer_multipart_test.go @@ -0,0 +1,26 @@ +package s3api + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/s3" + "testing" +) + +func TestInitiateMultipartUploadResult(t *testing.T) { + + expected := ` 
+example-bucketexample-objectVXBsb2FkIElEIGZvciA2aWWpbmcncyBteS1tb3ZpZS5tMnRzIHVwbG9hZA` + response := &InitiateMultipartUploadResult{ + CreateMultipartUploadOutput: s3.CreateMultipartUploadOutput{ + Bucket: aws.String("example-bucket"), + Key: aws.String("example-object"), + UploadId: aws.String("VXBsb2FkIElEIGZvciA2aWWpbmcncyBteS1tb3ZpZS5tMnRzIHVwbG9hZA"), + }, + } + + encoded := string(encodeResponse(response)) + if encoded != expected { + t.Errorf("unexpected output: %s\nexpecting:%s", encoded, expected) + } + +} diff --git a/weed/s3api/filer_util.go b/weed/s3api/filer_util.go index 40c5a3e26..7f49c320e 100644 --- a/weed/s3api/filer_util.go +++ b/weed/s3api/filer_util.go @@ -3,119 +3,42 @@ package s3api import ( "context" "fmt" - "os" - "time" + "strings" "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" ) func (s3a *S3ApiServer) mkdir(parentDirectoryPath string, dirName string, fn func(entry *filer_pb.Entry)) error { - return s3a.withFilerClient(func(client filer_pb.SeaweedFilerClient) error { - - entry := &filer_pb.Entry{ - Name: dirName, - IsDirectory: true, - Attributes: &filer_pb.FuseAttributes{ - Mtime: time.Now().Unix(), - Crtime: time.Now().Unix(), - FileMode: uint32(0777 | os.ModeDir), - Uid: OS_UID, - Gid: OS_GID, - }, - } - - if fn != nil { - fn(entry) - } - - request := &filer_pb.CreateEntryRequest{ - Directory: parentDirectoryPath, - Entry: entry, - } - glog.V(1).Infof("mkdir: %v", request) - if _, err := client.CreateEntry(context.Background(), request); err != nil { - return fmt.Errorf("mkdir %s/%s: %v", parentDirectoryPath, dirName, err) - } + return filer_pb.Mkdir(s3a, parentDirectoryPath, dirName, fn) - return nil - }) } func (s3a *S3ApiServer) mkFile(parentDirectoryPath string, fileName string, chunks []*filer_pb.FileChunk) error { - return s3a.withFilerClient(func(client filer_pb.SeaweedFilerClient) error { - - entry := &filer_pb.Entry{ - Name: fileName, - IsDirectory: false, - Attributes: 
&filer_pb.FuseAttributes{ - Mtime: time.Now().Unix(), - Crtime: time.Now().Unix(), - FileMode: uint32(0770), - Uid: OS_UID, - Gid: OS_GID, - }, - Chunks: chunks, - } - request := &filer_pb.CreateEntryRequest{ - Directory: parentDirectoryPath, - Entry: entry, - } + return filer_pb.MkFile(s3a, parentDirectoryPath, fileName, chunks) - glog.V(1).Infof("create file: %s/%s", parentDirectoryPath, fileName) - if _, err := client.CreateEntry(context.Background(), request); err != nil { - return fmt.Errorf("create file %s/%s: %v", parentDirectoryPath, fileName, err) - } - - return nil - }) } -func (s3a *S3ApiServer) list(parentDirectoryPath, prefix, startFrom string, inclusive bool, limit int) (entries []*filer_pb.Entry, err error) { - - err = s3a.withFilerClient(func(client filer_pb.SeaweedFilerClient) error { - - request := &filer_pb.ListEntriesRequest{ - Directory: parentDirectoryPath, - Prefix: prefix, - StartFromFileName: startFrom, - InclusiveStartFrom: inclusive, - Limit: uint32(limit), - } - - glog.V(4).Infof("read directory: %v", request) - resp, err := client.ListEntries(context.Background(), request) - if err != nil { - return fmt.Errorf("list dir %v: %v", parentDirectoryPath, err) - } - - entries = resp.Entries +func (s3a *S3ApiServer) list(parentDirectoryPath, prefix, startFrom string, inclusive bool, limit uint32) (entries []*filer_pb.Entry, err error) { + err = filer_pb.List(s3a, parentDirectoryPath, prefix, func(entry *filer_pb.Entry, isLast bool) error { + entries = append(entries, entry) return nil - }) + }, startFrom, inclusive, limit) return } -func (s3a *S3ApiServer) rm(parentDirectoryPath string, entryName string, isDirectory, isDeleteData, isRecursive bool) error { - - return s3a.withFilerClient(func(client filer_pb.SeaweedFilerClient) error { - - ctx := context.Background() +func (s3a *S3ApiServer) rm(parentDirectoryPath, entryName string, isDeleteData, isRecursive bool) error { - request := &filer_pb.DeleteEntryRequest{ - Directory: 
parentDirectoryPath, - Name: entryName, - IsDeleteData: isDeleteData, - IsRecursive: isRecursive, - } + return s3a.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { - glog.V(1).Infof("delete entry %v/%v: %v", parentDirectoryPath, entryName, request) - if _, err := client.DeleteEntry(ctx, request); err != nil { - return fmt.Errorf("delete entry %s/%s: %v", parentDirectoryPath, entryName, err) + err := doDeleteEntry(client, parentDirectoryPath, entryName, isDeleteData, isRecursive) + if err != nil { + return err } return nil @@ -123,27 +46,36 @@ func (s3a *S3ApiServer) rm(parentDirectoryPath string, entryName string, isDirec } -func (s3a *S3ApiServer) exists(parentDirectoryPath string, entryName string, isDirectory bool) (exists bool, err error) { - - err = s3a.withFilerClient(func(client filer_pb.SeaweedFilerClient) error { - - ctx := context.Background() - - request := &filer_pb.LookupDirectoryEntryRequest{ - Directory: parentDirectoryPath, - Name: entryName, +func doDeleteEntry(client filer_pb.SeaweedFilerClient, parentDirectoryPath string, entryName string, isDeleteData bool, isRecursive bool) error { + request := &filer_pb.DeleteEntryRequest{ + Directory: parentDirectoryPath, + Name: entryName, + IsDeleteData: isDeleteData, + IsRecursive: isRecursive, + } + + glog.V(1).Infof("delete entry %v/%v: %v", parentDirectoryPath, entryName, request) + if resp, err := client.DeleteEntry(context.Background(), request); err != nil { + glog.V(0).Infof("delete entry %v: %v", request, err) + return fmt.Errorf("delete entry %s/%s: %v", parentDirectoryPath, entryName, err) + } else { + if resp.Error != "" { + return fmt.Errorf("delete entry %s/%s: %v", parentDirectoryPath, entryName, resp.Error) } + } + return nil +} - glog.V(4).Infof("exists entry %v/%v: %v", parentDirectoryPath, entryName, request) - resp, err := client.LookupDirectoryEntry(ctx, request) - if err != nil { - return fmt.Errorf("exists entry %s/%s: %v", parentDirectoryPath, entryName, err) - } 
+func (s3a *S3ApiServer) exists(parentDirectoryPath string, entryName string, isDirectory bool) (exists bool, err error) { - exists = resp.Entry.IsDirectory == isDirectory + return filer_pb.Exists(s3a, parentDirectoryPath, entryName, isDirectory) - return nil - }) +} - return +func objectKey(key *string) *string { + if strings.HasPrefix(*key, "/") { + t := (*key)[1:] + return &t + } + return key } diff --git a/weed/s3api/s3api_auth.go b/weed/s3api/s3api_auth.go index b680fe1e1..bf5cf5fab 100644 --- a/weed/s3api/s3api_auth.go +++ b/weed/s3api/s3api_auth.go @@ -9,6 +9,8 @@ import ( const ( signV4Algorithm = "AWS4-HMAC-SHA256" signV2Algorithm = "AWS" + iso8601Format = "20060102T150405Z" + yyyymmdd = "20060102" ) // Verify if request has JWT. @@ -23,8 +25,8 @@ func isRequestSignatureV4(r *http.Request) bool { // Verify if request has AWS Signature Version '2'. func isRequestSignatureV2(r *http.Request) bool { - return (!strings.HasPrefix(r.Header.Get("Authorization"), signV4Algorithm) && - strings.HasPrefix(r.Header.Get("Authorization"), signV2Algorithm)) + return !strings.HasPrefix(r.Header.Get("Authorization"), signV4Algorithm) && + strings.HasPrefix(r.Header.Get("Authorization"), signV2Algorithm) } // Verify if request has AWS PreSign Version '4'. 
diff --git a/weed/s3api/s3api_bucket_handlers.go b/weed/s3api/s3api_bucket_handlers.go index 1d319e354..7d96e3e0e 100644 --- a/weed/s3api/s3api_bucket_handlers.go +++ b/weed/s3api/s3api_bucket_handlers.go @@ -2,27 +2,24 @@ package s3api import ( "context" + "encoding/xml" "fmt" "math" "net/http" - "os" "time" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/s3" - "github.com/chrislusf/seaweedfs/weed/glog" - "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" "github.com/gorilla/mux" -) -var ( - OS_UID = uint32(os.Getuid()) - OS_GID = uint32(os.Getgid()) + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" ) type ListAllMyBucketsResult struct { - Buckets []*s3.Bucket `xml:"Buckets>Bucket"` + XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ListAllMyBucketsResult"` Owner *s3.Owner + Buckets []*s3.Bucket `xml:"Buckets>Bucket"` } func (s3a *S3ApiServer) ListBucketsHandler(w http.ResponseWriter, r *http.Request) { @@ -41,7 +38,7 @@ func (s3a *S3ApiServer) ListBucketsHandler(w http.ResponseWriter, r *http.Reques if entry.IsDirectory { buckets = append(buckets, &s3.Bucket{ Name: aws.String(entry.Name), - CreationDate: aws.Time(time.Unix(entry.Attributes.Crtime, 0)), + CreationDate: aws.Time(time.Unix(entry.Attributes.Crtime, 0).UTC()), }) } } @@ -76,9 +73,7 @@ func (s3a *S3ApiServer) DeleteBucketHandler(w http.ResponseWriter, r *http.Reque vars := mux.Vars(r) bucket := vars["bucket"] - err := s3a.withFilerClient(func(client filer_pb.SeaweedFilerClient) error { - - ctx := context.Background() + err := s3a.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { // delete collection deleteCollectionRequest := &filer_pb.DeleteCollectionRequest{ @@ -86,14 +81,14 @@ func (s3a *S3ApiServer) DeleteBucketHandler(w http.ResponseWriter, r *http.Reque } glog.V(1).Infof("delete collection: %v", deleteCollectionRequest) - if _, err := client.DeleteCollection(ctx, deleteCollectionRequest); err != nil { + 
if _, err := client.DeleteCollection(context.Background(), deleteCollectionRequest); err != nil { return fmt.Errorf("delete collection %s: %v", bucket, err) } return nil }) - err = s3a.rm(s3a.option.BucketsPath, bucket, true, false, true) + err = s3a.rm(s3a.option.BucketsPath, bucket, false, true) if err != nil { writeErrorResponse(w, ErrInternalError, r.URL) @@ -108,7 +103,7 @@ func (s3a *S3ApiServer) HeadBucketHandler(w http.ResponseWriter, r *http.Request vars := mux.Vars(r) bucket := vars["bucket"] - err := s3a.withFilerClient(func(client filer_pb.SeaweedFilerClient) error { + err := s3a.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { request := &filer_pb.LookupDirectoryEntryRequest{ Directory: s3a.option.BucketsPath, @@ -116,7 +111,10 @@ func (s3a *S3ApiServer) HeadBucketHandler(w http.ResponseWriter, r *http.Request } glog.V(1).Infof("lookup bucket: %v", request) - if _, err := client.LookupDirectoryEntry(context.Background(), request); err != nil { + if _, err := filer_pb.LookupEntry(client, request); err != nil { + if err == filer_pb.ErrNotFound { + return filer_pb.ErrNotFound + } return fmt.Errorf("lookup bucket %s/%s: %v", s3a.option.BucketsPath, bucket, err) } diff --git a/weed/s3api/s3api_bucket_handlers_test.go b/weed/s3api/s3api_bucket_handlers_test.go new file mode 100644 index 000000000..7ab04830b --- /dev/null +++ b/weed/s3api/s3api_bucket_handlers_test.go @@ -0,0 +1,39 @@ +package s3api + +import ( + "testing" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/s3" +) + +func TestListBucketsHandler(t *testing.T) { + + expected := ` +2011-04-09T12:34:49Ztest12011-02-09T12:34:49Ztest2` + var response ListAllMyBucketsResult + + var buckets []*s3.Bucket + buckets = append(buckets, &s3.Bucket{ + Name: aws.String("test1"), + CreationDate: aws.Time(time.Date(2011, 4, 9, 12, 34, 49, 0, time.UTC)), + }) + buckets = append(buckets, &s3.Bucket{ + Name: aws.String("test2"), + CreationDate: 
aws.Time(time.Date(2011, 2, 9, 12, 34, 49, 0, time.UTC)), + }) + + response = ListAllMyBucketsResult{ + Owner: &s3.Owner{ + ID: aws.String(""), + DisplayName: aws.String(""), + }, + Buckets: buckets, + } + + encoded := string(encodeResponse(response)) + if encoded != expected { + t.Errorf("unexpected output: %s\nexpecting:%s", encoded, expected) + } +} diff --git a/weed/s3api/s3api_errors.go b/weed/s3api/s3api_errors.go index 7ba55ed28..ff411f276 100644 --- a/weed/s3api/s3api_errors.go +++ b/weed/s3api/s3api_errors.go @@ -27,11 +27,13 @@ type ErrorCode int // Error codes, see full list at http://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html const ( ErrNone ErrorCode = iota + ErrAccessDenied ErrMethodNotAllowed ErrBucketNotEmpty ErrBucketAlreadyExists ErrBucketAlreadyOwnedByYou ErrNoSuchBucket + ErrNoSuchKey ErrNoSuchUpload ErrInvalidBucketName ErrInvalidDigest @@ -41,12 +43,43 @@ const ( ErrInvalidPartNumberMarker ErrInvalidPart ErrInternalError + ErrInvalidCopyDest + ErrInvalidCopySource + ErrAuthHeaderEmpty + ErrSignatureVersionNotSupported + ErrMissingFields + ErrMissingCredTag + ErrCredMalformed + ErrMalformedXML + ErrMalformedDate + ErrMalformedPresignedDate + ErrMalformedCredentialDate + ErrMissingSignHeadersTag + ErrMissingSignTag + ErrUnsignedHeaders + ErrInvalidQueryParams + ErrInvalidQuerySignatureAlgo + ErrExpiredPresignRequest + ErrMalformedExpires + ErrNegativeExpires + ErrMaximumExpires + ErrSignatureDoesNotMatch + ErrContentSHA256Mismatch + ErrInvalidAccessKeyID + ErrRequestNotReadyYet + ErrMissingDateHeader + ErrInvalidRequest ErrNotImplemented ) // error code to APIError structure, these fields carry respective // descriptions for all the error responses. 
var errorCodeResponse = map[ErrorCode]APIError{ + ErrAccessDenied: { + Code: "AccessDenied", + Description: "Access Denied.", + HTTPStatusCode: http.StatusForbidden, + }, ErrMethodNotAllowed: { Code: "MethodNotAllowed", Description: "The specified method is not allowed against this resource.", @@ -102,6 +135,11 @@ var errorCodeResponse = map[ErrorCode]APIError{ Description: "The specified bucket does not exist", HTTPStatusCode: http.StatusNotFound, }, + ErrNoSuchKey: { + Code: "NoSuchKey", + Description: "The specified key does not exist.", + HTTPStatusCode: http.StatusNotFound, + }, ErrNoSuchUpload: { Code: "NoSuchUpload", Description: "The specified multipart upload does not exist. The upload ID may be invalid, or the upload may have been aborted or completed.", @@ -118,6 +156,139 @@ var errorCodeResponse = map[ErrorCode]APIError{ Description: "One or more of the specified parts could not be found. The part may not have been uploaded, or the specified entity tag may not match the part's entity tag.", HTTPStatusCode: http.StatusBadRequest, }, + + ErrInvalidCopyDest: { + Code: "InvalidRequest", + Description: "This copy request is illegal because it is trying to copy an object to itself without changing the object's metadata, storage class, website redirect location or encryption attributes.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrInvalidCopySource: { + Code: "InvalidArgument", + Description: "Copy Source must mention the source bucket and key: sourcebucket/sourcekey.", + HTTPStatusCode: http.StatusBadRequest, + }, + + ErrMalformedXML: { + Code: "MalformedXML", + Description: "The XML you provided was not well-formed or did not validate against our published schema.", + HTTPStatusCode: http.StatusBadRequest, + }, + + ErrAuthHeaderEmpty: { + Code: "InvalidArgument", + Description: "Authorization header is invalid -- one and only one ' ' (space) required.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrSignatureVersionNotSupported: { + Code: 
"InvalidRequest", + Description: "The authorization mechanism you have provided is not supported. Please use AWS4-HMAC-SHA256.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrMissingFields: { + Code: "MissingFields", + Description: "Missing fields in request.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrMissingCredTag: { + Code: "InvalidRequest", + Description: "Missing Credential field for this request.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrCredMalformed: { + Code: "AuthorizationQueryParametersError", + Description: "Error parsing the X-Amz-Credential parameter; the Credential is mal-formed; expecting \"/YYYYMMDD/REGION/SERVICE/aws4_request\".", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrMalformedDate: { + Code: "MalformedDate", + Description: "Invalid date format header, expected to be in ISO8601, RFC1123 or RFC1123Z time format.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrMalformedPresignedDate: { + Code: "AuthorizationQueryParametersError", + Description: "X-Amz-Date must be in the ISO8601 Long Format \"yyyyMMdd'T'HHmmss'Z'\"", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrMissingSignHeadersTag: { + Code: "InvalidArgument", + Description: "Signature header missing SignedHeaders field.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrMissingSignTag: { + Code: "AccessDenied", + Description: "Signature header missing Signature field.", + HTTPStatusCode: http.StatusBadRequest, + }, + + ErrUnsignedHeaders: { + Code: "AccessDenied", + Description: "There were headers present in the request which were not signed", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrInvalidQueryParams: { + Code: "AuthorizationQueryParametersError", + Description: "Query-string authentication version 4 requires the X-Amz-Algorithm, X-Amz-Credential, X-Amz-Signature, X-Amz-Date, X-Amz-SignedHeaders, and X-Amz-Expires parameters.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrInvalidQuerySignatureAlgo: { + Code: 
"AuthorizationQueryParametersError", + Description: "X-Amz-Algorithm only supports \"AWS4-HMAC-SHA256\".", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrExpiredPresignRequest: { + Code: "AccessDenied", + Description: "Request has expired", + HTTPStatusCode: http.StatusForbidden, + }, + ErrMalformedExpires: { + Code: "AuthorizationQueryParametersError", + Description: "X-Amz-Expires should be a number", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrNegativeExpires: { + Code: "AuthorizationQueryParametersError", + Description: "X-Amz-Expires must be non-negative", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrMaximumExpires: { + Code: "AuthorizationQueryParametersError", + Description: "X-Amz-Expires must be less than a week (in seconds); that is, the given X-Amz-Expires must be less than 604800 seconds", + HTTPStatusCode: http.StatusBadRequest, + }, + + ErrInvalidAccessKeyID: { + Code: "InvalidAccessKeyId", + Description: "The access key ID you provided does not exist in our records.", + HTTPStatusCode: http.StatusForbidden, + }, + + ErrRequestNotReadyYet: { + Code: "AccessDenied", + Description: "Request is not valid yet", + HTTPStatusCode: http.StatusForbidden, + }, + + ErrSignatureDoesNotMatch: { + Code: "SignatureDoesNotMatch", + Description: "The request signature we calculated does not match the signature you provided. 
Check your key and signing method.", + HTTPStatusCode: http.StatusForbidden, + }, + + ErrContentSHA256Mismatch: { + Code: "XAmzContentSHA256Mismatch", + Description: "The provided 'x-amz-content-sha256' header does not match what was computed.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrMissingDateHeader: { + Code: "AccessDenied", + Description: "AWS authentication requires a valid Date or x-amz-date header", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrInvalidRequest: { + Code: "InvalidRequest", + Description: "Invalid Request", + HTTPStatusCode: http.StatusBadRequest, + }, ErrNotImplemented: { Code: "NotImplemented", Description: "A header you provided implies functionality that is not implemented", diff --git a/weed/s3api/s3api_handlers.go b/weed/s3api/s3api_handlers.go index 286398310..7ef676400 100644 --- a/weed/s3api/s3api_handlers.go +++ b/weed/s3api/s3api_handlers.go @@ -5,12 +5,16 @@ import ( "encoding/base64" "encoding/xml" "fmt" - "github.com/chrislusf/seaweedfs/weed/glog" - "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" - "github.com/chrislusf/seaweedfs/weed/util" "net/http" "net/url" + "strconv" "time" + + "google.golang.org/grpc" + + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" ) type mimeType string @@ -35,17 +39,18 @@ func encodeResponse(response interface{}) []byte { return bytesBuffer.Bytes() } -func (s3a *S3ApiServer) withFilerClient(fn func(filer_pb.SeaweedFilerClient) error) error { +var _ = filer_pb.FilerClient(&S3ApiServer{}) - grpcConnection, err := util.GrpcDial(s3a.option.FilerGrpcAddress) - if err != nil { - return fmt.Errorf("fail to dial %s: %v", s3a.option.FilerGrpcAddress, err) - } - defer grpcConnection.Close() +func (s3a *S3ApiServer) WithFilerClient(fn func(filer_pb.SeaweedFilerClient) error) error { - client := filer_pb.NewSeaweedFilerClient(grpcConnection) + return pb.WithCachedGrpcClient(func(grpcConnection 
*grpc.ClientConn) error { + client := filer_pb.NewSeaweedFilerClient(grpcConnection) + return fn(client) + }, s3a.option.FilerGrpcAddress, s3a.option.GrpcDialOption) - return fn(client) +} +func (s3a *S3ApiServer) AdjustedUrl(hostAndPort string) string { + return hostAndPort } // If none of the http routes match respond with MethodNotAllowed @@ -72,13 +77,19 @@ func getRESTErrorResponse(err APIError, resource string) RESTErrorResponse { func writeResponse(w http.ResponseWriter, statusCode int, response []byte, mType mimeType) { setCommonHeaders(w) + if response != nil { + w.Header().Set("Content-Length", strconv.Itoa(len(response))) + } if mType != mimeNone { w.Header().Set("Content-Type", string(mType)) } w.WriteHeader(statusCode) if response != nil { glog.V(4).Infof("status %d %s: %s", statusCode, mType, string(response)) - w.Write(response) + _, err := w.Write(response) + if err != nil { + glog.V(0).Infof("write err: %v", err) + } w.(http.Flusher).Flush() } } diff --git a/weed/s3api/s3api_object_copy_handlers.go b/weed/s3api/s3api_object_copy_handlers.go new file mode 100644 index 000000000..b8fb3f6a4 --- /dev/null +++ b/weed/s3api/s3api_object_copy_handlers.go @@ -0,0 +1,151 @@ +package s3api + +import ( + "fmt" + "net/http" + "net/url" + "strconv" + "strings" + "time" + + "github.com/gorilla/mux" + + "github.com/chrislusf/seaweedfs/weed/util" +) + +func (s3a *S3ApiServer) CopyObjectHandler(w http.ResponseWriter, r *http.Request) { + + vars := mux.Vars(r) + dstBucket := vars["bucket"] + dstObject := getObject(vars) + + // Copy source path. + cpSrcPath, err := url.QueryUnescape(r.Header.Get("X-Amz-Copy-Source")) + if err != nil { + // Save unescaped string as is. + cpSrcPath = r.Header.Get("X-Amz-Copy-Source") + } + + srcBucket, srcObject := pathToBucketAndObject(cpSrcPath) + // If source object is empty or bucket is empty, reply back invalid copy source. 
+ if srcObject == "" || srcBucket == "" { + writeErrorResponse(w, ErrInvalidCopySource, r.URL) + return + } + + if srcBucket == dstBucket && srcObject == dstObject { + writeErrorResponse(w, ErrInvalidCopySource, r.URL) + return + } + + dstUrl := fmt.Sprintf("http://%s%s/%s%s?collection=%s", + s3a.option.Filer, s3a.option.BucketsPath, dstBucket, dstObject, dstBucket) + srcUrl := fmt.Sprintf("http://%s%s/%s%s", + s3a.option.Filer, s3a.option.BucketsPath, srcBucket, srcObject) + + _, _, dataReader, err := util.DownloadFile(srcUrl) + if err != nil { + writeErrorResponse(w, ErrInvalidCopySource, r.URL) + return + } + defer dataReader.Close() + + etag, errCode := s3a.putToFiler(r, dstUrl, dataReader) + + if errCode != ErrNone { + writeErrorResponse(w, errCode, r.URL) + return + } + + setEtag(w, etag) + + response := CopyObjectResult{ + ETag: etag, + LastModified: time.Now(), + } + + writeSuccessResponseXML(w, encodeResponse(response)) + +} + +func pathToBucketAndObject(path string) (bucket, object string) { + path = strings.TrimPrefix(path, "/") + parts := strings.SplitN(path, "/", 2) + if len(parts) == 2 { + return parts[0], "/" + parts[1] + } + return parts[0], "/" +} + +type CopyPartResult struct { + LastModified time.Time `xml:"LastModified"` + ETag string `xml:"ETag"` +} + +func (s3a *S3ApiServer) CopyObjectPartHandler(w http.ResponseWriter, r *http.Request) { + // https://docs.aws.amazon.com/AmazonS3/latest/dev/CopyingObjctsUsingRESTMPUapi.html + // https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html + vars := mux.Vars(r) + dstBucket := vars["bucket"] + // dstObject := getObject(vars) + + // Copy source path. + cpSrcPath, err := url.QueryUnescape(r.Header.Get("X-Amz-Copy-Source")) + if err != nil { + // Save unescaped string as is. + cpSrcPath = r.Header.Get("X-Amz-Copy-Source") + } + + srcBucket, srcObject := pathToBucketAndObject(cpSrcPath) + // If source object is empty or bucket is empty, reply back invalid copy source. 
+ if srcObject == "" || srcBucket == "" { + writeErrorResponse(w, ErrInvalidCopySource, r.URL) + return + } + + uploadID := r.URL.Query().Get("uploadId") + partIDString := r.URL.Query().Get("partNumber") + + partID, err := strconv.Atoi(partIDString) + if err != nil { + writeErrorResponse(w, ErrInvalidPart, r.URL) + return + } + + // check partID with maximum part ID for multipart objects + if partID > globalMaxPartID { + writeErrorResponse(w, ErrInvalidMaxParts, r.URL) + return + } + + rangeHeader := r.Header.Get("x-amz-copy-source-range") + + dstUrl := fmt.Sprintf("http://%s%s/%s/%04d.part?collection=%s", + s3a.option.Filer, s3a.genUploadsFolder(dstBucket), uploadID, partID-1, dstBucket) + srcUrl := fmt.Sprintf("http://%s%s/%s%s", + s3a.option.Filer, s3a.option.BucketsPath, srcBucket, srcObject) + + dataReader, err := util.ReadUrlAsReaderCloser(srcUrl, rangeHeader) + if err != nil { + writeErrorResponse(w, ErrInvalidCopySource, r.URL) + return + } + defer dataReader.Close() + + etag, errCode := s3a.putToFiler(r, dstUrl, dataReader) + + if errCode != ErrNone { + writeErrorResponse(w, errCode, r.URL) + return + } + + setEtag(w, etag) + + response := CopyPartResult{ + ETag: etag, + LastModified: time.Now(), + } + + writeSuccessResponseXML(w, encodeResponse(response)) + +} diff --git a/weed/s3api/s3api_object_handlers.go b/weed/s3api/s3api_object_handlers.go index 44e93d297..0d287c4ff 100644 --- a/weed/s3api/s3api_object_handlers.go +++ b/weed/s3api/s3api_object_handlers.go @@ -3,15 +3,19 @@ package s3api import ( "crypto/md5" "encoding/json" + "encoding/xml" "fmt" "io" "io/ioutil" "net/http" "strings" - "github.com/chrislusf/seaweedfs/weed/glog" - "github.com/chrislusf/seaweedfs/weed/server" "github.com/gorilla/mux" + + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + weed_server "github.com/chrislusf/seaweedfs/weed/server" + "github.com/chrislusf/seaweedfs/weed/util" ) var ( @@ -40,12 +44,17 @@ func (s3a *S3ApiServer) 
PutObjectHandler(w http.ResponseWriter, r *http.Request) rAuthType := getRequestAuthType(r) dataReader := r.Body + var s3ErrCode ErrorCode if rAuthType == authTypeStreamingSigned { - dataReader = newSignV4ChunkedReader(r) + dataReader, s3ErrCode = s3a.iam.newSignV4ChunkedReader(r) + } + if s3ErrCode != ErrNone { + writeErrorResponse(w, s3ErrCode, r.URL) + return } + defer dataReader.Close() - uploadUrl := fmt.Sprintf("http://%s%s/%s%s?collection=%s", - s3a.option.Filer, s3a.option.BucketsPath, bucket, object, bucket) + uploadUrl := fmt.Sprintf("http://%s%s/%s%s", s3a.option.Filer, s3a.option.BucketsPath, bucket, object) etag, errCode := s3a.putToFiler(r, uploadUrl, dataReader) @@ -96,11 +105,11 @@ func (s3a *S3ApiServer) DeleteObjectHandler(w http.ResponseWriter, r *http.Reque bucket := vars["bucket"] object := getObject(vars) - destUrl := fmt.Sprintf("http://%s%s/%s%s", + destUrl := fmt.Sprintf("http://%s%s/%s%s?recursive=true", s3a.option.Filer, s3a.option.BucketsPath, bucket, object) - s3a.proxyToFiler(w, r, destUrl, func(proxyResonse *http.Response, w http.ResponseWriter) { - for k, v := range proxyResonse.Header { + s3a.proxyToFiler(w, r, destUrl, func(proxyResponse *http.Response, w http.ResponseWriter) { + for k, v := range proxyResponse.Header { w.Header()[k] = v } w.WriteHeader(http.StatusNoContent) @@ -108,13 +117,94 @@ func (s3a *S3ApiServer) DeleteObjectHandler(w http.ResponseWriter, r *http.Reque } +/// ObjectIdentifier carries key name for the object to delete. +type ObjectIdentifier struct { + ObjectName string `xml:"Key"` +} + +// DeleteObjectsRequest - xml carrying the object key names which needs to be deleted. +type DeleteObjectsRequest struct { + // Element to enable quiet mode for the request + Quiet bool + // List of objects to be deleted + Objects []ObjectIdentifier `xml:"Object"` +} + +// DeleteError structure. 
+type DeleteError struct { + Code string + Message string + Key string +} + +// DeleteObjectsResponse container for multiple object deletes. +type DeleteObjectsResponse struct { + XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ DeleteResult" json:"-"` + + // Collection of all deleted objects + DeletedObjects []ObjectIdentifier `xml:"Deleted,omitempty"` + + // Collection of errors deleting certain objects. + Errors []DeleteError `xml:"Error,omitempty"` +} + // DeleteMultipleObjectsHandler - Delete multiple objects func (s3a *S3ApiServer) DeleteMultipleObjectsHandler(w http.ResponseWriter, r *http.Request) { - // TODO - writeErrorResponse(w, ErrNotImplemented, r.URL) + + vars := mux.Vars(r) + bucket := vars["bucket"] + + deleteXMLBytes, err := ioutil.ReadAll(r.Body) + if err != nil { + writeErrorResponse(w, ErrInternalError, r.URL) + return + } + + deleteObjects := &DeleteObjectsRequest{} + if err := xml.Unmarshal(deleteXMLBytes, deleteObjects); err != nil { + writeErrorResponse(w, ErrMalformedXML, r.URL) + return + } + + var deletedObjects []ObjectIdentifier + var deleteErrors []DeleteError + + s3a.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { + + for _, object := range deleteObjects.Objects { + lastSeparator := strings.LastIndex(object.ObjectName, "/") + parentDirectoryPath, entryName, isDeleteData, isRecursive := "/", object.ObjectName, true, true + if lastSeparator > 0 && lastSeparator+1 < len(object.ObjectName) { + entryName = object.ObjectName[lastSeparator+1:] + parentDirectoryPath = "/" + object.ObjectName[:lastSeparator] + } + parentDirectoryPath = fmt.Sprintf("%s/%s%s", s3a.option.BucketsPath, bucket, parentDirectoryPath) + + err := doDeleteEntry(client, parentDirectoryPath, entryName, isDeleteData, isRecursive) + if err == nil { + deletedObjects = append(deletedObjects, object) + } else { + deleteErrors = append(deleteErrors, DeleteError{ + Code: "", + Message: err.Error(), + Key: object.ObjectName, + }) + } + } + return 
nil + }) + + deleteResp := DeleteObjectsResponse{} + if !deleteObjects.Quiet { + deleteResp.DeletedObjects = deletedObjects + } + deleteResp.Errors = deleteErrors + + writeSuccessResponseXML(w, encodeResponse(deleteResp)) + } -func (s3a *S3ApiServer) proxyToFiler(w http.ResponseWriter, r *http.Request, destUrl string, responseFn func(proxyResonse *http.Response, w http.ResponseWriter)) { +func (s3a *S3ApiServer) proxyToFiler(w http.ResponseWriter, r *http.Request, destUrl string, responseFn func(proxyResponse *http.Response, w http.ResponseWriter)) { glog.V(2).Infof("s3 proxying %s to %s", r.Method, destUrl) @@ -128,7 +218,6 @@ func (s3a *S3ApiServer) proxyToFiler(w http.ResponseWriter, r *http.Request, des proxyReq.Header.Set("Host", s3a.option.Filer) proxyReq.Header.Set("X-Forwarded-For", r.RemoteAddr) - proxyReq.Header.Set("Etag-MD5", "True") for header, values := range r.Header { for _, value := range values { @@ -143,22 +232,23 @@ func (s3a *S3ApiServer) proxyToFiler(w http.ResponseWriter, r *http.Request, des writeErrorResponse(w, ErrInternalError, r.URL) return } - defer resp.Body.Close() + defer util.CloseResponse(resp) responseFn(resp, w) + } -func passThroughResponse(proxyResonse *http.Response, w http.ResponseWriter) { - for k, v := range proxyResonse.Header { +func passThroughResponse(proxyResponse *http.Response, w http.ResponseWriter) { + for k, v := range proxyResponse.Header { w.Header()[k] = v } - w.WriteHeader(proxyResonse.StatusCode) - io.Copy(w, proxyResonse.Body) + w.WriteHeader(proxyResponse.StatusCode) + io.Copy(w, proxyResponse.Body) } -func (s3a *S3ApiServer) putToFiler(r *http.Request, uploadUrl string, dataReader io.ReadCloser) (etag string, code ErrorCode) { +func (s3a *S3ApiServer) putToFiler(r *http.Request, uploadUrl string, dataReader io.Reader) (etag string, code ErrorCode) { hash := md5.New() - var body io.Reader = io.TeeReader(dataReader, hash) + var body = io.TeeReader(dataReader, hash) proxyReq, err := http.NewRequest("PUT", 
uploadUrl, body) @@ -178,8 +268,6 @@ func (s3a *S3ApiServer) putToFiler(r *http.Request, uploadUrl string, dataReader resp, postErr := client.Do(proxyReq) - dataReader.Close() - if postErr != nil { glog.Errorf("post to filer: %v", postErr) return "", ErrInternalError diff --git a/weed/s3api/s3api_object_multipart_handlers.go b/weed/s3api/s3api_object_multipart_handlers.go index 267d126c5..3282e4176 100644 --- a/weed/s3api/s3api_object_multipart_handlers.go +++ b/weed/s3api/s3api_object_multipart_handlers.go @@ -2,20 +2,21 @@ package s3api import ( "fmt" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/s3" - "github.com/gorilla/mux" "net/http" "net/url" "strconv" "strings" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/s3" + "github.com/gorilla/mux" ) const ( - maxObjectList = 1000 // Limit number of objects in a listObjectsResponse. - maxUploadsList = 1000 // Limit number of uploads in a listUploadsResponse. - maxPartsList = 1000 // Limit number of parts in a listPartsResponse. - globalMaxPartID = 10000 + maxObjectListSizeLimit = 10000 // Limit number of objects in a listObjectsResponse. + maxUploadsList = 10000 // Limit number of uploads in a listUploadsResponse. + maxPartsList = 10000 // Limit number of parts in a listPartsResponse. + globalMaxPartID = 100000 ) // NewMultipartUploadHandler - New multipart upload. 
@@ -27,7 +28,7 @@ func (s3a *S3ApiServer) NewMultipartUploadHandler(w http.ResponseWriter, r *http response, errCode := s3a.createMultipartUpload(&s3.CreateMultipartUploadInput{ Bucket: aws.String(bucket), - Key: aws.String(object), + Key: objectKey(aws.String(object)), }) if errCode != ErrNone { @@ -52,7 +53,7 @@ func (s3a *S3ApiServer) CompleteMultipartUploadHandler(w http.ResponseWriter, r response, errCode := s3a.completeMultipartUpload(&s3.CompleteMultipartUploadInput{ Bucket: aws.String(bucket), - Key: aws.String(object), + Key: objectKey(aws.String(object)), UploadId: aws.String(uploadID), }) @@ -78,7 +79,7 @@ func (s3a *S3ApiServer) AbortMultipartUploadHandler(w http.ResponseWriter, r *ht response, errCode := s3a.abortMultipartUpload(&s3.AbortMultipartUploadInput{ Bucket: aws.String(bucket), - Key: aws.String(object), + Key: objectKey(aws.String(object)), UploadId: aws.String(uploadID), }) @@ -150,7 +151,7 @@ func (s3a *S3ApiServer) ListObjectPartsHandler(w http.ResponseWriter, r *http.Re response, errCode := s3a.listObjectParts(&s3.ListPartsInput{ Bucket: aws.String(bucket), - Key: aws.String(object), + Key: objectKey(aws.String(object)), MaxParts: aws.Int64(int64(maxParts)), PartNumberMarker: aws.Int64(int64(partNumberMarker)), UploadId: aws.String(uploadID), @@ -192,13 +193,19 @@ func (s3a *S3ApiServer) PutObjectPartHandler(w http.ResponseWriter, r *http.Requ return } + var s3ErrCode ErrorCode dataReader := r.Body if rAuthType == authTypeStreamingSigned { - dataReader = newSignV4ChunkedReader(r) + dataReader, s3ErrCode = s3a.iam.newSignV4ChunkedReader(r) + } + if s3ErrCode != ErrNone { + writeErrorResponse(w, s3ErrCode, r.URL) + return } + defer dataReader.Close() - uploadUrl := fmt.Sprintf("http://%s%s/%s/%04d.part", - s3a.option.Filer, s3a.genUploadsFolder(bucket), uploadID, partID-1) + uploadUrl := fmt.Sprintf("http://%s%s/%s/%04d.part?collection=%s", + s3a.option.Filer, s3a.genUploadsFolder(bucket), uploadID, partID-1, bucket) etag, errCode := 
s3a.putToFiler(r, uploadUrl, dataReader) diff --git a/weed/s3api/s3api_objects_list_handlers.go b/weed/s3api/s3api_objects_list_handlers.go index d751a3b1d..919e6230a 100644 --- a/weed/s3api/s3api_objects_list_handlers.go +++ b/weed/s3api/s3api_objects_list_handlers.go @@ -3,22 +3,19 @@ package s3api import ( "context" "fmt" + "io" "net/http" "net/url" "path/filepath" "strconv" + "strings" "time" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/s3" + "github.com/gorilla/mux" + "github.com/chrislusf/seaweedfs/weed/filer2" "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" - "github.com/gorilla/mux" -) - -const ( - maxObjectListSizeLimit = 1000 // Limit number of objects in a listObjectsResponse. ) func (s3a *S3ApiServer) ListObjectsV2Handler(w http.ResponseWriter, r *http.Request) { @@ -85,13 +82,16 @@ func (s3a *S3ApiServer) ListObjectsV1Handler(w http.ResponseWriter, r *http.Requ writeSuccessResponseXML(w, encodeResponse(response)) } -func (s3a *S3ApiServer) listFilerEntries(bucket, originalPrefix string, maxKeys int, marker string) (response *s3.ListObjectsOutput, err error) { +func (s3a *S3ApiServer) listFilerEntries(bucket, originalPrefix string, maxKeys int, marker string) (response ListBucketResult, err error) { // convert full path prefix into directory name and prefix for entry name dir, prefix := filepath.Split(originalPrefix) + if strings.HasPrefix(dir, "/") { + dir = dir[1:] + } // check filer - err = s3a.withFilerClient(func(client filer_pb.SeaweedFilerClient) error { + err = s3a.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { request := &filer_pb.ListEntriesRequest{ Directory: fmt.Sprintf("%s/%s/%s", s3a.option.BucketsPath, bucket, dir), @@ -101,17 +101,28 @@ func (s3a *S3ApiServer) listFilerEntries(bucket, originalPrefix string, maxKeys InclusiveStartFrom: false, } - resp, err := client.ListEntries(context.Background(), request) + stream, err := 
client.ListEntries(context.Background(), request) if err != nil { return fmt.Errorf("list buckets: %v", err) } - var contents []*s3.Object - var commonPrefixes []*s3.CommonPrefix + var contents []ListEntry + var commonPrefixes []PrefixEntry var counter int var lastEntryName string var isTruncated bool - for _, entry := range resp.Entries { + + for { + resp, recvErr := stream.Recv() + if recvErr != nil { + if recvErr == io.EOF { + break + } else { + return recvErr + } + } + + entry := resp.Entry counter++ if counter > maxKeys { isTruncated = true @@ -119,37 +130,40 @@ func (s3a *S3ApiServer) listFilerEntries(bucket, originalPrefix string, maxKeys } lastEntryName = entry.Name if entry.IsDirectory { - commonPrefixes = append(commonPrefixes, &s3.CommonPrefix{ - Prefix: aws.String(fmt.Sprintf("%s%s/", dir, entry.Name)), - }) + if entry.Name != ".uploads" { + commonPrefixes = append(commonPrefixes, PrefixEntry{ + Prefix: fmt.Sprintf("%s%s/", dir, entry.Name), + }) + } } else { - contents = append(contents, &s3.Object{ - Key: aws.String(fmt.Sprintf("%s%s", dir, entry.Name)), - LastModified: aws.Time(time.Unix(entry.Attributes.Mtime, 0)), - ETag: aws.String("\"" + filer2.ETag(entry.Chunks) + "\""), - Size: aws.Int64(int64(filer2.TotalSize(entry.Chunks))), - Owner: &s3.Owner{ - ID: aws.String("bcaf161ca5fb16fd081034f"), - DisplayName: aws.String("webfile"), + contents = append(contents, ListEntry{ + Key: fmt.Sprintf("%s%s", dir, entry.Name), + LastModified: time.Unix(entry.Attributes.Mtime, 0).UTC(), + ETag: "\"" + filer2.ETag(entry) + "\"", + Size: int64(filer2.TotalSize(entry.Chunks)), + Owner: CanonicalUser{ + ID: fmt.Sprintf("%x", entry.Attributes.Uid), + DisplayName: entry.Attributes.UserName, }, - StorageClass: aws.String("STANDARD"), + StorageClass: "STANDARD", }) } + } - response = &s3.ListObjectsOutput{ - Name: aws.String(bucket), - Prefix: aws.String(originalPrefix), - Marker: aws.String(marker), - NextMarker: aws.String(lastEntryName), - MaxKeys: 
aws.Int64(int64(maxKeys)), - Delimiter: aws.String("/"), - IsTruncated: aws.Bool(isTruncated), + response = ListBucketResult{ + Name: bucket, + Prefix: originalPrefix, + Marker: marker, + NextMarker: lastEntryName, + MaxKeys: maxKeys, + Delimiter: "/", + IsTruncated: isTruncated, Contents: contents, CommonPrefixes: commonPrefixes, } - glog.V(4).Infof("read directory: %v, found: %v", request, counter) + glog.V(4).Infof("read directory: %v, found: %v, %+v", request, counter, response) return nil }) diff --git a/weed/s3api/s3api_objects_list_handlers_test.go b/weed/s3api/s3api_objects_list_handlers_test.go new file mode 100644 index 000000000..7b87b32fb --- /dev/null +++ b/weed/s3api/s3api_objects_list_handlers_test.go @@ -0,0 +1,38 @@ +package s3api + +import ( + "testing" + "time" +) + +func TestListObjectsHandler(t *testing.T) { + + // https://docs.aws.amazon.com/AmazonS3/latest/API/v2-RESTBucketGET.html + + expected := ` +test_container1000false1.zip"4397da7a7649e8085de9916c240e8166"123456765a011niqo39cdf8ec533ec3d1ccaafsa932STANDARD2011-04-09T12:34:49Z` + + response := ListBucketResult{ + Name: "test_container", + Prefix: "", + Marker: "", + NextMarker: "", + MaxKeys: 1000, + IsTruncated: false, + Contents: []ListEntry{{ + Key: "1.zip", + LastModified: time.Date(2011, 4, 9, 12, 34, 49, 0, time.UTC), + ETag: "\"4397da7a7649e8085de9916c240e8166\"", + Size: 1234567, + Owner: CanonicalUser{ + ID: "65a011niqo39cdf8ec533ec3d1ccaafsa932", + }, + StorageClass: "STANDARD", + }}, + } + + encoded := string(encodeResponse(response)) + if encoded != expected { + t.Errorf("unexpected output: %s\nexpecting:%s", encoded, expected) + } +} diff --git a/weed/s3api/s3api_server.go b/weed/s3api/s3api_server.go index db798a546..773094a5f 100644 --- a/weed/s3api/s3api_server.go +++ b/weed/s3api/s3api_server.go @@ -1,30 +1,30 @@ package s3api import ( - _ "github.com/chrislusf/seaweedfs/weed/filer2/cassandra" - _ "github.com/chrislusf/seaweedfs/weed/filer2/leveldb" - _ 
"github.com/chrislusf/seaweedfs/weed/filer2/memdb" - _ "github.com/chrislusf/seaweedfs/weed/filer2/mysql" - _ "github.com/chrislusf/seaweedfs/weed/filer2/postgres" - _ "github.com/chrislusf/seaweedfs/weed/filer2/redis" - "github.com/gorilla/mux" "net/http" + + "github.com/gorilla/mux" + "google.golang.org/grpc" ) type S3ApiServerOption struct { Filer string FilerGrpcAddress string + Config string DomainName string BucketsPath string + GrpcDialOption grpc.DialOption } type S3ApiServer struct { option *S3ApiServerOption + iam *IdentityAccessManagement } func NewS3ApiServer(router *mux.Router, option *S3ApiServerOption) (s3ApiServer *S3ApiServer, err error) { s3ApiServer = &S3ApiServer{ option: option, + iam: NewIdentityAccessManagement(option.Config, option.DomainName), } s3ApiServer.registerRouter(router) @@ -44,48 +44,47 @@ func (s3a *S3ApiServer) registerRouter(router *mux.Router) { for _, bucket := range routers { // HeadObject - bucket.Methods("HEAD").Path("/{object:.+}").HandlerFunc(s3a.HeadObjectHandler) + bucket.Methods("HEAD").Path("/{object:.+}").HandlerFunc(s3a.iam.Auth(s3a.HeadObjectHandler, ACTION_READ)) // HeadBucket - bucket.Methods("HEAD").HandlerFunc(s3a.HeadBucketHandler) + bucket.Methods("HEAD").HandlerFunc(s3a.iam.Auth(s3a.HeadBucketHandler, ACTION_ADMIN)) + // CopyObjectPart + bucket.Methods("PUT").Path("/{object:.+}").HeadersRegexp("X-Amz-Copy-Source", ".*?(\\/|%2F).*?").HandlerFunc(s3a.iam.Auth(s3a.CopyObjectPartHandler, ACTION_WRITE)).Queries("partNumber", "{partNumber:[0-9]+}", "uploadId", "{uploadId:.*}") // PutObjectPart - bucket.Methods("PUT").Path("/{object:.+}").HandlerFunc(s3a.PutObjectPartHandler).Queries("partNumber", "{partNumber:[0-9]+}", "uploadId", "{uploadId:.*}") + bucket.Methods("PUT").Path("/{object:.+}").HandlerFunc(s3a.iam.Auth(s3a.PutObjectPartHandler, ACTION_WRITE)).Queries("partNumber", "{partNumber:[0-9]+}", "uploadId", "{uploadId:.*}") // CompleteMultipartUpload - 
bucket.Methods("POST").Path("/{object:.+}").HandlerFunc(s3a.CompleteMultipartUploadHandler).Queries("uploadId", "{uploadId:.*}") + bucket.Methods("POST").Path("/{object:.+}").HandlerFunc(s3a.iam.Auth(s3a.CompleteMultipartUploadHandler, ACTION_WRITE)).Queries("uploadId", "{uploadId:.*}") // NewMultipartUpload - bucket.Methods("POST").Path("/{object:.+}").HandlerFunc(s3a.NewMultipartUploadHandler).Queries("uploads", "") + bucket.Methods("POST").Path("/{object:.+}").HandlerFunc(s3a.iam.Auth(s3a.NewMultipartUploadHandler, ACTION_WRITE)).Queries("uploads", "") // AbortMultipartUpload - bucket.Methods("DELETE").Path("/{object:.+}").HandlerFunc(s3a.AbortMultipartUploadHandler).Queries("uploadId", "{uploadId:.*}") + bucket.Methods("DELETE").Path("/{object:.+}").HandlerFunc(s3a.iam.Auth(s3a.AbortMultipartUploadHandler, ACTION_WRITE)).Queries("uploadId", "{uploadId:.*}") // ListObjectParts - bucket.Methods("GET").Path("/{object:.+}").HandlerFunc(s3a.ListObjectPartsHandler).Queries("uploadId", "{uploadId:.*}") + bucket.Methods("GET").Path("/{object:.+}").HandlerFunc(s3a.iam.Auth(s3a.ListObjectPartsHandler, ACTION_WRITE)).Queries("uploadId", "{uploadId:.*}") // ListMultipartUploads - bucket.Methods("GET").HandlerFunc(s3a.ListMultipartUploadsHandler).Queries("uploads", "") + bucket.Methods("GET").HandlerFunc(s3a.iam.Auth(s3a.ListMultipartUploadsHandler, ACTION_WRITE)).Queries("uploads", "") + // CopyObject + bucket.Methods("PUT").Path("/{object:.+}").HeadersRegexp("X-Amz-Copy-Source", ".*?(\\/|%2F).*?").HandlerFunc(s3a.iam.Auth(s3a.CopyObjectHandler, ACTION_WRITE)) // PutObject - bucket.Methods("PUT").Path("/{object:.+}").HandlerFunc(s3a.PutObjectHandler) + bucket.Methods("PUT").Path("/{object:.+}").HandlerFunc(s3a.iam.Auth(s3a.PutObjectHandler, ACTION_WRITE)) // PutBucket - bucket.Methods("PUT").HandlerFunc(s3a.PutBucketHandler) + bucket.Methods("PUT").HandlerFunc(s3a.iam.Auth(s3a.PutBucketHandler, ACTION_ADMIN)) // DeleteObject - 
bucket.Methods("DELETE").Path("/{object:.+}").HandlerFunc(s3a.DeleteObjectHandler) + bucket.Methods("DELETE").Path("/{object:.+}").HandlerFunc(s3a.iam.Auth(s3a.DeleteObjectHandler, ACTION_WRITE)) // DeleteBucket - bucket.Methods("DELETE").HandlerFunc(s3a.DeleteBucketHandler) + bucket.Methods("DELETE").HandlerFunc(s3a.iam.Auth(s3a.DeleteBucketHandler, ACTION_WRITE)) // ListObjectsV2 - bucket.Methods("GET").HandlerFunc(s3a.ListObjectsV2Handler).Queries("list-type", "2") + bucket.Methods("GET").HandlerFunc(s3a.iam.Auth(s3a.ListObjectsV2Handler, ACTION_READ)).Queries("list-type", "2") // GetObject, but directory listing is not supported - bucket.Methods("GET").Path("/{object:.+}").HandlerFunc(s3a.GetObjectHandler) + bucket.Methods("GET").Path("/{object:.+}").HandlerFunc(s3a.iam.Auth(s3a.GetObjectHandler, ACTION_READ)) // ListObjectsV1 (Legacy) - bucket.Methods("GET").HandlerFunc(s3a.ListObjectsV1Handler) + bucket.Methods("GET").HandlerFunc(s3a.iam.Auth(s3a.ListObjectsV1Handler, ACTION_READ)) // DeleteMultipleObjects - bucket.Methods("POST").HandlerFunc(s3a.DeleteMultipleObjectsHandler).Queries("delete", "") + bucket.Methods("POST").HandlerFunc(s3a.iam.Auth(s3a.DeleteMultipleObjectsHandler, ACTION_WRITE)).Queries("delete", "") /* - // CopyObject - bucket.Methods("PUT").Path("/{object:.+}").HeadersRegexp("X-Amz-Copy-Source", ".*?(\\/|%2F).*?").HandlerFunc(s3a.CopyObjectHandler) - - // CopyObjectPart - bucket.Methods("PUT").Path("/{object:.+}").HeadersRegexp("X-Amz-Copy-Source", ".*?(\\/|%2F).*?").HandlerFunc(s3a.CopyObjectPartHandler).Queries("partNumber", "{partNumber:[0-9]+}", "uploadId", "{uploadId:.*}") // not implemented // GetBucketLocation @@ -107,7 +106,7 @@ func (s3a *S3ApiServer) registerRouter(router *mux.Router) { } // ListBuckets - apiRouter.Methods("GET").Path("/").HandlerFunc(s3a.ListBucketsHandler) + apiRouter.Methods("GET").Path("/").HandlerFunc(s3a.iam.Auth(s3a.ListBucketsHandler, ACTION_ADMIN)) // NotFound apiRouter.NotFoundHandler = 
http.HandlerFunc(notFoundHandler) diff --git a/weed/s3api/s3api_test.go b/weed/s3api/s3api_test.go new file mode 100644 index 000000000..026766beb --- /dev/null +++ b/weed/s3api/s3api_test.go @@ -0,0 +1,32 @@ +package s3api + +import ( + "testing" + "time" +) + +func TestCopyObjectResponse(t *testing.T) { + + // https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html + + response := CopyObjectResult{ + ETag: "12345678", + LastModified: time.Now(), + } + + println(string(encodeResponse(response))) + +} + +func TestCopyPartResponse(t *testing.T) { + + // https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html + + response := CopyPartResult{ + ETag: "12345678", + LastModified: time.Now(), + } + + println(string(encodeResponse(response))) + +} diff --git a/weed/s3api/s3api_xsd_generated.go b/weed/s3api/s3api_xsd_generated.go index df07f3fea..9d62afc4e 100644 --- a/weed/s3api/s3api_xsd_generated.go +++ b/weed/s3api/s3api_xsd_generated.go @@ -25,8 +25,8 @@ type BucketLoggingStatus struct { } type CanonicalUser struct { - ID string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ID"` - DisplayName string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ DisplayName,omitempty"` + ID string `xml:"ID"` + DisplayName string `xml:"DisplayName,omitempty"` } type CopyObject struct { @@ -506,15 +506,15 @@ func (t *ListAllMyBuckets) UnmarshalXML(d *xml.Decoder, start xml.StartElement) } type ListAllMyBucketsEntry struct { - Name string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Name"` - CreationDate time.Time `xml:"http://s3.amazonaws.com/doc/2006-03-01/ CreationDate"` + Name string `xml:"Name"` + CreationDate time.Time `xml:"CreationDate"` } func (t *ListAllMyBucketsEntry) MarshalXML(e *xml.Encoder, start xml.StartElement) error { type T ListAllMyBucketsEntry var layout struct { *T - CreationDate *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ CreationDate"` + CreationDate *xsdDateTime `xml:"CreationDate"` } layout.T = (*T)(t) 
layout.CreationDate = (*xsdDateTime)(&layout.T.CreationDate) @@ -524,7 +524,7 @@ func (t *ListAllMyBucketsEntry) UnmarshalXML(d *xml.Decoder, start xml.StartElem type T ListAllMyBucketsEntry var overlay struct { *T - CreationDate *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ CreationDate"` + CreationDate *xsdDateTime `xml:"CreationDate"` } overlay.T = (*T)(t) overlay.CreationDate = (*xsdDateTime)(&overlay.T.CreationDate) @@ -532,7 +532,7 @@ func (t *ListAllMyBucketsEntry) UnmarshalXML(d *xml.Decoder, start xml.StartElem } type ListAllMyBucketsList struct { - Bucket []ListAllMyBucketsEntry `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Bucket,omitempty"` + Bucket []ListAllMyBucketsEntry `xml:"Bucket,omitempty"` } type ListAllMyBucketsResponse struct { @@ -577,32 +577,33 @@ type ListBucketResponse struct { } type ListBucketResult struct { - Metadata []MetadataEntry `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Metadata,omitempty"` - Name string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Name"` - Prefix string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Prefix"` - Marker string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Marker"` - NextMarker string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ NextMarker,omitempty"` - MaxKeys int `xml:"http://s3.amazonaws.com/doc/2006-03-01/ MaxKeys"` - Delimiter string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Delimiter,omitempty"` - IsTruncated bool `xml:"http://s3.amazonaws.com/doc/2006-03-01/ IsTruncated"` - Contents []ListEntry `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Contents,omitempty"` - CommonPrefixes []PrefixEntry `xml:"http://s3.amazonaws.com/doc/2006-03-01/ CommonPrefixes,omitempty"` + XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ListBucketResult"` + Metadata []MetadataEntry `xml:"Metadata,omitempty"` + Name string `xml:"Name"` + Prefix string `xml:"Prefix"` + Marker string `xml:"Marker"` + NextMarker string `xml:"NextMarker,omitempty"` + MaxKeys int `xml:"MaxKeys"` + 
Delimiter string `xml:"Delimiter,omitempty"` + IsTruncated bool `xml:"IsTruncated"` + Contents []ListEntry `xml:"Contents,omitempty"` + CommonPrefixes []PrefixEntry `xml:"CommonPrefixes,omitempty"` } type ListEntry struct { - Key string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Key"` - LastModified time.Time `xml:"http://s3.amazonaws.com/doc/2006-03-01/ LastModified"` - ETag string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ETag"` - Size int64 `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Size"` - Owner CanonicalUser `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Owner,omitempty"` - StorageClass StorageClass `xml:"http://s3.amazonaws.com/doc/2006-03-01/ StorageClass"` + Key string `xml:"Key"` + LastModified time.Time `xml:"LastModified"` + ETag string `xml:"ETag"` + Size int64 `xml:"Size"` + Owner CanonicalUser `xml:"Owner,omitempty"` + StorageClass StorageClass `xml:"StorageClass"` } func (t *ListEntry) MarshalXML(e *xml.Encoder, start xml.StartElement) error { type T ListEntry var layout struct { *T - LastModified *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ LastModified"` + LastModified *xsdDateTime `xml:"LastModified"` } layout.T = (*T)(t) layout.LastModified = (*xsdDateTime)(&layout.T.LastModified) @@ -612,7 +613,7 @@ func (t *ListEntry) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { type T ListEntry var overlay struct { *T - LastModified *xsdDateTime `xml:"http://s3.amazonaws.com/doc/2006-03-01/ LastModified"` + LastModified *xsdDateTime `xml:"LastModified"` } overlay.T = (*T)(t) overlay.LastModified = (*xsdDateTime)(&overlay.T.LastModified) @@ -674,7 +675,7 @@ type PostResponse struct { } type PrefixEntry struct { - Prefix string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Prefix"` + Prefix string `xml:"Prefix"` } type PutObject struct { @@ -965,10 +966,10 @@ func (b xsdBase64Binary) MarshalText() ([]byte, error) { type xsdDateTime time.Time func (t *xsdDateTime) UnmarshalText(text []byte) error { - return 
_unmarshalTime(text, (*time.Time)(t), "2006-01-02T15:04:05.999999999") + return _unmarshalTime(text, (*time.Time)(t), s3TimeFormat) } func (t xsdDateTime) MarshalText() ([]byte, error) { - return []byte((time.Time)(t).Format("2006-01-02T15:04:05.999999999")), nil + return []byte((time.Time)(t).Format(s3TimeFormat)), nil } func (t xsdDateTime) MarshalXML(e *xml.Encoder, start xml.StartElement) error { if (time.Time)(t).IsZero() { diff --git a/weed/security/guard.go b/weed/security/guard.go index dea3b12f2..87ec91ec1 100644 --- a/weed/security/guard.go +++ b/weed/security/guard.go @@ -41,21 +41,30 @@ https://github.com/pkieltyka/jwtauth/blob/master/jwtauth.go */ type Guard struct { - whiteList []string - SecretKey Secret + whiteList []string + SigningKey SigningKey + ExpiresAfterSec int + ReadSigningKey SigningKey + ReadExpiresAfterSec int - isActive bool + isWriteActive bool } -func NewGuard(whiteList []string, secretKey string) *Guard { - g := &Guard{whiteList: whiteList, SecretKey: Secret(secretKey)} - g.isActive = len(g.whiteList) != 0 || len(g.SecretKey) != 0 +func NewGuard(whiteList []string, signingKey string, expiresAfterSec int, readSigningKey string, readExpiresAfterSec int) *Guard { + g := &Guard{ + whiteList: whiteList, + SigningKey: SigningKey(signingKey), + ExpiresAfterSec: expiresAfterSec, + ReadSigningKey: SigningKey(readSigningKey), + ReadExpiresAfterSec: readExpiresAfterSec, + } + g.isWriteActive = len(g.whiteList) != 0 || len(g.SigningKey) != 0 return g } -func (g *Guard) WhiteList(f func(w http.ResponseWriter, r *http.Request)) func(w http.ResponseWriter, r *http.Request) { - if !g.isActive { - //if no security needed, just skip all checkings +func (g *Guard) WhiteList(f http.HandlerFunc) http.HandlerFunc { + if !g.isWriteActive { + //if no security needed, just skip all checking return f } return func(w http.ResponseWriter, r *http.Request) { @@ -67,20 +76,6 @@ func (g *Guard) WhiteList(f func(w http.ResponseWriter, r *http.Request)) func(w } } 
-func (g *Guard) Secure(f func(w http.ResponseWriter, r *http.Request)) func(w http.ResponseWriter, r *http.Request) { - if !g.isActive { - //if no security needed, just skip all checkings - return f - } - return func(w http.ResponseWriter, r *http.Request) { - if err := g.checkJwt(w, r); err != nil { - w.WriteHeader(http.StatusUnauthorized) - return - } - f(w, r) - } -} - func GetActualRemoteHost(r *http.Request) (host string, err error) { host = r.Header.Get("HTTP_X_FORWARDED_FOR") if host == "" { @@ -130,33 +125,3 @@ func (g *Guard) checkWhiteList(w http.ResponseWriter, r *http.Request) error { glog.V(0).Infof("Not in whitelist: %s", r.RemoteAddr) return fmt.Errorf("Not in whitelis: %s", r.RemoteAddr) } - -func (g *Guard) checkJwt(w http.ResponseWriter, r *http.Request) error { - if g.checkWhiteList(w, r) == nil { - return nil - } - - if len(g.SecretKey) == 0 { - return nil - } - - tokenStr := GetJwt(r) - - if tokenStr == "" { - return ErrUnauthorized - } - - // Verify the token - token, err := DecodeJwt(g.SecretKey, tokenStr) - if err != nil { - glog.V(1).Infof("Token verification error from %s: %v", r.RemoteAddr, err) - return ErrUnauthorized - } - if !token.Valid { - glog.V(1).Infof("Token invliad from %s: %v", r.RemoteAddr, tokenStr) - return ErrUnauthorized - } - - glog.V(1).Infof("No permission from %s", r.RemoteAddr) - return fmt.Errorf("No write permisson from %s", r.RemoteAddr) -} diff --git a/weed/security/jwt.go b/weed/security/jwt.go index 46b7efaaf..0bd7fa974 100644 --- a/weed/security/jwt.go +++ b/weed/security/jwt.go @@ -1,9 +1,9 @@ package security import ( + "fmt" "net/http" "strings" - "time" "github.com/chrislusf/seaweedfs/weed/glog" @@ -11,21 +11,29 @@ import ( ) type EncodedJwt string -type Secret string +type SigningKey []byte + +type SeaweedFileIdClaims struct { + Fid string `json:"fid"` + jwt.StandardClaims +} -func GenJwt(secret Secret, fileId string) EncodedJwt { - if secret == "" { +func GenJwt(signingKey SigningKey, expiresAfterSec 
int, fileId string) EncodedJwt { + if len(signingKey) == 0 { return "" } - t := jwt.New(jwt.GetSigningMethod("HS256")) - t.Claims = &jwt.StandardClaims{ - ExpiresAt: time.Now().Add(time.Second * 10).Unix(), - Subject: fileId, + claims := SeaweedFileIdClaims{ + fileId, + jwt.StandardClaims{}, + } + if expiresAfterSec > 0 { + claims.ExpiresAt = time.Now().Add(time.Second * time.Duration(expiresAfterSec)).Unix() } - encoded, e := t.SignedString(secret) + t := jwt.NewWithClaims(jwt.SigningMethodHS256, claims) + encoded, e := t.SignedString([]byte(signingKey)) if e != nil { - glog.V(0).Infof("Failed to sign claims: %v", t.Claims) + glog.V(0).Infof("Failed to sign claims %+v: %v", t.Claims, e) return "" } return EncodedJwt(encoded) @@ -44,31 +52,15 @@ func GetJwt(r *http.Request) EncodedJwt { } } - // Get token from cookie - if tokenStr == "" { - cookie, err := r.Cookie("jwt") - if err == nil { - tokenStr = cookie.Value - } - } - return EncodedJwt(tokenStr) } -func EncodeJwt(secret Secret, claims *jwt.StandardClaims) (EncodedJwt, error) { - if secret == "" { - return "", nil - } - - t := jwt.New(jwt.GetSigningMethod("HS256")) - t.Claims = claims - encoded, e := t.SignedString(secret) - return EncodedJwt(encoded), e -} - -func DecodeJwt(secret Secret, tokenString EncodedJwt) (token *jwt.Token, err error) { +func DecodeJwt(signingKey SigningKey, tokenString EncodedJwt) (token *jwt.Token, err error) { // check exp, nbf - return jwt.Parse(string(tokenString), func(token *jwt.Token) (interface{}, error) { - return secret, nil + return jwt.ParseWithClaims(string(tokenString), &SeaweedFileIdClaims{}, func(token *jwt.Token) (interface{}, error) { + if _, ok := token.Method.(*jwt.SigningMethodHMAC); !ok { + return nil, fmt.Errorf("unknown token method") + } + return []byte(signingKey), nil }) } diff --git a/weed/security/tls.go b/weed/security/tls.go new file mode 100644 index 000000000..1832e6e07 --- /dev/null +++ b/weed/security/tls.go @@ -0,0 +1,68 @@ +package security + 
+import ( + "crypto/tls" + "crypto/x509" + "io/ioutil" + + "github.com/spf13/viper" + + "google.golang.org/grpc" + "google.golang.org/grpc/credentials" + + "github.com/chrislusf/seaweedfs/weed/glog" +) + +func LoadServerTLS(config *viper.Viper, component string) grpc.ServerOption { + if config == nil { + return nil + } + + // load cert/key, ca cert + cert, err := tls.LoadX509KeyPair(config.GetString(component+".cert"), config.GetString(component+".key")) + if err != nil { + glog.V(1).Infof("load cert/key error: %v", err) + return nil + } + caCert, err := ioutil.ReadFile(config.GetString(component + ".ca")) + if err != nil { + glog.V(1).Infof("read ca cert file error: %v", err) + return nil + } + caCertPool := x509.NewCertPool() + caCertPool.AppendCertsFromPEM(caCert) + ta := credentials.NewTLS(&tls.Config{ + Certificates: []tls.Certificate{cert}, + ClientCAs: caCertPool, + ClientAuth: tls.RequireAndVerifyClientCert, + }) + + return grpc.Creds(ta) +} + +func LoadClientTLS(config *viper.Viper, component string) grpc.DialOption { + if config == nil { + return grpc.WithInsecure() + } + + // load cert/key, cacert + cert, err := tls.LoadX509KeyPair(config.GetString(component+".cert"), config.GetString(component+".key")) + if err != nil { + glog.V(1).Infof("load cert/key error: %v", err) + return grpc.WithInsecure() + } + caCert, err := ioutil.ReadFile(config.GetString(component + ".ca")) + if err != nil { + glog.V(1).Infof("read ca cert file error: %v", err) + return grpc.WithInsecure() + } + caCertPool := x509.NewCertPool() + caCertPool.AppendCertsFromPEM(caCert) + + ta := credentials.NewTLS(&tls.Config{ + Certificates: []tls.Certificate{cert}, + RootCAs: caCertPool, + InsecureSkipVerify: true, + }) + return grpc.WithTransportCredentials(ta) +} diff --git a/weed/sequence/etcd_sequencer.go b/weed/sequence/etcd_sequencer.go new file mode 100644 index 000000000..1fc378640 --- /dev/null +++ b/weed/sequence/etcd_sequencer.go @@ -0,0 +1,296 @@ +package sequence + +/* +Note : 
+(1) store the sequence in the ETCD cluster, and local file(sequence.dat) +(2) batch get the sequences from ETCD cluster, and store the max sequence id in the local file +(3) the sequence range is : [currentSeqId, maxSeqId), when the currentSeqId >= maxSeqId, fetch the new maxSeqId. +*/ + +import ( + "context" + "fmt" + "sync" + "time" + + "io" + "os" + "strconv" + "strings" + + "github.com/chrislusf/seaweedfs/weed/glog" + "go.etcd.io/etcd/client" +) + +const ( + // EtcdKeyPrefix = "/seaweedfs" + EtcdKeySequence = "/master/sequence" + EtcdContextTimeoutSecond = 100 * time.Second + DefaultEtcdSteps uint64 = 500 // internal counter + SequencerFileName = "sequencer.dat" + FileMaxSequenceLength = 128 +) + +type EtcdSequencer struct { + sequenceLock sync.Mutex + + // available sequence range : [currentSeqId, maxSeqId) + currentSeqId uint64 + maxSeqId uint64 + + keysAPI client.KeysAPI + seqFile *os.File +} + +func NewEtcdSequencer(etcdUrls string, metaFolder string) (*EtcdSequencer, error) { + file, err := openSequenceFile(metaFolder + "/" + SequencerFileName) + if nil != err { + return nil, fmt.Errorf("open sequence file fialed, %v", err) + } + + cli, err := client.New(client.Config{ + Endpoints: strings.Split(etcdUrls, ","), + Username: "", + Password: "", + }) + if err != nil { + return nil, err + } + keysApi := client.NewKeysAPI(cli) + + // TODO: the current sequence id in local file is not used + maxValue, _, err := readSequenceFile(file) + if err != nil { + return nil, fmt.Errorf("read sequence from file failed, %v", err) + } + glog.V(4).Infof("read sequence from file : %d", maxValue) + + newSeq, err := setMaxSequenceToEtcd(keysApi, maxValue) + if err != nil { + return nil, err + } + + sequencer := &EtcdSequencer{maxSeqId: newSeq, + currentSeqId: newSeq, + keysAPI: keysApi, + seqFile: file, + } + return sequencer, nil +} + +func (es *EtcdSequencer) NextFileId(count uint64) uint64 { + es.sequenceLock.Lock() + defer es.sequenceLock.Unlock() + + if (es.currentSeqId + 
count) >= es.maxSeqId { + reqSteps := DefaultEtcdSteps + if count > DefaultEtcdSteps { + reqSteps += count + } + maxId, err := batchGetSequenceFromEtcd(es.keysAPI, reqSteps) + glog.V(4).Infof("get max sequence id from etcd, %d", maxId) + if err != nil { + glog.Error(err) + return 0 + } + es.currentSeqId, es.maxSeqId = maxId-reqSteps, maxId + glog.V(4).Infof("current id : %d, max id : %d", es.currentSeqId, es.maxSeqId) + + if err := writeSequenceFile(es.seqFile, es.maxSeqId, es.currentSeqId); err != nil { + glog.Errorf("flush sequence to file failed, %v", err) + } + } + + ret := es.currentSeqId + es.currentSeqId += count + return ret +} + +/** +instead of collecting the max value from volume server, +the max value should be saved in local config file and ETCD cluster +*/ +func (es *EtcdSequencer) SetMax(seenValue uint64) { + es.sequenceLock.Lock() + defer es.sequenceLock.Unlock() + if seenValue > es.maxSeqId { + maxId, err := setMaxSequenceToEtcd(es.keysAPI, seenValue) + if err != nil { + glog.Errorf("set Etcd Max sequence failed : %v", err) + return + } + es.currentSeqId, es.maxSeqId = maxId, maxId + + if err := writeSequenceFile(es.seqFile, maxId, maxId); err != nil { + glog.Errorf("flush sequence to file failed, %v", err) + } + } +} + +func (es *EtcdSequencer) GetMax() uint64 { + return es.maxSeqId +} + +func (es *EtcdSequencer) Peek() uint64 { + return es.currentSeqId +} + +func batchGetSequenceFromEtcd(kvApi client.KeysAPI, step uint64) (uint64, error) { + if step <= 0 { + return 0, fmt.Errorf("the step must be large than 1") + } + + ctx, cancel := context.WithTimeout(context.Background(), EtcdContextTimeoutSecond) + var endSeqValue uint64 = 0 + defer cancel() + for { + getResp, err := kvApi.Get(ctx, EtcdKeySequence, &client.GetOptions{Recursive: false, Quorum: true}) + if err != nil { + return 0, err + } + if getResp.Node == nil { + continue + } + + prevValue := getResp.Node.Value + prevSeqValue, err := strconv.ParseUint(prevValue, 10, 64) + if err != nil { + 
return 0, fmt.Errorf("get sequence from etcd failed, %v", err) + } + endSeqValue = prevSeqValue + step + endSeqStr := strconv.FormatUint(endSeqValue, 10) + + _, err = kvApi.Set(ctx, EtcdKeySequence, endSeqStr, &client.SetOptions{PrevValue: prevValue}) + if err == nil { + break + } + glog.Error(err) + } + + return endSeqValue, nil +} + +/** +update the value of the key EtcdKeySequence in ETCD cluster with the parameter of maxSeq, +when the value of the key EtcdKeySequence is equal to or large than the parameter maxSeq, +return the value of EtcdKeySequence in the ETCD cluster; +when the value of the EtcdKeySequence is less than the parameter maxSeq, +return the value of the parameter maxSeq +*/ +func setMaxSequenceToEtcd(kvApi client.KeysAPI, maxSeq uint64) (uint64, error) { + maxSeqStr := strconv.FormatUint(maxSeq, 10) + ctx, cancel := context.WithTimeout(context.Background(), EtcdContextTimeoutSecond) + defer cancel() + + for { + getResp, err := kvApi.Get(ctx, EtcdKeySequence, &client.GetOptions{Recursive: false, Quorum: true}) + if err != nil { + if ce, ok := err.(client.Error); ok && (ce.Code == client.ErrorCodeKeyNotFound) { + _, err := kvApi.Create(ctx, EtcdKeySequence, maxSeqStr) + if err == nil { + continue + } + if ce, ok = err.(client.Error); ok && (ce.Code == client.ErrorCodeNodeExist) { + continue + } + return 0, err + } else { + return 0, err + } + } + + if getResp.Node == nil { + continue + } + prevSeqStr := getResp.Node.Value + prevSeq, err := strconv.ParseUint(prevSeqStr, 10, 64) + if err != nil { + return 0, err + } + if prevSeq >= maxSeq { + return prevSeq, nil + } + + _, err = kvApi.Set(ctx, EtcdKeySequence, maxSeqStr, &client.SetOptions{PrevValue: prevSeqStr}) + if err != nil { + return 0, err + } + } +} + +func openSequenceFile(file string) (*os.File, error) { + _, err := os.Stat(file) + if os.IsNotExist(err) { + fid, err := os.OpenFile(file, os.O_RDWR|os.O_CREATE, 0644) + if err != nil { + return nil, err + } + if err := writeSequenceFile(fid, 
1, 0); err != nil { + return nil, err + } + return fid, nil + } else { + return os.OpenFile(file, os.O_RDWR|os.O_CREATE, 0644) + } +} + +/* +read sequence and step from sequence file +*/ +func readSequenceFile(file *os.File) (uint64, uint64, error) { + sequence := make([]byte, FileMaxSequenceLength) + size, err := file.ReadAt(sequence, 0) + if (err != nil) && (err != io.EOF) { + err := fmt.Errorf("cannot read file %s, %v", file.Name(), err) + return 0, 0, err + } + sequence = sequence[0:size] + seqs := strings.Split(string(sequence), ":") + maxId, err := strconv.ParseUint(seqs[0], 10, 64) + if err != nil { + return 0, 0, fmt.Errorf("parse sequence from file failed, %v", err) + } + + if len(seqs) > 1 { + step, err := strconv.ParseUint(seqs[1], 10, 64) + if err != nil { + return 0, 0, fmt.Errorf("parse sequence from file failed, %v", err) + } + return maxId, step, nil + } + + return maxId, 0, nil +} + +/** +write the sequence and step to sequence file +*/ +func writeSequenceFile(file *os.File, sequence, step uint64) error { + _ = step + seqStr := fmt.Sprintf("%d:%d", sequence, sequence) + if _, err := file.Seek(0, 0); err != nil { + err = fmt.Errorf("cannot seek to the beginning of %s: %v", file.Name(), err) + return err + } + if err := file.Truncate(0); err != nil { + return fmt.Errorf("truncate sequence file faield : %v", err) + } + if _, err := file.WriteString(seqStr); err != nil { + return fmt.Errorf("write file %s failed, %v", file.Name(), err) + } + if err := file.Sync(); err != nil { + return fmt.Errorf("flush file %s failed, %v", file.Name(), err) + } + return nil +} + +// the UT helper method +// func deleteEtcdKey(kvApi client.KeysAPI, key string) error { +// ctx, cancel := context.WithTimeout(context.Background(), EtcdContextTimeoutSecond) +// defer cancel() +// _, err := kvApi.Delete(ctx, key, &client.DeleteOptions{Dir: false}) +// if err != nil { +// return err +// } +// return nil +// } diff --git a/weed/sequence/memory_sequencer.go 
b/weed/sequence/memory_sequencer.go index d727dc723..e20c29cc7 100644 --- a/weed/sequence/memory_sequencer.go +++ b/weed/sequence/memory_sequencer.go @@ -15,12 +15,12 @@ func NewMemorySequencer() (m *MemorySequencer) { return } -func (m *MemorySequencer) NextFileId(count uint64) (uint64, uint64) { +func (m *MemorySequencer) NextFileId(count uint64) uint64 { m.sequenceLock.Lock() defer m.sequenceLock.Unlock() ret := m.counter - m.counter += uint64(count) - return ret, count + m.counter += count + return ret } func (m *MemorySequencer) SetMax(seenValue uint64) { diff --git a/weed/sequence/sequence.go b/weed/sequence/sequence.go index fbdc3b8ef..2258d001b 100644 --- a/weed/sequence/sequence.go +++ b/weed/sequence/sequence.go @@ -1,7 +1,7 @@ package sequence type Sequencer interface { - NextFileId(count uint64) (uint64, uint64) + NextFileId(count uint64) uint64 SetMax(uint64) Peek() uint64 } diff --git a/weed/server/common.go b/weed/server/common.go index d88abfdc8..bc6008864 100644 --- a/weed/server/common.go +++ b/weed/server/common.go @@ -1,26 +1,29 @@ package weed_server import ( - "bytes" "encoding/json" "errors" "fmt" + "io" + "mime/multipart" "net/http" "path/filepath" "strconv" "strings" "time" + "google.golang.org/grpc" + "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/operation" - "github.com/chrislusf/seaweedfs/weed/security" "github.com/chrislusf/seaweedfs/weed/stats" - "github.com/chrislusf/seaweedfs/weed/storage" + "github.com/chrislusf/seaweedfs/weed/storage/needle" "github.com/chrislusf/seaweedfs/weed/util" - _ "github.com/chrislusf/seaweedfs/weed/statik" "github.com/gorilla/mux" statik "github.com/rakyll/statik/fs" + + _ "github.com/chrislusf/seaweedfs/weed/statik" ) var serverStats *stats.ServerStats @@ -43,14 +46,26 @@ func writeJson(w http.ResponseWriter, r *http.Request, httpStatus int, obj inter if err != nil { return } + + if httpStatus >= 400 { + glog.V(0).Infof("response method:%s URL:%s with httpStatus:%d and 
JSON:%s", + r.Method, r.URL.String(), httpStatus, string(bytes)) + } + callback := r.FormValue("callback") if callback == "" { w.Header().Set("Content-Type", "application/json") w.WriteHeader(httpStatus) + if httpStatus == http.StatusNotModified { + return + } _, err = w.Write(bytes) } else { w.Header().Set("Content-Type", "application/javascript") w.WriteHeader(httpStatus) + if httpStatus == http.StatusNotModified { + return + } if _, err = w.Write([]uint8(callback)); err != nil { return } @@ -69,7 +84,8 @@ func writeJson(w http.ResponseWriter, r *http.Request, httpStatus int, obj inter // wrapper for writeJson - just logs errors func writeJsonQuiet(w http.ResponseWriter, r *http.Request, httpStatus int, obj interface{}) { if err := writeJson(w, r, httpStatus, obj); err != nil { - glog.V(0).Infof("error writing JSON %+v status %d: %v", obj, httpStatus, err) + glog.V(0).Infof("error writing JSON status %d: %v", httpStatus, err) + glog.V(1).Infof("JSON content: %+v", obj) } } func writeJsonError(w http.ResponseWriter, r *http.Request, httpStatus int, err error) { @@ -82,8 +98,7 @@ func debug(params ...interface{}) { glog.V(4).Infoln(params...) 
} -func submitForClientHandler(w http.ResponseWriter, r *http.Request, masterUrl string) { - jwt := security.GetJwt(r) +func submitForClientHandler(w http.ResponseWriter, r *http.Request, masterUrl string, grpcDialOption grpc.DialOption) { m := make(map[string]interface{}) if r.Method != "POST" { writeJsonError(w, r, http.StatusMethodNotAllowed, errors.New("Only submit via POST!")) @@ -91,13 +106,13 @@ func submitForClientHandler(w http.ResponseWriter, r *http.Request, masterUrl st } debug("parsing upload file...") - fname, data, mimeType, pairMap, isGzipped, originalDataSize, lastModified, _, _, pe := storage.ParseUpload(r) + pu, pe := needle.ParseUpload(r, 256*1024*1024) if pe != nil { writeJsonError(w, r, http.StatusBadRequest, pe) return } - debug("assigning file id for", fname) + debug("assigning file id for", pu.FileName) r.ParseForm() count := uint64(1) if r.FormValue("count") != "" { @@ -109,32 +124,33 @@ func submitForClientHandler(w http.ResponseWriter, r *http.Request, masterUrl st } ar := &operation.VolumeAssignRequest{ Count: count, + DataCenter: r.FormValue("dataCenter"), Replication: r.FormValue("replication"), Collection: r.FormValue("collection"), Ttl: r.FormValue("ttl"), } - assignResult, ae := operation.Assign(masterUrl, ar) + assignResult, ae := operation.Assign(masterUrl, grpcDialOption, ar) if ae != nil { writeJsonError(w, r, http.StatusInternalServerError, ae) return } url := "http://" + assignResult.Url + "/" + assignResult.Fid - if lastModified != 0 { - url = url + "?ts=" + strconv.FormatUint(lastModified, 10) + if pu.ModifiedTime != 0 { + url = url + "?ts=" + strconv.FormatUint(pu.ModifiedTime, 10) } debug("upload file to store", url) - uploadResult, err := operation.Upload(url, fname, bytes.NewReader(data), isGzipped, mimeType, pairMap, jwt) + uploadResult, err := operation.UploadData(url, pu.FileName, false, pu.Data, pu.IsGzipped, pu.MimeType, pu.PairMap, assignResult.Auth) if err != nil { writeJsonError(w, r, 
http.StatusInternalServerError, err) return } - m["fileName"] = fname + m["fileName"] = pu.FileName m["fid"] = assignResult.Fid m["fileUrl"] = assignResult.PublicUrl + "/" + assignResult.Fid - m["size"] = originalDataSize + m["size"] = pu.OriginalDataSize m["eTag"] = uploadResult.ETag writeJsonQuiet(w, r, http.StatusCreated, m) return @@ -175,19 +191,19 @@ func parseURLPath(path string) (vid, fid, filename, ext string, isVolumeIdOnly b func statsHealthHandler(w http.ResponseWriter, r *http.Request) { m := make(map[string]interface{}) - m["Version"] = util.VERSION + m["Version"] = util.Version() writeJsonQuiet(w, r, http.StatusOK, m) } func statsCounterHandler(w http.ResponseWriter, r *http.Request) { m := make(map[string]interface{}) - m["Version"] = util.VERSION + m["Version"] = util.Version() m["Counters"] = serverStats writeJsonQuiet(w, r, http.StatusOK, m) } func statsMemoryHandler(w http.ResponseWriter, r *http.Request) { m := make(map[string]interface{}) - m["Version"] = util.VERSION + m["Version"] = util.Version() m["Memory"] = stats.MemStat() writeJsonQuiet(w, r, http.StatusOK, m) } @@ -201,3 +217,107 @@ func handleStaticResources2(r *mux.Router) { r.Handle("/favicon.ico", http.FileServer(statikFS)) r.PathPrefix("/seaweedfsstatic/").Handler(http.StripPrefix("/seaweedfsstatic", http.FileServer(statikFS))) } + +func adjustHeadersAfterHEAD(w http.ResponseWriter, r *http.Request, filename string) { + if filename != "" { + contentDisposition := "inline" + if r.FormValue("dl") != "" { + if dl, _ := strconv.ParseBool(r.FormValue("dl")); dl { + contentDisposition = "attachment" + } + } + w.Header().Set("Content-Disposition", contentDisposition+`; filename="`+fileNameEscaper.Replace(filename)+`"`) + } +} + +func processRangeRequest(r *http.Request, w http.ResponseWriter, totalSize int64, mimeType string, writeFn func(writer io.Writer, offset int64, size int64) error) { + rangeReq := r.Header.Get("Range") + + if rangeReq == "" { + w.Header().Set("Content-Length", 
strconv.FormatInt(totalSize, 10)) + if err := writeFn(w, 0, totalSize); err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + return + } + + //the rest is dealing with partial content request + //mostly copy from src/pkg/net/http/fs.go + ranges, err := parseRange(rangeReq, totalSize) + if err != nil { + http.Error(w, err.Error(), http.StatusRequestedRangeNotSatisfiable) + return + } + if sumRangesSize(ranges) > totalSize { + // The total number of bytes in all the ranges + // is larger than the size of the file by + // itself, so this is probably an attack, or a + // dumb client. Ignore the range request. + return + } + if len(ranges) == 0 { + return + } + if len(ranges) == 1 { + // RFC 2616, Section 14.16: + // "When an HTTP message includes the content of a single + // range (for example, a response to a request for a + // single range, or to a request for a set of ranges + // that overlap without any holes), this content is + // transmitted with a Content-Range header, and a + // Content-Length header showing the number of bytes + // actually transferred. + // ... + // A response to a request for a single range MUST NOT + // be sent using the multipart/byteranges media type." 
+ ra := ranges[0] + w.Header().Set("Content-Length", strconv.FormatInt(ra.length, 10)) + w.Header().Set("Content-Range", ra.contentRange(totalSize)) + w.WriteHeader(http.StatusPartialContent) + + err = writeFn(w, ra.start, ra.length) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + return + } + + // process multiple ranges + for _, ra := range ranges { + if ra.start > totalSize { + http.Error(w, "Out of Range", http.StatusRequestedRangeNotSatisfiable) + return + } + } + sendSize := rangesMIMESize(ranges, mimeType, totalSize) + pr, pw := io.Pipe() + mw := multipart.NewWriter(pw) + w.Header().Set("Content-Type", "multipart/byteranges; boundary="+mw.Boundary()) + sendContent := pr + defer pr.Close() // cause writing goroutine to fail and exit if CopyN doesn't finish. + go func() { + for _, ra := range ranges { + part, e := mw.CreatePart(ra.mimeHeader(mimeType, totalSize)) + if e != nil { + pw.CloseWithError(e) + return + } + if e = writeFn(part, ra.start, ra.length); e != nil { + pw.CloseWithError(e) + return + } + } + mw.Close() + pw.Close() + }() + if w.Header().Get("Content-Encoding") == "" { + w.Header().Set("Content-Length", strconv.FormatInt(sendSize, 10)) + } + w.WriteHeader(http.StatusPartialContent) + if _, err := io.CopyN(w, sendContent, sendSize); err != nil { + http.Error(w, "Internal Error", http.StatusInternalServerError) + return + } +} diff --git a/weed/server/common_test.go b/weed/server/common_test.go new file mode 100644 index 000000000..2e6c70bfe --- /dev/null +++ b/weed/server/common_test.go @@ -0,0 +1,31 @@ +package weed_server + +import ( + "strings" + "testing" +) + +func TestParseURL(t *testing.T) { + if vid, fid, _, _, _ := parseURLPath("/1,06dfa8a684"); true { + if vid != "1" { + t.Errorf("fail to parse vid: %s", vid) + } + if fid != "06dfa8a684" { + t.Errorf("fail to parse fid: %s", fid) + } + } + if vid, fid, _, _, _ := parseURLPath("/1,06dfa8a684_1"); true { + if vid != "1" { + t.Errorf("fail 
to parse vid: %s", vid) + } + if fid != "06dfa8a684_1" { + t.Errorf("fail to parse fid: %s", fid) + } + if sepIndex := strings.LastIndex(fid, "_"); sepIndex > 0 { + fid = fid[:sepIndex] + } + if fid != "06dfa8a684" { + t.Errorf("fail to parse fid: %s", fid) + } + } +} diff --git a/weed/server/filer_grpc_server.go b/weed/server/filer_grpc_server.go index 06589e3c6..17e32731c 100644 --- a/weed/server/filer_grpc_server.go +++ b/weed/server/filer_grpc_server.go @@ -19,9 +19,15 @@ import ( func (fs *FilerServer) LookupDirectoryEntry(ctx context.Context, req *filer_pb.LookupDirectoryEntryRequest) (*filer_pb.LookupDirectoryEntryResponse, error) { - entry, err := fs.filer.FindEntry(filer2.FullPath(filepath.Join(req.Directory, req.Name))) + glog.V(4).Infof("LookupDirectoryEntry %s", filepath.Join(req.Directory, req.Name)) + + entry, err := fs.filer.FindEntry(ctx, util.JoinPath(req.Directory, req.Name)) + if err == filer_pb.ErrNotFound { + return &filer_pb.LookupDirectoryEntryResponse{}, err + } if err != nil { - return nil, fmt.Errorf("%s not found under %s: %v", req.Name, req.Directory, err) + glog.V(3).Infof("LookupDirectoryEntry %s: %+v, ", filepath.Join(req.Directory, req.Name), err) + return nil, err } return &filer_pb.LookupDirectoryEntryResponse{ @@ -30,27 +36,35 @@ func (fs *FilerServer) LookupDirectoryEntry(ctx context.Context, req *filer_pb.L IsDirectory: entry.IsDirectory(), Attributes: filer2.EntryAttributeToPb(entry), Chunks: entry.Chunks, + Extended: entry.Extended, }, }, nil } -func (fs *FilerServer) ListEntries(ctx context.Context, req *filer_pb.ListEntriesRequest) (*filer_pb.ListEntriesResponse, error) { +func (fs *FilerServer) ListEntries(req *filer_pb.ListEntriesRequest, stream filer_pb.SeaweedFiler_ListEntriesServer) error { + + glog.V(4).Infof("ListEntries %v", req) limit := int(req.Limit) if limit == 0 { limit = fs.option.DirListingLimit } - resp := &filer_pb.ListEntriesResponse{} + paginationLimit := filer2.PaginationSize + if limit < paginationLimit 
{ + paginationLimit = limit + } + lastFileName := req.StartFromFileName includeLastFile := req.InclusiveStartFrom for limit > 0 { - entries, err := fs.filer.ListDirectoryEntries(filer2.FullPath(req.Directory), lastFileName, includeLastFile, 1024) + entries, err := fs.filer.ListDirectoryEntries(stream.Context(), util.FullPath(req.Directory), lastFileName, includeLastFile, paginationLimit) + if err != nil { - return nil, err + return err } if len(entries) == 0 { - return resp, nil + return nil } includeLastFile = false @@ -65,22 +79,31 @@ func (fs *FilerServer) ListEntries(ctx context.Context, req *filer_pb.ListEntrie } } - resp.Entries = append(resp.Entries, &filer_pb.Entry{ - Name: entry.Name(), - IsDirectory: entry.IsDirectory(), - Chunks: entry.Chunks, - Attributes: filer2.EntryAttributeToPb(entry), - }) + if err := stream.Send(&filer_pb.ListEntriesResponse{ + Entry: &filer_pb.Entry{ + Name: entry.Name(), + IsDirectory: entry.IsDirectory(), + Chunks: entry.Chunks, + Attributes: filer2.EntryAttributeToPb(entry), + Extended: entry.Extended, + }, + }); err != nil { + return err + } + limit-- + if limit == 0 { + return nil + } } - if len(resp.Entries) < 1024 { + if len(entries) < paginationLimit { break } } - return resp, nil + return nil } func (fs *FilerServer) LookupVolume(ctx context.Context, req *filer_pb.LookupVolumeRequest) (*filer_pb.LookupVolumeResponse, error) { @@ -96,7 +119,11 @@ func (fs *FilerServer) LookupVolume(ctx context.Context, req *filer_pb.LookupVol return nil, err } var locs []*filer_pb.Location - for _, loc := range fs.filer.MasterClient.GetLocations(uint32(vid)) { + locations, found := fs.filer.MasterClient.GetLocations(uint32(vid)) + if !found { + continue + } + for _, loc := range locations { locs = append(locs, &filer_pb.Location{ Url: loc.Url, PublicUrl: loc.PublicUrl, @@ -112,49 +139,60 @@ func (fs *FilerServer) LookupVolume(ctx context.Context, req *filer_pb.LookupVol func (fs *FilerServer) CreateEntry(ctx context.Context, req 
*filer_pb.CreateEntryRequest) (resp *filer_pb.CreateEntryResponse, err error) { - fullpath := filer2.FullPath(filepath.Join(req.Directory, req.Entry.Name)) - chunks, garbages := filer2.CompactFileChunks(req.Entry.Chunks) + glog.V(4).Infof("CreateEntry %v", req) - fs.filer.DeleteChunks(garbages) + resp = &filer_pb.CreateEntryResponse{} + + chunks, garbages := filer2.CompactFileChunks(req.Entry.Chunks) if req.Entry.Attributes == nil { - return nil, fmt.Errorf("can not create entry with empty attributes") + glog.V(3).Infof("CreateEntry %s: nil attributes", filepath.Join(req.Directory, req.Entry.Name)) + resp.Error = fmt.Sprintf("can not create entry with empty attributes") + return } - err = fs.filer.CreateEntry(&filer2.Entry{ - FullPath: fullpath, + createErr := fs.filer.CreateEntry(ctx, &filer2.Entry{ + FullPath: util.JoinPath(req.Directory, req.Entry.Name), Attr: filer2.PbToEntryAttribute(req.Entry.Attributes), Chunks: chunks, - }) + }, req.OExcl, req.IsFromOtherCluster) - if err == nil { + if createErr == nil { + fs.filer.DeleteChunks(garbages) + } else { + glog.V(3).Infof("CreateEntry %s: %v", filepath.Join(req.Directory, req.Entry.Name), createErr) + resp.Error = createErr.Error() } - return &filer_pb.CreateEntryResponse{}, err + return } func (fs *FilerServer) UpdateEntry(ctx context.Context, req *filer_pb.UpdateEntryRequest) (*filer_pb.UpdateEntryResponse, error) { - fullpath := filepath.Join(req.Directory, req.Entry.Name) - entry, err := fs.filer.FindEntry(filer2.FullPath(fullpath)) + glog.V(4).Infof("UpdateEntry %v", req) + + fullpath := util.Join(req.Directory, req.Entry.Name) + entry, err := fs.filer.FindEntry(ctx, util.FullPath(fullpath)) if err != nil { return &filer_pb.UpdateEntryResponse{}, fmt.Errorf("not found %s: %v", fullpath, err) } // remove old chunks if not included in the new ones - unusedChunks := filer2.FindUnusedFileChunks(entry.Chunks, req.Entry.Chunks) + unusedChunks := filer2.MinusChunks(entry.Chunks, req.Entry.Chunks) chunks, garbages 
:= filer2.CompactFileChunks(req.Entry.Chunks) newEntry := &filer2.Entry{ - FullPath: filer2.FullPath(filepath.Join(req.Directory, req.Entry.Name)), + FullPath: util.JoinPath(req.Directory, req.Entry.Name), Attr: entry.Attr, + Extended: req.Entry.Extended, Chunks: chunks, } - glog.V(3).Infof("updating %s: %+v, chunks %d: %v => %+v, chunks %d: %v", + glog.V(3).Infof("updating %s: %+v, chunks %d: %v => %+v, chunks %d: %v, extended: %v => %v", fullpath, entry.Attr, len(entry.Chunks), entry.Chunks, - req.Entry.Attributes, len(req.Entry.Chunks), req.Entry.Chunks) + req.Entry.Attributes, len(req.Entry.Chunks), req.Entry.Chunks, + entry.Extended, req.Entry.Extended) if req.Entry.Attributes != nil { if req.Entry.Attributes.Mtime != 0 { @@ -175,19 +213,62 @@ func (fs *FilerServer) UpdateEntry(ctx context.Context, req *filer_pb.UpdateEntr return &filer_pb.UpdateEntryResponse{}, err } - if err = fs.filer.UpdateEntry(entry, newEntry); err == nil { + if err = fs.filer.UpdateEntry(ctx, entry, newEntry); err == nil { fs.filer.DeleteChunks(unusedChunks) fs.filer.DeleteChunks(garbages) + } else { + glog.V(3).Infof("UpdateEntry %s: %v", filepath.Join(req.Directory, req.Entry.Name), err) } - fs.filer.NotifyUpdateEvent(entry, newEntry, true) + fs.filer.NotifyUpdateEvent(ctx, entry, newEntry, true, req.IsFromOtherCluster) return &filer_pb.UpdateEntryResponse{}, err } +func (fs *FilerServer) AppendToEntry(ctx context.Context, req *filer_pb.AppendToEntryRequest) (*filer_pb.AppendToEntryResponse, error) { + + glog.V(4).Infof("AppendToEntry %v", req) + + fullpath := util.NewFullPath(req.Directory, req.EntryName) + var offset int64 = 0 + entry, err := fs.filer.FindEntry(ctx, util.FullPath(fullpath)) + if err == filer_pb.ErrNotFound { + entry = &filer2.Entry{ + FullPath: fullpath, + Attr: filer2.Attr{ + Crtime: time.Now(), + Mtime: time.Now(), + Mode: os.FileMode(0644), + Uid: OS_UID, + Gid: OS_GID, + }, + } + } else { + offset = int64(filer2.TotalSize(entry.Chunks)) + } + + for _, chunk := 
range req.Chunks { + chunk.Offset = offset + offset += int64(chunk.Size) + } + + entry.Chunks = append(entry.Chunks, req.Chunks...) + + err = fs.filer.CreateEntry(context.Background(), entry, false, false) + + return &filer_pb.AppendToEntryResponse{}, err +} + func (fs *FilerServer) DeleteEntry(ctx context.Context, req *filer_pb.DeleteEntryRequest) (resp *filer_pb.DeleteEntryResponse, err error) { - err = fs.filer.DeleteEntryMetaAndData(filer2.FullPath(filepath.Join(req.Directory, req.Name)), req.IsRecursive, req.IsDeleteData) - return &filer_pb.DeleteEntryResponse{}, err + + glog.V(4).Infof("DeleteEntry %v", req) + + err = fs.filer.DeleteEntryMetaAndData(ctx, util.JoinPath(req.Directory, req.Name), req.IsRecursive, req.IgnoreRecursiveError, req.IsDeleteData, req.IsFromOtherCluster) + resp = &filer_pb.DeleteEntryResponse{} + if err != nil { + resp.Error = err.Error() + } + return resp, nil } func (fs *FilerServer) AssignVolume(ctx context.Context, req *filer_pb.AssignVolumeRequest) (resp *filer_pb.AssignVolumeResponse, err error) { @@ -196,6 +277,7 @@ func (fs *FilerServer) AssignVolume(ctx context.Context, req *filer_pb.AssignVol if req.TtlSec > 0 { ttlStr = strconv.Itoa(int(req.TtlSec)) } + collection, replication, _ := fs.detectCollection(req.ParentPath, req.Collection, req.Replication) var altRequest *operation.VolumeAssignRequest @@ -206,54 +288,73 @@ func (fs *FilerServer) AssignVolume(ctx context.Context, req *filer_pb.AssignVol assignRequest := &operation.VolumeAssignRequest{ Count: uint64(req.Count), - Replication: req.Replication, - Collection: req.Collection, + Replication: replication, + Collection: collection, Ttl: ttlStr, DataCenter: dataCenter, } if dataCenter != "" { altRequest = &operation.VolumeAssignRequest{ Count: uint64(req.Count), - Replication: req.Replication, - Collection: req.Collection, + Replication: replication, + Collection: collection, Ttl: ttlStr, DataCenter: "", } } - assignResult, err := operation.Assign(fs.filer.GetMaster(), 
assignRequest, altRequest) + assignResult, err := operation.Assign(fs.filer.GetMaster(), fs.grpcDialOption, assignRequest, altRequest) if err != nil { - return nil, fmt.Errorf("assign volume: %v", err) + glog.V(3).Infof("AssignVolume: %v", err) + return &filer_pb.AssignVolumeResponse{Error: fmt.Sprintf("assign volume: %v", err)}, nil } if assignResult.Error != "" { - return nil, fmt.Errorf("assign volume result: %v", assignResult.Error) + glog.V(3).Infof("AssignVolume error: %v", assignResult.Error) + return &filer_pb.AssignVolumeResponse{Error: fmt.Sprintf("assign volume result: %v", assignResult.Error)}, nil } return &filer_pb.AssignVolumeResponse{ - FileId: assignResult.Fid, - Count: int32(assignResult.Count), - Url: assignResult.Url, - PublicUrl: assignResult.PublicUrl, - }, err + FileId: assignResult.Fid, + Count: int32(assignResult.Count), + Url: assignResult.Url, + PublicUrl: assignResult.PublicUrl, + Auth: string(assignResult.Auth), + Collection: collection, + Replication: replication, + }, nil } func (fs *FilerServer) DeleteCollection(ctx context.Context, req *filer_pb.DeleteCollectionRequest) (resp *filer_pb.DeleteCollectionResponse, err error) { - for _, master := range fs.option.Masters { - _, err = util.Get(fmt.Sprintf("http://%s/col/delete?collection=%s", master, req.Collection)) - } + glog.V(4).Infof("DeleteCollection %v", req) + + err = fs.filer.MasterClient.WithClient(func(client master_pb.SeaweedClient) error { + _, err := client.CollectionDelete(context.Background(), &master_pb.CollectionDeleteRequest{ + Name: req.GetCollection(), + }) + return err + }) return &filer_pb.DeleteCollectionResponse{}, err } func (fs *FilerServer) Statistics(ctx context.Context, req *filer_pb.StatisticsRequest) (resp *filer_pb.StatisticsResponse, err error) { - input := &master_pb.StatisticsRequest{ - Replication: req.Replication, - Collection: req.Collection, - Ttl: req.Ttl, - } + var output *master_pb.StatisticsResponse + + err = 
fs.filer.MasterClient.WithClient(func(masterClient master_pb.SeaweedClient) error { + grpcResponse, grpcErr := masterClient.Statistics(context.Background(), &master_pb.StatisticsRequest{ + Replication: req.Replication, + Collection: req.Collection, + Ttl: req.Ttl, + }) + if grpcErr != nil { + return grpcErr + } + + output = grpcResponse + return nil + }) - output, err := operation.Statistics(fs.filer.GetMaster(), input) if err != nil { return nil, err } @@ -264,3 +365,91 @@ func (fs *FilerServer) Statistics(ctx context.Context, req *filer_pb.StatisticsR FileCount: output.FileCount, }, nil } + +func (fs *FilerServer) GetFilerConfiguration(ctx context.Context, req *filer_pb.GetFilerConfigurationRequest) (resp *filer_pb.GetFilerConfigurationResponse, err error) { + + t := &filer_pb.GetFilerConfigurationResponse{ + Masters: fs.option.Masters, + Collection: fs.option.Collection, + Replication: fs.option.DefaultReplication, + MaxMb: uint32(fs.option.MaxMB), + DirBuckets: fs.filer.DirBucketsPath, + Cipher: fs.filer.Cipher, + } + + glog.V(4).Infof("GetFilerConfiguration: %v", t) + + return t, nil +} + +func (fs *FilerServer) KeepConnected(stream filer_pb.SeaweedFiler_KeepConnectedServer) error { + + req, err := stream.Recv() + if err != nil { + return err + } + + clientName := fmt.Sprintf("%s:%d", req.Name, req.GrpcPort) + m := make(map[string]bool) + for _, tp := range req.Resources { + m[tp] = true + } + fs.brokersLock.Lock() + fs.brokers[clientName] = m + glog.V(0).Infof("+ broker %v", clientName) + fs.brokersLock.Unlock() + + defer func() { + fs.brokersLock.Lock() + delete(fs.brokers, clientName) + glog.V(0).Infof("- broker %v: %v", clientName, err) + fs.brokersLock.Unlock() + }() + + for { + if err := stream.Send(&filer_pb.KeepConnectedResponse{}); err != nil { + glog.V(0).Infof("send broker %v: %+v", clientName, err) + return err + } + // println("replied") + + if _, err := stream.Recv(); err != nil { + glog.V(0).Infof("recv broker %v: %v", clientName, err) + return 
err + } + // println("received") + } + +} + +func (fs *FilerServer) LocateBroker(ctx context.Context, req *filer_pb.LocateBrokerRequest) (resp *filer_pb.LocateBrokerResponse, err error) { + + resp = &filer_pb.LocateBrokerResponse{} + + fs.brokersLock.Lock() + defer fs.brokersLock.Unlock() + + var localBrokers []*filer_pb.LocateBrokerResponse_Resource + + for b, m := range fs.brokers { + if _, found := m[req.Resource]; found { + resp.Found = true + resp.Resources = []*filer_pb.LocateBrokerResponse_Resource{ + { + GrpcAddresses: b, + ResourceCount: int32(len(m)), + }, + } + return + } + localBrokers = append(localBrokers, &filer_pb.LocateBrokerResponse_Resource{ + GrpcAddresses: b, + ResourceCount: int32(len(m)), + }) + } + + resp.Resources = localBrokers + + return resp, nil + +} diff --git a/weed/server/filer_grpc_server_rename.go b/weed/server/filer_grpc_server_rename.go new file mode 100644 index 000000000..9642fec24 --- /dev/null +++ b/weed/server/filer_grpc_server_rename.go @@ -0,0 +1,141 @@ +package weed_server + +import ( + "context" + "fmt" + "path/filepath" + + "github.com/chrislusf/seaweedfs/weed/filer2" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/util" +) + +func (fs *FilerServer) AtomicRenameEntry(ctx context.Context, req *filer_pb.AtomicRenameEntryRequest) (*filer_pb.AtomicRenameEntryResponse, error) { + + glog.V(1).Infof("AtomicRenameEntry %v", req) + + ctx, err := fs.filer.BeginTransaction(ctx) + if err != nil { + return nil, err + } + + oldParent := util.FullPath(filepath.ToSlash(req.OldDirectory)) + + oldEntry, err := fs.filer.FindEntry(ctx, oldParent.Child(req.OldName)) + if err != nil { + fs.filer.RollbackTransaction(ctx) + return nil, fmt.Errorf("%s/%s not found: %v", req.OldDirectory, req.OldName, err) + } + + var events MoveEvents + moveErr := fs.moveEntry(ctx, oldParent, oldEntry, util.FullPath(filepath.ToSlash(req.NewDirectory)), req.NewName, &events) 
+ if moveErr != nil { + fs.filer.RollbackTransaction(ctx) + return nil, fmt.Errorf("%s/%s move error: %v", req.OldDirectory, req.OldName, moveErr) + } else { + if commitError := fs.filer.CommitTransaction(ctx); commitError != nil { + fs.filer.RollbackTransaction(ctx) + return nil, fmt.Errorf("%s/%s move commit error: %v", req.OldDirectory, req.OldName, commitError) + } + } + + return &filer_pb.AtomicRenameEntryResponse{}, nil +} + +func (fs *FilerServer) moveEntry(ctx context.Context, oldParent util.FullPath, entry *filer2.Entry, newParent util.FullPath, newName string, events *MoveEvents) error { + + if err := fs.moveSelfEntry(ctx, oldParent, entry, newParent, newName, events, func() error { + if entry.IsDirectory() { + if err := fs.moveFolderSubEntries(ctx, oldParent, entry, newParent, newName, events); err != nil { + return err + } + } + return nil + }); err != nil { + return fmt.Errorf("fail to move %s => %s: %v", oldParent.Child(entry.Name()), newParent.Child(newName), err) + } + + return nil +} + +func (fs *FilerServer) moveFolderSubEntries(ctx context.Context, oldParent util.FullPath, entry *filer2.Entry, newParent util.FullPath, newName string, events *MoveEvents) error { + + currentDirPath := oldParent.Child(entry.Name()) + newDirPath := newParent.Child(newName) + + glog.V(1).Infof("moving folder %s => %s", currentDirPath, newDirPath) + + lastFileName := "" + includeLastFile := false + for { + + entries, err := fs.filer.ListDirectoryEntries(ctx, currentDirPath, lastFileName, includeLastFile, 1024) + if err != nil { + return err + } + + // println("found", len(entries), "entries under", currentDirPath) + + for _, item := range entries { + lastFileName = item.Name() + // println("processing", lastFileName) + err := fs.moveEntry(ctx, currentDirPath, item, newDirPath, item.Name(), events) + if err != nil { + return err + } + } + if len(entries) < 1024 { + break + } + } + return nil +} + +func (fs *FilerServer) moveSelfEntry(ctx context.Context, oldParent 
util.FullPath, entry *filer2.Entry, newParent util.FullPath, newName string, events *MoveEvents, + moveFolderSubEntries func() error) error { + + oldPath, newPath := oldParent.Child(entry.Name()), newParent.Child(newName) + + glog.V(1).Infof("moving entry %s => %s", oldPath, newPath) + + if oldPath == newPath { + glog.V(1).Infof("skip moving entry %s => %s", oldPath, newPath) + return nil + } + + // add to new directory + newEntry := &filer2.Entry{ + FullPath: newPath, + Attr: entry.Attr, + Chunks: entry.Chunks, + } + createErr := fs.filer.CreateEntry(ctx, newEntry, false, false) + if createErr != nil { + return createErr + } + + events.newEntries = append(events.newEntries, newEntry) + + if moveFolderSubEntries != nil { + if moveChildrenErr := moveFolderSubEntries(); moveChildrenErr != nil { + return moveChildrenErr + } + } + + // delete old entry + deleteErr := fs.filer.DeleteEntryMetaAndData(ctx, oldPath, false, false, false, false) + if deleteErr != nil { + return deleteErr + } + + events.oldEntries = append(events.oldEntries, entry) + + return nil + +} + +type MoveEvents struct { + oldEntries []*filer2.Entry + newEntries []*filer2.Entry +} diff --git a/weed/server/filer_grpc_server_sub_meta.go b/weed/server/filer_grpc_server_sub_meta.go new file mode 100644 index 000000000..8ef75cf02 --- /dev/null +++ b/weed/server/filer_grpc_server_sub_meta.go @@ -0,0 +1,136 @@ +package weed_server + +import ( + "fmt" + "strings" + "time" + + "github.com/golang/protobuf/proto" + + "github.com/chrislusf/seaweedfs/weed/filer2" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/util" +) + +func (fs *FilerServer) SubscribeMetadata(req *filer_pb.SubscribeMetadataRequest, stream filer_pb.SeaweedFiler_SubscribeMetadataServer) error { + + peerAddress := findClientAddress(stream.Context(), 0) + + clientName := fs.addClient(req.ClientName, peerAddress) + + defer fs.deleteClient(clientName) + + 
lastReadTime := time.Unix(0, req.SinceNs) + glog.V(0).Infof(" %v starts to subscribe %s from %+v", clientName, req.PathPrefix, lastReadTime) + + eachEventNotificationFn := eachEventNotificationFn(req, stream, clientName) + + eachLogEntryFn := eachLogEntryFn(eachEventNotificationFn) + + processedTsNs, err := fs.filer.ReadPersistedLogBuffer(lastReadTime, eachLogEntryFn) + if err != nil { + return fmt.Errorf("reading from persisted logs: %v", err) + } + + if processedTsNs != 0 { + lastReadTime = time.Unix(0, processedTsNs) + } + + err = fs.metaAggregator.MetaLogBuffer.LoopProcessLogData(lastReadTime, func() bool { + fs.metaAggregator.ListenersLock.Lock() + fs.metaAggregator.ListenersCond.Wait() + fs.metaAggregator.ListenersLock.Unlock() + return true + }, eachLogEntryFn) + + return err + +} + +func (fs *FilerServer) SubscribeLocalMetadata(req *filer_pb.SubscribeMetadataRequest, stream filer_pb.SeaweedFiler_SubscribeLocalMetadataServer) error { + + peerAddress := findClientAddress(stream.Context(), 0) + + clientName := fs.addClient(req.ClientName, peerAddress) + + defer fs.deleteClient(clientName) + + lastReadTime := time.Unix(0, req.SinceNs) + glog.V(0).Infof(" %v local subscribe %s from %+v", clientName, req.PathPrefix, lastReadTime) + + eachEventNotificationFn := eachEventNotificationFn(req, stream, clientName) + + eachLogEntryFn := eachLogEntryFn(eachEventNotificationFn) + + err := fs.filer.LocalMetaLogBuffer.LoopProcessLogData(lastReadTime, func() bool { + fs.listenersLock.Lock() + fs.listenersCond.Wait() + fs.listenersLock.Unlock() + return true + }, eachLogEntryFn) + + return err + +} + +func eachLogEntryFn(eachEventNotificationFn func(dirPath string, eventNotification *filer_pb.EventNotification, tsNs int64) error) func(logEntry *filer_pb.LogEntry) error { + return func(logEntry *filer_pb.LogEntry) error { + event := &filer_pb.SubscribeMetadataResponse{} + if err := proto.Unmarshal(logEntry.Data, event); err != nil { + glog.Errorf("unexpected unmarshal 
filer_pb.SubscribeMetadataResponse: %v", err) + return fmt.Errorf("unexpected unmarshal filer_pb.SubscribeMetadataResponse: %v", err) + } + + if err := eachEventNotificationFn(event.Directory, event.EventNotification, event.TsNs); err != nil { + return err + } + + return nil + } +} + +func eachEventNotificationFn(req *filer_pb.SubscribeMetadataRequest, stream filer_pb.SeaweedFiler_SubscribeMetadataServer, clientName string) func(dirPath string, eventNotification *filer_pb.EventNotification, tsNs int64) error { + return func(dirPath string, eventNotification *filer_pb.EventNotification, tsNs int64) error { + + // get complete path to the file or directory + var entryName string + if eventNotification.OldEntry != nil { + entryName = eventNotification.OldEntry.Name + } else if eventNotification.NewEntry != nil { + entryName = eventNotification.NewEntry.Name + } + + fullpath := util.Join(dirPath, entryName) + + // skip on filer internal meta logs + if strings.HasPrefix(fullpath, filer2.SystemLogDir) { + return nil + } + + if !strings.HasPrefix(fullpath, req.PathPrefix) { + return nil + } + + message := &filer_pb.SubscribeMetadataResponse{ + Directory: dirPath, + EventNotification: eventNotification, + TsNs: tsNs, + } + if err := stream.Send(message); err != nil { + glog.V(0).Infof("=> client %v: %+v", clientName, err) + return err + } + return nil + } +} + +func (fs *FilerServer) addClient(clientType string, clientAddress string) (clientName string) { + clientName = clientType + "@" + clientAddress + glog.V(0).Infof("+ listener %v", clientName) + return +} + +func (fs *FilerServer) deleteClient(clientName string) { + glog.V(0).Infof("- listener %v", clientName) +} diff --git a/weed/server/filer_server.go b/weed/server/filer_server.go index 9d70e4dac..c6ab6ef0f 100644 --- a/weed/server/filer_server.go +++ b/weed/server/filer_server.go @@ -1,112 +1,185 @@ package weed_server import ( + "context" + "fmt" "net/http" "os" + "strings" + "sync" + "time" + + 
"google.golang.org/grpc" + + "github.com/chrislusf/seaweedfs/weed/util/grace" + + "github.com/chrislusf/seaweedfs/weed/operation" + "github.com/chrislusf/seaweedfs/weed/pb" + "github.com/chrislusf/seaweedfs/weed/pb/master_pb" + "github.com/chrislusf/seaweedfs/weed/stats" + "github.com/chrislusf/seaweedfs/weed/util" "github.com/chrislusf/seaweedfs/weed/filer2" _ "github.com/chrislusf/seaweedfs/weed/filer2/cassandra" + _ "github.com/chrislusf/seaweedfs/weed/filer2/etcd" _ "github.com/chrislusf/seaweedfs/weed/filer2/leveldb" - _ "github.com/chrislusf/seaweedfs/weed/filer2/memdb" + _ "github.com/chrislusf/seaweedfs/weed/filer2/leveldb2" + _ "github.com/chrislusf/seaweedfs/weed/filer2/mongodb" _ "github.com/chrislusf/seaweedfs/weed/filer2/mysql" _ "github.com/chrislusf/seaweedfs/weed/filer2/postgres" _ "github.com/chrislusf/seaweedfs/weed/filer2/redis" + _ "github.com/chrislusf/seaweedfs/weed/filer2/redis2" "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/notification" _ "github.com/chrislusf/seaweedfs/weed/notification/aws_sqs" + _ "github.com/chrislusf/seaweedfs/weed/notification/gocdk_pub_sub" _ "github.com/chrislusf/seaweedfs/weed/notification/google_pub_sub" _ "github.com/chrislusf/seaweedfs/weed/notification/kafka" _ "github.com/chrislusf/seaweedfs/weed/notification/log" "github.com/chrislusf/seaweedfs/weed/security" - "github.com/spf13/viper" ) type FilerOption struct { Masters []string Collection string DefaultReplication string - RedirectOnRead bool DisableDirListing bool MaxMB int - SecretKey string DirListingLimit int DataCenter string DefaultLevelDbDir string + DisableHttp bool + Host string + Port uint32 + recursiveDelete bool + Cipher bool + Filers []string } type FilerServer struct { - option *FilerOption - secret security.Secret - filer *filer2.Filer + option *FilerOption + secret security.SigningKey + filer *filer2.Filer + metaAggregator *filer2.MetaAggregator + grpcDialOption grpc.DialOption + + // notifying clients + 
listenersLock sync.Mutex + listenersCond *sync.Cond + + brokers map[string]map[string]bool + brokersLock sync.Mutex } func NewFilerServer(defaultMux, readonlyMux *http.ServeMux, option *FilerOption) (fs *FilerServer, err error) { fs = &FilerServer{ - option: option, + option: option, + grpcDialOption: security.LoadClientTLS(util.GetViper(), "grpc.filer"), + brokers: make(map[string]map[string]bool), } + fs.listenersCond = sync.NewCond(&fs.listenersLock) if len(option.Masters) == 0 { glog.Fatal("master list is required!") } - fs.filer = filer2.NewFiler(option.Masters) + fs.filer = filer2.NewFiler(option.Masters, fs.grpcDialOption, option.Host, option.Port, option.Collection, option.DefaultReplication, func() { + fs.listenersCond.Broadcast() + }) + fs.filer.Cipher = option.Cipher + + maybeStartMetrics(fs, option) go fs.filer.KeepConnectedToMaster() - v := viper.GetViper() - if !LoadConfiguration("filer", false) { - v.Set("leveldb.enabled", true) - v.Set("leveldb.dir", option.DefaultLevelDbDir) + v := util.GetViper() + if !util.LoadConfiguration("filer", false) { + v.Set("leveldb2.enabled", true) + v.Set("leveldb2.dir", option.DefaultLevelDbDir) _, err := os.Stat(option.DefaultLevelDbDir) if os.IsNotExist(err) { os.MkdirAll(option.DefaultLevelDbDir, 0755) } + glog.V(0).Infof("default to create filer store dir in %s", option.DefaultLevelDbDir) } - LoadConfiguration("notification", false) + util.LoadConfiguration("notification", false) + fs.option.recursiveDelete = v.GetBool("filer.options.recursive_delete") + v.SetDefault("filer.options.buckets_folder", "/buckets") + fs.filer.DirBucketsPath = v.GetString("filer.options.buckets_folder") + fs.filer.FsyncBuckets = v.GetStringSlice("filer.options.buckets_fsync") fs.filer.LoadConfiguration(v) - notification.LoadConfiguration(v.Sub("notification")) + notification.LoadConfiguration(v, "notification.") handleStaticResources(defaultMux) - defaultMux.HandleFunc("/", fs.filerHandler) + if !option.DisableHttp { + 
defaultMux.HandleFunc("/", fs.filerHandler) + } if defaultMux != readonlyMux { readonlyMux.HandleFunc("/", fs.readonlyFilerHandler) } + // set peers + if strings.HasPrefix(fs.filer.GetStore().GetName(), "leveldb") && len(option.Filers) > 0 { + glog.Fatalf("filers using separate leveldb stores should not configure %d peers %+v", len(option.Filers), option.Filers) + } + if len(option.Filers) == 0 { + option.Filers = append(option.Filers, fmt.Sprintf("%s:%d", option.Host, option.Port)) + } + fs.metaAggregator = filer2.NewMetaAggregator(option.Filers, fs.grpcDialOption) + fs.metaAggregator.StartLoopSubscribe(time.Now().UnixNano()) + + fs.filer.LoadBuckets() + + grace.OnInterrupt(func() { + fs.filer.Shutdown() + }) + return fs, nil } -func (fs *FilerServer) jwt(fileId string) security.EncodedJwt { - return security.GenJwt(fs.secret, fileId) -} +func maybeStartMetrics(fs *FilerServer, option *FilerOption) { -func LoadConfiguration(configFileName string, required bool) (loaded bool) { - - // find a filer store - viper.SetConfigName(configFileName) // name of config file (without extension) - viper.AddConfigPath(".") // optionally look for config in the working directory - viper.AddConfigPath("$HOME/.seaweedfs") // call multiple times to add many search paths - viper.AddConfigPath("/etc/seaweedfs/") // path to look for the config file in - - glog.V(0).Infof("Reading %s.toml from %s", configFileName, viper.ConfigFileUsed()) - - if err := viper.MergeInConfig(); err != nil { // Handle errors reading the config file - glog.V(0).Infof("Reading %s: %v", viper.ConfigFileUsed(), err) - if required { - glog.Fatalf("Failed to load %s.toml file from current directory, or $HOME/.seaweedfs/, or /etc/seaweedfs/"+ - "\n\nPlease follow this example and add a filer.toml file to "+ - "current directory, or $HOME/.seaweedfs/, or /etc/seaweedfs/:\n"+ - " https://github.com/chrislusf/seaweedfs/blob/master/weed/%s.toml\n"+ - "\nOr use this command to generate the default toml file\n"+ - " weed 
scaffold -config=%s -output=.\n\n\n", - configFileName, configFileName, configFileName) - } else { - return false + for _, master := range option.Masters { + _, err := pb.ParseFilerGrpcAddress(master) + if err != nil { + glog.Fatalf("invalid master address %s: %v", master, err) } } - return true + isConnected := false + var metricsAddress string + var metricsIntervalSec int + var readErr error + for !isConnected { + for _, master := range option.Masters { + metricsAddress, metricsIntervalSec, readErr = readFilerConfiguration(fs.grpcDialOption, master) + if readErr == nil { + isConnected = true + } else { + time.Sleep(7 * time.Second) + } + } + } + if metricsAddress == "" && metricsIntervalSec <= 0 { + return + } + go stats.LoopPushingMetric("filer", stats.SourceName(option.Port), stats.FilerGather, + func() (addr string, intervalSeconds int) { + return metricsAddress, metricsIntervalSec + }) +} +func readFilerConfiguration(grpcDialOption grpc.DialOption, masterAddress string) (metricsAddress string, metricsIntervalSec int, err error) { + err = operation.WithMasterServerClient(masterAddress, grpcDialOption, func(masterClient master_pb.SeaweedClient) error { + resp, err := masterClient.GetMasterConfiguration(context.Background(), &master_pb.GetMasterConfigurationRequest{}) + if err != nil { + return fmt.Errorf("get master %s configuration: %v", masterAddress, err) + } + metricsAddress, metricsIntervalSec = resp.MetricsAddress, int(resp.MetricsIntervalSeconds) + return nil + }) + return } diff --git a/weed/server/filer_server_handlers.go b/weed/server/filer_server_handlers.go index d76d7df8c..b6bfc3b04 100644 --- a/weed/server/filer_server_handlers.go +++ b/weed/server/filer_server_handlers.go @@ -2,28 +2,47 @@ package weed_server import ( "net/http" + "time" + + "github.com/chrislusf/seaweedfs/weed/stats" ) func (fs *FilerServer) filerHandler(w http.ResponseWriter, r *http.Request) { + start := time.Now() switch r.Method { case "GET": + 
stats.FilerRequestCounter.WithLabelValues("get").Inc() fs.GetOrHeadHandler(w, r, true) + stats.FilerRequestHistogram.WithLabelValues("get").Observe(time.Since(start).Seconds()) case "HEAD": + stats.FilerRequestCounter.WithLabelValues("head").Inc() fs.GetOrHeadHandler(w, r, false) + stats.FilerRequestHistogram.WithLabelValues("head").Observe(time.Since(start).Seconds()) case "DELETE": + stats.FilerRequestCounter.WithLabelValues("delete").Inc() fs.DeleteHandler(w, r) + stats.FilerRequestHistogram.WithLabelValues("delete").Observe(time.Since(start).Seconds()) case "PUT": + stats.FilerRequestCounter.WithLabelValues("put").Inc() fs.PostHandler(w, r) + stats.FilerRequestHistogram.WithLabelValues("put").Observe(time.Since(start).Seconds()) case "POST": + stats.FilerRequestCounter.WithLabelValues("post").Inc() fs.PostHandler(w, r) + stats.FilerRequestHistogram.WithLabelValues("post").Observe(time.Since(start).Seconds()) } } func (fs *FilerServer) readonlyFilerHandler(w http.ResponseWriter, r *http.Request) { + start := time.Now() switch r.Method { case "GET": + stats.FilerRequestCounter.WithLabelValues("get").Inc() fs.GetOrHeadHandler(w, r, true) + stats.FilerRequestHistogram.WithLabelValues("get").Observe(time.Since(start).Seconds()) case "HEAD": + stats.FilerRequestCounter.WithLabelValues("head").Inc() fs.GetOrHeadHandler(w, r, false) + stats.FilerRequestHistogram.WithLabelValues("head").Observe(time.Since(start).Seconds()) } } diff --git a/weed/server/filer_server_handlers_read.go b/weed/server/filer_server_handlers_read.go index 226de640c..76c924df1 100644 --- a/weed/server/filer_server_handlers_read.go +++ b/weed/server/filer_server_handlers_read.go @@ -1,34 +1,47 @@ package weed_server import ( + "bytes" + "context" "io" "mime" - "mime/multipart" "net/http" - "net/url" - "path" + "path/filepath" "strconv" "strings" + "time" "github.com/chrislusf/seaweedfs/weed/filer2" "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/images" + 
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/stats" "github.com/chrislusf/seaweedfs/weed/util" ) func (fs *FilerServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request, isGetMethod bool) { + path := r.URL.Path - if strings.HasSuffix(path, "/") && len(path) > 1 { + isForDirectory := strings.HasSuffix(path, "/") + if isForDirectory && len(path) > 1 { path = path[:len(path)-1] } - entry, err := fs.filer.FindEntry(filer2.FullPath(path)) + entry, err := fs.filer.FindEntry(context.Background(), util.FullPath(path)) if err != nil { if path == "/" { fs.listDirectoryHandler(w, r) return } - glog.V(1).Infof("Not found %s: %v", path, err) - w.WriteHeader(http.StatusNotFound) + if err == filer_pb.ErrNotFound { + glog.V(1).Infof("Not found %s: %v", path, err) + stats.FilerRequestCounter.WithLabelValues("read.notfound").Inc() + w.WriteHeader(http.StatusNotFound) + } else { + glog.V(0).Infof("Internal %s: %v", path, err) + stats.FilerRequestCounter.WithLabelValues("read.internalerror").Inc() + w.WriteHeader(http.StatusInternalServerError) + } return } @@ -41,212 +54,81 @@ func (fs *FilerServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request, return } - if len(entry.Chunks) == 0 { - glog.V(1).Infof("no file chunks for %s, attr=%+v", path, entry.Attr) - w.WriteHeader(http.StatusNoContent) - return - } - - w.Header().Set("Accept-Ranges", "bytes") - if r.Method == "HEAD" { - w.Header().Set("Content-Length", strconv.FormatInt(int64(filer2.TotalSize(entry.Chunks)), 10)) - w.Header().Set("Last-Modified", entry.Attr.Mtime.Format(http.TimeFormat)) - return - } - - if len(entry.Chunks) == 1 { - fs.handleSingleChunk(w, r, entry) - return - } - - fs.handleMultipleChunks(w, r, entry) - -} - -func (fs *FilerServer) handleSingleChunk(w http.ResponseWriter, r *http.Request, entry *filer2.Entry) { - - fileId := entry.Chunks[0].FileId - - urlString, err := fs.filer.MasterClient.LookupFileId(fileId) - if err != nil { - 
glog.V(1).Infof("operation LookupFileId %s failed, err: %v", fileId, err) + if isForDirectory { w.WriteHeader(http.StatusNotFound) return } - if fs.option.RedirectOnRead { - http.Redirect(w, r, urlString, http.StatusFound) - return - } - - u, _ := url.Parse(urlString) - q := u.Query() - for key, values := range r.URL.Query() { - for _, value := range values { - q.Add(key, value) - } - } - u.RawQuery = q.Encode() - request := &http.Request{ - Method: r.Method, - URL: u, - Proto: r.Proto, - ProtoMajor: r.ProtoMajor, - ProtoMinor: r.ProtoMinor, - Header: r.Header, - Body: r.Body, - Host: r.Host, - ContentLength: r.ContentLength, - } - glog.V(3).Infoln("retrieving from", u) - resp, do_err := util.Do(request) - if do_err != nil { - glog.V(0).Infoln("failing to connect to volume server", do_err.Error()) - writeJsonError(w, r, http.StatusInternalServerError, do_err) + if len(entry.Chunks) == 0 { + glog.V(1).Infof("no file chunks for %s, attr=%+v", path, entry.Attr) + stats.FilerRequestCounter.WithLabelValues("read.nocontent").Inc() + w.WriteHeader(http.StatusNoContent) return } - defer resp.Body.Close() - for k, v := range resp.Header { - w.Header()[k] = v - } - w.WriteHeader(resp.StatusCode) - io.Copy(w, resp.Body) -} -func (fs *FilerServer) handleMultipleChunks(w http.ResponseWriter, r *http.Request, entry *filer2.Entry) { + w.Header().Set("Accept-Ranges", "bytes") + w.Header().Set("Last-Modified", entry.Attr.Mtime.Format(http.TimeFormat)) - mimeType := entry.Mime + // mime type + mimeType := entry.Attr.Mime if mimeType == "" { - if ext := path.Ext(entry.Name()); ext != "" { + if ext := filepath.Ext(entry.Name()); ext != "" { mimeType = mime.TypeByExtension(ext) } } if mimeType != "" { w.Header().Set("Content-Type", mimeType) } - setEtag(w, filer2.ETag(entry.Chunks)) - totalSize := int64(filer2.TotalSize(entry.Chunks)) - - rangeReq := r.Header.Get("Range") - - if rangeReq == "" { - w.Header().Set("Content-Length", strconv.FormatInt(totalSize, 10)) - if err := 
fs.writeContent(w, entry, 0, int(totalSize)); err != nil { - http.Error(w, err.Error(), http.StatusInternalServerError) - return + // if modified since + if !entry.Attr.Mtime.IsZero() { + w.Header().Set("Last-Modified", entry.Attr.Mtime.UTC().Format(http.TimeFormat)) + if r.Header.Get("If-Modified-Since") != "" { + if t, parseError := time.Parse(http.TimeFormat, r.Header.Get("If-Modified-Since")); parseError == nil { + if t.After(entry.Attr.Mtime) { + w.WriteHeader(http.StatusNotModified) + return + } + } } - return } - //the rest is dealing with partial content request - //mostly copy from src/pkg/net/http/fs.go - ranges, err := parseRange(rangeReq, totalSize) - if err != nil { - http.Error(w, err.Error(), http.StatusRequestedRangeNotSatisfiable) - return - } - if sumRangesSize(ranges) > totalSize { - // The total number of bytes in all the ranges - // is larger than the size of the file by - // itself, so this is probably an attack, or a - // dumb client. Ignore the range request. - return - } - if len(ranges) == 0 { - return - } - if len(ranges) == 1 { - // RFC 2616, Section 14.16: - // "When an HTTP message includes the content of a single - // range (for example, a response to a request for a - // single range, or to a request for a set of ranges - // that overlap without any holes), this content is - // transmitted with a Content-Range header, and a - // Content-Length header showing the number of bytes - // actually transferred. - // ... - // A response to a request for a single range MUST NOT - // be sent using the multipart/byteranges media type." 
- ra := ranges[0] - w.Header().Set("Content-Length", strconv.FormatInt(ra.length, 10)) - w.Header().Set("Content-Range", ra.contentRange(totalSize)) - w.WriteHeader(http.StatusPartialContent) - - err = fs.writeContent(w, entry, ra.start, int(ra.length)) - if err != nil { - http.Error(w, err.Error(), http.StatusInternalServerError) - return - } + // set etag + etag := filer2.ETagEntry(entry) + if inm := r.Header.Get("If-None-Match"); inm == "\""+etag+"\"" { + w.WriteHeader(http.StatusNotModified) return } + setEtag(w, etag) - // process multiple ranges - for _, ra := range ranges { - if ra.start > totalSize { - http.Error(w, "Out of Range", http.StatusRequestedRangeNotSatisfiable) - return - } - } - sendSize := rangesMIMESize(ranges, mimeType, totalSize) - pr, pw := io.Pipe() - mw := multipart.NewWriter(pw) - w.Header().Set("Content-Type", "multipart/byteranges; boundary="+mw.Boundary()) - sendContent := pr - defer pr.Close() // cause writing goroutine to fail and exit if CopyN doesn't finish. 
- go func() { - for _, ra := range ranges { - part, e := mw.CreatePart(ra.mimeHeader(mimeType, totalSize)) - if e != nil { - pw.CloseWithError(e) - return - } - if e = fs.writeContent(part, entry, ra.start, int(ra.length)); e != nil { - pw.CloseWithError(e) - return - } - } - mw.Close() - pw.Close() - }() - if w.Header().Get("Content-Encoding") == "" { - w.Header().Set("Content-Length", strconv.FormatInt(sendSize, 10)) - } - w.WriteHeader(http.StatusPartialContent) - if _, err := io.CopyN(w, sendContent, sendSize); err != nil { - http.Error(w, "Internal Error", http.StatusInternalServerError) + if r.Method == "HEAD" { + w.Header().Set("Content-Length", strconv.FormatInt(int64(filer2.TotalSize(entry.Chunks)), 10)) return } -} - -func (fs *FilerServer) writeContent(w io.Writer, entry *filer2.Entry, offset int64, size int) error { + filename := entry.Name() + adjustHeadersAfterHEAD(w, r, filename) - chunkViews := filer2.ViewFromChunks(entry.Chunks, offset, size) - - fileId2Url := make(map[string]string) - - for _, chunkView := range chunkViews { - - urlString, err := fs.filer.MasterClient.LookupFileId(chunkView.FileId) - if err != nil { - glog.V(1).Infof("operation LookupFileId %s failed, err: %v", chunkView.FileId, err) - return err - } - fileId2Url[chunkView.FileId] = urlString - } + totalSize := int64(filer2.TotalSize(entry.Chunks)) - for _, chunkView := range chunkViews { - urlString := fileId2Url[chunkView.FileId] - _, err := util.ReadUrlAsStream(urlString, chunkView.Offset, int(chunkView.Size), func(data []byte) { - w.Write(data) - }) - if err != nil { - glog.V(1).Infof("read %s failed, err: %v", chunkView.FileId, err) - return err + if rangeReq := r.Header.Get("Range"); rangeReq == "" { + ext := filepath.Ext(filename) + width, height, mode, shouldResize := shouldResizeImages(ext, r) + if shouldResize { + data, err := filer2.ReadAll(fs.filer.MasterClient, entry.Chunks) + if err != nil { + glog.Errorf("failed to read %s: %v", path, err) + 
w.WriteHeader(http.StatusNotModified) + return + } + rs, _, _ := images.Resized(ext, bytes.NewReader(data), width, height, mode) + io.Copy(w, rs) + return } } - return nil + processRangeRequest(r, w, totalSize, mimeType, func(writer io.Writer, offset int64, size int64) error { + return filer2.StreamContent(fs.filer.MasterClient, writer, entry.Chunks, offset, size) + }) } diff --git a/weed/server/filer_server_handlers_read_dir.go b/weed/server/filer_server_handlers_read_dir.go index bcf7f0eb5..ae28fc1db 100644 --- a/weed/server/filer_server_handlers_read_dir.go +++ b/weed/server/filer_server_handlers_read_dir.go @@ -1,13 +1,15 @@ package weed_server import ( + "context" "net/http" "strconv" "strings" - "github.com/chrislusf/seaweedfs/weed/filer2" "github.com/chrislusf/seaweedfs/weed/glog" ui "github.com/chrislusf/seaweedfs/weed/server/filer_ui" + "github.com/chrislusf/seaweedfs/weed/stats" + "github.com/chrislusf/seaweedfs/weed/util" ) // listDirectoryHandler lists directories and folers under a directory @@ -15,6 +17,9 @@ import ( // sub directories are listed on the first page, when "lastFileName" // is empty. 
func (fs *FilerServer) listDirectoryHandler(w http.ResponseWriter, r *http.Request) { + + stats.FilerRequestCounter.WithLabelValues("list").Inc() + path := r.URL.Path if strings.HasSuffix(path, "/") && len(path) > 1 { path = path[:len(path)-1] @@ -27,7 +32,7 @@ func (fs *FilerServer) listDirectoryHandler(w http.ResponseWriter, r *http.Reque lastFileName := r.FormValue("lastFileName") - entries, err := fs.filer.ListDirectoryEntries(filer2.FullPath(path), lastFileName, false, limit) + entries, err := fs.filer.ListDirectoryEntries(context.Background(), util.FullPath(path), lastFileName, false, limit) if err != nil { glog.V(0).Infof("listDirectory %s %s %d: %s", path, lastFileName, limit, err) diff --git a/weed/server/filer_server_handlers_write.go b/weed/server/filer_server_handlers_write.go index 32f481e74..a642c502a 100644 --- a/weed/server/filer_server_handlers_write.go +++ b/weed/server/filer_server_handlers_write.go @@ -1,11 +1,18 @@ package weed_server import ( + "context" + "crypto/md5" "encoding/json" "errors" + "fmt" + "io" "io/ioutil" + "mime" "net/http" "net/url" + "os" + filenamePath "path" "strconv" "strings" "time" @@ -14,8 +21,10 @@ import ( "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/operation" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/security" + "github.com/chrislusf/seaweedfs/weed/stats" + "github.com/chrislusf/seaweedfs/weed/storage/needle" "github.com/chrislusf/seaweedfs/weed/util" - "os" ) var ( @@ -25,18 +34,23 @@ var ( type FilerPostResult struct { Name string `json:"name,omitempty"` - Size uint32 `json:"size,omitempty"` + Size int64 `json:"size,omitempty"` Error string `json:"error,omitempty"` Fid string `json:"fid,omitempty"` Url string `json:"url,omitempty"` } -func (fs *FilerServer) assignNewFileInfo(w http.ResponseWriter, r *http.Request, replication, collection string, dataCenter string) (fileId, urlLocation string, err error) { +func (fs *FilerServer) 
assignNewFileInfo(w http.ResponseWriter, r *http.Request, replication, collection, dataCenter, ttlString string, fsync bool) (fileId, urlLocation string, auth security.EncodedJwt, err error) { + + stats.FilerRequestCounter.WithLabelValues("assign").Inc() + start := time.Now() + defer func() { stats.FilerRequestHistogram.WithLabelValues("assign").Observe(time.Since(start).Seconds()) }() + ar := &operation.VolumeAssignRequest{ Count: 1, Replication: replication, Collection: collection, - Ttl: r.URL.Query().Get("ttl"), + Ttl: ttlString, DataCenter: dataCenter, } var altRequest *operation.VolumeAssignRequest @@ -45,12 +59,12 @@ func (fs *FilerServer) assignNewFileInfo(w http.ResponseWriter, r *http.Request, Count: 1, Replication: replication, Collection: collection, - Ttl: r.URL.Query().Get("ttl"), + Ttl: ttlString, DataCenter: "", } } - assignResult, ae := operation.Assign(fs.filer.GetMaster(), ar, altRequest) + assignResult, ae := operation.Assign(fs.filer.GetMaster(), fs.grpcDialOption, ar, altRequest) if ae != nil { glog.Errorf("failing to assign a file id: %v", ae) writeJsonError(w, r, http.StatusInternalServerError, ae) @@ -59,166 +73,293 @@ func (fs *FilerServer) assignNewFileInfo(w http.ResponseWriter, r *http.Request, } fileId = assignResult.Fid urlLocation = "http://" + assignResult.Url + "/" + assignResult.Fid + if fsync { + urlLocation += "?fsync=true" + } + auth = assignResult.Auth return } func (fs *FilerServer) PostHandler(w http.ResponseWriter, r *http.Request) { + ctx := context.Background() + query := r.URL.Query() - replication := query.Get("replication") - if replication == "" { - replication = fs.option.DefaultReplication - } - collection := query.Get("collection") - if collection == "" { - collection = fs.option.Collection - } + collection, replication, fsync := fs.detectCollection(r.RequestURI, query.Get("collection"), query.Get("replication")) dataCenter := query.Get("dataCenter") if dataCenter == "" { dataCenter = fs.option.DataCenter } + 
ttlString := r.URL.Query().Get("ttl") + + // read ttl in seconds + ttl, err := needle.ReadTTL(ttlString) + ttlSeconds := int32(0) + if err == nil { + ttlSeconds = int32(ttl.Minutes()) * 60 + } + + if autoChunked := fs.autoChunk(ctx, w, r, replication, collection, dataCenter, ttlSeconds, ttlString, fsync); autoChunked { + return + } + + if fs.option.Cipher { + reply, err := fs.encrypt(ctx, w, r, replication, collection, dataCenter, ttlSeconds, ttlString, fsync) + if err != nil { + writeJsonError(w, r, http.StatusInternalServerError, err) + } else if reply != nil { + writeJsonQuiet(w, r, http.StatusCreated, reply) + } - if autoChunked := fs.autoChunk(w, r, replication, collection, dataCenter); autoChunked { return } - fileId, urlLocation, err := fs.assignNewFileInfo(w, r, replication, collection, dataCenter) + fileId, urlLocation, auth, err := fs.assignNewFileInfo(w, r, replication, collection, dataCenter, ttlString, fsync) if err != nil || fileId == "" || urlLocation == "" { glog.V(0).Infof("fail to allocate volume for %s, collection:%s, datacenter:%s", r.URL.Path, collection, dataCenter) + writeJsonError(w, r, http.StatusInternalServerError, fmt.Errorf("fail to allocate volume for %s, collection:%s, datacenter:%s", r.URL.Path, collection, dataCenter)) return } glog.V(4).Infof("write %s to %v", r.URL.Path, urlLocation) u, _ := url.Parse(urlLocation) - - // This allows a client to generate a chunk manifest and submit it to the filer -- it is a little off - // because they need to provide FIDs instead of file paths... 
- cm, _ := strconv.ParseBool(query.Get("cm")) - if cm { - q := u.Query() - q.Set("cm", "true") - u.RawQuery = q.Encode() + ret, md5value, err := fs.uploadToVolumeServer(r, u, auth, w, fileId) + if err != nil { + return } - glog.V(4).Infoln("post to", u) - // send request to volume server - request := &http.Request{ - Method: r.Method, - URL: u, - Proto: r.Proto, - ProtoMajor: r.ProtoMajor, - ProtoMinor: r.ProtoMinor, - Header: r.Header, - Body: r.Body, - Host: r.Host, - ContentLength: r.ContentLength, - } - resp, do_err := util.Do(request) - if do_err != nil { - glog.Errorf("failing to connect to volume server %s: %v, %+v", r.RequestURI, do_err, r.Method) - writeJsonError(w, r, http.StatusInternalServerError, do_err) + if err = fs.updateFilerStore(ctx, r, w, replication, collection, ret, md5value, fileId, ttlSeconds); err != nil { return } - defer resp.Body.Close() - etag := resp.Header.Get("ETag") - resp_body, ra_err := ioutil.ReadAll(resp.Body) - if ra_err != nil { - glog.V(0).Infoln("failing to upload to volume server", r.RequestURI, ra_err.Error()) - writeJsonError(w, r, http.StatusInternalServerError, ra_err) - return + + // send back post result + reply := FilerPostResult{ + Name: ret.Name, + Size: int64(ret.Size), + Error: ret.Error, + Fid: fileId, + Url: urlLocation, } - glog.V(4).Infoln("post result", string(resp_body)) - var ret operation.UploadResult - unmarshal_err := json.Unmarshal(resp_body, &ret) - if unmarshal_err != nil { - glog.V(0).Infoln("failing to read upload resonse", r.RequestURI, string(resp_body)) - writeJsonError(w, r, http.StatusInternalServerError, unmarshal_err) - return + setEtag(w, ret.ETag) + writeJsonQuiet(w, r, http.StatusCreated, reply) +} + +// update metadata in filer store +func (fs *FilerServer) updateFilerStore(ctx context.Context, r *http.Request, w http.ResponseWriter, replication string, + collection string, ret *operation.UploadResult, md5value []byte, fileId string, ttlSeconds int32) (err error) { + + 
stats.FilerRequestCounter.WithLabelValues("postStoreWrite").Inc() + start := time.Now() + defer func() { + stats.FilerRequestHistogram.WithLabelValues("postStoreWrite").Observe(time.Since(start).Seconds()) + }() + + modeStr := r.URL.Query().Get("mode") + if modeStr == "" { + modeStr = "0660" } - if ret.Error != "" { - glog.V(0).Infoln("failing to post to volume server", r.RequestURI, ret.Error) - writeJsonError(w, r, http.StatusInternalServerError, errors.New(ret.Error)) - return + mode, err := strconv.ParseUint(modeStr, 8, 32) + if err != nil { + glog.Errorf("Invalid mode format: %s, use 0660 by default", modeStr) + mode = 0660 } - // find correct final path path := r.URL.Path if strings.HasSuffix(path, "/") { if ret.Name != "" { path += ret.Name - } else { - fs.filer.DeleteFileByFileId(fileId) - glog.V(0).Infoln("Can not to write to folder", path, "without a file name!") - writeJsonError(w, r, http.StatusInternalServerError, - errors.New("Can not to write to folder "+path+" without a file name")) - return } } - - // update metadata in filer store - existingEntry, err := fs.filer.FindEntry(filer2.FullPath(path)) + existingEntry, err := fs.filer.FindEntry(ctx, util.FullPath(path)) crTime := time.Now() if err == nil && existingEntry != nil { - // glog.V(4).Infof("existing %s => %+v", path, existingEntry) - if existingEntry.IsDirectory() { - path += "/" + ret.Name - } else { - crTime = existingEntry.Crtime - } + crTime = existingEntry.Crtime } entry := &filer2.Entry{ - FullPath: filer2.FullPath(path), + FullPath: util.FullPath(path), Attr: filer2.Attr{ Mtime: time.Now(), Crtime: crTime, - Mode: 0660, + Mode: os.FileMode(mode), Uid: OS_UID, Gid: OS_GID, Replication: replication, Collection: collection, - TtlSec: int32(util.ParseInt(r.URL.Query().Get("ttl"), 0)), + TtlSec: ttlSeconds, + Mime: ret.Mime, + Md5: md5value, }, Chunks: []*filer_pb.FileChunk{{ FileId: fileId, Size: uint64(ret.Size), Mtime: time.Now().UnixNano(), - ETag: etag, + ETag: ret.ETag, }}, } + if 
entry.Attr.Mime == "" { + if ext := filenamePath.Ext(path); ext != "" { + entry.Attr.Mime = mime.TypeByExtension(ext) + } + } // glog.V(4).Infof("saving %s => %+v", path, entry) - if db_err := fs.filer.CreateEntry(entry); db_err != nil { - fs.filer.DeleteFileByFileId(fileId) - glog.V(0).Infof("failing to write %s to filer server : %v", path, db_err) - writeJsonError(w, r, http.StatusInternalServerError, db_err) + if dbErr := fs.filer.CreateEntry(ctx, entry, false, false); dbErr != nil { + fs.filer.DeleteChunks(entry.Chunks) + glog.V(0).Infof("failing to write %s to filer server : %v", path, dbErr) + writeJsonError(w, r, http.StatusInternalServerError, dbErr) + err = dbErr return } - // send back post result - reply := FilerPostResult{ - Name: ret.Name, - Size: ret.Size, - Error: ret.Error, - Fid: fileId, - Url: urlLocation, + return nil +} + +// send request to volume server +func (fs *FilerServer) uploadToVolumeServer(r *http.Request, u *url.URL, auth security.EncodedJwt, w http.ResponseWriter, fileId string) (ret *operation.UploadResult, md5value []byte, err error) { + + stats.FilerRequestCounter.WithLabelValues("postUpload").Inc() + start := time.Now() + defer func() { stats.FilerRequestHistogram.WithLabelValues("postUpload").Observe(time.Since(start).Seconds()) }() + + ret = &operation.UploadResult{} + + md5Hash := md5.New() + body := r.Body + if r.Method == "PUT" { + // only PUT or large chunked files has Md5 in attributes + body = ioutil.NopCloser(io.TeeReader(r.Body, md5Hash)) } - setEtag(w, etag) - writeJsonQuiet(w, r, http.StatusCreated, reply) + + request := &http.Request{ + Method: r.Method, + URL: u, + Proto: r.Proto, + ProtoMajor: r.ProtoMajor, + ProtoMinor: r.ProtoMinor, + Header: r.Header, + Body: body, + Host: r.Host, + ContentLength: r.ContentLength, + } + + if auth != "" { + request.Header.Set("Authorization", "BEARER "+string(auth)) + } + resp, doErr := util.Do(request) + if doErr != nil { + glog.Errorf("failing to connect to volume server %s: 
%v, %+v", r.RequestURI, doErr, r.Method) + writeJsonError(w, r, http.StatusInternalServerError, doErr) + err = doErr + return + } + defer func() { + io.Copy(ioutil.Discard, resp.Body) + resp.Body.Close() + }() + + respBody, raErr := ioutil.ReadAll(resp.Body) + if raErr != nil { + glog.V(0).Infoln("failing to upload to volume server", r.RequestURI, raErr.Error()) + writeJsonError(w, r, http.StatusInternalServerError, raErr) + err = raErr + return + } + + glog.V(4).Infoln("post result", string(respBody)) + unmarshalErr := json.Unmarshal(respBody, &ret) + if unmarshalErr != nil { + glog.V(0).Infoln("failing to read upload resonse", r.RequestURI, string(respBody)) + writeJsonError(w, r, http.StatusInternalServerError, unmarshalErr) + err = unmarshalErr + return + } + if ret.Error != "" { + err = errors.New(ret.Error) + glog.V(0).Infoln("failing to post to volume server", r.RequestURI, ret.Error) + writeJsonError(w, r, http.StatusInternalServerError, err) + return + } + // find correct final path + path := r.URL.Path + if strings.HasSuffix(path, "/") { + if ret.Name != "" { + path += ret.Name + } else { + err = fmt.Errorf("can not to write to folder %s without a file name", path) + fs.filer.DeleteFileByFileId(fileId) + glog.V(0).Infoln("Can not to write to folder", path, "without a file name!") + writeJsonError(w, r, http.StatusInternalServerError, err) + return + } + } + // use filer calculated md5 ETag, instead of the volume server crc ETag + if r.Method == "PUT" { + md5value = md5Hash.Sum(nil) + } + ret.ETag = getEtag(resp) + return } // curl -X DELETE http://localhost:8888/path/to // curl -X DELETE http://localhost:8888/path/to?recursive=true +// curl -X DELETE http://localhost:8888/path/to?recursive=true&ignoreRecursiveError=true +// curl -X DELETE http://localhost:8888/path/to?recursive=true&skipChunkDeletion=true func (fs *FilerServer) DeleteHandler(w http.ResponseWriter, r *http.Request) { isRecursive := r.FormValue("recursive") == "true" + if !isRecursive && 
fs.option.recursiveDelete { + if r.FormValue("recursive") != "false" { + isRecursive = true + } + } + ignoreRecursiveError := r.FormValue("ignoreRecursiveError") == "true" + skipChunkDeletion := r.FormValue("skipChunkDeletion") == "true" - err := fs.filer.DeleteEntryMetaAndData(filer2.FullPath(r.URL.Path), isRecursive, true) + objectPath := r.URL.Path + if len(r.URL.Path) > 1 && strings.HasSuffix(objectPath, "/") { + objectPath = objectPath[0 : len(objectPath)-1] + } + + err := fs.filer.DeleteEntryMetaAndData(context.Background(), util.FullPath(objectPath), isRecursive, ignoreRecursiveError, !skipChunkDeletion, false) if err != nil { - glog.V(1).Infoln("deleting", r.URL.Path, ":", err.Error()) - writeJsonError(w, r, http.StatusInternalServerError, err) + glog.V(1).Infoln("deleting", objectPath, ":", err.Error()) + httpStatus := http.StatusInternalServerError + if err == filer_pb.ErrNotFound { + httpStatus = http.StatusNotFound + } + writeJsonError(w, r, httpStatus, err) return } w.WriteHeader(http.StatusNoContent) } + +func (fs *FilerServer) detectCollection(requestURI, qCollection, qReplication string) (collection, replication string, fsync bool) { + // default + collection = fs.option.Collection + replication = fs.option.DefaultReplication + + // get default collection settings + if qCollection != "" { + collection = qCollection + } + if qReplication != "" { + replication = qReplication + } + + // required by buckets folder + if strings.HasPrefix(requestURI, fs.filer.DirBucketsPath+"/") { + bucketAndObjectKey := requestURI[len(fs.filer.DirBucketsPath)+1:] + t := strings.Index(bucketAndObjectKey, "/") + if t < 0 { + collection = bucketAndObjectKey + } + if t > 0 { + collection = bucketAndObjectKey[:t] + } + replication, fsync = fs.filer.ReadBucketOption(collection) + } + + return +} diff --git a/weed/server/filer_server_handlers_write_autochunk.go b/weed/server/filer_server_handlers_write_autochunk.go index 4b1745aaa..29546542c 100644 --- 
a/weed/server/filer_server_handlers_write_autochunk.go +++ b/weed/server/filer_server_handlers_write_autochunk.go @@ -1,7 +1,8 @@ package weed_server import ( - "bytes" + "context" + "crypto/md5" "io" "io/ioutil" "net/http" @@ -14,10 +15,13 @@ import ( "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/operation" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/security" + "github.com/chrislusf/seaweedfs/weed/stats" "github.com/chrislusf/seaweedfs/weed/util" ) -func (fs *FilerServer) autoChunk(w http.ResponseWriter, r *http.Request, replication string, collection string, dataCenter string) bool { +func (fs *FilerServer) autoChunk(ctx context.Context, w http.ResponseWriter, r *http.Request, + replication string, collection string, dataCenter string, ttlSec int32, ttlString string, fsync bool) bool { if r.Method != "POST" { glog.V(4).Infoln("AutoChunking not supported for method", r.Method) return false @@ -53,7 +57,7 @@ func (fs *FilerServer) autoChunk(w http.ResponseWriter, r *http.Request, replica return false } - reply, err := fs.doAutoChunk(w, r, contentLength, chunkSize, replication, collection, dataCenter) + reply, err := fs.doAutoChunk(ctx, w, r, contentLength, chunkSize, replication, collection, dataCenter, ttlSec, ttlString, fsync) if err != nil { writeJsonError(w, r, http.StatusInternalServerError, err) } else if reply != nil { @@ -62,7 +66,14 @@ func (fs *FilerServer) autoChunk(w http.ResponseWriter, r *http.Request, replica return true } -func (fs *FilerServer) doAutoChunk(w http.ResponseWriter, r *http.Request, contentLength int64, chunkSize int32, replication string, collection string, dataCenter string) (filerResult *FilerPostResult, replyerr error) { +func (fs *FilerServer) doAutoChunk(ctx context.Context, w http.ResponseWriter, r *http.Request, + contentLength int64, chunkSize int32, replication string, collection string, dataCenter string, ttlSec int32, ttlString string, fsync 
bool) (filerResult *FilerPostResult, replyerr error) { + + stats.FilerRequestCounter.WithLabelValues("postAutoChunk").Inc() + start := time.Now() + defer func() { + stats.FilerRequestHistogram.WithLabelValues("postAutoChunk").Observe(time.Since(start).Seconds()) + }() multipartReader, multipartReaderErr := r.MultipartReader() if multipartReaderErr != nil { @@ -78,68 +89,46 @@ func (fs *FilerServer) doAutoChunk(w http.ResponseWriter, r *http.Request, conte if fileName != "" { fileName = path.Base(fileName) } + contentType := part1.Header.Get("Content-Type") var fileChunks []*filer_pb.FileChunk - totalBytesRead := int64(0) - tmpBufferSize := int32(1024 * 1024) - tmpBuffer := bytes.NewBuffer(make([]byte, 0, tmpBufferSize)) - chunkBuf := make([]byte, chunkSize+tmpBufferSize, chunkSize+tmpBufferSize) // chunk size plus a little overflow - chunkBufOffset := int32(0) + md5Hash := md5.New() + var partReader = ioutil.NopCloser(io.TeeReader(part1, md5Hash)) + chunkOffset := int64(0) - writtenChunks := 0 - filerResult = &FilerPostResult{ - Name: fileName, - } + for chunkOffset < contentLength { + limitedReader := io.LimitReader(partReader, int64(chunkSize)) - for totalBytesRead < contentLength { - tmpBuffer.Reset() - bytesRead, readErr := io.CopyN(tmpBuffer, part1, int64(tmpBufferSize)) - readFully := readErr != nil && readErr == io.EOF - tmpBuf := tmpBuffer.Bytes() - bytesToCopy := tmpBuf[0:int(bytesRead)] - - copy(chunkBuf[chunkBufOffset:chunkBufOffset+int32(bytesRead)], bytesToCopy) - chunkBufOffset = chunkBufOffset + int32(bytesRead) - - if chunkBufOffset >= chunkSize || readFully || (chunkBufOffset > 0 && bytesRead == 0) { - writtenChunks = writtenChunks + 1 - fileId, urlLocation, assignErr := fs.assignNewFileInfo(w, r, replication, collection, dataCenter) - if assignErr != nil { - return nil, assignErr - } - - // upload the chunk to the volume server - chunkName := fileName + "_chunk_" + strconv.FormatInt(int64(len(fileChunks)+1), 10) - uploadErr := 
fs.doUpload(urlLocation, w, r, chunkBuf[0:chunkBufOffset], chunkName, "application/octet-stream", fileId) - if uploadErr != nil { - return nil, uploadErr - } - - // Save to chunk manifest structure - fileChunks = append(fileChunks, - &filer_pb.FileChunk{ - FileId: fileId, - Offset: chunkOffset, - Size: uint64(chunkBufOffset), - Mtime: time.Now().UnixNano(), - }, - ) - - // reset variables for the next chunk - chunkBufOffset = 0 - chunkOffset = totalBytesRead + int64(bytesRead) + // assign one file id for one chunk + fileId, urlLocation, auth, assignErr := fs.assignNewFileInfo(w, r, replication, collection, dataCenter, ttlString, fsync) + if assignErr != nil { + return nil, assignErr } - totalBytesRead = totalBytesRead + int64(bytesRead) + // upload the chunk to the volume server + uploadResult, uploadErr := fs.doUpload(urlLocation, w, r, limitedReader, fileName, contentType, nil, auth) + if uploadErr != nil { + return nil, uploadErr + } - if bytesRead == 0 || readFully { + // if last chunk exhausted the reader exactly at the border + if uploadResult.Size == 0 { break } - if readErr != nil { - return nil, readErr + // Save to chunk manifest structure + fileChunks = append(fileChunks, uploadResult.ToPbFileChunk(fileId, chunkOffset)) + + glog.V(4).Infof("uploaded %s chunk %d to %s [%d,%d) of %d", fileName, len(fileChunks), fileId, chunkOffset, chunkOffset+int64(uploadResult.Size), contentLength) + + // reset variables for the next chunk + chunkOffset = chunkOffset + int64(uploadResult.Size) + + // if last chunk was not at full chunk size, but already exhausted the reader + if int64(uploadResult.Size) < int64(chunkSize) { + break } } @@ -152,7 +141,7 @@ func (fs *FilerServer) doAutoChunk(w http.ResponseWriter, r *http.Request, conte glog.V(4).Infoln("saving", path) entry := &filer2.Entry{ - FullPath: filer2.FullPath(path), + FullPath: util.FullPath(path), Attr: filer2.Attr{ Mtime: time.Now(), Crtime: time.Now(), @@ -161,30 +150,37 @@ func (fs *FilerServer) 
doAutoChunk(w http.ResponseWriter, r *http.Request, conte Gid: OS_GID, Replication: replication, Collection: collection, - TtlSec: int32(util.ParseInt(r.URL.Query().Get("ttl"), 0)), + TtlSec: ttlSec, + Mime: contentType, + Md5: md5Hash.Sum(nil), }, Chunks: fileChunks, } - if db_err := fs.filer.CreateEntry(entry); db_err != nil { - replyerr = db_err - filerResult.Error = db_err.Error() - glog.V(0).Infof("failing to write %s to filer server : %v", path, db_err) + + filerResult = &FilerPostResult{ + Name: fileName, + Size: chunkOffset, + } + + if dbErr := fs.filer.CreateEntry(ctx, entry, false, false); dbErr != nil { + fs.filer.DeleteChunks(entry.Chunks) + replyerr = dbErr + filerResult.Error = dbErr.Error() + glog.V(0).Infof("failing to write %s to filer server : %v", path, dbErr) return } return } -func (fs *FilerServer) doUpload(urlLocation string, w http.ResponseWriter, r *http.Request, chunkBuf []byte, fileName string, contentType string, fileId string) (err error) { - err = nil +func (fs *FilerServer) doUpload(urlLocation string, w http.ResponseWriter, r *http.Request, limitedReader io.Reader, fileName string, contentType string, pairMap map[string]string, auth security.EncodedJwt) (*operation.UploadResult, error) { - ioReader := ioutil.NopCloser(bytes.NewBuffer(chunkBuf)) - uploadResult, uploadError := operation.Upload(urlLocation, fileName, ioReader, false, contentType, nil, fs.jwt(fileId)) - if uploadResult != nil { - glog.V(0).Infoln("Chunk upload result. 
Name:", uploadResult.Name, "Fid:", fileId, "Size:", uploadResult.Size) - } - if uploadError != nil { - err = uploadError - } - return + stats.FilerRequestCounter.WithLabelValues("postAutoChunkUpload").Inc() + start := time.Now() + defer func() { + stats.FilerRequestHistogram.WithLabelValues("postAutoChunkUpload").Observe(time.Since(start).Seconds()) + }() + + uploadResult, err, _ := operation.Upload(urlLocation, fileName, fs.option.Cipher, limitedReader, false, contentType, pairMap, auth) + return uploadResult, err } diff --git a/weed/server/filer_server_handlers_write_cipher.go b/weed/server/filer_server_handlers_write_cipher.go new file mode 100644 index 000000000..17f35838d --- /dev/null +++ b/weed/server/filer_server_handlers_write_cipher.go @@ -0,0 +1,90 @@ +package weed_server + +import ( + "context" + "fmt" + "net/http" + "strings" + "time" + + "github.com/chrislusf/seaweedfs/weed/filer2" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/operation" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/storage/needle" + "github.com/chrislusf/seaweedfs/weed/util" +) + +// handling single chunk POST or PUT upload +func (fs *FilerServer) encrypt(ctx context.Context, w http.ResponseWriter, r *http.Request, + replication string, collection string, dataCenter string, ttlSeconds int32, ttlString string, fsync bool) (filerResult *FilerPostResult, err error) { + + fileId, urlLocation, auth, err := fs.assignNewFileInfo(w, r, replication, collection, dataCenter, ttlString, fsync) + + if err != nil || fileId == "" || urlLocation == "" { + return nil, fmt.Errorf("fail to allocate volume for %s, collection:%s, datacenter:%s", r.URL.Path, collection, dataCenter) + } + + glog.V(4).Infof("write %s to %v", r.URL.Path, urlLocation) + + // Note: encrypt(gzip(data)), encrypt data first, then gzip + + sizeLimit := int64(fs.option.MaxMB) * 1024 * 1024 + + pu, err := needle.ParseUpload(r, sizeLimit) + 
uncompressedData := pu.Data + if pu.IsGzipped { + uncompressedData = pu.UncompressedData + } + if pu.MimeType == "" { + pu.MimeType = http.DetectContentType(uncompressedData) + // println("detect2 mimetype to", pu.MimeType) + } + + uploadResult, uploadError := operation.UploadData(urlLocation, pu.FileName, true, uncompressedData, false, pu.MimeType, pu.PairMap, auth) + if uploadError != nil { + return nil, fmt.Errorf("upload to volume server: %v", uploadError) + } + + // Save to chunk manifest structure + fileChunks := []*filer_pb.FileChunk{uploadResult.ToPbFileChunk(fileId, 0)} + + // fmt.Printf("uploaded: %+v\n", uploadResult) + + path := r.URL.Path + if strings.HasSuffix(path, "/") { + if pu.FileName != "" { + path += pu.FileName + } + } + + entry := &filer2.Entry{ + FullPath: util.FullPath(path), + Attr: filer2.Attr{ + Mtime: time.Now(), + Crtime: time.Now(), + Mode: 0660, + Uid: OS_UID, + Gid: OS_GID, + Replication: replication, + Collection: collection, + TtlSec: ttlSeconds, + Mime: pu.MimeType, + }, + Chunks: fileChunks, + } + + filerResult = &FilerPostResult{ + Name: pu.FileName, + Size: int64(pu.OriginalDataSize), + } + + if dbErr := fs.filer.CreateEntry(ctx, entry, false, false); dbErr != nil { + fs.filer.DeleteChunks(entry.Chunks) + err = dbErr + filerResult.Error = dbErr.Error() + return + } + + return +} diff --git a/weed/server/filer_ui/breadcrumb.go b/weed/server/filer_ui/breadcrumb.go index d056a4b25..f21cce7d1 100644 --- a/weed/server/filer_ui/breadcrumb.go +++ b/weed/server/filer_ui/breadcrumb.go @@ -1,8 +1,9 @@ package master_ui import ( - "path/filepath" "strings" + + "github.com/chrislusf/seaweedfs/weed/util" ) type Breadcrumb struct { @@ -14,10 +15,14 @@ func ToBreadcrumb(fullpath string) (crumbs []Breadcrumb) { parts := strings.Split(fullpath, "/") for i := 0; i < len(parts); i++ { - crumbs = append(crumbs, Breadcrumb{ - Name: parts[i] + "/", - Link: "/" + filepath.Join(parts[0:i+1]...), - }) + crumb := Breadcrumb{ + Name: parts[i] + " /", + 
Link: "/" + util.Join(parts[0:i+1]...), + } + if !strings.HasSuffix(crumb.Link, "/") { + crumb.Link += "/" + } + crumbs = append(crumbs, crumb) } return diff --git a/weed/server/filer_ui/templates.go b/weed/server/filer_ui/templates.go index e31685ea0..e532b27e2 100644 --- a/weed/server/filer_ui/templates.go +++ b/weed/server/filer_ui/templates.go @@ -50,7 +50,7 @@ var StatusTpl = template.Must(template.New("status").Funcs(funcMap).Parse(`

{{ range $entry := .Breadcrumbs }} - + {{ $entry.Name }} {{ end }} @@ -78,20 +78,19 @@ var StatusTpl = template.Must(template.New("status").Funcs(funcMap).Parse(` {{end}} - + {{if $entry.IsDirectory}} {{else}} - {{ $entry.Mime }} + {{ $entry.Mime }}  {{end}} - + {{if $entry.IsDirectory}} {{else}} - {{ $entry.Size | humanizeBytes }} -     + {{ $entry.Size | humanizeBytes }}  {{end}} - + {{ $entry.Timestamp.Format "2006-01-02 15:04" }} @@ -162,7 +161,7 @@ function uploadFile(file, i) { var url = window.location.href var xhr = new XMLHttpRequest() var formData = new FormData() - xhr.open('POST', url, true) + xhr.open('POST', url, false) formData.append('file', file) xhr.send(formData) diff --git a/weed/server/master_grpc_server.go b/weed/server/master_grpc_server.go index 93dce59d8..1ee214deb 100644 --- a/weed/server/master_grpc_server.go +++ b/weed/server/master_grpc_server.go @@ -1,16 +1,20 @@ package weed_server import ( + "context" "fmt" "net" "strings" "time" "github.com/chrislusf/raft" + "google.golang.org/grpc/peer" + "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/pb/master_pb" + "github.com/chrislusf/seaweedfs/weed/storage/backend" + "github.com/chrislusf/seaweedfs/weed/storage/needle" "github.com/chrislusf/seaweedfs/weed/topology" - "google.golang.org/grpc/peer" ) func (ms *MasterServer) SendHeartbeat(stream master_pb.Seaweed_SendHeartbeatServer) error { @@ -20,8 +24,10 @@ func (ms *MasterServer) SendHeartbeat(stream master_pb.Seaweed_SendHeartbeatServ defer func() { if dn != nil { - glog.V(0).Infof("unregister disconnected volume server %s:%d", dn.Ip, dn.Port) + // if the volume server disconnects and reconnects quickly + // the unregister and register can race with each other t.UnRegisterDataNode(dn) + glog.V(0).Infof("unregister disconnected volume server %s:%d", dn.Ip, dn.Port) message := &master_pb.VolumeLocation{ Url: dn.Url(), @@ -30,6 +36,9 @@ func (ms *MasterServer) SendHeartbeat(stream 
master_pb.Seaweed_SendHeartbeatServ for _, v := range dn.GetVolumes() { message.DeletedVids = append(message.DeletedVids, uint32(v.Id)) } + for _, s := range dn.GetEcShards() { + message.DeletedVids = append(message.DeletedVids, uint32(s.VolumeId)) + } if len(message.DeletedVids) > 0 { ms.clientChansLock.RLock() @@ -45,57 +54,109 @@ func (ms *MasterServer) SendHeartbeat(stream master_pb.Seaweed_SendHeartbeatServ for { heartbeat, err := stream.Recv() if err != nil { + if dn != nil { + glog.Warningf("SendHeartbeat.Recv server %s:%d : %v", dn.Ip, dn.Port, err) + } else { + glog.Warningf("SendHeartbeat.Recv: %v", err) + } return err } + t.Sequence.SetMax(heartbeat.MaxFileKey) + if dn == nil { - t.Sequence.SetMax(heartbeat.MaxFileKey) - if heartbeat.Ip == "" { - if pr, ok := peer.FromContext(stream.Context()); ok { - if pr.Addr != net.Addr(nil) { - heartbeat.Ip = pr.Addr.String()[0:strings.LastIndex(pr.Addr.String(), ":")] - glog.V(0).Infof("remote IP address is detected as %v", heartbeat.Ip) - } - } - } dcName, rackName := t.Configuration.Locate(heartbeat.Ip, heartbeat.DataCenter, heartbeat.Rack) dc := t.GetOrCreateDataCenter(dcName) rack := dc.GetOrCreateRack(rackName) dn = rack.GetOrCreateDataNode(heartbeat.Ip, int(heartbeat.Port), heartbeat.PublicUrl, - int(heartbeat.MaxVolumeCount)) + int64(heartbeat.MaxVolumeCount)) glog.V(0).Infof("added volume server %v:%d", heartbeat.GetIp(), heartbeat.GetPort()) if err := stream.Send(&master_pb.HeartbeatResponse{ - VolumeSizeLimit: uint64(ms.volumeSizeLimitMB) * 1024 * 1024, - SecretKey: string(ms.guard.SecretKey), + VolumeSizeLimit: uint64(ms.option.VolumeSizeLimitMB) * 1024 * 1024, + MetricsAddress: ms.option.MetricsAddress, + MetricsIntervalSeconds: uint32(ms.option.MetricsIntervalSec), + StorageBackends: backend.ToPbStorageBackends(), }); err != nil { + glog.Warningf("SendHeartbeat.Send volume size to %s:%d %v", dn.Ip, dn.Port, err) return err } } + if heartbeat.MaxVolumeCount != 0 && dn.GetMaxVolumeCount() != 
int64(heartbeat.MaxVolumeCount) { + delta := int64(heartbeat.MaxVolumeCount) - dn.GetMaxVolumeCount() + dn.UpAdjustMaxVolumeCountDelta(delta) + } + + glog.V(4).Infof("master received heartbeat %s", heartbeat.String()) message := &master_pb.VolumeLocation{ Url: dn.Url(), PublicUrl: dn.PublicUrl, } - if len(heartbeat.NewVids) > 0 || len(heartbeat.DeletedVids) > 0 { + if len(heartbeat.NewVolumes) > 0 || len(heartbeat.DeletedVolumes) > 0 { // process delta volume ids if exists for fast volume id updates - message.NewVids = append(message.NewVids, heartbeat.NewVids...) - message.DeletedVids = append(message.DeletedVids, heartbeat.DeletedVids...) - } else { + for _, volInfo := range heartbeat.NewVolumes { + message.NewVids = append(message.NewVids, volInfo.Id) + } + for _, volInfo := range heartbeat.DeletedVolumes { + message.DeletedVids = append(message.DeletedVids, volInfo.Id) + } + // update master internal volume layouts + t.IncrementalSyncDataNodeRegistration(heartbeat.NewVolumes, heartbeat.DeletedVolumes, dn) + } + + if len(heartbeat.Volumes) > 0 || heartbeat.HasNoVolumes { // process heartbeat.Volumes newVolumes, deletedVolumes := t.SyncDataNodeRegistration(heartbeat.Volumes, dn) for _, v := range newVolumes { + glog.V(0).Infof("master see new volume %d from %s", uint32(v.Id), dn.Url()) message.NewVids = append(message.NewVids, uint32(v.Id)) } for _, v := range deletedVolumes { + glog.V(0).Infof("master see deleted volume %d from %s", uint32(v.Id), dn.Url()) message.DeletedVids = append(message.DeletedVids, uint32(v.Id)) } } + if len(heartbeat.NewEcShards) > 0 || len(heartbeat.DeletedEcShards) > 0 { + + // update master internal volume layouts + t.IncrementalSyncDataNodeEcShards(heartbeat.NewEcShards, heartbeat.DeletedEcShards, dn) + + for _, s := range heartbeat.NewEcShards { + message.NewVids = append(message.NewVids, s.Id) + } + for _, s := range heartbeat.DeletedEcShards { + if dn.HasVolumesById(needle.VolumeId(s.Id)) { + continue + } + message.DeletedVids = 
append(message.DeletedVids, s.Id) + } + + } + + if len(heartbeat.EcShards) > 0 || heartbeat.HasNoEcShards { + glog.V(1).Infof("master recieved ec shards from %s: %+v", dn.Url(), heartbeat.EcShards) + newShards, deletedShards := t.SyncDataNodeEcShards(heartbeat.EcShards, dn) + + // broadcast the ec vid changes to master clients + for _, s := range newShards { + message.NewVids = append(message.NewVids, uint32(s.VolumeId)) + } + for _, s := range deletedShards { + if dn.HasVolumesById(s.VolumeId) { + continue + } + message.DeletedVids = append(message.DeletedVids, uint32(s.VolumeId)) + } + + } + if len(message.NewVids) > 0 || len(message.DeletedVids) > 0 { ms.clientChansLock.RLock() - for _, ch := range ms.clientChans { + for host, ch := range ms.clientChans { + glog.V(0).Infof("master send to %s: %s", host, message.String()) ch <- message } ms.clientChansLock.RUnlock() @@ -103,12 +164,15 @@ func (ms *MasterServer) SendHeartbeat(stream master_pb.Seaweed_SendHeartbeatServ // tell the volume servers about the leader newLeader, err := t.Leader() - if err == nil { - if err := stream.Send(&master_pb.HeartbeatResponse{ - Leader: newLeader, - }); err != nil { - return err - } + if err != nil { + glog.Warningf("SendHeartbeat find leader: %v", err) + return err + } + if err := stream.Send(&master_pb.HeartbeatResponse{ + Leader: newLeader, + }); err != nil { + glog.Warningf("SendHeartbeat.Send response to to %s:%d %v", dn.Ip, dn.Port, err) + return err } } } @@ -123,38 +187,16 @@ func (ms *MasterServer) KeepConnected(stream master_pb.Seaweed_KeepConnectedServ } if !ms.Topo.IsLeader() { - return raft.NotLeaderError - } - - // remember client address - ctx := stream.Context() - // fmt.Printf("FromContext %+v\n", ctx) - pr, ok := peer.FromContext(ctx) - if !ok { - glog.Error("failed to get peer from ctx") - return fmt.Errorf("failed to get peer from ctx") - } - if pr.Addr == net.Addr(nil) { - glog.Error("failed to get peer address") - return fmt.Errorf("failed to get peer 
address") + return ms.informNewLeader(stream) } - clientName := req.Name + pr.Addr.String() - glog.V(0).Infof("+ client %v", clientName) + peerAddress := findClientAddress(stream.Context(), req.GrpcPort) - messageChan := make(chan *master_pb.VolumeLocation) stopChan := make(chan bool) - ms.clientChansLock.Lock() - ms.clientChans[clientName] = messageChan - ms.clientChansLock.Unlock() + clientName, messageChan := ms.addClient(req.Name, peerAddress) - defer func() { - glog.V(0).Infof("- client %v", clientName) - ms.clientChansLock.Lock() - delete(ms.clientChans, clientName) - ms.clientChansLock.Unlock() - }() + defer ms.deleteClient(clientName) for _, message := range ms.Topo.ToVolumeLocations() { if err := stream.Send(message); err != nil { @@ -183,12 +225,79 @@ func (ms *MasterServer) KeepConnected(stream master_pb.Seaweed_KeepConnectedServ } case <-ticker.C: if !ms.Topo.IsLeader() { - return raft.NotLeaderError + return ms.informNewLeader(stream) } case <-stopChan: return nil } } +} + +func (ms *MasterServer) informNewLeader(stream master_pb.Seaweed_KeepConnectedServer) error { + leader, err := ms.Topo.Leader() + if err != nil { + glog.Errorf("topo leader: %v", err) + return raft.NotLeaderError + } + if err := stream.Send(&master_pb.VolumeLocation{ + Leader: leader, + }); err != nil { + return err + } return nil } + +func (ms *MasterServer) addClient(clientType string, clientAddress string) (clientName string, messageChan chan *master_pb.VolumeLocation) { + clientName = clientType + "@" + clientAddress + glog.V(0).Infof("+ client %v", clientName) + + messageChan = make(chan *master_pb.VolumeLocation) + + ms.clientChansLock.Lock() + ms.clientChans[clientName] = messageChan + ms.clientChansLock.Unlock() + return +} + +func (ms *MasterServer) deleteClient(clientName string) { + glog.V(0).Infof("- client %v", clientName) + ms.clientChansLock.Lock() + delete(ms.clientChans, clientName) + ms.clientChansLock.Unlock() +} + +func findClientAddress(ctx context.Context, 
grpcPort uint32) string { + // fmt.Printf("FromContext %+v\n", ctx) + pr, ok := peer.FromContext(ctx) + if !ok { + glog.Error("failed to get peer from ctx") + return "" + } + if pr.Addr == net.Addr(nil) { + glog.Error("failed to get peer address") + return "" + } + if grpcPort == 0 { + return pr.Addr.String() + } + if tcpAddr, ok := pr.Addr.(*net.TCPAddr); ok { + externalIP := tcpAddr.IP + return fmt.Sprintf("%s:%d", externalIP, grpcPort) + } + return pr.Addr.String() + +} + +func (ms *MasterServer) ListMasterClients(ctx context.Context, req *master_pb.ListMasterClientsRequest) (*master_pb.ListMasterClientsResponse, error) { + resp := &master_pb.ListMasterClientsResponse{} + ms.clientChansLock.RLock() + defer ms.clientChansLock.RUnlock() + + for k := range ms.clientChans { + if strings.HasPrefix(k, req.ClientType+"@") { + resp.GrpcAddresses = append(resp.GrpcAddresses, k[len(req.ClientType)+1:]) + } + } + return resp, nil +} diff --git a/weed/server/master_grpc_server_admin.go b/weed/server/master_grpc_server_admin.go new file mode 100644 index 000000000..7e7dcb36b --- /dev/null +++ b/weed/server/master_grpc_server_admin.go @@ -0,0 +1,138 @@ +package weed_server + +import ( + "context" + "fmt" + "math/rand" + "sync" + "time" + + "github.com/chrislusf/seaweedfs/weed/pb/master_pb" +) + +/* +How exclusive lock works? 
+----------- + +Shell +------ +When shell lock, + * lease an admin token (lockTime, token) + * start a goroutine to renew the admin token periodically + +When shell unlock + * stop the renewal goroutine + * sends a release lock request + +Master +------ +Master maintains: + * randomNumber + * lastLockTime +When master receives the lease/renew request from shell + If lastLockTime still fresh { + if is a renew and token is valid { + // for renew + generate the randomNumber => token + return + } + refuse + return + } else { + // for fresh lease request + generate the randomNumber => token + return + } + +When master receives the release lock request from shell + set the lastLockTime to zero + + +The volume server does not need to verify. +This makes the lock/unlock optional, similar to what golang code usually does. + +*/ + +const ( + LockDuration = 10 * time.Second +) + +type AdminLock struct { + accessSecret int64 + accessLockTime time.Time +} + +type AdminLocks struct { + locks map[string]*AdminLock + sync.RWMutex +} + +func NewAdminLocks() *AdminLocks { + return &AdminLocks{ + locks: make(map[string]*AdminLock), + } +} + +func (locks *AdminLocks) isLocked(lockName string) bool { + locks.RLock() + defer locks.RUnlock() + adminLock, found := locks.locks[lockName] + if !found { + return false + } + return adminLock.accessLockTime.Add(LockDuration).After(time.Now()) +} + +func (locks *AdminLocks) isValidToken(lockName string, ts time.Time, token int64) bool { + locks.RLock() + defer locks.RUnlock() + adminLock, found := locks.locks[lockName] + if !found { + return false + } + return adminLock.accessLockTime.Equal(ts) && adminLock.accessSecret == token +} + +func (locks *AdminLocks) generateToken(lockName string) (ts time.Time, token int64) { + locks.Lock() + defer locks.Unlock() + lock := &AdminLock{ + accessSecret: rand.Int63(), + accessLockTime: time.Now(), + } + locks.locks[lockName] = lock + return lock.accessLockTime, lock.accessSecret +} + +func (locks 
*AdminLocks) deleteLock(lockName string) { + locks.Lock() + defer locks.Unlock() + delete(locks.locks, lockName) +} + +func (ms *MasterServer) LeaseAdminToken(ctx context.Context, req *master_pb.LeaseAdminTokenRequest) (*master_pb.LeaseAdminTokenResponse, error) { + resp := &master_pb.LeaseAdminTokenResponse{} + + if ms.adminLocks.isLocked(req.LockName) { + if req.PreviousToken != 0 && ms.adminLocks.isValidToken(req.LockName, time.Unix(0, req.PreviousLockTime), req.PreviousToken) { + // for renew + ts, token := ms.adminLocks.generateToken(req.LockName) + resp.Token, resp.LockTsNs = token, ts.UnixNano() + return resp, nil + } + // refuse since still locked + return resp, fmt.Errorf("already locked") + } + // for fresh lease request + ts, token := ms.adminLocks.generateToken(req.LockName) + resp.Token, resp.LockTsNs = token, ts.UnixNano() + return resp, nil +} + +func (ms *MasterServer) ReleaseAdminToken(ctx context.Context, req *master_pb.ReleaseAdminTokenRequest) (*master_pb.ReleaseAdminTokenResponse, error) { + resp := &master_pb.ReleaseAdminTokenResponse{} + if ms.adminLocks.isValidToken(req.LockName, time.Unix(0, req.PreviousLockTime), req.PreviousToken) { + ms.adminLocks.deleteLock(req.LockName) + } + return resp, nil +} diff --git a/weed/server/master_grpc_server_collection.go b/weed/server/master_grpc_server_collection.go new file mode 100644 index 000000000..b92d6bcbe --- /dev/null +++ b/weed/server/master_grpc_server_collection.go @@ -0,0 +1,95 @@ +package weed_server + +import ( + "context" + + "github.com/chrislusf/raft" + + "github.com/chrislusf/seaweedfs/weed/operation" + "github.com/chrislusf/seaweedfs/weed/pb/master_pb" + "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb" +) + +func (ms *MasterServer) CollectionList(ctx context.Context, req *master_pb.CollectionListRequest) (*master_pb.CollectionListResponse, error) { + + if !ms.Topo.IsLeader() { + return nil, raft.NotLeaderError + } + + resp := &master_pb.CollectionListResponse{} + collections 
:= ms.Topo.ListCollections(req.IncludeNormalVolumes, req.IncludeEcVolumes) + for _, c := range collections { + resp.Collections = append(resp.Collections, &master_pb.Collection{ + Name: c, + }) + } + + return resp, nil +} + +func (ms *MasterServer) CollectionDelete(ctx context.Context, req *master_pb.CollectionDeleteRequest) (*master_pb.CollectionDeleteResponse, error) { + + if !ms.Topo.IsLeader() { + return nil, raft.NotLeaderError + } + + resp := &master_pb.CollectionDeleteResponse{} + + err := ms.doDeleteNormalCollection(req.Name) + + if err != nil { + return nil, err + } + + err = ms.doDeleteEcCollection(req.Name) + + if err != nil { + return nil, err + } + + return resp, nil +} + +func (ms *MasterServer) doDeleteNormalCollection(collectionName string) error { + + collection, ok := ms.Topo.FindCollection(collectionName) + if !ok { + return nil + } + + for _, server := range collection.ListVolumeServers() { + err := operation.WithVolumeServerClient(server.Url(), ms.grpcDialOption, func(client volume_server_pb.VolumeServerClient) error { + _, deleteErr := client.DeleteCollection(context.Background(), &volume_server_pb.DeleteCollectionRequest{ + Collection: collectionName, + }) + return deleteErr + }) + if err != nil { + return err + } + } + ms.Topo.DeleteCollection(collectionName) + + return nil +} + +func (ms *MasterServer) doDeleteEcCollection(collectionName string) error { + + listOfEcServers := ms.Topo.ListEcServersByCollection(collectionName) + + for _, server := range listOfEcServers { + err := operation.WithVolumeServerClient(server, ms.grpcDialOption, func(client volume_server_pb.VolumeServerClient) error { + _, deleteErr := client.DeleteCollection(context.Background(), &volume_server_pb.DeleteCollectionRequest{ + Collection: collectionName, + }) + return deleteErr + }) + if err != nil { + return err + } + } + + ms.Topo.DeleteEcCollection(collectionName) + + return nil +} diff --git a/weed/server/master_grpc_server_volume.go 
b/weed/server/master_grpc_server_volume.go index ae0819d2d..282c75679 100644 --- a/weed/server/master_grpc_server_volume.go +++ b/weed/server/master_grpc_server_volume.go @@ -5,8 +5,11 @@ import ( "fmt" "github.com/chrislusf/raft" + "github.com/chrislusf/seaweedfs/weed/pb/master_pb" - "github.com/chrislusf/seaweedfs/weed/storage" + "github.com/chrislusf/seaweedfs/weed/security" + "github.com/chrislusf/seaweedfs/weed/storage/needle" + "github.com/chrislusf/seaweedfs/weed/storage/super_block" "github.com/chrislusf/seaweedfs/weed/topology" ) @@ -48,25 +51,26 @@ func (ms *MasterServer) Assign(ctx context.Context, req *master_pb.AssignRequest } if req.Replication == "" { - req.Replication = ms.defaultReplicaPlacement + req.Replication = ms.option.DefaultReplicaPlacement } - replicaPlacement, err := storage.NewReplicaPlacementFromString(req.Replication) + replicaPlacement, err := super_block.NewReplicaPlacementFromString(req.Replication) if err != nil { return nil, err } - ttl, err := storage.ReadTTL(req.Ttl) + ttl, err := needle.ReadTTL(req.Ttl) if err != nil { return nil, err } option := &topology.VolumeGrowOption{ - Collection: req.Collection, - ReplicaPlacement: replicaPlacement, - Ttl: ttl, - Prealloacte: ms.preallocate, - DataCenter: req.DataCenter, - Rack: req.Rack, - DataNode: req.DataNode, + Collection: req.Collection, + ReplicaPlacement: replicaPlacement, + Ttl: ttl, + Prealloacte: ms.preallocateSize, + DataCenter: req.DataCenter, + Rack: req.Rack, + DataNode: req.DataNode, + MemoryMapMaxSizeMb: req.MemoryMapMaxSizeMb, } if !ms.Topo.HasWritableVolume(option) { @@ -75,7 +79,7 @@ func (ms *MasterServer) Assign(ctx context.Context, req *master_pb.AssignRequest } ms.vgLock.Lock() if !ms.Topo.HasWritableVolume(option) { - if _, err = ms.vg.AutomaticGrowByType(option, ms.Topo); err != nil { + if _, err = ms.vg.AutomaticGrowByType(option, ms.grpcDialOption, ms.Topo, int(req.WritableVolumeCount)); err != nil { ms.vgLock.Unlock() return nil, fmt.Errorf("Cannot grow 
volume group! %v", err) } @@ -92,6 +96,7 @@ func (ms *MasterServer) Assign(ctx context.Context, req *master_pb.AssignRequest Url: dn.Url(), PublicUrl: dn.PublicUrl, Count: count, + Auth: string(security.GenJwt(ms.guard.SigningKey, ms.guard.ExpiresAfterSec, fid)), }, nil } @@ -102,13 +107,13 @@ func (ms *MasterServer) Statistics(ctx context.Context, req *master_pb.Statistic } if req.Replication == "" { - req.Replication = ms.defaultReplicaPlacement + req.Replication = ms.option.DefaultReplicaPlacement } - replicaPlacement, err := storage.NewReplicaPlacementFromString(req.Replication) + replicaPlacement, err := super_block.NewReplicaPlacementFromString(req.Replication) if err != nil { return nil, err } - ttl, err := storage.ReadTTL(req.Ttl) + ttl, err := needle.ReadTTL(req.Ttl) if err != nil { return nil, err } @@ -116,11 +121,70 @@ func (ms *MasterServer) Statistics(ctx context.Context, req *master_pb.Statistic volumeLayout := ms.Topo.GetVolumeLayout(req.Collection, replicaPlacement, ttl) stats := volumeLayout.Stats() + totalSize := ms.Topo.GetMaxVolumeCount() * int64(ms.option.VolumeSizeLimitMB) * 1024 * 1024 + resp := &master_pb.StatisticsResponse{ - TotalSize: stats.TotalSize, + TotalSize: uint64(totalSize), UsedSize: stats.UsedSize, FileCount: stats.FileCount, } return resp, nil } + +func (ms *MasterServer) VolumeList(ctx context.Context, req *master_pb.VolumeListRequest) (*master_pb.VolumeListResponse, error) { + + if !ms.Topo.IsLeader() { + return nil, raft.NotLeaderError + } + + resp := &master_pb.VolumeListResponse{ + TopologyInfo: ms.Topo.ToTopologyInfo(), + VolumeSizeLimitMb: uint64(ms.option.VolumeSizeLimitMB), + } + + return resp, nil +} + +func (ms *MasterServer) LookupEcVolume(ctx context.Context, req *master_pb.LookupEcVolumeRequest) (*master_pb.LookupEcVolumeResponse, error) { + + if !ms.Topo.IsLeader() { + return nil, raft.NotLeaderError + } + + resp := &master_pb.LookupEcVolumeResponse{} + + ecLocations, found := 
ms.Topo.LookupEcShards(needle.VolumeId(req.VolumeId)) + + if !found { + return resp, fmt.Errorf("ec volume %d not found", req.VolumeId) + } + + resp.VolumeId = req.VolumeId + + for shardId, shardLocations := range ecLocations.Locations { + var locations []*master_pb.Location + for _, dn := range shardLocations { + locations = append(locations, &master_pb.Location{ + Url: string(dn.Id()), + PublicUrl: dn.PublicUrl, + }) + } + resp.ShardIdLocations = append(resp.ShardIdLocations, &master_pb.LookupEcVolumeResponse_EcShardIdLocation{ + ShardId: uint32(shardId), + Locations: locations, + }) + } + + return resp, nil +} + +func (ms *MasterServer) GetMasterConfiguration(ctx context.Context, req *master_pb.GetMasterConfigurationRequest) (*master_pb.GetMasterConfigurationResponse, error) { + + resp := &master_pb.GetMasterConfigurationResponse{ + MetricsAddress: ms.option.MetricsAddress, + MetricsIntervalSeconds: uint32(ms.option.MetricsIntervalSec), + } + + return resp, nil +} diff --git a/weed/server/master_server.go b/weed/server/master_server.go index f22925e56..9a490bb1f 100644 --- a/weed/server/master_server.go +++ b/weed/server/master_server.go @@ -5,27 +5,52 @@ import ( "net/http" "net/http/httputil" "net/url" + "os" + "regexp" + "strconv" + "strings" "sync" + "time" "github.com/chrislusf/raft" + "github.com/gorilla/mux" + "google.golang.org/grpc" + "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/pb/master_pb" "github.com/chrislusf/seaweedfs/weed/security" "github.com/chrislusf/seaweedfs/weed/sequence" + "github.com/chrislusf/seaweedfs/weed/shell" "github.com/chrislusf/seaweedfs/weed/topology" "github.com/chrislusf/seaweedfs/weed/util" - "github.com/gorilla/mux" + "github.com/chrislusf/seaweedfs/weed/wdclient" ) +const ( + SequencerType = "master.sequencer.type" + SequencerEtcdUrls = "master.sequencer.sequencer_etcd_urls" +) + +type MasterOption struct { + Host string + Port int + MetaFolder string + VolumeSizeLimitMB uint + 
VolumePreallocate bool + // PulseSeconds int + DefaultReplicaPlacement string + GarbageThreshold float64 + WhiteList []string + DisableHttp bool + MetricsAddress string + MetricsIntervalSec int +} + type MasterServer struct { - port int - metaFolder string - volumeSizeLimitMB uint - preallocate int64 - pulseSeconds int - defaultReplicaPlacement string - garbageThreshold float64 - guard *security.Guard + option *MasterOption + guard *security.Guard + + preallocateSize int64 Topo *topology.Topology vg *topology.VolumeGrowth @@ -36,56 +61,77 @@ type MasterServer struct { // notifying clients clientChansLock sync.RWMutex clientChans map[string]chan *master_pb.VolumeLocation + + grpcDialOption grpc.DialOption + + MasterClient *wdclient.MasterClient + + adminLocks *AdminLocks } -func NewMasterServer(r *mux.Router, port int, metaFolder string, - volumeSizeLimitMB uint, - preallocate bool, - pulseSeconds int, - defaultReplicaPlacement string, - garbageThreshold float64, - whiteList []string, - secureKey string, -) *MasterServer { +func NewMasterServer(r *mux.Router, option *MasterOption, peers []string) *MasterServer { + + v := util.GetViper() + signingKey := v.GetString("jwt.signing.key") + v.SetDefault("jwt.signing.expires_after_seconds", 10) + expiresAfterSec := v.GetInt("jwt.signing.expires_after_seconds") + + readSigningKey := v.GetString("jwt.signing.read.key") + v.SetDefault("jwt.signing.read.expires_after_seconds", 60) + readExpiresAfterSec := v.GetInt("jwt.signing.read.expires_after_seconds") + + v.SetDefault("master.replication.treat_replication_as_minimums", false) + replicationAsMin := v.GetBool("master.replication.treat_replication_as_minimums") var preallocateSize int64 - if preallocate { - preallocateSize = int64(volumeSizeLimitMB) * (1 << 20) + if option.VolumePreallocate { + preallocateSize = int64(option.VolumeSizeLimitMB) * (1 << 20) } + + grpcDialOption := security.LoadClientTLS(v, "grpc.master") ms := &MasterServer{ - port: port, - volumeSizeLimitMB: 
volumeSizeLimitMB, - preallocate: preallocateSize, - pulseSeconds: pulseSeconds, - defaultReplicaPlacement: defaultReplicaPlacement, - garbageThreshold: garbageThreshold, - clientChans: make(map[string]chan *master_pb.VolumeLocation), + option: option, + preallocateSize: preallocateSize, + clientChans: make(map[string]chan *master_pb.VolumeLocation), + grpcDialOption: grpcDialOption, + MasterClient: wdclient.NewMasterClient(grpcDialOption, "master", option.Host, 0, peers), + adminLocks: NewAdminLocks(), } ms.bounedLeaderChan = make(chan int, 16) - seq := sequence.NewMemorySequencer() - ms.Topo = topology.NewTopology("topo", seq, uint64(volumeSizeLimitMB)*1024*1024, pulseSeconds) + + seq := ms.createSequencer(option) + if nil == seq { + glog.Fatalf("create sequencer failed.") + } + ms.Topo = topology.NewTopology("topo", seq, uint64(ms.option.VolumeSizeLimitMB)*1024*1024, 5, replicationAsMin) ms.vg = topology.NewDefaultVolumeGrowth() - glog.V(0).Infoln("Volume Size Limit is", volumeSizeLimitMB, "MB") - - ms.guard = security.NewGuard(whiteList, secureKey) - - handleStaticResources2(r) - r.HandleFunc("/", ms.uiStatusHandler) - r.HandleFunc("/ui/index.html", ms.uiStatusHandler) - r.HandleFunc("/dir/assign", ms.proxyToLeader(ms.guard.WhiteList(ms.dirAssignHandler))) - r.HandleFunc("/dir/lookup", ms.proxyToLeader(ms.guard.WhiteList(ms.dirLookupHandler))) - r.HandleFunc("/dir/status", ms.proxyToLeader(ms.guard.WhiteList(ms.dirStatusHandler))) - r.HandleFunc("/col/delete", ms.proxyToLeader(ms.guard.WhiteList(ms.collectionDeleteHandler))) - r.HandleFunc("/vol/grow", ms.proxyToLeader(ms.guard.WhiteList(ms.volumeGrowHandler))) - r.HandleFunc("/vol/status", ms.proxyToLeader(ms.guard.WhiteList(ms.volumeStatusHandler))) - r.HandleFunc("/vol/vacuum", ms.proxyToLeader(ms.guard.WhiteList(ms.volumeVacuumHandler))) - r.HandleFunc("/submit", ms.guard.WhiteList(ms.submitFromMasterServerHandler)) - r.HandleFunc("/stats/health", ms.guard.WhiteList(statsHealthHandler)) - 
r.HandleFunc("/stats/counter", ms.guard.WhiteList(statsCounterHandler)) - r.HandleFunc("/stats/memory", ms.guard.WhiteList(statsMemoryHandler)) - r.HandleFunc("/{fileId}", ms.proxyToLeader(ms.redirectHandler)) - - ms.Topo.StartRefreshWritableVolumes(garbageThreshold, ms.preallocate) + glog.V(0).Infoln("Volume Size Limit is", ms.option.VolumeSizeLimitMB, "MB") + + ms.guard = security.NewGuard(ms.option.WhiteList, signingKey, expiresAfterSec, readSigningKey, readExpiresAfterSec) + + if !ms.option.DisableHttp { + handleStaticResources2(r) + r.HandleFunc("/", ms.proxyToLeader(ms.uiStatusHandler)) + r.HandleFunc("/ui/index.html", ms.uiStatusHandler) + r.HandleFunc("/dir/assign", ms.proxyToLeader(ms.guard.WhiteList(ms.dirAssignHandler))) + r.HandleFunc("/dir/lookup", ms.guard.WhiteList(ms.dirLookupHandler)) + r.HandleFunc("/dir/status", ms.proxyToLeader(ms.guard.WhiteList(ms.dirStatusHandler))) + r.HandleFunc("/col/delete", ms.proxyToLeader(ms.guard.WhiteList(ms.collectionDeleteHandler))) + r.HandleFunc("/vol/grow", ms.proxyToLeader(ms.guard.WhiteList(ms.volumeGrowHandler))) + r.HandleFunc("/vol/status", ms.proxyToLeader(ms.guard.WhiteList(ms.volumeStatusHandler))) + r.HandleFunc("/vol/vacuum", ms.proxyToLeader(ms.guard.WhiteList(ms.volumeVacuumHandler))) + r.HandleFunc("/submit", ms.guard.WhiteList(ms.submitFromMasterServerHandler)) + /* + r.HandleFunc("/stats/health", ms.guard.WhiteList(statsHealthHandler)) + r.HandleFunc("/stats/counter", ms.guard.WhiteList(statsCounterHandler)) + r.HandleFunc("/stats/memory", ms.guard.WhiteList(statsMemoryHandler)) + */ + r.HandleFunc("/{fileId}", ms.redirectHandler) + } + + ms.Topo.StartRefreshWritableVolumes(ms.grpcDialOption, ms.option.GarbageThreshold, ms.preallocateSize) + + ms.startAdminScripts() return ms } @@ -98,6 +144,9 @@ func (ms *MasterServer) SetRaftServer(raftServer *RaftServer) { glog.V(0).Infoln("[", ms.Topo.RaftServer.Name(), "]", ms.Topo.RaftServer.Leader(), "becomes leader.") } }) + 
ms.Topo.RaftServer.AddEventListener(raft.StateChangeEventType, func(e raft.Event) { + glog.V(0).Infof("state change: %+v", e) + }) if ms.Topo.IsLeader() { glog.V(0).Infoln("[", ms.Topo.RaftServer.Name(), "]", "I am the leader!") } else { @@ -107,7 +156,7 @@ func (ms *MasterServer) SetRaftServer(raftServer *RaftServer) { } } -func (ms *MasterServer) proxyToLeader(f func(w http.ResponseWriter, r *http.Request)) func(w http.ResponseWriter, r *http.Request) { +func (ms *MasterServer) proxyToLeader(f http.HandlerFunc) http.HandlerFunc { return func(w http.ResponseWriter, r *http.Request) { if ms.Topo.IsLeader() { f(w, r) @@ -133,8 +182,107 @@ func (ms *MasterServer) proxyToLeader(f func(w http.ResponseWriter, r *http.Requ proxy.Transport = util.Transport proxy.ServeHTTP(w, r) } else { - //drop it to the floor - //writeJsonError(w, r, errors.New(ms.Topo.RaftServer.Name()+" does not know Leader yet:"+ms.Topo.RaftServer.Leader())) + // drop it to the floor + // writeJsonError(w, r, errors.New(ms.Topo.RaftServer.Name()+" does not know Leader yet:"+ms.Topo.RaftServer.Leader())) + } + } +} + +func (ms *MasterServer) startAdminScripts() { + var err error + + v := util.GetViper() + adminScripts := v.GetString("master.maintenance.scripts") + glog.V(0).Infof("adminScripts:\n%v", adminScripts) + if adminScripts == "" { + return + } + + v.SetDefault("master.maintenance.sleep_minutes", 17) + sleepMinutes := v.GetInt("master.maintenance.sleep_minutes") + + v.SetDefault("master.filer.default", "localhost:8888") + filerHostPort := v.GetString("master.filer.default") + + scriptLines := strings.Split(adminScripts, "\n") + if !strings.Contains(adminScripts, "lock") { + scriptLines = append(append([]string{}, "lock"), scriptLines...) 
+ scriptLines = append(scriptLines, "unlock") + } + + masterAddress := "localhost:" + strconv.Itoa(ms.option.Port) + + var shellOptions shell.ShellOptions + shellOptions.GrpcDialOption = security.LoadClientTLS(v, "grpc.master") + shellOptions.Masters = &masterAddress + + shellOptions.FilerHost, shellOptions.FilerPort, err = util.ParseHostPort(filerHostPort) + shellOptions.Directory = "/" + if err != nil { + glog.V(0).Infof("failed to parse master.filer.default = %s : %v\n", filerHostPort, err) + return + } + + commandEnv := shell.NewCommandEnv(shellOptions) + + reg, _ := regexp.Compile(`'.*?'|".*?"|\S+`) + + go commandEnv.MasterClient.KeepConnectedToMaster() + + go func() { + commandEnv.MasterClient.WaitUntilConnected() + + c := time.Tick(time.Duration(sleepMinutes) * time.Minute) + for range c { + if ms.Topo.IsLeader() { + for _, line := range scriptLines { + for _, c := range strings.Split(line, ";") { + processEachCmd(reg, c, commandEnv) + } + } + } + } + }() +} + +func processEachCmd(reg *regexp.Regexp, line string, commandEnv *shell.CommandEnv) { + cmds := reg.FindAllString(line, -1) + if len(cmds) == 0 { + return + } + args := make([]string, len(cmds[1:])) + for i := range args { + args[i] = strings.Trim(string(cmds[1+i]), "\"'") + } + cmd := strings.ToLower(cmds[0]) + + for _, c := range shell.Commands { + if c.Name() == cmd { + glog.V(0).Infof("executing: %s %v", cmd, args) + if err := c.Do(args, commandEnv, os.Stdout); err != nil { + glog.V(0).Infof("error: %v", err) + } + } + } +} + +func (ms *MasterServer) createSequencer(option *MasterOption) sequence.Sequencer { + var seq sequence.Sequencer + v := util.GetViper() + seqType := strings.ToLower(v.GetString(SequencerType)) + glog.V(1).Infof("[%s] : [%s]", SequencerType, seqType) + switch strings.ToLower(seqType) { + case "etcd": + var err error + urls := v.GetString(SequencerEtcdUrls) + glog.V(0).Infof("[%s] : [%s]", SequencerEtcdUrls, urls) + seq, err = sequence.NewEtcdSequencer(urls, option.MetaFolder) + 
if err != nil { + glog.Error(err) + seq = nil } + default: + seq = sequence.NewMemorySequencer() } + return seq } diff --git a/weed/server/master_server_handlers.go b/weed/server/master_server_handlers.go index a797dddfc..ebcb7efd2 100644 --- a/weed/server/master_server_handlers.go +++ b/weed/server/master_server_handlers.go @@ -7,8 +7,9 @@ import ( "strings" "github.com/chrislusf/seaweedfs/weed/operation" + "github.com/chrislusf/seaweedfs/weed/security" "github.com/chrislusf/seaweedfs/weed/stats" - "github.com/chrislusf/seaweedfs/weed/storage" + "github.com/chrislusf/seaweedfs/weed/storage/needle" ) func (ms *MasterServer) lookupVolumeId(vids []string, collection string) (volumeLocations map[string]operation.LookupResult) { @@ -21,43 +22,77 @@ func (ms *MasterServer) lookupVolumeId(vids []string, collection string) (volume if _, ok := volumeLocations[vid]; ok { continue } - volumeId, err := storage.NewVolumeId(vid) - if err == nil { - machines := ms.Topo.Lookup(collection, volumeId) - if machines != nil { - var ret []operation.Location - for _, dn := range machines { - ret = append(ret, operation.Location{Url: dn.Url(), PublicUrl: dn.PublicUrl}) - } - volumeLocations[vid] = operation.LookupResult{VolumeId: vid, Locations: ret} - } else { - volumeLocations[vid] = operation.LookupResult{VolumeId: vid, Error: fmt.Sprintf("volumeId %s not found.", vid)} - } - } else { - volumeLocations[vid] = operation.LookupResult{VolumeId: vid, Error: fmt.Sprintf("Unknown volumeId format: %s", vid)} - } + volumeLocations[vid] = ms.findVolumeLocation(collection, vid) } return } -// Takes one volumeId only, can not do batch lookup +// If "fileId" is provided, this returns the fileId location and a JWT to update or delete the file. 
+// If "volumeId" is provided, this only returns the volumeId location func (ms *MasterServer) dirLookupHandler(w http.ResponseWriter, r *http.Request) { vid := r.FormValue("volumeId") - commaSep := strings.Index(vid, ",") - if commaSep > 0 { - vid = vid[0:commaSep] + if vid != "" { + // backward compatible + commaSep := strings.Index(vid, ",") + if commaSep > 0 { + vid = vid[0:commaSep] + } } - vids := []string{vid} - collection := r.FormValue("collection") //optional, but can be faster if too many collections - volumeLocations := ms.lookupVolumeId(vids, collection) - location := volumeLocations[vid] + fileId := r.FormValue("fileId") + if fileId != "" { + commaSep := strings.Index(fileId, ",") + if commaSep > 0 { + vid = fileId[0:commaSep] + } + } + collection := r.FormValue("collection") // optional, but can be faster if too many collections + location := ms.findVolumeLocation(collection, vid) httpStatus := http.StatusOK - if location.Error != "" { + if location.Error != "" || location.Locations == nil { httpStatus = http.StatusNotFound + } else { + forRead := r.FormValue("read") + isRead := forRead == "yes" + ms.maybeAddJwtAuthorization(w, fileId, !isRead) } writeJsonQuiet(w, r, httpStatus, location) } +// findVolumeLocation finds the volume location from master topo if it is leader, +// or from master client if not leader +func (ms *MasterServer) findVolumeLocation(collection, vid string) operation.LookupResult { + var locations []operation.Location + var err error + if ms.Topo.IsLeader() { + volumeId, newVolumeIdErr := needle.NewVolumeId(vid) + if newVolumeIdErr != nil { + err = fmt.Errorf("Unknown volume id %s", vid) + } else { + machines := ms.Topo.Lookup(collection, volumeId) + for _, loc := range machines { + locations = append(locations, operation.Location{Url: loc.Url(), PublicUrl: loc.PublicUrl}) + } + } + } else { + machines, getVidLocationsErr := ms.MasterClient.GetVidLocations(vid) + for _, loc := range machines { + locations = append(locations, 
operation.Location{Url: loc.Url, PublicUrl: loc.PublicUrl}) + } + err = getVidLocationsErr + } + if len(locations) == 0 && err == nil { + err = fmt.Errorf("volume id %s not found", vid) + } + ret := operation.LookupResult{ + VolumeId: vid, + Locations: locations, + } + if err != nil { + ret.Error = err.Error() + } + return ret +} + func (ms *MasterServer) dirAssignHandler(w http.ResponseWriter, r *http.Request) { stats.AssignRequest() requestedCount, e := strconv.ParseUint(r.FormValue("count"), 10, 64) @@ -65,6 +100,11 @@ func (ms *MasterServer) dirAssignHandler(w http.ResponseWriter, r *http.Request) requestedCount = 1 } + writableVolumeCount, e := strconv.Atoi(r.FormValue("writableVolumeCount")) + if e != nil { + writableVolumeCount = 0 + } + option, err := ms.getVolumeGrowOption(r) if err != nil { writeJsonQuiet(w, r, http.StatusNotAcceptable, operation.AssignResult{Error: err.Error()}) @@ -79,7 +119,7 @@ func (ms *MasterServer) dirAssignHandler(w http.ResponseWriter, r *http.Request) ms.vgLock.Lock() defer ms.vgLock.Unlock() if !ms.Topo.HasWritableVolume(option) { - if _, err = ms.vg.AutomaticGrowByType(option, ms.Topo); err != nil { + if _, err = ms.vg.AutomaticGrowByType(option, ms.grpcDialOption, ms.Topo, writableVolumeCount); err != nil { writeJsonError(w, r, http.StatusInternalServerError, fmt.Errorf("Cannot grow volume group! 
%v", err)) return @@ -88,8 +128,23 @@ func (ms *MasterServer) dirAssignHandler(w http.ResponseWriter, r *http.Request) } fid, count, dn, err := ms.Topo.PickForWrite(requestedCount, option) if err == nil { + ms.maybeAddJwtAuthorization(w, fid, true) writeJsonQuiet(w, r, http.StatusOK, operation.AssignResult{Fid: fid, Url: dn.Url(), PublicUrl: dn.PublicUrl, Count: count}) } else { writeJsonQuiet(w, r, http.StatusNotAcceptable, operation.AssignResult{Error: err.Error()}) } } + +func (ms *MasterServer) maybeAddJwtAuthorization(w http.ResponseWriter, fileId string, isWrite bool) { + var encodedJwt security.EncodedJwt + if isWrite { + encodedJwt = security.GenJwt(ms.guard.SigningKey, ms.guard.ExpiresAfterSec, fileId) + } else { + encodedJwt = security.GenJwt(ms.guard.ReadSigningKey, ms.guard.ReadExpiresAfterSec, fileId) + } + if encodedJwt == "" { + return + } + + w.Header().Set("Authorization", "BEARER "+string(encodedJwt)) +} diff --git a/weed/server/master_server_handlers_admin.go b/weed/server/master_server_handlers_admin.go index 3a2662908..7595c0171 100644 --- a/weed/server/master_server_handlers_admin.go +++ b/weed/server/master_server_handlers_admin.go @@ -2,33 +2,31 @@ package weed_server import ( "context" - "errors" "fmt" "math/rand" "net/http" "strconv" - "time" "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/operation" "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb" - "github.com/chrislusf/seaweedfs/weed/storage" + "github.com/chrislusf/seaweedfs/weed/storage/backend/memory_map" + "github.com/chrislusf/seaweedfs/weed/storage/needle" + "github.com/chrislusf/seaweedfs/weed/storage/super_block" "github.com/chrislusf/seaweedfs/weed/topology" "github.com/chrislusf/seaweedfs/weed/util" ) func (ms *MasterServer) collectionDeleteHandler(w http.ResponseWriter, r *http.Request) { - collection, ok := ms.Topo.FindCollection(r.FormValue("collection")) + collectionName := r.FormValue("collection") + collection, ok := 
ms.Topo.FindCollection(collectionName) if !ok { - writeJsonError(w, r, http.StatusBadRequest, fmt.Errorf("collection %s does not exist", r.FormValue("collection"))) + writeJsonError(w, r, http.StatusBadRequest, fmt.Errorf("collection %s does not exist", collectionName)) return } for _, server := range collection.ListVolumeServers() { - err := operation.WithVolumeServerClient(server.Url(), func(client volume_server_pb.VolumeServerClient) error { - ctx, cancel := context.WithTimeout(context.Background(), time.Duration(5*time.Second)) - defer cancel() - - _, deleteErr := client.DeleteCollection(ctx, &volume_server_pb.DeleteCollectionRequest{ + err := operation.WithVolumeServerClient(server.Url(), ms.grpcDialOption, func(client volume_server_pb.VolumeServerClient) error { + _, deleteErr := client.DeleteCollection(context.Background(), &volume_server_pb.DeleteCollectionRequest{ Collection: collection.Name, }) return deleteErr @@ -38,29 +36,33 @@ func (ms *MasterServer) collectionDeleteHandler(w http.ResponseWriter, r *http.R return } } - ms.Topo.DeleteCollection(r.FormValue("collection")) + ms.Topo.DeleteCollection(collectionName) + + w.WriteHeader(http.StatusNoContent) + return } func (ms *MasterServer) dirStatusHandler(w http.ResponseWriter, r *http.Request) { m := make(map[string]interface{}) - m["Version"] = util.VERSION + m["Version"] = util.Version() m["Topology"] = ms.Topo.ToMap() writeJsonQuiet(w, r, http.StatusOK, m) } func (ms *MasterServer) volumeVacuumHandler(w http.ResponseWriter, r *http.Request) { gcString := r.FormValue("garbageThreshold") - gcThreshold := ms.garbageThreshold + gcThreshold := ms.option.GarbageThreshold if gcString != "" { var err error gcThreshold, err = strconv.ParseFloat(gcString, 32) if err != nil { glog.V(0).Infof("garbageThreshold %s is not a valid float number: %v", gcString, err) + writeJsonError(w, r, http.StatusNotAcceptable, fmt.Errorf("garbageThreshold %s is not a valid float number", gcString)) return } } - 
glog.Infoln("garbageThreshold =", gcThreshold) - ms.Topo.Vacuum(gcThreshold, ms.preallocate) + // glog.Infoln("garbageThreshold =", gcThreshold) + ms.Topo.Vacuum(ms.grpcDialOption, gcThreshold, ms.preallocateSize) ms.dirStatusHandler(w, r) } @@ -71,17 +73,17 @@ func (ms *MasterServer) volumeGrowHandler(w http.ResponseWriter, r *http.Request writeJsonError(w, r, http.StatusNotAcceptable, err) return } - if err == nil { - if count, err = strconv.Atoi(r.FormValue("count")); err == nil { - if ms.Topo.FreeSpace() < count*option.ReplicaPlacement.GetCopyCount() { - err = errors.New("Only " + strconv.Itoa(ms.Topo.FreeSpace()) + " volumes left! Not enough for " + strconv.Itoa(count*option.ReplicaPlacement.GetCopyCount())) - } else { - count, err = ms.vg.GrowByCountAndType(count, option, ms.Topo) - } + + if count, err = strconv.Atoi(r.FormValue("count")); err == nil { + if ms.Topo.FreeSpace() < int64(count*option.ReplicaPlacement.GetCopyCount()) { + err = fmt.Errorf("only %d volumes left, not enough for %d", ms.Topo.FreeSpace(), count*option.ReplicaPlacement.GetCopyCount()) } else { - err = errors.New("parameter count is not found") + count, err = ms.vg.GrowByCountAndType(ms.grpcDialOption, count, option, ms.Topo) } + } else { + err = fmt.Errorf("can not parse parameter count %s", r.FormValue("count")) } + if err != nil { writeJsonError(w, r, http.StatusNotAcceptable, err) } else { @@ -91,30 +93,26 @@ func (ms *MasterServer) volumeGrowHandler(w http.ResponseWriter, r *http.Request func (ms *MasterServer) volumeStatusHandler(w http.ResponseWriter, r *http.Request) { m := make(map[string]interface{}) - m["Version"] = util.VERSION + m["Version"] = util.Version() m["Volumes"] = ms.Topo.ToVolumeMap() writeJsonQuiet(w, r, http.StatusOK, m) } func (ms *MasterServer) redirectHandler(w http.ResponseWriter, r *http.Request) { vid, _, _, _, _ := parseURLPath(r.URL.Path) - volumeId, err := storage.NewVolumeId(vid) - if err != nil { - debug("parsing error:", err, r.URL.Path) - return - } 
collection := r.FormValue("collection") - machines := ms.Topo.Lookup(collection, volumeId) - if machines != nil && len(machines) > 0 { + location := ms.findVolumeLocation(collection, vid) + if location.Error == "" { + loc := location.Locations[rand.Intn(len(location.Locations))] var url string if r.URL.RawQuery != "" { - url = util.NormalizeUrl(machines[rand.Intn(len(machines))].PublicUrl) + r.URL.Path + "?" + r.URL.RawQuery + url = util.NormalizeUrl(loc.PublicUrl) + r.URL.Path + "?" + r.URL.RawQuery } else { - url = util.NormalizeUrl(machines[rand.Intn(len(machines))].PublicUrl) + r.URL.Path + url = util.NormalizeUrl(loc.PublicUrl) + r.URL.Path } http.Redirect(w, r, url, http.StatusMovedPermanently) } else { - writeJsonError(w, r, http.StatusNotFound, fmt.Errorf("volume id %d or collection %s not found", volumeId, collection)) + writeJsonError(w, r, http.StatusNotFound, fmt.Errorf("volume id %s not found: %s", vid, location.Error)) } } @@ -122,17 +120,17 @@ func (ms *MasterServer) selfUrl(r *http.Request) string { if r.Host != "" { return r.Host } - return "localhost:" + strconv.Itoa(ms.port) + return "localhost:" + strconv.Itoa(ms.option.Port) } func (ms *MasterServer) submitFromMasterServerHandler(w http.ResponseWriter, r *http.Request) { if ms.Topo.IsLeader() { - submitForClientHandler(w, r, ms.selfUrl(r)) + submitForClientHandler(w, r, ms.selfUrl(r), ms.grpcDialOption) } else { masterUrl, err := ms.Topo.Leader() if err != nil { writeJsonError(w, r, http.StatusInternalServerError, err) } else { - submitForClientHandler(w, r, masterUrl) + submitForClientHandler(w, r, masterUrl, ms.grpcDialOption) } } } @@ -145,17 +143,22 @@ func (ms *MasterServer) HasWritableVolume(option *topology.VolumeGrowOption) boo func (ms *MasterServer) getVolumeGrowOption(r *http.Request) (*topology.VolumeGrowOption, error) { replicationString := r.FormValue("replication") if replicationString == "" { - replicationString = ms.defaultReplicaPlacement + replicationString = 
ms.option.DefaultReplicaPlacement + } + replicaPlacement, err := super_block.NewReplicaPlacementFromString(replicationString) + if err != nil { + return nil, err } - replicaPlacement, err := storage.NewReplicaPlacementFromString(replicationString) + ttl, err := needle.ReadTTL(r.FormValue("ttl")) if err != nil { return nil, err } - ttl, err := storage.ReadTTL(r.FormValue("ttl")) + memoryMapMaxSizeMb, err := memory_map.ReadMemoryMapMaxSizeMb(r.FormValue("memoryMapMaxSizeMb")) if err != nil { return nil, err } - preallocate := ms.preallocate + + preallocate := ms.preallocateSize if r.FormValue("preallocate") != "" { preallocate, err = strconv.ParseInt(r.FormValue("preallocate"), 10, 64) if err != nil { @@ -163,13 +166,14 @@ func (ms *MasterServer) getVolumeGrowOption(r *http.Request) (*topology.VolumeGr } } volumeGrowOption := &topology.VolumeGrowOption{ - Collection: r.FormValue("collection"), - ReplicaPlacement: replicaPlacement, - Ttl: ttl, - Prealloacte: preallocate, - DataCenter: r.FormValue("dataCenter"), - Rack: r.FormValue("rack"), - DataNode: r.FormValue("dataNode"), + Collection: r.FormValue("collection"), + ReplicaPlacement: replicaPlacement, + Ttl: ttl, + Prealloacte: preallocate, + DataCenter: r.FormValue("dataCenter"), + Rack: r.FormValue("rack"), + DataNode: r.FormValue("dataNode"), + MemoryMapMaxSizeMb: memoryMapMaxSizeMb, } return volumeGrowOption, nil } diff --git a/weed/server/master_server_handlers_ui.go b/weed/server/master_server_handlers_ui.go index f241df87f..9cd58158b 100644 --- a/weed/server/master_server_handlers_ui.go +++ b/weed/server/master_server_handlers_ui.go @@ -2,6 +2,7 @@ package weed_server import ( "net/http" + "time" "github.com/chrislusf/raft" ui "github.com/chrislusf/seaweedfs/weed/server/master_ui" @@ -11,7 +12,7 @@ import ( func (ms *MasterServer) uiStatusHandler(w http.ResponseWriter, r *http.Request) { infos := make(map[string]interface{}) - infos["Version"] = util.VERSION + infos["Up Time"] = 
time.Now().Sub(startTime).String() args := struct { Version string Topology interface{} @@ -19,7 +20,7 @@ func (ms *MasterServer) uiStatusHandler(w http.ResponseWriter, r *http.Request) Stats map[string]interface{} Counters *stats.ServerStats }{ - util.VERSION, + util.Version(), ms.Topo.ToMap(), ms.Topo.RaftServer, infos, diff --git a/weed/server/master_ui/templates.go b/weed/server/master_ui/templates.go index f32e8e61b..7189064d0 100644 --- a/weed/server/master_ui/templates.go +++ b/weed/server/master_ui/templates.go @@ -41,7 +41,7 @@ var StatusTpl = template.Must(template.New("status").Parse(` @@ -76,6 +76,8 @@ var StatusTpl = template.Must(template.New("status").Parse(` Rack RemoteAddr #Volumes + Volume Ids + #ErasureCodingShards Max @@ -88,6 +90,8 @@ var StatusTpl = template.Must(template.New("status").Parse(` {{ $rack.Id }} {{ $dn.Url }} {{ $dn.Volumes }} + {{ $dn.VolumeIds}} + {{ $dn.EcShards }} {{ $dn.Max }} {{ end }} diff --git a/weed/server/raft_server.go b/weed/server/raft_server.go index 2cc8252b8..958680d2b 100644 --- a/weed/server/raft_server.go +++ b/weed/server/raft_server.go @@ -3,36 +3,37 @@ package weed_server import ( "encoding/json" "io/ioutil" - "math/rand" "os" "path" "reflect" "sort" - "strings" "time" + "google.golang.org/grpc" + + "github.com/chrislusf/seaweedfs/weed/pb" + "github.com/chrislusf/raft" + "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/topology" - "github.com/gorilla/mux" ) type RaftServer struct { peers []string // initial peers to join with raftServer raft.Server dataDir string - httpAddr string - router *mux.Router + serverAddr string topo *topology.Topology + *raft.GrpcServer } -func NewRaftServer(r *mux.Router, peers []string, httpAddr string, dataDir string, topo *topology.Topology, pulseSeconds int) *RaftServer { +func NewRaftServer(grpcDialOption grpc.DialOption, peers []string, serverAddr, dataDir string, topo *topology.Topology, pulseSeconds int) *RaftServer { s := &RaftServer{ - 
peers: peers, - httpAddr: httpAddr, - dataDir: dataDir, - router: r, - topo: topo, + peers: peers, + serverAddr: serverAddr, + dataDir: dataDir, + topo: topo, } if glog.V(4) { @@ -42,41 +43,40 @@ func NewRaftServer(r *mux.Router, peers []string, httpAddr string, dataDir strin raft.RegisterCommand(&topology.MaxVolumeIdCommand{}) var err error - transporter := raft.NewHTTPTransporter("/cluster", time.Second) - transporter.Transport.MaxIdleConnsPerHost = 1024 - glog.V(0).Infof("Starting RaftServer with %v", httpAddr) + transporter := raft.NewGrpcTransporter(grpcDialOption) + glog.V(0).Infof("Starting RaftServer with %v", serverAddr) + // always clear previous metadata + os.RemoveAll(path.Join(s.dataDir, "conf")) + os.RemoveAll(path.Join(s.dataDir, "log")) + os.RemoveAll(path.Join(s.dataDir, "snapshot")) // Clear old cluster configurations if peers are changed - if oldPeers, changed := isPeersChanged(s.dataDir, httpAddr, s.peers); changed { + if oldPeers, changed := isPeersChanged(s.dataDir, serverAddr, s.peers); changed { glog.V(0).Infof("Peers Change: %v => %v", oldPeers, s.peers) - os.RemoveAll(path.Join(s.dataDir, "conf")) - os.RemoveAll(path.Join(s.dataDir, "log")) - os.RemoveAll(path.Join(s.dataDir, "snapshot")) } - s.raftServer, err = raft.NewServer(s.httpAddr, s.dataDir, transporter, nil, topo, "") + s.raftServer, err = raft.NewServer(s.serverAddr, s.dataDir, transporter, nil, topo, "") if err != nil { glog.V(0).Infoln(err) return nil } - transporter.Install(s.raftServer, s) s.raftServer.SetHeartbeatInterval(500 * time.Millisecond) s.raftServer.SetElectionTimeout(time.Duration(pulseSeconds) * 500 * time.Millisecond) s.raftServer.Start() - s.router.HandleFunc("/cluster/status", s.statusHandler).Methods("GET") - for _, peer := range s.peers { - s.raftServer.AddPeer(peer, "http://"+peer) + s.raftServer.AddPeer(peer, pb.ServerToGrpcAddress(peer)) } - time.Sleep(time.Duration(1000+rand.Int31n(3000)) * time.Millisecond) - if s.raftServer.IsLogEmpty() { + + 
s.GrpcServer = raft.NewGrpcServer(s.raftServer) + + if s.raftServer.IsLogEmpty() && isTheFirstOne(serverAddr, s.peers) { // Initialize the server by joining itself. glog.V(0).Infoln("Initializing new cluster") _, err := s.raftServer.Do(&raft.DefaultJoinCommand{ Name: s.raftServer.Name(), - ConnectionString: "http://" + s.httpAddr, + ConnectionString: pb.ServerToGrpcAddress(s.serverAddr), }) if err != nil { @@ -94,7 +94,7 @@ func (s *RaftServer) Peers() (members []string) { peers := s.raftServer.Peers() for _, p := range peers { - members = append(members, strings.TrimPrefix(p.ConnectionString, "http://")) + members = append(members, p.Name) } return @@ -113,7 +113,7 @@ func isPeersChanged(dir string, self string, peers []string) (oldPeers []string, } for _, p := range conf.Peers { - oldPeers = append(oldPeers, strings.TrimPrefix(p.ConnectionString, "http://")) + oldPeers = append(oldPeers, p.Name) } oldPeers = append(oldPeers, self) @@ -127,3 +127,11 @@ func isPeersChanged(dir string, self string, peers []string) (oldPeers []string, return oldPeers, !reflect.DeepEqual(peers, oldPeers) } + +func isTheFirstOne(self string, peers []string) bool { + sort.Strings(peers) + if len(peers) <= 0 { + return true + } + return self == peers[0] +} diff --git a/weed/server/raft_server_handlers.go b/weed/server/raft_server_handlers.go index 627fe354e..fd38cb977 100644 --- a/weed/server/raft_server_handlers.go +++ b/weed/server/raft_server_handlers.go @@ -1,16 +1,17 @@ package weed_server import ( - "github.com/chrislusf/seaweedfs/weed/operation" "net/http" ) -func (s *RaftServer) HandleFunc(pattern string, handler func(http.ResponseWriter, *http.Request)) { - s.router.HandleFunc(pattern, handler) +type ClusterStatusResult struct { + IsLeader bool `json:"IsLeader,omitempty"` + Leader string `json:"Leader,omitempty"` + Peers []string `json:"Peers,omitempty"` } -func (s *RaftServer) statusHandler(w http.ResponseWriter, r *http.Request) { - ret := operation.ClusterStatusResult{ +func 
(s *RaftServer) StatusHandler(w http.ResponseWriter, r *http.Request) { + ret := ClusterStatusResult{ IsLeader: s.topo.IsLeader(), Peers: s.Peers(), } diff --git a/weed/server/volume_grpc_admin.go b/weed/server/volume_grpc_admin.go index 429ca9b68..27b21ac09 100644 --- a/weed/server/volume_grpc_admin.go +++ b/weed/server/volume_grpc_admin.go @@ -2,10 +2,14 @@ package weed_server import ( "context" + "fmt" + "path/filepath" "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb" - "github.com/chrislusf/seaweedfs/weed/storage" + "github.com/chrislusf/seaweedfs/weed/stats" + "github.com/chrislusf/seaweedfs/weed/storage/needle" + "github.com/chrislusf/seaweedfs/weed/storage/super_block" ) func (vs *VolumeServer) DeleteCollection(ctx context.Context, req *volume_server_pb.DeleteCollectionRequest) (*volume_server_pb.DeleteCollectionResponse, error) { @@ -24,17 +28,18 @@ func (vs *VolumeServer) DeleteCollection(ctx context.Context, req *volume_server } -func (vs *VolumeServer) AssignVolume(ctx context.Context, req *volume_server_pb.AssignVolumeRequest) (*volume_server_pb.AssignVolumeResponse, error) { +func (vs *VolumeServer) AllocateVolume(ctx context.Context, req *volume_server_pb.AllocateVolumeRequest) (*volume_server_pb.AllocateVolumeResponse, error) { - resp := &volume_server_pb.AssignVolumeResponse{} + resp := &volume_server_pb.AllocateVolumeResponse{} err := vs.store.AddVolume( - storage.VolumeId(req.VolumdId), + needle.VolumeId(req.VolumeId), req.Collection, vs.needleMapKind, req.Replication, req.Ttl, req.Preallocate, + req.MemoryMapMaxSizeMb, ) if err != nil { @@ -51,7 +56,7 @@ func (vs *VolumeServer) VolumeMount(ctx context.Context, req *volume_server_pb.V resp := &volume_server_pb.VolumeMountResponse{} - err := vs.store.MountVolume(storage.VolumeId(req.VolumdId)) + err := vs.store.MountVolume(needle.VolumeId(req.VolumeId)) if err != nil { glog.Errorf("volume mount %v: %v", req, err) @@ -67,7 +72,7 @@ func (vs 
*VolumeServer) VolumeUnmount(ctx context.Context, req *volume_server_pb resp := &volume_server_pb.VolumeUnmountResponse{} - err := vs.store.UnmountVolume(storage.VolumeId(req.VolumdId)) + err := vs.store.UnmountVolume(needle.VolumeId(req.VolumeId)) if err != nil { glog.Errorf("volume unmount %v: %v", req, err) @@ -83,7 +88,7 @@ func (vs *VolumeServer) VolumeDelete(ctx context.Context, req *volume_server_pb. resp := &volume_server_pb.VolumeDeleteResponse{} - err := vs.store.DeleteVolume(storage.VolumeId(req.VolumdId)) + err := vs.store.DeleteVolume(needle.VolumeId(req.VolumeId)) if err != nil { glog.Errorf("volume delete %v: %v", req, err) @@ -94,3 +99,70 @@ func (vs *VolumeServer) VolumeDelete(ctx context.Context, req *volume_server_pb. return resp, err } + +func (vs *VolumeServer) VolumeConfigure(ctx context.Context, req *volume_server_pb.VolumeConfigureRequest) (*volume_server_pb.VolumeConfigureResponse, error) { + + resp := &volume_server_pb.VolumeConfigureResponse{} + + // check replication format + if _, err := super_block.NewReplicaPlacementFromString(req.Replication); err != nil { + resp.Error = fmt.Sprintf("volume configure replication %v: %v", req, err) + return resp, nil + } + + // unmount + if err := vs.store.UnmountVolume(needle.VolumeId(req.VolumeId)); err != nil { + glog.Errorf("volume configure unmount %v: %v", req, err) + resp.Error = fmt.Sprintf("volume configure unmount %v: %v", req, err) + return resp, nil + } + + // modify the volume info file + if err := vs.store.ConfigureVolume(needle.VolumeId(req.VolumeId), req.Replication); err != nil { + glog.Errorf("volume configure %v: %v", req, err) + resp.Error = fmt.Sprintf("volume configure %v: %v", req, err) + return resp, nil + } + + // mount + if err := vs.store.MountVolume(needle.VolumeId(req.VolumeId)); err != nil { + glog.Errorf("volume configure mount %v: %v", req, err) + resp.Error = fmt.Sprintf("volume configure mount %v: %v", req, err) + return resp, nil + } + + return resp, nil + +} + +func 
(vs *VolumeServer) VolumeMarkReadonly(ctx context.Context, req *volume_server_pb.VolumeMarkReadonlyRequest) (*volume_server_pb.VolumeMarkReadonlyResponse, error) { + + resp := &volume_server_pb.VolumeMarkReadonlyResponse{} + + err := vs.store.MarkVolumeReadonly(needle.VolumeId(req.VolumeId)) + + if err != nil { + glog.Errorf("volume mark readonly %v: %v", req, err) + } else { + glog.V(2).Infof("volume mark readonly %v", req) + } + + return resp, err + +} + +func (vs *VolumeServer) VolumeServerStatus(ctx context.Context, req *volume_server_pb.VolumeServerStatusRequest) (*volume_server_pb.VolumeServerStatusResponse, error) { + + resp := &volume_server_pb.VolumeServerStatusResponse{} + + for _, loc := range vs.store.Locations { + if dir, e := filepath.Abs(loc.Directory); e == nil { + resp.DiskStatuses = append(resp.DiskStatuses, stats.NewDiskStatus(dir)) + } + } + + resp.MemoryStatus = stats.MemStat() + + return resp, nil + +} diff --git a/weed/server/volume_grpc_batch_delete.go b/weed/server/volume_grpc_batch_delete.go index 3554d97ae..501964191 100644 --- a/weed/server/volume_grpc_batch_delete.go +++ b/weed/server/volume_grpc_batch_delete.go @@ -7,7 +7,8 @@ import ( "github.com/chrislusf/seaweedfs/weed/operation" "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb" - "github.com/chrislusf/seaweedfs/weed/storage" + "github.com/chrislusf/seaweedfs/weed/storage/needle" + "github.com/chrislusf/seaweedfs/weed/storage/types" ) func (vs *VolumeServer) BatchDelete(ctx context.Context, req *volume_server_pb.BatchDeleteRequest) (*volume_server_pb.BatchDeleteResponse, error) { @@ -26,18 +27,36 @@ func (vs *VolumeServer) BatchDelete(ctx context.Context, req *volume_server_pb.B continue } - n := new(storage.Needle) - volumeId, _ := storage.NewVolumeId(vid) - n.ParsePath(id_cookie) - - cookie := n.Cookie - if _, err := vs.store.ReadVolumeNeedle(volumeId, n); err != nil { - resp.Results = append(resp.Results, &volume_server_pb.DeleteResult{ - FileId: fid, - Status: 
http.StatusNotFound, - Error: err.Error(), - }) - continue + n := new(needle.Needle) + volumeId, _ := needle.NewVolumeId(vid) + if req.SkipCookieCheck { + n.Id, err = types.ParseNeedleId(id_cookie) + if err != nil { + resp.Results = append(resp.Results, &volume_server_pb.DeleteResult{ + FileId: fid, + Status: http.StatusBadRequest, + Error: err.Error()}) + continue + } + } else { + n.ParsePath(id_cookie) + cookie := n.Cookie + if _, err := vs.store.ReadVolumeNeedle(volumeId, n); err != nil { + resp.Results = append(resp.Results, &volume_server_pb.DeleteResult{ + FileId: fid, + Status: http.StatusNotFound, + Error: err.Error(), + }) + continue + } + if n.Cookie != cookie { + resp.Results = append(resp.Results, &volume_server_pb.DeleteResult{ + FileId: fid, + Status: http.StatusBadRequest, + Error: "File Random Cookie does not match.", + }) + break + } } if n.IsChunkedManifest() { @@ -49,16 +68,8 @@ func (vs *VolumeServer) BatchDelete(ctx context.Context, req *volume_server_pb.B continue } - if n.Cookie != cookie { - resp.Results = append(resp.Results, &volume_server_pb.DeleteResult{ - FileId: fid, - Status: http.StatusBadRequest, - Error: "File Random Cookie does not match.", - }) - break - } n.LastModified = now - if size, err := vs.store.Delete(volumeId, n); err != nil { + if size, err := vs.store.DeleteVolumeNeedle(volumeId, n); err != nil { resp.Results = append(resp.Results, &volume_server_pb.DeleteResult{ FileId: fid, Status: http.StatusInternalServerError, diff --git a/weed/server/volume_grpc_client_to_master.go b/weed/server/volume_grpc_client_to_master.go index bd3ffd7b3..7cb836344 100644 --- a/weed/server/volume_grpc_client_to_master.go +++ b/weed/server/volume_grpc_client_to_master.go @@ -2,13 +2,21 @@ package weed_server import ( "fmt" + "net" "time" + "google.golang.org/grpc" + + "github.com/chrislusf/seaweedfs/weed/pb" + "github.com/chrislusf/seaweedfs/weed/security" + "github.com/chrislusf/seaweedfs/weed/storage/backend" + 
"github.com/chrislusf/seaweedfs/weed/storage/erasure_coding" + + "golang.org/x/net/context" + "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/pb/master_pb" - "github.com/chrislusf/seaweedfs/weed/security" "github.com/chrislusf/seaweedfs/weed/util" - "golang.org/x/net/context" ) func (vs *VolumeServer) GetMaster() string { @@ -16,34 +24,42 @@ func (vs *VolumeServer) GetMaster() string { } func (vs *VolumeServer) heartbeat() { - glog.V(0).Infof("Volume server start with masters: %v", vs.MasterNodes) + glog.V(0).Infof("Volume server start with seed master nodes: %v", vs.SeedMasterNodes) vs.store.SetDataCenter(vs.dataCenter) vs.store.SetRack(vs.rack) + grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.volume") + var err error var newLeader string for { - for _, master := range vs.MasterNodes { + for _, master := range vs.SeedMasterNodes { if newLeader != "" { + // the new leader may actually is the same master + // need to wait a bit before adding itself + time.Sleep(3 * time.Second) master = newLeader } - masterGrpcAddress, parseErr := util.ParseServerToGrpcAddress(master, 0) + masterGrpcAddress, parseErr := pb.ParseServerToGrpcAddress(master) if parseErr != nil { - glog.V(0).Infof("failed to parse master grpc %v", masterGrpcAddress) + glog.V(0).Infof("failed to parse master grpc %v: %v", masterGrpcAddress, parseErr) continue } - newLeader, err = vs.doHeartbeat(master, masterGrpcAddress, time.Duration(vs.pulseSeconds)*time.Second) + vs.store.MasterAddress = master + newLeader, err = vs.doHeartbeat(master, masterGrpcAddress, grpcDialOption, time.Duration(vs.pulseSeconds)*time.Second) if err != nil { glog.V(0).Infof("heartbeat error: %v", err) time.Sleep(time.Duration(vs.pulseSeconds) * time.Second) + newLeader = "" + vs.store.MasterAddress = "" } } } } -func (vs *VolumeServer) doHeartbeat(masterNode, masterGrpcAddress string, sleepInterval time.Duration) (newLeader string, err error) { +func (vs *VolumeServer) 
doHeartbeat(masterNode, masterGrpcAddress string, grpcDialOption grpc.DialOption, sleepInterval time.Duration) (newLeader string, err error) { - grpcConection, err := util.GrpcDial(masterGrpcAddress) + grpcConection, err := pb.GrpcDial(context.Background(), masterGrpcAddress, grpcDialOption) if err != nil { return "", fmt.Errorf("fail to dial %s : %v", masterNode, err) } @@ -58,9 +74,6 @@ func (vs *VolumeServer) doHeartbeat(masterNode, masterGrpcAddress string, sleepI glog.V(0).Infof("Heartbeat to: %v", masterNode) vs.currentMaster = masterNode - vs.store.Client = stream - defer func() { vs.store.Client = nil }() - doneChan := make(chan error, 1) go func() { @@ -70,18 +83,27 @@ func (vs *VolumeServer) doHeartbeat(masterNode, masterGrpcAddress string, sleepI doneChan <- err return } - if in.GetVolumeSizeLimit() != 0 { - vs.store.VolumeSizeLimit = in.GetVolumeSizeLimit() - } - if in.GetSecretKey() != "" { - vs.guard.SecretKey = security.Secret(in.GetSecretKey()) + if in.GetVolumeSizeLimit() != 0 && vs.store.GetVolumeSizeLimit() != in.GetVolumeSizeLimit() { + vs.store.SetVolumeSizeLimit(in.GetVolumeSizeLimit()) + if vs.store.MaybeAdjustVolumeMax() { + if err = stream.Send(vs.store.CollectHeartbeat()); err != nil { + glog.V(0).Infof("Volume Server Failed to talk with master %s: %v", masterNode, err) + } + } } - if in.GetLeader() != "" && masterNode != in.GetLeader() { + if in.GetLeader() != "" && masterNode != in.GetLeader() && !isSameIP(in.GetLeader(), masterNode) { glog.V(0).Infof("Volume Server found a new master newLeader: %v instead of %v", in.GetLeader(), masterNode) newLeader = in.GetLeader() doneChan <- nil return } + if in.GetMetricsAddress() != "" && vs.MetricsAddress != in.GetMetricsAddress() { + vs.MetricsAddress = in.GetMetricsAddress() + vs.MetricsIntervalSec = int(in.GetMetricsIntervalSeconds()) + } + if len(in.StorageBackends) > 0 { + backend.LoadFromPbStorageBackends(in.StorageBackends) + } } }() @@ -90,33 +112,89 @@ func (vs *VolumeServer) 
doHeartbeat(masterNode, masterGrpcAddress string, sleepI return "", err } - tickChan := time.Tick(sleepInterval) + if err = stream.Send(vs.store.CollectErasureCodingHeartbeat()); err != nil { + glog.V(0).Infof("Volume Server Failed to talk with master %s: %v", masterNode, err) + return "", err + } + + volumeTickChan := time.Tick(sleepInterval) + ecShardTickChan := time.Tick(17 * sleepInterval) for { select { - case vid := <-vs.store.NewVolumeIdChan: + case volumeMessage := <-vs.store.NewVolumesChan: deltaBeat := &master_pb.Heartbeat{ - NewVids: []uint32{uint32(vid)}, + NewVolumes: []*master_pb.VolumeShortInformationMessage{ + &volumeMessage, + }, } + glog.V(1).Infof("volume server %s:%d adds volume %d", vs.store.Ip, vs.store.Port, volumeMessage.Id) if err = stream.Send(deltaBeat); err != nil { glog.V(0).Infof("Volume Server Failed to update to master %s: %v", masterNode, err) return "", err } - case vid := <-vs.store.DeletedVolumeIdChan: + case ecShardMessage := <-vs.store.NewEcShardsChan: deltaBeat := &master_pb.Heartbeat{ - DeletedVids: []uint32{uint32(vid)}, + NewEcShards: []*master_pb.VolumeEcShardInformationMessage{ + &ecShardMessage, + }, } + glog.V(1).Infof("volume server %s:%d adds ec shard %d:%d", vs.store.Ip, vs.store.Port, ecShardMessage.Id, + erasure_coding.ShardBits(ecShardMessage.EcIndexBits).ShardIds()) if err = stream.Send(deltaBeat); err != nil { glog.V(0).Infof("Volume Server Failed to update to master %s: %v", masterNode, err) return "", err } - case <-tickChan: + case volumeMessage := <-vs.store.DeletedVolumesChan: + deltaBeat := &master_pb.Heartbeat{ + DeletedVolumes: []*master_pb.VolumeShortInformationMessage{ + &volumeMessage, + }, + } + glog.V(1).Infof("volume server %s:%d deletes volume %d", vs.store.Ip, vs.store.Port, volumeMessage.Id) + if err = stream.Send(deltaBeat); err != nil { + glog.V(0).Infof("Volume Server Failed to update to master %s: %v", masterNode, err) + return "", err + } + case ecShardMessage := 
<-vs.store.DeletedEcShardsChan: + deltaBeat := &master_pb.Heartbeat{ + DeletedEcShards: []*master_pb.VolumeEcShardInformationMessage{ + &ecShardMessage, + }, + } + glog.V(1).Infof("volume server %s:%d deletes ec shard %d:%d", vs.store.Ip, vs.store.Port, ecShardMessage.Id, + erasure_coding.ShardBits(ecShardMessage.EcIndexBits).ShardIds()) + if err = stream.Send(deltaBeat); err != nil { + glog.V(0).Infof("Volume Server Failed to update to master %s: %v", masterNode, err) + return "", err + } + case <-volumeTickChan: + glog.V(4).Infof("volume server %s:%d heartbeat", vs.store.Ip, vs.store.Port) if err = stream.Send(vs.store.CollectHeartbeat()); err != nil { glog.V(0).Infof("Volume Server Failed to talk with master %s: %v", masterNode, err) return "", err } + case <-ecShardTickChan: + glog.V(4).Infof("volume server %s:%d ec heartbeat", vs.store.Ip, vs.store.Port) + if err = stream.Send(vs.store.CollectErasureCodingHeartbeat()); err != nil { + glog.V(0).Infof("Volume Server Failed to talk with master %s: %v", masterNode, err) + return "", err + } case err = <-doneChan: return } } } + +func isSameIP(ip string, host string) bool { + ips, err := net.LookupIP(host) + if err != nil { + return false + } + for _, t := range ips { + if ip == t.String() { + return true + } + } + return false +} diff --git a/weed/server/volume_grpc_copy.go b/weed/server/volume_grpc_copy.go new file mode 100644 index 000000000..5c7d5572c --- /dev/null +++ b/weed/server/volume_grpc_copy.go @@ -0,0 +1,286 @@ +package weed_server + +import ( + "context" + "fmt" + "io" + "math" + "os" + "time" + + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/operation" + "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb" + "github.com/chrislusf/seaweedfs/weed/storage" + "github.com/chrislusf/seaweedfs/weed/storage/erasure_coding" + "github.com/chrislusf/seaweedfs/weed/storage/needle" + "github.com/chrislusf/seaweedfs/weed/util" +) + +const BufferSizeLimit = 1024 * 1024 * 2 + 
+// VolumeCopy copy the .idx .dat .vif files, and mount the volume +func (vs *VolumeServer) VolumeCopy(ctx context.Context, req *volume_server_pb.VolumeCopyRequest) (*volume_server_pb.VolumeCopyResponse, error) { + + v := vs.store.GetVolume(needle.VolumeId(req.VolumeId)) + if v != nil { + + glog.V(0).Infof("volume %d already exists. deleted before copying...", req.VolumeId) + + err := vs.store.UnmountVolume(needle.VolumeId(req.VolumeId)) + if err != nil { + return nil, fmt.Errorf("failed to mount existing volume %d: %v", req.VolumeId, err) + } + + err = vs.store.DeleteVolume(needle.VolumeId(req.VolumeId)) + if err != nil { + return nil, fmt.Errorf("failed to delete existing volume %d: %v", req.VolumeId, err) + } + + glog.V(0).Infof("deleted exisitng volume %d before copying.", req.VolumeId) + } + + location := vs.store.FindFreeLocation() + if location == nil { + return nil, fmt.Errorf("no space left") + } + + // the master will not start compaction for read-only volumes, so it is safe to just copy files directly + // copy .dat and .idx files + // read .idx .dat file size and timestamp + // send .idx file + // send .dat file + // confirm size and timestamp + var volFileInfoResp *volume_server_pb.ReadVolumeFileStatusResponse + var volumeFileName, idxFileName, datFileName string + err := operation.WithVolumeServerClient(req.SourceDataNode, vs.grpcDialOption, func(client volume_server_pb.VolumeServerClient) error { + var err error + volFileInfoResp, err = client.ReadVolumeFileStatus(context.Background(), + &volume_server_pb.ReadVolumeFileStatusRequest{ + VolumeId: req.VolumeId, + }) + if nil != err { + return fmt.Errorf("read volume file status failed, %v", err) + } + + volumeFileName = storage.VolumeFileName(location.Directory, volFileInfoResp.Collection, int(req.VolumeId)) + + // println("source:", volFileInfoResp.String()) + // copy ecx file + if err := vs.doCopyFile(client, false, req.Collection, req.VolumeId, volFileInfoResp.CompactionRevision, 
volFileInfoResp.IdxFileSize, volumeFileName, ".idx", false, false); err != nil { + return err + } + + if err := vs.doCopyFile(client, false, req.Collection, req.VolumeId, volFileInfoResp.CompactionRevision, volFileInfoResp.DatFileSize, volumeFileName, ".dat", false, true); err != nil { + return err + } + + if err := vs.doCopyFile(client, false, req.Collection, req.VolumeId, volFileInfoResp.CompactionRevision, volFileInfoResp.DatFileSize, volumeFileName, ".vif", false, true); err != nil { + return err + } + + return nil + }) + + if err != nil { + return nil, err + } + if volumeFileName == "" { + return nil, fmt.Errorf("not found volume %d file", req.VolumeId) + } + + idxFileName = volumeFileName + ".idx" + datFileName = volumeFileName + ".dat" + + defer func() { + if err != nil && volumeFileName != "" { + os.Remove(idxFileName) + os.Remove(datFileName) + os.Remove(volumeFileName + ".vif") + } + }() + + if err = checkCopyFiles(volFileInfoResp, idxFileName, datFileName); err != nil { // added by panyc16 + return nil, err + } + + // mount the volume + err = vs.store.MountVolume(needle.VolumeId(req.VolumeId)) + if err != nil { + return nil, fmt.Errorf("failed to mount volume %d: %v", req.VolumeId, err) + } + + return &volume_server_pb.VolumeCopyResponse{ + LastAppendAtNs: volFileInfoResp.DatFileTimestampSeconds * uint64(time.Second), + }, err +} + +func (vs *VolumeServer) doCopyFile(client volume_server_pb.VolumeServerClient, isEcVolume bool, collection string, vid, compactRevision uint32, stopOffset uint64, baseFileName, ext string, isAppend, ignoreSourceFileNotFound bool) error { + + copyFileClient, err := client.CopyFile(context.Background(), &volume_server_pb.CopyFileRequest{ + VolumeId: vid, + Ext: ext, + CompactionRevision: compactRevision, + StopOffset: stopOffset, + Collection: collection, + IsEcVolume: isEcVolume, + IgnoreSourceFileNotFound: ignoreSourceFileNotFound, + }) + if err != nil { + return fmt.Errorf("failed to start copying volume %d %s file: %v", 
vid, ext, err) + } + + err = writeToFile(copyFileClient, baseFileName+ext, util.NewWriteThrottler(vs.compactionBytePerSecond), isAppend) + if err != nil { + return fmt.Errorf("failed to copy %s file: %v", baseFileName+ext, err) + } + + return nil + +} + +/** +only check the the differ of the file size +todo: maybe should check the received count and deleted count of the volume +*/ +func checkCopyFiles(originFileInf *volume_server_pb.ReadVolumeFileStatusResponse, idxFileName, datFileName string) error { + stat, err := os.Stat(idxFileName) + if err != nil { + return fmt.Errorf("stat idx file %s failed, %v", idxFileName, err) + } + if originFileInf.IdxFileSize != uint64(stat.Size()) { + return fmt.Errorf("idx file %s size [%v] is not same as origin file size [%v]", + idxFileName, stat.Size(), originFileInf.IdxFileSize) + } + + stat, err = os.Stat(datFileName) + if err != nil { + return fmt.Errorf("get dat file info failed, %v", err) + } + if originFileInf.DatFileSize != uint64(stat.Size()) { + return fmt.Errorf("the dat file size [%v] is not same as origin file size [%v]", + stat.Size(), originFileInf.DatFileSize) + } + return nil +} + +func writeToFile(client volume_server_pb.VolumeServer_CopyFileClient, fileName string, wt *util.WriteThrottler, isAppend bool) error { + glog.V(4).Infof("writing to %s", fileName) + flags := os.O_WRONLY | os.O_CREATE | os.O_TRUNC + if isAppend { + flags = os.O_WRONLY | os.O_CREATE + } + dst, err := os.OpenFile(fileName, flags, 0644) + if err != nil { + return nil + } + defer dst.Close() + + for { + resp, receiveErr := client.Recv() + if receiveErr == io.EOF { + break + } + if receiveErr != nil { + return fmt.Errorf("receiving %s: %v", fileName, receiveErr) + } + dst.Write(resp.FileContent) + wt.MaybeSlowdown(int64(len(resp.FileContent))) + } + return nil +} + +func (vs *VolumeServer) ReadVolumeFileStatus(ctx context.Context, req *volume_server_pb.ReadVolumeFileStatusRequest) (*volume_server_pb.ReadVolumeFileStatusResponse, error) { + 
resp := &volume_server_pb.ReadVolumeFileStatusResponse{} + v := vs.store.GetVolume(needle.VolumeId(req.VolumeId)) + if v == nil { + return nil, fmt.Errorf("not found volume id %d", req.VolumeId) + } + + resp.VolumeId = req.VolumeId + datSize, idxSize, modTime := v.FileStat() + resp.DatFileSize = datSize + resp.IdxFileSize = idxSize + resp.DatFileTimestampSeconds = uint64(modTime.Unix()) + resp.IdxFileTimestampSeconds = uint64(modTime.Unix()) + resp.FileCount = v.FileCount() + resp.CompactionRevision = uint32(v.CompactionRevision) + resp.Collection = v.Collection + return resp, nil +} + +// CopyFile client pulls the volume related file from the source server. +// if req.CompactionRevision != math.MaxUint32, it ensures the compact revision is as expected +// The copying still stop at req.StopOffset, but you can set it to math.MaxUint64 in order to read all data. +func (vs *VolumeServer) CopyFile(req *volume_server_pb.CopyFileRequest, stream volume_server_pb.VolumeServer_CopyFileServer) error { + + var fileName string + if !req.IsEcVolume { + v := vs.store.GetVolume(needle.VolumeId(req.VolumeId)) + if v == nil { + return fmt.Errorf("not found volume id %d", req.VolumeId) + } + + if uint32(v.CompactionRevision) != req.CompactionRevision && req.CompactionRevision != math.MaxUint32 { + return fmt.Errorf("volume %d is compacted", req.VolumeId) + } + fileName = v.FileName() + req.Ext + } else { + baseFileName := erasure_coding.EcShardBaseFileName(req.Collection, int(req.VolumeId)) + req.Ext + for _, location := range vs.store.Locations { + tName := util.Join(location.Directory, baseFileName) + if util.FileExists(tName) { + fileName = tName + } + } + if fileName == "" { + if req.IgnoreSourceFileNotFound { + return nil + } + return fmt.Errorf("CopyFile not found ec volume id %d", req.VolumeId) + } + } + + bytesToRead := int64(req.StopOffset) + + file, err := os.Open(fileName) + if err != nil { + if req.IgnoreSourceFileNotFound && err == os.ErrNotExist { + return nil + } + 
return err + } + defer file.Close() + + buffer := make([]byte, BufferSizeLimit) + + for bytesToRead > 0 { + bytesread, err := file.Read(buffer) + + // println(fileName, "read", bytesread, "bytes, with target", bytesToRead) + + if err != nil { + if err != io.EOF { + return err + } + // println(fileName, "read", bytesread, "bytes, with target", bytesToRead, "err", err.Error()) + break + } + + if int64(bytesread) > bytesToRead { + bytesread = int(bytesToRead) + } + err = stream.Send(&volume_server_pb.CopyFileResponse{ + FileContent: buffer[:bytesread], + }) + if err != nil { + // println("sending", bytesread, "bytes err", err.Error()) + return err + } + + bytesToRead -= int64(bytesread) + + } + + return nil +} diff --git a/weed/server/volume_grpc_copy_incremental.go b/weed/server/volume_grpc_copy_incremental.go new file mode 100644 index 000000000..6d6c3daa3 --- /dev/null +++ b/weed/server/volume_grpc_copy_incremental.go @@ -0,0 +1,66 @@ +package weed_server + +import ( + "context" + "fmt" + "io" + + "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb" + "github.com/chrislusf/seaweedfs/weed/storage/backend" + "github.com/chrislusf/seaweedfs/weed/storage/needle" +) + +func (vs *VolumeServer) VolumeIncrementalCopy(req *volume_server_pb.VolumeIncrementalCopyRequest, stream volume_server_pb.VolumeServer_VolumeIncrementalCopyServer) error { + + v := vs.store.GetVolume(needle.VolumeId(req.VolumeId)) + if v == nil { + return fmt.Errorf("not found volume id %d", req.VolumeId) + } + + stopOffset, _, _ := v.FileStat() + foundOffset, isLastOne, err := v.BinarySearchByAppendAtNs(req.SinceNs) + if err != nil { + return fmt.Errorf("fail to locate by appendAtNs %d: %s", req.SinceNs, err) + } + + if isLastOne { + return nil + } + + startOffset := foundOffset.ToAcutalOffset() + + buf := make([]byte, 1024*1024*2) + return sendFileContent(v.DataBackend, buf, startOffset, int64(stopOffset), stream) + +} + +func (vs *VolumeServer) VolumeSyncStatus(ctx context.Context, req 
*volume_server_pb.VolumeSyncStatusRequest) (*volume_server_pb.VolumeSyncStatusResponse, error) { + + v := vs.store.GetVolume(needle.VolumeId(req.VolumeId)) + if v == nil { + return nil, fmt.Errorf("not found volume id %d", req.VolumeId) + } + + resp := v.GetVolumeSyncStatus() + + return resp, nil + +} + +func sendFileContent(datBackend backend.BackendStorageFile, buf []byte, startOffset, stopOffset int64, stream volume_server_pb.VolumeServer_VolumeIncrementalCopyServer) error { + var blockSizeLimit = int64(len(buf)) + for i := int64(0); i < stopOffset-startOffset; i += blockSizeLimit { + n, readErr := datBackend.ReadAt(buf, startOffset+i) + if readErr == nil || readErr == io.EOF { + resp := &volume_server_pb.VolumeIncrementalCopyResponse{} + resp.FileContent = buf[:int64(n)] + sendErr := stream.Send(resp) + if sendErr != nil { + return sendErr + } + } else { + return readErr + } + } + return nil +} diff --git a/weed/server/volume_grpc_erasure_coding.go b/weed/server/volume_grpc_erasure_coding.go new file mode 100644 index 000000000..79348c9d7 --- /dev/null +++ b/weed/server/volume_grpc_erasure_coding.go @@ -0,0 +1,391 @@ +package weed_server + +import ( + "context" + "fmt" + "io" + "io/ioutil" + "math" + "os" + "path" + "path/filepath" + "strings" + + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/operation" + "github.com/chrislusf/seaweedfs/weed/pb" + "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb" + "github.com/chrislusf/seaweedfs/weed/storage" + "github.com/chrislusf/seaweedfs/weed/storage/erasure_coding" + "github.com/chrislusf/seaweedfs/weed/storage/needle" + "github.com/chrislusf/seaweedfs/weed/storage/types" + "github.com/chrislusf/seaweedfs/weed/util" +) + +/* + +Steps to apply erasure coding to .dat .idx files +0. ensure the volume is readonly +1. client call VolumeEcShardsGenerate to generate the .ecx and .ec00 ~ .ec13 files +2. client ask master for possible servers to hold the ec files, at least 4 servers +3. 
client call VolumeEcShardsCopy on above target servers to copy ec files from the source server +4. target servers report the new ec files to the master +5. master stores vid -> [14]*DataNode +6. client checks master. If all 14 slices are ready, delete the original .idx, .idx files + +*/ + +// VolumeEcShardsGenerate generates the .ecx and .ec00 ~ .ec13 files +func (vs *VolumeServer) VolumeEcShardsGenerate(ctx context.Context, req *volume_server_pb.VolumeEcShardsGenerateRequest) (*volume_server_pb.VolumeEcShardsGenerateResponse, error) { + + glog.V(0).Infof("VolumeEcShardsGenerate: %v", req) + + v := vs.store.GetVolume(needle.VolumeId(req.VolumeId)) + if v == nil { + return nil, fmt.Errorf("volume %d not found", req.VolumeId) + } + baseFileName := v.FileName() + + if v.Collection != req.Collection { + return nil, fmt.Errorf("existing collection:%v unexpected input: %v", v.Collection, req.Collection) + } + + // write .ec00 ~ .ec13 files + if err := erasure_coding.WriteEcFiles(baseFileName); err != nil { + return nil, fmt.Errorf("WriteEcFiles %s: %v", baseFileName, err) + } + + // write .ecx file + if err := erasure_coding.WriteSortedFileFromIdx(baseFileName, ".ecx"); err != nil { + return nil, fmt.Errorf("WriteSortedFileFromIdx %s: %v", baseFileName, err) + } + + // write .vif files + if err := pb.SaveVolumeInfo(baseFileName+".vif", &volume_server_pb.VolumeInfo{Version: uint32(v.Version())}); err != nil { + return nil, fmt.Errorf("WriteEcFiles %s: %v", baseFileName, err) + } + + return &volume_server_pb.VolumeEcShardsGenerateResponse{}, nil +} + +// VolumeEcShardsRebuild generates the any of the missing .ec00 ~ .ec13 files +func (vs *VolumeServer) VolumeEcShardsRebuild(ctx context.Context, req *volume_server_pb.VolumeEcShardsRebuildRequest) (*volume_server_pb.VolumeEcShardsRebuildResponse, error) { + + glog.V(0).Infof("VolumeEcShardsRebuild: %v", req) + + baseFileName := erasure_coding.EcShardBaseFileName(req.Collection, int(req.VolumeId)) + + var rebuiltShardIds 
[]uint32 + + for _, location := range vs.store.Locations { + if util.FileExists(path.Join(location.Directory, baseFileName+".ecx")) { + // write .ec00 ~ .ec13 files + baseFileName = path.Join(location.Directory, baseFileName) + if generatedShardIds, err := erasure_coding.RebuildEcFiles(baseFileName); err != nil { + return nil, fmt.Errorf("RebuildEcFiles %s: %v", baseFileName, err) + } else { + rebuiltShardIds = generatedShardIds + } + + if err := erasure_coding.RebuildEcxFile(baseFileName); err != nil { + return nil, fmt.Errorf("RebuildEcxFile %s: %v", baseFileName, err) + } + + break + } + } + + return &volume_server_pb.VolumeEcShardsRebuildResponse{ + RebuiltShardIds: rebuiltShardIds, + }, nil +} + +// VolumeEcShardsCopy copy the .ecx and some ec data slices +func (vs *VolumeServer) VolumeEcShardsCopy(ctx context.Context, req *volume_server_pb.VolumeEcShardsCopyRequest) (*volume_server_pb.VolumeEcShardsCopyResponse, error) { + + glog.V(0).Infof("VolumeEcShardsCopy: %v", req) + + location := vs.store.FindFreeLocation() + if location == nil { + return nil, fmt.Errorf("no space left") + } + + baseFileName := storage.VolumeFileName(location.Directory, req.Collection, int(req.VolumeId)) + + err := operation.WithVolumeServerClient(req.SourceDataNode, vs.grpcDialOption, func(client volume_server_pb.VolumeServerClient) error { + + // copy ec data slices + for _, shardId := range req.ShardIds { + if err := vs.doCopyFile(client, true, req.Collection, req.VolumeId, math.MaxUint32, math.MaxInt64, baseFileName, erasure_coding.ToExt(int(shardId)), false, false); err != nil { + return err + } + } + + if req.CopyEcxFile { + + // copy ecx file + if err := vs.doCopyFile(client, true, req.Collection, req.VolumeId, math.MaxUint32, math.MaxInt64, baseFileName, ".ecx", false, false); err != nil { + return err + } + return nil + } + + if req.CopyEcjFile { + // copy ecj file + if err := vs.doCopyFile(client, true, req.Collection, req.VolumeId, math.MaxUint32, math.MaxInt64, 
baseFileName, ".ecj", true, true); err != nil { + return err + } + } + + if req.CopyVifFile { + // copy vif file + if err := vs.doCopyFile(client, true, req.Collection, req.VolumeId, math.MaxUint32, math.MaxInt64, baseFileName, ".vif", false, true); err != nil { + return err + } + } + + return nil + }) + if err != nil { + return nil, fmt.Errorf("VolumeEcShardsCopy volume %d: %v", req.VolumeId, err) + } + + return &volume_server_pb.VolumeEcShardsCopyResponse{}, nil +} + +// VolumeEcShardsDelete local delete the .ecx and some ec data slices if not needed +// the shard should not be mounted before calling this. +func (vs *VolumeServer) VolumeEcShardsDelete(ctx context.Context, req *volume_server_pb.VolumeEcShardsDeleteRequest) (*volume_server_pb.VolumeEcShardsDeleteResponse, error) { + + baseFilename := erasure_coding.EcShardBaseFileName(req.Collection, int(req.VolumeId)) + + glog.V(0).Infof("ec volume %d shard delete %v", req.VolumeId, req.ShardIds) + + found := false + for _, location := range vs.store.Locations { + if util.FileExists(path.Join(location.Directory, baseFilename+".ecx")) { + found = true + baseFilename = path.Join(location.Directory, baseFilename) + for _, shardId := range req.ShardIds { + os.Remove(baseFilename + erasure_coding.ToExt(int(shardId))) + } + break + } + } + + if !found { + return nil, nil + } + + // check whether to delete the .ecx and .ecj file also + hasEcxFile := false + hasIdxFile := false + existingShardCount := 0 + + bName := filepath.Base(baseFilename) + for _, location := range vs.store.Locations { + fileInfos, err := ioutil.ReadDir(location.Directory) + if err != nil { + continue + } + for _, fileInfo := range fileInfos { + if fileInfo.Name() == bName+".ecx" || fileInfo.Name() == bName+".ecj" { + hasEcxFile = true + continue + } + if fileInfo.Name() == bName+".idx" { + hasIdxFile = true + continue + } + if strings.HasPrefix(fileInfo.Name(), bName+".ec") { + existingShardCount++ + } + } + } + + if hasEcxFile && existingShardCount 
== 0 { + if err := os.Remove(baseFilename + ".ecx"); err != nil { + return nil, err + } + os.Remove(baseFilename + ".ecj") + } + if !hasIdxFile { + // .vif is used for ec volumes and normal volumes + os.Remove(baseFilename + ".vif") + } + + return &volume_server_pb.VolumeEcShardsDeleteResponse{}, nil +} + +func (vs *VolumeServer) VolumeEcShardsMount(ctx context.Context, req *volume_server_pb.VolumeEcShardsMountRequest) (*volume_server_pb.VolumeEcShardsMountResponse, error) { + + glog.V(0).Infof("VolumeEcShardsMount: %v", req) + + for _, shardId := range req.ShardIds { + err := vs.store.MountEcShards(req.Collection, needle.VolumeId(req.VolumeId), erasure_coding.ShardId(shardId)) + + if err != nil { + glog.Errorf("ec shard mount %v: %v", req, err) + } else { + glog.V(2).Infof("ec shard mount %v", req) + } + + if err != nil { + return nil, fmt.Errorf("mount %d.%d: %v", req.VolumeId, shardId, err) + } + } + + return &volume_server_pb.VolumeEcShardsMountResponse{}, nil +} + +func (vs *VolumeServer) VolumeEcShardsUnmount(ctx context.Context, req *volume_server_pb.VolumeEcShardsUnmountRequest) (*volume_server_pb.VolumeEcShardsUnmountResponse, error) { + + glog.V(0).Infof("VolumeEcShardsUnmount: %v", req) + + for _, shardId := range req.ShardIds { + err := vs.store.UnmountEcShards(needle.VolumeId(req.VolumeId), erasure_coding.ShardId(shardId)) + + if err != nil { + glog.Errorf("ec shard unmount %v: %v", req, err) + } else { + glog.V(2).Infof("ec shard unmount %v", req) + } + + if err != nil { + return nil, fmt.Errorf("unmount %d.%d: %v", req.VolumeId, shardId, err) + } + } + + return &volume_server_pb.VolumeEcShardsUnmountResponse{}, nil +} + +func (vs *VolumeServer) VolumeEcShardRead(req *volume_server_pb.VolumeEcShardReadRequest, stream volume_server_pb.VolumeServer_VolumeEcShardReadServer) error { + + ecVolume, found := vs.store.FindEcVolume(needle.VolumeId(req.VolumeId)) + if !found { + return fmt.Errorf("VolumeEcShardRead not found ec volume id %d", req.VolumeId) + } 
+ ecShard, found := ecVolume.FindEcVolumeShard(erasure_coding.ShardId(req.ShardId)) + if !found { + return fmt.Errorf("not found ec shard %d.%d", req.VolumeId, req.ShardId) + } + + if req.FileKey != 0 { + _, size, _ := ecVolume.FindNeedleFromEcx(types.Uint64ToNeedleId(req.FileKey)) + if size == types.TombstoneFileSize { + return stream.Send(&volume_server_pb.VolumeEcShardReadResponse{ + IsDeleted: true, + }) + } + } + + bufSize := req.Size + if bufSize > BufferSizeLimit { + bufSize = BufferSizeLimit + } + buffer := make([]byte, bufSize) + + startOffset, bytesToRead := req.Offset, req.Size + + for bytesToRead > 0 { + // min of bytesToRead and bufSize + bufferSize := bufSize + if bufferSize > bytesToRead { + bufferSize = bytesToRead + } + bytesread, err := ecShard.ReadAt(buffer[0:bufferSize], startOffset) + + // println("read", ecShard.FileName(), "startOffset", startOffset, bytesread, "bytes, with target", bufferSize) + if bytesread > 0 { + + if int64(bytesread) > bytesToRead { + bytesread = int(bytesToRead) + } + err = stream.Send(&volume_server_pb.VolumeEcShardReadResponse{ + Data: buffer[:bytesread], + }) + if err != nil { + // println("sending", bytesread, "bytes err", err.Error()) + return err + } + + startOffset += int64(bytesread) + bytesToRead -= int64(bytesread) + + } + + if err != nil { + if err != io.EOF { + return err + } + return nil + } + + } + + return nil + +} + +func (vs *VolumeServer) VolumeEcBlobDelete(ctx context.Context, req *volume_server_pb.VolumeEcBlobDeleteRequest) (*volume_server_pb.VolumeEcBlobDeleteResponse, error) { + + glog.V(0).Infof("VolumeEcBlobDelete: %v", req) + + resp := &volume_server_pb.VolumeEcBlobDeleteResponse{} + + for _, location := range vs.store.Locations { + if localEcVolume, found := location.FindEcVolume(needle.VolumeId(req.VolumeId)); found { + + _, size, _, err := localEcVolume.LocateEcShardNeedle(types.NeedleId(req.FileKey), needle.Version(req.Version)) + if err != nil { + return nil, fmt.Errorf("locate in local ec 
volume: %v", err) + } + if size == types.TombstoneFileSize { + return resp, nil + } + + err = localEcVolume.DeleteNeedleFromEcx(types.NeedleId(req.FileKey)) + if err != nil { + return nil, err + } + + break + } + } + + return resp, nil +} + +// VolumeEcShardsToVolume generates the .idx, .dat files from .ecx, .ecj and .ec01 ~ .ec14 files +func (vs *VolumeServer) VolumeEcShardsToVolume(ctx context.Context, req *volume_server_pb.VolumeEcShardsToVolumeRequest) (*volume_server_pb.VolumeEcShardsToVolumeResponse, error) { + + glog.V(0).Infof("VolumeEcShardsToVolume: %v", req) + + v, found := vs.store.FindEcVolume(needle.VolumeId(req.VolumeId)) + if !found { + return nil, fmt.Errorf("ec volume %d not found", req.VolumeId) + } + baseFileName := v.FileName() + + if v.Collection != req.Collection { + return nil, fmt.Errorf("existing collection:%v unexpected input: %v", v.Collection, req.Collection) + } + + // calculate .dat file size + datFileSize, err := erasure_coding.FindDatFileSize(baseFileName) + if err != nil { + return nil, fmt.Errorf("FindDatFileSize %s: %v", baseFileName, err) + } + + // write .dat file from .ec00 ~ .ec09 files + if err := erasure_coding.WriteDatFile(baseFileName, datFileSize); err != nil { + return nil, fmt.Errorf("WriteEcFiles %s: %v", baseFileName, err) + } + + // write .idx file from .ecx and .ecj files + if err := erasure_coding.WriteIdxFileFromEcIndex(baseFileName); err != nil { + return nil, fmt.Errorf("WriteIdxFileFromEcIndex %s: %v", baseFileName, err) + } + + return &volume_server_pb.VolumeEcShardsToVolumeResponse{}, nil +} diff --git a/weed/server/volume_grpc_query.go b/weed/server/volume_grpc_query.go new file mode 100644 index 000000000..767e28e7b --- /dev/null +++ b/weed/server/volume_grpc_query.go @@ -0,0 +1,69 @@ +package weed_server + +import ( + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/operation" + "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb" + 
"github.com/chrislusf/seaweedfs/weed/query/json" + "github.com/chrislusf/seaweedfs/weed/storage/needle" + "github.com/tidwall/gjson" +) + +func (vs *VolumeServer) Query(req *volume_server_pb.QueryRequest, stream volume_server_pb.VolumeServer_QueryServer) error { + + for _, fid := range req.FromFileIds { + + vid, id_cookie, err := operation.ParseFileId(fid) + if err != nil { + glog.V(0).Infof("volume query failed to parse fid %s: %v", fid, err) + return err + } + + n := new(needle.Needle) + volumeId, _ := needle.NewVolumeId(vid) + n.ParsePath(id_cookie) + + cookie := n.Cookie + if _, err := vs.store.ReadVolumeNeedle(volumeId, n); err != nil { + glog.V(0).Infof("volume query failed to read fid %s: %v", fid, err) + return err + } + + if n.Cookie != cookie { + glog.V(0).Infof("volume query failed to read fid cookie %s: %v", fid, err) + return err + } + + if req.InputSerialization.CsvInput != nil { + + } + + if req.InputSerialization.JsonInput != nil { + + stripe := &volume_server_pb.QueriedStripe{ + Records: nil, + } + + filter := json.Query{ + Field: req.Filter.Field, + Op: req.Filter.Operand, + Value: req.Filter.Value, + } + gjson.ForEachLine(string(n.Data), func(line gjson.Result) bool { + passedFilter, values := json.QueryJson(line.Raw, req.Selections, filter) + if !passedFilter { + return true + } + stripe.Records = json.ToJson(stripe.Records, req.Selections, values) + return true + }) + err = stream.Send(stripe) + if err != nil { + return err + } + } + + } + + return nil +} diff --git a/weed/server/volume_grpc_sync.go b/weed/server/volume_grpc_sync.go deleted file mode 100644 index 5f56ec17d..000000000 --- a/weed/server/volume_grpc_sync.go +++ /dev/null @@ -1,101 +0,0 @@ -package weed_server - -import ( - "context" - "fmt" - - "github.com/chrislusf/seaweedfs/weed/glog" - "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb" - "github.com/chrislusf/seaweedfs/weed/storage" - "github.com/chrislusf/seaweedfs/weed/storage/types" -) - -func (vs *VolumeServer) 
VolumeSyncStatus(ctx context.Context, req *volume_server_pb.VolumeSyncStatusRequest) (*volume_server_pb.VolumeSyncStatusResponse, error) { - - v := vs.store.GetVolume(storage.VolumeId(req.VolumdId)) - if v == nil { - return nil, fmt.Errorf("Not Found Volume Id %d", req.VolumdId) - } - - resp := v.GetVolumeSyncStatus() - - glog.V(2).Infof("volume sync status %d", req.VolumdId) - - return resp, nil - -} - -func (vs *VolumeServer) VolumeSyncIndex(req *volume_server_pb.VolumeSyncIndexRequest, stream volume_server_pb.VolumeServer_VolumeSyncIndexServer) error { - - v := vs.store.GetVolume(storage.VolumeId(req.VolumdId)) - if v == nil { - return fmt.Errorf("Not Found Volume Id %d", req.VolumdId) - } - - content, err := v.IndexFileContent() - - if err != nil { - glog.Errorf("sync volume %d index: %v", req.VolumdId, err) - } else { - glog.V(2).Infof("sync volume %d index", req.VolumdId) - } - - const blockSizeLimit = 1024 * 1024 * 2 - for i := 0; i < len(content); i += blockSizeLimit { - blockSize := len(content) - i - if blockSize > blockSizeLimit { - blockSize = blockSizeLimit - } - resp := &volume_server_pb.VolumeSyncIndexResponse{} - resp.IndexFileContent = content[i : i+blockSize] - stream.Send(resp) - } - - return nil - -} - -func (vs *VolumeServer) VolumeSyncData(req *volume_server_pb.VolumeSyncDataRequest, stream volume_server_pb.VolumeServer_VolumeSyncDataServer) error { - - v := vs.store.GetVolume(storage.VolumeId(req.VolumdId)) - if v == nil { - return fmt.Errorf("Not Found Volume Id %d", req.VolumdId) - } - - if uint32(v.SuperBlock.CompactRevision) != req.Revision { - return fmt.Errorf("Requested Volume Revision is %d, but current revision is %d", req.Revision, v.SuperBlock.CompactRevision) - } - - content, err := storage.ReadNeedleBlob(v.DataFile(), int64(req.Offset)*types.NeedlePaddingSize, req.Size, v.Version()) - if err != nil { - return fmt.Errorf("read offset:%d size:%d", req.Offset, req.Size) - } - - id, err := types.ParseNeedleId(req.NeedleId) - if err 
!= nil { - return fmt.Errorf("parsing needle id %s: %v", req.NeedleId, err) - } - n := new(storage.Needle) - n.ParseNeedleHeader(content) - if id != n.Id { - return fmt.Errorf("Expected file entry id %d, but found %d", id, n.Id) - } - - if err != nil { - glog.Errorf("sync volume %d data: %v", req.VolumdId, err) - } - - const blockSizeLimit = 1024 * 1024 * 2 - for i := 0; i < len(content); i += blockSizeLimit { - blockSize := len(content) - i - if blockSize > blockSizeLimit { - blockSize = blockSizeLimit - } - resp := &volume_server_pb.VolumeSyncDataResponse{} - resp.FileContent = content[i : i+blockSize] - stream.Send(resp) - } - - return nil - -} diff --git a/weed/server/volume_grpc_tail.go b/weed/server/volume_grpc_tail.go new file mode 100644 index 000000000..2dde5b69c --- /dev/null +++ b/weed/server/volume_grpc_tail.go @@ -0,0 +1,136 @@ +package weed_server + +import ( + "context" + "fmt" + "time" + + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/operation" + "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb" + "github.com/chrislusf/seaweedfs/weed/storage" + "github.com/chrislusf/seaweedfs/weed/storage/needle" + "github.com/chrislusf/seaweedfs/weed/storage/super_block" +) + +func (vs *VolumeServer) VolumeTailSender(req *volume_server_pb.VolumeTailSenderRequest, stream volume_server_pb.VolumeServer_VolumeTailSenderServer) error { + + v := vs.store.GetVolume(needle.VolumeId(req.VolumeId)) + if v == nil { + return fmt.Errorf("not found volume id %d", req.VolumeId) + } + + defer glog.V(1).Infof("tailing volume %d finished", v.Id) + + lastTimestampNs := req.SinceNs + drainingSeconds := req.IdleTimeoutSeconds + + for { + lastProcessedTimestampNs, err := sendNeedlesSince(stream, v, lastTimestampNs) + if err != nil { + glog.Infof("sendNeedlesSince: %v", err) + return fmt.Errorf("streamFollow: %v", err) + } + time.Sleep(2 * time.Second) + + if req.IdleTimeoutSeconds == 0 { + lastTimestampNs = lastProcessedTimestampNs + continue 
+ } + if lastProcessedTimestampNs == lastTimestampNs { + drainingSeconds-- + if drainingSeconds <= 0 { + return nil + } + glog.V(1).Infof("tailing volume %d drains requests with %d seconds remaining", v.Id, drainingSeconds) + } else { + lastTimestampNs = lastProcessedTimestampNs + drainingSeconds = req.IdleTimeoutSeconds + glog.V(1).Infof("tailing volume %d resets draining wait time to %d seconds", v.Id, drainingSeconds) + } + + } + +} + +func sendNeedlesSince(stream volume_server_pb.VolumeServer_VolumeTailSenderServer, v *storage.Volume, lastTimestampNs uint64) (lastProcessedTimestampNs uint64, err error) { + + foundOffset, isLastOne, err := v.BinarySearchByAppendAtNs(lastTimestampNs) + if err != nil { + return 0, fmt.Errorf("fail to locate by appendAtNs %d: %s", lastTimestampNs, err) + } + + // log.Printf("reading ts %d offset %d isLast %v", lastTimestampNs, foundOffset, isLastOne) + + if isLastOne { + // need to heart beat to the client to ensure the connection health + sendErr := stream.Send(&volume_server_pb.VolumeTailSenderResponse{IsLastChunk: true}) + return lastTimestampNs, sendErr + } + + scanner := &VolumeFileScanner4Tailing{ + stream: stream, + } + + err = storage.ScanVolumeFileFrom(v.Version(), v.DataBackend, foundOffset.ToAcutalOffset(), scanner) + + return scanner.lastProcessedTimestampNs, err + +} + +func (vs *VolumeServer) VolumeTailReceiver(ctx context.Context, req *volume_server_pb.VolumeTailReceiverRequest) (*volume_server_pb.VolumeTailReceiverResponse, error) { + + resp := &volume_server_pb.VolumeTailReceiverResponse{} + + v := vs.store.GetVolume(needle.VolumeId(req.VolumeId)) + if v == nil { + return resp, fmt.Errorf("receiver not found volume id %d", req.VolumeId) + } + + defer glog.V(1).Infof("receive tailing volume %d finished", v.Id) + + return resp, operation.TailVolumeFromSource(req.SourceVolumeServer, vs.grpcDialOption, v.Id, req.SinceNs, int(req.IdleTimeoutSeconds), func(n *needle.Needle) error { + _, err := 
vs.store.WriteVolumeNeedle(v.Id, n, false) + return err + }) + +} + +// generate the volume idx +type VolumeFileScanner4Tailing struct { + stream volume_server_pb.VolumeServer_VolumeTailSenderServer + lastProcessedTimestampNs uint64 +} + +func (scanner *VolumeFileScanner4Tailing) VisitSuperBlock(superBlock super_block.SuperBlock) error { + return nil + +} +func (scanner *VolumeFileScanner4Tailing) ReadNeedleBody() bool { + return true +} + +func (scanner *VolumeFileScanner4Tailing) VisitNeedle(n *needle.Needle, offset int64, needleHeader, needleBody []byte) error { + isLastChunk := false + + // need to send body by chunks + for i := 0; i < len(needleBody); i += BufferSizeLimit { + stopOffset := i + BufferSizeLimit + if stopOffset >= len(needleBody) { + isLastChunk = true + stopOffset = len(needleBody) + } + + sendErr := scanner.stream.Send(&volume_server_pb.VolumeTailSenderResponse{ + NeedleHeader: needleHeader, + NeedleBody: needleBody[i:stopOffset], + IsLastChunk: isLastChunk, + }) + if sendErr != nil { + return sendErr + } + } + + scanner.lastProcessedTimestampNs = n.AppendAtNs + return nil +} diff --git a/weed/server/volume_grpc_tier_download.go b/weed/server/volume_grpc_tier_download.go new file mode 100644 index 000000000..7b3982e40 --- /dev/null +++ b/weed/server/volume_grpc_tier_download.go @@ -0,0 +1,85 @@ +package weed_server + +import ( + "fmt" + "time" + + "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb" + "github.com/chrislusf/seaweedfs/weed/storage/backend" + "github.com/chrislusf/seaweedfs/weed/storage/needle" +) + +// VolumeTierMoveDatFromRemote copy dat file from a remote tier to local volume server +func (vs *VolumeServer) VolumeTierMoveDatFromRemote(req *volume_server_pb.VolumeTierMoveDatFromRemoteRequest, stream volume_server_pb.VolumeServer_VolumeTierMoveDatFromRemoteServer) error { + + // find existing volume + v := vs.store.GetVolume(needle.VolumeId(req.VolumeId)) + if v == nil { + return fmt.Errorf("volume %d not found", 
req.VolumeId) + } + + // verify the collection + if v.Collection != req.Collection { + return fmt.Errorf("existing collection:%v unexpected input: %v", v.Collection, req.Collection) + } + + // locate the disk file + storageName, storageKey := v.RemoteStorageNameKey() + if storageName == "" || storageKey == "" { + return fmt.Errorf("volume %d is already on local disk", req.VolumeId) + } + + // check whether the local .dat already exists + _, ok := v.DataBackend.(*backend.DiskFile) + if ok { + return fmt.Errorf("volume %d is already on local disk", req.VolumeId) + } + + // check valid storage backend type + backendStorage, found := backend.BackendStorages[storageName] + if !found { + var keys []string + for key := range backend.BackendStorages { + keys = append(keys, key) + } + return fmt.Errorf("remote storage %s not found from suppported: %v", storageName, keys) + } + + startTime := time.Now() + fn := func(progressed int64, percentage float32) error { + now := time.Now() + if now.Sub(startTime) < time.Second { + return nil + } + startTime = now + return stream.Send(&volume_server_pb.VolumeTierMoveDatFromRemoteResponse{ + Processed: progressed, + ProcessedPercentage: percentage, + }) + } + // copy the data file + _, err := backendStorage.DownloadFile(v.FileName()+".dat", storageKey, fn) + if err != nil { + return fmt.Errorf("backend %s copy file %s: %v", storageName, v.FileName()+".dat", err) + } + + if req.KeepRemoteDatFile { + return nil + } + + // remove remote file + if err := backendStorage.DeleteFile(storageKey); err != nil { + return fmt.Errorf("volume %d fail to delete remote file %s: %v", v.Id, storageKey, err) + } + + // forget remote file + v.GetVolumeInfo().Files = v.GetVolumeInfo().Files[1:] + if err := v.SaveVolumeInfo(); err != nil { + return fmt.Errorf("volume %d fail to save remote file info: %v", v.Id, err) + } + + v.DataBackend.Close() + v.DataBackend = nil + + return nil +} diff --git a/weed/server/volume_grpc_tier_upload.go 
b/weed/server/volume_grpc_tier_upload.go new file mode 100644 index 000000000..c9694df59 --- /dev/null +++ b/weed/server/volume_grpc_tier_upload.go @@ -0,0 +1,100 @@ +package weed_server + +import ( + "fmt" + "os" + "time" + + "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb" + "github.com/chrislusf/seaweedfs/weed/storage/backend" + "github.com/chrislusf/seaweedfs/weed/storage/needle" +) + +// VolumeTierMoveDatToRemote copy dat file to a remote tier +func (vs *VolumeServer) VolumeTierMoveDatToRemote(req *volume_server_pb.VolumeTierMoveDatToRemoteRequest, stream volume_server_pb.VolumeServer_VolumeTierMoveDatToRemoteServer) error { + + // find existing volume + v := vs.store.GetVolume(needle.VolumeId(req.VolumeId)) + if v == nil { + return fmt.Errorf("volume %d not found", req.VolumeId) + } + + // verify the collection + if v.Collection != req.Collection { + return fmt.Errorf("existing collection:%v unexpected input: %v", v.Collection, req.Collection) + } + + // locate the disk file + diskFile, ok := v.DataBackend.(*backend.DiskFile) + if !ok { + return fmt.Errorf("volume %d is not on local disk", req.VolumeId) + } + + // check valid storage backend type + backendStorage, found := backend.BackendStorages[req.DestinationBackendName] + if !found { + var keys []string + for key := range backend.BackendStorages { + keys = append(keys, key) + } + return fmt.Errorf("destination %s not found, suppported: %v", req.DestinationBackendName, keys) + } + + // check whether the existing backend storage is the same as requested + // if same, skip + backendType, backendId := backend.BackendNameToTypeId(req.DestinationBackendName) + for _, remoteFile := range v.GetVolumeInfo().GetFiles() { + if remoteFile.BackendType == backendType && remoteFile.BackendId == backendId { + return fmt.Errorf("destination %s already exists", req.DestinationBackendName) + } + } + + startTime := time.Now() + fn := func(progressed int64, percentage float32) error { + now := time.Now() + if 
now.Sub(startTime) < time.Second { + return nil + } + startTime = now + return stream.Send(&volume_server_pb.VolumeTierMoveDatToRemoteResponse{ + Processed: progressed, + ProcessedPercentage: percentage, + }) + } + + // remember the file original source + attributes := make(map[string]string) + attributes["volumeId"] = v.Id.String() + attributes["collection"] = v.Collection + attributes["ext"] = ".dat" + // copy the data file + key, size, err := backendStorage.CopyFile(diskFile.File, attributes, fn) + if err != nil { + return fmt.Errorf("backend %s copy file %s: %v", req.DestinationBackendName, diskFile.Name(), err) + } + + // save the remote file to volume tier info + v.GetVolumeInfo().Files = append(v.GetVolumeInfo().GetFiles(), &volume_server_pb.RemoteFile{ + BackendType: backendType, + BackendId: backendId, + Key: key, + Offset: 0, + FileSize: uint64(size), + ModifiedTime: uint64(time.Now().Unix()), + Extension: ".dat", + }) + + if err := v.SaveVolumeInfo(); err != nil { + return fmt.Errorf("volume %d fail to save remote file info: %v", v.Id, err) + } + + if err := v.LoadRemoteFile(); err != nil { + return fmt.Errorf("volume %d fail to load remote file: %v", v.Id, err) + } + + if !req.KeepLocalDatFile { + os.Remove(v.FileName() + ".dat") + } + + return nil +} diff --git a/weed/server/volume_grpc_vacuum.go b/weed/server/volume_grpc_vacuum.go index f0c87b582..b87de4b5b 100644 --- a/weed/server/volume_grpc_vacuum.go +++ b/weed/server/volume_grpc_vacuum.go @@ -5,19 +5,19 @@ import ( "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb" - "github.com/chrislusf/seaweedfs/weed/storage" + "github.com/chrislusf/seaweedfs/weed/storage/needle" ) func (vs *VolumeServer) VacuumVolumeCheck(ctx context.Context, req *volume_server_pb.VacuumVolumeCheckRequest) (*volume_server_pb.VacuumVolumeCheckResponse, error) { resp := &volume_server_pb.VacuumVolumeCheckResponse{} - garbageRatio, err := 
vs.store.CheckCompactVolume(storage.VolumeId(req.VolumdId)) + garbageRatio, err := vs.store.CheckCompactVolume(needle.VolumeId(req.VolumeId)) resp.GarbageRatio = garbageRatio if err != nil { - glog.V(3).Infof("check volume %d: %v", req.VolumdId, err) + glog.V(3).Infof("check volume %d: %v", req.VolumeId, err) } return resp, err @@ -28,12 +28,12 @@ func (vs *VolumeServer) VacuumVolumeCompact(ctx context.Context, req *volume_ser resp := &volume_server_pb.VacuumVolumeCompactResponse{} - err := vs.store.CompactVolume(storage.VolumeId(req.VolumdId), req.Preallocate) + err := vs.store.CompactVolume(needle.VolumeId(req.VolumeId), req.Preallocate, vs.compactionBytePerSecond) if err != nil { - glog.Errorf("compact volume %d: %v", req.VolumdId, err) + glog.Errorf("compact volume %d: %v", req.VolumeId, err) } else { - glog.V(1).Infof("compact volume %d", req.VolumdId) + glog.V(1).Infof("compact volume %d", req.VolumeId) } return resp, err @@ -44,12 +44,17 @@ func (vs *VolumeServer) VacuumVolumeCommit(ctx context.Context, req *volume_serv resp := &volume_server_pb.VacuumVolumeCommitResponse{} - err := vs.store.CommitCompactVolume(storage.VolumeId(req.VolumdId)) + err := vs.store.CommitCompactVolume(needle.VolumeId(req.VolumeId)) if err != nil { - glog.Errorf("commit volume %d: %v", req.VolumdId, err) + glog.Errorf("commit volume %d: %v", req.VolumeId, err) } else { - glog.V(1).Infof("commit volume %d", req.VolumdId) + glog.V(1).Infof("commit volume %d", req.VolumeId) + } + if err == nil { + if vs.store.GetVolume(needle.VolumeId(req.VolumeId)).IsReadOnly() { + resp.IsReadOnly = true + } } return resp, err @@ -60,12 +65,12 @@ func (vs *VolumeServer) VacuumVolumeCleanup(ctx context.Context, req *volume_ser resp := &volume_server_pb.VacuumVolumeCleanupResponse{} - err := vs.store.CommitCleanupVolume(storage.VolumeId(req.VolumdId)) + err := vs.store.CommitCleanupVolume(needle.VolumeId(req.VolumeId)) if err != nil { - glog.Errorf("cleanup volume %d: %v", req.VolumdId, err) + 
glog.Errorf("cleanup volume %d: %v", req.VolumeId, err) } else { - glog.V(1).Infof("cleanup volume %d", req.VolumdId) + glog.V(1).Infof("cleanup volume %d", req.VolumeId) } return resp, err diff --git a/weed/server/volume_server.go b/weed/server/volume_server.go index 0914e81b0..b7ed81be0 100644 --- a/weed/server/volume_server.go +++ b/weed/server/volume_server.go @@ -1,55 +1,84 @@ package weed_server import ( + "fmt" "net/http" + "google.golang.org/grpc" + + "github.com/chrislusf/seaweedfs/weed/stats" + "github.com/chrislusf/seaweedfs/weed/util" + "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/security" "github.com/chrislusf/seaweedfs/weed/storage" ) type VolumeServer struct { - MasterNodes []string - currentMaster string - pulseSeconds int - dataCenter string - rack string - store *storage.Store - guard *security.Guard + SeedMasterNodes []string + currentMaster string + pulseSeconds int + dataCenter string + rack string + store *storage.Store + guard *security.Guard + grpcDialOption grpc.DialOption - needleMapKind storage.NeedleMapType - FixJpgOrientation bool - ReadRedirect bool + needleMapKind storage.NeedleMapType + ReadRedirect bool + compactionBytePerSecond int64 + MetricsAddress string + MetricsIntervalSec int + fileSizeLimitBytes int64 } func NewVolumeServer(adminMux, publicMux *http.ServeMux, ip string, port int, publicUrl string, - folders []string, maxCounts []int, + folders []string, maxCounts []int, minFreeSpacePercents []float32, needleMapKind storage.NeedleMapType, masterNodes []string, pulseSeconds int, dataCenter string, rack string, whiteList []string, - fixJpgOrientation bool, - readRedirect bool) *VolumeServer { + readRedirect bool, + compactionMBPerSecond int, + fileSizeLimitMB int, +) *VolumeServer { + + v := util.GetViper() + signingKey := v.GetString("jwt.signing.key") + v.SetDefault("jwt.signing.expires_after_seconds", 10) + expiresAfterSec := v.GetInt("jwt.signing.expires_after_seconds") + enableUiAccess 
:= v.GetBool("access.ui") + + readSigningKey := v.GetString("jwt.signing.read.key") + v.SetDefault("jwt.signing.read.expires_after_seconds", 60) + readExpiresAfterSec := v.GetInt("jwt.signing.read.expires_after_seconds") + vs := &VolumeServer{ - pulseSeconds: pulseSeconds, - dataCenter: dataCenter, - rack: rack, - needleMapKind: needleMapKind, - FixJpgOrientation: fixJpgOrientation, - ReadRedirect: readRedirect, + pulseSeconds: pulseSeconds, + dataCenter: dataCenter, + rack: rack, + needleMapKind: needleMapKind, + ReadRedirect: readRedirect, + grpcDialOption: security.LoadClientTLS(util.GetViper(), "grpc.volume"), + compactionBytePerSecond: int64(compactionMBPerSecond) * 1024 * 1024, + fileSizeLimitBytes: int64(fileSizeLimitMB) * 1024 * 1024, } - vs.MasterNodes = masterNodes - vs.store = storage.NewStore(port, ip, publicUrl, folders, maxCounts, vs.needleMapKind) - - vs.guard = security.NewGuard(whiteList, "") + vs.SeedMasterNodes = masterNodes + vs.store = storage.NewStore(vs.grpcDialOption, port, ip, publicUrl, folders, maxCounts, minFreeSpacePercents, vs.needleMapKind) + vs.guard = security.NewGuard(whiteList, signingKey, expiresAfterSec, readSigningKey, readExpiresAfterSec) handleStaticResources(adminMux) - adminMux.HandleFunc("/ui/index.html", vs.uiStatusHandler) - adminMux.HandleFunc("/status", vs.guard.WhiteList(vs.statusHandler)) - adminMux.HandleFunc("/stats/counter", vs.guard.WhiteList(statsCounterHandler)) - adminMux.HandleFunc("/stats/memory", vs.guard.WhiteList(statsMemoryHandler)) - adminMux.HandleFunc("/stats/disk", vs.guard.WhiteList(vs.statsDiskHandler)) + if signingKey == "" || enableUiAccess { + // only expose the volume server details for safe environments + adminMux.HandleFunc("/ui/index.html", vs.uiStatusHandler) + adminMux.HandleFunc("/status", vs.guard.WhiteList(vs.statusHandler)) + /* + adminMux.HandleFunc("/stats/counter", vs.guard.WhiteList(statsCounterHandler)) + adminMux.HandleFunc("/stats/memory", vs.guard.WhiteList(statsMemoryHandler)) 
+ adminMux.HandleFunc("/stats/disk", vs.guard.WhiteList(vs.statsDiskHandler)) + */ + } adminMux.HandleFunc("/", vs.privateStoreHandler) if publicMux != adminMux { // separated admin and public port @@ -58,6 +87,11 @@ func NewVolumeServer(adminMux, publicMux *http.ServeMux, ip string, } go vs.heartbeat() + hostAddress := fmt.Sprintf("%s:%d", ip, port) + go stats.LoopPushingMetric("volumeServer", hostAddress, stats.VolumeServerGather, + func() (addr string, intervalSeconds int) { + return vs.MetricsAddress, vs.MetricsIntervalSec + }) return vs } @@ -67,7 +101,3 @@ func (vs *VolumeServer) Shutdown() { vs.store.Close() glog.V(0).Infoln("Shut down successfully!") } - -func (vs *VolumeServer) jwt(fileId string) security.EncodedJwt { - return security.GenJwt(vs.guard.SecretKey, fileId) -} diff --git a/weed/server/volume_server_handlers.go b/weed/server/volume_server_handlers.go index 77b1274fd..14ad27d42 100644 --- a/weed/server/volume_server_handlers.go +++ b/weed/server/volume_server_handlers.go @@ -2,7 +2,10 @@ package weed_server import ( "net/http" + "strings" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/security" "github.com/chrislusf/seaweedfs/weed/stats" ) @@ -45,3 +48,47 @@ func (vs *VolumeServer) publicReadOnlyHandler(w http.ResponseWriter, r *http.Req vs.GetOrHeadHandler(w, r) } } + +func (vs *VolumeServer) maybeCheckJwtAuthorization(r *http.Request, vid, fid string, isWrite bool) bool { + + var signingKey security.SigningKey + + if isWrite { + if len(vs.guard.SigningKey) == 0 { + return true + } else { + signingKey = vs.guard.SigningKey + } + } else { + if len(vs.guard.ReadSigningKey) == 0 { + return true + } else { + signingKey = vs.guard.ReadSigningKey + } + } + + tokenStr := security.GetJwt(r) + if tokenStr == "" { + glog.V(1).Infof("missing jwt from %s", r.RemoteAddr) + return false + } + + token, err := security.DecodeJwt(signingKey, tokenStr) + if err != nil { + glog.V(1).Infof("jwt verification error from %s: %v", 
r.RemoteAddr, err) + return false + } + if !token.Valid { + glog.V(1).Infof("jwt invalid from %s: %v", r.RemoteAddr, tokenStr) + return false + } + + if sc, ok := token.Claims.(*security.SeaweedFileIdClaims); ok { + if sepIndex := strings.LastIndex(fid, "_"); sepIndex > 0 { + fid = fid[:sepIndex] + } + return sc.Fid == vid+","+fid + } + glog.V(1).Infof("unexpected jwt from %s: %v", r.RemoteAddr, tokenStr) + return false +} diff --git a/weed/server/volume_server_handlers_admin.go b/weed/server/volume_server_handlers_admin.go index 25b6582f7..34655d833 100644 --- a/weed/server/volume_server_handlers_admin.go +++ b/weed/server/volume_server_handlers_admin.go @@ -11,14 +11,21 @@ import ( func (vs *VolumeServer) statusHandler(w http.ResponseWriter, r *http.Request) { m := make(map[string]interface{}) - m["Version"] = util.VERSION - m["Volumes"] = vs.store.Status() + m["Version"] = util.Version() + var ds []*volume_server_pb.DiskStatus + for _, loc := range vs.store.Locations { + if dir, e := filepath.Abs(loc.Directory); e == nil { + ds = append(ds, stats.NewDiskStatus(dir)) + } + } + m["DiskStatuses"] = ds + m["Volumes"] = vs.store.VolumeInfos() writeJsonQuiet(w, r, http.StatusOK, m) } func (vs *VolumeServer) statsDiskHandler(w http.ResponseWriter, r *http.Request) { m := make(map[string]interface{}) - m["Version"] = util.VERSION + m["Version"] = util.Version() var ds []*volume_server_pb.DiskStatus for _, loc := range vs.store.Locations { if dir, e := filepath.Abs(loc.Directory); e == nil { diff --git a/weed/server/volume_server_handlers_read.go b/weed/server/volume_server_handlers_read.go index 92c728141..7ef1170b3 100644 --- a/weed/server/volume_server_handlers_read.go +++ b/weed/server/volume_server_handlers_read.go @@ -2,45 +2,59 @@ package weed_server import ( "bytes" + "encoding/json" + "errors" + "fmt" "io" "mime" - "mime/multipart" "net/http" "net/url" - "path" + "path/filepath" "strconv" "strings" "time" - "encoding/json" - 
"github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/images" "github.com/chrislusf/seaweedfs/weed/operation" - "github.com/chrislusf/seaweedfs/weed/storage" + "github.com/chrislusf/seaweedfs/weed/stats" + "github.com/chrislusf/seaweedfs/weed/storage/needle" "github.com/chrislusf/seaweedfs/weed/util" ) var fileNameEscaper = strings.NewReplacer("\\", "\\\\", "\"", "\\\"") func (vs *VolumeServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request) { - n := new(storage.Needle) + + stats.VolumeServerRequestCounter.WithLabelValues("get").Inc() + start := time.Now() + defer func() { stats.VolumeServerRequestHistogram.WithLabelValues("get").Observe(time.Since(start).Seconds()) }() + + n := new(needle.Needle) vid, fid, filename, ext, _ := parseURLPath(r.URL.Path) - volumeId, err := storage.NewVolumeId(vid) + + if !vs.maybeCheckJwtAuthorization(r, vid, fid, false) { + writeJsonError(w, r, http.StatusUnauthorized, errors.New("wrong jwt")) + return + } + + volumeId, err := needle.NewVolumeId(vid) if err != nil { - glog.V(2).Infoln("parsing error:", err, r.URL.Path) + glog.V(2).Infof("parsing vid %s: %v", r.URL.Path, err) w.WriteHeader(http.StatusBadRequest) return } err = n.ParsePath(fid) if err != nil { - glog.V(2).Infoln("parsing fid error:", err, r.URL.Path) + glog.V(2).Infof("parsing fid %s: %v", r.URL.Path, err) w.WriteHeader(http.StatusBadRequest) return } - glog.V(4).Infoln("volume", volumeId, "reading", n) - if !vs.store.HasVolume(volumeId) { + // glog.V(4).Infoln("volume", volumeId, "reading", n) + hasVolume := vs.store.HasVolume(volumeId) + _, hasEcVolume := vs.store.FindEcVolume(volumeId) + if !hasVolume && !hasEcVolume { if !vs.ReadRedirect { glog.V(2).Infoln("volume is not local:", err, r.URL.Path) w.WriteHeader(http.StatusNotFound) @@ -50,7 +64,7 @@ func (vs *VolumeServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request) glog.V(2).Infoln("volume", volumeId, "found on", lookupResult, "error", err) if err == nil && 
len(lookupResult.Locations) > 0 { u, _ := url.Parse(util.NormalizeUrl(lookupResult.Locations[0].PublicUrl)) - u.Path = r.URL.Path + u.Path = fmt.Sprintf("%s/%s,%s", u.Path, vid, fid) arg := url.Values{} if c := r.FormValue("collection"); c != "" { arg.Set("collection", c) @@ -65,10 +79,15 @@ func (vs *VolumeServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request) return } cookie := n.Cookie - count, e := vs.store.ReadVolumeNeedle(volumeId, n) - glog.V(4).Infoln("read bytes", count, "error", e) - if e != nil || count < 0 { - glog.V(0).Infof("read %s error: %v", r.URL.Path, e) + var count int + if hasVolume { + count, err = vs.store.ReadVolumeNeedle(volumeId, n) + } else if hasEcVolume { + count, err = vs.store.ReadEcShardNeedle(volumeId, n) + } + // glog.V(4).Infoln("read bytes", count, "error", err) + if err != nil || count < 0 { + glog.V(0).Infof("read %s isNormalVolume %v error: %v", r.URL.Path, hasVolume, err) w.WriteHeader(http.StatusNotFound) return } @@ -92,11 +111,7 @@ func (vs *VolumeServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request) w.WriteHeader(http.StatusNotModified) return } - if r.Header.Get("ETag-MD5") == "True" { - setEtag(w, n.MD5()) - } else { - setEtag(w, n.Etag()) - } + setEtag(w, n.Etag()) if n.HasPairs() { pairMap := make(map[string]string) @@ -109,14 +124,14 @@ func (vs *VolumeServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request) } } - if vs.tryHandleChunkedFile(n, filename, w, r) { + if vs.tryHandleChunkedFile(n, filename, ext, w, r) { return } if n.NameSize > 0 && filename == "" { filename = string(n.Name) if ext == "" { - ext = path.Ext(filename) + ext = filepath.Ext(filename) } } mtype := "" @@ -127,13 +142,19 @@ func (vs *VolumeServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request) } } - if ext != ".gz" { - if n.IsGzipped() { - if strings.Contains(r.Header.Get("Accept-Encoding"), "gzip") { + if ext != ".gz" && ext != ".zst" { + if n.IsCompressed() { + if _, _, _, shouldResize := 
shouldResizeImages(ext, r); shouldResize { + if n.Data, err = util.DecompressData(n.Data); err != nil { + glog.V(0).Infoln("ungzip error:", err, r.URL.Path) + } + } else if strings.Contains(r.Header.Get("Accept-Encoding"), "zstd") && util.IsZstdContent(n.Data) { + w.Header().Set("Content-Encoding", "zstd") + } else if strings.Contains(r.Header.Get("Accept-Encoding"), "gzip") && util.IsGzippedContent(n.Data) { w.Header().Set("Content-Encoding", "gzip") } else { - if n.Data, err = operation.UnGzipData(n.Data); err != nil { - glog.V(0).Infoln("ungzip error:", err, r.URL.Path) + if n.Data, err = util.DecompressData(n.Data); err != nil { + glog.V(0).Infoln("uncompress error:", err, r.URL.Path) } } } @@ -146,12 +167,12 @@ func (vs *VolumeServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request) } } -func (vs *VolumeServer) tryHandleChunkedFile(n *storage.Needle, fileName string, w http.ResponseWriter, r *http.Request) (processed bool) { +func (vs *VolumeServer) tryHandleChunkedFile(n *needle.Needle, fileName string, ext string, w http.ResponseWriter, r *http.Request) (processed bool) { if !n.IsChunkedManifest() || r.URL.Query().Get("cm") == "false" { return false } - chunkManifest, e := operation.LoadChunkManifest(n.Data, n.IsGzipped()) + chunkManifest, e := operation.LoadChunkManifest(n.Data, n.IsCompressed()) if e != nil { glog.V(0).Infof("load chunked manifest (%s) error: %v", r.URL.Path, e) return false @@ -160,7 +181,9 @@ func (vs *VolumeServer) tryHandleChunkedFile(n *storage.Needle, fileName string, fileName = chunkManifest.Name } - ext := path.Ext(fileName) + if ext == "" { + ext = filepath.Ext(fileName) + } mType := "" if chunkManifest.Mime != "" { @@ -172,10 +195,7 @@ func (vs *VolumeServer) tryHandleChunkedFile(n *storage.Needle, fileName string, w.Header().Set("X-File-Store", "chunked") - chunkedFileReader := &operation.ChunkedFileReader{ - Manifest: chunkManifest, - Master: vs.GetMaster(), - } + chunkedFileReader := 
operation.NewChunkedFileReader(chunkManifest.Chunks, vs.GetMaster()) defer chunkedFileReader.Close() rs := conditionallyResizeImages(chunkedFileReader, ext, r) @@ -188,132 +208,56 @@ func (vs *VolumeServer) tryHandleChunkedFile(n *storage.Needle, fileName string, func conditionallyResizeImages(originalDataReaderSeeker io.ReadSeeker, ext string, r *http.Request) io.ReadSeeker { rs := originalDataReaderSeeker + + width, height, mode, shouldResize := shouldResizeImages(ext, r) + if shouldResize { + rs, _, _ = images.Resized(ext, originalDataReaderSeeker, width, height, mode) + } + return rs +} + +func shouldResizeImages(ext string, r *http.Request) (width, height int, mode string, shouldResize bool) { if len(ext) > 0 { ext = strings.ToLower(ext) } if ext == ".png" || ext == ".jpg" || ext == ".jpeg" || ext == ".gif" { - width, height := 0, 0 if r.FormValue("width") != "" { width, _ = strconv.Atoi(r.FormValue("width")) } if r.FormValue("height") != "" { height, _ = strconv.Atoi(r.FormValue("height")) } - rs, _, _ = images.Resized(ext, originalDataReaderSeeker, width, height, r.FormValue("mode")) } - return rs + mode = r.FormValue("mode") + shouldResize = width > 0 || height > 0 + return } func writeResponseContent(filename, mimeType string, rs io.ReadSeeker, w http.ResponseWriter, r *http.Request) error { totalSize, e := rs.Seek(0, 2) if mimeType == "" { - if ext := path.Ext(filename); ext != "" { + if ext := filepath.Ext(filename); ext != "" { mimeType = mime.TypeByExtension(ext) } } if mimeType != "" { w.Header().Set("Content-Type", mimeType) } - if filename != "" { - contentDisposition := "inline" - if r.FormValue("dl") != "" { - if dl, _ := strconv.ParseBool(r.FormValue("dl")); dl { - contentDisposition = "attachment" - } - } - w.Header().Set("Content-Disposition", contentDisposition+`; filename="`+fileNameEscaper.Replace(filename)+`"`) - } w.Header().Set("Accept-Ranges", "bytes") + if r.Method == "HEAD" { w.Header().Set("Content-Length", 
strconv.FormatInt(totalSize, 10)) return nil } - rangeReq := r.Header.Get("Range") - if rangeReq == "" { - w.Header().Set("Content-Length", strconv.FormatInt(totalSize, 10)) - if _, e = rs.Seek(0, 0); e != nil { - return e - } - _, e = io.Copy(w, rs) - return e - } - //the rest is dealing with partial content request - //mostly copy from src/pkg/net/http/fs.go - ranges, err := parseRange(rangeReq, totalSize) - if err != nil { - http.Error(w, err.Error(), http.StatusRequestedRangeNotSatisfiable) - return nil - } - if sumRangesSize(ranges) > totalSize { - // The total number of bytes in all the ranges - // is larger than the size of the file by - // itself, so this is probably an attack, or a - // dumb client. Ignore the range request. - return nil - } - if len(ranges) == 0 { - return nil - } - if len(ranges) == 1 { - // RFC 2616, Section 14.16: - // "When an HTTP message includes the content of a single - // range (for example, a response to a request for a - // single range, or to a request for a set of ranges - // that overlap without any holes), this content is - // transmitted with a Content-Range header, and a - // Content-Length header showing the number of bytes - // actually transferred. - // ... - // A response to a request for a single range MUST NOT - // be sent using the multipart/byteranges media type." 
- ra := ranges[0] - w.Header().Set("Content-Length", strconv.FormatInt(ra.length, 10)) - w.Header().Set("Content-Range", ra.contentRange(totalSize)) - w.WriteHeader(http.StatusPartialContent) - if _, e = rs.Seek(ra.start, 0); e != nil { + adjustHeadersAfterHEAD(w, r, filename) + + processRangeRequest(r, w, totalSize, mimeType, func(writer io.Writer, offset int64, size int64) error { + if _, e = rs.Seek(offset, 0); e != nil { return e } - - _, e = io.CopyN(w, rs, ra.length) + _, e = io.CopyN(writer, rs, size) return e - } - // process multiple ranges - for _, ra := range ranges { - if ra.start > totalSize { - http.Error(w, "Out of Range", http.StatusRequestedRangeNotSatisfiable) - return nil - } - } - sendSize := rangesMIMESize(ranges, mimeType, totalSize) - pr, pw := io.Pipe() - mw := multipart.NewWriter(pw) - w.Header().Set("Content-Type", "multipart/byteranges; boundary="+mw.Boundary()) - sendContent := pr - defer pr.Close() // cause writing goroutine to fail and exit if CopyN doesn't finish. 
- go func() { - for _, ra := range ranges { - part, e := mw.CreatePart(ra.mimeHeader(mimeType, totalSize)) - if e != nil { - pw.CloseWithError(e) - return - } - if _, e = rs.Seek(ra.start, 0); e != nil { - pw.CloseWithError(e) - return - } - if _, e = io.CopyN(part, rs, ra.length); e != nil { - pw.CloseWithError(e) - return - } - } - mw.Close() - pw.Close() - }() - if w.Header().Get("Content-Encoding") == "" { - w.Header().Set("Content-Length", strconv.FormatInt(sendSize, 10)) - } - w.WriteHeader(http.StatusPartialContent) - _, e = io.CopyN(w, sendContent, sendSize) - return e + }) + return nil } diff --git a/weed/server/volume_server_handlers_ui.go b/weed/server/volume_server_handlers_ui.go index b3d9a21fd..8b2027e7b 100644 --- a/weed/server/volume_server_handlers_ui.go +++ b/weed/server/volume_server_handlers_ui.go @@ -8,6 +8,7 @@ import ( "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb" ui "github.com/chrislusf/seaweedfs/weed/server/volume_server_ui" "github.com/chrislusf/seaweedfs/weed/stats" + "github.com/chrislusf/seaweedfs/weed/storage" "github.com/chrislusf/seaweedfs/weed/util" ) @@ -20,17 +21,30 @@ func (vs *VolumeServer) uiStatusHandler(w http.ResponseWriter, r *http.Request) ds = append(ds, stats.NewDiskStatus(dir)) } } + volumeInfos := vs.store.VolumeInfos() + var normalVolumeInfos, remoteVolumeInfos []*storage.VolumeInfo + for _, vinfo := range volumeInfos { + if vinfo.IsRemote() { + remoteVolumeInfos = append(remoteVolumeInfos, vinfo) + } else { + normalVolumeInfos = append(normalVolumeInfos, vinfo) + } + } args := struct { - Version string - Masters []string - Volumes interface{} - DiskStatuses interface{} - Stats interface{} - Counters *stats.ServerStats + Version string + Masters []string + Volumes interface{} + EcVolumes interface{} + RemoteVolumes interface{} + DiskStatuses interface{} + Stats interface{} + Counters *stats.ServerStats }{ - util.VERSION, - vs.MasterNodes, - vs.store.Status(), + util.Version(), + vs.SeedMasterNodes, + 
normalVolumeInfos, + vs.store.EcVolumes(), + remoteVolumeInfos, ds, infos, serverStats, diff --git a/weed/server/volume_server_handlers_write.go b/weed/server/volume_server_handlers_write.go index fd93142e1..5ece46ed0 100644 --- a/weed/server/volume_server_handlers_write.go +++ b/weed/server/volume_server_handlers_write.go @@ -10,56 +10,99 @@ import ( "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/operation" - "github.com/chrislusf/seaweedfs/weed/storage" + "github.com/chrislusf/seaweedfs/weed/stats" + "github.com/chrislusf/seaweedfs/weed/storage/needle" "github.com/chrislusf/seaweedfs/weed/topology" ) func (vs *VolumeServer) PostHandler(w http.ResponseWriter, r *http.Request) { + + stats.VolumeServerRequestCounter.WithLabelValues("post").Inc() + start := time.Now() + defer func() { + stats.VolumeServerRequestHistogram.WithLabelValues("post").Observe(time.Since(start).Seconds()) + }() + if e := r.ParseForm(); e != nil { glog.V(0).Infoln("form parse error:", e) writeJsonError(w, r, http.StatusBadRequest, e) return } - vid, _, _, _, _ := parseURLPath(r.URL.Path) - volumeId, ve := storage.NewVolumeId(vid) + + vid, fid, _, _, _ := parseURLPath(r.URL.Path) + volumeId, ve := needle.NewVolumeId(vid) if ve != nil { glog.V(0).Infoln("NewVolumeId error:", ve) writeJsonError(w, r, http.StatusBadRequest, ve) return } - needle, originalSize, ne := storage.CreateNeedleFromRequest(r, vs.FixJpgOrientation) + + if !vs.maybeCheckJwtAuthorization(r, vid, fid, true) { + writeJsonError(w, r, http.StatusUnauthorized, errors.New("wrong jwt")) + return + } + + reqNeedle, originalSize, ne := needle.CreateNeedleFromRequest(r, vs.fileSizeLimitBytes) if ne != nil { writeJsonError(w, r, http.StatusBadRequest, ne) return } ret := operation.UploadResult{} - _, errorStatus := topology.ReplicatedWrite(vs.GetMaster(), - vs.store, volumeId, needle, r) + isUnchanged, writeError := topology.ReplicatedWrite(vs.GetMaster(), vs.store, volumeId, reqNeedle, r) + + // http 
204 status code does not allow body + if writeError == nil && isUnchanged { + setEtag(w, reqNeedle.Etag()) + w.WriteHeader(http.StatusNoContent) + return + } + httpStatus := http.StatusCreated - if errorStatus != "" { + if writeError != nil { httpStatus = http.StatusInternalServerError - ret.Error = errorStatus + ret.Error = writeError.Error() } - if needle.HasName() { - ret.Name = string(needle.Name) + if reqNeedle.HasName() { + ret.Name = string(reqNeedle.Name) } ret.Size = uint32(originalSize) - ret.ETag = needle.Etag() + ret.ETag = reqNeedle.Etag() + ret.Mime = string(reqNeedle.Mime) setEtag(w, ret.ETag) writeJsonQuiet(w, r, httpStatus, ret) } func (vs *VolumeServer) DeleteHandler(w http.ResponseWriter, r *http.Request) { - n := new(storage.Needle) + + stats.VolumeServerRequestCounter.WithLabelValues("delete").Inc() + start := time.Now() + defer func() { + stats.VolumeServerRequestHistogram.WithLabelValues("delete").Observe(time.Since(start).Seconds()) + }() + + n := new(needle.Needle) vid, fid, _, _, _ := parseURLPath(r.URL.Path) - volumeId, _ := storage.NewVolumeId(vid) + volumeId, _ := needle.NewVolumeId(vid) n.ParsePath(fid) + if !vs.maybeCheckJwtAuthorization(r, vid, fid, true) { + writeJsonError(w, r, http.StatusUnauthorized, errors.New("wrong jwt")) + return + } + // glog.V(2).Infof("volume %s deleting %s", vid, n) cookie := n.Cookie + ecVolume, hasEcVolume := vs.store.FindEcVolume(volumeId) + + if hasEcVolume { + count, err := vs.store.DeleteEcShardNeedle(ecVolume, n, cookie) + writeDeleteResult(err, count, w, r) + return + } + _, ok := vs.store.ReadVolumeNeedle(volumeId, n) if ok != nil { m := make(map[string]uint32) @@ -77,13 +120,13 @@ func (vs *VolumeServer) DeleteHandler(w http.ResponseWriter, r *http.Request) { count := int64(n.Size) if n.IsChunkedManifest() { - chunkManifest, e := operation.LoadChunkManifest(n.Data, n.IsGzipped()) + chunkManifest, e := operation.LoadChunkManifest(n.Data, n.IsCompressed()) if e != nil { writeJsonError(w, r, 
http.StatusInternalServerError, fmt.Errorf("Load chunks manifest error: %v", e)) return } // make sure all chunks had deleted before delete manifest - if e := chunkManifest.DeleteChunks(vs.GetMaster()); e != nil { + if e := chunkManifest.DeleteChunks(vs.GetMaster(), false, vs.grpcDialOption); e != nil { writeJsonError(w, r, http.StatusInternalServerError, fmt.Errorf("Delete chunks error: %v", e)) return } @@ -100,6 +143,11 @@ func (vs *VolumeServer) DeleteHandler(w http.ResponseWriter, r *http.Request) { _, err := topology.ReplicatedDelete(vs.GetMaster(), vs.store, volumeId, n, r) + writeDeleteResult(err, count, w, r) + +} + +func writeDeleteResult(err error, count int64, w http.ResponseWriter, r *http.Request) { if err == nil { m := make(map[string]int64) m["size"] = count @@ -107,7 +155,6 @@ func (vs *VolumeServer) DeleteHandler(w http.ResponseWriter, r *http.Request) { } else { writeJsonError(w, r, http.StatusInternalServerError, fmt.Errorf("Deletion Failed: %v", err)) } - } func setEtag(w http.ResponseWriter, etag string) { @@ -119,3 +166,11 @@ func setEtag(w http.ResponseWriter, etag string) { } } } + +func getEtag(resp *http.Response) (etag string) { + etag = resp.Header.Get("ETag") + if strings.HasPrefix(etag, "\"") && strings.HasSuffix(etag, "\"") { + return etag[1 : len(etag)-1] + } + return +} diff --git a/weed/server/volume_server_ui/templates.go b/weed/server/volume_server_ui/templates.go index b9740510f..8705bc088 100644 --- a/weed/server/volume_server_ui/templates.go +++ b/weed/server/volume_server_ui/templates.go @@ -1,11 +1,17 @@ package master_ui import ( + "fmt" + "github.com/chrislusf/seaweedfs/weed/util" "html/template" "strconv" "strings" ) +func percentFrom(total uint64, part_of uint64) string { + return fmt.Sprintf("%.2f", (float64(part_of)/float64(total))*100) +} + func join(data []int64) string { var ret []string for _, d := range data { @@ -15,7 +21,9 @@ func join(data []int64) string { } var funcMap = template.FuncMap{ - "join": join, + 
"join": join, + "bytesToHumanReadable": util.BytesToHumanReadable, + "percentFrom": percentFrom, } var StatusTpl = template.Must(template.New("status").Funcs(funcMap).Parse(` @@ -57,13 +65,25 @@ var StatusTpl = template.Must(template.New("status").Funcs(funcMap).Parse(`

Disk Stats

- +
+ + + + + + + + + {{ range .DiskStatuses }} - - + + + + {{ end }} +
PathTotalFreeUsage
{{ .Dir }}{{ .Free }} Bytes Free{{ .Dir }}{{ bytesToHumanReadable .All }}{{ bytesToHumanReadable .Free }}{{ percentFrom .All .Used}}%
@@ -107,10 +127,11 @@ var StatusTpl = template.Must(template.New("status").Funcs(funcMap).Parse(` Id Collection - Size + Data Size Files Trash TTL + ReadOnly @@ -118,10 +139,67 @@ var StatusTpl = template.Must(template.New("status").Funcs(funcMap).Parse(` {{ .Id }} {{ .Collection }} - {{ .Size }} Bytes + {{ bytesToHumanReadable .Size }} {{ .FileCount }} - {{ .DeleteCount }} / {{.DeletedByteCount}} Bytes + {{ .DeleteCount }} / {{bytesToHumanReadable .DeletedByteCount}} {{ .Ttl }} + {{ .ReadOnly }} + + {{ end }} + + +
+ +
+

Remote Volumes

+ + + + + + + + + + + + + + {{ range .RemoteVolumes }} + + + + + + + + + + {{ end }} + +
IdCollectionSizeFilesTrashRemoteKey
{{ .Id }}{{ .Collection }}{{ bytesToHumanReadable .Size }}{{ .FileCount }}{{ .DeleteCount }} / {{bytesToHumanReadable .DeletedByteCount}}{{ .RemoteStorageName }}{{ .RemoteStorageKey }}
+
+ +
+

Erasure Coding Shards

+ + + + + + + + + + + + {{ range .EcVolumes }} + + + + + + {{ end }} diff --git a/weed/server/webdav_server.go b/weed/server/webdav_server.go new file mode 100644 index 000000000..e8bedd352 --- /dev/null +++ b/weed/server/webdav_server.go @@ -0,0 +1,573 @@ +package weed_server + +import ( + "context" + "fmt" + "io" + "math" + "os" + "path" + "strings" + "time" + + "github.com/chrislusf/seaweedfs/weed/util/grace" + "golang.org/x/net/webdav" + "google.golang.org/grpc" + + "github.com/chrislusf/seaweedfs/weed/operation" + "github.com/chrislusf/seaweedfs/weed/pb" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/util" + "github.com/chrislusf/seaweedfs/weed/util/chunk_cache" + + "github.com/chrislusf/seaweedfs/weed/filer2" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/security" +) + +type WebDavOption struct { + Filer string + FilerGrpcAddress string + DomainName string + BucketsPath string + GrpcDialOption grpc.DialOption + Collection string + Uid uint32 + Gid uint32 + Cipher bool + CacheDir string + CacheSizeMB int64 +} + +type WebDavServer struct { + option *WebDavOption + secret security.SigningKey + filer *filer2.Filer + grpcDialOption grpc.DialOption + Handler *webdav.Handler +} + +func NewWebDavServer(option *WebDavOption) (ws *WebDavServer, err error) { + + fs, _ := NewWebDavFileSystem(option) + + ws = &WebDavServer{ + option: option, + grpcDialOption: security.LoadClientTLS(util.GetViper(), "grpc.filer"), + Handler: &webdav.Handler{ + FileSystem: fs, + LockSystem: webdav.NewMemLS(), + }, + } + + return ws, nil +} + +// adapted from https://github.com/mattn/davfs/blob/master/plugin/mysql/mysql.go + +type WebDavFileSystem struct { + option *WebDavOption + secret security.SigningKey + filer *filer2.Filer + grpcDialOption grpc.DialOption + chunkCache *chunk_cache.ChunkCache +} + +type FileInfo struct { + name string + size int64 + mode os.FileMode + modifiledTime time.Time + isDirectory 
bool +} + +func (fi *FileInfo) Name() string { return fi.name } +func (fi *FileInfo) Size() int64 { return fi.size } +func (fi *FileInfo) Mode() os.FileMode { return fi.mode } +func (fi *FileInfo) ModTime() time.Time { return fi.modifiledTime } +func (fi *FileInfo) IsDir() bool { return fi.isDirectory } +func (fi *FileInfo) Sys() interface{} { return nil } + +type WebDavFile struct { + fs *WebDavFileSystem + name string + isDirectory bool + off int64 + entry *filer_pb.Entry + entryViewCache []filer2.VisibleInterval + reader io.ReaderAt +} + +func NewWebDavFileSystem(option *WebDavOption) (webdav.FileSystem, error) { + + chunkCache := chunk_cache.NewChunkCache(256, option.CacheDir, option.CacheSizeMB) + grace.OnInterrupt(func() { + chunkCache.Shutdown() + }) + return &WebDavFileSystem{ + option: option, + chunkCache: chunkCache, + }, nil +} + +var _ = filer_pb.FilerClient(&WebDavFileSystem{}) + +func (fs *WebDavFileSystem) WithFilerClient(fn func(filer_pb.SeaweedFilerClient) error) error { + + return pb.WithCachedGrpcClient(func(grpcConnection *grpc.ClientConn) error { + client := filer_pb.NewSeaweedFilerClient(grpcConnection) + return fn(client) + }, fs.option.FilerGrpcAddress, fs.option.GrpcDialOption) + +} +func (fs *WebDavFileSystem) AdjustedUrl(hostAndPort string) string { + return hostAndPort +} + +func clearName(name string) (string, error) { + slashed := strings.HasSuffix(name, "/") + name = path.Clean(name) + if !strings.HasSuffix(name, "/") && slashed { + name += "/" + } + if !strings.HasPrefix(name, "/") { + return "", os.ErrInvalid + } + return name, nil +} + +func (fs *WebDavFileSystem) Mkdir(ctx context.Context, fullDirPath string, perm os.FileMode) error { + + glog.V(2).Infof("WebDavFileSystem.Mkdir %v", fullDirPath) + + if !strings.HasSuffix(fullDirPath, "/") { + fullDirPath += "/" + } + + var err error + if fullDirPath, err = clearName(fullDirPath); err != nil { + return err + } + + _, err = fs.stat(ctx, fullDirPath) + if err == nil { + return 
os.ErrExist + } + + return fs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { + dir, name := util.FullPath(fullDirPath).DirAndName() + request := &filer_pb.CreateEntryRequest{ + Directory: dir, + Entry: &filer_pb.Entry{ + Name: name, + IsDirectory: true, + Attributes: &filer_pb.FuseAttributes{ + Mtime: time.Now().Unix(), + Crtime: time.Now().Unix(), + FileMode: uint32(perm | os.ModeDir), + Uid: fs.option.Uid, + Gid: fs.option.Gid, + }, + }, + } + + glog.V(1).Infof("mkdir: %v", request) + if err := filer_pb.CreateEntry(client, request); err != nil { + return fmt.Errorf("mkdir %s/%s: %v", dir, name, err) + } + + return nil + }) +} + +func (fs *WebDavFileSystem) OpenFile(ctx context.Context, fullFilePath string, flag int, perm os.FileMode) (webdav.File, error) { + + glog.V(2).Infof("WebDavFileSystem.OpenFile %v %x", fullFilePath, flag) + + var err error + if fullFilePath, err = clearName(fullFilePath); err != nil { + return nil, err + } + + if flag&os.O_CREATE != 0 { + // file should not have / suffix. 
+ if strings.HasSuffix(fullFilePath, "/") { + return nil, os.ErrInvalid + } + _, err = fs.stat(ctx, fullFilePath) + if err == nil { + if flag&os.O_EXCL != 0 { + return nil, os.ErrExist + } + fs.removeAll(ctx, fullFilePath) + } + + dir, name := util.FullPath(fullFilePath).DirAndName() + err = fs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { + if err := filer_pb.CreateEntry(client, &filer_pb.CreateEntryRequest{ + Directory: dir, + Entry: &filer_pb.Entry{ + Name: name, + IsDirectory: perm&os.ModeDir > 0, + Attributes: &filer_pb.FuseAttributes{ + Mtime: time.Now().Unix(), + Crtime: time.Now().Unix(), + FileMode: uint32(perm), + Uid: fs.option.Uid, + Gid: fs.option.Gid, + Collection: fs.option.Collection, + Replication: "000", + TtlSec: 0, + }, + }, + }); err != nil { + return fmt.Errorf("create %s: %v", fullFilePath, err) + } + return nil + }) + if err != nil { + return nil, err + } + return &WebDavFile{ + fs: fs, + name: fullFilePath, + isDirectory: false, + }, nil + } + + fi, err := fs.stat(ctx, fullFilePath) + if err != nil { + return nil, os.ErrNotExist + } + if !strings.HasSuffix(fullFilePath, "/") && fi.IsDir() { + fullFilePath += "/" + } + + return &WebDavFile{ + fs: fs, + name: fullFilePath, + isDirectory: false, + }, nil + +} + +func (fs *WebDavFileSystem) removeAll(ctx context.Context, fullFilePath string) error { + var err error + if fullFilePath, err = clearName(fullFilePath); err != nil { + return err + } + + dir, name := util.FullPath(fullFilePath).DirAndName() + + return filer_pb.Remove(fs, dir, name, true, false, false, false) + +} + +func (fs *WebDavFileSystem) RemoveAll(ctx context.Context, name string) error { + + glog.V(2).Infof("WebDavFileSystem.RemoveAll %v", name) + + return fs.removeAll(ctx, name) +} + +func (fs *WebDavFileSystem) Rename(ctx context.Context, oldName, newName string) error { + + glog.V(2).Infof("WebDavFileSystem.Rename %v to %v", oldName, newName) + + var err error + if oldName, err = clearName(oldName); err != 
nil { + return err + } + if newName, err = clearName(newName); err != nil { + return err + } + + of, err := fs.stat(ctx, oldName) + if err != nil { + return os.ErrExist + } + if of.IsDir() { + if strings.HasSuffix(oldName, "/") { + oldName = strings.TrimRight(oldName, "/") + } + if strings.HasSuffix(newName, "/") { + newName = strings.TrimRight(newName, "/") + } + } + + _, err = fs.stat(ctx, newName) + if err == nil { + return os.ErrExist + } + + oldDir, oldBaseName := util.FullPath(oldName).DirAndName() + newDir, newBaseName := util.FullPath(newName).DirAndName() + + return fs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { + + request := &filer_pb.AtomicRenameEntryRequest{ + OldDirectory: oldDir, + OldName: oldBaseName, + NewDirectory: newDir, + NewName: newBaseName, + } + + _, err := client.AtomicRenameEntry(ctx, request) + if err != nil { + return fmt.Errorf("renaming %s/%s => %s/%s: %v", oldDir, oldBaseName, newDir, newBaseName, err) + } + + return nil + + }) +} + +func (fs *WebDavFileSystem) stat(ctx context.Context, fullFilePath string) (os.FileInfo, error) { + var err error + if fullFilePath, err = clearName(fullFilePath); err != nil { + return nil, err + } + + fullpath := util.FullPath(fullFilePath) + + var fi FileInfo + entry, err := filer_pb.GetEntry(fs, fullpath) + if entry == nil { + return nil, os.ErrNotExist + } + if err != nil { + return nil, err + } + fi.size = int64(filer2.TotalSize(entry.GetChunks())) + fi.name = string(fullpath) + fi.mode = os.FileMode(entry.Attributes.FileMode) + fi.modifiledTime = time.Unix(entry.Attributes.Mtime, 0) + fi.isDirectory = entry.IsDirectory + + if fi.name == "/" { + fi.modifiledTime = time.Now() + fi.isDirectory = true + } + return &fi, nil +} + +func (fs *WebDavFileSystem) Stat(ctx context.Context, name string) (os.FileInfo, error) { + + glog.V(2).Infof("WebDavFileSystem.Stat %v", name) + + return fs.stat(ctx, name) +} + +func (f *WebDavFile) Write(buf []byte) (int, error) { + + 
glog.V(2).Infof("WebDavFileSystem.Write %v", f.name) + + dir, _ := util.FullPath(f.name).DirAndName() + + var err error + ctx := context.Background() + if f.entry == nil { + f.entry, err = filer_pb.GetEntry(f.fs, util.FullPath(f.name)) + } + + if f.entry == nil { + return 0, err + } + if err != nil { + return 0, err + } + + var fileId, host string + var auth security.EncodedJwt + var collection, replication string + + if err = f.fs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { + + request := &filer_pb.AssignVolumeRequest{ + Count: 1, + Replication: "", + Collection: f.fs.option.Collection, + ParentPath: dir, + } + + resp, err := client.AssignVolume(ctx, request) + if err != nil { + glog.V(0).Infof("assign volume failure %v: %v", request, err) + return err + } + if resp.Error != "" { + return fmt.Errorf("assign volume failure %v: %v", request, resp.Error) + } + + fileId, host, auth = resp.FileId, resp.Url, security.EncodedJwt(resp.Auth) + collection, replication = resp.Collection, resp.Replication + + return nil + }); err != nil { + return 0, fmt.Errorf("filerGrpcAddress assign volume: %v", err) + } + + fileUrl := fmt.Sprintf("http://%s/%s", host, fileId) + uploadResult, err := operation.UploadData(fileUrl, f.name, f.fs.option.Cipher, buf, false, "", nil, auth) + if err != nil { + glog.V(0).Infof("upload data %v to %s: %v", f.name, fileUrl, err) + return 0, fmt.Errorf("upload data: %v", err) + } + if uploadResult.Error != "" { + glog.V(0).Infof("upload failure %v to %s: %v", f.name, fileUrl, err) + return 0, fmt.Errorf("upload result: %v", uploadResult.Error) + } + + f.entry.Chunks = append(f.entry.Chunks, uploadResult.ToPbFileChunk(fileId, f.off)) + + err = f.fs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { + f.entry.Attributes.Mtime = time.Now().Unix() + f.entry.Attributes.Collection = collection + f.entry.Attributes.Replication = replication + + request := &filer_pb.UpdateEntryRequest{ + Directory: dir, + Entry: f.entry, + } 
+ + if _, err := client.UpdateEntry(ctx, request); err != nil { + return fmt.Errorf("update %s: %v", f.name, err) + } + + return nil + }) + + if err == nil { + glog.V(3).Infof("WebDavFileSystem.Write %v: written [%d,%d)", f.name, f.off, f.off+int64(len(buf))) + f.off += int64(len(buf)) + } + + return len(buf), err +} + +func (f *WebDavFile) Close() error { + + glog.V(2).Infof("WebDavFileSystem.Close %v", f.name) + + if f.entry != nil { + f.entry = nil + f.entryViewCache = nil + } + + return nil +} + +func (f *WebDavFile) Read(p []byte) (readSize int, err error) { + + glog.V(2).Infof("WebDavFileSystem.Read %v", f.name) + + if f.entry == nil { + f.entry, err = filer_pb.GetEntry(f.fs, util.FullPath(f.name)) + } + if f.entry == nil { + return 0, err + } + if err != nil { + return 0, err + } + if len(f.entry.Chunks) == 0 { + return 0, io.EOF + } + if f.entryViewCache == nil { + f.entryViewCache = filer2.NonOverlappingVisibleIntervals(f.entry.Chunks) + f.reader = nil + } + if f.reader == nil { + chunkViews := filer2.ViewFromVisibleIntervals(f.entryViewCache, 0, math.MaxInt32) + f.reader = filer2.NewChunkReaderAtFromClient(f.fs, chunkViews, f.fs.chunkCache) + } + + readSize, err = f.reader.ReadAt(p, f.off) + + glog.V(3).Infof("WebDavFileSystem.Read %v: [%d,%d)", f.name, f.off, f.off+int64(readSize)) + f.off += int64(readSize) + + if err == io.EOF { + err = nil + } + + if err != nil { + glog.Errorf("file read %s: %v", f.name, err) + } + + return + +} + +func (f *WebDavFile) Readdir(count int) (ret []os.FileInfo, err error) { + + glog.V(2).Infof("WebDavFileSystem.Readdir %v count %d", f.name, count) + + dir, _ := util.FullPath(f.name).DirAndName() + + err = filer_pb.ReadDirAllEntries(f.fs, util.FullPath(dir), "", func(entry *filer_pb.Entry, isLast bool) error { + fi := FileInfo{ + size: int64(filer2.TotalSize(entry.GetChunks())), + name: entry.Name, + mode: os.FileMode(entry.Attributes.FileMode), + modifiledTime: time.Unix(entry.Attributes.Mtime, 0), + isDirectory: 
entry.IsDirectory, + } + + if !strings.HasSuffix(fi.name, "/") && fi.IsDir() { + fi.name += "/" + } + glog.V(4).Infof("entry: %v", fi.name) + ret = append(ret, &fi) + return nil + }) + + old := f.off + if old >= int64(len(ret)) { + if count > 0 { + return nil, io.EOF + } + return nil, nil + } + if count > 0 { + f.off += int64(count) + if f.off > int64(len(ret)) { + f.off = int64(len(ret)) + } + } else { + f.off = int64(len(ret)) + old = 0 + } + + return ret[old:f.off], nil +} + +func (f *WebDavFile) Seek(offset int64, whence int) (int64, error) { + + glog.V(2).Infof("WebDavFile.Seek %v %v %v", f.name, offset, whence) + + ctx := context.Background() + + var err error + switch whence { + case 0: + f.off = 0 + case 2: + if fi, err := f.fs.stat(ctx, f.name); err != nil { + return 0, err + } else { + f.off = fi.Size() + } + } + f.off += offset + return f.off, err +} + +func (f *WebDavFile) Stat() (os.FileInfo, error) { + + glog.V(2).Infof("WebDavFile.Stat %v", f.name) + + ctx := context.Background() + + return f.fs.stat(ctx, f.name) +} diff --git a/weed/shell/command_bucket_create.go b/weed/shell/command_bucket_create.go new file mode 100644 index 000000000..52d96e4c3 --- /dev/null +++ b/weed/shell/command_bucket_create.go @@ -0,0 +1,83 @@ +package shell + +import ( + "context" + "flag" + "fmt" + "io" + "os" + "time" + + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" +) + +func init() { + Commands = append(Commands, &commandBucketCreate{}) +} + +type commandBucketCreate struct { +} + +func (c *commandBucketCreate) Name() string { + return "bucket.create" +} + +func (c *commandBucketCreate) Help() string { + return `create a bucket with a given name + + Example: + bucket.create -name -replication 001 +` +} + +func (c *commandBucketCreate) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) { + + bucketCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError) + bucketName := bucketCommand.String("name", "", "bucket name") + replication := 
bucketCommand.String("replication", "", "replication setting for the bucket") + if err = bucketCommand.Parse(args); err != nil { + return nil + } + + if *bucketName == "" { + return fmt.Errorf("empty bucket name") + } + + err = commandEnv.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { + + resp, err := client.GetFilerConfiguration(context.Background(), &filer_pb.GetFilerConfigurationRequest{}) + if err != nil { + return fmt.Errorf("get filer configuration: %v", err) + } + filerBucketsPath := resp.DirBuckets + + println("create bucket under", filerBucketsPath) + + entry := &filer_pb.Entry{ + Name: *bucketName, + IsDirectory: true, + Attributes: &filer_pb.FuseAttributes{ + Mtime: time.Now().Unix(), + Crtime: time.Now().Unix(), + FileMode: uint32(0777 | os.ModeDir), + Collection: *bucketName, + Replication: *replication, + }, + } + + if err := filer_pb.CreateEntry(client, &filer_pb.CreateEntryRequest{ + Directory: filerBucketsPath, + Entry: entry, + }); err != nil { + return err + } + + println("created bucket", *bucketName) + + return nil + + }) + + return err + +} diff --git a/weed/shell/command_bucket_delete.go b/weed/shell/command_bucket_delete.go new file mode 100644 index 000000000..8f5f63b46 --- /dev/null +++ b/weed/shell/command_bucket_delete.go @@ -0,0 +1,54 @@ +package shell + +import ( + "flag" + "fmt" + "io" + + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" +) + +func init() { + Commands = append(Commands, &commandBucketDelete{}) +} + +type commandBucketDelete struct { +} + +func (c *commandBucketDelete) Name() string { + return "bucket.delete" +} + +func (c *commandBucketDelete) Help() string { + return `delete a bucket by a given name + + bucket.delete -name +` +} + +func (c *commandBucketDelete) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) { + + bucketCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError) + bucketName := bucketCommand.String("name", "", "bucket name") + if err = 
bucketCommand.Parse(args); err != nil { + return nil + } + + if *bucketName == "" { + return fmt.Errorf("empty bucket name") + } + + _, parseErr := commandEnv.parseUrl(findInputDirectory(bucketCommand.Args())) + if parseErr != nil { + return parseErr + } + + var filerBucketsPath string + filerBucketsPath, err = readFilerBucketsPath(commandEnv) + if err != nil { + return fmt.Errorf("read buckets: %v", err) + } + + return filer_pb.Remove(commandEnv, filerBucketsPath, *bucketName, false, true, true, false) + +} diff --git a/weed/shell/command_bucket_list.go b/weed/shell/command_bucket_list.go new file mode 100644 index 000000000..2e446b6b2 --- /dev/null +++ b/weed/shell/command_bucket_list.go @@ -0,0 +1,78 @@ +package shell + +import ( + "context" + "flag" + "fmt" + "io" + "math" + + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" +) + +func init() { + Commands = append(Commands, &commandBucketList{}) +} + +type commandBucketList struct { +} + +func (c *commandBucketList) Name() string { + return "bucket.list" +} + +func (c *commandBucketList) Help() string { + return `list all buckets + +` +} + +func (c *commandBucketList) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) { + + bucketCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError) + if err = bucketCommand.Parse(args); err != nil { + return nil + } + + _, parseErr := commandEnv.parseUrl(findInputDirectory(bucketCommand.Args())) + if parseErr != nil { + return parseErr + } + + var filerBucketsPath string + filerBucketsPath, err = readFilerBucketsPath(commandEnv) + if err != nil { + return fmt.Errorf("read buckets: %v", err) + } + + err = filer_pb.List(commandEnv, filerBucketsPath, "", func(entry *filer_pb.Entry, isLast bool) error { + if entry.Attributes.Replication == "" || entry.Attributes.Replication == "000" { + fmt.Fprintf(writer, " %s\n", entry.Name) + } else { + fmt.Fprintf(writer, " %s\t\t\treplication: %s\n", entry.Name, entry.Attributes.Replication) + } + return nil + }, "", 
false, math.MaxUint32) + if err != nil { + return fmt.Errorf("list buckets under %v: %v", filerBucketsPath, err) + } + + return err + +} + +func readFilerBucketsPath(filerClient filer_pb.FilerClient) (filerBucketsPath string, err error) { + err = filerClient.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { + + resp, err := client.GetFilerConfiguration(context.Background(), &filer_pb.GetFilerConfigurationRequest{}) + if err != nil { + return fmt.Errorf("get filer configuration: %v", err) + } + filerBucketsPath = resp.DirBuckets + + return nil + + }) + + return filerBucketsPath, err +} diff --git a/weed/shell/command_collection_delete.go b/weed/shell/command_collection_delete.go new file mode 100644 index 000000000..4b3d7f0be --- /dev/null +++ b/weed/shell/command_collection_delete.go @@ -0,0 +1,50 @@ +package shell + +import ( + "context" + "fmt" + "github.com/chrislusf/seaweedfs/weed/pb/master_pb" + "io" +) + +func init() { + Commands = append(Commands, &commandCollectionDelete{}) +} + +type commandCollectionDelete struct { +} + +func (c *commandCollectionDelete) Name() string { + return "collection.delete" +} + +func (c *commandCollectionDelete) Help() string { + return `delete specified collection + + collection.delete + +` +} + +func (c *commandCollectionDelete) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) { + + if len(args) == 0 { + return nil + } + + collectionName := args[0] + + err = commandEnv.MasterClient.WithClient(func(client master_pb.SeaweedClient) error { + _, err = client.CollectionDelete(context.Background(), &master_pb.CollectionDeleteRequest{ + Name: collectionName, + }) + return err + }) + if err != nil { + return + } + + fmt.Fprintf(writer, "collection %s is deleted.\n", collectionName) + + return nil +} diff --git a/weed/shell/command_collection_list.go b/weed/shell/command_collection_list.go new file mode 100644 index 000000000..2a114e61b --- /dev/null +++ b/weed/shell/command_collection_list.go @@ 
-0,0 +1,58 @@ +package shell + +import ( + "context" + "fmt" + "github.com/chrislusf/seaweedfs/weed/pb/master_pb" + "io" +) + +func init() { + Commands = append(Commands, &commandCollectionList{}) +} + +type commandCollectionList struct { +} + +func (c *commandCollectionList) Name() string { + return "collection.list" +} + +func (c *commandCollectionList) Help() string { + return `list all collections` +} + +func (c *commandCollectionList) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) { + + collections, err := ListCollectionNames(commandEnv, true, true) + + if err != nil { + return err + } + + for _, c := range collections { + fmt.Fprintf(writer, "collection:\"%s\"\n", c) + } + + fmt.Fprintf(writer, "Total %d collections.\n", len(collections)) + + return nil +} + +func ListCollectionNames(commandEnv *CommandEnv, includeNormalVolumes, includeEcVolumes bool) (collections []string, err error) { + var resp *master_pb.CollectionListResponse + err = commandEnv.MasterClient.WithClient(func(client master_pb.SeaweedClient) error { + resp, err = client.CollectionList(context.Background(), &master_pb.CollectionListRequest{ + IncludeNormalVolumes: includeNormalVolumes, + IncludeEcVolumes: includeEcVolumes, + }) + return err + }) + if err != nil { + return + } + for _, c := range resp.Collections { + collections = append(collections, c.Name) + } + return +} diff --git a/weed/shell/command_ec_balance.go b/weed/shell/command_ec_balance.go new file mode 100644 index 000000000..1ddb6a490 --- /dev/null +++ b/weed/shell/command_ec_balance.go @@ -0,0 +1,519 @@ +package shell + +import ( + "flag" + "fmt" + "io" + "sort" + + "github.com/chrislusf/seaweedfs/weed/storage/erasure_coding" + "github.com/chrislusf/seaweedfs/weed/storage/needle" +) + +func init() { + Commands = append(Commands, &commandEcBalance{}) +} + +type commandEcBalance struct { +} + +func (c *commandEcBalance) Name() string { + return "ec.balance" +} + +func (c *commandEcBalance) Help() string 
{ + return `balance all ec shards among all racks and volume servers + + ec.balance [-c EACH_COLLECTION|] [-force] [-dataCenter ] + + Algorithm: + + For each type of volume server (different max volume count limit){ + for each collection: + balanceEcVolumes(collectionName) + for each rack: + balanceEcRack(rack) + } + + func balanceEcVolumes(collectionName){ + for each volume: + doDeduplicateEcShards(volumeId) + + tracks rack~shardCount mapping + for each volume: + doBalanceEcShardsAcrossRacks(volumeId) + + for each volume: + doBalanceEcShardsWithinRacks(volumeId) + } + + // spread ec shards into more racks + func doBalanceEcShardsAcrossRacks(volumeId){ + tracks rack~volumeIdShardCount mapping + averageShardsPerEcRack = totalShardNumber / numRacks // totalShardNumber is 14 for now, later could varies for each dc + ecShardsToMove = select overflown ec shards from racks with ec shard counts > averageShardsPerEcRack + for each ecShardsToMove { + destRack = pickOneRack(rack~shardCount, rack~volumeIdShardCount, averageShardsPerEcRack) + destVolumeServers = volume servers on the destRack + pickOneEcNodeAndMoveOneShard(destVolumeServers) + } + } + + func doBalanceEcShardsWithinRacks(volumeId){ + racks = collect all racks that the volume id is on + for rack, shards := range racks + doBalanceEcShardsWithinOneRack(volumeId, shards, rack) + } + + // move ec shards + func doBalanceEcShardsWithinOneRack(volumeId, shards, rackId){ + tracks volumeServer~volumeIdShardCount mapping + averageShardCount = len(shards) / numVolumeServers + volumeServersOverAverage = volume servers with volumeId's ec shard counts > averageShardsPerEcRack + ecShardsToMove = select overflown ec shards from volumeServersOverAverage + for each ecShardsToMove { + destVolumeServer = pickOneVolumeServer(volumeServer~shardCount, volumeServer~volumeIdShardCount, averageShardCount) + pickOneEcNodeAndMoveOneShard(destVolumeServers) + } + } + + // move ec shards while keeping shard distribution for the same volume 
unchanged or more even + func balanceEcRack(rack){ + averageShardCount = total shards / numVolumeServers + for hasMovedOneEcShard { + sort all volume servers ordered by the number of local ec shards + pick the volume server A with the lowest number of ec shards x + pick the volume server B with the highest number of ec shards y + if y > averageShardCount and x +1 <= averageShardCount { + if B has a ec shard with volume id v that A does not have { + move one ec shard v from B to A + hasMovedOneEcShard = true + } + } + } + } + +` +} + +func (c *commandEcBalance) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) { + + if err = commandEnv.confirmIsLocked(); err != nil { + return + } + + balanceCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError) + collection := balanceCommand.String("collection", "EACH_COLLECTION", "collection name, or \"EACH_COLLECTION\" for each collection") + dc := balanceCommand.String("dataCenter", "", "only apply the balancing for this dataCenter") + applyBalancing := balanceCommand.Bool("force", false, "apply the balancing plan") + if err = balanceCommand.Parse(args); err != nil { + return nil + } + + // collect all ec nodes + allEcNodes, totalFreeEcSlots, err := collectEcNodes(commandEnv, *dc) + if err != nil { + return err + } + if totalFreeEcSlots < 1 { + return fmt.Errorf("no free ec shard slots. 
only %d left", totalFreeEcSlots) + } + + racks := collectRacks(allEcNodes) + + if *collection == "EACH_COLLECTION" { + collections, err := ListCollectionNames(commandEnv, false, true) + if err != nil { + return err + } + fmt.Printf("balanceEcVolumes collections %+v\n", len(collections)) + for _, c := range collections { + fmt.Printf("balanceEcVolumes collection %+v\n", c) + if err = balanceEcVolumes(commandEnv, c, allEcNodes, racks, *applyBalancing); err != nil { + return err + } + } + } else { + if err = balanceEcVolumes(commandEnv, *collection, allEcNodes, racks, *applyBalancing); err != nil { + return err + } + } + + if err := balanceEcRacks(commandEnv, racks, *applyBalancing); err != nil { + return fmt.Errorf("balance ec racks: %v", err) + } + + return nil +} + +func collectRacks(allEcNodes []*EcNode) map[RackId]*EcRack { + // collect racks info + racks := make(map[RackId]*EcRack) + for _, ecNode := range allEcNodes { + if racks[ecNode.rack] == nil { + racks[ecNode.rack] = &EcRack{ + ecNodes: make(map[EcNodeId]*EcNode), + } + } + racks[ecNode.rack].ecNodes[EcNodeId(ecNode.info.Id)] = ecNode + racks[ecNode.rack].freeEcSlot += ecNode.freeEcSlot + } + return racks +} + +func balanceEcVolumes(commandEnv *CommandEnv, collection string, allEcNodes []*EcNode, racks map[RackId]*EcRack, applyBalancing bool) error { + + fmt.Printf("balanceEcVolumes %s\n", collection) + + if err := deleteDuplicatedEcShards(commandEnv, allEcNodes, collection, applyBalancing); err != nil { + return fmt.Errorf("delete duplicated collection %s ec shards: %v", collection, err) + } + + if err := balanceEcShardsAcrossRacks(commandEnv, allEcNodes, racks, collection, applyBalancing); err != nil { + return fmt.Errorf("balance across racks collection %s ec shards: %v", collection, err) + } + + if err := balanceEcShardsWithinRacks(commandEnv, allEcNodes, racks, collection, applyBalancing); err != nil { + return fmt.Errorf("balance across racks collection %s ec shards: %v", collection, err) + } + + 
return nil +} + +func deleteDuplicatedEcShards(commandEnv *CommandEnv, allEcNodes []*EcNode, collection string, applyBalancing bool) error { + // vid => []ecNode + vidLocations := collectVolumeIdToEcNodes(allEcNodes) + // deduplicate ec shards + for vid, locations := range vidLocations { + if err := doDeduplicateEcShards(commandEnv, collection, vid, locations, applyBalancing); err != nil { + return err + } + } + return nil +} + +func doDeduplicateEcShards(commandEnv *CommandEnv, collection string, vid needle.VolumeId, locations []*EcNode, applyBalancing bool) error { + + // check whether this volume has ecNodes that are over average + shardToLocations := make([][]*EcNode, erasure_coding.TotalShardsCount) + for _, ecNode := range locations { + shardBits := findEcVolumeShards(ecNode, vid) + for _, shardId := range shardBits.ShardIds() { + shardToLocations[shardId] = append(shardToLocations[shardId], ecNode) + } + } + for shardId, ecNodes := range shardToLocations { + if len(ecNodes) <= 1 { + continue + } + sortEcNodesByFreeslotsAscending(ecNodes) + fmt.Printf("ec shard %d.%d has %d copies, keeping %v\n", vid, shardId, len(ecNodes), ecNodes[0].info.Id) + if !applyBalancing { + continue + } + + duplicatedShardIds := []uint32{uint32(shardId)} + for _, ecNode := range ecNodes[1:] { + if err := unmountEcShards(commandEnv.option.GrpcDialOption, vid, ecNode.info.Id, duplicatedShardIds); err != nil { + return err + } + if err := sourceServerDeleteEcShards(commandEnv.option.GrpcDialOption, collection, vid, ecNode.info.Id, duplicatedShardIds); err != nil { + return err + } + ecNode.deleteEcVolumeShards(vid, duplicatedShardIds) + } + } + return nil +} + +func balanceEcShardsAcrossRacks(commandEnv *CommandEnv, allEcNodes []*EcNode, racks map[RackId]*EcRack, collection string, applyBalancing bool) error { + // collect vid => []ecNode, since previous steps can change the locations + vidLocations := collectVolumeIdToEcNodes(allEcNodes) + // spread the ec shards evenly + for vid, 
locations := range vidLocations { + if err := doBalanceEcShardsAcrossRacks(commandEnv, collection, vid, locations, racks, applyBalancing); err != nil { + return err + } + } + return nil +} + +func doBalanceEcShardsAcrossRacks(commandEnv *CommandEnv, collection string, vid needle.VolumeId, locations []*EcNode, racks map[RackId]*EcRack, applyBalancing bool) error { + + // calculate average number of shards an ec rack should have for one volume + averageShardsPerEcRack := ceilDivide(erasure_coding.TotalShardsCount, len(racks)) + + // see the volume's shards are in how many racks, and how many in each rack + rackToShardCount := groupByCount(locations, func(ecNode *EcNode) (id string, count int) { + shardBits := findEcVolumeShards(ecNode, vid) + return string(ecNode.rack), shardBits.ShardIdCount() + }) + rackEcNodesWithVid := groupBy(locations, func(ecNode *EcNode) string { + return string(ecNode.rack) + }) + + // ecShardsToMove = select overflown ec shards from racks with ec shard counts > averageShardsPerEcRack + ecShardsToMove := make(map[erasure_coding.ShardId]*EcNode) + for rackId, count := range rackToShardCount { + if count > averageShardsPerEcRack { + possibleEcNodes := rackEcNodesWithVid[rackId] + for shardId, ecNode := range pickNEcShardsToMoveFrom(possibleEcNodes, vid, count-averageShardsPerEcRack) { + ecShardsToMove[shardId] = ecNode + } + } + } + + for shardId, ecNode := range ecShardsToMove { + rackId := pickOneRack(racks, rackToShardCount, averageShardsPerEcRack) + if rackId == "" { + fmt.Printf("ec shard %d.%d at %s can not find a destination rack\n", vid, shardId, ecNode.info.Id) + continue + } + var possibleDestinationEcNodes []*EcNode + for _, n := range racks[rackId].ecNodes { + possibleDestinationEcNodes = append(possibleDestinationEcNodes, n) + } + err := pickOneEcNodeAndMoveOneShard(commandEnv, averageShardsPerEcRack, ecNode, collection, vid, shardId, possibleDestinationEcNodes, applyBalancing) + if err != nil { + return err + } + 
rackToShardCount[string(rackId)] += 1 + rackToShardCount[string(ecNode.rack)] -= 1 + racks[rackId].freeEcSlot -= 1 + racks[ecNode.rack].freeEcSlot += 1 + } + + return nil +} + +func pickOneRack(rackToEcNodes map[RackId]*EcRack, rackToShardCount map[string]int, averageShardsPerEcRack int) RackId { + + // TODO later may need to add some randomness + + for rackId, rack := range rackToEcNodes { + if rackToShardCount[string(rackId)] >= averageShardsPerEcRack { + continue + } + + if rack.freeEcSlot <= 0 { + continue + } + + return rackId + } + + return "" +} + +func balanceEcShardsWithinRacks(commandEnv *CommandEnv, allEcNodes []*EcNode, racks map[RackId]*EcRack, collection string, applyBalancing bool) error { + // collect vid => []ecNode, since previous steps can change the locations + vidLocations := collectVolumeIdToEcNodes(allEcNodes) + + // spread the ec shards evenly + for vid, locations := range vidLocations { + + // see the volume's shards are in how many racks, and how many in each rack + rackToShardCount := groupByCount(locations, func(ecNode *EcNode) (id string, count int) { + shardBits := findEcVolumeShards(ecNode, vid) + return string(ecNode.rack), shardBits.ShardIdCount() + }) + rackEcNodesWithVid := groupBy(locations, func(ecNode *EcNode) string { + return string(ecNode.rack) + }) + + for rackId, _ := range rackToShardCount { + + var possibleDestinationEcNodes []*EcNode + for _, n := range racks[RackId(rackId)].ecNodes { + possibleDestinationEcNodes = append(possibleDestinationEcNodes, n) + } + sourceEcNodes := rackEcNodesWithVid[rackId] + averageShardsPerEcNode := ceilDivide(rackToShardCount[rackId], len(possibleDestinationEcNodes)) + if err := doBalanceEcShardsWithinOneRack(commandEnv, averageShardsPerEcNode, collection, vid, sourceEcNodes, possibleDestinationEcNodes, applyBalancing); err != nil { + return err + } + } + } + return nil +} + +func doBalanceEcShardsWithinOneRack(commandEnv *CommandEnv, averageShardsPerEcNode int, collection string, vid 
needle.VolumeId, existingLocations, possibleDestinationEcNodes []*EcNode, applyBalancing bool) error { + + for _, ecNode := range existingLocations { + + shardBits := findEcVolumeShards(ecNode, vid) + overLimitCount := shardBits.ShardIdCount() - averageShardsPerEcNode + + for _, shardId := range shardBits.ShardIds() { + + if overLimitCount <= 0 { + break + } + + fmt.Printf("%s has %d overlimit, moving ec shard %d.%d\n", ecNode.info.Id, overLimitCount, vid, shardId) + + err := pickOneEcNodeAndMoveOneShard(commandEnv, averageShardsPerEcNode, ecNode, collection, vid, shardId, possibleDestinationEcNodes, applyBalancing) + if err != nil { + return err + } + + overLimitCount-- + } + } + + return nil +} + +func balanceEcRacks(commandEnv *CommandEnv, racks map[RackId]*EcRack, applyBalancing bool) error { + + // balance one rack for all ec shards + for _, ecRack := range racks { + if err := doBalanceEcRack(commandEnv, ecRack, applyBalancing); err != nil { + return err + } + } + return nil +} + +func doBalanceEcRack(commandEnv *CommandEnv, ecRack *EcRack, applyBalancing bool) error { + + if len(ecRack.ecNodes) <= 1 { + return nil + } + + var rackEcNodes []*EcNode + for _, node := range ecRack.ecNodes { + rackEcNodes = append(rackEcNodes, node) + } + + ecNodeIdToShardCount := groupByCount(rackEcNodes, func(node *EcNode) (id string, count int) { + for _, ecShardInfo := range node.info.EcShardInfos { + count += erasure_coding.ShardBits(ecShardInfo.EcIndexBits).ShardIdCount() + } + return node.info.Id, count + }) + + var totalShardCount int + for _, count := range ecNodeIdToShardCount { + totalShardCount += count + } + + averageShardCount := ceilDivide(totalShardCount, len(rackEcNodes)) + + hasMove := true + for hasMove { + hasMove = false + sort.Slice(rackEcNodes, func(i, j int) bool { + return rackEcNodes[i].freeEcSlot > rackEcNodes[j].freeEcSlot + }) + emptyNode, fullNode := rackEcNodes[0], rackEcNodes[len(rackEcNodes)-1] + emptyNodeShardCount, fullNodeShardCount := 
ecNodeIdToShardCount[emptyNode.info.Id], ecNodeIdToShardCount[fullNode.info.Id] + if fullNodeShardCount > averageShardCount && emptyNodeShardCount+1 <= averageShardCount { + + emptyNodeIds := make(map[uint32]bool) + for _, shards := range emptyNode.info.EcShardInfos { + emptyNodeIds[shards.Id] = true + } + for _, shards := range fullNode.info.EcShardInfos { + if _, found := emptyNodeIds[shards.Id]; !found { + for _, shardId := range erasure_coding.ShardBits(shards.EcIndexBits).ShardIds() { + + fmt.Printf("%s moves ec shards %d.%d to %s\n", fullNode.info.Id, shards.Id, shardId, emptyNode.info.Id) + + err := moveMountedShardToEcNode(commandEnv, fullNode, shards.Collection, needle.VolumeId(shards.Id), shardId, emptyNode, applyBalancing) + if err != nil { + return err + } + + ecNodeIdToShardCount[emptyNode.info.Id]++ + ecNodeIdToShardCount[fullNode.info.Id]-- + hasMove = true + break + } + break + } + } + } + } + + return nil +} + +func pickOneEcNodeAndMoveOneShard(commandEnv *CommandEnv, averageShardsPerEcNode int, existingLocation *EcNode, collection string, vid needle.VolumeId, shardId erasure_coding.ShardId, possibleDestinationEcNodes []*EcNode, applyBalancing bool) error { + + sortEcNodesByFreeslotsDecending(possibleDestinationEcNodes) + + for _, destEcNode := range possibleDestinationEcNodes { + if destEcNode.info.Id == existingLocation.info.Id { + continue + } + + if destEcNode.freeEcSlot <= 0 { + continue + } + if findEcVolumeShards(destEcNode, vid).ShardIdCount() >= averageShardsPerEcNode { + continue + } + + fmt.Printf("%s moves ec shard %d.%d to %s\n", existingLocation.info.Id, vid, shardId, destEcNode.info.Id) + + err := moveMountedShardToEcNode(commandEnv, existingLocation, collection, vid, shardId, destEcNode, applyBalancing) + if err != nil { + return err + } + + return nil + } + + return nil +} + +func pickNEcShardsToMoveFrom(ecNodes []*EcNode, vid needle.VolumeId, n int) map[erasure_coding.ShardId]*EcNode { + picked := 
make(map[erasure_coding.ShardId]*EcNode) + var candidateEcNodes []*CandidateEcNode + for _, ecNode := range ecNodes { + shardBits := findEcVolumeShards(ecNode, vid) + if shardBits.ShardIdCount() > 0 { + candidateEcNodes = append(candidateEcNodes, &CandidateEcNode{ + ecNode: ecNode, + shardCount: shardBits.ShardIdCount(), + }) + } + } + sort.Slice(candidateEcNodes, func(i, j int) bool { + return candidateEcNodes[i].shardCount > candidateEcNodes[j].shardCount + }) + for i := 0; i < n; i++ { + selectedEcNodeIndex := -1 + for i, candidateEcNode := range candidateEcNodes { + shardBits := findEcVolumeShards(candidateEcNode.ecNode, vid) + if shardBits > 0 { + selectedEcNodeIndex = i + for _, shardId := range shardBits.ShardIds() { + candidateEcNode.shardCount-- + picked[shardId] = candidateEcNode.ecNode + candidateEcNode.ecNode.deleteEcVolumeShards(vid, []uint32{uint32(shardId)}) + break + } + break + } + } + if selectedEcNodeIndex >= 0 { + ensureSortedEcNodes(candidateEcNodes, selectedEcNodeIndex, func(i, j int) bool { + return candidateEcNodes[i].shardCount > candidateEcNodes[j].shardCount + }) + } + + } + return picked +} + +func collectVolumeIdToEcNodes(allEcNodes []*EcNode) map[needle.VolumeId][]*EcNode { + vidLocations := make(map[needle.VolumeId][]*EcNode) + for _, ecNode := range allEcNodes { + for _, shardInfo := range ecNode.info.EcShardInfos { + vidLocations[needle.VolumeId(shardInfo.Id)] = append(vidLocations[needle.VolumeId(shardInfo.Id)], ecNode) + } + } + return vidLocations +} diff --git a/weed/shell/command_ec_common.go b/weed/shell/command_ec_common.go new file mode 100644 index 000000000..0db119d3c --- /dev/null +++ b/weed/shell/command_ec_common.go @@ -0,0 +1,337 @@ +package shell + +import ( + "context" + "fmt" + "math" + "sort" + + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/operation" + "github.com/chrislusf/seaweedfs/weed/pb/master_pb" + "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb" + 
"github.com/chrislusf/seaweedfs/weed/storage/erasure_coding" + "github.com/chrislusf/seaweedfs/weed/storage/needle" + "google.golang.org/grpc" +) + +func moveMountedShardToEcNode(commandEnv *CommandEnv, existingLocation *EcNode, collection string, vid needle.VolumeId, shardId erasure_coding.ShardId, destinationEcNode *EcNode, applyBalancing bool) (err error) { + + copiedShardIds := []uint32{uint32(shardId)} + + if applyBalancing { + + // ask destination node to copy shard and the ecx file from source node, and mount it + copiedShardIds, err = oneServerCopyAndMountEcShardsFromSource(commandEnv.option.GrpcDialOption, destinationEcNode, []uint32{uint32(shardId)}, vid, collection, existingLocation.info.Id) + if err != nil { + return err + } + + // unmount the to be deleted shards + err = unmountEcShards(commandEnv.option.GrpcDialOption, vid, existingLocation.info.Id, copiedShardIds) + if err != nil { + return err + } + + // ask source node to delete the shard, and maybe the ecx file + err = sourceServerDeleteEcShards(commandEnv.option.GrpcDialOption, collection, vid, existingLocation.info.Id, copiedShardIds) + if err != nil { + return err + } + + fmt.Printf("moved ec shard %d.%d %s => %s\n", vid, shardId, existingLocation.info.Id, destinationEcNode.info.Id) + + } + + destinationEcNode.addEcVolumeShards(vid, collection, copiedShardIds) + existingLocation.deleteEcVolumeShards(vid, copiedShardIds) + + return nil + +} + +func oneServerCopyAndMountEcShardsFromSource(grpcDialOption grpc.DialOption, + targetServer *EcNode, shardIdsToCopy []uint32, + volumeId needle.VolumeId, collection string, existingLocation string) (copiedShardIds []uint32, err error) { + + fmt.Printf("allocate %d.%v %s => %s\n", volumeId, shardIdsToCopy, existingLocation, targetServer.info.Id) + + err = operation.WithVolumeServerClient(targetServer.info.Id, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error { + + if targetServer.info.Id != existingLocation { + + 
fmt.Printf("copy %d.%v %s => %s\n", volumeId, shardIdsToCopy, existingLocation, targetServer.info.Id) + _, copyErr := volumeServerClient.VolumeEcShardsCopy(context.Background(), &volume_server_pb.VolumeEcShardsCopyRequest{ + VolumeId: uint32(volumeId), + Collection: collection, + ShardIds: shardIdsToCopy, + CopyEcxFile: true, + CopyEcjFile: true, + CopyVifFile: true, + SourceDataNode: existingLocation, + }) + if copyErr != nil { + return fmt.Errorf("copy %d.%v %s => %s : %v\n", volumeId, shardIdsToCopy, existingLocation, targetServer.info.Id, copyErr) + } + } + + fmt.Printf("mount %d.%v on %s\n", volumeId, shardIdsToCopy, targetServer.info.Id) + _, mountErr := volumeServerClient.VolumeEcShardsMount(context.Background(), &volume_server_pb.VolumeEcShardsMountRequest{ + VolumeId: uint32(volumeId), + Collection: collection, + ShardIds: shardIdsToCopy, + }) + if mountErr != nil { + return fmt.Errorf("mount %d.%v on %s : %v\n", volumeId, shardIdsToCopy, targetServer.info.Id, mountErr) + } + + if targetServer.info.Id != existingLocation { + copiedShardIds = shardIdsToCopy + glog.V(0).Infof("%s ec volume %d deletes shards %+v", existingLocation, volumeId, copiedShardIds) + } + + return nil + }) + + if err != nil { + return + } + + return +} + +func eachDataNode(topo *master_pb.TopologyInfo, fn func(dc string, rack RackId, dn *master_pb.DataNodeInfo)) { + for _, dc := range topo.DataCenterInfos { + for _, rack := range dc.RackInfos { + for _, dn := range rack.DataNodeInfos { + fn(dc.Id, RackId(rack.Id), dn) + } + } + } +} + +func sortEcNodesByFreeslotsDecending(ecNodes []*EcNode) { + sort.Slice(ecNodes, func(i, j int) bool { + return ecNodes[i].freeEcSlot > ecNodes[j].freeEcSlot + }) +} + +func sortEcNodesByFreeslotsAscending(ecNodes []*EcNode) { + sort.Slice(ecNodes, func(i, j int) bool { + return ecNodes[i].freeEcSlot < ecNodes[j].freeEcSlot + }) +} + +type CandidateEcNode struct { + ecNode *EcNode + shardCount int +} + +// if the index node changed the freeEcSlot, need 
to keep every EcNode still sorted +func ensureSortedEcNodes(data []*CandidateEcNode, index int, lessThan func(i, j int) bool) { + for i := index - 1; i >= 0; i-- { + if lessThan(i+1, i) { + swap(data, i, i+1) + } else { + break + } + } + for i := index + 1; i < len(data); i++ { + if lessThan(i, i-1) { + swap(data, i, i-1) + } else { + break + } + } +} + +func swap(data []*CandidateEcNode, i, j int) { + t := data[i] + data[i] = data[j] + data[j] = t +} + +func countShards(ecShardInfos []*master_pb.VolumeEcShardInformationMessage) (count int) { + for _, ecShardInfo := range ecShardInfos { + shardBits := erasure_coding.ShardBits(ecShardInfo.EcIndexBits) + count += shardBits.ShardIdCount() + } + return +} + +func countFreeShardSlots(dn *master_pb.DataNodeInfo) (count int) { + return int(dn.MaxVolumeCount-dn.ActiveVolumeCount)*erasure_coding.DataShardsCount - countShards(dn.EcShardInfos) +} + +type RackId string +type EcNodeId string + +type EcNode struct { + info *master_pb.DataNodeInfo + dc string + rack RackId + freeEcSlot int +} + +type EcRack struct { + ecNodes map[EcNodeId]*EcNode + freeEcSlot int +} + +func collectEcNodes(commandEnv *CommandEnv, selectedDataCenter string) (ecNodes []*EcNode, totalFreeEcSlots int, err error) { + + // list all possible locations + var resp *master_pb.VolumeListResponse + err = commandEnv.MasterClient.WithClient(func(client master_pb.SeaweedClient) error { + resp, err = client.VolumeList(context.Background(), &master_pb.VolumeListRequest{}) + return err + }) + if err != nil { + return nil, 0, err + } + + // find out all volume servers with one slot left. 
+ eachDataNode(resp.TopologyInfo, func(dc string, rack RackId, dn *master_pb.DataNodeInfo) { + if selectedDataCenter != "" && selectedDataCenter != dc { + return + } + + freeEcSlots := countFreeShardSlots(dn) + ecNodes = append(ecNodes, &EcNode{ + info: dn, + dc: dc, + rack: rack, + freeEcSlot: int(freeEcSlots), + }) + totalFreeEcSlots += freeEcSlots + }) + + sortEcNodesByFreeslotsDecending(ecNodes) + + return +} + +func sourceServerDeleteEcShards(grpcDialOption grpc.DialOption, collection string, volumeId needle.VolumeId, sourceLocation string, toBeDeletedShardIds []uint32) error { + + fmt.Printf("delete %d.%v from %s\n", volumeId, toBeDeletedShardIds, sourceLocation) + + return operation.WithVolumeServerClient(sourceLocation, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error { + _, deleteErr := volumeServerClient.VolumeEcShardsDelete(context.Background(), &volume_server_pb.VolumeEcShardsDeleteRequest{ + VolumeId: uint32(volumeId), + Collection: collection, + ShardIds: toBeDeletedShardIds, + }) + return deleteErr + }) + +} + +func unmountEcShards(grpcDialOption grpc.DialOption, volumeId needle.VolumeId, sourceLocation string, toBeUnmountedhardIds []uint32) error { + + fmt.Printf("unmount %d.%v from %s\n", volumeId, toBeUnmountedhardIds, sourceLocation) + + return operation.WithVolumeServerClient(sourceLocation, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error { + _, deleteErr := volumeServerClient.VolumeEcShardsUnmount(context.Background(), &volume_server_pb.VolumeEcShardsUnmountRequest{ + VolumeId: uint32(volumeId), + ShardIds: toBeUnmountedhardIds, + }) + return deleteErr + }) +} + +func mountEcShards(grpcDialOption grpc.DialOption, collection string, volumeId needle.VolumeId, sourceLocation string, toBeMountedhardIds []uint32) error { + + fmt.Printf("mount %d.%v on %s\n", volumeId, toBeMountedhardIds, sourceLocation) + + return operation.WithVolumeServerClient(sourceLocation, grpcDialOption, 
func(volumeServerClient volume_server_pb.VolumeServerClient) error { + _, mountErr := volumeServerClient.VolumeEcShardsMount(context.Background(), &volume_server_pb.VolumeEcShardsMountRequest{ + VolumeId: uint32(volumeId), + Collection: collection, + ShardIds: toBeMountedhardIds, + }) + return mountErr + }) +} + +func ceilDivide(total, n int) int { + return int(math.Ceil(float64(total) / float64(n))) +} + +func findEcVolumeShards(ecNode *EcNode, vid needle.VolumeId) erasure_coding.ShardBits { + + for _, shardInfo := range ecNode.info.EcShardInfos { + if needle.VolumeId(shardInfo.Id) == vid { + return erasure_coding.ShardBits(shardInfo.EcIndexBits) + } + } + + return 0 +} + +func (ecNode *EcNode) addEcVolumeShards(vid needle.VolumeId, collection string, shardIds []uint32) *EcNode { + + foundVolume := false + for _, shardInfo := range ecNode.info.EcShardInfos { + if needle.VolumeId(shardInfo.Id) == vid { + oldShardBits := erasure_coding.ShardBits(shardInfo.EcIndexBits) + newShardBits := oldShardBits + for _, shardId := range shardIds { + newShardBits = newShardBits.AddShardId(erasure_coding.ShardId(shardId)) + } + shardInfo.EcIndexBits = uint32(newShardBits) + ecNode.freeEcSlot -= newShardBits.ShardIdCount() - oldShardBits.ShardIdCount() + foundVolume = true + break + } + } + + if !foundVolume { + var newShardBits erasure_coding.ShardBits + for _, shardId := range shardIds { + newShardBits = newShardBits.AddShardId(erasure_coding.ShardId(shardId)) + } + ecNode.info.EcShardInfos = append(ecNode.info.EcShardInfos, &master_pb.VolumeEcShardInformationMessage{ + Id: uint32(vid), + Collection: collection, + EcIndexBits: uint32(newShardBits), + }) + ecNode.freeEcSlot -= len(shardIds) + } + + return ecNode +} + +func (ecNode *EcNode) deleteEcVolumeShards(vid needle.VolumeId, shardIds []uint32) *EcNode { + + for _, shardInfo := range ecNode.info.EcShardInfos { + if needle.VolumeId(shardInfo.Id) == vid { + oldShardBits := erasure_coding.ShardBits(shardInfo.EcIndexBits) + 
newShardBits := oldShardBits + for _, shardId := range shardIds { + newShardBits = newShardBits.RemoveShardId(erasure_coding.ShardId(shardId)) + } + shardInfo.EcIndexBits = uint32(newShardBits) + ecNode.freeEcSlot -= newShardBits.ShardIdCount() - oldShardBits.ShardIdCount() + } + } + + return ecNode +} + +func groupByCount(data []*EcNode, identifierFn func(*EcNode) (id string, count int)) map[string]int { + countMap := make(map[string]int) + for _, d := range data { + id, count := identifierFn(d) + countMap[id] += count + } + return countMap +} + +func groupBy(data []*EcNode, identifierFn func(*EcNode) (id string)) map[string][]*EcNode { + groupMap := make(map[string][]*EcNode) + for _, d := range data { + id := identifierFn(d) + groupMap[id] = append(groupMap[id], d) + } + return groupMap +} diff --git a/weed/shell/command_ec_decode.go b/weed/shell/command_ec_decode.go new file mode 100644 index 000000000..5f03df58c --- /dev/null +++ b/weed/shell/command_ec_decode.go @@ -0,0 +1,268 @@ +package shell + +import ( + "context" + "flag" + "fmt" + "io" + + "google.golang.org/grpc" + + "github.com/chrislusf/seaweedfs/weed/operation" + "github.com/chrislusf/seaweedfs/weed/pb/master_pb" + "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb" + "github.com/chrislusf/seaweedfs/weed/storage/erasure_coding" + "github.com/chrislusf/seaweedfs/weed/storage/needle" +) + +func init() { + Commands = append(Commands, &commandEcDecode{}) +} + +type commandEcDecode struct { +} + +func (c *commandEcDecode) Name() string { + return "ec.decode" +} + +func (c *commandEcDecode) Help() string { + return `decode a erasure coded volume into a normal volume + + ec.decode [-collection=""] [-volumeId=] + +` +} + +func (c *commandEcDecode) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) { + + if err = commandEnv.confirmIsLocked(); err != nil { + return + } + + encodeCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError) + volumeId := encodeCommand.Int("volumeId", 0, 
"the volume id") + collection := encodeCommand.String("collection", "", "the collection name") + if err = encodeCommand.Parse(args); err != nil { + return nil + } + + vid := needle.VolumeId(*volumeId) + + // collect topology information + topologyInfo, err := collectTopologyInfo(commandEnv) + if err != nil { + return err + } + + // volumeId is provided + if vid != 0 { + return doEcDecode(commandEnv, topologyInfo, *collection, vid) + } + + // apply to all volumes in the collection + volumeIds := collectEcShardIds(topologyInfo, *collection) + fmt.Printf("ec encode volumes: %v\n", volumeIds) + for _, vid := range volumeIds { + if err = doEcDecode(commandEnv, topologyInfo, *collection, vid); err != nil { + return err + } + } + + return nil +} + +func doEcDecode(commandEnv *CommandEnv, topoInfo *master_pb.TopologyInfo, collection string, vid needle.VolumeId) (err error) { + // find volume location + nodeToEcIndexBits := collectEcNodeShardBits(topoInfo, vid) + + fmt.Printf("ec volume %d shard locations: %+v\n", vid, nodeToEcIndexBits) + + // collect ec shards to the server with most space + targetNodeLocation, err := collectEcShards(commandEnv, nodeToEcIndexBits, collection, vid) + if err != nil { + return fmt.Errorf("collectEcShards for volume %d: %v", vid, err) + } + + // generate a normal volume + err = generateNormalVolume(commandEnv.option.GrpcDialOption, needle.VolumeId(vid), collection, targetNodeLocation) + if err != nil { + return fmt.Errorf("generate normal volume %d on %s: %v", vid, targetNodeLocation, err) + } + + // delete the previous ec shards + err = mountVolumeAndDeleteEcShards(commandEnv.option.GrpcDialOption, collection, targetNodeLocation, nodeToEcIndexBits, vid) + if err != nil { + return fmt.Errorf("delete ec shards for volume %d: %v", vid, err) + } + + return nil +} + +func mountVolumeAndDeleteEcShards(grpcDialOption grpc.DialOption, collection, targetNodeLocation string, nodeToEcIndexBits map[string]erasure_coding.ShardBits, vid needle.VolumeId) 
error { + + // mount volume + if err := operation.WithVolumeServerClient(targetNodeLocation, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error { + _, mountErr := volumeServerClient.VolumeMount(context.Background(), &volume_server_pb.VolumeMountRequest{ + VolumeId: uint32(vid), + }) + return mountErr + }); err != nil { + return fmt.Errorf("mountVolumeAndDeleteEcShards mount volume %d on %s: %v", vid, targetNodeLocation, err) + } + + // unmount ec shards + for location, ecIndexBits := range nodeToEcIndexBits { + fmt.Printf("unmount ec volume %d on %s has shards: %+v\n", vid, location, ecIndexBits.ShardIds()) + err := unmountEcShards(grpcDialOption, vid, location, ecIndexBits.ToUint32Slice()) + if err != nil { + return fmt.Errorf("mountVolumeAndDeleteEcShards unmount ec volume %d on %s: %v", vid, location, err) + } + } + // delete ec shards + for location, ecIndexBits := range nodeToEcIndexBits { + fmt.Printf("delete ec volume %d on %s has shards: %+v\n", vid, location, ecIndexBits.ShardIds()) + err := sourceServerDeleteEcShards(grpcDialOption, collection, vid, location, ecIndexBits.ToUint32Slice()) + if err != nil { + return fmt.Errorf("mountVolumeAndDeleteEcShards delete ec volume %d on %s: %v", vid, location, err) + } + } + + return nil +} + +func generateNormalVolume(grpcDialOption grpc.DialOption, vid needle.VolumeId, collection string, sourceVolumeServer string) error { + + fmt.Printf("generateNormalVolume from ec volume %d on %s\n", vid, sourceVolumeServer) + + err := operation.WithVolumeServerClient(sourceVolumeServer, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error { + _, genErr := volumeServerClient.VolumeEcShardsToVolume(context.Background(), &volume_server_pb.VolumeEcShardsToVolumeRequest{ + VolumeId: uint32(vid), + Collection: collection, + }) + return genErr + }) + + return err + +} + +func collectEcShards(commandEnv *CommandEnv, nodeToEcIndexBits map[string]erasure_coding.ShardBits, 
collection string, vid needle.VolumeId) (targetNodeLocation string, err error) { + + maxShardCount := 0 + var exisitngEcIndexBits erasure_coding.ShardBits + for loc, ecIndexBits := range nodeToEcIndexBits { + toBeCopiedShardCount := ecIndexBits.MinusParityShards().ShardIdCount() + if toBeCopiedShardCount > maxShardCount { + maxShardCount = toBeCopiedShardCount + targetNodeLocation = loc + exisitngEcIndexBits = ecIndexBits + } + } + + fmt.Printf("collectEcShards: ec volume %d collect shards to %s from: %+v\n", vid, targetNodeLocation, nodeToEcIndexBits) + + var copiedEcIndexBits erasure_coding.ShardBits + for loc, ecIndexBits := range nodeToEcIndexBits { + if loc == targetNodeLocation { + continue + } + + needToCopyEcIndexBits := ecIndexBits.Minus(exisitngEcIndexBits).MinusParityShards() + if needToCopyEcIndexBits.ShardIdCount() == 0 { + continue + } + + err = operation.WithVolumeServerClient(targetNodeLocation, commandEnv.option.GrpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error { + + fmt.Printf("copy %d.%v %s => %s\n", vid, needToCopyEcIndexBits.ShardIds(), loc, targetNodeLocation) + + _, copyErr := volumeServerClient.VolumeEcShardsCopy(context.Background(), &volume_server_pb.VolumeEcShardsCopyRequest{ + VolumeId: uint32(vid), + Collection: collection, + ShardIds: needToCopyEcIndexBits.ToUint32Slice(), + CopyEcxFile: false, + CopyEcjFile: true, + CopyVifFile: true, + SourceDataNode: loc, + }) + if copyErr != nil { + return fmt.Errorf("copy %d.%v %s => %s : %v\n", vid, needToCopyEcIndexBits.ShardIds(), loc, targetNodeLocation, copyErr) + } + + return nil + }) + + if err != nil { + break + } + + copiedEcIndexBits = copiedEcIndexBits.Plus(needToCopyEcIndexBits) + + } + + nodeToEcIndexBits[targetNodeLocation] = exisitngEcIndexBits.Plus(copiedEcIndexBits) + + return targetNodeLocation, err + +} + +func collectTopologyInfo(commandEnv *CommandEnv) (topoInfo *master_pb.TopologyInfo, err error) { + + var resp *master_pb.VolumeListResponse + 
err = commandEnv.MasterClient.WithClient(func(client master_pb.SeaweedClient) error { + resp, err = client.VolumeList(context.Background(), &master_pb.VolumeListRequest{}) + return err + }) + if err != nil { + return + } + + return resp.TopologyInfo, nil + +} + +func collectEcShardInfos(topoInfo *master_pb.TopologyInfo, selectedCollection string, vid needle.VolumeId) (ecShardInfos []*master_pb.VolumeEcShardInformationMessage) { + + eachDataNode(topoInfo, func(dc string, rack RackId, dn *master_pb.DataNodeInfo) { + for _, v := range dn.EcShardInfos { + if v.Collection == selectedCollection && v.Id == uint32(vid) { + ecShardInfos = append(ecShardInfos, v) + } + } + }) + + return +} + +func collectEcShardIds(topoInfo *master_pb.TopologyInfo, selectedCollection string) (vids []needle.VolumeId) { + + vidMap := make(map[uint32]bool) + eachDataNode(topoInfo, func(dc string, rack RackId, dn *master_pb.DataNodeInfo) { + for _, v := range dn.EcShardInfos { + if v.Collection == selectedCollection { + vidMap[v.Id] = true + } + } + }) + + for vid := range vidMap { + vids = append(vids, needle.VolumeId(vid)) + } + + return +} + +func collectEcNodeShardBits(topoInfo *master_pb.TopologyInfo, vid needle.VolumeId) map[string]erasure_coding.ShardBits { + + nodeToEcIndexBits := make(map[string]erasure_coding.ShardBits) + eachDataNode(topoInfo, func(dc string, rack RackId, dn *master_pb.DataNodeInfo) { + for _, v := range dn.EcShardInfos { + if v.Id == uint32(vid) { + nodeToEcIndexBits[dn.Id] = erasure_coding.ShardBits(v.EcIndexBits) + } + } + }) + + return nodeToEcIndexBits +} diff --git a/weed/shell/command_ec_encode.go b/weed/shell/command_ec_encode.go new file mode 100644 index 000000000..5a8146954 --- /dev/null +++ b/weed/shell/command_ec_encode.go @@ -0,0 +1,298 @@ +package shell + +import ( + "context" + "flag" + "fmt" + "io" + "sync" + "time" + + "google.golang.org/grpc" + + "github.com/chrislusf/seaweedfs/weed/operation" + "github.com/chrislusf/seaweedfs/weed/pb/master_pb" + 
"github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb" + "github.com/chrislusf/seaweedfs/weed/storage/erasure_coding" + "github.com/chrislusf/seaweedfs/weed/storage/needle" + "github.com/chrislusf/seaweedfs/weed/wdclient" +) + +func init() { + Commands = append(Commands, &commandEcEncode{}) +} + +type commandEcEncode struct { +} + +func (c *commandEcEncode) Name() string { + return "ec.encode" +} + +func (c *commandEcEncode) Help() string { + return `apply erasure coding to a volume + + ec.encode [-collection=""] [-fullPercent=95] [-quietFor=1h] + ec.encode [-collection=""] [-volumeId=] + + This command will: + 1. freeze one volume + 2. apply erasure coding to the volume + 3. move the encoded shards to multiple volume servers + + The erasure coding is 10.4. So ideally you have more than 14 volume servers, and you can afford + to lose 4 volume servers. + + If the number of volumes are not high, the worst case is that you only have 4 volume servers, + and the shards are spread as 4,4,3,3, respectively. You can afford to lose one volume server. + + If you only have less than 4 volume servers, with erasure coding, at least you can afford to + have 4 corrupted shard files. 
+ +` +} + +func (c *commandEcEncode) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) { + + if err = commandEnv.confirmIsLocked(); err != nil { + return + } + + encodeCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError) + volumeId := encodeCommand.Int("volumeId", 0, "the volume id") + collection := encodeCommand.String("collection", "", "the collection name") + fullPercentage := encodeCommand.Float64("fullPercent", 95, "the volume reaches the percentage of max volume size") + quietPeriod := encodeCommand.Duration("quietFor", time.Hour, "select volumes without no writes for this period") + if err = encodeCommand.Parse(args); err != nil { + return nil + } + + vid := needle.VolumeId(*volumeId) + + // volumeId is provided + if vid != 0 { + return doEcEncode(commandEnv, *collection, vid) + } + + // apply to all volumes in the collection + volumeIds, err := collectVolumeIdsForEcEncode(commandEnv, *collection, *fullPercentage, *quietPeriod) + if err != nil { + return err + } + fmt.Printf("ec encode volumes: %v\n", volumeIds) + for _, vid := range volumeIds { + if err = doEcEncode(commandEnv, *collection, vid); err != nil { + return err + } + } + + return nil +} + +func doEcEncode(commandEnv *CommandEnv, collection string, vid needle.VolumeId) (err error) { + // find volume location + locations, found := commandEnv.MasterClient.GetLocations(uint32(vid)) + if !found { + return fmt.Errorf("volume %d not found", vid) + } + + // fmt.Printf("found ec %d shards on %v\n", vid, locations) + + // mark the volume as readonly + err = markVolumeReadonly(commandEnv.option.GrpcDialOption, needle.VolumeId(vid), locations) + if err != nil { + return fmt.Errorf("mark volume %d as readonly on %s: %v", vid, locations[0].Url, err) + } + + // generate ec shards + err = generateEcShards(commandEnv.option.GrpcDialOption, needle.VolumeId(vid), collection, locations[0].Url) + if err != nil { + return fmt.Errorf("generate ec shards for volume %d on %s: %v", vid, 
locations[0].Url, err) + } + + // balance the ec shards to current cluster + err = spreadEcShards(commandEnv, vid, collection, locations) + if err != nil { + return fmt.Errorf("spread ec shards for volume %d from %s: %v", vid, locations[0].Url, err) + } + + return nil +} + +func markVolumeReadonly(grpcDialOption grpc.DialOption, volumeId needle.VolumeId, locations []wdclient.Location) error { + + for _, location := range locations { + + fmt.Printf("markVolumeReadonly %d on %s ...\n", volumeId, location.Url) + + err := operation.WithVolumeServerClient(location.Url, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error { + _, markErr := volumeServerClient.VolumeMarkReadonly(context.Background(), &volume_server_pb.VolumeMarkReadonlyRequest{ + VolumeId: uint32(volumeId), + }) + return markErr + }) + + if err != nil { + return err + } + + } + + return nil +} + +func generateEcShards(grpcDialOption grpc.DialOption, volumeId needle.VolumeId, collection string, sourceVolumeServer string) error { + + fmt.Printf("generateEcShards %s %d on %s ...\n", collection, volumeId, sourceVolumeServer) + + err := operation.WithVolumeServerClient(sourceVolumeServer, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error { + _, genErr := volumeServerClient.VolumeEcShardsGenerate(context.Background(), &volume_server_pb.VolumeEcShardsGenerateRequest{ + VolumeId: uint32(volumeId), + Collection: collection, + }) + return genErr + }) + + return err + +} + +func spreadEcShards(commandEnv *CommandEnv, volumeId needle.VolumeId, collection string, existingLocations []wdclient.Location) (err error) { + + allEcNodes, totalFreeEcSlots, err := collectEcNodes(commandEnv, "") + if err != nil { + return err + } + + if totalFreeEcSlots < erasure_coding.TotalShardsCount { + return fmt.Errorf("not enough free ec shard slots. 
only %d left", totalFreeEcSlots) + } + allocatedDataNodes := allEcNodes + if len(allocatedDataNodes) > erasure_coding.TotalShardsCount { + allocatedDataNodes = allocatedDataNodes[:erasure_coding.TotalShardsCount] + } + + // calculate how many shards to allocate for these servers + allocatedEcIds := balancedEcDistribution(allocatedDataNodes) + + // ask the data nodes to copy from the source volume server + copiedShardIds, err := parallelCopyEcShardsFromSource(commandEnv.option.GrpcDialOption, allocatedDataNodes, allocatedEcIds, volumeId, collection, existingLocations[0]) + if err != nil { + return err + } + + // unmount the to be deleted shards + err = unmountEcShards(commandEnv.option.GrpcDialOption, volumeId, existingLocations[0].Url, copiedShardIds) + if err != nil { + return err + } + + // ask the source volume server to clean up copied ec shards + err = sourceServerDeleteEcShards(commandEnv.option.GrpcDialOption, collection, volumeId, existingLocations[0].Url, copiedShardIds) + if err != nil { + return fmt.Errorf("source delete copied ecShards %s %d.%v: %v", existingLocations[0].Url, volumeId, copiedShardIds, err) + } + + // ask the source volume server to delete the original volume + for _, location := range existingLocations { + fmt.Printf("delete volume %d from %s\n", volumeId, location.Url) + err = deleteVolume(commandEnv.option.GrpcDialOption, volumeId, location.Url) + if err != nil { + return fmt.Errorf("deleteVolume %s volume %d: %v", location.Url, volumeId, err) + } + } + + return err + +} + +func parallelCopyEcShardsFromSource(grpcDialOption grpc.DialOption, targetServers []*EcNode, allocatedEcIds [][]uint32, volumeId needle.VolumeId, collection string, existingLocation wdclient.Location) (actuallyCopied []uint32, err error) { + + fmt.Printf("parallelCopyEcShardsFromSource %d %s\n", volumeId, existingLocation.Url) + + // parallelize + shardIdChan := make(chan []uint32, len(targetServers)) + var wg sync.WaitGroup + for i, server := range targetServers { 
+ if len(allocatedEcIds[i]) <= 0 { + continue + } + + wg.Add(1) + go func(server *EcNode, allocatedEcShardIds []uint32) { + defer wg.Done() + copiedShardIds, copyErr := oneServerCopyAndMountEcShardsFromSource(grpcDialOption, server, + allocatedEcShardIds, volumeId, collection, existingLocation.Url) + if copyErr != nil { + err = copyErr + } else { + shardIdChan <- copiedShardIds + server.addEcVolumeShards(volumeId, collection, copiedShardIds) + } + }(server, allocatedEcIds[i]) + } + wg.Wait() + close(shardIdChan) + + if err != nil { + return nil, err + } + + for shardIds := range shardIdChan { + actuallyCopied = append(actuallyCopied, shardIds...) + } + + return +} + +func balancedEcDistribution(servers []*EcNode) (allocated [][]uint32) { + allocated = make([][]uint32, len(servers)) + allocatedShardIdIndex := uint32(0) + serverIndex := 0 + for allocatedShardIdIndex < erasure_coding.TotalShardsCount { + if servers[serverIndex].freeEcSlot > 0 { + allocated[serverIndex] = append(allocated[serverIndex], allocatedShardIdIndex) + allocatedShardIdIndex++ + } + serverIndex++ + if serverIndex >= len(servers) { + serverIndex = 0 + } + } + + return allocated +} + +func collectVolumeIdsForEcEncode(commandEnv *CommandEnv, selectedCollection string, fullPercentage float64, quietPeriod time.Duration) (vids []needle.VolumeId, err error) { + + var resp *master_pb.VolumeListResponse + err = commandEnv.MasterClient.WithClient(func(client master_pb.SeaweedClient) error { + resp, err = client.VolumeList(context.Background(), &master_pb.VolumeListRequest{}) + return err + }) + if err != nil { + return + } + + quietSeconds := int64(quietPeriod / time.Second) + nowUnixSeconds := time.Now().Unix() + + fmt.Printf("ec encode volumes quiet for: %d seconds\n", quietSeconds) + + vidMap := make(map[uint32]bool) + eachDataNode(resp.TopologyInfo, func(dc string, rack RackId, dn *master_pb.DataNodeInfo) { + for _, v := range dn.VolumeInfos { + if v.Collection == selectedCollection && 
v.ModifiedAtSecond+quietSeconds < nowUnixSeconds { + if float64(v.Size) > fullPercentage/100*float64(resp.VolumeSizeLimitMb)*1024*1024 { + vidMap[v.Id] = true + } + } + } + }) + + for vid := range vidMap { + vids = append(vids, needle.VolumeId(vid)) + } + + return +} diff --git a/weed/shell/command_ec_rebuild.go b/weed/shell/command_ec_rebuild.go new file mode 100644 index 000000000..df28681fe --- /dev/null +++ b/weed/shell/command_ec_rebuild.go @@ -0,0 +1,271 @@ +package shell + +import ( + "context" + "flag" + "fmt" + "io" + + "github.com/chrislusf/seaweedfs/weed/operation" + "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb" + "github.com/chrislusf/seaweedfs/weed/storage/erasure_coding" + "github.com/chrislusf/seaweedfs/weed/storage/needle" + "google.golang.org/grpc" +) + +func init() { + Commands = append(Commands, &commandEcRebuild{}) +} + +type commandEcRebuild struct { +} + +func (c *commandEcRebuild) Name() string { + return "ec.rebuild" +} + +func (c *commandEcRebuild) Help() string { + return `find and rebuild missing ec shards among volume servers + + ec.rebuild [-c EACH_COLLECTION|] [-force] + + Algorithm: + + For each type of volume server (different max volume count limit){ + for each collection { + rebuildEcVolumes() + } + } + + func rebuildEcVolumes(){ + idealWritableVolumes = totalWritableVolumes / numVolumeServers + for { + sort all volume servers ordered by the number of local writable volumes + pick the volume server A with the lowest number of writable volumes x + pick the volume server B with the highest number of writable volumes y + if y > idealWritableVolumes and x +1 <= idealWritableVolumes { + if B has a writable volume id v that A does not have { + move writable volume v from A to B + } + } + } + } + +` +} + +func (c *commandEcRebuild) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) { + + if err = commandEnv.confirmIsLocked(); err != nil { + return + } + + fixCommand := flag.NewFlagSet(c.Name(), 
flag.ContinueOnError) + collection := fixCommand.String("collection", "EACH_COLLECTION", "collection name, or \"EACH_COLLECTION\" for each collection") + applyChanges := fixCommand.Bool("force", false, "apply the changes") + if err = fixCommand.Parse(args); err != nil { + return nil + } + + // collect all ec nodes + allEcNodes, _, err := collectEcNodes(commandEnv, "") + if err != nil { + return err + } + + if *collection == "EACH_COLLECTION" { + collections, err := ListCollectionNames(commandEnv, false, true) + if err != nil { + return err + } + fmt.Printf("rebuildEcVolumes collections %+v\n", len(collections)) + for _, c := range collections { + fmt.Printf("rebuildEcVolumes collection %+v\n", c) + if err = rebuildEcVolumes(commandEnv, allEcNodes, c, writer, *applyChanges); err != nil { + return err + } + } + } else { + if err = rebuildEcVolumes(commandEnv, allEcNodes, *collection, writer, *applyChanges); err != nil { + return err + } + } + + return nil +} + +func rebuildEcVolumes(commandEnv *CommandEnv, allEcNodes []*EcNode, collection string, writer io.Writer, applyChanges bool) error { + + fmt.Printf("rebuildEcVolumes %s\n", collection) + + // collect vid => each shard locations, similar to ecShardMap in topology.go + ecShardMap := make(EcShardMap) + for _, ecNode := range allEcNodes { + ecShardMap.registerEcNode(ecNode, collection) + } + + for vid, locations := range ecShardMap { + shardCount := locations.shardCount() + if shardCount == erasure_coding.TotalShardsCount { + continue + } + if shardCount < erasure_coding.DataShardsCount { + return fmt.Errorf("ec volume %d is unrepairable with %d shards\n", vid, shardCount) + } + + sortEcNodesByFreeslotsDecending(allEcNodes) + + if allEcNodes[0].freeEcSlot < erasure_coding.TotalShardsCount { + return fmt.Errorf("disk space is not enough") + } + + if err := rebuildOneEcVolume(commandEnv, allEcNodes[0], collection, vid, locations, writer, applyChanges); err != nil { + return err + } + } + + return nil +} + +func 
rebuildOneEcVolume(commandEnv *CommandEnv, rebuilder *EcNode, collection string, volumeId needle.VolumeId, locations EcShardLocations, writer io.Writer, applyChanges bool) error { + + fmt.Printf("rebuildOneEcVolume %s %d\n", collection, volumeId) + + // collect shard files to rebuilder local disk + var generatedShardIds []uint32 + copiedShardIds, _, err := prepareDataToRecover(commandEnv, rebuilder, collection, volumeId, locations, writer, applyChanges) + if err != nil { + return err + } + defer func() { + // clean up working files + + // ask the rebuilder to delete the copied shards + err = sourceServerDeleteEcShards(commandEnv.option.GrpcDialOption, collection, volumeId, rebuilder.info.Id, copiedShardIds) + if err != nil { + fmt.Fprintf(writer, "%s delete copied ec shards %s %d.%v\n", rebuilder.info.Id, collection, volumeId, copiedShardIds) + } + + }() + + if !applyChanges { + return nil + } + + // generate ec shards, and maybe ecx file + generatedShardIds, err = generateMissingShards(commandEnv.option.GrpcDialOption, collection, volumeId, rebuilder.info.Id) + if err != nil { + return err + } + + // mount the generated shards + err = mountEcShards(commandEnv.option.GrpcDialOption, collection, volumeId, rebuilder.info.Id, generatedShardIds) + if err != nil { + return err + } + + rebuilder.addEcVolumeShards(volumeId, collection, generatedShardIds) + + return nil +} + +func generateMissingShards(grpcDialOption grpc.DialOption, collection string, volumeId needle.VolumeId, sourceLocation string) (rebuiltShardIds []uint32, err error) { + + err = operation.WithVolumeServerClient(sourceLocation, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error { + resp, rebultErr := volumeServerClient.VolumeEcShardsRebuild(context.Background(), &volume_server_pb.VolumeEcShardsRebuildRequest{ + VolumeId: uint32(volumeId), + Collection: collection, + }) + if rebultErr == nil { + rebuiltShardIds = resp.RebuiltShardIds + } + return rebultErr + }) + return +} 
+ +func prepareDataToRecover(commandEnv *CommandEnv, rebuilder *EcNode, collection string, volumeId needle.VolumeId, locations EcShardLocations, writer io.Writer, applyBalancing bool) (copiedShardIds []uint32, localShardIds []uint32, err error) { + + needEcxFile := true + var localShardBits erasure_coding.ShardBits + for _, ecShardInfo := range rebuilder.info.EcShardInfos { + if ecShardInfo.Collection == collection && needle.VolumeId(ecShardInfo.Id) == volumeId { + needEcxFile = false + localShardBits = erasure_coding.ShardBits(ecShardInfo.EcIndexBits) + } + } + + for shardId, ecNodes := range locations { + + if len(ecNodes) == 0 { + fmt.Fprintf(writer, "missing shard %d.%d\n", volumeId, shardId) + continue + } + + if localShardBits.HasShardId(erasure_coding.ShardId(shardId)) { + localShardIds = append(localShardIds, uint32(shardId)) + fmt.Fprintf(writer, "use existing shard %d.%d\n", volumeId, shardId) + continue + } + + var copyErr error + if applyBalancing { + copyErr = operation.WithVolumeServerClient(rebuilder.info.Id, commandEnv.option.GrpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error { + _, copyErr := volumeServerClient.VolumeEcShardsCopy(context.Background(), &volume_server_pb.VolumeEcShardsCopyRequest{ + VolumeId: uint32(volumeId), + Collection: collection, + ShardIds: []uint32{uint32(shardId)}, + CopyEcxFile: needEcxFile, + CopyEcjFile: needEcxFile, + CopyVifFile: needEcxFile, + SourceDataNode: ecNodes[0].info.Id, + }) + return copyErr + }) + if copyErr == nil && needEcxFile { + needEcxFile = false + } + } + if copyErr != nil { + fmt.Fprintf(writer, "%s failed to copy %d.%d from %s: %v\n", rebuilder.info.Id, volumeId, shardId, ecNodes[0].info.Id, copyErr) + } else { + fmt.Fprintf(writer, "%s copied %d.%d from %s\n", rebuilder.info.Id, volumeId, shardId, ecNodes[0].info.Id) + copiedShardIds = append(copiedShardIds, uint32(shardId)) + } + + } + + if len(copiedShardIds)+len(localShardIds) >= erasure_coding.DataShardsCount { + 
return copiedShardIds, localShardIds, nil + } + + return nil, nil, fmt.Errorf("%d shards are not enough to recover volume %d", len(copiedShardIds)+len(localShardIds), volumeId) + +} + +type EcShardMap map[needle.VolumeId]EcShardLocations +type EcShardLocations [][]*EcNode + +func (ecShardMap EcShardMap) registerEcNode(ecNode *EcNode, collection string) { + for _, shardInfo := range ecNode.info.EcShardInfos { + if shardInfo.Collection == collection { + existing, found := ecShardMap[needle.VolumeId(shardInfo.Id)] + if !found { + existing = make([][]*EcNode, erasure_coding.TotalShardsCount) + ecShardMap[needle.VolumeId(shardInfo.Id)] = existing + } + for _, shardId := range erasure_coding.ShardBits(shardInfo.EcIndexBits).ShardIds() { + existing[shardId] = append(existing[shardId], ecNode) + } + } + } +} + +func (ecShardLocations EcShardLocations) shardCount() (count int) { + for _, locations := range ecShardLocations { + if len(locations) > 0 { + count++ + } + } + return +} diff --git a/weed/shell/command_ec_test.go b/weed/shell/command_ec_test.go new file mode 100644 index 000000000..4fddcbea5 --- /dev/null +++ b/weed/shell/command_ec_test.go @@ -0,0 +1,139 @@ +package shell + +import ( + "fmt" + "testing" + + "github.com/chrislusf/seaweedfs/weed/pb/master_pb" + "github.com/chrislusf/seaweedfs/weed/storage/needle" +) + +func TestCommandEcDistribution(t *testing.T) { + + allEcNodes := []*EcNode{ + newEcNode("dc1", "rack1", "dn1", 100), + newEcNode("dc1", "rack2", "dn2", 100), + } + + allocated := balancedEcDistribution(allEcNodes) + + fmt.Printf("allocated: %+v", allocated) +} + +func TestCommandEcBalanceSmall(t *testing.T) { + + allEcNodes := []*EcNode{ + newEcNode("dc1", "rack1", "dn1", 100).addEcVolumeAndShardsForTest(1, "c1", []uint32{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13}), + newEcNode("dc1", "rack2", "dn2", 100).addEcVolumeAndShardsForTest(2, "c1", []uint32{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13}), + } + + racks := collectRacks(allEcNodes) + 
balanceEcVolumes(nil, "c1", allEcNodes, racks, false) +} + +func TestCommandEcBalanceNothingToMove(t *testing.T) { + + allEcNodes := []*EcNode{ + newEcNode("dc1", "rack1", "dn1", 100). + addEcVolumeAndShardsForTest(1, "c1", []uint32{0, 1, 2, 3, 4, 5, 6}). + addEcVolumeAndShardsForTest(2, "c1", []uint32{7, 8, 9, 10, 11, 12, 13}), + newEcNode("dc1", "rack1", "dn2", 100). + addEcVolumeAndShardsForTest(1, "c1", []uint32{7, 8, 9, 10, 11, 12, 13}). + addEcVolumeAndShardsForTest(2, "c1", []uint32{0, 1, 2, 3, 4, 5, 6}), + } + + racks := collectRacks(allEcNodes) + balanceEcVolumes(nil, "c1", allEcNodes, racks, false) +} + +func TestCommandEcBalanceAddNewServers(t *testing.T) { + + allEcNodes := []*EcNode{ + newEcNode("dc1", "rack1", "dn1", 100). + addEcVolumeAndShardsForTest(1, "c1", []uint32{0, 1, 2, 3, 4, 5, 6}). + addEcVolumeAndShardsForTest(2, "c1", []uint32{7, 8, 9, 10, 11, 12, 13}), + newEcNode("dc1", "rack1", "dn2", 100). + addEcVolumeAndShardsForTest(1, "c1", []uint32{7, 8, 9, 10, 11, 12, 13}). + addEcVolumeAndShardsForTest(2, "c1", []uint32{0, 1, 2, 3, 4, 5, 6}), + newEcNode("dc1", "rack1", "dn3", 100), + newEcNode("dc1", "rack1", "dn4", 100), + } + + racks := collectRacks(allEcNodes) + balanceEcVolumes(nil, "c1", allEcNodes, racks, false) +} + +func TestCommandEcBalanceAddNewRacks(t *testing.T) { + + allEcNodes := []*EcNode{ + newEcNode("dc1", "rack1", "dn1", 100). + addEcVolumeAndShardsForTest(1, "c1", []uint32{0, 1, 2, 3, 4, 5, 6}). + addEcVolumeAndShardsForTest(2, "c1", []uint32{7, 8, 9, 10, 11, 12, 13}), + newEcNode("dc1", "rack1", "dn2", 100). + addEcVolumeAndShardsForTest(1, "c1", []uint32{7, 8, 9, 10, 11, 12, 13}). 
+ addEcVolumeAndShardsForTest(2, "c1", []uint32{0, 1, 2, 3, 4, 5, 6}), + newEcNode("dc1", "rack2", "dn3", 100), + newEcNode("dc1", "rack2", "dn4", 100), + } + + racks := collectRacks(allEcNodes) + balanceEcVolumes(nil, "c1", allEcNodes, racks, false) +} + +func TestCommandEcBalanceVolumeEvenButRackUneven(t *testing.T) { + + allEcNodes := []*EcNode{ + newEcNode("dc1", "rack1", "dn_shared", 100). + addEcVolumeAndShardsForTest(1, "c1", []uint32{0}). + addEcVolumeAndShardsForTest(2, "c1", []uint32{0}), + + newEcNode("dc1", "rack1", "dn_a1", 100).addEcVolumeAndShardsForTest(1, "c1", []uint32{1}), + newEcNode("dc1", "rack1", "dn_a2", 100).addEcVolumeAndShardsForTest(1, "c1", []uint32{2}), + newEcNode("dc1", "rack1", "dn_a3", 100).addEcVolumeAndShardsForTest(1, "c1", []uint32{3}), + newEcNode("dc1", "rack1", "dn_a4", 100).addEcVolumeAndShardsForTest(1, "c1", []uint32{4}), + newEcNode("dc1", "rack1", "dn_a5", 100).addEcVolumeAndShardsForTest(1, "c1", []uint32{5}), + newEcNode("dc1", "rack1", "dn_a6", 100).addEcVolumeAndShardsForTest(1, "c1", []uint32{6}), + newEcNode("dc1", "rack1", "dn_a7", 100).addEcVolumeAndShardsForTest(1, "c1", []uint32{7}), + newEcNode("dc1", "rack1", "dn_a8", 100).addEcVolumeAndShardsForTest(1, "c1", []uint32{8}), + newEcNode("dc1", "rack1", "dn_a9", 100).addEcVolumeAndShardsForTest(1, "c1", []uint32{9}), + newEcNode("dc1", "rack1", "dn_a10", 100).addEcVolumeAndShardsForTest(1, "c1", []uint32{10}), + newEcNode("dc1", "rack1", "dn_a11", 100).addEcVolumeAndShardsForTest(1, "c1", []uint32{11}), + newEcNode("dc1", "rack1", "dn_a12", 100).addEcVolumeAndShardsForTest(1, "c1", []uint32{12}), + newEcNode("dc1", "rack1", "dn_a13", 100).addEcVolumeAndShardsForTest(1, "c1", []uint32{13}), + + newEcNode("dc1", "rack1", "dn_b1", 100).addEcVolumeAndShardsForTest(2, "c1", []uint32{1}), + newEcNode("dc1", "rack1", "dn_b2", 100).addEcVolumeAndShardsForTest(2, "c1", []uint32{2}), + newEcNode("dc1", "rack1", "dn_b3", 100).addEcVolumeAndShardsForTest(2, "c1", 
[]uint32{3}), + newEcNode("dc1", "rack1", "dn_b4", 100).addEcVolumeAndShardsForTest(2, "c1", []uint32{4}), + newEcNode("dc1", "rack1", "dn_b5", 100).addEcVolumeAndShardsForTest(2, "c1", []uint32{5}), + newEcNode("dc1", "rack1", "dn_b6", 100).addEcVolumeAndShardsForTest(2, "c1", []uint32{6}), + newEcNode("dc1", "rack1", "dn_b7", 100).addEcVolumeAndShardsForTest(2, "c1", []uint32{7}), + newEcNode("dc1", "rack1", "dn_b8", 100).addEcVolumeAndShardsForTest(2, "c1", []uint32{8}), + newEcNode("dc1", "rack1", "dn_b9", 100).addEcVolumeAndShardsForTest(2, "c1", []uint32{9}), + newEcNode("dc1", "rack1", "dn_b10", 100).addEcVolumeAndShardsForTest(2, "c1", []uint32{10}), + newEcNode("dc1", "rack1", "dn_b11", 100).addEcVolumeAndShardsForTest(2, "c1", []uint32{11}), + newEcNode("dc1", "rack1", "dn_b12", 100).addEcVolumeAndShardsForTest(2, "c1", []uint32{12}), + newEcNode("dc1", "rack1", "dn_b13", 100).addEcVolumeAndShardsForTest(2, "c1", []uint32{13}), + + newEcNode("dc1", "rack1", "dn3", 100), + } + + racks := collectRacks(allEcNodes) + balanceEcVolumes(nil, "c1", allEcNodes, racks, false) + balanceEcRacks(nil, racks, false) +} + +func newEcNode(dc string, rack string, dataNodeId string, freeEcSlot int) *EcNode { + return &EcNode{ + info: &master_pb.DataNodeInfo{ + Id: dataNodeId, + }, + dc: dc, + rack: RackId(rack), + freeEcSlot: freeEcSlot, + } +} + +func (ecNode *EcNode) addEcVolumeAndShardsForTest(vid uint32, collection string, shardIds []uint32) *EcNode { + return ecNode.addEcVolumeShards(needle.VolumeId(vid), collection, shardIds) +} diff --git a/weed/shell/command_fs_cat.go b/weed/shell/command_fs_cat.go new file mode 100644 index 000000000..7177d8ac3 --- /dev/null +++ b/weed/shell/command_fs_cat.go @@ -0,0 +1,59 @@ +package shell + +import ( + "fmt" + "io" + "math" + + "github.com/chrislusf/seaweedfs/weed/filer2" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/util" +) + +func init() { + Commands = append(Commands, 
&commandFsCat{}) +} + +type commandFsCat struct { +} + +func (c *commandFsCat) Name() string { + return "fs.cat" +} + +func (c *commandFsCat) Help() string { + return `stream the file content on to the screen + + fs.cat /dir/file_name +` +} + +func (c *commandFsCat) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) { + + path, err := commandEnv.parseUrl(findInputDirectory(args)) + if err != nil { + return err + } + + if commandEnv.isDirectory(path) { + return fmt.Errorf("%s is a directory", path) + } + + dir, name := util.FullPath(path).DirAndName() + + return commandEnv.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { + + request := &filer_pb.LookupDirectoryEntryRequest{ + Name: name, + Directory: dir, + } + respLookupEntry, err := filer_pb.LookupEntry(client, request) + if err != nil { + return err + } + + return filer2.StreamContent(commandEnv.MasterClient, writer, respLookupEntry.Entry.Chunks, 0, math.MaxInt64) + + }) + +} diff --git a/weed/shell/command_fs_cd.go b/weed/shell/command_fs_cd.go new file mode 100644 index 000000000..2cc28f7a2 --- /dev/null +++ b/weed/shell/command_fs_cd.go @@ -0,0 +1,50 @@ +package shell + +import ( + "io" +) + +func init() { + Commands = append(Commands, &commandFsCd{}) +} + +type commandFsCd struct { +} + +func (c *commandFsCd) Name() string { + return "fs.cd" +} + +func (c *commandFsCd) Help() string { + return `change directory to a directory /path/to/dir + + The full path can be too long to type. 
For example, + fs.ls /some/path/to/file_name + + can be simplified as + + fs.cd /some/path + fs.ls to/file_name +` +} + +func (c *commandFsCd) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) { + + path, err := commandEnv.parseUrl(findInputDirectory(args)) + if err != nil { + return err + } + + if path == "/" { + commandEnv.option.Directory = "/" + return nil + } + + err = commandEnv.checkDirectory(path) + + if err == nil { + commandEnv.option.Directory = path + } + + return err +} diff --git a/weed/shell/command_fs_du.go b/weed/shell/command_fs_du.go new file mode 100644 index 000000000..96551dd5a --- /dev/null +++ b/weed/shell/command_fs_du.go @@ -0,0 +1,84 @@ +package shell + +import ( + "fmt" + "io" + + "github.com/chrislusf/seaweedfs/weed/filer2" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/util" +) + +func init() { + Commands = append(Commands, &commandFsDu{}) +} + +type commandFsDu struct { +} + +func (c *commandFsDu) Name() string { + return "fs.du" +} + +func (c *commandFsDu) Help() string { + return `show disk usage + + fs.du /dir + fs.du /dir/file_name + fs.du /dir/file_prefix +` +} + +func (c *commandFsDu) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) { + + path, err := commandEnv.parseUrl(findInputDirectory(args)) + if err != nil { + return err + } + + if commandEnv.isDirectory(path) { + path = path + "/" + } + + var blockCount, byteCount uint64 + dir, name := util.FullPath(path).DirAndName() + blockCount, byteCount, err = duTraverseDirectory(writer, commandEnv, dir, name) + + if name == "" && err == nil { + fmt.Fprintf(writer, "block:%4d\tbyte:%10d\t%s\n", blockCount, byteCount, dir) + } + + return + +} + +func duTraverseDirectory(writer io.Writer, filerClient filer_pb.FilerClient, dir, name string) (blockCount, byteCount uint64, err error) { + + err = filer_pb.ReadDirAllEntries(filerClient, util.FullPath(dir), name, func(entry *filer_pb.Entry, isLast bool) 
error { + + var fileBlockCount, fileByteCount uint64 + + if entry.IsDirectory { + subDir := fmt.Sprintf("%s/%s", dir, entry.Name) + if dir == "/" { + subDir = "/" + entry.Name + } + numBlock, numByte, err := duTraverseDirectory(writer, filerClient, subDir, "") + if err == nil { + blockCount += numBlock + byteCount += numByte + } + } else { + fileBlockCount = uint64(len(entry.Chunks)) + fileByteCount = filer2.TotalSize(entry.Chunks) + blockCount += uint64(len(entry.Chunks)) + byteCount += filer2.TotalSize(entry.Chunks) + } + + if name != "" && !entry.IsDirectory { + fmt.Fprintf(writer, "block:%4d\tbyte:%10d\t%s/%s\n", fileBlockCount, fileByteCount, dir, entry.Name) + } + return nil + }) + return +} diff --git a/weed/shell/command_fs_lock_unlock.go b/weed/shell/command_fs_lock_unlock.go new file mode 100644 index 000000000..8a6e8f71b --- /dev/null +++ b/weed/shell/command_fs_lock_unlock.go @@ -0,0 +1,54 @@ +package shell + +import ( + "io" +) + +func init() { + Commands = append(Commands, &commandUnlock{}) + Commands = append(Commands, &commandLock{}) +} + +// =========== Lock ============== +type commandLock struct { +} + +func (c *commandLock) Name() string { + return "lock" +} + +func (c *commandLock) Help() string { + return `lock in order to exclusively manage the cluster + + This is a blocking operation if there is alread another lock. 
+` +} + +func (c *commandLock) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) { + + commandEnv.locker.RequestLock() + + return nil +} + +// =========== Unlock ============== + +type commandUnlock struct { +} + +func (c *commandUnlock) Name() string { + return "unlock" +} + +func (c *commandUnlock) Help() string { + return `unlock the cluster-wide lock + +` +} + +func (c *commandUnlock) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) { + + commandEnv.locker.ReleaseLock() + + return nil +} diff --git a/weed/shell/command_fs_ls.go b/weed/shell/command_fs_ls.go new file mode 100644 index 000000000..36133992f --- /dev/null +++ b/weed/shell/command_fs_ls.go @@ -0,0 +1,111 @@ +package shell + +import ( + "fmt" + "io" + "os" + "os/user" + "strconv" + "strings" + + "github.com/chrislusf/seaweedfs/weed/filer2" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/util" +) + +func init() { + Commands = append(Commands, &commandFsLs{}) +} + +type commandFsLs struct { +} + +func (c *commandFsLs) Name() string { + return "fs.ls" +} + +func (c *commandFsLs) Help() string { + return `list all files under a directory + + fs.ls [-l] [-a] /dir/ + fs.ls [-l] [-a] /dir/file_name + fs.ls [-l] [-a] /dir/file_prefix +` +} + +func (c *commandFsLs) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) { + + var isLongFormat, showHidden bool + for _, arg := range args { + if !strings.HasPrefix(arg, "-") { + break + } + for _, t := range arg { + switch t { + case 'a': + showHidden = true + case 'l': + isLongFormat = true + } + } + } + + path, err := commandEnv.parseUrl(findInputDirectory(args)) + if err != nil { + return err + } + + if commandEnv.isDirectory(path) { + path = path + "/" + } + + dir, name := util.FullPath(path).DirAndName() + entryCount := 0 + + err = filer_pb.ReadDirAllEntries(commandEnv, util.FullPath(dir), name, func(entry *filer_pb.Entry, isLast bool) error { + + if 
!showHidden && strings.HasPrefix(entry.Name, ".") { + return nil + } + + entryCount++ + + if isLongFormat { + fileMode := os.FileMode(entry.Attributes.FileMode) + userName, groupNames := entry.Attributes.UserName, entry.Attributes.GroupName + if userName == "" { + if user, userErr := user.LookupId(strconv.Itoa(int(entry.Attributes.Uid))); userErr == nil { + userName = user.Username + } + } + groupName := "" + if len(groupNames) > 0 { + groupName = groupNames[0] + } + if groupName == "" { + if group, groupErr := user.LookupGroupId(strconv.Itoa(int(entry.Attributes.Gid))); groupErr == nil { + groupName = group.Name + } + } + + if strings.HasSuffix(dir, "/") { + // just for printing + dir = dir[:len(dir)-1] + } + fmt.Fprintf(writer, "%s %3d %s %s %6d %s/%s\n", + fileMode, len(entry.Chunks), + userName, groupName, + filer2.TotalSize(entry.Chunks), dir, entry.Name) + } else { + fmt.Fprintf(writer, "%s\n", entry.Name) + } + + return nil + }) + + if isLongFormat && err == nil { + fmt.Fprintf(writer, "total %d\n", entryCount) + } + + return +} diff --git a/weed/shell/command_fs_meta_cat.go b/weed/shell/command_fs_meta_cat.go new file mode 100644 index 000000000..0679ec075 --- /dev/null +++ b/weed/shell/command_fs_meta_cat.go @@ -0,0 +1,68 @@ +package shell + +import ( + "fmt" + "io" + + "github.com/golang/protobuf/jsonpb" + + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/util" +) + +func init() { + Commands = append(Commands, &commandFsMetaCat{}) +} + +type commandFsMetaCat struct { +} + +func (c *commandFsMetaCat) Name() string { + return "fs.meta.cat" +} + +func (c *commandFsMetaCat) Help() string { + return `print out the meta data content for a file or directory + + fs.meta.cat /dir/ + fs.meta.cat /dir/file_name +` +} + +func (c *commandFsMetaCat) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) { + + path, err := commandEnv.parseUrl(findInputDirectory(args)) + if err != nil { + return err + } + + dir, 
name := util.FullPath(path).DirAndName() + + return commandEnv.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { + + request := &filer_pb.LookupDirectoryEntryRequest{ + Name: name, + Directory: dir, + } + respLookupEntry, err := filer_pb.LookupEntry(client, request) + if err != nil { + return err + } + + m := jsonpb.Marshaler{ + EmitDefaults: true, + Indent: " ", + } + + text, marshalErr := m.MarshalToString(respLookupEntry.Entry) + if marshalErr != nil { + return fmt.Errorf("marshal meta: %v", marshalErr) + } + + fmt.Fprintf(writer, "%s\n", text) + + return nil + + }) + +} diff --git a/weed/shell/command_fs_meta_load.go b/weed/shell/command_fs_meta_load.go new file mode 100644 index 000000000..69ae9454c --- /dev/null +++ b/weed/shell/command_fs_meta_load.go @@ -0,0 +1,100 @@ +package shell + +import ( + "fmt" + "io" + "os" + + "github.com/golang/protobuf/proto" + + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/util" +) + +func init() { + Commands = append(Commands, &commandFsMetaLoad{}) +} + +type commandFsMetaLoad struct { +} + +func (c *commandFsMetaLoad) Name() string { + return "fs.meta.load" +} + +func (c *commandFsMetaLoad) Help() string { + return `load saved filer meta data to restore the directory and file structure + + fs.meta.load --
+                <thead>
+                    <tr>
+                        <th>Id</th>
+                        <th>Collection</th>
+                        <th>Shard Size</th>
+                        <th>Shards</th>
+                        <th>CreatedAt</th>
+                    </tr>
+                </thead>
+                    <tr>
+                        <td>{{ .VolumeId }}</td>
+                        <td>{{ .Collection }}</td>
+                        <td>{{ bytesToHumanReadable .ShardSize }}</td>
+                        <td>{{ .ShardIdList }}</td>
+                        <td>{{ .CreatedAt.Format "02 Jan 06 15:04 -0700" }}</td>
+                    </tr>