diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md index 111f9aa6e..edb95fac1 100644 --- a/.github/ISSUE_TEMPLATE/bug_report.md +++ b/.github/ISSUE_TEMPLATE/bug_report.md @@ -8,6 +8,8 @@ assignees: '' --- Sponsors SeaweedFS via Patreon https://www.patreon.com/seaweedfs +Report issues here. Ask questions at https://stackoverflow.com/questions/tagged/seaweedfs +or in https://github.com/chrislusf/seaweedfs/discussions example of a good issue report: https://github.com/chrislusf/seaweedfs/issues/1005 diff --git a/.github/workflows/cleanup.yml b/.github/workflows/cleanup.yml new file mode 100644 index 000000000..47a677e6d --- /dev/null +++ b/.github/workflows/cleanup.yml @@ -0,0 +1,22 @@ +name: Cleanup + +on: + push: + branches: [ master ] + +jobs: + + build: + name: Build + runs-on: ubuntu-latest + + steps: + + - name: Delete old release assets + uses: mknejp/delete-release-assets@v1 + with: + token: ${{ github.token }} + tag: dev + fail-if-no-assets: false + assets: | + weed-* diff --git a/.github/workflows/go.yml b/.github/workflows/go.yml new file mode 100644 index 000000000..b2948a0b7 --- /dev/null +++ b/.github/workflows/go.yml @@ -0,0 +1,37 @@ +name: Go + +on: + push: + branches: [ master ] + pull_request: + branches: [ master ] + +jobs: + + build: + name: Build + runs-on: ubuntu-latest + steps: + + - name: Set up Go 1.x + uses: actions/setup-go@v2 + with: + go-version: ^1.13 + id: go + + - name: Check out code into the Go module directory + uses: actions/checkout@v2 + + - name: Get dependencies + run: | + cd weed; go get -v -t -d ./... + if [ -f Gopkg.toml ]; then + curl https://raw.githubusercontent.com/golang/dep/master/install.sh | sh + dep ensure + fi + + - name: Build + run: cd weed; go build -v . + + - name: Test + run: cd weed; go test -v ./...
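The go.yml workflow above is a plain Go build and test of the `weed` module; a minimal sketch of reproducing the same steps locally, assuming Go 1.13+ and git are installed (the `dep ensure` branch only runs if a `Gopkg.toml` is present):

```bash
# Mirror the CI steps from go.yml on a local checkout.
git clone https://github.com/chrislusf/seaweedfs.git
cd seaweedfs/weed

go get -v -t -d ./...   # "Get dependencies" step
go build -v .           # "Build" step
go test -v ./...        # "Test" step
```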
diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml new file mode 100644 index 000000000..dba704800 --- /dev/null +++ b/.github/workflows/release.yml @@ -0,0 +1,64 @@ +name: Release + +on: + push: + branches: [ master ] + +jobs: + + build: + name: Build + runs-on: ubuntu-latest + strategy: + matrix: + goos: [linux, windows, darwin, freebsd ] + goarch: [amd64, arm] + exclude: + - goarch: arm + goos: darwin + - goarch: arm + goos: windows + + steps: + + - name: Check out code into the Go module directory + uses: actions/checkout@v2 + + - name: Wait for the deletion + uses: jakejarvis/wait-action@master + with: + time: '30s' + + - name: Set BUILD_TIME env + run: echo BUILD_TIME=$(date -u +%Y-%m-%d-%H-%M) >> ${GITHUB_ENV} + + - name: Go Release Binaries + uses: wangyoucao577/go-release-action@v1.14 + with: + github_token: ${{ secrets.GITHUB_TOKEN }} + goos: ${{ matrix.goos }} + goarch: ${{ matrix.goarch }} + release_tag: dev + overwrite: true + pre_command: export CGO_ENABLED=0 + build_flags: -tags 5BytesOffset # optional, default is + ldflags: -extldflags -static -X github.com/chrislusf/seaweedfs/weed/util.COMMIT=${{github.sha}} + # Where to run `go build .` + project_path: weed + binary_name: weed-large-disk + asset_name: "weed-large-disk-${{ env.BUILD_TIME }}-${{ matrix.goos }}-${{ matrix.goarch }}" + + - name: Go Release Binaries + uses: wangyoucao577/go-release-action@v1.14 + with: + github_token: ${{ secrets.GITHUB_TOKEN }} + goos: ${{ matrix.goos }} + goarch: ${{ matrix.goarch }} + release_tag: dev + overwrite: true + pre_command: export CGO_ENABLED=0 + ldflags: -extldflags -static -X github.com/chrislusf/seaweedfs/weed/util.COMMIT=${{github.sha}} + # Where to run `go build .` + project_path: weed + binary_name: weed + asset_name: "weed-${{ env.BUILD_TIME }}-${{ matrix.goos }}-${{ matrix.goarch }}" diff --git a/.travis.yml b/.travis.yml index b7467ab8a..a5ebf415f 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,9 +1,8 @@ sudo: false language: go go: - - 1.11.x - - 1.12.x - - 1.13.x + - 1.15.x + - 1.16.x before_install: - export PATH=/home/travis/gopath/bin:$PATH @@ -45,4 +44,4 @@ deploy: on: tags: true repo: chrislusf/seaweedfs - go: 1.13.x + go: 1.16.x diff --git a/Makefile b/Makefile index ce20a482b..9a62eb9fe 100644 --- a/Makefile +++ b/Makefile @@ -8,11 +8,14 @@ appname := weed sources := $(wildcard *.go) -build = CGO_ENABLED=0 GOOS=$(1) GOARCH=$(2) go build -ldflags "-extldflags -static" -o build/$(appname)$(3) $(SOURCE_DIR) +COMMIT ?= $(shell git rev-parse --short HEAD) +LDFLAGS ?= -X github.com/chrislusf/seaweedfs/weed/util.COMMIT=${COMMIT} + +build = CGO_ENABLED=0 GOOS=$(1) GOARCH=$(2) go build -ldflags "-extldflags -static $(LDFLAGS)" -o build/$(appname)$(3) $(SOURCE_DIR) tar = cd build && tar -cvzf $(1)_$(2).tar.gz $(appname)$(3) && rm $(appname)$(3) zip = cd build && zip $(1)_$(2).zip $(appname)$(3) && rm $(appname)$(3) -build_large = CGO_ENABLED=0 GOOS=$(1) GOARCH=$(2) go build -tags 5BytesOffset -ldflags "-extldflags -static" -o build/$(appname)$(3) $(SOURCE_DIR) +build_large = CGO_ENABLED=0 GOOS=$(1) GOARCH=$(2) go build -tags 5BytesOffset -ldflags "-extldflags -static $(LDFLAGS)" -o build/$(appname)$(3) $(SOURCE_DIR) tar_large = cd build && tar -cvzf $(1)_$(2)_large_disk.tar.gz $(appname)$(3) && rm $(appname)$(3) zip_large = cd build && zip $(1)_$(2)_large_disk.zip $(appname)$(3) && rm $(appname)$(3) @@ -31,13 +34,16 @@ deps: rm -rf /home/travis/gopath/src/go.etcd.io/etcd/vendor/golang.org/x/net/trace build: deps - go build $(GO_FLAGS) -o $(BINARY) 
$(SOURCE_DIR) + go build $(GO_FLAGS) -ldflags "$(LDFLAGS)" -o $(BINARY) $(SOURCE_DIR) + +install: deps + go install $(GO_FLAGS) -ldflags "$(LDFLAGS)" $(SOURCE_DIR) linux: deps mkdir -p linux - GOOS=linux GOARCH=amd64 go build $(GO_FLAGS) -o linux/$(BINARY) $(SOURCE_DIR) + GOOS=linux GOARCH=amd64 go build $(GO_FLAGS) -ldflags "$(LDFLAGS)" -o linux/$(BINARY) $(SOURCE_DIR) -release: deps windows_build darwin_build linux_build bsd_build 5_byte_linux_build 5_byte_darwin_build 5_byte_windows_build +release: deps windows_build darwin_build linux_build bsd_build 5_byte_linux_build 5_byte_arm64_build 5_byte_darwin_build 5_byte_windows_build ##### LINUX BUILDS ##### 5_byte_linux_build: @@ -52,6 +58,14 @@ release: deps windows_build darwin_build linux_build bsd_build 5_byte_linux_buil $(call build_large,windows,amd64,.exe) $(call zip_large,windows,amd64,.exe) +5_byte_arm_build: $(sources) + $(call build_large,linux,arm,) + $(call tar_large,linux,arm) + +5_byte_arm64_build: $(sources) + $(call build_large,linux,arm64,) + $(call tar_large,linux,arm64) + linux_build: build/linux_arm.tar.gz build/linux_arm64.tar.gz build/linux_386.tar.gz build/linux_amd64.tar.gz build/linux_386.tar.gz: $(sources) diff --git a/README.md b/README.md index dd748878b..e463092bc 100644 --- a/README.md +++ b/README.md @@ -1,14 +1,18 @@ # SeaweedFS +[![Slack](https://img.shields.io/badge/slack-purple)](https://join.slack.com/t/seaweedfs/shared_invite/enQtMzI4MTMwMjU2MzA3LTEyYzZmZWYzOGQ3MDJlZWMzYmI0OTE4OTJiZjJjODBmMzUxNmYwODg0YjY3MTNlMjBmZDQ1NzQ5NDJhZWI2ZmY) +[![Twitter](https://img.shields.io/twitter/follow/seaweedfs.svg?style=social&label=Follow)](https://twitter.com/intent/follow?screen_name=seaweedfs) [![Build Status](https://travis-ci.org/chrislusf/seaweedfs.svg?branch=master)](https://travis-ci.org/chrislusf/seaweedfs) [![GoDoc](https://godoc.org/github.com/chrislusf/seaweedfs/weed?status.svg)](https://godoc.org/github.com/chrislusf/seaweedfs/weed) [![Wiki](https://img.shields.io/badge/docs-wiki-blue.svg)](https://github.com/chrislusf/seaweedfs/wiki) -[![Docker Pulls](https://img.shields.io/docker/pulls/chrislusf/seaweedfs.svg?maxAge=604800)](https://hub.docker.com/r/chrislusf/seaweedfs/) +[![Docker Pulls](https://img.shields.io/docker/pulls/chrislusf/seaweedfs.svg?maxAge=4800)](https://hub.docker.com/r/chrislusf/seaweedfs/) +[![SeaweedFS on Maven Central](https://img.shields.io/maven-central/v/com.github.chrislusf/seaweedfs-client)](https://search.maven.org/search?q=g:com.github.chrislusf) + ![SeaweedFS Logo](https://raw.githubusercontent.com/chrislusf/seaweedfs/master/note/seaweedfs.png) -
<h2 align="center">Supporting SeaweedFS</h2>
+<h2 align="center">Sponsor SeaweedFS via Patreon</h2>
SeaweedFS is an independent Apache-licensed open source project with its ongoing development made possible entirely thanks to the support of these awesome [backers](https://github.com/chrislusf/seaweedfs/blob/master/backers.md). @@ -17,8 +21,6 @@ If you'd like to grow SeaweedFS even stronger, please consider joining our [sponsors on Patreon](https://www.patreon.com/seaweedfs). Your support will be really appreciated by me and other supporters! -
<h2 align="center">Sponsor SeaweedFS via Patreon</h2>
-
-<!---
<h3 align="center">Gold</h3>
-<table>
-  <tr>
-    <td align="center">Add your name or icon here</td>
-  </tr>
-</table>
---> +### Gold Sponsors +![shuguang](https://raw.githubusercontent.com/chrislusf/seaweedfs/master/note/shuguang.png) --- - [Download Binaries for different platforms](https://github.com/chrislusf/seaweedfs/releases/latest) - [SeaweedFS on Slack](https://join.slack.com/t/seaweedfs/shared_invite/enQtMzI4MTMwMjU2MzA3LTEyYzZmZWYzOGQ3MDJlZWMzYmI0OTE4OTJiZjJjODBmMzUxNmYwODg0YjY3MTNlMjBmZDQ1NzQ5NDJhZWI2ZmY) +- [SeaweedFS on Twitter](https://twitter.com/SeaweedFS) - [SeaweedFS Mailing List](https://groups.google.com/d/forum/seaweedfs) - [Wiki Documentation](https://github.com/chrislusf/seaweedfs/wiki) +- [SeaweedFS White Paper](https://github.com/chrislusf/seaweedfs/wiki/SeaweedFS_Architecture.pdf) - [SeaweedFS Introduction Slides](https://www.slideshare.net/chrislusf/seaweedfs-introduction) Table of Contents ================= +* [Quick Start](#quick-start) * [Introduction](#introduction) * [Features](#features) * [Additional Features](#additional-features) * [Filer Features](#filer-features) -* [Example Usage](#example-usage) +* [Example: Using Seaweed Object Store](#example-Using-Seaweed-Object-Store) * [Architecture](#architecture) * [Compared to Other File Systems](#compared-to-other-file-systems) * [Compared to HDFS](#compared-to-hdfs) @@ -74,6 +67,13 @@ Table of Contents * [Benchmark](#Benchmark) * [License](#license) + +## Quick Start ## +* Download the latest binary from https://github.com/chrislusf/seaweedfs/releases and unzip a single binary file `weed` or `weed.exe` +* Run `weed server -dir=/some/data/dir -s3` to start one master, one volume server, one filer, and one S3 gateway. + +Also, to increase capacity, just add more volume servers by running `weed volume -dir="/some/data/dir2" -mserver=":9333" -port=8081` locally, or on a different machine, or on thousands of machines. That is it! + ## Introduction ## SeaweedFS is a simple and highly scalable distributed file system. There are two objectives: @@ -81,17 +81,34 @@ SeaweedFS is a simple and highly scalable distributed file system. There are two 1. to store billions of files! 2. to serve the files fast! -SeaweedFS started as an Object Store to handle small files efficiently. Instead of managing all file metadata in a central master, the central master only manages file volumes, and it lets these volume servers manage files and their metadata. This relieves concurrency pressure from the central master and spreads file metadata into volume servers, allowing faster file access (just one disk read operation). +SeaweedFS started as an Object Store to handle small files efficiently. +Instead of managing all file metadata in a central master, +the central master only manages volumes on volume servers, +and these volume servers manage files and their metadata. +This relieves concurrency pressure from the central master and spreads file metadata into volume servers, +allowing faster file access (O(1), usually just one disk read operation). -There is only 40 bytes of disk storage overhead for each file's metadata. It is so simple with O(1) disk reads that you are welcome to challenge the performance with your actual use cases. +SeaweedFS can transparently integrate with the cloud. +With hot data on local cluster, and warm data on the cloud with O(1) access time, +SeaweedFS can achieve both fast local access time and elastic cloud storage capacity. +What's more, the cloud storage access API cost is minimized. +Faster and Cheaper than direct cloud storage! 
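The Quick Start above boils down to two commands; a sketch assuming the downloaded `weed` binary is on the PATH and the default ports (9333 for the master, 8080 for the first volume server) are free:

```bash
# One process: master + volume server + filer + S3 gateway.
weed server -dir=/some/data/dir -s3

# Add capacity later: each extra volume server just registers with the master on port 9333.
weed volume -dir="/some/data/dir2" -mserver=":9333" -port=8081
```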
+Sign up for the future managed SeaweedFS cluster offering at "seaweedfilesystem at gmail dot com". -SeaweedFS started by implementing [Facebook's Haystack design paper](http://www.usenix.org/event/osdi10/tech/full_papers/Beaver.pdf). Also, SeaweedFS implements erasure coding with ideas from [f4: Facebook’s Warm BLOB Storage System](https://www.usenix.org/system/files/conference/osdi14/osdi14-paper-muralidhar.pdf) +There are only 40 bytes of disk storage overhead for each file's metadata. +It is so simple with O(1) disk reads that you are welcome to challenge the performance with your actual use cases. -SeaweedFS can work very well with just the object store. [[Filer]] can then be added later to support directories and POSIX attributes. Filer is a separate linearly-scalable stateless server with customizable metadata stores, e.g., MySql/Postgres/Redis/Etcd/Cassandra/LevelDB/MemSql/TiDB/CockroachDB. +SeaweedFS started by implementing [Facebook's Haystack design paper](http://www.usenix.org/event/osdi10/tech/full_papers/Beaver.pdf). +Also, SeaweedFS implements erasure coding with ideas from +[f4: Facebook’s Warm BLOB Storage System](https://www.usenix.org/system/files/conference/osdi14/osdi14-paper-muralidhar.pdf), and has a lot of similarities with [Facebook’s Tectonic Filesystem](https://www.usenix.org/system/files/fast21-pan.pdf) -[Back to TOC](#table-of-contents) +On top of the object store, optional [Filer] can support directories and POSIX attributes. +Filer is a separate linearly-scalable stateless server with customizable metadata stores, +e.g., MySql, Postgres, Redis, Cassandra, HBase, Mongodb, Elastic Search, LevelDB, RocksDB, MemSql, TiDB, Etcd, CockroachDB, etc. -## Features ## +For any distributed key-value store, large values can be offloaded to SeaweedFS. +With the fast access speed and linearly scalable capacity, +SeaweedFS can work as a distributed [Key-Large-Value store][KeyLargeValueStore]. [Back to TOC](#table-of-contents) @@ -100,35 +117,57 @@ SeaweedFS can work very well with just the object store. [[Filer]] can then be a * Automatic master servers failover - no single point of failure (SPOF). * Automatic Gzip compression depending on file mime type. * Automatic compaction to reclaim disk space after deletion or update. -* Servers in the same cluster can have different disk spaces, file systems, OS etc. -* Adding/Removing servers does **not** cause any data re-balancing. -* Optionally fix the orientation for jpeg pictures. +* [Automatic entry TTL expiration][VolumeServerTTL]. +* Any server with some disk space can be added to grow the total storage space. +* Adding/Removing servers does **not** cause any data re-balancing unless triggered by admin commands. +* Optional picture resizing. * Support ETag, Accept-Range, Last-Modified, etc. -* Support in-memory/leveldb/boltdb/btree mode tuning for memory/performance balance. +* Support in-memory/leveldb/readonly mode tuning for memory/performance balance. * Support rebalancing the writable and readonly volumes. +* [Customizable Multiple Storage Tiers][TieredStorage]: Customizable storage disk types to balance performance and cost. +* [Transparent cloud integration][CloudTier]: unlimited capacity via tiered cloud storage for warm data. +* [Erasure Coding for warm storage][ErasureCoding] Rack-Aware 10.4 erasure coding reduces storage cost and increases availability. [Back to TOC](#table-of-contents) ## Filer Features ## -* [filer server][Filer] provide "normal" directories and files via http.
-* [mount filer][Mount] to read and write files directly as a local directory via FUSE. -* [Amazon S3 compatible API][AmazonS3API] to access files with S3 tooling. -* [Erasure Coding for warm storage][ErasureCoding] Rack-Aware 10.4 erasure coding reduces storage cost and increases availability. -* [Hadoop Compatible File System][Hadoop] to access files from Hadoop/Spark/Flink/etc jobs. -* [Async Backup To Cloud][BackupToCloud] has extremely fast local access and backups to Amazon S3, Google Cloud Storage, Azure, BackBlaze. -* [WebDAV] access as a mapped drive on Mac and Windows, or from mobile devices. +* [Filer server][Filer] provides "normal" directories and files via http. +* [File TTL][FilerTTL] automatically expires file metadata and actual file data. +* [Mount filer][Mount] reads and writes files directly as a local directory via FUSE. +* [Filer Store Replication][FilerStoreReplication] enables HA for filer meta data stores. +* [Active-Active Replication][ActiveActiveAsyncReplication] enables asynchronous one-way or two-way cross cluster continuous replication. +* [Amazon S3 compatible API][AmazonS3API] accesses files with S3 tooling. +* [Hadoop Compatible File System][Hadoop] accesses files from Hadoop/Spark/Flink/etc or even runs HBase. +* [Async Replication To Cloud][BackupToCloud] has extremely fast local access and backups to Amazon S3, Google Cloud Storage, Azure, BackBlaze. +* [WebDAV] accesses as a mapped drive on Mac and Windows, or from mobile devices. +* [AES256-GCM Encrypted Storage][FilerDataEncryption] safely stores the encrypted data. +* [Super Large Files][SuperLargeFiles] stores large or super large files in tens of TB. + +## Kubernetes ## +* [Kubernetes CSI Driver][SeaweedFsCsiDriver] A Container Storage Interface (CSI) Driver. 
[![Docker Pulls](https://img.shields.io/docker/pulls/chrislusf/seaweedfs-csi-driver.svg?maxAge=4800)](https://hub.docker.com/r/chrislusf/seaweedfs-csi-driver/) +* [SeaweedFS Operator](https://github.com/seaweedfs/seaweedfs-operator) [Filer]: https://github.com/chrislusf/seaweedfs/wiki/Directories-and-Files -[Mount]: https://github.com/chrislusf/seaweedfs/wiki/Mount +[SuperLargeFiles]: https://github.com/chrislusf/seaweedfs/wiki/Data-Structure-for-Large-Files +[Mount]: https://github.com/chrislusf/seaweedfs/wiki/FUSE-Mount [AmazonS3API]: https://github.com/chrislusf/seaweedfs/wiki/Amazon-S3-API -[BackupToCloud]: https://github.com/chrislusf/seaweedfs/wiki/Backup-to-Cloud +[BackupToCloud]: https://github.com/chrislusf/seaweedfs/wiki/Async-Replication-to-Cloud [Hadoop]: https://github.com/chrislusf/seaweedfs/wiki/Hadoop-Compatible-File-System [WebDAV]: https://github.com/chrislusf/seaweedfs/wiki/WebDAV [ErasureCoding]: https://github.com/chrislusf/seaweedfs/wiki/Erasure-coding-for-warm-storage +[TieredStorage]: https://github.com/chrislusf/seaweedfs/wiki/Tiered-Storage +[CloudTier]: https://github.com/chrislusf/seaweedfs/wiki/Cloud-Tier +[FilerDataEncryption]: https://github.com/chrislusf/seaweedfs/wiki/Filer-Data-Encryption +[FilerTTL]: https://github.com/chrislusf/seaweedfs/wiki/Filer-Stores +[VolumeServerTTL]: https://github.com/chrislusf/seaweedfs/wiki/Store-file-with-a-Time-To-Live +[SeaweedFsCsiDriver]: https://github.com/seaweedfs/seaweedfs-csi-driver +[ActiveActiveAsyncReplication]: https://github.com/chrislusf/seaweedfs/wiki/Filer-Active-Active-cross-cluster-continuous-synchronization +[FilerStoreReplication]: https://github.com/chrislusf/seaweedfs/wiki/Filer-Store-Replication +[KeyLargeValueStore]: https://github.com/chrislusf/seaweedfs/wiki/Filer-as-a-Key-Large-Value-Store [Back to TOC](#table-of-contents) -## Example Usage ## +## Example: Using Seaweed Object Store ## By default, the master node runs on port 9333, and the volume nodes run on port 8080. Let's start one master node, and two volume nodes on port 8080 and 8081. Ideally, they should be started from different machines. We'll use localhost as an example. @@ -318,6 +357,16 @@ Each individual file size is limited to the volume size. All file meta information stored on an volume server is readable from memory without disk access. Each file takes just a 16-byte map entry of <64bit key, 32bit offset, 32bit size>. Of course, each map entry has its own space cost for the map. But usually the disk space runs out before the memory does. +### Tiered Storage to the cloud ### + +The local volume servers are much faster, while cloud storages have elastic capacity and are actually more cost-efficient if not accessed often (usually free to upload, but relatively costly to access). With the append-only structure and O(1) access time, SeaweedFS can take advantage of both local and cloud storage by offloading the warm data to the cloud. + +Usually hot data are fresh and warm data are old. SeaweedFS puts the newly created volumes on local servers, and optionally uploads the older volumes to the cloud. If the older data are accessed less often, this literally gives you unlimited capacity with limited local servers, while access to new data stays fast. + +With the O(1) access time, the network latency cost is kept at a minimum. + +If the hot/warm data is split as 20/80, with 20 servers, you can achieve the storage capacity of 100 servers. That's a cost saving of 80%! Or you can repurpose the 80 servers to store new data also, and get 5X storage throughput.
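Offloading warm volumes to the cloud tier is driven from `weed shell`. A minimal sketch, assuming an S3-compatible storage backend has already been configured for the master as described on the [CloudTier] wiki page; the command and flag names follow that page, and the backend name `cloud1` is a placeholder, so verify against `weed shell` in your release:

```bash
# Hedged sketch: move volumes that are >=95% full and quiet for a day to the cloud tier.
echo "volume.tier.upload -dest=cloud1 -collection=warm -fullPercent=95 -quietFor=24h" | weed shell -master=localhost:9333
```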
+ [Back to TOC](#table-of-contents) ## Compared to Other File Systems ## @@ -326,6 +375,8 @@ Most other distributed file systems seem more complicated than necessary. SeaweedFS is meant to be fast and simple, in both setup and operation. If you do not understand how it works when you reach here, we've failed! Please raise an issue with any questions or update this file with clarifications. +SeaweedFS is constantly moving forward, and so are the other systems, so these comparisons can become outdated quickly. Please help to keep them updated. + [Back to TOC](#table-of-contents) ### Compared to HDFS ### @@ -344,15 +395,17 @@ The architectures are mostly the same. SeaweedFS aims to store and read files fa * SeaweedFS optimizes for small files, ensuring O(1) disk seek operation, and can also handle large files. * SeaweedFS statically assigns a volume id for a file. Locating file content becomes just a lookup of the volume id, which can be easily cached. -* SeaweedFS Filer metadata store can be any well-known and proven data stores, e.g., Cassandra, Redis, Etcd, MySql, Postgres, MemSql, TiDB, CockroachDB, etc, and is easy to customized. +* The SeaweedFS Filer metadata store can be any well-known and proven data store, e.g., Redis, Cassandra, HBase, Mongodb, Elastic Search, MySql, Postgres, MemSql, TiDB, CockroachDB, Etcd, etc., and is easy to customize. * SeaweedFS Volume server also communicates directly with clients via HTTP, supporting range queries, direct uploads, etc. -| System | File Meta | File Content Read| POSIX | REST API | Optimized for small files | +| System | File Metadata | File Content Read| POSIX | REST API | Optimized for large number of small files | | ------------- | ------------------------------- | ---------------- | ------ | -------- | ------------------------- | | SeaweedFS | lookup volume id, cacheable | O(1) disk seek | | Yes | Yes | | SeaweedFS Filer| Linearly Scalable, Customizable | O(1) disk seek | FUSE | Yes | Yes | | GlusterFS | hashing | | FUSE, NFS | | | | Ceph | hashing + rules | | FUSE | Yes | | +| MooseFS | in memory | | FUSE | | No | +| MinIO | separate meta file for each file | | | Yes | No | [Back to TOC](#table-of-contents) @@ -364,6 +417,14 @@ GlusterFS hashes the path and filename into ids, and assigned to virtual volumes [Back to TOC](#table-of-contents) +### Compared to MooseFS ### + +MooseFS chooses to neglect the small file issue. From the moosefs 3.0 manual, "even a small file will occupy 64KiB plus additionally 4KiB of checksums and 1KiB for the header", because it "was initially designed for keeping large amounts (like several thousands) of very big files". + +MooseFS Master Server keeps all metadata in memory, the same issue as the HDFS namenode. + +[Back to TOC](#table-of-contents) + ### Compared to Ceph ### Ceph can be setup similar to SeaweedFS as a key->blob store. It is much more complicated, with the need to support layers on top of it. [Here is a more detailed comparison](https://github.com/chrislusf/seaweedfs/issues/120) @@ -372,11 +433,11 @@ SeaweedFS has a centralized master group to look up free volumes, while Ceph use Same as SeaweedFS, Ceph is also based on the object store RADOS. Ceph is rather complicated with mixed reviews. -Ceph uses CRUSH hashing to automatically manage the data placement. SeaweedFS places data by assigned volumes. +Ceph uses CRUSH hashing to automatically manage the data placement, which is efficient to locate the data. But the data has to be placed according to the CRUSH algorithm. Any wrong configuration would cause data loss.
SeaweedFS places data by assigning them to any writable volumes. If a write to one volume fails, just pick another volume to write to. Adding more volumes is also as simple as it can be. SeaweedFS is optimized for small files. Small files are stored as one continuous block of content, with at most 8 unused bytes between files. Small file access is O(1) disk read. -SeaweedFS Filer uses off-the-shelf stores, such as MySql, Postgres, Redis, Etcd, Cassandra, MemSql, TiDB, CockroachCB, to manage file directories. There are proven, scalable, and easier to manage. +SeaweedFS Filer uses off-the-shelf stores, such as MySql, Postgres, Mongodb, Redis, Elastic Search, Cassandra, HBase, MemSql, TiDB, CockroachDB, Etcd, to manage file directories. These stores are proven, scalable, and easier to manage. | SeaweedFS | comparable to Ceph | advantage | | ------------- | ------------- | ---------------- | @@ -386,18 +447,30 @@ SeaweedFS Filer uses off-the-shelf stores, such as MySql, Postgres, Redis, Etcd, [Back to TOC](#table-of-contents) -## Dev Plan ## +### Compared to MinIO ### -More tools and documentation, on how to maintain and scale the system. For example, how to move volumes, automatically balancing data, how to grow volumes, how to check system status, etc. -Other key features include: Erasure Encoding, JWT security. +MinIO follows AWS S3 closely and is ideal for testing the S3 API. It has good UI, policies, versioning, etc. SeaweedFS is trying to catch up here. It is also possible to put MinIO as a gateway in front of SeaweedFS later. -This is a super exciting project! And we need helpers and [support](https://www.patreon.com/seaweedfs)! +MinIO metadata are in simple files. Each file write will incur extra writes to the corresponding meta file. -BTW, We suggest run the code style check script `util/gostd` before you push your branch to remote, it will make SeaweedFS easy to review, maintain and develop: +MinIO has no optimization for lots of small files. The files are simply stored as-is on local disks. +The extra meta file and erasure coding shards only amplify the LOSF problem. -``` -$ ./util/gostd -``` +MinIO requires multiple disk IOs to read one file. SeaweedFS has O(1) disk reads, even for erasure coded files. + +MinIO has full-time erasure coding. SeaweedFS uses replication on hot data for faster speed and optionally applies erasure coding on warm data. + +MinIO does not have POSIX-like API support. + +MinIO has specific requirements on the storage layout. It is not flexible to adjust capacity. In SeaweedFS, just start one volume server pointing to the master. That's all. + +## Dev Plan ## + +* More tools and documentation, on how to manage and scale the system. +* Read and write stream data. +* Support structured data. + +This is a super exciting project! And we need helpers and [support](https://www.patreon.com/seaweedfs)!
[Back to TOC](#table-of-contents) @@ -412,24 +485,18 @@ https://golang.org/doc/install make sure you set up your $GOPATH -Step 2: also you may need to install Mercurial by following the instructions at: - -http://mercurial.selenic.com/downloads - +Step 2: checkout this repo: +```bash +git clone https://github.com/chrislusf/seaweedfs.git +``` Step 3: download, compile, and install the project by executing the following command ```bash -go get github.com/chrislusf/seaweedfs/weed +make install ``` Once this is done, you will find the executable "weed" in your `$GOPATH/bin` directory -Step 4: after you modify your code locally, you could start a local build by calling `go install` under - -``` -$GOPATH/src/github.com/chrislusf/seaweedfs/weed -``` - [Back to TOC](#table-of-contents) ## Disk Related Topics ## @@ -451,50 +518,49 @@ My Own Unscientific Single Machine Results on Mac Book with Solid State Disk, CP Write 1 million 1KB file: ``` Concurrency Level: 16 -Time taken for tests: 88.796 seconds +Time taken for tests: 66.753 seconds Complete requests: 1048576 Failed requests: 0 -Total transferred: 1106764659 bytes -Requests per second: 11808.87 [#/sec] -Transfer rate: 12172.05 [Kbytes/sec] +Total transferred: 1106789009 bytes +Requests per second: 15708.23 [#/sec] +Transfer rate: 16191.69 [Kbytes/sec] Connection Times (ms) min avg max std -Total: 0.2 1.3 44.8 0.9 +Total: 0.3 1.0 84.3 0.9 Percentage of the requests served within a certain time (ms) - 50% 1.1 ms - 66% 1.3 ms - 75% 1.5 ms - 80% 1.7 ms - 90% 2.1 ms - 95% 2.6 ms - 98% 3.7 ms - 99% 4.6 ms - 100% 44.8 ms + 50% 0.8 ms + 66% 1.0 ms + 75% 1.1 ms + 80% 1.2 ms + 90% 1.4 ms + 95% 1.7 ms + 98% 2.1 ms + 99% 2.6 ms + 100% 84.3 ms ``` Randomly read 1 million files: ``` Concurrency Level: 16 -Time taken for tests: 34.263 seconds +Time taken for tests: 22.301 seconds Complete requests: 1048576 Failed requests: 0 -Total transferred: 1106762945 bytes -Requests per second: 30603.34 [#/sec] -Transfer rate: 31544.49 [Kbytes/sec] +Total transferred: 1106812873 bytes +Requests per second: 47019.38 [#/sec] +Transfer rate: 48467.57 [Kbytes/sec] Connection Times (ms) min avg max std -Total: 0.0 0.5 20.7 0.7 +Total: 0.0 0.3 54.1 0.2 Percentage of the requests served within a certain time (ms) - 50% 0.4 ms - 75% 0.5 ms - 95% 0.6 ms - 98% 0.8 ms - 99% 1.2 ms - 100% 20.7 ms + 50% 0.3 ms + 90% 0.4 ms + 98% 0.6 ms + 99% 0.7 ms + 100% 54.1 ms ``` [Back to TOC](#table-of-contents) @@ -513,6 +579,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. +The text of this page is available for modification and reuse under the terms of the Creative Commons Attribution-Sharealike 3.0 Unported License and the GNU Free Documentation License (unversioned, with no invariant sections, front-cover texts, or back-cover texts). + [Back to TOC](#table-of-contents) ## Stargazers over time ## diff --git a/backers.md b/backers.md index 97a5e9081..dd8c69e67 100644 --- a/backers.md +++ b/backers.md @@ -1,3 +1,4 @@ +
<h1 align="center">Sponsors &amp; Backers</h1>
- [Become a backer or sponsor on Patreon](https://www.patreon.com/seaweedfs). @@ -6,8 +7,10 @@ - [4Sight Imaging](https://www.4sightimaging.com/) - [Evercam Camera Management Software](https://evercam.io/) +- [Admiral](https://getadmiral.com)
<h2 align="center">Backers</h2>
- [ColorfulClouds Tech Co. Ltd.](https://caiyunai.com/) - [Haravan - Ecommerce Platform](https://www.haravan.com) +- PeterCxy - Creator of Shelter App diff --git a/docker/Dockerfile b/docker/Dockerfile index 38117a3dc..2165466ca 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -1,15 +1,23 @@ -FROM frolvlad/alpine-glibc +FROM alpine -# Supercronic install settings -ENV SUPERCRONIC_URL=https://github.com/aptible/supercronic/releases/download/v0.1.8/supercronic-linux-amd64 \ - SUPERCRONIC=supercronic-linux-amd64 \ - SUPERCRONIC_SHA1SUM=be43e64c45acd6ec4fce5831e03759c89676a0ea +# 'latest' or 'dev' +ARG RELEASE=latest -# Install SeaweedFS and Supercronic ( for cron job mode ) -# Tried to use curl only (curl -o /tmp/linux_amd64.tar.gz ...), however it turned out that the following tar command failed with "gzip: stdin: not in gzip format" -RUN apk add --no-cache --virtual build-dependencies --update wget curl ca-certificates && \ - wget -P /tmp https://github.com/$(curl -s -L https://github.com/chrislusf/seaweedfs/releases/latest | egrep -o 'chrislusf/seaweedfs/releases/download/.*/linux_amd64.tar.gz') && \ - tar -C /usr/bin/ -xzvf /tmp/linux_amd64.tar.gz && \ +RUN \ + ARCH=$(if [ $(uname -m) == "x86_64" ] && [ $(getconf LONG_BIT) == "64" ]; then echo "amd64"; \ + elif [ $(uname -m) == "x86_64" ] && [ $(getconf LONG_BIT) == "32" ]; then echo "386"; \ + elif [ $(uname -m) == "aarch64" ]; then echo "arm64"; \ + elif [ $(uname -m) == "armv7l" ]; then echo "arm"; \ + elif [ $(uname -m) == "armv6l" ]; then echo "arm"; fi;) && \ + echo "Building for $ARCH" 1>&2 && \ + SUPERCRONIC_SHA1SUM=$(echo $ARCH | sed 's/386/e0126b0102b9f388ecd55714358e3ad60d0cebdb/g' | sed 's/amd64/5ddf8ea26b56d4a7ff6faecdd8966610d5cb9d85/g' | sed 's/arm64/e2714c43e7781bf1579c85aa61259245f56dbba1/g' | sed 's/arm/47481c3341bc3a1ae91a728e0cc63c8e6d3791ad/g') && \ + SUPERCRONIC_URL=https://github.com/aptible/supercronic/releases/download/v0.1.9/supercronic-linux-$ARCH && \ + SUPERCRONIC=supercronic-linux-$ARCH && \ + # Install SeaweedFS and Supercronic ( for cron job mode ) + apk add --no-cache --virtual build-dependencies --update wget curl ca-certificates && \ + apk add fuse && \ + wget -P /tmp https://github.com/$(curl -s -L https://github.com/chrislusf/seaweedfs/releases/${RELEASE} | egrep -o "chrislusf/seaweedfs/releases/download/.*/linux_$ARCH.tar.gz") && \ + tar -C /usr/bin/ -xzvf /tmp/linux_$ARCH.tar.gz && \ curl -fsSLO "$SUPERCRONIC_URL" && \ echo "${SUPERCRONIC_SHA1SUM} ${SUPERCRONIC}" | sha1sum -c - && \ chmod +x "$SUPERCRONIC" && \ @@ -32,6 +40,8 @@ EXPOSE 19333 EXPOSE 9333 # s3 server http port EXPOSE 8333 +# webdav server http port +EXPOSE 7333 RUN mkdir -p /data/filerldb2 diff --git a/docker/Dockerfile.go_build b/docker/Dockerfile.go_build index 85cbb6143..1adf0f5ef 100644 --- a/docker/Dockerfile.go_build +++ b/docker/Dockerfile.go_build @@ -1,5 +1,20 @@ -FROM golang:latest -RUN go get github.com/chrislusf/seaweedfs/weed +FROM frolvlad/alpine-glibc as builder +RUN apk add git go g++ fuse +RUN mkdir -p /go/src/github.com/chrislusf/ +RUN git clone https://github.com/chrislusf/seaweedfs /go/src/github.com/chrislusf/seaweedfs +ARG BRANCH=${BRANCH:-master} +RUN cd /go/src/github.com/chrislusf/seaweedfs && git checkout $BRANCH +RUN cd /go/src/github.com/chrislusf/seaweedfs/weed \ + && export LDFLAGS="-X github.com/chrislusf/seaweedfs/weed/util.COMMIT=$(git rev-parse --short HEAD)" \ + && CGO_ENABLED=0 go install -ldflags "-extldflags -static ${LDFLAGS}" + +FROM alpine AS final +LABEL author="Chris Lu" +COPY 
--from=builder /root/go/bin/weed /usr/bin/ +RUN mkdir -p /etc/seaweedfs +COPY --from=builder /go/src/github.com/chrislusf/seaweedfs/docker/filer.toml /etc/seaweedfs/filer.toml +COPY --from=builder /go/src/github.com/chrislusf/seaweedfs/docker/entrypoint.sh /entrypoint.sh +RUN apk add fuse # for weed mount # volume server gprc port EXPOSE 18080 @@ -15,15 +30,13 @@ EXPOSE 19333 EXPOSE 9333 # s3 server http port EXPOSE 8333 +# webdav server http port +EXPOSE 7333 RUN mkdir -p /data/filerldb2 VOLUME /data -RUN mkdir -p /etc/seaweedfs -RUN cp /go/src/github.com/chrislusf/seaweedfs/docker/filer.toml /etc/seaweedfs/filer.toml -RUN cp /go/src/github.com/chrislusf/seaweedfs/docker/entrypoint.sh /entrypoint.sh RUN chmod +x /entrypoint.sh -RUN cp /go/bin/weed /usr/bin/ ENTRYPOINT ["/entrypoint.sh"] diff --git a/docker/Dockerfile.go_build_large b/docker/Dockerfile.go_build_large new file mode 100644 index 000000000..48af3381d --- /dev/null +++ b/docker/Dockerfile.go_build_large @@ -0,0 +1,42 @@ +FROM frolvlad/alpine-glibc as builder +RUN apk add git go g++ fuse +RUN mkdir -p /go/src/github.com/chrislusf/ +RUN git clone https://github.com/chrislusf/seaweedfs /go/src/github.com/chrislusf/seaweedfs +ARG BRANCH=${BRANCH:-master} +RUN cd /go/src/github.com/chrislusf/seaweedfs && git checkout $BRANCH +RUN cd /go/src/github.com/chrislusf/seaweedfs/weed \ + && export LDFLAGS="-X github.com/chrislusf/seaweedfs/weed/util.COMMIT=$(git rev-parse --short HEAD)" \ + && CGO_ENABLED=0 go install -tags 5BytesOffset -ldflags "-extldflags -static ${LDFLAGS}" + +FROM alpine AS final +LABEL author="Chris Lu" +COPY --from=builder /root/go/bin/weed /usr/bin/ +RUN mkdir -p /etc/seaweedfs +COPY --from=builder /go/src/github.com/chrislusf/seaweedfs/docker/filer.toml /etc/seaweedfs/filer.toml +COPY --from=builder /go/src/github.com/chrislusf/seaweedfs/docker/entrypoint.sh /entrypoint.sh +RUN apk add fuse # for weed mount + +# volume server grpc port +EXPOSE 18080 +# volume server http port +EXPOSE 8080 +# filer server grpc port +EXPOSE 18888 +# filer server http port +EXPOSE 8888 +# master server shared grpc port +EXPOSE 19333 +# master server shared http port +EXPOSE 9333 +# s3 server http port +EXPOSE 8333 +# webdav server http port +EXPOSE 7333 + +RUN mkdir -p /data/filerldb2 + +VOLUME /data + +RUN chmod +x /entrypoint.sh + +ENTRYPOINT ["/entrypoint.sh"] diff --git a/docker/Dockerfile.local b/docker/Dockerfile.local new file mode 100644 index 000000000..0a85c56f0 --- /dev/null +++ b/docker/Dockerfile.local @@ -0,0 +1,32 @@ +FROM alpine AS final +LABEL author="Chris Lu" +COPY ./weed /usr/bin/ +RUN mkdir -p /etc/seaweedfs +COPY ./filer.toml /etc/seaweedfs/filer.toml +COPY ./entrypoint.sh /entrypoint.sh +RUN apk add fuse # for weed mount + +# volume server grpc port +EXPOSE 18080 +# volume server http port +EXPOSE 8080 +# filer server grpc port +EXPOSE 18888 +# filer server http port +EXPOSE 8888 +# master server shared grpc port +EXPOSE 19333 +# master server shared http port +EXPOSE 9333 +# s3 server http port +EXPOSE 8333 +# webdav server http port +EXPOSE 7333 + +RUN mkdir -p /data/filerldb2 + +VOLUME /data + +RUN chmod +x /entrypoint.sh + +ENTRYPOINT ["/entrypoint.sh"] diff --git a/docker/Dockerfile.s3tests b/docker/Dockerfile.s3tests new file mode 100644 index 000000000..5b6d762de --- /dev/null +++ b/docker/Dockerfile.s3tests @@ -0,0 +1,31 @@ +FROM ubuntu:20.04 + +RUN DEBIAN_FRONTEND=noninteractive apt-get update && \ + DEBIAN_FRONTEND=noninteractive apt-get upgrade -y && \ + DEBIAN_FRONTEND=noninteractive apt-get install
-y --no-install-recommends \ + git \ + sudo \ + debianutils \ + python3-pip \ + python3-virtualenv \ + python3-dev \ + libevent-dev \ + libffi-dev \ + libxml2-dev \ + libxslt-dev \ + zlib1g-dev && \ + DEBIAN_FRONTEND=noninteractive apt-get clean && \ + rm -rf /var/lib/apt/lists/* && \ + git clone https://github.com/ceph/s3-tests.git /opt/s3-tests + +WORKDIR /opt/s3-tests +RUN ./bootstrap + +ENV \ + NOSETESTS_EXCLUDE="" \ + NOSETESTS_ATTR="" \ + NOSETESTS_OPTIONS="" \ + S3TEST_CONF="/s3test.conf" + +ENTRYPOINT ["/bin/bash", "-c"] +CMD ["sleep 10 && exec ./virtualenv/bin/nosetests ${NOSETESTS_OPTIONS-} ${NOSETESTS_ATTR:+-a $NOSETESTS_ATTR} ${NOSETESTS_EXCLUDE:+-e $NOSETESTS_EXCLUDE}"] \ No newline at end of file diff --git a/docker/Makefile b/docker/Makefile new file mode 100644 index 000000000..a933956b7 --- /dev/null +++ b/docker/Makefile @@ -0,0 +1,63 @@ +all: gen + +.PHONY : gen + +gen: dev + +binary: + cd ../weed; CGO_ENABLED=0 GOOS=linux go build -ldflags "-extldflags -static"; mv weed ../docker/ + +build: binary + docker build --no-cache -t chrislusf/seaweedfs:local -f Dockerfile.local . + rm ./weed + +s3tests_build: + docker build --no-cache -t chrislusf/ceph-s3-tests:local -f Dockerfile.s3tests . + +dev: build + docker-compose -f compose/local-dev-compose.yml -p seaweedfs up + +dev_tls: build certstrap + ENV_FILE="tls.env" docker-compose -f compose/local-dev-compose.yml -p seaweedfs up + +dev_mount: build + docker-compose -f compose/local-mount-compose.yml -p seaweedfs up + +profile_mount: build + docker-compose -f compose/local-mount-profile-compose.yml -p seaweedfs up + +k8s: build + docker-compose -f compose/local-k8s-compose.yml -p seaweedfs up + +dev_registry: build + docker-compose -f compose/local-registry-compose.yml -p seaweedfs up + +dev_replicate: build + docker-compose -f compose/local-replicate-compose.yml -p seaweedfs up + +cluster: build + docker-compose -f compose/local-cluster-compose.yml -p seaweedfs up + +2clusters: build + docker-compose -f compose/local-clusters-compose.yml -p seaweedfs up + +s3tests: build s3tests_build + docker-compose -f compose/local-s3tests-compose.yml -p seaweedfs up + +filer_etcd: build + docker stack deploy -c compose/swarm-etcd.yml fs + +clean: + rm ./weed + +certstrap: + go get github.com/square/certstrap + certstrap --depot-path compose/tls init --passphrase "" --common-name "SeaweedFS CA" || true + certstrap --depot-path compose/tls request-cert --passphrase "" --common-name volume01.dev || true + certstrap --depot-path compose/tls request-cert --passphrase "" --common-name master01.dev || true + certstrap --depot-path compose/tls request-cert --passphrase "" --common-name filer01.dev || true + certstrap --depot-path compose/tls request-cert --passphrase "" --common-name client01.dev || true + certstrap --depot-path compose/tls sign --CA "SeaweedFS CA" volume01.dev || true + certstrap --depot-path compose/tls sign --CA "SeaweedFS CA" master01.dev || true + certstrap --depot-path compose/tls sign --CA "SeaweedFS CA" filer01.dev || true + certstrap --depot-path compose/tls sign --CA "SeaweedFS CA" client01.dev || true \ No newline at end of file diff --git a/docker/README.md b/docker/README.md index cfe281e71..d6e1f4928 100644 --- a/docker/README.md +++ b/docker/README.md @@ -11,11 +11,29 @@ docker-compose -f seaweedfs-compose.yml -p seaweedfs up ``` -## Development +## Try latest tip + +```bash + +wget https://raw.githubusercontent.com/chrislusf/seaweedfs/master/docker/seaweedfs-dev-compose.yml + +docker-compose -f 
seaweedfs-dev-compose.yml -p seaweedfs up + +``` + +## Local Development ```bash cd $GOPATH/src/github.com/chrislusf/seaweedfs/docker +make +``` -docker-compose -f dev-compose.yml -p seaweedfs up +## Build and push a multiarch build +Make sure that `docker buildx` is supported (might be an experimental docker feature) +```bash +BUILDER=$(docker buildx create --driver docker-container --use) +docker buildx build --pull --push --platform linux/386,linux/amd64,linux/arm64,linux/arm/v7,linux/arm/v6 . -t chrislusf/seaweedfs +docker buildx stop $BUILDER ``` + diff --git a/docker/compose/dev.env b/docker/compose/dev.env new file mode 100644 index 000000000..e69de29bb diff --git a/docker/compose/local-cluster-compose.yml b/docker/compose/local-cluster-compose.yml new file mode 100644 index 000000000..82095ae18 --- /dev/null +++ b/docker/compose/local-cluster-compose.yml @@ -0,0 +1,75 @@ +version: '2' + +services: + master0: + image: chrislusf/seaweedfs:local + ports: + - 9333:9333 + - 19333:19333 + command: "master -ip=master0 -port=9333 -peers=master0:9333,master1:9334,master2:9335 -mdir=/data/m1" + master1: + image: chrislusf/seaweedfs:local + ports: + - 9334:9334 + - 19334:19334 + command: "master -ip=master1 -port=9334 -peers=master0:9333,master1:9334,master2:9335 -mdir=/data/m2" + master2: + image: chrislusf/seaweedfs:local + ports: + - 9335:9335 + - 19335:19335 + command: "master -ip=master2 -port=9335 -peers=master0:9333,master1:9334,master2:9335 -mdir=/data/m3" + volume1: + image: chrislusf/seaweedfs:local + ports: + - 8080:8080 + - 18080:18080 + command: 'volume -mserver="master0:9333,master1:9334,master2:9335" -port=8080 -ip=volume1 -publicUrl=localhost:8080 -preStopSeconds=1 -disk=ssd1' + depends_on: + - master0 + - master1 + - master2 + volume2: + image: chrislusf/seaweedfs:local + ports: + - 8082:8082 + - 18082:18082 + command: 'volume -mserver="master0:9333,master1:9334,master2:9335" -port=8082 -ip=volume2 -publicUrl=localhost:8082 -preStopSeconds=1 -disk=ssd1' + depends_on: + - master0 + - master1 + - master2 + volume3: + image: chrislusf/seaweedfs:local + ports: + - 8083:8083 + - 18083:18083 + command: 'volume -mserver="master0:9333,master1:9334,master2:9335" -port=8083 -ip=volume3 -publicUrl=localhost:8083 -preStopSeconds=1' + depends_on: + - master0 + - master1 + - master2 + filer: + image: chrislusf/seaweedfs:local + ports: + - 8888:8888 + - 18888:18888 + command: 'filer -master="master0:9333,master1:9334,master2:9335"' + depends_on: + - master0 + - master1 + - master2 + - volume1 + - volume2 + s3: + image: chrislusf/seaweedfs:local + ports: + - 8333:8333 + command: 's3 -filer="filer:8888"' + depends_on: + - master0 + - master1 + - master2 + - volume1 + - volume2 + - filer diff --git a/docker/compose/local-clusters-compose.yml b/docker/compose/local-clusters-compose.yml new file mode 100644 index 000000000..f9e9a1589 --- /dev/null +++ b/docker/compose/local-clusters-compose.yml @@ -0,0 +1,28 @@ +version: '2' + +services: + server1: + image: chrislusf/seaweedfs:local + ports: + - 9333:9333 + - 19333:19333 + - 8084:8080 + - 18084:18080 + - 8888:8888 + - 18888:18888 + command: "server -ip=server1 -filer -volume.max=0 -master.volumeSizeLimitMB=1024 -volume.preStopSeconds=1" + volumes: + - ./master-cloud.toml:/etc/seaweedfs/master.toml + depends_on: + - server2 + server2: + image: chrislusf/seaweedfs:local + ports: + - 9334:9333 + - 19334:19333 + - 8085:8080 + - 18085:18080 + - 8889:8888 + - 18889:18888 + - 8334:8333 + command: "server -ip=server2 -filer -s3 -volume.max=0 
-master.volumeSizeLimitMB=1024 -volume.preStopSeconds=1" diff --git a/docker/compose/local-dev-compose.yml b/docker/compose/local-dev-compose.yml new file mode 100644 index 000000000..01d0594a6 --- /dev/null +++ b/docker/compose/local-dev-compose.yml @@ -0,0 +1,67 @@ +version: '2' + +services: + master: + image: chrislusf/seaweedfs:local + ports: + - 9333:9333 + - 19333:19333 + command: "-v=1 master -ip=master" + volumes: + - ./tls:/etc/seaweedfs/tls + env_file: + - ${ENV_FILE:-dev.env} + volume: + image: chrislusf/seaweedfs:local + ports: + - 8080:8080 + - 18080:18080 + command: "-v=1 volume -mserver=master:9333 -port=8080 -ip=volume -preStopSeconds=1" + depends_on: + - master + volumes: + - ./tls:/etc/seaweedfs/tls + env_file: + - ${ENV_FILE:-dev.env} + filer: + image: chrislusf/seaweedfs:local + ports: + - 8111:8111 + - 8888:8888 + - 18888:18888 + command: '-v=1 filer -master="master:9333" -iam' + depends_on: + - master + - volume + volumes: + - ./tls:/etc/seaweedfs/tls + env_file: + - ${ENV_FILE:-dev.env} + s3: + image: chrislusf/seaweedfs:local + ports: + - 8333:8333 + command: '-v=1 s3 -filer="filer:8888"' + depends_on: + - master + - volume + - filer + volumes: + - ./tls:/etc/seaweedfs/tls + env_file: + - ${ENV_FILE:-dev.env} + mount: + image: chrislusf/seaweedfs:local + privileged: true + cap_add: + - SYS_ADMIN + mem_limit: 4096m + command: '-v=4 mount -filer="filer:8888" -dirAutoCreate -dir=/mnt/seaweedfs -cacheCapacityMB=100 -concurrentWriters=128' + volumes: + - ./tls:/etc/seaweedfs/tls + env_file: + - ${ENV_FILE:-dev.env} + depends_on: + - master + - volume + - filer diff --git a/docker/compose/local-k8s-compose.yml b/docker/compose/local-k8s-compose.yml new file mode 100644 index 000000000..9a25465c4 --- /dev/null +++ b/docker/compose/local-k8s-compose.yml @@ -0,0 +1,94 @@ +version: '2' + +services: + master: + image: chrislusf/seaweedfs:local + ports: + - 9333:9333 + - 19333:19333 + command: "master -ip=master" + volume: + image: chrislusf/seaweedfs:local + ports: + - 8080:8080 + - 18080:18080 + command: "volume -mserver=master:9333 -port=8080 -ip=volume" + depends_on: + - master + mysql: + image: percona/percona-server:5.7 + ports: + - 3306:3306 + volumes: + - ./seaweedfs.sql:/docker-entrypoint-initdb.d/seaweedfs.sql + environment: + - MYSQL_ROOT_PASSWORD=secret + - MYSQL_DATABASE=seaweedfs + - MYSQL_PASSWORD=secret + - MYSQL_USER=seaweedfs + filer: + image: chrislusf/seaweedfs:local + ports: + - 8888:8888 + - 18888:18888 + environment: + - WEED_MYSQL_HOSTNAME=mysql + - WEED_MYSQL_PORT=3306 + - WEED_MYSQL_DATABASE=seaweedfs + - WEED_MYSQL_USERNAME=seaweedfs + - WEED_MYSQL_PASSWORD=secret + - WEED_MYSQL_ENABLED=true + - WEED_MYSQL_CONNECTION_MAX_IDLE=5 + - WEED_MYSQL_CONNECTION_MAX_OPEN=75 + # "refresh" connection every 10 minutes, eliminating mysql closing "old" connections + - WEED_MYSQL_CONNECTION_MAX_LIFETIME_SECONDS=600 + # enable usage of memsql as filer backend + - WEED_MYSQL_INTERPOLATEPARAMS=true + - WEED_LEVELDB2_ENABLED=false + command: '-v 9 filer -master="master:9333"' + depends_on: + - master + - volume + - mysql + ingress: + image: jwilder/nginx-proxy:alpine + ports: + - "80:80" + volumes: + - /var/run/docker.sock:/tmp/docker.sock:ro + - ./nginx/proxy.conf:/etc/nginx/proxy.conf + s3: + image: chrislusf/seaweedfs:local + ports: + - 8333:8333 + command: '-v 9 s3 -filer="filer:8888"' + depends_on: + - master + - volume + - filer + environment: + - VIRTUAL_HOST=ingress + - VIRTUAL_PORT=8333 + registry: + image: registry:2 + environment: + REGISTRY_HTTP_ADDR: 
"0.0.0.0:5001" # seaweedfs s3 + REGISTRY_LOG_LEVEL: "debug" + REGISTRY_STORAGE: "s3" + REGISTRY_STORAGE_S3_REGION: "us-east-1" + REGISTRY_STORAGE_S3_REGIONENDPOINT: "http://ingress" + REGISTRY_STORAGE_S3_BUCKET: "registry" + REGISTRY_STORAGE_S3_ACCESSKEY: "some_access_key1" + REGISTRY_STORAGE_S3_SECRETKEY: "some_secret_key1" + REGISTRY_STORAGE_S3_V4AUTH: "true" + REGISTRY_STORAGE_S3_SECURE: "false" + REGISTRY_STORAGE_S3_SKIPVERIFY: "true" + REGISTRY_STORAGE_S3_ROOTDIRECTORY: "/" + REGISTRY_STORAGE_DELETE_ENABLED: "true" + REGISTRY_STORAGE_REDIRECT_DISABLE: "true" + REGISTRY_VALIDATION_DISABLED: "true" + ports: + - 5001:5001 + depends_on: + - s3 + - ingress \ No newline at end of file diff --git a/docker/compose/local-minio-gateway-compose.yml b/docker/compose/local-minio-gateway-compose.yml new file mode 100644 index 000000000..fafee59c8 --- /dev/null +++ b/docker/compose/local-minio-gateway-compose.yml @@ -0,0 +1,50 @@ +version: '2' + +services: + master: + image: chrislusf/seaweedfs:local + ports: + - 9333:9333 + - 19333:19333 + command: "master -ip=master -volumeSizeLimitMB=1024" + volume: + image: chrislusf/seaweedfs:local + ports: + - 8080:8080 + - 18080:18080 + command: "volume -mserver=master:9333 -port=8080 -ip=volume -max=0 -preStopSeconds=1" + depends_on: + - master + s3: + image: chrislusf/seaweedfs:local + ports: + - 8888:8888 + - 18888:18888 + - 8333:8333 + command: '-v 1 filer -master="master:9333" -s3 -s3.config=/etc/seaweedfs/s3.json -s3.port=8333' + volumes: + - ./s3.json:/etc/seaweedfs/s3.json + depends_on: + - master + - volume + minio-gateway-s3: + image: minio/minio + ports: + - 9000:9000 + command: 'minio gateway s3 http://s3:8333' + restart: on-failure + environment: + MINIO_ACCESS_KEY: "some_access_key1" + MINIO_SECRET_KEY: "some_secret_key1" + depends_on: + - s3 + minio-warp: + image: minio/warp + command: 'mixed --duration=5m --obj.size=3mb --autoterm' + restart: on-failure + environment: + WARP_HOST: "minio-gateway-s3:9000" + WARP_ACCESS_KEY: "some_access_key1" + WARP_SECRET_KEY: "some_secret_key1" + depends_on: + - minio-gateway-s3 \ No newline at end of file diff --git a/docker/compose/local-mount-compose.yml b/docker/compose/local-mount-compose.yml new file mode 100644 index 000000000..b1c579cdf --- /dev/null +++ b/docker/compose/local-mount-compose.yml @@ -0,0 +1,46 @@ +version: '2' + +services: + master: + image: chrislusf/seaweedfs:local + ports: + - 9333:9333 + - 19333:19333 + command: "master -ip=master" + volume: + image: chrislusf/seaweedfs:local + ports: + - 7455:8080 + - 9325:9325 + command: 'volume -mserver="master:9333" -port=8080 -metricsPort=9325 -preStopSeconds=1 -publicUrl=localhost:7455' + depends_on: + - master + filer: + image: chrislusf/seaweedfs:local + ports: + - 8888:8888 + - 18888:18888 + - 9326:9326 + command: 'filer -master="master:9333" -metricsPort=9326' + tty: true + stdin_open: true + depends_on: + - master + - volume + mount_1: + image: chrislusf/seaweedfs:local + privileged: true + entrypoint: '/bin/sh -c "mkdir -p t1 && mkdir -p cache/t1 && weed -v=4 mount -filer=filer:8888 -cacheDir=./cache/t1 -dir=./t1 -filer.path=/c1 -volumeServerAccess=filerProxy"' + depends_on: + - master + - volume + - filer + mount_2: + image: chrislusf/seaweedfs:local + privileged: true + entrypoint: '/bin/sh -c "mkdir -p t2 && mkdir -p cache/t2 && weed -v=4 mount -filer=filer:8888 -cacheDir=./cache/t2 -dir=./t2 -filer.path=/c1 -volumeServerAcess=publicUrl"' + depends_on: + - master + - volume + - filer + - mount_1 diff --git 
a/docker/compose/local-mount-profile-compose.yml b/docker/compose/local-mount-profile-compose.yml new file mode 100644 index 000000000..4682591c4 --- /dev/null +++ b/docker/compose/local-mount-profile-compose.yml @@ -0,0 +1,47 @@ +version: '2' + +services: + master: + image: chrislusf/seaweedfs:local + ports: + - 9333:9333 + - 19333:19333 + command: "master -ip=master" + volume: + image: chrislusf/seaweedfs:local + ports: + - 7455:8080 + - 9325:9325 + volumes: + - /Volumes/mobile_disk/99:/data + command: 'volume -mserver="master:9333" -port=8080 -metricsPort=9325 -preStopSeconds=1 -publicUrl=localhost:7455' + depends_on: + - master + filer: + image: chrislusf/seaweedfs:local + ports: + - 8888:8888 + - 18888:18888 + - 9326:9326 + volumes: + - /Volumes/mobile_disk/99:/data + command: 'filer -master="master:9333" -metricsPort=9326' + tty: true + stdin_open: true + depends_on: + - master + - volume + mount: + image: chrislusf/seaweedfs:local + privileged: true + cap_add: + - SYS_ADMIN + devices: + - fuse + volumes: + - /Volumes/mobile_disk/99:/data + entrypoint: '/bin/sh -c "mkdir -p t1 && weed mount -filer=filer:8888 -dir=./t1 -cacheCapacityMB=0 -memprofile=/data/mount.mem.pprof"' + depends_on: + - master + - volume + - filer diff --git a/docker/compose/local-registry-compose.yml b/docker/compose/local-registry-compose.yml new file mode 100644 index 000000000..b61278d66 --- /dev/null +++ b/docker/compose/local-registry-compose.yml @@ -0,0 +1,85 @@ +version: '2' + +services: + master: + image: chrislusf/seaweedfs:local + ports: + - 9333:9333 + - 19333:19333 + command: "master -ip=master -volumeSizeLimitMB=1024" + volume: + image: chrislusf/seaweedfs:local + ports: + - 8080:8080 + - 18080:18080 + command: "volume -mserver=master:9333 -port=8080 -ip=volume -max=0 -preStopSeconds=1" + depends_on: + - master + s3: + image: chrislusf/seaweedfs:local + ports: + - 8888:8888 + - 18888:18888 + - 8333:8333 + command: '-v 9 filer -master="master:9333" -s3 -s3.config=/etc/seaweedfs/s3.json -s3.port=8333' + volumes: + - ./s3.json:/etc/seaweedfs/s3.json + depends_on: + - master + - volume + minio: + image: minio/minio + ports: + - 9000:9000 + command: 'minio server /data' + environment: + MINIO_ACCESS_KEY: "some_access_key1" + MINIO_SECRET_KEY: "some_secret_key1" + depends_on: + - master + registry1: + image: registry:2 + environment: + REGISTRY_HTTP_ADDR: "0.0.0.0:5001" # seaweedfs s3 + REGISTRY_LOG_LEVEL: "debug" + REGISTRY_STORAGE: "s3" + REGISTRY_STORAGE_S3_REGION: "us-east-1" + REGISTRY_STORAGE_S3_REGIONENDPOINT: "http://s3:8333" + REGISTRY_STORAGE_S3_BUCKET: "registry" + REGISTRY_STORAGE_S3_ACCESSKEY: "some_access_key1" + REGISTRY_STORAGE_S3_SECRETKEY: "some_secret_key1" + REGISTRY_STORAGE_S3_V4AUTH: "true" + REGISTRY_STORAGE_S3_SECURE: "false" + REGISTRY_STORAGE_S3_SKIPVERIFY: "true" + REGISTRY_STORAGE_S3_ROOTDIRECTORY: "/" + REGISTRY_STORAGE_DELETE_ENABLED: "true" + REGISTRY_STORAGE_REDIRECT_DISABLE: "true" + REGISTRY_VALIDATION_DISABLED: "true" + ports: + - 5001:5001 + depends_on: + - s3 + - minio + registry2: + image: registry:2 + environment: + REGISTRY_HTTP_ADDR: "0.0.0.0:5002" # minio + REGISTRY_LOG_LEVEL: "debug" + REGISTRY_STORAGE: "s3" + REGISTRY_STORAGE_S3_REGION: "us-east-1" + REGISTRY_STORAGE_S3_REGIONENDPOINT: "http://minio:9000" + REGISTRY_STORAGE_S3_BUCKET: "registry" + REGISTRY_STORAGE_S3_ACCESSKEY: "some_access_key1" + REGISTRY_STORAGE_S3_SECRETKEY: "some_secret_key1" + REGISTRY_STORAGE_S3_V4AUTH: "true" + REGISTRY_STORAGE_S3_SECURE: "false" + REGISTRY_STORAGE_S3_SKIPVERIFY: "true" + 
REGISTRY_STORAGE_S3_ROOTDIRECTORY: "/" + REGISTRY_STORAGE_DELETE_ENABLED: "true" + REGISTRY_STORAGE_REDIRECT_DISABLE: "true" + REGISTRY_VALIDATION_DISABLED: "true" + ports: + - 5002:5002 + depends_on: + - s3 + - minio \ No newline at end of file diff --git a/docker/compose/local-replicate-compose.yml b/docker/compose/local-replicate-compose.yml new file mode 100644 index 000000000..8240d45a7 --- /dev/null +++ b/docker/compose/local-replicate-compose.yml @@ -0,0 +1,61 @@ +version: '2' + +services: + master: + image: chrislusf/seaweedfs:local + ports: + - 9333:9333 + - 19333:19333 + command: "master -ip=master" + volume: + image: chrislusf/seaweedfs:local + ports: + - 8080:8080 + - 18080:18080 + command: "volume -mserver=master:9333 -port=8080 -ip=volume -preStopSeconds=1" + depends_on: + - master + filer: + image: chrislusf/seaweedfs:local + ports: + - 8888:8888 + - 18888:18888 + command: '-v=9 filer -master="master:9333"' + restart: on-failure + volumes: + - ./notification.toml:/etc/seaweedfs/notification.toml + depends_on: + - master + - volume + - rabbitmq + - replicate + environment: + RABBIT_SERVER_URL: "amqp://guest:guest@rabbitmq:5672/" + replicate: + image: chrislusf/seaweedfs:local + command: '-v=9 filer.replicate' + restart: on-failure + volumes: + - ./notification.toml:/etc/seaweedfs/notification.toml + - ./replication.toml:/etc/seaweedfs/replication.toml + depends_on: + - rabbitmq + environment: + RABBIT_SERVER_URL: "amqp://guest:guest@rabbitmq:5672/" + s3: + image: chrislusf/seaweedfs:local + ports: + - 8333:8333 + command: 's3 -filer="filer:8888"' + depends_on: + - master + - volume + - filer + rabbitmq: + image: rabbitmq:3.8.10-management-alpine + ports: + - 5672:5672 + - 15671:15671 + - 15672:15672 + environment: + RABBITMQ_SERVER_ADDITIONAL_ERL_ARGS: "-rabbit log_levels [{connection,error},{queue,debug}]" \ No newline at end of file diff --git a/docker/compose/local-s3tests-compose.yml b/docker/compose/local-s3tests-compose.yml new file mode 100644 index 000000000..381e3eb97 --- /dev/null +++ b/docker/compose/local-s3tests-compose.yml @@ -0,0 +1,45 @@ +version: '2' + +services: + master: + image: chrislusf/seaweedfs:local + ports: + - 9333:9333 + - 19333:19333 + command: "master -ip=master -volumeSizeLimitMB=16" + environment: + WEED_MASTER_VOLUME_GROWTH_COPY_1: 1 + WEED_MASTER_VOLUME_GROWTH_COPY_OTHER: 1 + volume: + image: chrislusf/seaweedfs:local + ports: + - 8080:8080 + - 18080:18080 + command: "volume -mserver=master:9333 -port=8080 -ip=volume -preStopSeconds=1" + depends_on: + - master + s3: + image: chrislusf/seaweedfs:local + ports: + - 8888:8888 + - 18888:18888 + - 8000:8000 + command: 'filer -master="master:9333" -s3 -s3.config=/etc/seaweedfs/s3.json -s3.port=8000' + volumes: + - ./s3.json:/etc/seaweedfs/s3.json + depends_on: + - master + - volume + s3tests: + image: chrislusf/ceph-s3-tests:local + volumes: + - ./s3tests.conf:/opt/s3-tests/s3tests.conf + environment: + S3TEST_CONF: "s3tests.conf" + NOSETESTS_OPTIONS: "--verbose --logging-level=ERROR --with-xunit --failure-detail s3tests_boto3.functional.test_s3" + NOSETESTS_ATTR: "!tagging,!fails_on_aws,!encryption,!bucket-policy,!versioning,!fails_on_rgw,!bucket-policy,!fails_with_subdomain,!policy_status,!object-lock,!lifecycle,!cors,!user-policy" + NOSETESTS_EXCLUDE: 
"(bucket_list_delimiter_basic|bucket_listv2_delimiter_basic|bucket_listv2_encoding_basic|bucket_list_encoding_basic|bucket_list_delimiter_prefix|bucket_listv2_delimiter_prefix_ends_with_delimiter|bucket_list_delimiter_prefix_ends_with_delimiter|bucket_list_delimiter_alt|bucket_listv2_delimiter_alt|bucket_list_delimiter_prefix_underscore|bucket_list_delimiter_percentage|bucket_listv2_delimiter_percentage|bucket_list_delimiter_whitespace|bucket_listv2_delimiter_whitespace|bucket_list_delimiter_dot|bucket_listv2_delimiter_dot|bucket_list_delimiter_unreadable|bucket_listv2_delimiter_unreadable|bucket_listv2_fetchowner_defaultempty|bucket_listv2_fetchowner_empty|bucket_list_prefix_delimiter_alt|bucket_listv2_prefix_delimiter_alt|bucket_list_prefix_delimiter_prefix_not_exist|bucket_listv2_prefix_delimiter_prefix_not_exist|bucket_list_prefix_delimiter_delimiter_not_exist|bucket_listv2_prefix_delimiter_delimiter_not_exist|bucket_list_prefix_delimiter_prefix_delimiter_not_exist|bucket_listv2_prefix_delimiter_prefix_delimiter_not_exist|bucket_list_maxkeys_none|bucket_listv2_maxkeys_none|bucket_list_maxkeys_invalid|bucket_listv2_continuationtoken_empty|bucket_list_return_data|bucket_list_objects_anonymous|bucket_listv2_objects_anonymous|bucket_notexist|bucketv2_notexist|bucket_delete_nonempty|bucket_concurrent_set_canned_acl|object_write_to_nonexist_bucket|object_requestid_matches_header_on_error|object_write_cache_control|object_write_expires|object_set_get_metadata_none_to_good|object_set_get_metadata_none_to_empty|object_set_get_metadata_overwrite_to_empty|post_object_anonymous_request|post_object_authenticated_request|post_object_authenticated_no_content_type|post_object_authenticated_request_bad_access_key|post_object_set_success_code|post_object_set_invalid_success_code|post_object_upload_larger_than_chunk|post_object_set_key_from_filename|post_object_ignored_header|post_object_case_insensitive_condition_fields|post_object_escaped_field_values|post_object_success_redirect_action|post_object_invalid_signature|post_object_invalid_access_key|post_object_missing_policy_condition|post_object_user_specified_header|post_object_request_missing_policy_specified_field|post_object_expired_policy|post_object_invalid_request_field_value|get_object_ifmatch_failed|get_object_ifunmodifiedsince_good|put_object_ifmatch_failed|object_raw_get_bucket_gone|object_delete_key_bucket_gone|object_raw_get_bucket_acl|object_raw_get_object_acl|object_raw_response_headers|object_raw_authenticated_bucket_gone|object_raw_get_x_amz_expires_out_max_range|object_raw_get_x_amz_expires_out_positive_range|object_anon_put_write_access|object_raw_put_authenticated_expired|bucket_create_exists|bucket_create_naming_bad_short_one|bucket_create_naming_bad_short_two|bucket_get_location|bucket_acl_default|bucket_acl_canned|bucket_acl_canned_publicreadwrite|bucket_acl_canned_authenticatedread|object_acl_default|object_acl_canned_during_create|object_acl_canned|object_acl_canned_publicreadwrite|object_acl_canned_authenticatedread|object_acl_canned_bucketownerread|object_acl_canned_bucketownerfullcontrol|object_acl_full_control_verify_attributes|bucket_acl_canned_private_to_private|bucket_acl_grant_nonexist_user|bucket_acl_no_grants|bucket_acl_grant_email_not_exist|bucket_acl_revoke_all|bucket_recreate_not_overriding|object_copy_verify_contenttype|object_copy_to_itself_with_metadata|object_copy_not_owned_bucket|object_copy_not_owned_object_bucket|object_copy_retaining_metadata|object_copy_replacing_metadata|multipart_upload_empty|multipart_cop
y_invalid_range|multipart_copy_special_names|multipart_upload_resend_part|multipart_upload_size_too_small|abort_multipart_upload_not_found|multipart_upload_missing_part|multipart_upload_incorrect_etag|100_continue|ranged_request_invalid_range|ranged_request_empty_object|access_bucket)" + depends_on: + - master + - volume + - s3 \ No newline at end of file diff --git a/docker/compose/master-cloud.toml b/docker/compose/master-cloud.toml new file mode 100644 index 000000000..17289c114 --- /dev/null +++ b/docker/compose/master-cloud.toml @@ -0,0 +1,30 @@ + +# Put this file in one of these locations, with descending priority +# ./master.toml +# $HOME/.seaweedfs/master.toml +# /etc/seaweedfs/master.toml +# this file is read by the master + +[master.maintenance] +# periodically running these scripts is the same as running them from 'weed shell' +scripts = """ + lock + ec.encode -fullPercent=95 -quietFor=1h + ec.rebuild -force + ec.balance -force + volume.balance -force + volume.fix.replication + unlock +""" +sleep_minutes = 17 # sleep minutes between each script execution + +# configurations for tiered cloud storage +# old volumes are transparently moved to cloud for cost efficiency +[storage.backend] + [storage.backend.s3.default] + enabled = true + aws_access_key_id = "any" # if empty, loads from the shared credentials file (~/.aws/credentials). + aws_secret_access_key = "any" # if empty, loads from the shared credentials file (~/.aws/credentials). + region = "us-east-2" + bucket = "volume_bucket" # an existing bucket + endpoint = "http://server2:8333" diff --git a/docker/compose/notification.toml b/docker/compose/notification.toml new file mode 100644 index 000000000..dcd5f2c6f --- /dev/null +++ b/docker/compose/notification.toml @@ -0,0 +1,17 @@ +[notification.log] +# this is only for debugging purposes and does not work with "weed filer.replicate" +enabled = false + + +[notification.gocdk_pub_sub] +# The Go Cloud Development Kit (https://gocloud.dev). +# PubSub API (https://godoc.org/gocloud.dev/pubsub). +# Supports AWS SNS/SQS, Azure Service Bus, Google PubSub, NATS and RabbitMQ. +enabled = true +# This URL will Dial the RabbitMQ server at the URL in the environment +# variable RABBIT_SERVER_URL and open the exchange "swexchange". +# The exchange must have already been created by some other means, like +# the RabbitMQ management plugin: create the exchange "swexchange" of type fanout and the queue "swqueue", +# then create the binding swexchange => swqueue +topic_url = "rabbit://swexchange" +sub_url = "rabbit://swqueue" \ No newline at end of file diff --git a/docker/compose/replication.toml b/docker/compose/replication.toml new file mode 100644 index 000000000..833bb1692 --- /dev/null +++ b/docker/compose/replication.toml @@ -0,0 +1,11 @@ +[source.filer] +enabled = true +grpcAddress = "filer:18888" +# all files under this directory tree are replicated. +# this is not a directory on your hard drive, but on your filer. +# i.e., all files with this "prefix" are sent to the notification message queue.
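+# with notification.toml above publishing filer events to RabbitMQ, "weed filer.replicate" consumes each event +# and re-applies it through the sink configured below; e.g. a write to /buckets/b1/f.txt on the source filer +# would be re-created under the sink directory (the exact layout depends on the sink; paths are for illustration only).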
+directory = "/buckets" + +[sink.local_incremental] +enabled = true +directory = "/data" \ No newline at end of file diff --git a/docker/compose/s3.json b/docker/compose/s3.json new file mode 100644 index 000000000..64dedb681 --- /dev/null +++ b/docker/compose/s3.json @@ -0,0 +1,105 @@ +{ + "identities": [ + { + "name": "anonymous", + "actions": [ + "Read" + ] + }, + { + "name": "some_admin_user", + "credentials": [ + { + "accessKey": "some_access_key1", + "secretKey": "some_secret_key1" + } + ], + "actions": [ + "Admin", + "Read", + "List", + "Tagging", + "Write" + ] + }, + { + "name": "s3_tests", + "credentials": [ + { + "accessKey": "ABCDEFGHIJKLMNOPQRST", + "secretKey": "abcdefghijklmnopqrstuvwxyzabcdefghijklmn" + }, + { + "accessKey": "0555b35654ad1656d804", + "secretKey": "h7GhxuBLTrlhVUyxSPUKUV8r/2EI4ngqJxD7iBdBYLhwluN30JaT3Q==" + } + ], + "actions": [ + "Admin", + "Read", + "List", + "Tagging", + "Write" + ] + }, + { + "name": "s3_tests_alt", + "credentials": [ + { + "accessKey": "NOPQRSTUVWXYZABCDEFG", + "secretKey": "nopqrstuvwxyzabcdefghijklmnabcdefghijklm" + } + ], + "actions": [ + "Admin", + "Read", + "List", + "Tagging", + "Write" + ] + }, + { + "name": "s3_tests_tenant", + "credentials": [ + { + "accessKey": "HIJKLMNOPQRSTUVWXYZA", + "secretKey": "opqrstuvwxyzabcdefghijklmnopqrstuvwxyzab" + } + ], + "actions": [ + "Admin", + "Read", + "List", + "Tagging", + "Write" + ] + }, + { + "name": "some_read_only_user", + "credentials": [ + { + "accessKey": "some_access_key2", + "secretKey": "some_secret_key2" + } + ], + "actions": [ + "Read" + ] + }, + { + "name": "some_normal_user", + "credentials": [ + { + "accessKey": "some_access_key3", + "secretKey": "some_secret_key3" + } + ], + "actions": [ + "Read", + "List", + "Tagging", + "Write" + ] + } + ] +} \ No newline at end of file diff --git a/docker/compose/s3tests.conf b/docker/compose/s3tests.conf new file mode 100644 index 000000000..68d9ddeb7 --- /dev/null +++ b/docker/compose/s3tests.conf @@ -0,0 +1,70 @@ +[DEFAULT] +## this section is just used for host, port and bucket_prefix + +# host set for rgw in vstart.sh +host = s3 + +# port set for rgw in vstart.sh +port = 8000 + +## say "False" to disable TLS +is_secure = False + +[fixtures] +## all the buckets created will start with this prefix; +## {random} will be filled with random characters to pad +## the prefix to 30 characters long, and avoid collisions +bucket prefix = yournamehere-{random}- + +[s3 main] +# main display_name set in vstart.sh +display_name = M. 
Tester + +# main user_id set in vstart.sh +user_id = testid + +# main email set in vstart.sh +email = tester@ceph.com + +# zonegroup api_name for bucket location +api_name = default + +## main AWS access key +access_key = 0555b35654ad1656d804 + +## main AWS secret key +secret_key = h7GhxuBLTrlhVUyxSPUKUV8r/2EI4ngqJxD7iBdBYLhwluN30JaT3Q== + +## replace with key id obtained when secret is created, or delete if KMS not tested +#kms_keyid = 01234567-89ab-cdef-0123-456789abcdef + +[s3 alt] +# alt display_name set in vstart.sh +display_name = john.doe +# alt email set in vstart.sh +email = john.doe@example.com + +# alt user_id set in vstart.sh +user_id = 56789abcdef0123456789abcdef0123456789abcdef0123456789abcdef01234 + +# alt AWS access key set in vstart.sh +access_key = NOPQRSTUVWXYZABCDEFG + +# alt AWS secret key set in vstart.sh +secret_key = nopqrstuvwxyzabcdefghijklmnabcdefghijklm + +[s3 tenant] +# tenant display_name set in vstart.sh +display_name = testx$tenanteduser + +# tenant user_id set in vstart.sh +user_id = 9876543210abcdef0123456789abcdef0123456789abcdef0123456789abcdef + +# tenant AWS access key set in vstart.sh +access_key = HIJKLMNOPQRSTUVWXYZA + +# tenant AWS secret key set in vstart.sh +secret_key = opqrstuvwxyzabcdefghijklmnopqrstuvwxyzab + +# tenant email set in vstart.sh +email = tenanteduser@example.com \ No newline at end of file diff --git a/docker/compose/swarm-etcd.yml b/docker/compose/swarm-etcd.yml new file mode 100644 index 000000000..186b24790 --- /dev/null +++ b/docker/compose/swarm-etcd.yml @@ -0,0 +1,84 @@ +# 2021-01-30 16:25:30 +version: '3.8' + +services: + + etcd: + image: gasparekatapy/etcd + networks: + - net + deploy: + mode: replicated + replicas: 3 + + master: + image: chrislusf/seaweedfs:local + environment: + WEED_MASTER_FILER_DEFAULT: "filer:8888" + WEED_MASTER_SEQUENCER_TYPE: "raft" + ports: + - "9333:9333" + - "19333:19333" + networks: + - net + command: + - 'master' + - '-resumeState=true' + - '-ip=master' + - '-port=9333' + deploy: + mode: replicated + replicas: 1 + + filer: + image: chrislusf/seaweedfs:local + environment: + WEED_LEVELDB2_ENABLED: "false" + WEED_ETCD_ENABLED: "true" + WEED_ETCD_SERVERS: "etcd:2379" + ports: + - target: 8888 + published: 8888 + protocol: tcp + mode: host + - target: 18888 + published: 18888 + protocol: tcp + mode: host + networks: + - net + command: + - 'filer' + - '-ip=filer' + - '-port=8888' + - '-port.readonly=28888' + - '-master=master:9333' + - '-disableDirListing=true' + deploy: + mode: replicated + replicas: 1 + + volume: + image: chrislusf/seaweedfs:local + ports: + - target: 8080 + published: 8080 + protocol: tcp + mode: host + - target: 18080 + published: 18080 + protocol: tcp + mode: host + networks: + - net + command: + - 'volume' + - '-mserver=master:9333' + - '-port=8080' + deploy: + mode: global + + ########################################################################### + +networks: + net: diff --git a/docker/compose/tls.env b/docker/compose/tls.env new file mode 100644 index 000000000..a82954c4f --- /dev/null +++ b/docker/compose/tls.env @@ -0,0 +1,14 @@ +WEED_GRPC_CA=/etc/seaweedfs/tls/SeaweedFS_CA.crt +WEED_GRPC_ALLOWED_WILDCARD_DOMAIN=".dev" +WEED_GRPC_MASTER_CERT=/etc/seaweedfs/tls/master01.dev.crt +WEED_GRPC_MASTER_KEY=/etc/seaweedfs/tls/master01.dev.key +WEED_GRPC_VOLUME_CERT=/etc/seaweedfs/tls/volume01.dev.crt +WEED_GRPC_VOLUME_KEY=/etc/seaweedfs/tls/volume01.dev.key +WEED_GRPC_FILER_CERT=/etc/seaweedfs/tls/filer01.dev.crt +WEED_GRPC_FILER_KEY=/etc/seaweedfs/tls/filer01.dev.key
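+# each component presents its own cert/key pair issued by the shared SeaweedFS_CA above; WEED_GRPC_ALLOWED_WILDCARD_DOMAIN=".dev" +# is intended to accept peer certificates whose common name falls under *.dev, and the *_ALLOWED_COMMONNAMES lists below +# narrow that to the four names used here (paths and common names are illustrative; they must match the certs you actually generate).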
+WEED_GRPC_CLIENT_CERT=/etc/seaweedfs/tls/client01.dev.crt +WEED_GRPC_CLIENT_KEY=/etc/seaweedfs/tls/client01.dev.key +WEED_GRPC_MASTER_ALLOWED_COMMONNAMES="volume01.dev,master01.dev,filer01.dev,client01.dev" +WEED_GRPC_VOLUME_ALLOWED_COMMONNAMES="volume01.dev,master01.dev,filer01.dev,client01.dev" +WEED_GRPC_FILER_ALLOWED_COMMONNAMES="volume01.dev,master01.dev,filer01.dev,client01.dev" +WEED_GRPC_CLIENT_ALLOWED_COMMONNAMES="volume01.dev,master01.dev,filer01.dev,client01.dev" \ No newline at end of file diff --git a/docker/dev-compose.yml b/docker/dev-compose.yml deleted file mode 100644 index 1f44ff483..000000000 --- a/docker/dev-compose.yml +++ /dev/null @@ -1,43 +0,0 @@ -version: '2' - -services: - master: - build: - context: . - dockerfile: Dockerfile.go_build - ports: - - 9333:9333 - - 19333:19333 - command: "master -ip=master" - volume: - build: - context: . - dockerfile: Dockerfile.go_build - ports: - - 8080:8080 - - 18080:18080 - command: 'volume -max=5 -mserver="master:9333" -port=8080' - depends_on: - - master - filer: - build: - context: . - dockerfile: Dockerfile.go_build - ports: - - 8888:8888 - - 18888:18888 - command: 'filer -master="master:9333"' - depends_on: - - master - - volume - s3: - build: - context: . - dockerfile: Dockerfile.go_build - ports: - - 8333:8333 - command: 's3 -filer="filer:8888"' - depends_on: - - master - - volume - - filer diff --git a/docker/entrypoint.sh b/docker/entrypoint.sh index 791527d3a..a5a240575 100755 --- a/docker/entrypoint.sh +++ b/docker/entrypoint.sh @@ -1,55 +1,68 @@ #!/bin/sh +isArgPassed() { + arg="$1" + argWithEqualSign="$1=" + shift + while [ $# -gt 0 ]; do + passedArg="$1" + shift + case $passedArg in + $arg) + return 0 + ;; + $argWithEqualSign*) + return 0 + ;; + esac + done + return 1 +} + case "$1" in 'master') - ARGS="-mdir /data" - # Is this instance linked with an other master? (Docker commandline "--link master1:master") - if [ -n "$MASTER_PORT_9333_TCP_ADDR" ] ; then - ARGS="$ARGS -peers=$MASTER_PORT_9333_TCP_ADDR:$MASTER_PORT_9333_TCP_PORT" - fi - exec /usr/bin/weed $@ $ARGS + ARGS="-mdir=/data -volumePreallocate -volumeSizeLimitMB=1024" + shift + exec /usr/bin/weed master $ARGS $@ ;; 'volume') - ARGS="-ip `hostname -i` -dir /data" - # Is this instance linked with a master? 
(Docker commandline "--link master1:master") - if [ -n "$MASTER_PORT_9333_TCP_ADDR" ] ; then - ARGS="$ARGS -mserver=$MASTER_PORT_9333_TCP_ADDR:$MASTER_PORT_9333_TCP_PORT" - fi - exec /usr/bin/weed $@ $ARGS + ARGS="-dir=/data -max=0" + if isArgPassed "-max" "$@"; then + ARGS="-dir=/data" + fi + shift + exec /usr/bin/weed volume $ARGS $@ ;; 'server') - ARGS="-ip `hostname -i` -dir /data" - if [ -n "$MASTER_PORT_9333_TCP_ADDR" ] ; then - ARGS="$ARGS -master.peers=$MASTER_PORT_9333_TCP_ADDR:$MASTER_PORT_9333_TCP_PORT" - fi - exec /usr/bin/weed $@ $ARGS + ARGS="-dir=/data -volume.max=0 -master.volumePreallocate -master.volumeSizeLimitMB=1024" + if isArgPassed "-volume.max" "$@"; then + ARGS="-dir=/data -master.volumePreallocate -master.volumeSizeLimitMB=1024" + fi + shift + exec /usr/bin/weed server $ARGS $@ ;; 'filer') ARGS="" - if [ -n "$MASTER_PORT_9333_TCP_ADDR" ] ; then - ARGS="$ARGS -master=$MASTER_PORT_9333_TCP_ADDR:$MASTER_PORT_9333_TCP_PORT" - fi - exec /usr/bin/weed $@ $ARGS + shift + exec /usr/bin/weed filer $ARGS $@ ;; 's3') ARGS="-domainName=$S3_DOMAIN_NAME -key.file=$S3_KEY_FILE -cert.file=$S3_CERT_FILE" - if [ -n "$FILER_PORT_8888_TCP_ADDR" ] ; then - ARGS="$ARGS -filer=$FILER_PORT_8888_TCP_ADDR:$FILER_PORT_8888_TCP_PORT" - fi - exec /usr/bin/weed $@ $ARGS + shift + exec /usr/bin/weed s3 $ARGS $@ ;; 'cronjob') MASTER=${WEED_MASTER-localhost:9333} FIX_REPLICATION_CRON_SCHEDULE=${CRON_SCHEDULE-*/7 * * * * *} - echo "$FIX_REPLICATION_CRON_SCHEDULE" 'echo "volume.fix.replication" | weed shell -master='$MASTER > /crontab + echo "$FIX_REPLICATION_CRON_SCHEDULE" 'echo "lock; volume.fix.replication; unlock" | weed shell -master='$MASTER > /crontab BALANCING_CRON_SCHEDULE=${CRON_SCHEDULE-25 * * * * *} - echo "$BALANCING_CRON_SCHEDULE" 'echo "volume.balance -c ALL -force" | weed shell -master='$MASTER >> /crontab + echo "$BALANCING_CRON_SCHEDULE" 'echo "lock; volume.balance -collection ALL_COLLECTIONS -force; unlock" | weed shell -master='$MASTER >> /crontab echo "Running Crontab:" cat /crontab exec supercronic /crontab diff --git a/docker/nginx/proxy.conf b/docker/nginx/proxy.conf new file mode 100644 index 000000000..59ff30ce2 --- /dev/null +++ b/docker/nginx/proxy.conf @@ -0,0 +1,30 @@ +# HTTP 1.1 support +proxy_http_version 1.1; +#proxy_buffering off; +proxy_set_header Host $http_host; +proxy_set_header Upgrade $http_upgrade; +proxy_set_header Connection $proxy_connection; +proxy_set_header X-Real-IP $remote_addr; +proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; +proxy_set_header X-Forwarded-Proto $proxy_x_forwarded_proto; +proxy_set_header X-Forwarded-Ssl $proxy_x_forwarded_ssl; +proxy_set_header X-Forwarded-Port $proxy_x_forwarded_port; + +# Mitigate httpoxy attack (see README for details) +proxy_set_header Proxy ""; + +# aws default max_concurrent_requests 10 +# aws default multipart_threshold 8MB +proxy_buffering on; # GET buffering; the "X-Accel-Buffering" response header can also enable or disable buffering of a response +proxy_buffers 64 1m; # buffers used for reading a response from the proxied server, for a single connection +proxy_buffer_size 8k; # maximum size of the data that nginx can receive from the server at a time +proxy_busy_buffers_size 2m; + +proxy_request_buffering on; # PUT buffering +client_body_buffer_size 64m; # buffer size for reading client request body +client_max_body_size 64m; + +proxy_next_upstream error timeout non_idempotent http_500; # a failed PUT request should be passed to the next server +proxy_connect_timeout 200ms; +proxy_read_timeout 3s; #timeout is set
only between two successive read operations +proxy_send_timeout 3s; #timeout is set only between two successive write operations diff --git a/docker/prometheus/prometheus.yml b/docker/prometheus/prometheus.yml new file mode 100644 index 000000000..34f669d56 --- /dev/null +++ b/docker/prometheus/prometheus.yml @@ -0,0 +1,13 @@ +global: + scrape_interval: 30s + scrape_timeout: 10s + +scrape_configs: + - job_name: services + metrics_path: /metrics + static_configs: + - targets: + - 'prometheus:9090' + - 'volume:9325' + - 'filer:9326' + - 's3:9327' \ No newline at end of file diff --git a/docker/seaweedfs-compose.yml b/docker/seaweedfs-compose.yml index 7f0cbc6f9..f7d02a105 100644 --- a/docker/seaweedfs-compose.yml +++ b/docker/seaweedfs-compose.yml @@ -4,28 +4,30 @@ services: master: image: chrislusf/seaweedfs # use a remote image ports: - - 9333:9333 - - 19333:19333 + - 9333:9333 + - 19333:19333 command: "master -ip=master" volume: image: chrislusf/seaweedfs # use a remote image ports: - - 8080:8080 - - 18080:18080 - command: 'volume -max=15 -mserver="master:9333" -port=8080' + - 8080:8080 + - 18080:18080 + - 9325:9325 + command: 'volume -mserver="master:9333" -port=8080 -metricsPort=9325' depends_on: - - master + - master filer: image: chrislusf/seaweedfs # use a remote image ports: - - 8888:8888 - - 18888:18888 - command: 'filer -master="master:9333"' + - 8888:8888 + - 18888:18888 + - 9326:9326 + command: 'filer -master="master:9333" -metricsPort=9326' tty: true stdin_open: true depends_on: - - master - - volume + - master + - volume cronjob: image: chrislusf/seaweedfs # use a remote image command: 'cronjob' @@ -34,14 +36,33 @@ services: CRON_SCHEDULE: '*/2 * * * * *' # Default: '*/5 * * * * *' WEED_MASTER: master:9333 # Default: localhost:9333 depends_on: - - master - - volume + - master + - volume s3: image: chrislusf/seaweedfs # use a remote image ports: - - 8333:8333 - command: 's3 -filer="filer:8888"' + - 8333:8333 + - 9327:9327 + command: 's3 -filer="filer:8888" -metricsPort=9327' depends_on: - - master - - volume - - filer + - master + - volume + - filer + webdav: + image: chrislusf/seaweedfs # use a remote image + ports: + - 7333:7333 + command: 'webdav -filer="filer:8888"' + depends_on: + - master + - volume + - filer + prometheus: + image: prom/prometheus:v2.21.0 + ports: + - 9000:9090 + volumes: + - ./prometheus:/etc/prometheus + command: --web.enable-lifecycle --config.file=/etc/prometheus/prometheus.yml + depends_on: + - s3 diff --git a/docker/seaweedfs-dev-compose.yml b/docker/seaweedfs-dev-compose.yml new file mode 100644 index 000000000..2382fb17d --- /dev/null +++ b/docker/seaweedfs-dev-compose.yml @@ -0,0 +1,44 @@ +version: '2' + +services: + master: + image: chrislusf/seaweedfs:dev # use a remote dev image + ports: + - 9333:9333 + - 19333:19333 + command: "master -ip=master" + volume: + image: chrislusf/seaweedfs:dev # use a remote dev image + ports: + - 8080:8080 + - 18080:18080 + command: 'volume -mserver="master:9333" -port=8080 -ip=volume' + depends_on: + - master + filer: + image: chrislusf/seaweedfs:dev # use a remote dev image + ports: + - 8888:8888 + - 18888:18888 + command: 'filer -master="master:9333"' + depends_on: + - master + - volume + s3: + image: chrislusf/seaweedfs:dev # use a remote dev image + ports: + - 8333:8333 + command: 's3 -filer="filer:8888"' + depends_on: + - master + - volume + - filer + webdav: + image: chrislusf/seaweedfs:dev # use a remote dev image + ports: + - 7333:7333 + command: 'webdav -filer="filer:8888"' + depends_on: + - master + - 
volume + - filer diff --git a/docker/seaweedfs.sql b/docker/seaweedfs.sql new file mode 100644 index 000000000..38ebc575c --- /dev/null +++ b/docker/seaweedfs.sql @@ -0,0 +1,12 @@ +CREATE DATABASE IF NOT EXISTS seaweedfs; +CREATE USER IF NOT EXISTS 'seaweedfs'@'%' IDENTIFIED BY 'secret'; +GRANT ALL PRIVILEGES ON seaweedfs.* TO 'seaweedfs'@'%'; +FLUSH PRIVILEGES; +USE seaweedfs; +CREATE TABLE IF NOT EXISTS filemeta ( + dirhash BIGINT COMMENT 'first 64 bits of MD5 hash value of directory field', + name VARCHAR(1000) COMMENT 'directory or file name', + directory TEXT COMMENT 'full path to parent directory', + meta LONGBLOB, + PRIMARY KEY (dirhash, name) +) DEFAULT CHARSET=utf8; \ No newline at end of file diff --git a/go.mod b/go.mod index 8fab3bb84..70bc33070 100644 --- a/go.mod +++ b/go.mod @@ -3,97 +3,106 @@ module github.com/chrislusf/seaweedfs go 1.12 require ( - cloud.google.com/go v0.44.3 + cloud.google.com/go v0.58.0 // indirect + cloud.google.com/go/pubsub v1.3.1 + cloud.google.com/go/storage v1.9.0 + github.com/Azure/azure-amqp-common-go/v2 v2.1.0 // indirect github.com/Azure/azure-pipeline-go v0.2.2 // indirect - github.com/Azure/azure-storage-blob-go v0.8.0 - github.com/DataDog/zstd v1.4.1 // indirect + github.com/Azure/azure-storage-blob-go v0.9.0 + github.com/OneOfOne/xxhash v1.2.2 github.com/Shopify/sarama v1.23.1 - github.com/aws/aws-sdk-go v1.23.13 - github.com/chrislusf/raft v0.0.0-20190225081310-10d6e2182d92 - github.com/coreos/etcd v3.3.15+incompatible // indirect + github.com/aws/aws-sdk-go v1.34.30 + github.com/buraksezer/consistent v0.0.0-20191006190839-693edf70fd72 + github.com/bwmarrin/snowflake v0.3.0 + github.com/cespare/xxhash v1.1.0 + github.com/chrislusf/raft v1.0.6 github.com/coreos/go-semver v0.3.0 // indirect - github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f // indirect github.com/dgrijalva/jwt-go v3.2.0+incompatible - github.com/disintegration/imaging v1.6.1 + github.com/disintegration/imaging v1.6.2 github.com/dustin/go-humanize v1.0.0 github.com/eapache/go-resiliency v1.2.0 // indirect github.com/facebookgo/clock v0.0.0-20150410010913-600d898af40a + github.com/facebookgo/ensure v0.0.0-20200202191622-63f1cf65ac4c // indirect + github.com/facebookgo/stack v0.0.0-20160209184415-751773369052 // indirect github.com/facebookgo/stats v0.0.0-20151006221625-1b76add642e4 + github.com/facebookgo/subset v0.0.0-20200203212716-c811ad88dec4 // indirect + github.com/fclairamb/ftpserverlib v0.8.0 github.com/frankban/quicktest v1.7.2 // indirect - github.com/gabriel-vasile/mimetype v1.0.0 - github.com/go-redis/redis v6.15.2+incompatible - github.com/go-sql-driver/mysql v1.4.1 + github.com/go-errors/errors v1.1.1 // indirect + github.com/go-redis/redis/v8 v8.4.4 + github.com/go-sql-driver/mysql v1.5.0 github.com/gocql/gocql v0.0.0-20190829130954-e163eff7a8c6 github.com/gogo/protobuf v1.2.2-0.20190730201129-28a6bbf47e48 // indirect - github.com/golang/protobuf v1.3.2 + github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e + github.com/golang/protobuf v1.4.2 github.com/google/btree v1.0.0 github.com/google/uuid v1.1.1 - github.com/gorilla/mux v1.7.3 + github.com/gorilla/mux v1.7.4 github.com/gorilla/websocket v1.4.1 // indirect + github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4 github.com/grpc-ecosystem/grpc-gateway v1.11.0 // indirect - github.com/hashicorp/golang-lru v0.5.3 // indirect - github.com/jacobsa/daemonize v0.0.0-20160101105449-e460293e890f github.com/jcmturner/gofork v1.0.0 // indirect - 
github.com/karlseguin/ccache v2.0.3+incompatible - github.com/karlseguin/expect v1.0.1 // indirect + github.com/jinzhu/copier v0.2.8 + github.com/json-iterator/go v1.1.10 + github.com/karlseguin/ccache v2.0.3+incompatible // indirect + github.com/karlseguin/ccache/v2 v2.0.7 + github.com/klauspost/compress v1.10.9 // indirect github.com/klauspost/cpuid v1.2.1 // indirect github.com/klauspost/crc32 v1.2.0 github.com/klauspost/reedsolomon v1.9.2 - github.com/konsorten/go-windows-terminal-sequences v1.0.2 // indirect github.com/kurin/blazer v0.5.3 - github.com/lib/pq v1.2.0 + github.com/lib/pq v1.10.0 + github.com/lunixbochs/vtclean v1.0.0 // indirect github.com/magiconair/properties v1.8.1 // indirect - github.com/mattn/go-ieproxy v0.0.0-20190805055040-f9202b1cfdeb // indirect + github.com/mattn/go-colorable v0.1.2 // indirect github.com/mattn/go-runewidth v0.0.4 // indirect - github.com/nats-io/nats-server/v2 v2.0.4 // indirect - github.com/onsi/ginkgo v1.10.1 // indirect - github.com/onsi/gomega v1.7.0 // indirect - github.com/opentracing/opentracing-go v1.1.0 // indirect - github.com/pelletier/go-toml v1.4.0 // indirect + github.com/olivere/elastic/v7 v7.0.19 github.com/peterh/liner v1.1.0 github.com/pierrec/lz4 v2.2.7+incompatible // indirect - github.com/pingcap/kvproto v0.0.0-20191022073741-81b2a2d9e986 // indirect - github.com/pingcap/tidb v1.1.0-beta.0.20191023070859-58fc7d44f73b - github.com/pingcap/tipb v0.0.0-20191022094114-a2e8c3fa634b // indirect - github.com/prometheus/client_golang v1.1.0 - github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4 // indirect - github.com/prometheus/procfs v0.0.4 // indirect - github.com/rakyll/statik v0.1.6 + github.com/prometheus/client_golang v1.3.0 + github.com/rakyll/statik v0.1.7 github.com/rcrowley/go-metrics v0.0.0-20190826022208-cac0b30c2563 // indirect - github.com/remyoudompheng/bigfft v0.0.0-20190728182440-6a916e37a237 // indirect - github.com/rwcarlsen/goexif v0.0.0-20190401172101-9e8deecbddbd - github.com/seaweedfs/fuse v0.0.0-20190510212405-310228904eff - github.com/sirupsen/logrus v1.4.2 // indirect + github.com/seaweedfs/fuse v1.1.4 + github.com/seaweedfs/goexif v1.0.2 + github.com/skip2/go-qrcode v0.0.0-20200617195104-da1b6568686e github.com/spaolacci/murmur3 v1.1.0 // indirect - github.com/spf13/afero v1.2.2 // indirect github.com/spf13/jwalterweatherman v1.1.0 // indirect github.com/spf13/viper v1.4.0 - github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271 // indirect - github.com/stretchr/testify v1.4.0 // indirect + github.com/streadway/amqp v0.0.0-20200108173154-1c71cc93ed71 + github.com/stretchr/testify v1.6.1 github.com/syndtr/goleveldb v1.0.0 + github.com/tecbot/gorocksdb v0.0.0-20191217155057-f0fad39f321c github.com/tidwall/gjson v1.3.2 github.com/tidwall/match v1.0.1 - github.com/uber-go/atomic v1.4.0 // indirect - github.com/uber/jaeger-client-go v2.17.0+incompatible // indirect - github.com/uber/jaeger-lib v2.0.0+incompatible // indirect + github.com/tsuna/gohbase v0.0.0-20201125011725-348991136365 + github.com/valyala/bytebufferpool v1.0.0 + github.com/valyala/fasthttp v1.20.0 + github.com/viant/assertly v0.5.4 // indirect + github.com/viant/ptrie v0.3.0 + github.com/viant/toolbox v0.33.2 // indirect github.com/willf/bitset v1.1.10 // indirect github.com/willf/bloom v2.0.3+incompatible github.com/wsxiaoys/terminal v0.0.0-20160513160801-0940f3fc43a0 // indirect go.etcd.io/etcd v3.3.15+incompatible - gocloud.dev v0.16.0 - gocloud.dev/pubsub/natspubsub v0.16.0 - gocloud.dev/pubsub/rabbitpubsub 
v0.16.0 - golang.org/x/image v0.0.0-20190829233526-b3c06291d021 // indirect - golang.org/x/net v0.0.0-20190909003024-a7b16738d86b - golang.org/x/sys v0.0.0-20190910064555-bbd175535a8b - golang.org/x/tools v0.0.0-20190911022129-16c5e0f7d110 - google.golang.org/api v0.9.0 - google.golang.org/appengine v1.6.2 // indirect - google.golang.org/grpc v1.23.0 + go.mongodb.org/mongo-driver v1.3.2 + gocloud.dev v0.20.0 + gocloud.dev/pubsub/natspubsub v0.20.0 + gocloud.dev/pubsub/rabbitpubsub v0.20.0 + golang.org/x/image v0.0.0-20200119044424-58c23975cae1 // indirect + golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb + golang.org/x/sync v0.0.0-20210220032951-036812b2e83c + golang.org/x/sys v0.0.0-20201022201747-fb209a7c41cd + golang.org/x/tools v0.0.0-20200608174601-1b747fd94509 + google.golang.org/api v0.26.0 + google.golang.org/grpc v1.29.1 + google.golang.org/protobuf v1.24.0 + gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 // indirect gopkg.in/jcmturner/goidentity.v3 v3.0.0 // indirect gopkg.in/jcmturner/gokrb5.v7 v7.3.0 // indirect gopkg.in/karlseguin/expect.v1 v1.0.1 // indirect - sigs.k8s.io/yaml v1.1.0 // indirect ) -replace github.com/satori/go.uuid v1.2.0 => github.com/satori/go.uuid v0.0.0-20181028125025-b2ce2384e17b +// replace github.com/seaweedfs/fuse => /Users/chris/go/src/github.com/seaweedfs/fuse +// replace github.com/chrislusf/raft => /Users/chris/go/src/github.com/chrislusf/raft + +replace go.etcd.io/etcd => go.etcd.io/etcd v0.5.0-alpha.5.0.20200425165423-262c93980547 diff --git a/go.sum b/go.sum index d16280568..0409b1ae1 100644 --- a/go.sum +++ b/go.sum @@ -1,18 +1,54 @@ +bazil.org/fuse v0.0.0-20180421153158-65cc252bf669/go.mod h1:Xbm+BRKSBEpa4q4hTSxohYNQpsxXPbPry4JJWOB3LB8= cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= cloud.google.com/go v0.39.0/go.mod h1:rVLT6fkc8chs9sfPtFc1SBH6em7n+ZoXaG+87tDISts= cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= +cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= cloud.google.com/go v0.44.3 h1:0sMegbmn/8uTwpNkB0q9cLEpZ2W5a6kl+wtBQgPWBJQ= cloud.google.com/go v0.44.3/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= +cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= +cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= +cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= +cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= +cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= +cloud.google.com/go v0.55.0/go.mod h1:ZHmoY+/lIMNkN2+fBmuTiqZ4inFhvQad8ft7MT8IV5Y= +cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= +cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= +cloud.google.com/go v0.58.0 h1:vtAfVc723K3xKq1BQydk/FyCldnaNFhGhpJxaJzgRMQ= +cloud.google.com/go v0.58.0/go.mod h1:W+9FnSUw6nhVwXlFcp1eL+krq5+HQUJeUogSeJZZiWg= +cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= +cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= +cloud.google.com/go/bigquery v1.4.0/go.mod 
h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= +cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= +cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= +cloud.google.com/go/bigquery v1.8.0 h1:PQcPefKFdaIzjQFbiyOgAqyx8q5djaE7x9Sqe712DPA= +cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= +cloud.google.com/go/datastore v1.1.0 h1:/May9ojXjRkPBNVrq+oWLqmWCkr4OU5uRY29bu0mRyQ= +cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +cloud.google.com/go/firestore v1.2.0/go.mod h1:iISCjWnTpnoJT1R287xRdjvQHJrxQOpeah4phb5D3h0= +cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= +cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= +cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= +cloud.google.com/go/pubsub v1.3.1 h1:ukjixP1wl0LpnZ6LWtZJ0mX5tBmjp1f8Sqer8Z2OMUU= +cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= +cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= +cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= +cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= +cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= +cloud.google.com/go/storage v1.9.0 h1:oXnZyBjHB6hC8TnSle0AWW6pGJ29EuSo5ww+SFmdNBg= +cloud.google.com/go/storage v1.9.0/go.mod h1:m+/etGaqZbylxaNT876QGXqEHp4PR2Rq5GMqICWb9bU= contrib.go.opencensus.io/exporter/aws v0.0.0-20181029163544-2befc13012d0/go.mod h1:uu1P0UCM/6RbsMrgPa98ll8ZcHM858i/AD06a9aLRCA= contrib.go.opencensus.io/exporter/ocagent v0.5.0 h1:TKXjQSRS0/cCDrP7KvkgU6SmILtF/yV2TOs/02K/WZQ= contrib.go.opencensus.io/exporter/ocagent v0.5.0/go.mod h1:ImxhfLRpxoYiSq891pBrLVhN+qmP8BTVvdH2YLs7Gl0= contrib.go.opencensus.io/exporter/stackdriver v0.12.1/go.mod h1:iwB6wGarfphGGe/e5CWqyUk/cLzKnWsOKPVW3no6OTw= contrib.go.opencensus.io/integrations/ocsql v0.1.4/go.mod h1:8DsSdjz3F+APR+0z0WkU1aRorQCFfRxvqjUUPMbF3fE= contrib.go.opencensus.io/resource v0.1.1/go.mod h1:F361eGI91LCmW1I/Saf+rX0+OFcigGlFvXwEGEnkRLA= +dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= github.com/Azure/azure-amqp-common-go/v2 v2.1.0/go.mod h1:R8rea+gJRuJR6QxTir/XuEd+YuKoUiazDC/N96FiDEU= +github.com/Azure/azure-amqp-common-go/v3 v3.0.0/go.mod h1:SY08giD/XbhTz07tJdpw1SoxQXHPN30+DI3Z04SYqyg= github.com/Azure/azure-pipeline-go v0.1.8/go.mod h1:XA1kFWRVhSK+KNFiOhfv83Fv8L9achrP7OxIzeTn1Yg= github.com/Azure/azure-pipeline-go v0.1.9/go.mod h1:XA1kFWRVhSK+KNFiOhfv83Fv8L9achrP7OxIzeTn1Yg= github.com/Azure/azure-pipeline-go v0.2.1 h1:OLBdZJ3yvOn2MezlWvbrBMTEUQC72zAftRZOMdj5HYo= @@ -21,35 +57,78 @@ github.com/Azure/azure-pipeline-go v0.2.2 h1:6oiIS9yaG6XCCzhgAgKFfIWyo4LLCiDhZot github.com/Azure/azure-pipeline-go v0.2.2/go.mod h1:4rQ/NZncSvGqNkkOsNpOU1tgoNuIlp9AfUH5G1tvCHc= github.com/Azure/azure-sdk-for-go v29.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= github.com/Azure/azure-sdk-for-go v30.1.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= +github.com/Azure/azure-sdk-for-go v37.1.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= 
github.com/Azure/azure-service-bus-go v0.9.1/go.mod h1:yzBx6/BUGfjfeqbRZny9AQIbIe3AcV9WZbAdpkoXOa0= +github.com/Azure/azure-service-bus-go v0.10.1/go.mod h1:E/FOceuKAFUfpbIJDKWz/May6guE+eGibfGT6q+n1to= github.com/Azure/azure-storage-blob-go v0.6.0/go.mod h1:oGfmITT1V6x//CswqY2gtAHND+xIP64/qL7a5QJix0Y= github.com/Azure/azure-storage-blob-go v0.8.0 h1:53qhf0Oxa0nOjgbDeeYPUeyiNmafAFEY95rZLK0Tj6o= github.com/Azure/azure-storage-blob-go v0.8.0/go.mod h1:lPI3aLPpuLTeUwh1sViKXFxwl2B6teiRqI0deQUvsw0= +github.com/Azure/azure-storage-blob-go v0.9.0 h1:kORqvzXP8ORhKbW13FflGUaSE5CMyDWun9UwMxY8gPs= +github.com/Azure/azure-storage-blob-go v0.9.0/go.mod h1:8UBPbiOhrMQ4pLPi3gA1tXnpjrS76UYE/fo5A40vf4g= +github.com/Azure/go-amqp v0.12.6/go.mod h1:qApuH6OFTSKZFmCOxccvAv5rLizBQf4v8pRmG138DPo= +github.com/Azure/go-amqp v0.12.7/go.mod h1:qApuH6OFTSKZFmCOxccvAv5rLizBQf4v8pRmG138DPo= github.com/Azure/go-autorest v12.0.0+incompatible h1:N+VqClcomLGD/sHb3smbSYYtNMgKpVV3Cd5r5i8z6bQ= github.com/Azure/go-autorest v12.0.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= +github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= +github.com/Azure/go-autorest/autorest v0.9.3 h1:OZEIaBbMdUE/Js+BQKlpO81XlISgipr6yDJ+PSwsgi4= +github.com/Azure/go-autorest/autorest v0.9.3/go.mod h1:GsRuLYvwzLjjjRoWEIyMUaYq8GNUx2nRB378IPt/1p0= +github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0= +github.com/Azure/go-autorest/autorest/adal v0.8.0/go.mod h1:Z6vX6WXXuyieHAXwMj0S6HY6e6wcHn37qQMBQlvY3lc= +github.com/Azure/go-autorest/autorest/adal v0.8.1/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q= +github.com/Azure/go-autorest/autorest/adal v0.8.3 h1:O1AGG9Xig71FxdX9HO5pGNyZ7TbSyHaVg+5eJO/jSGw= +github.com/Azure/go-autorest/autorest/adal v0.8.3/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q= +github.com/Azure/go-autorest/autorest/azure/auth v0.4.2/go.mod h1:90gmfKdlmKgfjUpnCEpOJzsUEjrWDSLwHIG73tSXddM= +github.com/Azure/go-autorest/autorest/azure/cli v0.3.1/go.mod h1:ZG5p860J94/0kI9mNJVoIoLgXcirM2gF5i2kWloofxw= +github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA= +github.com/Azure/go-autorest/autorest/date v0.2.0/go.mod h1:vcORJHLJEh643/Ioh9+vPmf1Ij9AEBM5FuBIXLmIy0g= +github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= +github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= +github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM= +github.com/Azure/go-autorest/autorest/to v0.3.0/go.mod h1:MgwOyqaIuKdG4TL/2ywSsIWKAfJfgHDo8ObuUk3t5sA= +github.com/Azure/go-autorest/autorest/validation v0.2.0/go.mod h1:3EEqHnBxQGHXRYq3HT1WyXAvT7LLY3tl70hw6tQIbjI= +github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc= +github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/DataDog/zstd v1.3.6-0.20190409195224-796139022798 h1:2T/jmrHeTezcCM58lvEQXs0UpQJCo5SoGAcg+mbSTIg= github.com/DataDog/zstd v1.3.6-0.20190409195224-796139022798/go.mod 
h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo= -github.com/DataDog/zstd v1.4.1 h1:3oxKN3wbHibqx897utPC2LTQU4J+IHWWJO+glkAkpFM= -github.com/DataDog/zstd v1.4.1/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo= github.com/GoogleCloudPlatform/cloudsql-proxy v0.0.0-20190605020000-c4ba1fdf4d36/go.mod h1:aJ4qN3TfrelA6NZ6AXsXRfmEVaYin3EDbSPJrKS8OXo= +github.com/GoogleCloudPlatform/cloudsql-proxy v0.0.0-20191009163259-e802c2cb94ae/go.mod h1:mjwGPas4yKduTyubHvD1Atl9r1rUq8DfVy+gkVvZ+oo= +github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0= +github.com/OneOfOne/xxhash v1.2.2 h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= +github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= github.com/Shopify/sarama v1.23.1 h1:XxJBCZEoWJtoWjf/xRbmGUpAmTZGnuuF0ON0EvxxBrs= github.com/Shopify/sarama v1.23.1/go.mod h1:XLH1GYJnLVE0XCr6KdJGVJRTwY30moWNJ4sERjXX6fs= github.com/Shopify/toxiproxy v2.1.4+incompatible h1:TKdv8HiTLgE5wdJuEML90aBgNWsokNbMijUGhmcoBJc= github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= -github.com/StackExchange/wmi v0.0.0-20180725035823-b12b22c5341f h1:5ZfJxyXo8KyX8DgGXC5B7ILL8y51fci/qYz2B4j8iLY= -github.com/StackExchange/wmi v0.0.0-20180725035823-b12b22c5341f/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg= +github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g= +github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia65gfNATL8TAiHDNxPzPdmEL5uirI2Uyuz6c= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/andybalholm/brotli v1.0.0 h1:7UCwP93aiSfvWpapti8g88vVVGp2qqtGyePsSuDafo4= +github.com/andybalholm/brotli v1.0.0/go.mod h1:loMXtMfwqflxFJPmdbJO0a3KNoPuLBgiu3qAvBg8x/Y= +github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= +github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= +github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= +github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= +github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/aryann/difflib v0.0.0-20170710044230-e206f873d14a/go.mod h1:DAHtR1m6lCRdSC2Tm3DSWRPvIPr6xNKyeHdqDQSQT+A= +github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU= github.com/aws/aws-sdk-go v1.15.27/go.mod h1:mFuSZ37Z9YOHbQEwBWztmVzqXrEkub65tZoCYDt7FT0= github.com/aws/aws-sdk-go v1.19.18/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.19.45/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= -github.com/aws/aws-sdk-go v1.23.13 
h1:l/NG+mgQFRGG3dsFzEj0jw9JIs/zYdtU6MXhY1WIDmM= -github.com/aws/aws-sdk-go v1.23.13/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= +github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= +github.com/aws/aws-sdk-go v1.31.13/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0= +github.com/aws/aws-sdk-go v1.33.5 h1:p2fr1ryvNTU6avUWLI+/H7FGv0TBIjzVM5WDgXBBv4U= +github.com/aws/aws-sdk-go v1.33.5/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0= +github.com/aws/aws-sdk-go v1.34.30 h1:izATc/E0+HcT5YHmaQVjn7GHCoqaBxn0PGo6Zq5UNFA= +github.com/aws/aws-sdk-go v1.34.30/go.mod h1:H7NKnBqNVzoTJpGfLrQkkD+ytBA93eiDYi/+8rV9s48= +github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0 h1:HWo1m869IqiPhD389kmkxeTalrjNbbJTC8LXupb+sl0= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= @@ -58,60 +137,66 @@ github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6r github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= github.com/bitly/go-hostpool v0.0.0-20171023180738-a3a6125de932 h1:mXoPYz/Ul5HYEDvkta6I8/rnYM5gSdSV2tJ6XbZuEtY= github.com/bitly/go-hostpool v0.0.0-20171023180738-a3a6125de932/go.mod h1:NOuUCSz6Q9T7+igc/hlvDOUdtWKryOrtFyIVABv/p7k= -github.com/blacktear23/go-proxyprotocol v0.0.0-20180807104634-af7a81e8dd0d/go.mod h1:VKt7CNAQxpFpSDz3sXyj9hY/GbVsQCr0sB3w59nE7lU= github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869 h1:DDGfHa7BWjL4YnC6+E63dPcxHo2sUxDIu8g3QgEJdRY= github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4= +github.com/buraksezer/consistent v0.0.0-20191006190839-693edf70fd72 h1:fUmDBbSvv1uOzo/t8WaxZMVb7BxJ8JECo5lGoR9c5bA= +github.com/buraksezer/consistent v0.0.0-20191006190839-693edf70fd72/go.mod h1:OEE5igu/CDjGegM1Jn6ZMo7R6LlV/JChAkjfQQIRLpg= +github.com/bwmarrin/snowflake v0.3.0 h1:xm67bEhkKh6ij1790JB83OujPR5CzNe8QuQqAgISZN0= +github.com/bwmarrin/snowflake v0.3.0/go.mod h1:NdZxfVWX+oR6y2K0o6qAYv6gIOP9rjG0/E9WsDpxqwE= +github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ= +github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= github.com/census-instrumentation/opencensus-proto v0.2.0 h1:LzQXZOgg4CQfE6bFvXGM30YZL1WW/M337pXml+GrcZ4= github.com/census-instrumentation/opencensus-proto v0.2.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/census-instrumentation/opencensus-proto v0.2.1 h1:glEXhBS5PSLLv4IXzLA5yPRVX4bilULVyxxbrfOtDAk= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= -github.com/chrislusf/raft v0.0.0-20190225081310-10d6e2182d92 h1:lM9SFsh0EPXkyJyrTJqLZPAIJBtNFP6LNkYXu2MnSZI= -github.com/chrislusf/raft v0.0.0-20190225081310-10d6e2182d92/go.mod h1:4jyiUCD5y548+yKW+oiHtccBiMaLCCbFBpK2t7X4eUo= +github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY= +github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/chrislusf/raft v1.0.4 h1:THhbsVik2hxdE0/VXX834f64Wn9RzgVPp+E+XCWZdKM= 
+github.com/chrislusf/raft v1.0.4/go.mod h1:Ep5DP+mJSosjfKiix1uU7Lc2Df/SX4oGJEpZlXH5l68= +github.com/chrislusf/raft v1.0.5 h1:g8GxKCSStfm0/bGBDpNEbmEXL6MJkpXX+NI0ksbX5D4= +github.com/chrislusf/raft v1.0.5/go.mod h1:Ep5DP+mJSosjfKiix1uU7Lc2Df/SX4oGJEpZlXH5l68= +github.com/chrislusf/raft v1.0.6 h1:wunb85WWhMKhNRn7EmdIw35D4Lmew0ZJv8oYDizR/+Y= +github.com/chrislusf/raft v1.0.6/go.mod h1:Ep5DP+mJSosjfKiix1uU7Lc2Df/SX4oGJEpZlXH5l68= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= -github.com/chzyer/readline v0.0.0-20171208011716-f6d7a1f6fbf3/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd h1:qMd81Ts1T2OTKmB4acZcyKaMtRnY5Y44NuXGX2GFJ1w= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa h1:OaNxuTZr7kxeODyLWsRMC+OD03aFUH+mW6r2d+MWa5Y= +github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= github.com/coreos/bbolt v1.3.2 h1:wZwiHHUieZCquLkDL0B8UhzreNWsPHooDAG3q34zk0s= github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= -github.com/coreos/bbolt v1.3.3 h1:n6AiVyVRKQFNb6mJlwESEvvLoDyiTzXX7ORAUlkeBdY= -github.com/coreos/bbolt v1.3.3/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= github.com/coreos/etcd v3.3.10+incompatible h1:jFneRYjIvLMLhDLCzuTuU4rSJUjRplcJQ7pD7MnhC04= github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= -github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= -github.com/coreos/etcd v3.3.15+incompatible h1:+9RjdC18gMxNQVvSiXvObLu29mOFmkgdsB4cRTlV+EE= -github.com/coreos/etcd v3.3.15+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/go-semver v0.2.0 h1:3Jm3tLmsgAYcjC+4Up7hJrFBPr+n7rAqYeSw/SZazuY= github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM= github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= -github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/coreos/go-systemd v0.0.0-20181031085051-9002847aa142/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e h1:Wf6HqHfScWJN9/ZjdUKyjop4mf3Qdd+1TvvltAvM3m8= github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f h1:JOrtw2xFKzlg+cbHpyrpLDmnN1HqhBfnX7WDiW7eG2c= -github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= 
-github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
+github.com/coreos/go-systemd/v22 v22.0.0 h1:XJIw/+VlJ+87J+doOxznsAWIdmWuViOVhkQamW5YV28=
+github.com/coreos/go-systemd/v22 v22.0.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk=
 github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f h1:lBNOc5arjvs8E5mO2tbpBpLoyyu8B6e44T7hJy6potg=
 github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
-github.com/cznic/mathutil v0.0.0-20181122101859-297441e03548 h1:iwZdTE0PVqJCos1vaoKsclOGD3ADKpshg3SRtYBbwso=
-github.com/cznic/mathutil v0.0.0-20181122101859-297441e03548/go.mod h1:e6NPNENfs9mPDVNRekM7lKScauxd5kXTr1Mfyig6TDM=
-github.com/cznic/sortutil v0.0.0-20150617083342-4c7342852e65 h1:hxuZop6tSoOi0sxFzoGGYdRqNrPubyaIf9KoBG9tPiE=
-github.com/cznic/sortutil v0.0.0-20150617083342-4c7342852e65/go.mod h1:q2w6Bg5jeox1B+QkJ6Wp/+Vn0G/bo3f1uY7Fn3vivIQ=
+github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
+github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
 github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/devigned/tab v0.1.1/go.mod h1:XG9mPq0dFghrYvoBF3xdRrJzSTX1b7IQrvaL9mzjeJY=
 github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM=
 github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
-github.com/dgryski/go-farm v0.0.0-20190104051053-3adb47b1fb0f h1:dDxpBYafY/GYpcl+LS4Bn3ziLPuEdGRkRjYAbSlWxSA=
-github.com/dgryski/go-farm v0.0.0-20190104051053-3adb47b1fb0f/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw=
+github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78=
+github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc=
 github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
 github.com/dimchansky/utfbom v1.1.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQvIirEdv+8=
-github.com/disintegration/imaging v1.6.1 h1:JnBbK6ECIZb1NsWIikP9pd8gIlTIRx7fuDNpU9fsxOE=
-github.com/disintegration/imaging v1.6.1/go.mod h1:xuIt+sRxDFrHS0drzXUlCJthkJ8k7lkkUojDSR247MQ=
+github.com/disintegration/imaging v1.6.2 h1:w1LecBlG2Lnp8B3jk5zSuNqd7b4DXhcjwek1ei82L+c=
+github.com/disintegration/imaging v1.6.2/go.mod h1:44/5580QXChDfwIclfc/PCwrr44amcmDAg8hxG0Ewe4=
 github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
-github.com/dustin/go-humanize v0.0.0-20180421182945-02af3965c54e/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
 github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo=
 github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
 github.com/eapache/go-resiliency v1.1.0 h1:1NtRmCAqadE2FN4ZcN6g90TP3uk8cg9rn9eNK2197aU=
@@ -122,43 +207,91 @@ github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21 h1:YEetp8
 github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU=
 github.com/eapache/queue v1.1.0 h1:YOEu7KNc61ntiQlcEeUIoDTJ2o8mQznoNvUhiigpIqc=
 github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I=
-github.com/eknkc/amber v0.0.0-20171010120322-cdade1c07385 h1:clC1lXBpe2kTj2VHdaIu9ajZQe4kcEY9j0NsnDDBZ3o=
-github.com/eknkc/amber v0.0.0-20171010120322-cdade1c07385/go.mod h1:0vRUJqYpeSZifjYj7uP3BG/gKcuzL9xWVV/Y+cK33KM=
+github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M=
+github.com/envoyproxy/go-control-plane v0.6.9/go.mod h1:SBwIajubJHhxtWwsL9s8ss4safvEdbitLhGGK48rN6g=
+github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
+github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
+github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
+github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
 github.com/facebookgo/clock v0.0.0-20150410010913-600d898af40a h1:yDWHCSQ40h88yih2JAcL6Ls/kVkSE8GFACTGVnMPruw=
 github.com/facebookgo/clock v0.0.0-20150410010913-600d898af40a/go.mod h1:7Ga40egUymuWXxAe151lTNnCv97MddSOVsjpPPkityA=
+github.com/facebookgo/ensure v0.0.0-20200202191622-63f1cf65ac4c h1:8ISkoahWXwZR41ois5lSJBSVw4D0OV19Ht/JSTzvSv0=
+github.com/facebookgo/ensure v0.0.0-20200202191622-63f1cf65ac4c/go.mod h1:Yg+htXGokKKdzcwhuNDwVvN+uBxDGXJ7G/VN1d8fa64=
+github.com/facebookgo/stack v0.0.0-20160209184415-751773369052 h1:JWuenKqqX8nojtoVVWjGfOF9635RETekkoH6Cc9SX0A=
+github.com/facebookgo/stack v0.0.0-20160209184415-751773369052/go.mod h1:UbMTZqLaRiH3MsBH8va0n7s1pQYcu3uTb8G4tygF4Zg=
 github.com/facebookgo/stats v0.0.0-20151006221625-1b76add642e4 h1:0YtRCqIZs2+Tz49QuH6cJVw/IFqzo39gEqZ0iYLxD2M=
 github.com/facebookgo/stats v0.0.0-20151006221625-1b76add642e4/go.mod h1:vsJz7uE339KUCpBXx3JAJzSRH7Uk4iGGyJzR529qDIA=
+github.com/facebookgo/subset v0.0.0-20200203212716-c811ad88dec4 h1:7HZCaLC5+BZpmbhCOZJ293Lz68O7PYrF2EzeiFMwCLk=
+github.com/facebookgo/subset v0.0.0-20200203212716-c811ad88dec4/go.mod h1:5tD+neXqOorC30/tWg0LCSkrqj/AR6gu8yY8/fpw1q0=
 github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
+github.com/fclairamb/ftpserverlib v0.8.0 h1:ZsWUQ8Vg3Y8LIWRUAzVnFXY982Yztz2odDdK/UVJtik=
+github.com/fclairamb/ftpserverlib v0.8.0/go.mod h1:xF4cy07oCHA9ZorKehsFGqA/1UHYaonmqHK2g3P1X8U=
 github.com/fortytw2/leaktest v1.2.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g=
+github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw=
 github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g=
+github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4=
+github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20=
 github.com/frankban/quicktest v1.7.2 h1:2QxQoC1TS09S7fhCPsrvqYdvP1H5M1P1ih5ABm3BTYk=
 github.com/frankban/quicktest v1.7.2/go.mod h1:jaStnuzAqU1AJdCO0l53JDCJrVDKcS03DbaAcR7Ks/o=
 github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
 github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
-github.com/gabriel-vasile/mimetype v0.3.17 h1:NGWgggJJqTofUcTV1E7hkk2zVjZ54EfJa1z5O3z6By4=
-github.com/gabriel-vasile/mimetype v0.3.17/go.mod h1:kMJbg3SlWZCsj4R73F1WDzbT9AyGCOVmUtIxxwO5pmI=
-github.com/gabriel-vasile/mimetype v1.0.0 h1:0QKnAQQhG6oOsb4GK7iPlet7RtjHi9us8RF/nXoTxhI=
-github.com/gabriel-vasile/mimetype v1.0.0/go.mod h1:6CDPel/o/3/s4+bp6kIbsWATq8pmgOisOPG40CJa6To=
+github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4=
+github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
 github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk=
 github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
-github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32/go.mod h1:GIjDIg/heH5DOkXY3YJ/wNhfHsQHoXGjl8G8amsYQ1I=
+github.com/go-errors/errors v1.1.1 h1:ljK/pL5ltg3qoN+OtN6yCv9HWSfMwxSx90GJCZQxYNg=
+github.com/go-errors/errors v1.1.1/go.mod h1:psDX2osz5VnTOnFWbDeWwS7yejl+uV3FEWEp4lssFEs=
+github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
+github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
+github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
 github.com/go-ini/ini v1.25.4/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8=
 github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
+github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
+github.com/go-kit/kit v0.10.0 h1:dXFJfIHVvUcpSgDOV+Ne6t7jXri8Tfv2uOLHUZ2XNuo=
+github.com/go-kit/kit v0.10.0/go.mod h1:xUsJbQ/Fp4kEt7AFgCuvyX4a71u8h9jB8tj/ORgOZ7o=
 github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
 github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
-github.com/go-ole/go-ole v1.2.1 h1:2lOsA72HgjxAuMlKpFiCbHTvu44PIVkZ5hqm3RSdI/E=
-github.com/go-ole/go-ole v1.2.1/go.mod h1:7FAglXiTm7HKlQRDeOQ6ZNUHidzCWXuZWq/1dTyBNF8=
-github.com/go-playground/overalls v0.0.0-20180201144345-22ec1a223b7c/go.mod h1:UqxAgEOt89sCiXlrc/ycnx00LVvUO/eS8tMUkWX4R7w=
-github.com/go-redis/redis v6.15.2+incompatible h1:9SpNVG76gr6InJGxoZ6IuuxaCOQwDAhzyXg+Bs+0Sb4=
-github.com/go-redis/redis v6.15.2+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8wamY7mA7PouImQ2Jvg6kA=
-github.com/go-sql-driver/mysql v0.0.0-20170715192408-3955978caca4/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
+github.com/go-logfmt/logfmt v0.5.0 h1:TrB8swr/68K7m9CcGut2g3UOihhbcbiMAYiuTXdEih4=
+github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
+github.com/go-redis/redis/v8 v8.4.4 h1:fGqgxCTR1sydaKI00oQf3OmkU/DIe/I/fYXvGklCIuc=
+github.com/go-redis/redis/v8 v8.4.4/go.mod h1:nA0bQuF0i5JFx4Ta9RZxGKXFrQ8cRWntra97f0196iY=
+github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
 github.com/go-sql-driver/mysql v1.4.1 h1:g24URVg0OFbNUTx9qqY1IRZ9D9z3iPyi5zKhQZpNwpA=
 github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
+github.com/go-sql-driver/mysql v1.5.0 h1:ozyZYNQW3x3HtqT1jira07DN2PArx2v7/mN66gGcHOs=
+github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
+github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk=
 github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
+github.com/go-zookeeper/zk v1.0.2 h1:4mx0EYENAdX/B/rbunjlt5+4RTA/a9SMHBRuSKdGxPM=
+github.com/go-zookeeper/zk v1.0.2/go.mod h1:nOB03cncLtlp4t+UAkGSV+9beXP/akpekBwL+UX1Qcw=
+github.com/gobuffalo/attrs v0.0.0-20190224210810-a9411de4debd/go.mod h1:4duuawTqi2wkkpB4ePgWMaai6/Kc6WEz83bhFwpHzj0=
+github.com/gobuffalo/depgen v0.0.0-20190329151759-d478694a28d3/go.mod h1:3STtPUQYuzV0gBVOY3vy6CfMm/ljR4pABfrTeHNLHUY=
+github.com/gobuffalo/depgen v0.1.0/go.mod h1:+ifsuy7fhi15RWncXQQKjWS9JPkdah5sZvtHc2RXGlg=
+github.com/gobuffalo/envy v1.6.15/go.mod h1:n7DRkBerg/aorDM8kbduw5dN3oXGswK5liaSCx4T5NI=
+github.com/gobuffalo/envy v1.7.0/go.mod h1:n7DRkBerg/aorDM8kbduw5dN3oXGswK5liaSCx4T5NI=
+github.com/gobuffalo/flect v0.1.0/go.mod h1:d2ehjJqGOH/Kjqcoz+F7jHTBbmDb38yXA598Hb50EGs=
+github.com/gobuffalo/flect v0.1.1/go.mod h1:8JCgGVbRjJhVgD6399mQr4fx5rRfGKVzFjbj6RE/9UI=
+github.com/gobuffalo/flect v0.1.3/go.mod h1:8JCgGVbRjJhVgD6399mQr4fx5rRfGKVzFjbj6RE/9UI=
+github.com/gobuffalo/genny v0.0.0-20190329151137-27723ad26ef9/go.mod h1:rWs4Z12d1Zbf19rlsn0nurr75KqhYp52EAGGxTbBhNk=
+github.com/gobuffalo/genny v0.0.0-20190403191548-3ca520ef0d9e/go.mod h1:80lIj3kVJWwOrXWWMRzzdhW3DsrdjILVil/SFKBzF28=
+github.com/gobuffalo/genny v0.1.0/go.mod h1:XidbUqzak3lHdS//TPu2OgiFB+51Ur5f7CSnXZ/JDvo=
+github.com/gobuffalo/genny v0.1.1/go.mod h1:5TExbEyY48pfunL4QSXxlDOmdsD44RRq4mVZ0Ex28Xk=
+github.com/gobuffalo/gitgen v0.0.0-20190315122116-cc086187d211/go.mod h1:vEHJk/E9DmhejeLeNt7UVvlSGv3ziL+djtTr3yyzcOw=
+github.com/gobuffalo/gogen v0.0.0-20190315121717-8f38393713f5/go.mod h1:V9QVDIxsgKNZs6L2IYiGR8datgMhB577vzTDqypH360=
+github.com/gobuffalo/gogen v0.1.0/go.mod h1:8NTelM5qd8RZ15VjQTFkAW6qOMx5wBbW4dSCS3BY8gg=
+github.com/gobuffalo/gogen v0.1.1/go.mod h1:y8iBtmHmGc4qa3urIyo1shvOD8JftTtfcKi+71xfDNE=
+github.com/gobuffalo/logger v0.0.0-20190315122211-86e12af44bc2/go.mod h1:QdxcLw541hSGtBnhUc4gaNIXRjiDppFGaDqzbrBd3v8=
+github.com/gobuffalo/mapi v1.0.1/go.mod h1:4VAGh89y6rVOvm5A8fKFxYG+wIW6LO1FMTG9hnKStFc=
+github.com/gobuffalo/mapi v1.0.2/go.mod h1:4VAGh89y6rVOvm5A8fKFxYG+wIW6LO1FMTG9hnKStFc=
+github.com/gobuffalo/packd v0.0.0-20190315124812-a385830c7fc0/go.mod h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWeG2RIxq4=
+github.com/gobuffalo/packd v0.1.0/go.mod h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWeG2RIxq4=
+github.com/gobuffalo/packr/v2 v2.0.9/go.mod h1:emmyGweYTm6Kdper+iywB6YK5YzuKchGtJQZ0Odn4pQ=
+github.com/gobuffalo/packr/v2 v2.2.0/go.mod h1:CaAwI0GPIAv+5wKLtv8Afwl+Cm78K/I/VCm/3ptBN+0=
+github.com/gobuffalo/syncx v0.0.0-20190224160051-33c29581e754/go.mod h1:HhnNqWY95UYwwW3uSASeV7vtgYkT2t16hJgV3AEPUpw=
 github.com/gocql/gocql v0.0.0-20190829130954-e163eff7a8c6 h1:P66kRWyEoIx6URKgAC3ijx9jo9gEid7bEhLQ/Z0G65A=
 github.com/gocql/gocql v0.0.0-20190829130954-e163eff7a8c6/go.mod h1:Q7Sru5153KG8D9zwueuQJB3ccJf9/bIwF/x8b3oKgT8=
-github.com/gogo/protobuf v0.0.0-20180717141946-636bf0302bc9/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
-github.com/gogo/protobuf v1.0.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
+github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
+github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s=
 github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
 github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
 github.com/gogo/protobuf v1.2.1 h1:/s5zKNz0uPFCZ5hddgPdo2TK2TVrUNMn0OOX8/aZMTE=
@@ -168,24 +301,40 @@ github.com/gogo/protobuf v1.2.2-0.20190730201129-28a6bbf47e48/go.mod h1:SlYgWuQ5
 github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58=
 github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
 github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
-github.com/golang/groupcache v0.0.0-20181024230925-c65c006176ff/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
 github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef h1:veQD95Isof8w9/WXiA+pa3tz3fJXkt5B7QaRBrM62gk=
 github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
 github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6 h1:ZgQEtGgCBiWRM39fZuwSd1LwSqqSW0hOdXCYYDX0R3I=
 github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e h1:1r7pUrabqp18hOBcwBwiTsbnFeTZHV9eER/QT5JVZxY=
+github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
 github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
 github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
 github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
-github.com/golang/protobuf v0.0.0-20180814211427-aa810b61a9c7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
+github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
+github.com/golang/mock v1.4.3 h1:GV+pQPG/EUUbkh47niozDcADz6go/dUwhVzdUQHIVRw=
+github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
 github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
 github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
 github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs=
 github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
+github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
+github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk=
+github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
+github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
+github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
+github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
+github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
+github.com/golang/protobuf v1.4.1 h1:ZFgWrT+bLgsYPirOnRfKLYJLvssAegOj/hgyMFdJZe0=
+github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
+github.com/golang/protobuf v1.4.2 h1:+Z5KGCizgyZCbGh1KZqA0fcLLkwbsjIzS4aV2v7wJX0=
+github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
 github.com/golang/snappy v0.0.0-20170215233205-553a64147049/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
 github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
 github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4=
 github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
-github.com/google/btree v0.0.0-20180124185431-e89373fe6b4a/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
 github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
 github.com/google/btree v1.0.0 h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo=
 github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
@@ -194,6 +343,13 @@ github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY=
 github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
 github.com/google/go-cmp v0.3.1 h1:Xye71clBPdm5HgqGwUkwhbynsUJZhDbS20FvLhQ2izg=
 github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
+github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4=
+github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.0 h1:/QaMHBdZ26BB3SSst0Iwl10Epc+xhTquomWX0oZEB6w=
+github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.4 h1:L8R9j+yAqZuZjsqh/z+F1NCffTKKLShY6zXTItVIZ8M=
+github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
 github.com/google/go-replayers/grpcreplay v0.1.0 h1:eNb1y9rZFmY4ax45uEEECSa8fsxGRU+8Bil52ASAwic=
 github.com/google/go-replayers/grpcreplay v0.1.0/go.mod h1:8Ig2Idjpr6gifRd6pNVggX6TC1Zw6Jx74AKp7QNH2QE=
 github.com/google/go-replayers/httpreplay v0.1.0 h1:AX7FUb4BjrrzNvblr/OlgwrmFiep6soj5K2QSDW7BGk=
@@ -204,26 +360,32 @@ github.com/google/martian v2.1.1-0.20190517191504-25dcb96d9e51+incompatible h1:x
 github.com/google/martian v2.1.1-0.20190517191504-25dcb96d9e51+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
 github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
 github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
-github.com/google/shlex v0.0.0-20181106134648-c34317bd91bf/go.mod h1:RpwtwJQFrIEPstU94h88MWPXP2ektJZ8cZ0YntAmXiE=
+github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20200507031123-427632fa3b1c/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
 github.com/google/subcommands v1.0.1/go.mod h1:ZjhPrFU+Olkh9WazFPsl27BQ4UPiG37m3yTrtFlrHVk=
 github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
 github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY=
 github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
 github.com/google/wire v0.3.0 h1:imGQZGEVEHpje5056+K+cgdO72p0LQv2xIIFXNGUf60=
 github.com/google/wire v0.3.0/go.mod h1:i1DMg/Lu8Sz5yYl25iOdmc5CT5qusaa+zmRWs16741s=
+github.com/google/wire v0.4.0 h1:kXcsA/rIGzJImVqPdhfnr6q0xsS9gU0515q1EPpJ9fE=
+github.com/google/wire v0.4.0/go.mod h1:ngWDr9Qvq3yZA10YrxfyGELY/AFWGVpy9c1LTRi1EoU=
 github.com/googleapis/gax-go v2.0.2+incompatible h1:silFMLAnr330+NRuag/VjIGF7TLp/LBrV2CJKFLWEww=
 github.com/googleapis/gax-go v2.0.2+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY=
 github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
 github.com/googleapis/gax-go/v2 v2.0.5 h1:sjZBwGj9Jlw33ImPtvFviGYvseOtDM7hkSKB7+Tv3SM=
 github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
-github.com/gorilla/context v0.0.0-20160226214623-1ea25387ff6f/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg=
+github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
 github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg=
-github.com/gorilla/mux v1.6.1/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
 github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
-github.com/gorilla/mux v1.7.3 h1:gnP5JzjVOuiZD07fKKToCAOjS0yOpj/qPETTXCCS6hw=
 github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
+github.com/gorilla/mux v1.7.4 h1:VuZ8uybHlWmqV03+zRzdwKL4tUnIp1MAQtp1mIFE1bc=
+github.com/gorilla/mux v1.7.4/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
 github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
-github.com/gorilla/websocket v1.2.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
 github.com/gorilla/websocket v1.4.0 h1:WDFjx/TMzVgy9VdMMQi2K2Emtwi2QcUQsztZ/zLaH/Q=
 github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
 github.com/gorilla/websocket v1.4.1 h1:q7AeDBpnBk8AogcD4DSag/Ukw/KV+YhzLj2bP5HvKCM=
@@ -234,55 +396,92 @@ github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de
 github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
 github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho=
 github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
-github.com/grpc-ecosystem/grpc-gateway v1.4.1/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw=
-github.com/grpc-ecosystem/grpc-gateway v1.5.1/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw=
 github.com/grpc-ecosystem/grpc-gateway v1.8.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
 github.com/grpc-ecosystem/grpc-gateway v1.9.0 h1:bM6ZAFZmc/wPFaRDi0d5L7hGEZEx/2u+Tmr2evNHDiI=
 github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
 github.com/grpc-ecosystem/grpc-gateway v1.9.2/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
+github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
 github.com/grpc-ecosystem/grpc-gateway v1.11.0 h1:aT5ISUniaOTErogCQ+4pGoYNBB6rm6Fq3g1v8QwYGas=
 github.com/grpc-ecosystem/grpc-gateway v1.11.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
 github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed h1:5upAirOpQc1Q53c0bnx2ufif5kANL7bfZWcc6VJWJd8=
 github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed/go.mod h1:tMWxXQ9wFIaZeTI9F+hmhFiGpFmhOHzyShyFUhRm0H4=
+github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE=
+github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8=
+github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
+github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
+github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
+github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
+github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
+github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU=
+github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU=
+github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4=
+github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
 github.com/hashicorp/go-uuid v1.0.1 h1:fv1ep09latC32wFoVwnqcnKJGnMSdBanPczbHAYm1BE=
 github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
+github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
+github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90=
 github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
 github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU=
 github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
-github.com/hashicorp/golang-lru v0.5.3 h1:YPkqC67at8FYaadspW/6uE0COsBxS2656RLEr8Bppgk=
-github.com/hashicorp/golang-lru v0.5.3/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
 github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
 github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
+github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64=
+github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ=
+github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I=
+github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc=
 github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
 github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
+github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg=
+github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
 github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
-github.com/jacobsa/daemonize v0.0.0-20160101105449-e460293e890f h1:X+tnaqoCcBgAwSTJtoYW6p0qKiuPyMfofEHEFUf2kdU=
-github.com/jacobsa/daemonize v0.0.0-20160101105449-e460293e890f/go.mod h1:Ip4fOwzCrnDVuluHBd7FXIMb7SHOKfkt9/UDrYSZvqI=
+github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo=
 github.com/jcmturner/gofork v0.0.0-20190328161633-dc7c13fece03 h1:FUwcHNlEqkqLjLBdCp5PRlCFijNjvcYANOZXzCfXwCM=
 github.com/jcmturner/gofork v0.0.0-20190328161633-dc7c13fece03/go.mod h1:MK8+TM0La+2rjBD4jE12Kj1pCCxK7d2LK/UM3ncEo0o=
 github.com/jcmturner/gofork v1.0.0 h1:J7uCkflzTEhUZ64xqKnkDxq3kzc96ajM1Gli5ktUem8=
 github.com/jcmturner/gofork v1.0.0/go.mod h1:MK8+TM0La+2rjBD4jE12Kj1pCCxK7d2LK/UM3ncEo0o=
+github.com/jinzhu/copier v0.2.8 h1:N8MbL5niMwE3P4dOwurJixz5rMkKfujmMRFmAanSzWE=
+github.com/jinzhu/copier v0.2.8/go.mod h1:24xnZezI2Yqac9J61UC6/dG/k76ttpq0DdJI3QmUvro=
 github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
 github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af h1:pmfjZENx5imkbgOkpRUYLnmbU7UEFbjtDA2hxJ1ichM=
 github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
+github.com/jmespath/go-jmespath v0.3.0 h1:OS12ieG61fsCg5+qLJ+SsW9NicxNkg3b25OyT2yCeUc=
+github.com/jmespath/go-jmespath v0.3.0/go.mod h1:9QtRXoHjLGCJ5IBSaohpXITPlowMeeYCZ7fLUTSywik=
+github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=
+github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
+github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8=
+github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
 github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg=
 github.com/jonboulle/clockwork v0.1.0 h1:VKV+ZcuP6l3yW9doeqz6ziZGgcynBVQO+obU0+0hcPo=
 github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
 github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
 github.com/json-iterator/go v1.1.7 h1:KfgG9LzI+pYjr4xvmz/5H4FXjokeP+rlHLhv3iH62Fo=
 github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
+github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
+github.com/json-iterator/go v1.1.10 h1:Kz6Cvnvv2wGdaG/V8yMvfkmNiXq9Ya2KUv4rouJJr68=
+github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
 github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
-github.com/juju/ratelimit v1.0.1 h1:+7AIFJVQ0EQgq/K9+0Krm7m530Du7tIz0METWzN0RgY=
-github.com/juju/ratelimit v1.0.1/go.mod h1:qapgC/Gy+xNh9UxzV13HGGl/6UXNN+ct+vwSgWNm/qk=
+github.com/jstemmer/go-junit-report v0.9.1 h1:6QPYqodiu3GuPL+7mfx+NwDdp2eTkp9IfEUpgAwUN0o=
+github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
+github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
 github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
 github.com/karlseguin/ccache v2.0.3+incompatible h1:j68C9tWOROiOLWTS/kCGg9IcJG+ACqn5+0+t8Oh83UU=
 github.com/karlseguin/ccache v2.0.3+incompatible/go.mod h1:CM9tNPzT6EdRh14+jiW8mEF9mkNZuuE51qmgGYUB93w=
+github.com/karlseguin/ccache/v2 v2.0.7 h1:y5Pfi4eiyYCOD6LS/Kj+o6Nb4M5Ngpw9qFQs+v44ZYM=
+github.com/karlseguin/ccache/v2 v2.0.7/go.mod h1:2BDThcfQMf/c0jnZowt16eW405XIqZPavt+HoYEtcxQ=
 github.com/karlseguin/expect v1.0.1 h1:z4wy4npwwHSWKjGWH85WNJO42VQhovxTCZDSzhjo8hY=
 github.com/karlseguin/expect v1.0.1/go.mod h1:zNBxMY8P21owkeogJELCLeHIt+voOSduHYTFUbwRAV8=
+github.com/karlseguin/expect v1.0.2-0.20190806010014-778a5f0c6003 h1:vJ0Snvo+SLMY72r5J4sEfkuE7AFbixEP2qRbEcum/wA=
+github.com/karlseguin/expect v1.0.2-0.20190806010014-778a5f0c6003/go.mod h1:zNBxMY8P21owkeogJELCLeHIt+voOSduHYTFUbwRAV8=
+github.com/karrick/godirwalk v1.8.0/go.mod h1:H5KPZjojv4lE+QYImBI8xVtrBRgYrIVsaRPx4tDPEn4=
+github.com/karrick/godirwalk v1.10.3/go.mod h1:RoGL9dQei4vP9ilrpETWE8CLOZ1kiN0LhBygSwrAsHA=
 github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
 github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
 github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
-github.com/klauspost/cpuid v0.0.0-20170728055534-ae7887de9fa5/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
+github.com/klauspost/compress v1.9.5 h1:U+CaK85mrNNb4k8BNOfgJtJ/gr6kswUCFj6miSzVC6M=
+github.com/klauspost/compress v1.9.5/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
+github.com/klauspost/compress v1.10.7/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
+github.com/klauspost/compress v1.10.9 h1:pPRt1Z78crspaHISkpSSHjDlx+Tt9suHe519dsI0vF4=
+github.com/klauspost/compress v1.10.9/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
 github.com/klauspost/cpuid v1.2.1 h1:vJi+O/nMdFt0vqm8NZBI6wzALWdA2X+egi0ogNyrC/w=
 github.com/klauspost/cpuid v1.2.1/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
 github.com/klauspost/crc32 v1.2.0 h1:0VuyqOCruD33/lJ/ojXNvzVyl8Zr5zdTmj9l9qLZ86I=
@@ -293,10 +492,10 @@ github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGi
 github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
 github.com/konsorten/go-windows-terminal-sequences v1.0.2 h1:DB17ag19krx9CFsz4o3enTrPXyIXCl+2iCXH/aMAp9s=
 github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
+github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
 github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
 github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
 github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
-github.com/kr/pty v1.0.0/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
 github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
 github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
 github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
@@ -305,10 +504,21 @@ github.com/kurin/blazer v0.5.3/go.mod h1:4FCXMUWo9DllR2Do4TtBd377ezyAJ51vB5uTBjt
 github.com/lib/pq v1.1.1/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
 github.com/lib/pq v1.2.0 h1:LXpIM/LZ5xGFhOpXAQUIMM1HdyqzVYM13zNdjCEEcA0=
 github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
+github.com/lib/pq v1.10.0 h1:Zx5DJFEYQXio93kgXnQ09fXNiUKsqv4OUEu2UtGcB1E=
+github.com/lib/pq v1.10.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
+github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM=
+github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4=
+github.com/lunixbochs/vtclean v1.0.0 h1:xu2sLAri4lGiovBDQKxl5mrXyESr3gUr5m5SM5+LVb8=
+github.com/lunixbochs/vtclean v1.0.0/go.mod h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm/+2c2E2WMI=
+github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ=
 github.com/magiconair/properties v1.8.0 h1:LLgXmsheXeRoUOBOjtwPQCWIYqM/LU1ayDtDePerRcY=
 github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
 github.com/magiconair/properties v1.8.1 h1:ZC2Vc7/ZFkGmsVC9KvOjumD+G5lXy2RtTKyzRKO2BQ4=
 github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
+github.com/mailru/easyjson v0.7.1 h1:mdxE1MF9o53iCb2Ghj1VfWvh7ZOwHpnVG/xwXrV90U8=
+github.com/mailru/easyjson v0.7.1/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs=
+github.com/markbates/oncer v0.0.0-20181203154359-bf2de49a0be2/go.mod h1:Ld9puTsIW75CHf65OeIOkyKbteujpZVXDpWK6YGZbxE=
+github.com/markbates/safe v1.0.1/go.mod h1:nAqgmRi7cY2nqMc92/bSEeQA+R4OheNU2T1kNSCBdG0=
 github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
 github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
 github.com/mattn/go-ieproxy v0.0.0-20190610004146-91bb50d98149 h1:HfxbT6/JcvIljmERptWhwa8XzP7H3T+Z2N26gTsaDaA=
@@ -316,6 +526,9 @@ github.com/mattn/go-ieproxy v0.0.0-20190610004146-91bb50d98149/go.mod h1:31jz6HN
 github.com/mattn/go-ieproxy v0.0.0-20190702010315-6dee0af9227d/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc=
 github.com/mattn/go-ieproxy v0.0.0-20190805055040-f9202b1cfdeb h1:hXqqXzQtJbENrsb+rsIqkVqcg4FUJL0SQFGw08Dgivw=
 github.com/mattn/go-ieproxy v0.0.0-20190805055040-f9202b1cfdeb/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc=
+github.com/mattn/go-ieproxy v0.0.1 h1:qiyop7gCflfhwCzGyeT0gro3sF9AIg9HU98JORTkqfI=
+github.com/mattn/go-ieproxy v0.0.1/go.mod h1:pYabZ6IHcRpFh7vIaLfK7rdcWgFEb3SFJ6/gNWuh88E=
+github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
 github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
 github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
 github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
@@ -323,11 +536,16 @@ github.com/mattn/go-runewidth v0.0.3 h1:a+kO+98RDGEfo6asOGMmpodZq4FNtnGP54yps8Bz
 github.com/mattn/go-runewidth v0.0.3/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
 github.com/mattn/go-runewidth v0.0.4 h1:2BvfKmzob6Bmd4YsL0zygOqfdFnK7GR4QL06Do4/p7Y=
 github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
-github.com/mattn/go-shellwords v1.0.3/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o=
-github.com/matttproud/golang_protobuf_extensions v1.0.0/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
 github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
 github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
+github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
+github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
+github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
 github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
+github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
+github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg=
+github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY=
+github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
 github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE=
 github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
 github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
@@ -336,156 +554,177 @@ github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJ
 github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
 github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI=
 github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
-github.com/montanaflynn/stats v0.0.0-20151014174947-eeaced052adb/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc=
-github.com/montanaflynn/stats v0.0.0-20180911141734-db72e6cae808 h1:pmpDGKLw4n82EtrNiLqB+xSz/JQwFOaZuMALYUHwX5s=
-github.com/montanaflynn/stats v0.0.0-20180911141734-db72e6cae808/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc=
+github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc=
 github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
 github.com/nats-io/jwt v0.2.6/go.mod h1:mQxQ0uHQ9FhEVPIcTSKwx2lqZEpXWWcCgA7R6NrWvvY=
-github.com/nats-io/jwt v0.2.14 h1:wA50KvFz/JXGXMHRygTWsRGh/ixxgC5E3kHvmtGLNf4=
-github.com/nats-io/jwt v0.2.14/go.mod h1:fRYCDE99xlTsqUzISS1Bi75UBJ6ljOJQOAAu5VglpSg=
+github.com/nats-io/jwt v0.3.0/go.mod h1:fRYCDE99xlTsqUzISS1Bi75UBJ6ljOJQOAAu5VglpSg=
+github.com/nats-io/jwt v0.3.2 h1:+RB5hMpXUUA2dfxuhBTEkMOrYmM+gKIZYS1KjSostMI=
+github.com/nats-io/jwt v0.3.2/go.mod h1:/euKqTS1ZD+zzjYrY7pseZrTtWQSjujC7xjPc8wL6eU=
+github.com/nats-io/jwt v1.0.1 h1:71ivoESdfT2K/qDiw5YwX/3W9/dR7c+m83xiGOj/EZ4=
+github.com/nats-io/jwt v1.0.1/go.mod h1:n3cvmLfBfnpV4JJRN7lRYCyZnw48ksGsbThGXEk4w9M=
 github.com/nats-io/nats-server/v2 v2.0.0/go.mod h1:RyVdsHHvY4B6c9pWG+uRLpZ0h0XsqiuKp2XCTurP5LI=
-github.com/nats-io/nats-server/v2 v2.0.4 h1:XOMeQRbhl1lGNTIctPhih6pTa15NGif54Uas6ZW5q7g=
-github.com/nats-io/nats-server/v2 v2.0.4/go.mod h1:AWdGEVbjKRS9ZIx4DSP5eKW48nfFm7q3uiSkP/1KD7M=
+github.com/nats-io/nats-server/v2 v2.1.2 h1:i2Ly0B+1+rzNZHHWtD4ZwKi+OU5l+uQo1iDHZ2PmiIc=
+github.com/nats-io/nats-server/v2 v2.1.2/go.mod h1:Afk+wRZqkMQs/p45uXdrVLuab3gwv3Z8C4HTBu8GD/k=
 github.com/nats-io/nats.go v1.8.1 h1:6lF/f1/NN6kzUDBz6pyvQDEXO39jqXcWRLu/tKjtOUQ=
 github.com/nats-io/nats.go v1.8.1/go.mod h1:BrFz9vVn0fU3AcH9Vn4Kd7W0NpJ651tD5omQ3M8LwxM=
+github.com/nats-io/nats.go v1.9.1 h1:ik3HbLhZ0YABLto7iX80pZLPw/6dx3T+++MZJwLnMrQ=
+github.com/nats-io/nats.go v1.9.1/go.mod h1:ZjDU1L/7fJ09jvUSRVBR2e7+RnLiiIQyqyzEE/Zbp4w=
+github.com/nats-io/nats.go v1.10.0 h1:L8qnKaofSfNFbXg0C5F71LdjPRnmQwSsA4ukmkt1TvY=
+github.com/nats-io/nats.go v1.10.0/go.mod h1:AjGArbfyR50+afOUotNX2Xs5SYHf+CoOa5HH1eEl2HE=
 github.com/nats-io/nkeys v0.0.2 h1:+qM7QpgXnvDDixitZtQUBDY9w/s9mu1ghS+JIbsrx6M=
 github.com/nats-io/nkeys v0.0.2/go.mod h1:dab7URMsZm6Z/jp9Z5UGa87Uutgc2mVpXLC4B7TDb/4=
 github.com/nats-io/nkeys v0.1.0 h1:qMd4+pRHgdr1nAClu+2h/2a5F2TmKcCzjCDazVgRoX4=
 github.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w=
+github.com/nats-io/nkeys v0.1.3 h1:6JrEfig+HzTH85yxzhSVbjHRJv9cn0p6n3IngIcM5/k=
+github.com/nats-io/nkeys v0.1.3/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w=
+github.com/nats-io/nkeys v0.1.4/go.mod h1:XdZpAbhgyyODYqjTawOnIOI7VlbKSarI9Gfy1tqEu/s=
+github.com/nats-io/nkeys v0.2.0 h1:WXKF7diOaPU9cJdLD7nuzwasQy9vT1tBqzXZZf3AMJM=
+github.com/nats-io/nkeys v0.2.0/go.mod h1:XdZpAbhgyyODYqjTawOnIOI7VlbKSarI9Gfy1tqEu/s=
 github.com/nats-io/nuid v1.0.1 h1:5iA8DT8V7q8WK2EScv2padNa/rTESc1KdnPw4TC2paw=
 github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c=
-github.com/ngaut/pools v0.0.0-20180318154953-b7bc8c42aac7 h1:7KAv7KMGTTqSmYZtNdcNTgsos+vFzULLwyElndwn+5c=
-github.com/ngaut/pools v0.0.0-20180318154953-b7bc8c42aac7/go.mod h1:iWMfgwqYW+e8n5lC/jjNEhwcjbRDpl5NT7n2h+4UNcI=
-github.com/ngaut/sync2 v0.0.0-20141008032647-7a24ed77b2ef h1:K0Fn+DoFqNqktdZtdV3bPQ/0cuYh2H4rkg0tytX/07k=
-github.com/ngaut/sync2 v0.0.0-20141008032647-7a24ed77b2ef/go.mod h1:7WjlapSfwQyo6LNmIvEWzsW1hbBQfpUO4JWnuQRmva8=
-github.com/nicksnyder/go-i18n v1.10.0/go.mod h1:HrK7VCrbOvQoUAQ7Vpy7i87N7JZZZ7R2xBGjv0j365Q=
+github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78=
+github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
+github.com/oklog/oklog v0.3.2/go.mod h1:FCV+B7mhrz4o+ueLpx+KqkyXRGMWOYEvfiXtdGtbWGs=
+github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA=
 github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
 github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo=
+github.com/olivere/elastic/v7 v7.0.19 h1:w4F6JpqOISadhYf/n0NR1cNj73xHqh4pzPwD1Gkidts=
+github.com/olivere/elastic/v7 v7.0.19/go.mod h1:4Jqt5xvjqpjCqgnTcHwl3j8TLs8mvoOK8NYgo/qEOu4=
 github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
 github.com/onsi/ginkgo v1.7.0 h1:WSHQ+IS43OoUrWtD1/bbclrwK8TTH5hzp+umCiuxHgs=
 github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
-github.com/onsi/ginkgo v1.10.1 h1:q/mM8GF/n0shIN8SaAZ0V+jnLPzen6WIVZdiwrRlMlo=
-github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
-github.com/onsi/gomega v1.4.2/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
+github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
+github.com/onsi/ginkgo v1.14.2 h1:8mVmC9kjFFmA8H4pKMUhcblgifdkOIXPvbhN1T36q1M=
+github.com/onsi/ginkgo v1.14.2/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY=
 github.com/onsi/gomega v1.4.3 h1:RE1xgDvH7imwFD45h+u2SgIfERHlS2yNG4DObb5BSKU=
 github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
-github.com/onsi/gomega v1.7.0 h1:XPnZz8VVBHjVsy1vzJmRwIcSwiUO+JFfrv/xGiigmME=
-github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
-github.com/opentracing/basictracer-go v1.0.0 h1:YyUAhaEfjoWXclZVJ9sGoNct7j4TVk7lZWlQw5UXuoo=
+github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
+github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
+github.com/onsi/gomega v1.10.4 h1:NiTx7EEvBzu9sFOD1zORteLSt3o8gnlvZZwSE9TnY9U=
+github.com/onsi/gomega v1.10.4/go.mod h1:g/HbgYopi++010VEqkFgJHKC09uJiW9UkXvMUuKHUCQ=
+github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk=
+github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492/go.mod h1:Ngi6UdF0k5OKD5t5wlmGhe/EDKPoUM3BXZSSfIuJbis=
 github.com/opentracing/basictracer-go v1.0.0/go.mod h1:QfBfYuafItcjQuMwinw9GhYKwFXS9KnPs5lxoYwgW74=
 github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
-github.com/opentracing/opentracing-go v1.1.0 h1:pWlfV3Bxv7k65HYwkikxat0+s3pV4bsqf19k25Ur8rU=
 github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
+github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc=
+github.com/openzipkin-contrib/zipkin-go-opentracing v0.4.5/go.mod h1:/wsWhb9smxSfWAKL3wpBW7V8scJMt8N8gnaMCS9E/cA=
+github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw=
+github.com/openzipkin/zipkin-go v0.2.1/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4=
+github.com/openzipkin/zipkin-go v0.2.2/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4=
+github.com/pact-foundation/pact-go v1.0.4/go.mod h1:uExwJY4kCzNPcHRj+hCR/HBbOOIwwtUjcrb0b5/5kLM=
+github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
+github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k=
 github.com/pelletier/go-toml v1.2.0 h1:T5zMGML61Wp+FlcbWjRDT7yAxhJNAiPPLOFECq181zc=
 github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
-github.com/pelletier/go-toml v1.3.0/go.mod h1:PN7xzY2wHTK0K9p34ErDQMlFxa51Fk0OUruD3k1mMwo=
 github.com/pelletier/go-toml v1.4.0 h1:u3Z1r+oOXJIkxqw34zVhyPgjBsm6X2wn21NWs/HfSeg=
 github.com/pelletier/go-toml v1.4.0/go.mod h1:PN7xzY2wHTK0K9p34ErDQMlFxa51Fk0OUruD3k1mMwo=
+github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac=
 github.com/peterh/liner v1.1.0 h1:f+aAedNJA6uk7+6rXsYBnhdo4Xux7ESLe+kcuVUF5os=
 github.com/peterh/liner v1.1.0/go.mod h1:CRroGNssyjTd/qIG2FyxByd2S8JEAZXBl4qUrZf8GS0=
 github.com/pierrec/lz4 v0.0.0-20190327172049-315a67e90e41/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc=
+github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc=
+github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
 github.com/pierrec/lz4 v2.2.7+incompatible h1:Eerk9aiqeZo2QzsbWOAsELUf9ddvAxEdMY9LYze/DEc=
 github.com/pierrec/lz4 v2.2.7+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
-github.com/pingcap/check v0.0.0-20190102082844-67f458068fc8 h1:USx2/E1bX46VG32FIw034Au6seQ2fY9NEILmNh/UlQg=
-github.com/pingcap/check v0.0.0-20190102082844-67f458068fc8/go.mod h1:B1+S9LNcuMyLH/4HMTViQOJevkGiik3wW2AN9zb2fNQ=
-github.com/pingcap/errcode v0.0.0-20180921232412-a1a7271709d9 h1:KH4f4Si9XK6/IW50HtoaiLIFHGkapOM6w83za47UYik=
-github.com/pingcap/errcode v0.0.0-20180921232412-a1a7271709d9/go.mod h1:4b2X8xSqxIroj/IZ9MX/VGZhAwc11wB9wRIzHvz6SeM=
-github.com/pingcap/errors v0.11.0/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8=
-github.com/pingcap/errors v0.11.4 h1:lFuQV/oaUMGcD2tqt+01ROSmJs75VG1ToEOkZIZ4nE4=
-github.com/pingcap/errors v0.11.4/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8=
-github.com/pingcap/failpoint v0.0.0-20190512135322-30cc7431d99c h1:hvQd3aOLKLF7xvRV6DzvPkKY4QXzfVbjU1BhW0d9yL8=
-github.com/pingcap/failpoint v0.0.0-20190512135322-30cc7431d99c/go.mod h1:DNS3Qg7bEDhU6EXNHF+XSv/PGznQaMJ5FWvctpm6pQI=
-github.com/pingcap/goleveldb v0.0.0-20171020122428-b9ff6c35079e h1:P73/4dPCL96rGrobssy1nVy2VaVpNCuLpCbr+FEaTA8=
-github.com/pingcap/goleveldb v0.0.0-20171020122428-b9ff6c35079e/go.mod h1:O17XtbryoCJhkKGbT62+L2OlrniwqiGLSqrmdHCMzZw=
-github.com/pingcap/kvproto v0.0.0-20190822090350-11ea838aedf7/go.mod h1:QMdbTAXCHzzygQzqcG9uVUgU2fKeSN1GmfMiykdSzzY=
-github.com/pingcap/kvproto v0.0.0-20190910074005-0e61b6f435c1/go.mod h1:QMdbTAXCHzzygQzqcG9uVUgU2fKeSN1GmfMiykdSzzY=
-github.com/pingcap/kvproto v0.0.0-20191022073741-81b2a2d9e986 h1:XFh7n8Cheo00pakfhpUofnlptHCuz9lkp4p/jXPb8lM=
-github.com/pingcap/kvproto v0.0.0-20191022073741-81b2a2d9e986/go.mod h1:QMdbTAXCHzzygQzqcG9uVUgU2fKeSN1GmfMiykdSzzY=
-github.com/pingcap/log v0.0.0-20190715063458-479153f07ebd/go.mod h1:WpHUKhNZ18v116SvGrmjkA9CBhYmuUTKL+p8JC9ANEw=
-github.com/pingcap/log v0.0.0-20191012051959-b742a5d432e9 h1:AJD9pZYm72vMgPcQDww9rkZ1DnWfl0pXV3BOWlkYIjA=
-github.com/pingcap/log v0.0.0-20191012051959-b742a5d432e9/go.mod h1:4rbK1p9ILyIfb6hU7OG2CiWSqMXnp3JMbiaVJ6mvoY8=
-github.com/pingcap/parser v0.0.0-20191021083151-7c64f78a5100 h1:TRyps2d+2TsJv1Vk4S2D+5COMDVKClnAO5aNmGGVyj0=
-github.com/pingcap/parser v0.0.0-20191021083151-7c64f78a5100/go.mod h1:1FNvfp9+J0wvc4kl8eGNh7Rqrxveg15jJoWo/a0uHwA=
-github.com/pingcap/pd v1.1.0-beta.0.20190923032047-5c648dc365e0 h1:GIEq+wZfrl2bcJxpuSrEH4H7/nlf5YdmpS+dU9lNIt8=
-github.com/pingcap/pd v1.1.0-beta.0.20190923032047-5c648dc365e0/go.mod h1:G/6rJpnYwM0LKMec2rI82/5Kg6GaZMvlfB+e6/tvYmI=
-github.com/pingcap/tidb v1.1.0-beta.0.20191023070859-58fc7d44f73b h1:6GfcYOX9/CCxPnNOivVxiDYXbZrCHU1mRp691iw9EYs=
-github.com/pingcap/tidb v1.1.0-beta.0.20191023070859-58fc7d44f73b/go.mod h1:YfrHdQ613A+E2FSugyXOdJmeZQbXNjpXX2doNe8MGj8=
-github.com/pingcap/tidb-tools v2.1.3-0.20190321065848-1e8b48f5c168+incompatible h1:MkWCxgZpJBgY2f4HtwWMMFzSBb3+JPzeJgF3VrXE/bU=
-github.com/pingcap/tidb-tools v2.1.3-0.20190321065848-1e8b48f5c168+incompatible/go.mod h1:XGdcy9+yqlDSEMTpOXnwf3hiTeqrV6MN/u1se9N8yIM=
-github.com/pingcap/tipb v0.0.0-20191015023537-709b39e7f8bb/go.mod h1:RtkHW8WbcNxj8lsbzjaILci01CtYnYbIkQhjyZWrWVI=
-github.com/pingcap/tipb v0.0.0-20191022094114-a2e8c3fa634b h1:DZ0cTsn4lGMNaRjkUFKBtHn4s2F8KFMm83lWvSo+x7c=
-github.com/pingcap/tipb v0.0.0-20191022094114-a2e8c3fa634b/go.mod h1:RtkHW8WbcNxj8lsbzjaILci01CtYnYbIkQhjyZWrWVI=
 github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
 github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
 github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
+github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
 github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA=
+github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI=
 github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
 github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
-github.com/prometheus/client_golang v0.9.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
+github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
 github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
+github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs=
 github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso=
 github.com/prometheus/client_golang v1.0.0 h1:vrDKnkGzuGvhNAL56c7DBz29ZL+KxnoR0x7enabFceM=
 github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
-github.com/prometheus/client_golang v1.1.0 h1:BQ53HtBmfOitExawJ6LokA4x8ov/z0SYYb0+HxJfRI8=
-github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g=
-github.com/prometheus/client_model v0.0.0-20170216185247-6f3806018612/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
-github.com/prometheus/client_model v0.0.0-20171117100541-99fa1f4be8e5/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
+github.com/prometheus/client_golang v1.3.0 h1:miYCvYqFXtl/J9FIy8eNpBfYthAEFg+Ys0XyUVEcDsc=
+github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeDPbaTKGT+JTgUa3og=
 github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
+github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
 github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90 h1:S/YWwWx/RA8rT8tKFRuGUZhuA90OyIBpPCXkcbwU8DE=
 github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
 github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4 h1:gQz4mCbXsO+nc9n1hCxHcGA3Zx3Eo+UHZoInFGUIXNM=
 github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/prometheus/common v0.0.0-20180518154759-7600349dcfe1/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
-github.com/prometheus/common v0.0.0-20181020173914-7e9e6cabbd39/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
+github.com/prometheus/client_model v0.1.0 h1:ElTg5tNp4DqfV7UQjDqv2+RJlNzsDtvNAWccbItceIE=
+github.com/prometheus/client_model v0.1.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
 github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
+github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
 github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
 github.com/prometheus/common v0.4.1 h1:K0MGApIoQvMw27RTdJkPbr3JZ7DNbtxQNyi5STVM6Kw=
 github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
-github.com/prometheus/common v0.6.0 h1:kRhiuYSXR3+uv2IbVbZhUxK5zVD/2pp3Gd2PpvPkpEo=
-github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc=
-github.com/prometheus/procfs v0.0.0-20180612222113-7d6f385de8be/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
+github.com/prometheus/common v0.7.0 h1:L+1lyG48J1zAQXA3RBX/nG/B3gjlHq0zTt2tlbJLyCY=
+github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA=
 github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
+github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
 github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
 github.com/prometheus/procfs v0.0.2 h1:6LJUbpNm42llc4HRCuvApCSWB/WfhuNo9K98Q9sNGfs=
 github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
-github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ=
-github.com/prometheus/procfs v0.0.4 h1:w8DjqFMJDjuVwdZBQoOozr4MVWOnwF7RcL/7uxBjY78=
-github.com/prometheus/procfs v0.0.4/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ=
+github.com/prometheus/procfs v0.0.8 h1:+fpWZdT24pJBiqJdAwYBjPSk+5YmQzYNPYzQsdzLkt8=
+github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
 github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
-github.com/rakyll/statik v0.1.6 h1:uICcfUXpgqtw2VopbIncslhAmE5hwc4g20TEyEENBNs=
-github.com/rakyll/statik v0.1.6/go.mod h1:OEi9wJV/fMUAGx1eNjq75DKDsJVuEv1U0oYdX6GX8Zs=
+github.com/rakyll/statik v0.1.7 h1:OF3QCZUuyPxuGEP7B4ypUa7sB/iHtqOTDYZXGM8KOdQ=
+github.com/rakyll/statik v0.1.7/go.mod h1:AlZONWzMtEnMs7W4e/1LURLiI49pIMmp6V9Unghqrcc=
 github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a h1:9ZKAASQSHhDYGoxY8uLVpewe1GDZ2vu2Tr/vTdVAkFQ=
 github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
 github.com/rcrowley/go-metrics v0.0.0-20190826022208-cac0b30c2563 h1:dY6ETXrvDG7Sa4vE8ZQG4yqWg6UnOcbqTAahkV813vQ=
 github.com/rcrowley/go-metrics v0.0.0-20190826022208-cac0b30c2563/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
-github.com/remyoudompheng/bigfft v0.0.0-20190512091148-babf20351dd7/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=
-github.com/remyoudompheng/bigfft v0.0.0-20190728182440-6a916e37a237 h1:HQagqIiBmr8YXawX/le3+O26N+vPPC1PtjaF3mwnook=
-github.com/remyoudompheng/bigfft v0.0.0-20190728182440-6a916e37a237/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=
+github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0 h1:OdAsTTz6OkFY5QxjkYwrChwuRruF69c169dPK26NUlk=
+github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=
 github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
-github.com/rwcarlsen/goexif v0.0.0-20190401172101-9e8deecbddbd h1:CmH9+J6ZSsIjUK3dcGsnCnO41eRBOnY12zwkn5qVwgc=
-github.com/rwcarlsen/goexif v0.0.0-20190401172101-9e8deecbddbd/go.mod h1:hPqNNc0+uJM6H+SuU8sEs5K5IQeKccPqeSjfgcKGgPk=
-github.com/seaweedfs/fuse v0.0.0-20190510212405-310228904eff h1:uLd5zBvf5OA67wcVRePHrFt60bR4LSskaVhgVwyk0Jg=
-github.com/seaweedfs/fuse v0.0.0-20190510212405-310228904eff/go.mod h1:cubdLmQFqEUZ9vNJrznhgc3m3VMAJi/nY2Ix2axXkG0=
-github.com/sergi/go-diff v1.0.1-0.20180205163309-da645544ed44/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
-github.com/shirou/gopsutil v2.18.10+incompatible h1:cy84jW6EVRPa5g9HAHrlbxMSIjBhDSX0OFYyMYminYs=
-github.com/shirou/gopsutil v2.18.10+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA=
-github.com/shurcooL/httpfs v0.0.0-20171119174359-809beceb2371/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg=
-github.com/shurcooL/vfsgen v0.0.0-20181020040650-a97a25d856ca/go.mod h1:TrYk7fJVaAttu97ZZKrO9UbRa8izdowaMIZcxYMbVaw=
-github.com/sirupsen/logrus v1.0.5/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc=
+github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
+github.com/rogpeppe/go-internal v1.2.2/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
+github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
+github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
+github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
+github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E=
+github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
+github.com/seaweedfs/fuse v1.0.8 h1:HBPJTC77OlxwSd2JiTwvLPn8bWTElqQp3xs9vf3C15s=
+github.com/seaweedfs/fuse v1.0.8/go.mod h1:W7ubwr1l7KQsMeUpxFFOFOSxUL/ucTRMAlVYs4xdfQ8=
+github.com/seaweedfs/fuse v1.0.9 h1:3JZoGsW7cmmrd/U5KYcIGR2+EqyBvCYCoCpEdZAz/Dc=
+github.com/seaweedfs/fuse v1.0.9/go.mod h1:+PP6WlkrRUG6KPE+Th2EX5To/PjHaFsvqg/UgQ39aj8=
+github.com/seaweedfs/fuse v1.1.0 h1:cL1qPHFNtFv0UuJTLjKKgWDzfJ4iZzTa4Y7ipc2acGw=
+github.com/seaweedfs/fuse v1.1.0/go.mod h1:+PP6WlkrRUG6KPE+Th2EX5To/PjHaFsvqg/UgQ39aj8=
+github.com/seaweedfs/fuse v1.1.1 h1:WD51YFJcBViOx8I89jeqPD+vAKl4EowzBy9GUw0plb0=
+github.com/seaweedfs/fuse v1.1.1/go.mod h1:+PP6WlkrRUG6KPE+Th2EX5To/PjHaFsvqg/UgQ39aj8=
+github.com/seaweedfs/fuse v1.1.3 h1:0DddotXwSRGbYG2kynoJyr8GHCy30Z2SpdhP3vdyijY=
+github.com/seaweedfs/fuse v1.1.3/go.mod h1:+PP6WlkrRUG6KPE+Th2EX5To/PjHaFsvqg/UgQ39aj8=
+github.com/seaweedfs/fuse v1.1.4 h1:YYqkK86agMhXRSwR+wFbRI8ikMgk3kL6PNTna1MAHyQ=
+github.com/seaweedfs/fuse v1.1.4/go.mod h1:+PP6WlkrRUG6KPE+Th2EX5To/PjHaFsvqg/UgQ39aj8=
+github.com/seaweedfs/goexif v1.0.2 h1:p+rTXYdQ2mgxd+1JaTrQ9N8DvYuw9UH9xgYmJ+Bb29E=
+github.com/seaweedfs/goexif v1.0.2/go.mod h1:MrKs5LK0HXdffrdCZrW3OIMegL2xXpC6ThLyXMyjdrk=
+github.com/secsy/goftp v0.0.0-20190720192957-f31499d7c79a h1:C6IhVTxNkhlb0tlCB6JfHOUv1f0xHPK7V8X4HlJZEJw=
+github.com/secsy/goftp v0.0.0-20190720192957-f31499d7c79a/go.mod h1:MnkX001NG75g3p8bhFycnyIjeQoOjGL6CEIsdE/nKSY=
+github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
 github.com/sirupsen/logrus v1.2.0 h1:juTguoYk5qI21pwyTXY3B3Y5cOTH3ZUyZCg1v/mihuo=
 github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
+github.com/sirupsen/logrus v1.4.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
+github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
 github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4=
 github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
+github.com/sirupsen/logrus v1.5.0 h1:1N5EYkVAPEywqZRJd7cwnRtCb6xJx7NH3T3WUTF980Q=
+github.com/sirupsen/logrus v1.5.0/go.mod h1:+F7Ogzej0PZc/94MaYx/nvG9jOFMD2osvC3s+Squfpo=
+github.com/skip2/go-qrcode v0.0.0-20200617195104-da1b6568686e h1:MRM5ITcdelLK2j1vwZ3Je0FKVCfqOLp5zO6trqMLYs0=
+github.com/skip2/go-qrcode v0.0.0-20200617195104-da1b6568686e/go.mod h1:XV66xRDqSt+GTGFMVlhk3ULuV0y9ZmzeVGR4mloJI3M=
+github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
+github.com/smartystreets/assertions v1.1.1/go.mod h1:tcbTF8ujkAEcZ8TElKY+i30BzYlVhC/LOxJk7iOWnoo=
+github.com/smartystreets/go-aws-auth v0.0.0-20180515143844-0c1422d1fdb9/go.mod h1:SnhjPscd9TpLiy1LpzGSKh3bXCfxxXuqd9xmQJy3slM=
+github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
+github.com/smartystreets/gunit v1.3.4/go.mod h1:ZjM1ozSIMJlAz/ay4SG8PeKF00ckUp+zMHZXV9/bvak=
 github.com/soheilhy/cmux v0.1.4 h1:0HKaf1o97UwFjHH9o5XsHUOF+tqmdA7KEzXLpiyaw0E=
 github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
+github.com/sony/gobreaker v0.4.1/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY=
 github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
 github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI=
 github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
 github.com/spf13/afero v1.1.2 h1:m8/z1t7/fwjysjQRYbP0RD+bUIF/8tJwPdEZsI83ACI=
 github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
-github.com/spf13/afero v1.2.2 h1:5jhuqJyZCZf2JRofRvN/nIFgIWNzPa3/Vz8mYylgbWc=
-github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
+github.com/spf13/afero v1.3.1 h1:GPTpEAuNr98px18yNQ66JllNil98wfRZ/5Ukny8FeQA=
+github.com/spf13/afero v1.3.1/go.mod h1:5KUK8ByomD5Ti5Artl0RtHeI5pTF7MIDuXL3yY520V4=
 github.com/spf13/cast v1.3.0 h1:oget//CVOEoFewqQxwr0Ej5yjygnqGkvggSE/gB35Q8=
 github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
 github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
@@ -502,6 +741,9 @@ github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94 h1:0ngsPmuP6XIjiFRN
 github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw=
 github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271 h1:WhxRHzgeVGETMlmVfqhRn8RIeeNoPr2Czh33I4Zdccw=
 github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw=
+github.com/streadway/amqp v0.0.0-20200108173154-1c71cc93ed71 h1:2MR0pKUzlP3SGgj5NYJe/zRYDwOu9ku6YHy+Iw7l5DM=
+github.com/streadway/amqp v0.0.0-20200108173154-1c71cc93ed71/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw=
+github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI=
 github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
 github.com/stretchr/objx v0.1.1 h1:2vfRuCMp5sSVIDSqO8oNnWJq7mPa6KVP3iPIwFBuy8A=
 github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
@@ -512,11 +754,14 @@ github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0
 github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
 github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk=
 github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
-github.com/struCoder/pidusage v0.1.2/go.mod h1:pWBlW3YuSwRl6h7R5KbvA4N8oOqe9LjaKW5CwT1SPjI=
-github.com/syndtr/goleveldb v0.0.0-20180815032940-ae2bd5eed72d/go.mod h1:Z4AUp2Km+PwemOoO/VB5AOx9XSsIItzFjoJlOSiYmn0=
+github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4=
+github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
+github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0=
+github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
 github.com/syndtr/goleveldb v1.0.0 h1:fBdIW9lB4Iz0n9khmH8w27SJ3QEJ7+IgjPEwGSZiFdE=
 github.com/syndtr/goleveldb v1.0.0/go.mod h1:ZVVdQEZoIme9iO1Ch2Jdy24qqXrMMOU6lpPAyBWyWuQ=
-github.com/tiancaiamao/appdash v0.0.0-20181126055449-889f96f722a2/go.mod h1:2PfKggNGDuadAa0LElHrByyrz4JPZ9fFx6Gs7nx7ZZU=
+github.com/tecbot/gorocksdb v0.0.0-20191217155057-f0fad39f321c h1:g+WoO5jjkqGAzHWCjJB1zZfXPIAaDpzXIEJ0eS6B5Ok=
+github.com/tecbot/gorocksdb v0.0.0-20191217155057-f0fad39f321c/go.mod h1:ahpPrc7HpcfEWDQRZEmnXMzHY03mLDYMCxeDzy46i+8=
 github.com/tidwall/gjson v1.3.2 h1:+7p3qQFaH3fOMXAJSrdZwGKcOO/lYdGS0HqGhPqDdTI=
 github.com/tidwall/gjson v1.3.2/go.mod h1:P256ACg0Mn+j1RXIDXoss50DeIABTYK1PULOJHhxOls=
 github.com/tidwall/match v1.0.1 h1:PnKP62LPNxHKTwvHHZZzdOAOCtsJTjo6dZLCwpKm5xc=
@@ -524,98 +769,157 @@ github.com/tidwall/match v1.0.1/go.mod h1:LujAq0jyVjBy028G1WhWfIzbpQfMO8bBZ6Tyb0
 github.com/tidwall/pretty v1.0.0 h1:HsD+QiTn7sK6flMKIvNmpqz1qrpP3Ps6jOKIKMooyg4=
 github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk=
 github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
-github.com/tmc/grpc-websocket-proxy v0.0.0-20171017195756-830351dc03c6/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
 github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5 h1:LnC5Kc/wtumK+WB441p7ynQJzVuNRJiqddSIE3IlSEQ=
 github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
-github.com/uber-go/atomic v1.3.2/go.mod h1:/Ct5t2lcmbJ4OSe/waGBoaVvVqtO0bmtfVNex1PFV8g=
-github.com/uber-go/atomic v1.4.0 h1:yOuPqEq4ovnhEjpHmfFwsqBXDYbQeT6Nb0bwD6XnD5o=
-github.com/uber-go/atomic v1.4.0/go.mod h1:/Ct5t2lcmbJ4OSe/waGBoaVvVqtO0bmtfVNex1PFV8g=
-github.com/uber/jaeger-client-go v2.15.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk=
-github.com/uber/jaeger-client-go v2.17.0+incompatible h1:35tpDuT3k0oBiN/aGoSWuiFaqKgKZSciSMnWrazhSHE=
-github.com/uber/jaeger-client-go v2.17.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk=
-github.com/uber/jaeger-lib v1.5.0/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U=
-github.com/uber/jaeger-lib v2.0.0+incompatible h1:iMSCV0rmXEogjNWPh2D0xk9YVKvrtGoHJNe9ebLu/pw=
-github.com/uber/jaeger-lib v2.0.0+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U=
-github.com/ugorji/go v1.1.2/go.mod h1:hnLbHMwcvSihnDhEfx2/BzKp2xb0Y+ErdfYcrs9tkJQ=
+github.com/tsuna/gohbase v0.0.0-20201125011725-348991136365 h1:6iRwZdrFUzbcVYZwa8dXTIILGIxmmhjyUPJEcwzPGaU=
+github.com/tsuna/gohbase v0.0.0-20201125011725-348991136365/go.mod h1:zj0GJHGvyf1ed3Jm/Tb4830c/ZKDq+YoLsCt2rGQuT0=
 github.com/ugorji/go v1.1.4 h1:j4s+tAvLfL3bZyefP2SEWmhBzmuIlH/eqNuPdFPgngw=
 github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc=
-github.com/ugorji/go/codec v0.0.0-20190204201341-e444a5086c43/go.mod h1:iT03XoTwV7xq/+UGwKO3UbC1nNNlopQiY61beSdrtOA=
-github.com/unrolled/render v0.0.0-20171102162132-65450fb6b2d3/go.mod h1:tu82oB5W2ykJRVioYsB+IQKcft7ryBr7w12qMBUPyXg=
-github.com/unrolled/render v0.0.0-20180914162206-b9786414de4d h1:ggUgChAeyge4NZ4QUw6lhHsVymzwSDJOZcE0s2X8S20=
-github.com/unrolled/render v0.0.0-20180914162206-b9786414de4d/go.mod h1:tu82oB5W2ykJRVioYsB+IQKcft7ryBr7w12qMBUPyXg=
 github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
-github.com/urfave/negroni v0.3.0/go.mod h1:Meg73S6kFm/4PpbYdq35yYWoCZ9mS/YSx+lKnmiohz4=
+github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
+github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw=
+github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
+github.com/valyala/fasthttp v1.20.0 h1:olTmcnLQeZrkBc4TVgE/BatTo1NE/IvW050AuD8SW+U=
+github.com/valyala/fasthttp v1.20.0/go.mod h1:jjraHZVbKOXftJfsOYoAjaeygpj5hr8ermTRJNroD7A=
+github.com/valyala/tcplisten v0.0.0-20161114210144-ceec8f93295a/go.mod h1:v3UYOV9WzVtRmSR+PDvWpU/qWl4Wa5LApYYX4ZtKbio=
+github.com/viant/assertly v0.5.4 h1:5Hh4U3pLZa6uhCFAGpYOxck/8l9TZczEzoHNfJAhHEQ=
+github.com/viant/assertly v0.5.4/go.mod h1:aGifi++jvCrUaklKEKT0BU95igDNaqkvz+49uaYMPRU=
+github.com/viant/ptrie v0.3.0 h1:SDaRd7Gqr1+ItCNz0GpTxRdK21nOfqjV6YtBm9jGlMY=
+github.com/viant/ptrie v0.3.0/go.mod h1:VguMnbGfz95Zw+V5VarYSqtqslDxJbOv++xLzxkMhec=
+github.com/viant/toolbox v0.33.2 h1:Av844IIeGz81gT672qZemyptGfbrcxqGymA5RFnIPjE=
+github.com/viant/toolbox v0.33.2/go.mod h1:OxMCG57V0PXuIP2HNQrtJf2CjqdmbrOx5EkMILuUhzM=
 github.com/willf/bitset v1.1.10 h1:NotGKqX0KwQ72NUzqrjZq5ipPNDQex9lo3WpaS8L2sc=
 github.com/willf/bitset v1.1.10/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4=
 github.com/willf/bloom v2.0.3+incompatible h1:QDacWdqcAUI1MPOwIQZRy9kOR7yxfyEmxX8Wdm2/JPA=
 github.com/willf/bloom v2.0.3+incompatible/go.mod h1:MmAltL9pDMNTrvUkxdg0k0q5I0suxmuwp3KbyrZLOZ8=
 github.com/wsxiaoys/terminal v0.0.0-20160513160801-0940f3fc43a0 h1:3UeQBvD0TFrlVjOeLOBz+CPAI8dnbqNSVwUwRrkp7vQ=
 github.com/wsxiaoys/terminal v0.0.0-20160513160801-0940f3fc43a0/go.mod h1:IXCdmsXIht47RaVFLEdVnh1t+pgYtTAhQGj73kz+2DM=
+github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c h1:u40Z8hqBAAQyv+vATcGgV0YCnDjqSL7/q/JyPhhJSPk=
 github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I=
+github.com/xdg/stringprep v0.0.0-20180714160509-73f8eece6fdc/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y=
+github.com/xdg/stringprep v1.0.0 h1:d9X0esnoa3dFsV0FG35rAT0RIhYFlPq7MiP+DW89La0=
 github.com/xdg/stringprep v1.0.0/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y=
 github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 h1:eY9dn8+vbi4tKz5Qo6v2eYzo7kUS51QINcR5jNpbZS8=
 github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
 github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
-github.com/yookoala/realpath v1.0.0/go.mod h1:gJJMA9wuX7AcqLy1+ffPatSCySA1FQ2S8Ya9AIoYBpE=
+github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
 go.etcd.io/bbolt v1.3.2 h1:Z/90sZLPOeCy2PwprqkFa25PdkusRzaj9P8zm/KNyvk=
 go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
-go.etcd.io/bbolt v1.3.3 h1:MUGmc65QhB3pIlaQ5bB4LwqSj6GIonVJXpZiaKNyaKk=
-go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
-go.etcd.io/etcd v0.0.0-20190320044326-77d4b742cdbf/go.mod h1:KSGwdbiFchh5KIC9My2+ZVl5/3ANcwohw50dpPwa2cw=
-go.etcd.io/etcd v3.3.15+incompatible h1:0VpOVCF6EFnJptt8Jh0EWEHO4j2fepyV1fpu9xz/UoQ=
-go.etcd.io/etcd v3.3.15+incompatible/go.mod h1:yaeTdrJi5lOmYerz05bd8+V7KubZs8YSFZfzsF9A6aI=
+go.etcd.io/bbolt v1.3.4 h1:hi1bXHMVrlQh6WwxAy+qZCV/SYIlqo+Ushwdpa4tAKg=
+go.etcd.io/bbolt v1.3.4/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ=
+go.etcd.io/etcd v0.5.0-alpha.5.0.20200425165423-262c93980547 h1:s71VGheLtWmCYsnNjf+s7XE8HsrZnd3EYGrLGWVm7nY=
+go.etcd.io/etcd v0.5.0-alpha.5.0.20200425165423-262c93980547/go.mod h1:YoUyTScD3Vcv2RBm3eGVOq7i1ULiz3OuXoQFWOirmAM=
+go.mongodb.org/mongo-driver v1.3.2 h1:IYppNjEV/C+/3VPbhHVxQ4t04eVW0cLp0/pNdW++6Ug=
+go.mongodb.org/mongo-driver v1.3.2/go.mod h1:MSWZXKOynuguX+JSvwP8i+58jYCXxbia8HS3gZBapIE=
 go.opencensus.io v0.15.0/go.mod h1:UffZAU+4sDEINUGP/B7UfBBkq4fqLu9zXAX7ke6CHW0=
+go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk=
+go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk=
 go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
 go.opencensus.io v0.22.0 h1:C9hSCOW830chIVkdja34wa6Ky+IzWllkUinR+BtRZd4=
 go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
-go.uber.org/atomic v1.3.2/go.mod
h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.4 h1:LYy1Hy3MJdrCdMwwzxA/dRok4ejH+RwNGbuoD9fCjto= +go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opentelemetry.io/otel v0.15.0 h1:CZFy2lPhxd4HlhZnYK8gRyDotksO3Ip9rBweY1vVYJw= +go.opentelemetry.io/otel v0.15.0/go.mod h1:e4GKElweB8W2gWUqbghw0B8t5MCTccc9212eNHnOHwA= go.uber.org/atomic v1.4.0 h1:cxzIVoETapQEqDhQu3QfnvXAV4AlzcvUCxkVUFw3+EU= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= +go.uber.org/atomic v1.6.0 h1:Ezj3JGmsOnG1MoRWQkPBsKLe9DwWD9QeXzTRzzldNVk= +go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/multierr v1.1.0 h1:HoEmRHQPVSqub6w2z2d2EOVs2fjyFRGyofhKuyDq0QI= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= -go.uber.org/multierr v1.2.0 h1:6I+W7f5VwC5SV9dNrZ3qXrDB9mD0dyGOi/ZJmYw03T4= -go.uber.org/multierr v1.2.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= -go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= +go.uber.org/multierr v1.5.0 h1:KCa4XfM8CWFCpxXRGok+Q0SS/0XBhMDbHHGABQLvD2A= +go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= +go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee h1:0mgffUl7nfd+FpvXMVz4IDEaUSmT1ysygQC7qYo7sG4= +go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= go.uber.org/zap v1.10.0 h1:ORx85nbTijNz8ljznvCMR1ZBIPKFn3jQrag10X2AsuM= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= +go.uber.org/zap v1.14.1 h1:nYDKopTbvAPq/NrUVZwT15y2lpROBiLLyoRTbXOYWOo= +go.uber.org/zap v1.14.1/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc= gocloud.dev v0.16.0 h1:hWeaQWxamGerwsU7B9xSWvUjx0p7TwG8fcHro2TzbbM= gocloud.dev v0.16.0/go.mod h1:xWGXD8t7bEhqPIuyAUFyXV9qHn+PvEY2F2GKPh7i/O0= +gocloud.dev v0.20.0 h1:mbEKMfnyPV7W1Rj35R1xXfjszs9dXkwSOq2KoFr25g8= +gocloud.dev v0.20.0/go.mod h1:+Y/RpSXrJthIOM8uFNzWp6MRu9pFPNFEEZrQMxpkfIc= gocloud.dev/pubsub/natspubsub v0.16.0 h1:MoBGXULDzb1fVaZsGWO5cUCgr6yoI/DHhau8OPGaGEI= gocloud.dev/pubsub/natspubsub v0.16.0/go.mod h1:0n7pT7PkLMClBUHDrOkHfOFVr/o/6kawNMwsyAbwadI= +gocloud.dev/pubsub/natspubsub v0.20.0 h1:DsOXYKfcSTh0SHKwuhpQAJmPLDj3+ARvYgBIupVPClE= +gocloud.dev/pubsub/natspubsub v0.20.0/go.mod h1:Zh7v7Q1DZjAoBwsavZLdvinMIO1eYE0PJTllMuX3VGA= gocloud.dev/pubsub/rabbitpubsub v0.16.0 h1:Bkv2njMSl2tmT3tGbvbwpiIDAXBIpqzP9dmts+rhD4E= gocloud.dev/pubsub/rabbitpubsub v0.16.0/go.mod h1:JJVdUUIqwgaaMJg/1xHQza0g4sI/4KHHSNiGE+pn4JM= -golang.org/x/crypto v0.0.0-20180608092829-8ac0e0d97ce4/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +gocloud.dev/pubsub/rabbitpubsub v0.20.0 h1:hwupxLvWG8jTPNQ+9Q/YWZzyMagL9blTwWYYhoW4pco= +gocloud.dev/pubsub/rabbitpubsub v0.20.0/go.mod h1:xYCXmI3ixWuCW4s1KqyZpgKT90MMjdXdMlb0Kgmd7TM= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto 
v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190404164418-38d8ce5564a5/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= +golang.org/x/crypto v0.0.0-20190422162423-af44ce270edf/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190530122614-20be4c3c3ed5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5 h1:58fnuSXlxZmFdJyvtTFVmVhcMLU6v5fEb/ok4wyqtNU= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190909091759-094676da4a83/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190911031432-227b76d455e7 h1:0hQKqeLdqlt5iIwVOBErRisrHJAN57yOiPRQItI20fU= -golang.org/x/crypto v0.0.0-20190911031432-227b76d455e7/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191002192127-34f69633bfdc h1:c0o/qxkaO2LF5t6fQrT4b5hzyggAkLLlCUjqfRxd8Q4= +golang.org/x/crypto v0.0.0-20191002192127-34f69633bfdc/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550 h1:ObdrDkeb4kJdCP557AjRjq69pTHfNouLtWZG7j9rPN8= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200323165209-0ec3e9974c59/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200604202706-70a84ac30bf9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= -golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs= +golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= +golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= +golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= +golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod 
h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067 h1:KYGJGHOQy8oSi1fDlSpcZF0+juKwk/hEMv5SiwHogR0= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= -golang.org/x/image v0.0.0-20190829233526-b3c06291d021 h1:j6QOxNFMpEL1wIQX6TUdBPNfGZKmBOJS/vfSm8a7tdM= -golang.org/x/image v0.0.0-20190829233526-b3c06291d021/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20191009234506-e7c1f5e7dbb8/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20200119044424-58c23975cae1 h1:5h3ngYt7+vXCDZCup/HkCQgW5XwmSvR/nA2JmJ0RErg= +golang.org/x/image v0.0.0-20200119044424-58c23975cae1/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de h1:5hukYrvBGR8/eNkX5mdUezrA6JiaEZDtJb9Ei+1LlBs= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= +golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b h1:Wh+f8QHJXR411sJR8/vRBTZ7YapZaRvUcLFFJhusH0k= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= +golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0 h1:RM4zey1++hCTbCVQfnWeKs9/IEsaBLA8vTkd0WVtmH4= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181005035420-146acd28ed58/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod 
h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= @@ -626,50 +930,113 @@ golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190619014844-b5b0513f8c1b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190909003024-a7b16738d86b h1:XfVGCX+0T4WOStkaOsJRllbsiImhB2jgVBGc9L0lPGc= -golang.org/x/net v0.0.0-20190909003024-a7b16738d86b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191112182307-2180aed22343/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200202094626-16171245cfb2 h1:CCH4IOTTfewWjGOlSp+zGcjutRKlBEZQ6wTn8ozI/nI= +golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200602114024-627f9648deb9/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20201016165138-7b1cca2348c0/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb 
h1:eBmm0M9fYhWpKZLjQUUKka/LtIxf46G4fxeEz5KJr9U= +golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190402181905-9f3314589c9a/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 h1:SVwTIAaPC2U/AvvLNZ2a7OVsmBpC8L5BlwK1whH3hm0= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d h1:TzXSXBo42m9gQenoE3b9BGiEpg5IG2JkU5FkPIawgtw= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190412183630-56d357773e84/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58 h1:8gQV6CLnAEikrhgkHFbMAEhagSSnXWGV915qUMm9mrU= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200930132711-30421366ff76 h1:JnxiSYT3Nm0BT2a8CyvYyM6cnrWpidecD1UuSYbhKm0= +golang.org/x/sync v0.0.0-20200930132711-30421366ff76/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190419153524-e8e3143a4f4a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190531175056-4c3a928424d2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190620070143-6f217b454f45/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0 h1:HyfiK1WMnHj5FXFXatD+Qs1A/xC2Run6RzeW1SyHxpc= golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190909082730-f460065e899a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190910064555-bbd175535a8b h1:3S2h5FadpNr0zUUCVZjlKIEYF+KaX/OBplTGo89CYHI= -golang.org/x/sys v0.0.0-20190910064555-bbd175535a8b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191112214154-59a1497f0cea/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191220142924-d4481acd189f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5 h1:LfCXLvNmTYH9kEmVgqbnsWfruoXZIrh4YBgqVHtDvw0= +golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200317113312-5766fd39f98d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200420163511-1957bb5e6d1f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200602225109-6fdc65e7d980/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201022201747-fb209a7c41cd h1:WgqgiQvkiZWz7XLhphjt2GI2GcGCTIZs9jqXMWmH+oc= +golang.org/x/sys v0.0.0-20201022201747-fb209a7c41cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 h1:SvFZT6jyqRaOeXpc5h/JSfZenJ2O330aBsf7JfSUXmQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0 h1:/5xXl8Y5W96D+TtHSlonuFqGHIWVuyCkGJLwGh9JJFs= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -677,17 +1044,60 @@ golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3 golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod 
h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190329151228-23e29df326fe/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190416151739-9c9e1878f421/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190420181800-aa740d480789/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190422233926-fe54fb35175b/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190531172133-b3315ee88b7d/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190911022129-16c5e0f7d110 h1:6S6bidS7O4yAwA5ORRbRIjvNQ9tGbLd5e+LRIaTeVDQ= -golang.org/x/tools v0.0.0-20190911022129-16c5e0f7d110/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5 h1:hKsoRgsbwY1NafxrwTs+k64bikrLBkAgPir1TNCj3Zs= +golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200103221440-774c71fcf114 h1:DnSr2mCsxyCE6ZgIkmcWUQY2R5cH/6wL7eIxEmQOMSE= +golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= 
+golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200317043434-63da46f3035e/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= +golang.org/x/tools v0.0.0-20200325010219-a49f79bcc224/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= +golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= +golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200601175630-2caf76543d99/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200606014950-c42cb6316fb6/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200608174601-1b747fd94509 h1:MI14dOfl3OG6Zd32w3ugsrvcUO810fDZdWakTq39dH4= +golang.org/x/tools v0.0.0-20200608174601-1b747fd94509/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7 h1:9zdDQZ7Thm29KFXgAX/+yaf3eVbP7djjWp/dXAppNCc= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.5.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.6.0/go.mod h1:btoxGiFvQNVUZQ8W08zLtrVS08CNpINPEfxXxgJL1Q4= @@ -696,16 +1106,29 @@ google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= google.golang.org/api v0.9.0 h1:jbyannxz0XFD3zdjgrSUsaJbgpH4eTrkdhRChkHPfO8= google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api 
v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.26.0 h1:VJZ8h6E8ip82FRpQl848c5vAadxlTXrUh8RzQzSRm08= +google.golang.org/api v0.26.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.1 h1:QzqyMA1tlu6CgqCDUtU9V+ZKhLFT2dkJuANu5QaxI3I= google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= google.golang.org/appengine v1.6.2 h1:j8RI1yW0SkI+paT6uGwMlrMI/6zwYA6/CFil8rxOzGI= google.golang.org/appengine v1.6.2/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= -google.golang.org/genproto v0.0.0-20180608181217-32ee49c4dd80/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.6 h1:lMO5rYAqUxkmaj76jAkRUvt5JZgFymx/+Q5Mzfivuhc= +google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20181004005441-af9cb2a35e7f/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= @@ -714,20 +1137,61 @@ google.golang.org/genproto v0.0.0-20190508193815-b515fa19cec8/go.mod h1:VzzqZJRn google.golang.org/genproto v0.0.0-20190530194941-fb225487d101/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s= google.golang.org/genproto v0.0.0-20190620144150-6af8c5fc6601/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s= google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20190905072037-92dd089d5514 h1:oFSK4421fpCKRrpzIpybyBVWyht05NegY9+L/3TLAZs= -google.golang.org/genproto v0.0.0-20190905072037-92dd089d5514/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/grpc v0.0.0-20180607172857-7a6a684ca69e/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= -google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55 h1:gSJIx1SDwno+2ElGhA4+qG2zF97qiUzTM+rQ0klBOcE= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto 
v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= +google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200317114155-1f3552e48f24/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200325114520-5b2d0af7952b/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013 h1:+kGHl1aib/qcwaRi1CbqBZ1rk19r85MNUf8HaBghugY= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20200603110839-e855014d5736/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= +google.golang.org/genproto v0.0.0-20200608115520-7c474a2e3482 h1:i+Aiej6cta/Frzp13/swvwz5O00kYcSe0A/C5Wd7zX8= +google.golang.org/genproto v0.0.0-20200608115520-7c474a2e3482/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= +google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.22.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.23.0 h1:AzbTB6ux+okLTzP8Ru1Xs41C303zdcfEht7MQnYJt5A= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U= -gopkg.in/alecthomas/gometalinter.v2 v2.0.12/go.mod h1:NDRytsqEZyolNuAgTzJkZMkSQM7FIKyzVzGhjB/qfYo= +google.golang.org/grpc v1.25.1/go.mod 
h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.26.0 h1:2dTRdpdFEEhJYQD8EMLB61nnrzSCTbG38PhqdhvOltg= +google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.0 h1:rRYRFMVgRv6E0D70Skyfsr28tDXIuuPZyWGMPdMcnXg= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= +google.golang.org/grpc v1.29.1 h1:EC2SB8S04d2r73uptxphDSUG+kTKVgjRPF+N3xpxRB4= +google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.24.0 h1:UhZDfRO8JRQru4/+LlLE0BRKGF8L+PICnvYZmx/fEGA= +google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= -gopkg.in/alecthomas/kingpin.v3-unstable v3.0.0-20180810215634-df19058c872c/go.mod h1:3HH7i1SgMqlzxCcBmUHW657sD4Kvv9sC3HpL3YukzwA= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= @@ -735,9 +1199,12 @@ gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8 gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= +gopkg.in/dutchcoders/goftp.v1 v1.0.0-20170301105846-ed59a591ce14 h1:tHqNpm9sPaE6BSuMLXBzgTwukQLdBEt4OYU2coQjEQQ= +gopkg.in/dutchcoders/goftp.v1 v1.0.0-20170301105846-ed59a591ce14/go.mod h1:nzmlZQ+UqB5+55CRTV/dOaiK8OrPl6Co96Ob8lH4Wxw= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= -gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo= +gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/jcmturner/aescts.v1 v1.0.1 
h1:cVVZBK2b1zY26haWB4vbBiZrfFQnfbTVrE3xZq6hrEw= @@ -754,23 +1221,41 @@ gopkg.in/jcmturner/rpc.v1 v1.1.0 h1:QHIUxTX1ISuAv9dD2wJ9HWQVuWDX/Zc0PfeC2tjc4rU= gopkg.in/jcmturner/rpc.v1 v1.1.0/go.mod h1:YIdkC4XfD6GXbzje11McwsDuOlZQSb9W4vfLvuNnlv8= gopkg.in/karlseguin/expect.v1 v1.0.1 h1:9u0iUltnhFbJTHaSIH0EP+cuTU5rafIgmcsEsg2JQFw= gopkg.in/karlseguin/expect.v1 v1.0.1/go.mod h1:uB7QIJBcclvYbwlUDkSCsGjAOMis3fP280LyhuDEf2I= -gopkg.in/natefinch/lumberjack.v2 v2.0.0 h1:1Lc07Kr7qY4U2YPouBjpCLxpiyxIVoxqXgkXLknAOE8= -gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU= +gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.3 h1:3JgtbtFHMiCmsznwGVTUWbgGov+pVqnlf1dEJTNAXeM= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +honnef.co/go/tools v0.0.1-2020.1.4 h1:UoveltGrhghAA7ePc+e+QYDHXrBps2PqFZiHkGR/xK8= +honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +modernc.org/b v1.0.0 h1:vpvqeyp17ddcQWF29Czawql4lDdABCDRbXRAS4+aF2o= +modernc.org/b v1.0.0/go.mod h1:uZWcZfRj1BpYzfN9JTerzlNUnnPsV9O2ZA8JsRcubNg= +modernc.org/mathutil v1.1.1 h1:FeylZSVX8S+58VsyJlkEj2bcpdytmp9MmDKZkKx8OIE= +modernc.org/mathutil v1.1.1/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= +modernc.org/strutil v1.1.0 h1:+1/yCzZxY2pZwwrsbH+4T7BQMoLQ9QiBshRC9eicYsc= +modernc.org/strutil v1.1.0/go.mod h1:lstksw84oURvj9y3tn8lGvRxyRC1S2+g5uuIzNfIOBs= pack.ag/amqp v0.11.2/go.mod h1:4/cbmt4EJXSKlG6LCfWHoqmN0uFdy5i/+YFz+fTfhV4= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +rsc.io/quote/v3 v3.1.0/go.mod 
h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
sigs.k8s.io/yaml v1.1.0 h1:4A07+ZFc2wgJwo8YNlQpr1rVlgUDlxXHhPJciaPY5gs=
sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=
-sourcegraph.com/sourcegraph/appdash v0.0.0-20180531100431-4c381bd170b4 h1:VO9oZbbkvTwqLimlQt15QNdOOBArT2dw/bvzsMZBiqQ=
-sourcegraph.com/sourcegraph/appdash v0.0.0-20180531100431-4c381bd170b4/go.mod h1:hI742Nqp5OhwiqlzhgfbWU4mW4yO10fP+LoT9WOswdU=
-sourcegraph.com/sourcegraph/appdash-data v0.0.0-20151005221446-73f23eafcf67/go.mod h1:L5q+DGLGOQFpo1snNEkLOJT2d1YTW66rWNzatr3He1k=
+sourcegraph.com/sourcegraph/appdash v0.0.0-20190731080439-ebfcffb1b5c0/go.mod h1:hI742Nqp5OhwiqlzhgfbWU4mW4yO10fP+LoT9WOswdU=
diff --git a/k8s/README.md b/k8s/README.md
new file mode 100644
index 000000000..c5615522c
--- /dev/null
+++ b/k8s/README.md
@@ -0,0 +1,63 @@
+## SEAWEEDFS - helm chart (2.x)
+
+### info:
+* master/filer/volume are stateful sets with anti-affinity on the hostname,
+so your deployment will be spread across hosts for HA.
+* the chart uses memsql (MySQL-compatible) as the filer backend to enable HA (multiple filer instances)
+and the backup/HA that memsql can provide.
+* the mysql user/password are created in a k8s secret (secret-seaweedfs-db.yaml) and injected into the filer
+via ENV.
+* cert config exists and can be enabled, but has not been tested.
+
+### prerequisites
+kubernetes nodes have labels which help to define which node (host) will run which pod.
+
+s3/filer/master need the label **sw-backend=true**
+
+volume needs the label **sw-volume=true**
+
+to label a node so it can run all pod types in k8s:
+```
+kubectl label node YOUR_NODE_NAME sw-volume=true,sw-backend=true
+```
+
+in a production k8s deployment you will want each pod to run on a different host,
+especially the volume servers & the masters. currently all pods (master/volume/filer)
+have an anti-affinity rule that disallows running multiple pods of the same type on the same host.
+if you still want to run multiple pods of the same type (master/volume/filer) on the same host,
+set/update the corresponding affinity rule in values.yaml to an empty one:
+
+```affinity: ""```
+
+### PVC - storage class ###
+
+the volume stateful set supports K8S PVC; the current example uses
+the simple local-path-provisioner from Rancher (included with k3d / k3s):
+https://github.com/rancher/local-path-provisioner
+
+you can use ANY storage class you like, just set the correct storage-class
+for your deployment.
+
+### current instances config (AIO):
+1 instance for each type (master/filer+s3/volume)
+
+you can update the replica count for each node type in values.yaml;
+you will need to add more nodes with the corresponding labels.
+
+most of the configuration is available through values.yaml and can be
+overridden at install time, as shown below.
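+
+a hypothetical sketch of an override file (the key names here are illustrative -
+check k8s/seaweedfs/values.yaml for the names your chart version actually uses):
+
+```
+# my-values.yaml - passed to helm with: -f my-values.yaml
+master:
+  replicas: 3                  # one master per sw-backend labeled host
+volume:
+  replicas: 2
+  storageClass: "local-path"   # any storage class available in the cluster
+filer:
+  replicas: 2
+```
+
diff --git a/k8s/seaweedfs/.helmignore b/k8s/seaweedfs/.helmignore
new file mode 100644
index 000000000..50af03172
--- /dev/null
+++ b/k8s/seaweedfs/.helmignore
@@ -0,0 +1,22 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.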
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/k8s/seaweedfs/Chart.yaml b/k8s/seaweedfs/Chart.yaml new file mode 100644 index 000000000..0025c9760 --- /dev/null +++ b/k8s/seaweedfs/Chart.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +description: SeaweedFS +name: seaweedfs +appVersion: "2.41" +version: 2.41 diff --git a/k8s/seaweedfs/dashboards/seaweedfs-grafana-dashboard.json b/k8s/seaweedfs/dashboards/seaweedfs-grafana-dashboard.json new file mode 100644 index 000000000..d492a0695 --- /dev/null +++ b/k8s/seaweedfs/dashboards/seaweedfs-grafana-dashboard.json @@ -0,0 +1,1856 @@ +{ + "__inputs": [ + { + "name": "DS_PROMETHEUS-DEV", + "label": "prometheus-dev", + "description": "", + "type": "datasource", + "pluginId": "prometheus", + "pluginName": "Prometheus" + } + ], + "__requires": [ + { + "type": "grafana", + "id": "grafana", + "name": "Grafana", + "version": "4.6.2" + }, + { + "type": "panel", + "id": "graph", + "name": "Graph", + "version": "" + }, + { + "type": "datasource", + "id": "prometheus", + "name": "Prometheus", + "version": "1.0.0" + } + ], + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": "${DS_PROMETHEUS-DEV}", + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "limit": 100, + "name": "Annotations & Alerts", + "showIn": 0, + "type": "dashboard" + } + ] + }, + "editable": true, + "gnetId": 10423, + "graphTooltip": 0, + "hideControls": false, + "id": null, + "links": [], + "refresh": "30s", + "rows": [ + { + "collapse": true, + "height": 251, + "panels": [ + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_PROMETHEUS-DEV}", + "editable": true, + "error": false, + "fill": 1, + "grid": {}, + "id": 46, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 4, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "histogram_quantile(0.90, sum(rate(SeaweedFS_filer_request_seconds_bucket[1m])) by (le))", + "format": "time_series", + "hide": false, + "intervalFactor": 2, + "legendFormat": "average", + "refId": "A", + "step": 60 + }, + { + "expr": "histogram_quantile(0.90, sum(rate(SeaweedFS_filer_request_seconds_bucket[1m])) by (le, type))", + "format": "time_series", + "hide": false, + "intervalFactor": 2, + "legendFormat": "{{type}}", + "refId": "B", + "step": 60 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Filer Request Duration 90th percentile", + "tooltip": { + "msResolution": true, + "shared": true, + "sort": 0, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "s", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_PROMETHEUS-DEV}", + "editable": true, + "error": false, + "fill": 1, + "grid": {}, + "id": 
49, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 4, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "histogram_quantile(0.95, sum(rate(SeaweedFS_filer_request_seconds_bucket[1m])) by (le))", + "format": "time_series", + "hide": false, + "intervalFactor": 2, + "legendFormat": "average", + "refId": "A", + "step": 60 + }, + { + "expr": "histogram_quantile(0.95, sum(rate(SeaweedFS_filer_request_seconds_bucket[1m])) by (le, type))", + "format": "time_series", + "hide": false, + "intervalFactor": 2, + "legendFormat": "{{type}}", + "refId": "B", + "step": 60 + }, + { + "expr": "", + "format": "time_series", + "intervalFactor": 2, + "refId": "C" + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Filer Request Duration 95th percentile", + "tooltip": { + "msResolution": true, + "shared": true, + "sort": 0, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "s", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_PROMETHEUS-DEV}", + "editable": true, + "error": false, + "fill": 1, + "grid": {}, + "id": 45, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 4, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "histogram_quantile(0.99, sum(rate(SeaweedFS_filer_request_seconds_bucket[1m])) by (le))", + "format": "time_series", + "hide": false, + "intervalFactor": 2, + "legendFormat": "average", + "refId": "A", + "step": 60 + }, + { + "expr": "histogram_quantile(0.99, sum(rate(SeaweedFS_filer_request_seconds_bucket[1m])) by (le, type))", + "format": "time_series", + "hide": false, + "intervalFactor": 2, + "legendFormat": "{{type}}", + "refId": "B", + "step": 60 + }, + { + "expr": "", + "format": "time_series", + "intervalFactor": 2, + "refId": "C" + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Filer Request Duration 99th percentile", + "tooltip": { + "msResolution": true, + "shared": true, + "sort": 0, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "s", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_PROMETHEUS-DEV}", + "editable": true, + "error": false, + "fill": 0, + "grid": {}, + "id": 2, + "legend": { + 
"alignAsTable": true, + "avg": false, + "current": true, + "hideEmpty": true, + "hideZero": true, + "max": true, + "min": false, + "rightSide": true, + "show": true, + "sideWidth": 250, + "sort": "max", + "sortDesc": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "links": [], + "minSpan": 12, + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + { + "alias": "total", + "lines": false + } + ], + "spaceLength": 10, + "span": 12, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "rate(SeaweedFS_filer_request_total[1m]) * 5", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{type}}", + "refId": "A", + "step": 30 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Filer QPS", + "tooltip": { + "msResolution": true, + "shared": true, + "sort": 2, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "Filer", + "titleSize": "h6" + }, + { + "collapse": false, + "height": 250, + "panels": [ + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_PROMETHEUS-DEV}", + "editable": true, + "error": false, + "fill": 1, + "grid": {}, + "id": 56, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 4, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "histogram_quantile(0.90, sum(rate(SeaweedFS_s3_request_seconds_bucket[1m])) by (le))", + "format": "time_series", + "hide": false, + "intervalFactor": 2, + "legendFormat": "average", + "refId": "A", + "step": 60 + }, + { + "expr": "histogram_quantile(0.90, sum(rate(SeaweedFS_s3_request_seconds_bucket[1m])) by (le, type))", + "format": "time_series", + "hide": false, + "intervalFactor": 2, + "legendFormat": "{{type}}", + "refId": "B", + "step": 60 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "S3 Request Duration 90th percentile", + "tooltip": { + "msResolution": true, + "shared": true, + "sort": 0, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "s", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_PROMETHEUS-DEV}", + "editable": true, + "error": false, + "fill": 1, + "grid": {}, + "id": 57, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + 
"linewidth": 1, + "links": [], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 4, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "histogram_quantile(0.95, sum(rate(SeaweedFS_s3_request_seconds_bucket[1m])) by (le))", + "format": "time_series", + "hide": false, + "intervalFactor": 2, + "legendFormat": "average", + "refId": "A", + "step": 60 + }, + { + "expr": "histogram_quantile(0.95, sum(rate(SeaweedFS_s3_request_seconds_bucket[1m])) by (le, type))", + "format": "time_series", + "hide": false, + "intervalFactor": 2, + "legendFormat": "{{type}}", + "refId": "B", + "step": 60 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "S3 Request Duration 95th percentile", + "tooltip": { + "msResolution": true, + "shared": true, + "sort": 0, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "s", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_PROMETHEUS-DEV}", + "editable": true, + "error": false, + "fill": 1, + "grid": {}, + "id": 58, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 4, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "histogram_quantile(0.99, sum(rate(SeaweedFS_s3_request_seconds_bucket[1m])) by (le))", + "format": "time_series", + "hide": false, + "intervalFactor": 2, + "legendFormat": "average", + "refId": "A", + "step": 60 + }, + { + "expr": "histogram_quantile(0.99, sum(rate(SeaweedFS_s3_request_seconds_bucket[1m])) by (le, type))", + "format": "time_series", + "hide": false, + "intervalFactor": 2, + "legendFormat": "{{type}}", + "refId": "B", + "step": 60 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "S3 Request Duration 99th percentile", + "tooltip": { + "msResolution": true, + "shared": true, + "sort": 0, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "s", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_PROMETHEUS-DEV}", + "editable": true, + "error": false, + "fill": 0, + "grid": {}, + "id": 55, + "legend": { + "alignAsTable": true, + "avg": false, + "current": true, + "hideEmpty": true, + "hideZero": true, + "max": true, + "min": false, + "rightSide": true, + "show": true, + "sideWidth": 250, + "sort": "max", + "sortDesc": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "links": [], + "minSpan": 12, + "nullPointMode": "null as zero", + 
"percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + { + "alias": "total", + "lines": false + } + ], + "spaceLength": 10, + "span": 12, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "rate(SeaweedFS_s3_request_total[1m]) * 5", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{type}}", + "refId": "A", + "step": 30 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "S3 API QPS", + "tooltip": { + "msResolution": true, + "shared": true, + "sort": 2, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_PROMETHEUS-DEV}", + "editable": true, + "error": false, + "fill": 0, + "grid": {}, + "hideTimeOverride": false, + "id": 59, + "legend": { + "alignAsTable": true, + "avg": false, + "current": true, + "hideEmpty": true, + "hideZero": true, + "max": true, + "min": false, + "rightSide": true, + "show": true, + "sideWidth": 250, + "sort": "max", + "sortDesc": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "links": [], + "minSpan": 12, + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + { + "alias": "total", + "lines": false + } + ], + "spaceLength": 10, + "span": 12, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum by (type) (SeaweedFS_s3_request_total{type=~'PUT|COPY|POST|LIST'})*0.000005", + "format": "time_series", + "hide": false, + "intervalFactor": 2, + "legendFormat": "{{type}} requests", + "refId": "A", + "step": 30 + }, + { + "expr": "sum (SeaweedFS_s3_request_total{type=~'PUT|COPY|POST|LIST'})*0.000005", + "format": "time_series", + "hide": false, + "intervalFactor": 2, + "legendFormat": "All PUT, COPY, POST, LIST", + "refId": "C", + "step": 30 + }, + { + "expr": "sum (SeaweedFS_s3_request_total{type!~'PUT|COPY|POST|LIST'})*0.0000004", + "format": "time_series", + "hide": false, + "intervalFactor": 2, + "legendFormat": "GET and all other", + "refId": "B" + }, + { + "expr": "sum by (type) (SeaweedFS_s3_request_total{type!~'PUT|COPY|POST|LIST'})*0.0000004", + "format": "time_series", + "hide": false, + "intervalFactor": 2, + "legendFormat": "{{type}} requests", + "refId": "D" + } + ], + "thresholds": [], + "timeFrom": "1M", + "timeShift": null, + "title": "S3 API Monthly Cost if on AWS", + "tooltip": { + "msResolution": true, + "shared": true, + "sort": 2, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "currencyUSD", + "label": "Cost in US$", + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "currencyUSD", + "label": "Write Cost", + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "S3 Gateway", + "titleSize": "h6" + }, + { + "collapse": true, + "height": 252, + "panels": [ + { + 
"aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_PROMETHEUS-DEV}", + "editable": true, + "error": false, + "fill": 1, + "grid": {}, + "id": 47, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 6, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "histogram_quantile(0.99, sum(rate(SeaweedFS_volumeServer_request_seconds_bucket[1m])) by (le, exported_instance))", + "format": "time_series", + "hide": false, + "intervalFactor": 2, + "legendFormat": "{{exported_instance}}", + "refId": "B" + }, + { + "expr": "histogram_quantile(0.99, sum(rate(SeaweedFS_volumeServer_request_seconds_bucket[1m])) by (le))", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "average", + "refId": "C" + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Volume Server Request Duration 99th percentile", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "s", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_PROMETHEUS-DEV}", + "editable": true, + "error": false, + "fill": 1, + "grid": {}, + "id": 40, + "legend": { + "alignAsTable": true, + "avg": false, + "current": false, + "hideEmpty": true, + "hideZero": true, + "max": false, + "min": false, + "rightSide": true, + "show": true, + "sort": "total", + "sortDesc": true, + "total": true, + "values": true + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 6, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(rate(SeaweedFS_volumeServer_request_total[1m])) by (type)", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{type}}", + "refId": "A", + "step": 4 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Volume Server QPS", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_PROMETHEUS-DEV}", + "fill": 1, + "id": 48, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", 
+ "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 12, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(SeaweedFS_volumeServer_volumes) by (collection, type)", + "format": "time_series", + "hide": false, + "intervalFactor": 2, + "legendFormat": "{{collection}} {{type}}", + "refId": "A" + }, + { + "expr": "sum(SeaweedFS_volumeServer_max_volumes)", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "Total", + "refId": "B" + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Volume Count", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_PROMETHEUS-DEV}", + "fill": 1, + "id": 50, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 12, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(SeaweedFS_volumeServer_total_disk_size) by (collection, type)", + "format": "time_series", + "hide": false, + "intervalFactor": 2, + "legendFormat": "{{collection}} {{type}}", + "refId": "A" + }, + { + "expr": "sum(SeaweedFS_volumeServer_total_disk_size)", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "Total", + "refId": "B" + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Used Disk Space by Collection and Type", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "bytes", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_PROMETHEUS-DEV}", + "fill": 1, + "id": 51, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 12, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(SeaweedFS_volumeServer_total_disk_size) by (exported_instance)", + "format": "time_series", + "hide": false, + "intervalFactor": 2, + "legendFormat": "{{exported_instance}}", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Used Disk Space by Host", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": 
"graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "bytes", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "Volume Server", + "titleSize": "h6" + }, + { + "collapse": true, + "height": 251, + "panels": [ + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_PROMETHEUS-DEV}", + "editable": true, + "error": false, + "fill": 1, + "grid": {}, + "id": 12, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 6, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "histogram_quantile(0.99, sum(rate(SeaweedFS_filerStore_request_seconds_bucket[1m])) by (le, type))", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{type}}", + "refId": "B" + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Filer Store Request Duration 99th percentile", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "s", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_PROMETHEUS-DEV}", + "editable": true, + "error": false, + "fill": 1, + "grid": {}, + "id": 14, + "legend": { + "alignAsTable": true, + "avg": true, + "current": true, + "hideEmpty": false, + "hideZero": false, + "max": false, + "min": false, + "rightSide": true, + "show": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 6, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(rate(SeaweedFS_filerStore_request_total [1m])) by (type)", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{type}}", + "refId": "B" + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Filer Store QPS", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "Filer Store", + 
"titleSize": "h6" + }, + { + "collapse": true, + "height": 242, + "panels": [ + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_PROMETHEUS-DEV}", + "editable": true, + "error": false, + "fill": 1, + "grid": {}, + "id": 52, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 6, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "go_memstats_alloc_bytes{exported_job=\"filer\"}", + "format": "time_series", + "hide": false, + "intervalFactor": 2, + "legendFormat": "bytes allocated", + "refId": "B" + }, + { + "expr": "rate(go_memstats_alloc_bytes_total{exported_job=\"filer\"}[30s])", + "format": "time_series", + "hide": false, + "intervalFactor": 2, + "legendFormat": "alloc rate", + "refId": "A" + }, + { + "expr": "go_memstats_stack_inuse_bytes{exported_job=\"filer\"}", + "format": "time_series", + "hide": false, + "intervalFactor": 2, + "legendFormat": "stack inuse", + "refId": "C" + }, + { + "expr": "go_memstats_heap_inuse_bytes{exported_job=\"filer\"}", + "format": "time_series", + "hide": false, + "intervalFactor": 2, + "legendFormat": "heap inuse", + "refId": "D" + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Filer Go Memory Stats", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "bytes", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_PROMETHEUS-DEV}", + "editable": true, + "error": false, + "fill": 1, + "grid": {}, + "id": 54, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 6, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "go_gc_duration_seconds{exported_job=\"filer\"}", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{quantile}}", + "refId": "B" + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Filer Go GC duration quantiles", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "s", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": 
"${DS_PROMETHEUS-DEV}", + "editable": true, + "error": false, + "fill": 1, + "grid": {}, + "id": 53, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 12, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "go_goroutines{exported_job=\"filer\"}", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{exported_instance}}", + "refId": "B" + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Filer Go Routines", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "none", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "Filer Instances", + "titleSize": "h6" + } + ], + "schemaVersion": 14, + "style": "dark", + "tags": [], + "templating": { + "list": [] + }, + "time": { + "from": "now-30d", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "5s", + "10s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ], + "time_options": [ + "5m", + "15m", + "1h", + "6h", + "12h", + "24h", + "2d", + "7d", + "30d" + ] + }, + "timezone": "browser", + "title": "SeaweedFS", + "version": 2 +} \ No newline at end of file diff --git a/k8s/seaweedfs/templates/_helpers.tpl b/k8s/seaweedfs/templates/_helpers.tpl new file mode 100644 index 000000000..a9ee89f03 --- /dev/null +++ b/k8s/seaweedfs/templates/_helpers.tpl @@ -0,0 +1,151 @@ +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to +this (by the DNS naming spec). If release name contains chart name it will +be used as a full name. +*/}} +{{- define "seaweedfs.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "seaweedfs.chart" -}} +{{- printf "%s-helm" .Chart.Name | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Expand the name of the chart. 
+*/}} +{{- define "seaweedfs.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Inject extra environment vars in the format key:value, if populated +*/}} +{{- define "seaweedfs.extraEnvironmentVars" -}} +{{- if .extraEnvironmentVars -}} +{{- range $key, $value := .extraEnvironmentVars }} +- name: {{ $key }} + value: {{ $value | quote }} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* Return the proper filer image */}} +{{- define "filer.image" -}} +{{- if .Values.filer.imageOverride -}} +{{- $imageOverride := .Values.filer.imageOverride -}} +{{- printf "%s" $imageOverride -}} +{{- else -}} +{{- $registryName := default .Values.image.registry .Values.global.localRegistry | toString -}} +{{- $repositoryName := .Values.image.repository | toString -}} +{{- $name := .Values.global.imageName | toString -}} +{{- $tag := .Chart.AppVersion | toString -}} +{{- printf "%s%s%s:%s" $registryName $repositoryName $name $tag -}} +{{- end -}} +{{- end -}} + +{{/* Return the proper dbSchema image */}} +{{- define "filer.dbSchema.image" -}} +{{- if .Values.filer.dbSchema.imageOverride -}} +{{- $imageOverride := .Values.filer.dbSchema.imageOverride -}} +{{- printf "%s" $imageOverride -}} +{{- else -}} +{{- $registryName := default .Values.global.registry .Values.global.localRegistry | toString -}} +{{- $repositoryName := .Values.global.repository | toString -}} +{{- $name := .Values.filer.dbSchema.imageName | toString -}} +{{- $tag := .Values.filer.dbSchema.imageTag | toString -}} +{{- printf "%s%s%s:%s" $registryName $repositoryName $name $tag -}} +{{- end -}} +{{- end -}} + +{{/* Return the proper master image */}} +{{- define "master.image" -}} +{{- if .Values.master.imageOverride -}} +{{- $imageOverride := .Values.master.imageOverride -}} +{{- printf "%s" $imageOverride -}} +{{- else -}} +{{- $registryName := default .Values.image.registry .Values.global.localRegistry | toString -}} +{{- $repositoryName := .Values.image.repository | toString -}} +{{- $name := .Values.global.imageName | toString -}} +{{- $tag := .Chart.AppVersion | toString -}} +{{- printf "%s%s%s:%s" $registryName $repositoryName $name $tag -}} +{{- end -}} +{{- end -}} + +{{/* Return the proper s3 image */}} +{{- define "s3.image" -}} +{{- if .Values.s3.imageOverride -}} +{{- $imageOverride := .Values.s3.imageOverride -}} +{{- printf "%s" $imageOverride -}} +{{- else -}} +{{- $registryName := default .Values.image.registry .Values.global.localRegistry | toString -}} +{{- $repositoryName := .Values.image.repository | toString -}} +{{- $name := .Values.global.imageName | toString -}} +{{- $tag := .Chart.AppVersion | toString -}} +{{- printf "%s%s%s:%s" $registryName $repositoryName $name $tag -}} +{{- end -}} +{{- end -}} + +{{/* Return the proper volume image */}} +{{- define "volume.image" -}} +{{- if .Values.volume.imageOverride -}} +{{- $imageOverride := .Values.volume.imageOverride -}} +{{- printf "%s" $imageOverride -}} +{{- else -}} +{{- $registryName := default .Values.image.registry .Values.global.localRegistry | toString -}} +{{- $repositoryName := .Values.image.repository | toString -}} +{{- $name := .Values.global.imageName | toString -}} +{{- $tag := .Chart.AppVersion | toString -}} +{{- printf "%s%s%s:%s" $registryName $repositoryName $name $tag -}} +{{- end -}} +{{- end -}} + +{{/* Return the proper cronjob image */}} +{{- define "cronjob.image" -}} +{{- if .Values.cronjob.imageOverride -}} +{{- $imageOverride := .Values.cronjob.imageOverride -}} +{{- printf "%s" 
$imageOverride -}} +{{- else -}} +{{- $registryName := default .Values.image.registry .Values.global.localRegistry | toString -}} +{{- $repositoryName := .Values.image.repository | toString -}} +{{- $name := .Values.global.imageName | toString -}} +{{- $tag := .Chart.AppVersion | toString -}} +{{- printf "%s%s%s:%s" $registryName $repositoryName $name $tag -}} +{{- end -}} +{{- end -}} + + +{{/* check if any PVC exists */}} +{{- define "volume.pvc_exists" -}} +{{- if or (or (eq .Values.volume.data.type "persistentVolumeClaim") (and (eq .Values.volume.idx.type "persistentVolumeClaim") .Values.volume.dir_idx )) (eq .Values.volume.logs.type "persistentVolumeClaim") -}} +{{- printf "true" -}} +{{- else -}} +{{- printf "false" -}} +{{- end -}} +{{- end -}} + +{{/* check if any HostPath exists */}} +{{- define "volume.hostpath_exists" -}} +{{- if or (or (eq .Values.volume.data.type "hostPath") (and (eq .Values.volume.idx.type "hostPath") .Values.volume.dir_idx )) (eq .Values.volume.logs.type "hostPath") -}} +{{- printf "true" -}} +{{- else -}} +{{- if or .Values.global.enableSecurity .Values.volume.extraVolumes -}} +{{- printf "true" -}} +{{- else -}} +{{- printf "false" -}} +{{- end -}} +{{- end -}} +{{- end -}}
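
The image helpers above all resolve an image reference by plain string concatenation, printf "%s%s%s:%s" registry repository name tag, so the registry and repository values are expected to carry their own trailing separators. A hedged sketch of how "filer.image" would resolve; every value below is hypothetical (this diff does not include values.yaml), and only the tag "2.41" follows from Chart.yaml's appVersion:

# hypothetical values.yaml fragment
image:
  registry: "index.docker.io/"    # trailing slash matters: the helper just concatenates
  repository: ""
global:
  imageName: chrislusf/seaweedfs
  localRegistry: ""               # when set, it takes precedence over image.registry
# {{ template "filer.image" . }} would then render to:
#   index.docker.io/chrislusf/seaweedfs:2.41

diff --git a/k8s/seaweedfs/templates/ca-cert.yaml b/k8s/seaweedfs/templates/ca-cert.yaml new file mode 100644 index 000000000..056f01502 --- /dev/null +++ b/k8s/seaweedfs/templates/ca-cert.yaml @@ -0,0 +1,14 @@ +{{- if .Values.global.enableSecurity }} +apiVersion: certmanager.k8s.io/v1alpha1 +kind: Certificate +metadata: + name: {{ template "seaweedfs.name" . }}-ca-cert + namespace: {{ .Release.Namespace }} +spec: + secretName: {{ template "seaweedfs.name" . }}-ca-cert + commonName: "{{ template "seaweedfs.name" . }}-root-ca" + isCA: true + issuerRef: + name: {{ template "seaweedfs.name" . }}-clusterissuer + kind: ClusterIssuer +{{- end }} diff --git a/k8s/seaweedfs/templates/cert-clusterissuer.yaml b/k8s/seaweedfs/templates/cert-clusterissuer.yaml new file mode 100644 index 000000000..d0bd42593 --- /dev/null +++ b/k8s/seaweedfs/templates/cert-clusterissuer.yaml @@ -0,0 +1,8 @@ +{{- if .Values.global.enableSecurity }} +apiVersion: certmanager.k8s.io/v1alpha1 +kind: ClusterIssuer +metadata: + name: {{ template "seaweedfs.name" . }}-clusterissuer +spec: + selfSigned: {} +{{- end }} diff --git a/k8s/seaweedfs/templates/client-cert.yaml b/k8s/seaweedfs/templates/client-cert.yaml new file mode 100644 index 000000000..4d27b5659 --- /dev/null +++ b/k8s/seaweedfs/templates/client-cert.yaml @@ -0,0 +1,33 @@ +{{- if .Values.global.enableSecurity }} +apiVersion: certmanager.k8s.io/v1alpha1 +kind: Certificate +metadata: + name: {{ template "seaweedfs.name" . }}-client-cert + namespace: {{ .Release.Namespace }} +spec: + secretName: {{ template "seaweedfs.name" . }}-client-cert + issuerRef: + name: {{ template "seaweedfs.name" . }}-clusterissuer + kind: ClusterIssuer + commonName: {{ .Values.certificates.commonName }} + organization: + - "SeaweedFS CA" + dnsNames: + - '*.{{ .Release.Namespace }}' + - '*.{{ .Release.Namespace }}.svc' + - '*.{{ .Release.Namespace }}.svc.cluster.local' + - '*.{{ template "seaweedfs.name" . }}-master' + - '*.{{ template "seaweedfs.name" . }}-master.{{ .Release.Namespace }}' + - '*.{{ template "seaweedfs.name" . }}-master.{{ .Release.Namespace }}.svc' + - '*.{{ template "seaweedfs.name" . }}-master.{{ .Release.Namespace }}.svc.cluster.local' +{{- if .Values.certificates.ipAddresses }} + ipAddresses: + {{- range .Values.certificates.ipAddresses }} + - {{ .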
}} + {{- end }} +{{- end }} + keyAlgorithm: {{ .Values.certificates.keyAlgorithm }} + keySize: {{ .Values.certificates.keySize }} + duration: {{ .Values.certificates.duration }} + renewBefore: {{ .Values.certificates.renewBefore }} +{{- end }} diff --git a/k8s/seaweedfs/templates/cronjob.yaml b/k8s/seaweedfs/templates/cronjob.yaml new file mode 100644 index 000000000..4caf4bad1 --- /dev/null +++ b/k8s/seaweedfs/templates/cronjob.yaml @@ -0,0 +1,58 @@ +{{- if .Values.cronjob }} +{{- if .Values.cronjob.enabled }} +apiVersion: batch/v1beta1 +kind: CronJob +metadata: + name: {{ include "seaweedfs.fullname" . }}-cronjob +spec: + schedule: "{{ .Values.cronjob.schedule }}" + startingDeadlineSeconds: 200 + concurrencyPolicy: Forbid + failedJobsHistoryLimit: 2 + successfulJobsHistoryLimit: 2 + jobTemplate: + spec: + backoffLimit: 2 + template: + spec: + {{- if .Values.cronjob.nodeSelector }} + nodeSelector: + {{ tpl .Values.cronjob.nodeSelector . | indent 12 | trim }} + {{- end }} + {{- if .Values.cronjob.tolerations }} + tolerations: + {{ tpl .Values.cronjob.tolerations . | nindent 12 | trim }} + {{- end }} + restartPolicy: OnFailure + containers: + - name: shell + image: {{ template "cronjob.image" . }} + imagePullPolicy: {{ default "IfNotPresent" .Values.global.imagePullPolicy }} + resources: + {{- toYaml .Values.cronjob.resources | nindent 16 }} + command: + - sh + - -c + - | + set -ex + echo -e "lock\n\ + volume.balance -force \ + {{ if .Values.volume.dataCenter }} -dataCenter {{ .Values.volume.dataCenter }}{{ end }}\ + {{ if .Values.cronjob.collection }} -collection {{ .Values.cronjob.collection }}{{ end }}\n\ + {{- if .Values.cronjob.enableFixReplication }} + volume.fix.replication -collectionPattern={{ .Values.cronjob.collectionPattern }} \n\ + {{- end }} + unlock\n" | \ + /usr/bin/weed shell \ + {{- if .Values.cronjob.master }} + -master {{ .Values.cronjob.master }} \ + {{- else }} + -master {{ template "seaweedfs.name" . }}-master.{{ .Release.Namespace }}.svc:{{ .Values.master.port }} \ + {{- end }} + {{- if .Values.cronjob.filer }} + -filer {{ .Values.cronjob.filer }} + {{- else }} + -filer {{ template "seaweedfs.name" . }}-filer.{{ .Release.Namespace }}.svc:{{ .Values.filer.port }} + {{- end }} +{{- end }} +{{- end }}
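
To make the string-building above concrete, here is roughly what the cronjob's command renders to. The release name "seaweedfs", namespace "default", and ports 9333/8888 are illustrative assumptions (values.yaml is not part of this diff); the weed shell commands themselves are exactly the ones templated above, with dataCenter and collection unset and enableFixReplication on:

# sketch of the rendered cronjob command (hypothetical values, see note above)
command:
  - sh
  - -c
  - |
    set -ex
    echo -e "lock\n\
    volume.balance -force \n\
    volume.fix.replication -collectionPattern= \n\
    unlock\n" | \
    /usr/bin/weed shell \
    -master seaweedfs-master.default.svc:9333 \
    -filer seaweedfs-filer.default.svc:8888

diff --git a/k8s/seaweedfs/templates/filer-cert.yaml b/k8s/seaweedfs/templates/filer-cert.yaml new file mode 100644 index 000000000..855183c54 --- /dev/null +++ b/k8s/seaweedfs/templates/filer-cert.yaml @@ -0,0 +1,33 @@ +{{- if .Values.global.enableSecurity }} +apiVersion: certmanager.k8s.io/v1alpha1 +kind: Certificate +metadata: + name: {{ template "seaweedfs.name" . }}-filer-cert + namespace: {{ .Release.Namespace }} +spec: + secretName: {{ template "seaweedfs.name" . }}-filer-cert + issuerRef: + name: {{ template "seaweedfs.name" . }}-clusterissuer + kind: ClusterIssuer + commonName: {{ .Values.certificates.commonName }} + organization: + - "SeaweedFS CA" + dnsNames: + - '*.{{ .Release.Namespace }}' + - '*.{{ .Release.Namespace }}.svc' + - '*.{{ .Release.Namespace }}.svc.cluster.local' + - '*.{{ template "seaweedfs.name" . }}-master' + - '*.{{ template "seaweedfs.name" . }}-master.{{ .Release.Namespace }}' + - '*.{{ template "seaweedfs.name" . }}-master.{{ .Release.Namespace }}.svc' + - '*.{{ template "seaweedfs.name" . }}-master.{{ .Release.Namespace }}.svc.cluster.local' +{{- if .Values.certificates.ipAddresses }} + ipAddresses: + {{- range .Values.certificates.ipAddresses }} + - {{ .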
}} + {{- end }} +{{- end }} + keyAlgorithm: {{ .Values.certificates.keyAlgorithm }} + keySize: {{ .Values.certificates.keySize }} + duration: {{ .Values.certificates.duration }} + renewBefore: {{ .Values.certificates.renewBefore }} +{{- end }} diff --git a/k8s/seaweedfs/templates/filer-service-client.yaml b/k8s/seaweedfs/templates/filer-service-client.yaml new file mode 100644 index 000000000..929b6f8bc --- /dev/null +++ b/k8s/seaweedfs/templates/filer-service-client.yaml @@ -0,0 +1,31 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "seaweedfs.name" . }}-filer-client + namespace: {{ .Release.Namespace }} + labels: + app: {{ template "seaweedfs.name" . }} + component: filer +{{- if .Values.filer.metricsPort }} + monitoring: "true" +{{- end }} +spec: + clusterIP: None + ports: + - name: "swfs-filer" + port: {{ .Values.filer.port }} + targetPort: {{ .Values.filer.port }} + protocol: TCP + - name: "swfs-filer-grpc" + port: {{ .Values.filer.grpcPort }} + targetPort: {{ .Values.filer.grpcPort }} + protocol: TCP +{{- if .Values.filer.metricsPort }} + - name: "swfs-filer-metrics" + port: {{ .Values.filer.metricsPort }} + targetPort: {{ .Values.filer.metricsPort }} + protocol: TCP +{{- end }} + selector: + app: {{ template "seaweedfs.name" . }} + component: filer diff --git a/k8s/seaweedfs/templates/filer-service.yaml b/k8s/seaweedfs/templates/filer-service.yaml new file mode 100644 index 000000000..45035fc27 --- /dev/null +++ b/k8s/seaweedfs/templates/filer-service.yaml @@ -0,0 +1,25 @@ +apiVersion: v1 +kind: Service +metadata: + annotations: + service.alpha.kubernetes.io/tolerate-unready-endpoints: "true" + name: {{ template "seaweedfs.name" . }}-filer + namespace: {{ .Release.Namespace }} + labels: + app: {{ template "seaweedfs.name" . }} + component: filer +spec: + clusterIP: None + publishNotReadyAddresses: true + ports: + - name: "swfs-filer" + port: {{ .Values.filer.port }} + targetPort: {{ .Values.filer.port }} + protocol: TCP + - name: "swfs-filer-grpc" + port: {{ .Values.filer.grpcPort }} + targetPort: {{ .Values.filer.grpcPort }} + protocol: TCP + selector: + app: {{ template "seaweedfs.name" . }} + component: filer diff --git a/k8s/seaweedfs/templates/filer-servicemonitor.yaml b/k8s/seaweedfs/templates/filer-servicemonitor.yaml new file mode 100644 index 000000000..f07f6ebef --- /dev/null +++ b/k8s/seaweedfs/templates/filer-servicemonitor.yaml @@ -0,0 +1,18 @@ +{{- if .Values.filer.metricsPort }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ template "seaweedfs.name" . }}-filer + namespace: {{ .Release.Namespace }} + labels: + app: {{ template "seaweedfs.name" . }} + component: filer +spec: + endpoints: + - interval: 30s + port: swfs-filer-metrics + scrapeTimeout: 5s + selector: + app: {{ template "seaweedfs.name" . }} + component: filer +{{- end }} \ No newline at end of file diff --git a/k8s/seaweedfs/templates/filer-statefulset.yaml b/k8s/seaweedfs/templates/filer-statefulset.yaml new file mode 100644 index 000000000..fc1253479 --- /dev/null +++ b/k8s/seaweedfs/templates/filer-statefulset.yaml @@ -0,0 +1,267 @@ +{{- if .Values.filer.enabled }} +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ template "seaweedfs.name" . }}-filer + namespace: {{ .Release.Namespace }} + labels: + app: {{ template "seaweedfs.name" . }} + chart: {{ template "seaweedfs.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +spec: + serviceName: {{ template "seaweedfs.name" . 
}}-filer + podManagementPolicy: Parallel + replicas: {{ .Values.filer.replicas }} + {{- if (gt (int .Values.filer.updatePartition) 0) }} + updateStrategy: + type: RollingUpdate + rollingUpdate: + partition: {{ .Values.filer.updatePartition }} + {{- end }} + selector: + matchLabels: + app: {{ template "seaweedfs.name" . }} + chart: {{ template "seaweedfs.chart" . }} + release: {{ .Release.Name }} + component: filer + template: + metadata: + labels: + app: {{ template "seaweedfs.name" . }} + chart: {{ template "seaweedfs.chart" . }} + release: {{ .Release.Name }} + component: filer + spec: + restartPolicy: {{ default .Values.global.restartPolicy .Values.filer.restartPolicy }} + {{- if .Values.filer.affinity }} + affinity: + {{ tpl .Values.filer.affinity . | nindent 8 | trim }} + {{- end }} + {{- if .Values.filer.tolerations }} + tolerations: + {{ tpl .Values.filer.tolerations . | nindent 8 | trim }} + {{- end }} + {{- if .Values.global.imagePullSecrets }} + imagePullSecrets: + - name: {{ .Values.global.imagePullSecrets }} + {{- end }} + serviceAccountName: seaweefds-rw-sa #hack for delete pod master after migration + terminationGracePeriodSeconds: 60 + {{- if .Values.filer.priorityClassName }} + priorityClassName: {{ .Values.filer.priorityClassName | quote }} + {{- end }} + enableServiceLinks: false + containers: + - name: seaweedfs + image: {{ template "filer.image" . }} + imagePullPolicy: {{ default "IfNotPresent" .Values.global.imagePullPolicy }} + env: + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: WEED_MYSQL_USERNAME + valueFrom: + secretKeyRef: + name: secret-seaweedfs-db + key: user + - name: WEED_MYSQL_PASSWORD + valueFrom: + secretKeyRef: + name: secret-seaweedfs-db + key: password + - name: SEAWEEDFS_FULLNAME + value: "{{ template "seaweedfs.name" . 
}}" + {{- if .Values.filer.extraEnvironmentVars }} + {{- range $key, $value := .Values.filer.extraEnvironmentVars }} + - name: {{ $key }} + value: {{ $value | quote }} + {{- end }} + {{- end }} + {{- if .Values.global.extraEnvironmentVars }} + {{- range $key, $value := .Values.global.extraEnvironmentVars }} + - name: {{ $key }} + value: {{ $value | quote }} + {{- end }} + {{- end }} + command: + - "/bin/sh" + - "-ec" + - | + exec /usr/bin/weed -logdir=/logs \ + {{- if .Values.filer.loggingOverrideLevel }} + -v={{ .Values.filer.loggingOverrideLevel }} \ + {{- else }} + -v={{ .Values.global.loggingLevel }} \ + {{- end }} + filer \ + -port={{ .Values.filer.port }} \ + {{- if .Values.filer.metricsPort }} + -metricsPort {{ .Values.filer.metricsPort }} \ + {{- end }} + {{- if .Values.filer.redirectOnRead }} + -redirectOnRead \ + {{- end }} + {{- if .Values.filer.disableHttp }} + -disableHttp \ + {{- end }} + {{- if .Values.filer.disableDirListing }} + -disableDirListing \ + {{- end }} + -dirListLimit={{ .Values.filer.dirListLimit }} \ + {{- if .Values.global.enableReplication }} + -defaultReplicaPlacement={{ .Values.global.replicationPlacment }} \ + {{- else }} + -defaultReplicaPlacement={{ .Values.filer.defaultReplicaPlacement }} \ + {{- end }} + {{- if .Values.filer.disableDirListing }} + -disableDirListing \ + {{- end }} + {{- if .Values.filer.maxMB }} + -maxMB={{ .Values.filer.maxMB }} \ + {{- end }} + {{- if .Values.filer.encryptVolumeData }} + -encryptVolumeData \ + {{- end }} + -ip=${POD_IP} \ + {{- if .Values.filer.enable_peers }} + {{- if gt (.Values.filer.replicas | int) 1 }} + -peers=$(echo -n "{{ range $index := until (.Values.filer.replicas | int) }}${SEAWEEDFS_FULLNAME}-filer-{{ $index }}.${SEAWEEDFS_FULLNAME}-filer:{{ $.Values.filer.port }}{{ if lt $index (sub ($.Values.filer.replicas | int) 1) }},{{ end }}{{ end }}" | sed "s/$HOSTNAME.${SEAWEEDFS_FULLNAME}-filer:{{ $.Values.filer.port }}//" | sed 's/,$//; 's/^,//'; s/,,/,/;' ) \ + {{- end }} + {{- end }} + {{- if .Values.filer.s3.enabled }} + -s3 \ + -s3.port={{ .Values.filer.s3.port }} \ + {{- if .Values.filer.s3.domainName }} + -s3.domainName={{ .Values.filer.s3.domainName }} \ + {{- end }} + {{- if .Values.global.enableSecurity }} + -s3.cert.file=/usr/local/share/ca-certificates/client/tls.crt \ + -s3.key.file=/usr/local/share/ca-certificates/client/tls.key \ + {{- end }} + {{- if .Values.filer.s3.allowEmptyFolder }} + -s3.allowEmptyFolder={{ .Values.filer.s3.allowEmptyFolder }} \ + {{- end }} + {{- if .Values.filer.s3.enableAuth }} + -s3.config=/etc/sw/seaweedfs_s3_config \ + {{- end }} + {{- end }} + -master={{ range $index := until (.Values.master.replicas | int) }}${SEAWEEDFS_FULLNAME}-master-{{ $index }}.${SEAWEEDFS_FULLNAME}-master:{{ $.Values.master.port }}{{ if lt $index (sub ($.Values.master.replicas | int) 1) }},{{ end }}{{ end }} + {{- if or (.Values.global.enableSecurity) (.Values.filer.extraVolumeMounts) }} + volumeMounts: + - name: seaweedfs-filer-log-volume + mountPath: "/logs/" + - mountPath: /etc/sw + name: config-users + readOnly: true + {{- if .Values.global.enableSecurity }} + - name: security-config + readOnly: true + mountPath: /etc/seaweedfs/security.toml + subPath: security.toml + - name: ca-cert + readOnly: true + mountPath: /usr/local/share/ca-certificates/ca/ + - name: master-cert + readOnly: true + mountPath: /usr/local/share/ca-certificates/master/ + - name: volume-cert + readOnly: true + mountPath: /usr/local/share/ca-certificates/volume/ + - name: filer-cert + readOnly: true + mountPath: 
/usr/local/share/ca-certificates/filer/ + - name: client-cert + readOnly: true + mountPath: /usr/local/share/ca-certificates/client/ + {{- end }} + {{ tpl .Values.filer.extraVolumeMounts . | nindent 12 | trim }} + {{- end }} + ports: + - containerPort: {{ .Values.filer.port }} + name: swfs-filer + - containerPort: {{ .Values.filer.grpcPort }} + #name: swfs-filer-grpc + readinessProbe: + httpGet: + path: / + port: {{ .Values.filer.port }} + scheme: HTTP + initialDelaySeconds: 10 + periodSeconds: 15 + successThreshold: 1 + failureThreshold: 100 + timeoutSeconds: 10 + livenessProbe: + httpGet: + path: / + port: {{ .Values.filer.port }} + scheme: HTTP + initialDelaySeconds: 20 + periodSeconds: 30 + successThreshold: 1 + failureThreshold: 5 + timeoutSeconds: 10 + {{- if .Values.filer.resources }} + resources: + {{ tpl .Values.filer.resources . | nindent 12 | trim }} + {{- end }} + volumes: + - name: seaweedfs-filer-log-volume + hostPath: + path: /storage/logs/seaweedfs/filer + type: DirectoryOrCreate + - name: db-schema-config-volume + configMap: + name: seaweedfs-db-init-config + - name: config-users + secret: + defaultMode: 420 + secretName: seaweedfs-s3-secret + {{- if .Values.global.enableSecurity }} + - name: security-config + configMap: + name: {{ template "seaweedfs.name" . }}-security-config + - name: ca-cert + secret: + secretName: {{ template "seaweedfs.name" . }}-ca-cert + - name: master-cert + secret: + secretName: {{ template "seaweedfs.name" . }}-master-cert + - name: volume-cert + secret: + secretName: {{ template "seaweedfs.name" . }}-volume-cert + - name: filer-cert + secret: + secretName: {{ template "seaweedfs.name" . }}-filer-cert + - name: client-cert + secret: + secretName: {{ template "seaweedfs.name" . }}-client-cert + {{- end }} + {{ tpl .Values.filer.extraVolumes . | indent 8 | trim }} + {{- if .Values.filer.nodeSelector }} + nodeSelector: + {{ tpl .Values.filer.nodeSelector . | indent 8 | trim }} + {{- end }} +{{/* volumeClaimTemplates:*/}} +{{/* - metadata:*/}} +{{/* name: data-{{ .Release.Namespace }}*/}} +{{/* spec:*/}} +{{/* accessModes:*/}} +{{/* - ReadWriteOnce*/}} +{{/* resources:*/}} +{{/* requests:*/}} +{{/* storage: {{ .Values.filer.storage }}*/}} +{{/* {{- if .Values.filer.storageClass }}*/}} +{{/* storageClassName: {{ .Values.filer.storageClass }}*/}} +{{/* {{- end }}*/}} +{{- end }} diff --git a/k8s/seaweedfs/templates/ingress.yaml b/k8s/seaweedfs/templates/ingress.yaml new file mode 100644 index 000000000..dcd52c138 --- /dev/null +++ b/k8s/seaweedfs/templates/ingress.yaml @@ -0,0 +1,59 @@ +apiVersion: extensions/v1beta1 +kind: Ingress +metadata: + name: ingress-{{ template "seaweedfs.name" . 
}}-filer + annotations: + kubernetes.io/ingress.class: "nginx" + nginx.ingress.kubernetes.io/auth-type: "basic" + nginx.ingress.kubernetes.io/auth-secret: "default/ingress-basic-auth-secret" + nginx.ingress.kubernetes.io/auth-realm: 'Authentication Required - SW-Filer' + nginx.ingress.kubernetes.io/service-upstream: "true" + nginx.ingress.kubernetes.io/rewrite-target: /$1 + nginx.ingress.kubernetes.io/use-regex: "true" + nginx.ingress.kubernetes.io/enable-rewrite-log: "true" + nginx.ingress.kubernetes.io/ssl-redirect: "false" + nginx.ingress.kubernetes.io/force-ssl-redirect: "false" + nginx.ingress.kubernetes.io/configuration-snippet: | + sub_filter '<head>' '<head> <base href="/sw-filer/">'; #add base url + sub_filter '="/' '="./'; #make absolute paths to relative + sub_filter '=/' '=./'; + sub_filter '/seaweedfsstatic' './seaweedfsstatic'; + sub_filter_once off; +spec: + rules: + - http: + paths: + - path: /sw-filer/?(.*) + backend: + serviceName: {{ template "seaweedfs.name" . }}-filer + servicePort: {{ .Values.filer.port }} +--- +apiVersion: extensions/v1beta1 +kind: Ingress +metadata: + name: ingress-{{ template "seaweedfs.name" . }}-master + annotations: + kubernetes.io/ingress.class: "nginx" + nginx.ingress.kubernetes.io/auth-type: "basic" + nginx.ingress.kubernetes.io/auth-secret: "default/ingress-basic-auth-secret" + nginx.ingress.kubernetes.io/auth-realm: 'Authentication Required - SW-Master' + nginx.ingress.kubernetes.io/service-upstream: "true" + nginx.ingress.kubernetes.io/rewrite-target: /$1 + nginx.ingress.kubernetes.io/use-regex: "true" + nginx.ingress.kubernetes.io/enable-rewrite-log: "true" + nginx.ingress.kubernetes.io/ssl-redirect: "false" + nginx.ingress.kubernetes.io/force-ssl-redirect: "false" + nginx.ingress.kubernetes.io/configuration-snippet: | + sub_filter '<head>' '<head> <base href="/sw-master/">'; #add base url + sub_filter '="/' '="./'; #make absolute paths to relative + sub_filter '=/' '=./'; + sub_filter '/seaweedfsstatic' './seaweedfsstatic'; + sub_filter_once off; +spec: + rules: + - http: + paths: + - path: /sw-master/?(.*) + backend: + serviceName: {{ template "seaweedfs.name" . }}-master + servicePort: {{ .Values.master.port }}
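
Both ingresses pair the regex path with rewrite-target: /$1, so the UIs are served under a /sw-* prefix while the backends still see root-relative URLs; the configuration-snippet then patches the returned HTML so its links survive the prefixing. A sketch of the effective request flow, with a hypothetical hostname and file path:

# effective nginx behavior for the filer ingress (illustrative host and path):
#   GET http://gateway.example.com/sw-filer/some/dir/file.txt
#   matches path /sw-filer/?(.*)  ->  $1 = "some/dir/file.txt"
#   rewrite-target /$1 proxies it to the filer service as GET /some/dir/file.txt
# the sub_filter rules then inject <base href="/sw-filer/"> into returned HTML and
# relativize absolute links so the UI keeps working under the /sw-filer/ prefix

diff --git a/k8s/seaweedfs/templates/master-cert.yaml b/k8s/seaweedfs/templates/master-cert.yaml new file mode 100644 index 000000000..a8b0fc1d1 --- /dev/null +++ b/k8s/seaweedfs/templates/master-cert.yaml @@ -0,0 +1,33 @@ +{{- if .Values.global.enableSecurity }} +apiVersion: certmanager.k8s.io/v1alpha1 +kind: Certificate +metadata: + name: {{ template "seaweedfs.name" . }}-master-cert + namespace: {{ .Release.Namespace }} +spec: + secretName: {{ template "seaweedfs.name" . }}-master-cert + issuerRef: + name: {{ template "seaweedfs.name" . }}-clusterissuer + kind: ClusterIssuer + commonName: {{ .Values.certificates.commonName }} + organization: + - "SeaweedFS CA" + dnsNames: + - '*.{{ .Release.Namespace }}' + - '*.{{ .Release.Namespace }}.svc' + - '*.{{ .Release.Namespace }}.svc.cluster.local' + - '*.{{ template "seaweedfs.name" . }}-master' + - '*.{{ template "seaweedfs.name" . }}-master.{{ .Release.Namespace }}' + - '*.{{ template "seaweedfs.name" . }}-master.{{ .Release.Namespace }}.svc' + - '*.{{ template "seaweedfs.name" . }}-master.{{ .Release.Namespace }}.svc.cluster.local' +{{- if .Values.certificates.ipAddresses }} + ipAddresses: + {{- range .Values.certificates.ipAddresses }} + - {{ .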
}} + {{- end }} +{{- end }} + keyAlgorithm: {{ .Values.certificates.keyAlgorithm }} + keySize: {{ .Values.certificates.keySize }} + duration: {{ .Values.certificates.duration }} + renewBefore: {{ .Values.certificates.renewBefore }} +{{- end }} diff --git a/k8s/seaweedfs/templates/master-service.yaml b/k8s/seaweedfs/templates/master-service.yaml new file mode 100644 index 000000000..0ce467538 --- /dev/null +++ b/k8s/seaweedfs/templates/master-service.yaml @@ -0,0 +1,25 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "seaweedfs.name" . }}-master + namespace: {{ .Release.Namespace }} + labels: + app: {{ template "seaweedfs.name" . }} + component: master + annotations: + service.alpha.kubernetes.io/tolerate-unready-endpoints: "true" +spec: + clusterIP: None + publishNotReadyAddresses: true + ports: + - name: "swfs-master" + port: {{ .Values.master.port }} + targetPort: {{ .Values.master.port }} + protocol: TCP + - name: "swfs-master-grpc" + port: {{ .Values.master.grpcPort }} + targetPort: {{ .Values.master.grpcPort }} + protocol: TCP + selector: + app: {{ template "seaweedfs.name" . }} + component: master diff --git a/k8s/seaweedfs/templates/master-statefulset.yaml b/k8s/seaweedfs/templates/master-statefulset.yaml new file mode 100644 index 000000000..e5a7a537a --- /dev/null +++ b/k8s/seaweedfs/templates/master-statefulset.yaml @@ -0,0 +1,227 @@ +{{- if .Values.master.enabled }} +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ template "seaweedfs.name" . }}-master + namespace: {{ .Release.Namespace }} + labels: + app: {{ template "seaweedfs.name" . }} + chart: {{ template "seaweedfs.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +spec: + serviceName: {{ template "seaweedfs.name" . }}-master + podManagementPolicy: Parallel + replicas: {{ .Values.master.replicas }} + {{- if (gt (int .Values.master.updatePartition) 0) }} + updateStrategy: + type: RollingUpdate + rollingUpdate: + partition: {{ .Values.master.updatePartition }} + {{- end }} + selector: + matchLabels: + app: {{ template "seaweedfs.name" . }} + chart: {{ template "seaweedfs.chart" . }} + release: {{ .Release.Name }} + component: master + template: + metadata: + labels: + app: {{ template "seaweedfs.name" . }} + chart: {{ template "seaweedfs.chart" . }} + release: {{ .Release.Name }} + component: master + spec: + restartPolicy: {{ default .Values.global.restartPolicy .Values.master.restartPolicy }} + {{- if .Values.master.affinity }} + affinity: + {{ tpl .Values.master.affinity . | nindent 8 | trim }} + {{- end }} + {{- if .Values.master.tolerations }} + tolerations: + {{ tpl .Values.master.tolerations . | nindent 8 | trim }} + {{- end }} + {{- if .Values.global.imagePullSecrets }} + imagePullSecrets: + - name: {{ .Values.global.imagePullSecrets }} + {{- end }} + terminationGracePeriodSeconds: 60 + {{- if .Values.master.priorityClassName }} + priorityClassName: {{ .Values.master.priorityClassName | quote }} + {{- end }} + enableServiceLinks: false + containers: + - name: seaweedfs + image: {{ template "master.image" . }} + imagePullPolicy: {{ default "IfNotPresent" .Values.global.imagePullPolicy }} + env: + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: SEAWEEDFS_FULLNAME + value: "{{ template "seaweedfs.name" . 
}}" + {{- if .Values.master.extraEnvironmentVars }} + {{- range $key, $value := .Values.master.extraEnvironmentVars }} + - name: {{ $key }} + value: {{ $value | quote }} + {{- end }} + {{- end }} + {{- if .Values.global.extraEnvironmentVars }} + {{- range $key, $value := .Values.global.extraEnvironmentVars }} + - name: {{ $key }} + value: {{ $value | quote }} + {{- end }} + {{- end }} + command: + - "/bin/sh" + - "-ec" + - | + exec /usr/bin/weed -logdir=/logs \ + {{- if .Values.master.loggingOverrideLevel }} + -v={{ .Values.master.loggingOverrideLevel }} \ + {{- else }} + -v={{ .Values.global.loggingLevel }} \ + {{- end }} + master \ + -port={{ .Values.master.port }} \ + -mdir=/data \ + -ip.bind={{ .Values.master.ipBind }} \ + {{- if .Values.global.enableReplication }} + -defaultReplication={{ .Values.global.replicationPlacment }} \ + {{- else }} + -defaultReplication={{ .Values.master.defaultReplication }} \ + {{- end }} + {{- if .Values.master.volumePreallocate }} + -volumePreallocate \ + {{- end }} + {{- if .Values.global.monitoring.enabled }} + -metrics.address="{{ .Values.global.monitoring.gatewayHost }}:{{ .Values.global.monitoring.gatewayPort }}" \ + {{- end }} + -volumeSizeLimitMB={{ .Values.master.volumeSizeLimitMB }} \ + {{- if .Values.master.disableHttp }} + -disableHttp \ + {{- end }} + {{- if .Values.master.pulseSeconds }} + -pulseSeconds={{ .Values.master.pulseSeconds }} \ + {{- end }} + {{- if .Values.master.garbageThreshold }} + -garbageThreshold={{ .Values.master.garbageThreshold }} \ + {{- end }} + {{- if .Values.master.metricsIntervalSec }} + -metrics.intervalSeconds={{ .Values.master.metricsIntervalSec }} \ + {{- end }} + -ip=${POD_NAME}.${SEAWEEDFS_FULLNAME}-master \ + -peers={{ range $index := until (.Values.master.replicas | int) }}${SEAWEEDFS_FULLNAME}-master-{{ $index }}.${SEAWEEDFS_FULLNAME}-master:{{ $.Values.master.port }}{{ if lt $index (sub ($.Values.master.replicas | int) 1) }},{{ end }}{{ end }} + volumeMounts: + - name : data-{{ .Release.Namespace }} + mountPath: /data + - name: seaweedfs-master-log-volume + mountPath: "/logs/" + {{- if .Values.global.enableSecurity }} + - name: security-config + readOnly: true + mountPath: /etc/seaweedfs/security.toml + subPath: security.toml + - name: ca-cert + readOnly: true + mountPath: /usr/local/share/ca-certificates/ca/ + - name: master-cert + readOnly: true + mountPath: /usr/local/share/ca-certificates/master/ + - name: volume-cert + readOnly: true + mountPath: /usr/local/share/ca-certificates/volume/ + - name: filer-cert + readOnly: true + mountPath: /usr/local/share/ca-certificates/filer/ + - name: client-cert + readOnly: true + mountPath: /usr/local/share/ca-certificates/client/ + {{- end }} + {{ tpl .Values.master.extraVolumeMounts . | nindent 12 | trim }} + ports: + - containerPort: {{ .Values.master.port }} + name: swfs-master + - containerPort: {{ .Values.master.grpcPort }} + #name: swfs-master-grpc + readinessProbe: + httpGet: + path: /cluster/status + port: {{ .Values.master.port }} + scheme: HTTP + initialDelaySeconds: 10 + periodSeconds: 45 + successThreshold: 2 + failureThreshold: 100 + timeoutSeconds: 10 + livenessProbe: + httpGet: + path: /cluster/status + port: {{ .Values.master.port }} + scheme: HTTP + initialDelaySeconds: 20 + periodSeconds: 30 + successThreshold: 1 + failureThreshold: 4 + timeoutSeconds: 10 + {{- if .Values.master.resources }} + resources: + {{ tpl .Values.master.resources . 
| nindent 12 | trim }} + {{- end }} + volumes: + - name: seaweedfs-master-log-volume + hostPath: + path: /storage/logs/seaweedfs/master + type: DirectoryOrCreate + - name: data-{{ .Release.Namespace }} + hostPath: + path: /ssd/seaweed-master/ + type: DirectoryOrCreate + {{- if .Values.global.enableSecurity }} + - name: security-config + configMap: + name: {{ template "seaweedfs.name" . }}-security-config + - name: ca-cert + secret: + secretName: {{ template "seaweedfs.name" . }}-ca-cert + - name: master-cert + secret: + secretName: {{ template "seaweedfs.name" . }}-master-cert + - name: volume-cert + secret: + secretName: {{ template "seaweedfs.name" . }}-volume-cert + - name: filer-cert + secret: + secretName: {{ template "seaweedfs.name" . }}-filer-cert + - name: client-cert + secret: + secretName: {{ template "seaweedfs.name" . }}-client-cert + {{- end }} + {{ tpl .Values.master.extraVolumes . | indent 8 | trim }} + {{- if .Values.master.nodeSelector }} + nodeSelector: + {{ tpl .Values.master.nodeSelector . | indent 8 | trim }} + {{- end }} +{{/* volumeClaimTemplates:*/}} +{{/* - metadata:*/}} +{{/* name: data-{{ .Release.Namespace }}*/}} +{{/* spec:*/}} +{{/* accessModes:*/}} +{{/* - ReadWriteOnce*/}} +{{/* resources:*/}} +{{/* requests:*/}} +{{/* storage: {{ .Values.master.storage }}*/}} +{{/* {{- if .Values.master.storageClass }}*/}} +{{/* storageClassName: {{ .Values.master.storageClass }}*/}} +{{/* {{- end }}*/}} +{{- end }} diff --git a/k8s/seaweedfs/templates/s3-deployment.yaml b/k8s/seaweedfs/templates/s3-deployment.yaml new file mode 100644 index 000000000..b513e937b --- /dev/null +++ b/k8s/seaweedfs/templates/s3-deployment.yaml @@ -0,0 +1,188 @@ +{{- if .Values.s3.enabled }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ template "seaweedfs.name" . }}-s3 + namespace: {{ .Release.Namespace }} + labels: + app: {{ template "seaweedfs.name" . }} + chart: {{ template "seaweedfs.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +spec: + serviceName: {{ template "seaweedfs.name" . }}-s3 + replicas: {{ .Values.s3.replicas }} + selector: + matchLabels: + app: {{ template "seaweedfs.name" . }} + chart: {{ template "seaweedfs.chart" . }} + release: {{ .Release.Name }} + component: s3 + template: + metadata: + labels: + app: {{ template "seaweedfs.name" . }} + chart: {{ template "seaweedfs.chart" . }} + release: {{ .Release.Name }} + component: s3 + spec: + restartPolicy: {{ default .Values.global.restartPolicy .Values.s3.restartPolicy }} + {{- if .Values.s3.tolerations }} + tolerations: + {{ tpl .Values.s3.tolerations . | nindent 8 | trim }} + {{- end }} + {{- if .Values.global.imagePullSecrets }} + imagePullSecrets: + - name: {{ .Values.global.imagePullSecrets }} + {{- end }} + terminationGracePeriodSeconds: 10 + {{- if .Values.s3.priorityClassName }} + priorityClassName: {{ .Values.s3.priorityClassName | quote }} + {{- end }} + enableServiceLinks: false + containers: + - name: seaweedfs + image: {{ template "s3.image" . }} + imagePullPolicy: {{ default "IfNotPresent" .Values.global.imagePullPolicy }} + env: + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: SEAWEEDFS_FULLNAME + value: "{{ template "seaweedfs.name" . 
}}" + {{- if .Values.global.extraEnvironmentVars }} + {{- range $key, $value := .Values.global.extraEnvironmentVars }} + - name: {{ $key }} + value: {{ $value | quote }} + {{- end }} + {{- end }} + command: + - "/bin/sh" + - "-ec" + - | + exec /usr/bin/weed -logdir=/logs \ + {{- if .Values.s3.loggingOverrideLevel }} + -v={{ .Values.s3.loggingOverrideLevel }} \ + {{- else }} + -v={{ .Values.global.loggingLevel }} \ + {{- end }} + s3 \ + -port={{ .Values.s3.port }} \ + {{- if .Values.s3.metricsPort }} + -metricsPort {{ .Values.s3.metricsPort }} \ + {{- end }} + {{- if .Values.global.enableSecurity }} + -cert.file=/usr/local/share/ca-certificates/client/tls.crt \ + -key.file=/usr/local/share/ca-certificates/client/tls.key \ + {{- end }} + {{- if .Values.s3.domainName }} + -domainName={{ .Values.s3.domainName }} \ + {{- end }} + {{- if .Values.s3.allowEmptyFolder }} + -allowEmptyFolder={{ .Values.s3.allowEmptyFolder }} \ + {{- end }} + {{- if .Values.s3.enableAuth }} + -config=/etc/sw/seaweedfs_s3_config \ + {{- end }} + -filer={{ template "seaweedfs.name" . }}-filer-client:{{ .Values.filer.port }} + volumeMounts: + - name: logs + mountPath: "/logs/" + - mountPath: /etc/sw + name: config-users + readOnly: true + {{- if .Values.global.enableSecurity }} + - name: security-config + readOnly: true + mountPath: /etc/seaweedfs/security.toml + subPath: security.toml + - name: ca-cert + readOnly: true + mountPath: /usr/local/share/ca-certificates/ca/ + - name: master-cert + readOnly: true + mountPath: /usr/local/share/ca-certificates/master/ + - name: volume-cert + readOnly: true + mountPath: /usr/local/share/ca-certificates/volume/ + - name: filer-cert + readOnly: true + mountPath: /usr/local/share/ca-certificates/filer/ + - name: client-cert + readOnly: true + mountPath: /usr/local/share/ca-certificates/client/ + {{- end }} + {{ tpl .Values.s3.extraVolumeMounts . | nindent 12 | trim }} + ports: + - containerPort: {{ .Values.s3.port }} + name: swfs-s3 + readinessProbe: + httpGet: + path: / + port: {{ .Values.s3.port }} + scheme: HTTP + initialDelaySeconds: 15 + periodSeconds: 15 + successThreshold: 1 + failureThreshold: 100 + timeoutSeconds: 10 + livenessProbe: + httpGet: + path: / + port: {{ .Values.s3.port }} + scheme: HTTP + initialDelaySeconds: 20 + periodSeconds: 60 + successThreshold: 1 + failureThreshold: 20 + timeoutSeconds: 10 + {{- if .Values.s3.resources }} + resources: + {{ tpl .Values.s3.resources . | nindent 12 | trim }} + {{- end }} + volumes: + - name: config-users + secret: + defaultMode: 420 + secretName: seaweedfs-s3-secret + {{- if eq .Values.s3.logs.type "hostPath" }} + - name: logs + hostPath: + path: /storage/logs/seaweedfs/s3 + type: DirectoryOrCreate + {{- end }} + {{- if .Values.global.enableSecurity }} + - name: security-config + configMap: + name: {{ template "seaweedfs.name" . }}-security-config + - name: ca-cert + secret: + secretName: {{ template "seaweedfs.name" . }}-ca-cert + - name: master-cert + secret: + secretName: {{ template "seaweedfs.name" . }}-master-cert + - name: volume-cert + secret: + secretName: {{ template "seaweedfs.name" . }}-volume-cert + - name: filer-cert + secret: + secretName: {{ template "seaweedfs.name" . }}-filer-cert + - name: client-cert + secret: + secretName: {{ template "seaweedfs.name" . }}-client-cert + {{- end }} + {{ tpl .Values.s3.extraVolumes . | indent 8 | trim }} + {{- if .Values.s3.nodeSelector }} + nodeSelector: + {{ tpl .Values.s3.nodeSelector . 
| indent 8 | trim }} + {{- end }} +{{- end }} diff --git a/k8s/seaweedfs/templates/s3-service.yaml b/k8s/seaweedfs/templates/s3-service.yaml new file mode 100644 index 000000000..122b33298 --- /dev/null +++ b/k8s/seaweedfs/templates/s3-service.yaml @@ -0,0 +1,23 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "seaweedfs.name" . }}-s3 + namespace: {{ .Release.Namespace }} + labels: + app: {{ template "seaweedfs.name" . }} + component: s3 +spec: + ports: + - name: "swfs-s3" + port: {{ if .Values.s3.enabled }}{{ .Values.s3.port }}{{ else }}{{ .Values.filer.s3.port }}{{ end }} + targetPort: {{ if .Values.s3.enabled }}{{ .Values.s3.port }}{{ else }}{{ .Values.filer.s3.port }}{{ end }} + protocol: TCP +{{- if and .Values.s3.enabled .Values.s3.metricsPort }} + - name: "metrics" + port: {{ .Values.s3.metricsPort }} + targetPort: {{ .Values.s3.metricsPort }} + protocol: TCP +{{- end }} + selector: + app: {{ template "seaweedfs.name" . }} + component: {{ if .Values.s3.enabled }}s3{{ else }}filer{{ end }} diff --git a/k8s/seaweedfs/templates/s3-servicemonitor.yaml b/k8s/seaweedfs/templates/s3-servicemonitor.yaml new file mode 100644 index 000000000..7f18f00f5 --- /dev/null +++ b/k8s/seaweedfs/templates/s3-servicemonitor.yaml @@ -0,0 +1,18 @@ +{{- if .Values.s3.metricsPort }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ template "seaweedfs.name" . }}-s3 + namespace: {{ .Release.Namespace }} + labels: + app: {{ template "seaweedfs.name" . }} + component: s3 +spec: + endpoints: + - interval: 30s + port: swfs-s3-metrics + scrapeTimeout: 5s + selector: + app: {{ template "seaweedfs.name" . }} + component: s3 +{{- end }} \ No newline at end of file diff --git a/k8s/seaweedfs/templates/seaweedfs-grafana-dashboard.yaml b/k8s/seaweedfs/templates/seaweedfs-grafana-dashboard.yaml new file mode 100644 index 000000000..eb5a5ebac --- /dev/null +++ b/k8s/seaweedfs/templates/seaweedfs-grafana-dashboard.yaml @@ -0,0 +1,20 @@ +{{- if .Values.global.monitoring.enabled }} +{{- $files := .Files.Glob "dashboards/*.json" }} +{{- if $files }} +apiVersion: v1 +kind: ConfigMapList +items: +{{- range $path, $fileContents := $files }} +{{- $dashboardName := regexReplaceAll "(^.*/)(.*)\\.json$" $path "${2}" }} +- apiVersion: v1 + kind: ConfigMap + metadata: + name: {{ printf "%s" $dashboardName | lower | replace "_" "-" }} + namespace: {{ $.Release.Namespace }} + labels: + grafana_dashboard: "1" + data: + {{ $dashboardName }}.json: {{ $.Files.Get $path | toJson }} +{{- end }} +{{- end }} +{{- end }} \ No newline at end of file diff --git a/k8s/seaweedfs/templates/seaweedfs-s3-secret.yaml b/k8s/seaweedfs/templates/seaweedfs-s3-secret.yaml new file mode 100644 index 000000000..66fd5f28e --- /dev/null +++ b/k8s/seaweedfs/templates/seaweedfs-s3-secret.yaml @@ -0,0 +1,21 @@ +{{- if not (or .Values.filer.s3.skipAuthSecretCreation .Values.s3.skipAuthSecretCreation) }} +{{- $access_key_admin := randAlphaNum 16 -}} +{{- $secret_key_admin := randAlphaNum 32 -}} +{{- $access_key_read := randAlphaNum 16 -}} +{{- $secret_key_read := randAlphaNum 32 -}} +apiVersion: v1 +kind: Secret +type: Opaque +metadata: + name: seaweedfs-s3-secret + namespace: {{ .Release.Namespace }} + annotations: + "helm.sh/resource-policy": keep + "helm.sh/hook": "pre-install" +stringData: + admin_access_key_id: {{ $access_key_admin }} + admin_secret_access_key: {{ $secret_key_admin }} + read_access_key_id: {{ $access_key_read }} + read_secret_access_key: {{ $secret_key_read }} + seaweedfs_s3_config: 
'{"identities":[{"name":"anvAdmin","credentials":[{"accessKey":"{{ $access_key_admin }}","secretKey":"{{ $secret_key_admin }}"}],"actions":["Admin","Read","Write"]},{"name":"anvReadOnly","credentials":[{"accessKey":"{{ $access_key_read }}","secretKey":"{{ $secret_key_read }}"}],"actions":["Read"]}]}' +{{- end }} \ No newline at end of file diff --git a/k8s/seaweedfs/templates/secret-seaweedfs-db.yaml b/k8s/seaweedfs/templates/secret-seaweedfs-db.yaml new file mode 100644 index 000000000..c6132c9ea --- /dev/null +++ b/k8s/seaweedfs/templates/secret-seaweedfs-db.yaml @@ -0,0 +1,14 @@ +apiVersion: v1 +kind: Secret +type: Opaque +metadata: + name: secret-seaweedfs-db + namespace: {{ .Release.Namespace }} + annotations: + "helm.sh/resource-policy": keep + "helm.sh/hook": "pre-install" +stringData: + user: "YourSWUser" + password: "HardCodedPassword" + # better to random generate and create in DB + # password: {{ randAlphaNum 10 | sha256sum | b64enc | trunc 32 }} diff --git a/k8s/seaweedfs/templates/security-configmap.yaml b/k8s/seaweedfs/templates/security-configmap.yaml new file mode 100644 index 000000000..7d06614ec --- /dev/null +++ b/k8s/seaweedfs/templates/security-configmap.yaml @@ -0,0 +1,52 @@ +{{- if .Values.global.enableSecurity }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "seaweedfs.name" . }}-security-config + namespace: {{ .Release.Namespace }} + labels: + app: {{ template "seaweedfs.name" . }} + chart: {{ template "seaweedfs.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +data: + security.toml: |- + # this file is read by master, volume server, and filer + + # the jwt signing key is read by master and volume server + # a jwt expires in 10 seconds + [jwt.signing] + key = "{{ randAlphaNum 10 | b64enc }}" + + # all grpc tls authentications are mutual + # the values for the following ca, cert, and key are paths to the PERM files. + [grpc] + ca = "/usr/local/share/ca-certificates/ca/tls.crt" + + [grpc.volume] + cert = "/usr/local/share/ca-certificates/volume/tls.crt" + key = "/usr/local/share/ca-certificates/volume/tls.key" + + [grpc.master] + cert = "/usr/local/share/ca-certificates/master/tls.crt" + key = "/usr/local/share/ca-certificates/master/tls.key" + + [grpc.filer] + cert = "/usr/local/share/ca-certificates/filer/tls.crt" + key = "/usr/local/share/ca-certificates/filer/tls.key" + + # use this for any place needs a grpc client + # i.e., "weed backup|benchmark|filer.copy|filer.replicate|mount|s3|upload" + [grpc.client] + cert = "/usr/local/share/ca-certificates/client/tls.crt" + key = "/usr/local/share/ca-certificates/client/tls.key" + + # volume server https options + # Note: work in progress! + # this does not work with other clients, e.g., "weed filer|mount" etc, yet. 
+ [https.client] + enabled = false + [https.volume] + cert = "" + key = "" +{{- end }} diff --git a/k8s/seaweedfs/templates/service-account.yaml b/k8s/seaweedfs/templates/service-account.yaml new file mode 100644 index 000000000..e82ef7d62 --- /dev/null +++ b/k8s/seaweedfs/templates/service-account.yaml @@ -0,0 +1,29 @@ +#hack for delete pod master after migration +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1beta1 +metadata: + name: seaweefds-rw-cr +rules: + - apiGroups: [""] + resources: ["pods"] + verbs: ["get", "list", "watch", "create", "update", "patch", "delete"] +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: seaweefds-rw-sa + namespace: {{ .Release.Namespace }} +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1beta1 +metadata: + name: system:serviceaccount:seaweefds-rw-sa:default +subjects: +- kind: ServiceAccount + name: seaweefds-rw-sa + namespace: {{ .Release.Namespace }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: seaweefds-rw-cr \ No newline at end of file diff --git a/k8s/seaweedfs/templates/volume-cert.yaml b/k8s/seaweedfs/templates/volume-cert.yaml new file mode 100644 index 000000000..72c62a0f5 --- /dev/null +++ b/k8s/seaweedfs/templates/volume-cert.yaml @@ -0,0 +1,33 @@ +{{- if .Values.global.enableSecurity }} +apiVersion: certmanager.k8s.io/v1alpha1 +kind: Certificate +metadata: + name: {{ template "seaweedfs.name" . }}-volume-cert + namespace: {{ .Release.Namespace }} +spec: + secretName: {{ template "seaweedfs.name" . }}-volume-cert + issuerRef: + name: {{ template "seaweedfs.name" . }}-clusterissuer + kind: ClusterIssuer + commonName: {{ .Values.certificates.commonName }} + organization: + - "SeaweedFS CA" + dnsNames: + - '*.{{ .Release.Namespace }}' + - '*.{{ .Release.Namespace }}.svc' + - '*.{{ .Release.Namespace }}.svc.cluster.local' + - '*.{{ template "seaweedfs.name" . }}-master' + - '*.{{ template "seaweedfs.name" . }}-master.{{ .Release.Namespace }}' + - '*.{{ template "seaweedfs.name" . }}-master.{{ .Release.Namespace }}.svc' + - '*.{{ template "seaweedfs.name" . }}-master.{{ .Release.Namespace }}.svc.cluster.local' +{{- if .Values.certificates.ipAddresses }} + ipAddresses: + {{- range .Values.certificates.ipAddresses }} + - {{ . }} + {{- end }} +{{- end }} + keyAlgorithm: {{ .Values.certificates.keyAlgorithm }} + keySize: {{ .Values.certificates.keySize }} + duration: {{ .Values.certificates.duration }} + renewBefore: {{ .Values.certificates.renewBefore }} +{{- end }} diff --git a/k8s/seaweedfs/templates/volume-service.yaml b/k8s/seaweedfs/templates/volume-service.yaml new file mode 100644 index 000000000..0a9173fde --- /dev/null +++ b/k8s/seaweedfs/templates/volume-service.yaml @@ -0,0 +1,28 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "seaweedfs.name" . }}-volume + namespace: {{ .Release.Namespace }} + labels: + app: {{ template "seaweedfs.name" . }} + component: volume +spec: + clusterIP: None + ports: + - name: "swfs-volume" + port: {{ .Values.volume.port }} + targetPort: {{ .Values.volume.port }} + protocol: TCP + - name: "swfs-volume-18080" + port: {{ .Values.volume.grpcPort }} + targetPort: {{ .Values.volume.grpcPort }} + protocol: TCP +{{- if .Values.volume.metricsPort }} + - name: "swfs-volume-metrics" + port: {{ .Values.volume.metricsPort }} + targetPort: {{ .Values.volume.metricsPort }} + protocol: TCP +{{- end }} + selector: + app: {{ template "seaweedfs.name" . 
}} + component: volume \ No newline at end of file diff --git a/k8s/seaweedfs/templates/volume-servicemonitor.yaml b/k8s/seaweedfs/templates/volume-servicemonitor.yaml new file mode 100644 index 000000000..1b286e9b6 --- /dev/null +++ b/k8s/seaweedfs/templates/volume-servicemonitor.yaml @@ -0,0 +1,18 @@ +{{- if .Values.volume.metricsPort }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ template "seaweedfs.name" . }}-volume + namespace: {{ .Release.Namespace }} + labels: + app: {{ template "seaweedfs.name" . }} + component: volume +spec: + endpoints: + - interval: 30s + port: swfs-volume-metrics + scrapeTimeout: 5s + selector: + app: {{ template "seaweedfs.name" . }} + component: volume +{{- end }} \ No newline at end of file diff --git a/k8s/seaweedfs/templates/volume-statefulset.yaml b/k8s/seaweedfs/templates/volume-statefulset.yaml new file mode 100644 index 000000000..652fd9ea3 --- /dev/null +++ b/k8s/seaweedfs/templates/volume-statefulset.yaml @@ -0,0 +1,276 @@ +{{- if .Values.volume.enabled }} +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ template "seaweedfs.name" . }}-volume + namespace: {{ .Release.Namespace }} + labels: + app: {{ template "seaweedfs.name" . }} + chart: {{ template "seaweedfs.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +spec: + serviceName: {{ template "seaweedfs.name" . }}-volume + replicas: {{ .Values.volume.replicas }} + podManagementPolicy: Parallel + selector: + matchLabels: + app: {{ template "seaweedfs.name" . }} + chart: {{ template "seaweedfs.chart" . }} + release: {{ .Release.Name }} + component: volume + template: + metadata: + labels: + app: {{ template "seaweedfs.name" . }} + chart: {{ template "seaweedfs.chart" . }} + release: {{ .Release.Name }} + component: volume + spec: + {{- if .Values.volume.affinity }} + affinity: + {{ tpl .Values.volume.affinity . | nindent 8 | trim }} + {{- end }} + restartPolicy: {{ default .Values.global.restartPolicy .Values.volume.restartPolicy }} + {{- if .Values.volume.tolerations }} + tolerations: + {{ tpl .Values.volume.tolerations . | nindent 8 | trim }} + {{- end }} + {{- if .Values.global.imagePullSecrets }} + imagePullSecrets: + - name: {{ .Values.global.imagePullSecrets }} + {{- end }} + terminationGracePeriodSeconds: 10 + {{- if .Values.volume.priorityClassName }} + priorityClassName: {{ .Values.volume.priorityClassName | quote }} + {{- end }} + enableServiceLinks: false + {{- if .Values.volume.dir_idx }} + initContainers: + - name: seaweedfs-vol-move-idx + image: {{ template "volume.image" . }} + imagePullPolicy: {{ .Values.global.pullPolicy | default "IfNotPresent" }} + command: [ '/bin/sh', '-c' ] + args: ['if ls {{ .Values.volume.dir }}/*.idx >/dev/null 2>&1; then mv {{ .Values.volume.dir }}/*.idx {{ .Values.volume.dir_idx }}/; fi;'] + volumeMounts: + - name: idx + mountPath: {{ .Values.volume.dir_idx }} + - name: data + mountPath: {{ .Values.volume.dir }} + {{- end }} + containers: + - name: seaweedfs + image: {{ template "volume.image" . }} + imagePullPolicy: {{ default "IfNotPresent" .Values.global.imagePullPolicy }} + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: HOST_IP + valueFrom: + fieldRef: + fieldPath: status.hostIP + - name: SEAWEEDFS_FULLNAME + value: "{{ template "seaweedfs.name" . 
}}" + {{- if .Values.global.extraEnvironmentVars }} + {{- range $key, $value := .Values.global.extraEnvironmentVars }} + - name: {{ $key }} + value: {{ $value | quote }} + {{- end }} + {{- end }} + command: + - "/bin/sh" + - "-ec" + - | + exec /usr/bin/weed -logdir=/logs \ + {{- if .Values.volume.loggingOverrideLevel }} + -v={{ .Values.volume.loggingOverrideLevel }} \ + {{- else }} + -v={{ .Values.global.loggingLevel }} \ + {{- end }} + volume \ + -port={{ .Values.volume.port }} \ + {{- if .Values.volume.metricsPort }} + -metricsPort {{ .Values.volume.metricsPort }} \ + {{- end }} + -dir={{ .Values.volume.dir }} \ + {{- if .Values.volume.dir_idx }} + -dir.idx={{ .Values.volume.dir_idx }} \ + {{- end }} + -max={{ .Values.volume.maxVolumes }} \ + {{- if .Values.volume.rack }} + -rack={{ .Values.volume.rack }} \ + {{- end }} + {{- if .Values.volume.dataCenter }} + -dataCenter={{ .Values.volume.dataCenter }} \ + {{- end }} + -ip.bind={{ .Values.volume.ipBind }} \ + -read.redirect={{ .Values.volume.readRedirect }} \ + {{- if .Values.volume.whiteList }} + -whiteList={{ .Values.volume.whiteList }} \ + {{- end }} + {{- if .Values.volume.imagesFixOrientation }} + -images.fix.orientation \ + {{- end }} + {{- if .Values.volume.pulseSeconds }} + -pulseSeconds={{ .Values.volume.pulseSeconds }} \ + {{- end }} + {{- if .Values.volume.index }} + -index={{ .Values.volume.index }} \ + {{- end }} + {{- if .Values.volume.fileSizeLimitMB }} + -fileSizeLimitMB={{ .Values.volume.fileSizeLimitMB }} \ + {{- end }} + -minFreeSpacePercent={{ .Values.volume.minFreeSpacePercent }} \ + -ip=${POD_NAME}.${SEAWEEDFS_FULLNAME}-volume \ + -compactionMBps={{ .Values.volume.compactionMBps }} \ + -mserver={{ range $index := until (.Values.master.replicas | int) }}${SEAWEEDFS_FULLNAME}-master-{{ $index }}.${SEAWEEDFS_FULLNAME}-master:{{ $.Values.master.port }}{{ if lt $index (sub ($.Values.master.replicas | int) 1) }},{{ end }}{{ end }} + volumeMounts: + - name: data + mountPath: "{{ .Values.volume.dir }}/" + {{- if .Values.volume.dir_idx }} + - name: idx + mountPath: "{{ .Values.volume.dir_idx }}/" + {{- end }} + - name: logs + mountPath: "/logs/" + {{- if .Values.global.enableSecurity }} + - name: security-config + readOnly: true + mountPath: /etc/seaweedfs/security.toml + subPath: security.toml + - name: ca-cert + readOnly: true + mountPath: /usr/local/share/ca-certificates/ca/ + - name: master-cert + readOnly: true + mountPath: /usr/local/share/ca-certificates/master/ + - name: volume-cert + readOnly: true + mountPath: /usr/local/share/ca-certificates/volume/ + - name: filer-cert + readOnly: true + mountPath: /usr/local/share/ca-certificates/filer/ + - name: client-cert + readOnly: true + mountPath: /usr/local/share/ca-certificates/client/ + {{- end }} + {{ tpl .Values.volume.extraVolumeMounts . | nindent 12 | trim }} + ports: + - containerPort: {{ .Values.volume.port }} + name: swfs-vol + - containerPort: {{ .Values.volume.grpcPort }} + #name: swfs-vol-grpc + readinessProbe: + httpGet: + path: /status + port: {{ .Values.volume.port }} + scheme: HTTP + initialDelaySeconds: 15 + periodSeconds: 90 + successThreshold: 1 + failureThreshold: 100 + timeoutSeconds: 30 + livenessProbe: + httpGet: + path: /status + port: {{ .Values.volume.port }} + scheme: HTTP + initialDelaySeconds: 20 + periodSeconds: 90 + successThreshold: 1 + failureThreshold: 4 + timeoutSeconds: 30 + {{- if .Values.volume.resources }} + resources: + {{ tpl .Values.volume.resources . 
| nindent 12 | trim }} + {{- end }} + {{- $hostpath_exists := include "volume.hostpath_exists" . -}} + {{- if $hostpath_exists }} + volumes: + {{- if eq .Values.volume.data.type "hostPath" }} + - name: data + hostPath: + path: /storage/object_store/ + type: DirectoryOrCreate + {{- end }} + {{- if and (eq .Values.volume.idx.type "hostPath") .Values.volume.dir_idx }} + - name: idx + hostPath: + path: /ssd/seaweedfs-volume-idx/ + type: DirectoryOrCreate + {{- end }} + {{- if eq .Values.volume.logs.type "hostPath" }} + - name: logs + hostPath: + path: /storage/logs/seaweedfs/volume + type: DirectoryOrCreate + {{- end }} + {{- if .Values.global.enableSecurity }} + - name: security-config + configMap: + name: {{ template "seaweedfs.name" . }}-security-config + - name: ca-cert + secret: + secretName: {{ template "seaweedfs.name" . }}-ca-cert + - name: master-cert + secret: + secretName: {{ template "seaweedfs.name" . }}-master-cert + - name: volume-cert + secret: + secretName: {{ template "seaweedfs.name" . }}-volume-cert + - name: filer-cert + secret: + secretName: {{ template "seaweedfs.name" . }}-filer-cert + - name: client-cert + secret: + secretName: {{ template "seaweedfs.name" . }}-client-cert + {{- end }} + {{- if .Values.volume.extraVolumes }} + {{ tpl .Values.volume.extraVolumes . | indent 8 | trim }} + {{- end }} + {{- end }} + {{- if .Values.volume.nodeSelector }} + nodeSelector: + {{ tpl .Values.volume.nodeSelector . | indent 8 | trim }} + {{- end }} + {{- $pvc_exists := include "volume.pvc_exists" . -}} + {{- if $pvc_exists }} + volumeClaimTemplates: + {{- if eq .Values.volume.data.type "persistentVolumeClaim"}} + - metadata: + name: data + spec: + accessModes: [ "ReadWriteOnce" ] + storageClassName: {{ .Values.volume.data.storageClass }} + resources: + requests: + storage: {{ .Values.volume.data.size }} + {{- end }} + {{- if and (eq .Values.volume.idx.type "persistentVolumeClaim") .Values.volume.dir_idx }} + - metadata: + name: idx + spec: + accessModes: [ "ReadWriteOnce" ] + storageClassName: {{ .Values.volume.idx.storageClass }} + resources: + requests: + storage: {{ .Values.volume.idx.size }} + {{- end }} + {{- if eq .Values.volume.logs.type "persistentVolumeClaim" }} + - metadata: + name: logs + spec: + accessModes: [ "ReadWriteOnce" ] + storageClassName: {{ .Values.volume.logs.storageClass }} + resources: + requests: + storage: {{ .Values.volume.logs.size }} + {{- end }} + {{- end }} +{{- end }} diff --git a/k8s/seaweedfs/values.yaml b/k8s/seaweedfs/values.yaml new file mode 100644 index 000000000..a4abaccf3 --- /dev/null +++ b/k8s/seaweedfs/values.yaml @@ -0,0 +1,418 @@ +# Available parameters and their default values for the SeaweedFS chart. 
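+#
+# A minimal usage sketch (assumes a release named "seaweedfs" and Helm 3
+# syntax; not part of the chart itself): install with the defaults, or
+# override any of the values below from a file or the command line, e.g.
+#   helm install seaweedfs ./k8s/seaweedfs
+#   helm install seaweedfs ./k8s/seaweedfs -f my-values.yaml --set master.replicas=3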
+
+global:
+  registry: ""
+  repository: ""
+  imageName: chrislusf/seaweedfs
+  # imageTag: "2.41" - now defaults to {.Chart.appVersion}
+  imagePullPolicy: IfNotPresent
+  imagePullSecrets: imagepullsecret
+  restartPolicy: Always
+  loggingLevel: 1
+  enableSecurity: false
+  monitoring:
+    enabled: false
+    gatewayHost: null
+    gatewayPort: null
+  # if enabled, global.replicationPlacment is used and overrides the master & filer default replication config
+  enableReplication: false
+  # replication type is XYZ:
+  # X: number of replicas in other data centers
+  # Y: number of replicas in other racks in the same data center
+  # Z: number of replicas in other servers in the same rack
+  replicationPlacment: "001"
+  extraEnvironmentVars:
+    WEED_CLUSTER_DEFAULT: "sw"
+    WEED_CLUSTER_SW_MASTER: "seaweedfs-master:9333"
+    WEED_CLUSTER_SW_FILER: "seaweedfs-filer-client:8888"
+
+image:
+  registry: ""
+  repository: ""
+
+master:
+  enabled: true
+  repository: null
+  imageName: null
+  imageTag: null
+  imageOverride: null
+  restartPolicy: null
+  replicas: 1
+  port: 9333
+  grpcPort: 19333
+  ipBind: "0.0.0.0"
+  volumePreallocate: false
+  # Master stops directing writes to oversized volumes
+  volumeSizeLimitMB: 30000
+  loggingOverrideLevel: null
+  # number of seconds between heartbeats, default 5
+  pulseSeconds: null
+  # threshold to vacuum and reclaim space, default 0.3 (30%)
+  garbageThreshold: null
+  # Prometheus push interval in seconds, default 15
+  metricsIntervalSec: 15
+  # replication type is XYZ:
+  # X: number of replicas in other data centers
+  # Y: number of replicas in other racks in the same data center
+  # Z: number of replicas in other servers in the same rack
+  defaultReplication: "000"
+
+  # Disable HTTP requests; only gRPC operations are allowed
+  disableHttp: false
+
+  extraVolumes: ""
+  extraVolumeMounts: ""
+
+  # storage and storageClass are the settings for configuring stateful
+  # storage for the master pods. storage should be set to the disk size of
+  # the attached volume. storageClass is the class of storage which defaults
+  # to null (the Kube cluster will pick the default).
+  storage: 25Gi
+  storageClass: null
+
+  # Resource requests, limits, etc. for the master cluster placement. This
+  # should map directly to the value of the resources field for a PodSpec,
+  # formatted as a multi-line string. By default no direct resource request
+  # is made.
+  resources: null
+
+  # updatePartition is used to control a careful rolling update of SeaweedFS
+  # masters.
+  updatePartition: 0
+
+  # Affinity Settings
+  # Commenting out or emptying the affinity variable allows
+  # deployment to single-node clusters such as Minikube
+  affinity: |
+    podAntiAffinity:
+      requiredDuringSchedulingIgnoredDuringExecution:
+        - labelSelector:
+            matchLabels:
+              app: {{ template "seaweedfs.name" . }}
+              release: "{{ .Release.Name }}"
+              component: master
+          topologyKey: kubernetes.io/hostname
+
+  # Toleration Settings for master pods
+  # This should be a multi-line string matching the Toleration array
+  # in a PodSpec.
+  tolerations: ""
+
+  # nodeSelector labels for master pod assignment, formatted as a multi-line string.
+  # ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector
+  # Example:
+  # nodeSelector: |
+  #   beta.kubernetes.io/arch: amd64
+  nodeSelector: |
+    sw-backend: "true"
+
+  # used to assign priority to master pods
+  # ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/
+  priorityClassName: ""
+
+  extraEnvironmentVars:
+    WEED_MASTER_VOLUME_GROWTH_COPY_1: 7
+    WEED_MASTER_VOLUME_GROWTH_COPY_2: 6
+    WEED_MASTER_VOLUME_GROWTH_COPY_3: 3
+    WEED_MASTER_VOLUME_GROWTH_COPY_OTHER: 1
+
+volume:
+  enabled: true
+  repository: null
+  imageName: null
+  imageTag: null
+  imageOverride: null
+  restartPolicy: null
+  port: 8080
+  grpcPort: 18080
+  metricsPort: 9327
+  ipBind: "0.0.0.0"
+  replicas: 1
+  loggingOverrideLevel: null
+  # number of seconds between heartbeats, must be smaller than or equal to the master's setting
+  pulseSeconds: null
+  # Choose [memory|leveldb|leveldbMedium|leveldbLarge] mode for the memory/performance balance, default memory
+  index: null
+  # limit file size to avoid running out of memory, default 256mb
+  fileSizeLimitMB: null
+  # minimum free disk space (in percent). If free disk space drops below this value, all volumes are marked read-only
+  minFreeSpacePercent: 7
+
+# can use ANY storage class, e.g., with local-path-provisioner
+# data:
+#   type: "persistentVolumeClaim"
+#   size: "24Ti"
+#   storageClass: "local-path-provisioner"
+  data:
+    type: "hostPath"
+    size: ""
+    storageClass: ""
+  idx:
+    type: "hostPath"
+    size: ""
+    storageClass: ""
+
+  logs:
+    type: "hostPath"
+    size: ""
+    storageClass: ""
+
+  # limit background compaction or copying speed in megabytes per second
+  compactionMBps: "50"
+
+  # Directories to store data files. dir[,dir]... (default "/tmp")
+  dir: "/data"
+  # Directories to store index files. dir[,dir]... (default "/tmp")
+  dir_idx: null
+
+  # Maximum number of volumes, count[,count]...
+  # If set to zero on a non-windows OS, the limit will be auto configured. (default "7")
+  maxVolumes: "0"
+
+  # Volume server's rack name
+  rack: null
+
+  # Volume server's data center name
+  dataCenter: null
+
+  # Redirect moved or non-local volumes. (default true)
+  readRedirect: true
+
+  # Comma-separated IP addresses with write permission. No limit if empty.
+  whiteList: null
+
+  # Adjust jpg orientation when uploading.
+  imagesFixOrientation: false
+
+  extraVolumes: ""
+  extraVolumeMounts: ""
+
+  # Affinity Settings
+  # Commenting out or emptying the affinity variable allows
+  # deployment to single-node clusters such as Minikube
+  affinity: |
+    podAntiAffinity:
+      requiredDuringSchedulingIgnoredDuringExecution:
+        - labelSelector:
+            matchLabels:
+              app: {{ template "seaweedfs.name" . }}
+              release: "{{ .Release.Name }}"
+              component: volume
+          topologyKey: kubernetes.io/hostname
+
+  # Resource requests, limits, etc. for the server cluster placement. This
+  # should map directly to the value of the resources field for a PodSpec,
+  # formatted as a multi-line string. By default no direct resource request
+  # is made.
+  resources: null
+
+  # Toleration Settings for server pods
+  # This should be a multi-line string matching the Toleration array
+  # in a PodSpec.
+  tolerations: ""
+
+  # nodeSelector labels for server pod assignment, formatted as a multi-line string.
+  # ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector
+  # Example:
+  # nodeSelector: |
+  #   beta.kubernetes.io/arch: amd64
+  nodeSelector: |
+    sw-volume: "true"
+
+  # used to assign priority to server pods
+  # ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/
+  priorityClassName: ""
+
+
+filer:
+  enabled: true
+  repository: null
+  imageName: null
+  imageTag: null
+  imageOverride: null
+  restartPolicy: null
+  replicas: 1
+  port: 8888
+  grpcPort: 18888
+  metricsPort: 9327
+  loggingOverrideLevel: null
+  # replication type is XYZ:
+  # X: number of replicas in other data centers
+  # Y: number of replicas in other racks in the same data center
+  # Z: number of replicas in other servers in the same rack
+  defaultReplicaPlacement: "000"
+  # turn off directory listing
+  disableDirListing: false
+  # split files larger than this limit (in MB), default 32
+  maxMB: null
+  # encrypt data on volume servers
+  encryptVolumeData: false
+  # enable metadata sync between peers, for leveldb (a local db for the filer, kept in sync across peers)
+  enable_peers: false
+
+  # Whether to proxy or redirect to the volume server during file GET requests
+  redirectOnRead: false
+
+  # Limit sub-directory listing size (default 100000)
+  dirListLimit: 100000
+
+  # Disable HTTP requests; only gRPC operations are allowed
+  disableHttp: false
+
+  # storage and storageClass are the settings for configuring stateful
+  # storage for the filer pods. storage should be set to the disk size of
+  # the attached volume. storageClass is the class of storage which defaults
+  # to null (the Kube cluster will pick the default).
+  storage: 25Gi
+  storageClass: null
+
+  extraVolumes: ""
+  extraVolumeMounts: ""
+
+  # Affinity Settings
+  # Commenting out or emptying the affinity variable allows
+  # deployment to single-node clusters such as Minikube
+  affinity: |
+    podAntiAffinity:
+      requiredDuringSchedulingIgnoredDuringExecution:
+        - labelSelector:
+            matchLabels:
+              app: {{ template "seaweedfs.name" . }}
+              release: "{{ .Release.Name }}"
+              component: filer
+          topologyKey: kubernetes.io/hostname
+
+  # updatePartition is used to control a careful rolling update of SeaweedFS
+  # filers.
+  updatePartition: 0
+
+  # Resource requests, limits, etc. for the server cluster placement. This
+  # should map directly to the value of the resources field for a PodSpec,
+  # formatted as a multi-line string. By default no direct resource request
+  # is made.
+  resources: null
+
+  # Toleration Settings for server pods
+  # This should be a multi-line string matching the Toleration array
+  # in a PodSpec.
+  tolerations: ""
+
+  # nodeSelector labels for server pod assignment, formatted as a multi-line string.
+  # ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector
+  # Example:
+  # nodeSelector: |
+  #   beta.kubernetes.io/arch: amd64
+  nodeSelector: |
+    sw-backend: "true"
+
+  # used to assign priority to server pods
+  # ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/
+  priorityClassName: ""
+
+  # extraEnvVars is a list of extra environment variables to set with the stateful set.
+  extraEnvironmentVars:
+    WEED_MYSQL_ENABLED: "true"
+    WEED_MYSQL_HOSTNAME: "mysql-db-host"
+    WEED_MYSQL_PORT: "3306"
+    WEED_MYSQL_DATABASE: "sw_database"
+    WEED_MYSQL_CONNECTION_MAX_IDLE: "5"
+    WEED_MYSQL_CONNECTION_MAX_OPEN: "75"
+    # "refresh" connections every 10 minutes, preventing mysql from closing "old" connections
+    WEED_MYSQL_CONNECTION_MAX_LIFETIME_SECONDS: "600"
+    # enable usage of memsql as the filer backend
+    WEED_MYSQL_INTERPOLATEPARAMS: "true"
+    WEED_LEVELDB2_ENABLED: "false"
+    # with http DELETE, by default the filer checks whether a folder is empty;
+    # recursive_delete deletes all sub-folders and files, similar to "rm -Rf"
+    WEED_FILER_OPTIONS_RECURSIVE_DELETE: "false"
+    # each directory under this folder automatically becomes a separate bucket
+    WEED_FILER_BUCKETS_FOLDER: "/buckets"
+
+  s3:
+    enabled: true
+    port: 8333
+    # allow empty folders
+    allowEmptyFolder: false
+    # Suffix of the host name, {bucket}.{domainName}
+    domainName: ""
+    # enable users & permissions for s3 (must be injected into all services)
+    enableAuth: false
+    skipAuthSecretCreation: false
+
+s3:
+  enabled: false
+  repository: null
+  imageName: null
+  imageTag: null
+  restartPolicy: null
+  replicas: 1
+  port: 8333
+  metricsPort: 9327
+  loggingOverrideLevel: null
+  # allow empty folders
+  allowEmptyFolder: true
+  # enable users & permissions for s3 (must be injected into all services)
+  enableAuth: false
+  skipAuthSecretCreation: false
+
+  # Suffix of the host name, {bucket}.{domainName}
+  domainName: ""
+
+  extraVolumes: ""
+  extraVolumeMounts: ""
+
+  # Resource requests, limits, etc. for the server cluster placement. This
+  # should map directly to the value of the resources field for a PodSpec,
+  # formatted as a multi-line string. By default no direct resource request
+  # is made.
+  resources: null
+
+  # Toleration Settings for server pods
+  # This should be a multi-line string matching the Toleration array
+  # in a PodSpec.
+  tolerations: ""
+
+  # nodeSelector labels for server pod assignment, formatted as a multi-line string.
+ # ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector + # Example: + # nodeSelector: | + # beta.kubernetes.io/arch: amd64 + nodeSelector: | + sw-backend: "true" + + # used to assign priority to server pods + # ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/ + priorityClassName: "" + + logs: + type: "hostPath" + size: "" + storageClass: "" + +cronjob: + enabled: true + master: "seaweedfs-master:9333" + filer: "seaweedfs-filer-client:8888" + tolerations: "" + nodeSelector: | + sw-backend: "true" + replication: + enable: true + collectionPattern: "" + schedule: "*/7 * * * *" + resources: null + # balance all volumes among volume servers + # ALL|EACH_COLLECTION| + collection: "" + + +certificates: + commonName: "SeaweedFS CA" + ipAddresses: [] + keyAlgorithm: rsa + keySize: 2048 + duration: 2160h # 90d + renewBefore: 360h # 15d diff --git a/note/SeaweedFS_Architecture.png b/note/SeaweedFS_Architecture.png new file mode 100644 index 000000000..f960dc3e1 Binary files /dev/null and b/note/SeaweedFS_Architecture.png differ diff --git a/note/SeaweedFS_Cluster_Backup.png b/note/SeaweedFS_Cluster_Backup.png new file mode 100644 index 000000000..d8f2e19ff Binary files /dev/null and b/note/SeaweedFS_Cluster_Backup.png differ diff --git a/note/SeaweedFS_XDR.png b/note/SeaweedFS_XDR.png new file mode 100644 index 000000000..24e468a62 Binary files /dev/null and b/note/SeaweedFS_XDR.png differ diff --git a/note/shuguang.png b/note/shuguang.png new file mode 100644 index 000000000..54b6d0b6b Binary files /dev/null and b/note/shuguang.png differ diff --git a/other/java/client/pom.xml b/other/java/client/pom.xml index 0c585a941..f4e522a3e 100644 --- a/other/java/client/pom.xml +++ b/other/java/client/pom.xml @@ -1,10 +1,11 @@ - + 4.0.0 com.github.chrislusf seaweedfs-client - 1.2.4 + 1.6.4 org.sonatype.oss @@ -16,7 +17,7 @@ 3.9.1 1.23.0 - 28.0-jre + 30.0-jre @@ -64,9 +65,14 @@ junit junit - 4.12 + 4.13.1 test + + javax.annotation + javax.annotation-api + 1.3.2 + @@ -88,8 +94,8 @@ org.apache.maven.plugins maven-compiler-plugin - 7 - 7 + 8 + 8 @@ -97,9 +103,11 @@ protobuf-maven-plugin 0.6.1 - com.google.protobuf:protoc:${protobuf.version}:exe:${os.detected.classifier} + com.google.protobuf:protoc:${protobuf.version}:exe:${os.detected.classifier} + grpc-java - io.grpc:protoc-gen-grpc-java:${grpc.version}:exe:${os.detected.classifier} + io.grpc:protoc-gen-grpc-java:${grpc.version}:exe:${os.detected.classifier} + diff --git a/other/java/client/pom.xml.deploy b/other/java/client/pom.xml.deploy new file mode 100644 index 000000000..9c8c4f45e --- /dev/null +++ b/other/java/client/pom.xml.deploy @@ -0,0 +1,170 @@ + + + 4.0.0 + + com.github.chrislusf + seaweedfs-client + 1.6.4 + + + org.sonatype.oss + oss-parent + 9 + + + + 3.9.1 + + 1.23.0 + 28.0-jre + + + + + com.moandjiezana.toml + toml4j + 0.7.2 + + + + com.google.protobuf + protobuf-java + ${protobuf.version} + + + com.google.guava + guava + ${guava.version} + + + io.grpc + grpc-netty-shaded + ${grpc.version} + + + io.grpc + grpc-protobuf + ${grpc.version} + + + io.grpc + grpc-stub + ${grpc.version} + + + org.slf4j + slf4j-api + 1.7.25 + + + org.apache.httpcomponents + httpmime + 4.5.6 + + + junit + junit + 4.12 + test + + + + + + ossrh + https://oss.sonatype.org/content/repositories/snapshots + + + + + + kr.motd.maven + os-maven-plugin + 1.6.2 + + + + + org.apache.maven.plugins + maven-compiler-plugin + + 8 + 8 + + + + org.xolstice.maven.plugins + protobuf-maven-plugin + 0.6.1 + + 
com.google.protobuf:protoc:${protobuf.version}:exe:${os.detected.classifier} + + grpc-java + io.grpc:protoc-gen-grpc-java:${grpc.version}:exe:${os.detected.classifier} + + + + + + compile + compile-custom + + + + + + org.apache.maven.plugins + maven-gpg-plugin + 1.5 + + + sign-artifacts + verify + + sign + + + + + + org.sonatype.plugins + nexus-staging-maven-plugin + 1.6.7 + true + + ossrh + https://oss.sonatype.org/ + true + + + + org.apache.maven.plugins + maven-source-plugin + 2.2.1 + + + attach-sources + + jar-no-fork + + + + + + org.apache.maven.plugins + maven-javadoc-plugin + 2.9.1 + + + attach-javadocs + + jar + + + + + + + + diff --git a/other/java/client/pom_debug.xml b/other/java/client/pom_debug.xml new file mode 100644 index 000000000..12ea860c2 --- /dev/null +++ b/other/java/client/pom_debug.xml @@ -0,0 +1,144 @@ + + + 4.0.0 + + com.github.chrislusf + seaweedfs-client + 1.6.4 + + + org.sonatype.oss + oss-parent + 9 + + + + 3.9.1 + + 1.23.0 + 28.0-jre + + + + + com.moandjiezana.toml + toml4j + 0.7.2 + + + + com.google.protobuf + protobuf-java + ${protobuf.version} + + + com.google.guava + guava + ${guava.version} + + + io.grpc + grpc-netty-shaded + ${grpc.version} + + + io.grpc + grpc-protobuf + ${grpc.version} + + + io.grpc + grpc-stub + ${grpc.version} + + + org.slf4j + slf4j-api + 1.7.25 + + + org.apache.httpcomponents + httpmime + 4.5.6 + + + junit + junit + 4.13.1 + test + + + javax.annotation + javax.annotation-api + 1.3.2 + + + + + + + kr.motd.maven + os-maven-plugin + 1.6.2 + + + + + org.apache.maven.plugins + maven-compiler-plugin + + 8 + 8 + + + + org.xolstice.maven.plugins + protobuf-maven-plugin + 0.6.1 + + com.google.protobuf:protoc:${protobuf.version}:exe:${os.detected.classifier} + + grpc-java + io.grpc:protoc-gen-grpc-java:${grpc.version}:exe:${os.detected.classifier} + + + + + + compile + compile-custom + + + + + + org.apache.maven.plugins + maven-source-plugin + 2.2.1 + + + attach-sources + + jar-no-fork + + + + + + org.apache.maven.plugins + maven-javadoc-plugin + 2.9.1 + + + attach-javadocs + + jar + + + + + + + + diff --git a/other/java/client/src/main/java/seaweedfs/client/ByteBufferPool.java b/other/java/client/src/main/java/seaweedfs/client/ByteBufferPool.java new file mode 100644 index 000000000..51053becd --- /dev/null +++ b/other/java/client/src/main/java/seaweedfs/client/ByteBufferPool.java @@ -0,0 +1,42 @@ +package seaweedfs.client; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.nio.Buffer; +import java.nio.ByteBuffer; +import java.util.ArrayList; +import java.util.List; + +public class ByteBufferPool { + + private static final int MIN_BUFFER_SIZE = 8 * 1024 * 1024; + private static final Logger LOG = LoggerFactory.getLogger(ByteBufferPool.class); + + private static final List bufferList = new ArrayList<>(); + + public static synchronized ByteBuffer request(int bufferSize) { + if (bufferSize < MIN_BUFFER_SIZE) { + bufferSize = MIN_BUFFER_SIZE; + } + LOG.debug("requested new buffer {}", bufferSize); + if (bufferList.isEmpty()) { + return ByteBuffer.allocate(bufferSize); + } + ByteBuffer buffer = bufferList.remove(bufferList.size() - 1); + if (buffer.capacity() >= bufferSize) { + return buffer; + } + + LOG.info("add new buffer from {} to {}", buffer.capacity(), bufferSize); + bufferList.add(0, buffer); + return ByteBuffer.allocate(bufferSize); + + } + + public static synchronized void release(ByteBuffer obj) { + ((Buffer)obj).clear(); + bufferList.add(0, obj); + } + +} diff --git 
a/other/java/client/src/main/java/seaweedfs/client/ChunkCache.java b/other/java/client/src/main/java/seaweedfs/client/ChunkCache.java new file mode 100644 index 000000000..58870d742 --- /dev/null +++ b/other/java/client/src/main/java/seaweedfs/client/ChunkCache.java @@ -0,0 +1,36 @@ +package seaweedfs.client; + +import com.google.common.cache.Cache; +import com.google.common.cache.CacheBuilder; + +import java.util.concurrent.TimeUnit; + +public class ChunkCache { + + private Cache cache = null; + + public ChunkCache(int maxEntries) { + if (maxEntries == 0) { + return; + } + this.cache = CacheBuilder.newBuilder() + .maximumSize(maxEntries) + .expireAfterAccess(1, TimeUnit.HOURS) + .build(); + } + + public byte[] getChunk(String fileId) { + if (this.cache == null) { + return null; + } + return this.cache.getIfPresent(fileId); + } + + public void setChunk(String fileId, byte[] data) { + if (this.cache == null) { + return; + } + this.cache.put(fileId, data); + } + +} diff --git a/other/java/client/src/main/java/seaweedfs/client/FileChunkManifest.java b/other/java/client/src/main/java/seaweedfs/client/FileChunkManifest.java new file mode 100644 index 000000000..9b6ba5dfc --- /dev/null +++ b/other/java/client/src/main/java/seaweedfs/client/FileChunkManifest.java @@ -0,0 +1,140 @@ +package seaweedfs.client; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; + +public class FileChunkManifest { + + private static final Logger LOG = LoggerFactory.getLogger(FileChunkManifest.class); + + private static final int mergeFactor = 1000; + + public static boolean hasChunkManifest(List chunks) { + for (FilerProto.FileChunk chunk : chunks) { + if (chunk.getIsChunkManifest()) { + return true; + } + } + return false; + } + + public static List resolveChunkManifest( + final FilerClient filerClient, List chunks) throws IOException { + + List dataChunks = new ArrayList<>(); + + for (FilerProto.FileChunk chunk : chunks) { + if (!chunk.getIsChunkManifest()) { + dataChunks.add(chunk); + continue; + } + + // IsChunkManifest + LOG.debug("fetching chunk manifest:{}", chunk); + byte[] data = fetchChunk(filerClient, chunk); + FilerProto.FileChunkManifest m = FilerProto.FileChunkManifest.newBuilder().mergeFrom(data).build(); + List resolvedChunks = new ArrayList<>(); + for (FilerProto.FileChunk t : m.getChunksList()) { + // avoid deprecated chunk.getFileId() + resolvedChunks.add(t.toBuilder().setFileId(FilerClient.toFileId(t.getFid())).build()); + } + dataChunks.addAll(resolveChunkManifest(filerClient, resolvedChunks)); + } + + return dataChunks; + } + + private static byte[] fetchChunk(final FilerClient filerClient, FilerProto.FileChunk chunk) throws IOException { + + String vid = "" + chunk.getFid().getVolumeId(); + FilerProto.Locations locations = filerClient.vidLocations.get(vid); + if (locations == null) { + FilerProto.LookupVolumeRequest.Builder lookupRequest = FilerProto.LookupVolumeRequest.newBuilder(); + lookupRequest.addVolumeIds(vid); + FilerProto.LookupVolumeResponse lookupResponse = filerClient + .getBlockingStub().lookupVolume(lookupRequest.build()); + locations = lookupResponse.getLocationsMapMap().get(vid); + filerClient.vidLocations.put(vid, locations); + LOG.debug("fetchChunk vid:{} locations:{}", vid, locations); + } + + SeaweedRead.ChunkView chunkView = new SeaweedRead.ChunkView( + FilerClient.toFileId(chunk.getFid()), // avoid deprecated chunk.getFileId() + 0, + -1, + 0, + true, + 
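+                        // remaining args: the chunk's cipher key bytes and its compression flag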
chunk.getCipherKey().toByteArray(), + chunk.getIsCompressed()); + + byte[] chunkData = SeaweedRead.chunkCache.getChunk(chunkView.fileId); + if (chunkData == null) { + LOG.debug("doFetchFullChunkData:{}", chunkView); + chunkData = SeaweedRead.doFetchFullChunkData(filerClient, chunkView, locations); + } + if (chunk.getIsChunkManifest()){ + LOG.debug("chunk {} size {}", chunkView.fileId, chunkData.length); + SeaweedRead.chunkCache.setChunk(chunkView.fileId, chunkData); + } + + return chunkData; + + } + + public static List maybeManifestize( + final FilerClient filerClient, List inputChunks, String parentDirectory) throws IOException { + // the return variable + List chunks = new ArrayList<>(); + + List dataChunks = new ArrayList<>(); + for (FilerProto.FileChunk chunk : inputChunks) { + if (!chunk.getIsChunkManifest()) { + dataChunks.add(chunk); + } else { + chunks.add(chunk); + } + } + + int remaining = dataChunks.size(); + for (int i = 0; i + mergeFactor < dataChunks.size(); i += mergeFactor) { + FilerProto.FileChunk chunk = mergeIntoManifest(filerClient, dataChunks.subList(i, i + mergeFactor), parentDirectory); + chunks.add(chunk); + remaining -= mergeFactor; + } + + // remaining + for (int i = dataChunks.size() - remaining; i < dataChunks.size(); i++) { + chunks.add(dataChunks.get(i)); + } + return chunks; + } + + private static FilerProto.FileChunk mergeIntoManifest(final FilerClient filerClient, List dataChunks, String parentDirectory) throws IOException { + // create and serialize the manifest + dataChunks = FilerClient.beforeEntrySerialization(dataChunks); + FilerProto.FileChunkManifest.Builder m = FilerProto.FileChunkManifest.newBuilder().addAllChunks(dataChunks); + byte[] data = m.build().toByteArray(); + + long minOffset = Long.MAX_VALUE; + long maxOffset = -1; + for (FilerProto.FileChunk chunk : dataChunks) { + minOffset = Math.min(minOffset, chunk.getOffset()); + maxOffset = Math.max(maxOffset, chunk.getSize() + chunk.getOffset()); + } + + FilerProto.FileChunk.Builder manifestChunk = SeaweedWrite.writeChunk( + filerClient.getReplication(), + filerClient, + minOffset, + data, 0, data.length, parentDirectory); + manifestChunk.setIsChunkManifest(true); + manifestChunk.setSize(maxOffset - minOffset); + return manifestChunk.build(); + + } + +} diff --git a/other/java/client/src/main/java/seaweedfs/client/FilerClient.java b/other/java/client/src/main/java/seaweedfs/client/FilerClient.java index 84aa26ad9..257a9873d 100644 --- a/other/java/client/src/main/java/seaweedfs/client/FilerClient.java +++ b/other/java/client/src/main/java/seaweedfs/client/FilerClient.java @@ -1,27 +1,82 @@ package seaweedfs.client; +import com.google.common.base.Strings; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.nio.file.Path; -import java.nio.file.Paths; +import java.io.File; import java.util.ArrayList; import java.util.Arrays; import java.util.Iterator; import java.util.List; -public class FilerClient { +public class FilerClient extends FilerGrpcClient { private static final Logger LOG = LoggerFactory.getLogger(FilerClient.class); - private FilerGrpcClient filerGrpcClient; - public FilerClient(String host, int grpcPort) { - filerGrpcClient = new FilerGrpcClient(host, grpcPort); + super(host, grpcPort); + } + + public static String toFileId(FilerProto.FileId fid) { + if (fid == null) { + return null; + } + return String.format("%d,%x%08x", fid.getVolumeId(), fid.getFileKey(), fid.getCookie()); + } + + public static FilerProto.FileId toFileIdObject(String fileIdStr) { + if (fileIdStr 
== null || fileIdStr.length() == 0) { + return null; + } + int commaIndex = fileIdStr.lastIndexOf(','); + String volumeIdStr = fileIdStr.substring(0, commaIndex); + String fileKeyStr = fileIdStr.substring(commaIndex + 1, fileIdStr.length() - 8); + String cookieStr = fileIdStr.substring(fileIdStr.length() - 8); + + return FilerProto.FileId.newBuilder() + .setVolumeId(Integer.parseInt(volumeIdStr)) + .setFileKey(Long.parseLong(fileKeyStr, 16)) + .setCookie((int) Long.parseLong(cookieStr, 16)) + .build(); + } + + public static List beforeEntrySerialization(List chunks) { + List cleanedChunks = new ArrayList<>(); + for (FilerProto.FileChunk chunk : chunks) { + FilerProto.FileChunk.Builder chunkBuilder = chunk.toBuilder(); + chunkBuilder.clearFileId(); + chunkBuilder.clearSourceFileId(); + chunkBuilder.setFid(toFileIdObject(chunk.getFileId())); + FilerProto.FileId sourceFid = toFileIdObject(chunk.getSourceFileId()); + if (sourceFid != null) { + chunkBuilder.setSourceFid(sourceFid); + } + cleanedChunks.add(chunkBuilder.build()); + } + return cleanedChunks; } - public FilerClient(FilerGrpcClient filerGrpcClient) { - this.filerGrpcClient = filerGrpcClient; + public static FilerProto.Entry afterEntryDeserialization(FilerProto.Entry entry) { + if (entry.getChunksList().size() <= 0) { + return entry; + } + String fileId = entry.getChunks(0).getFileId(); + if (fileId != null && fileId.length() != 0) { + return entry; + } + FilerProto.Entry.Builder entryBuilder = entry.toBuilder(); + entryBuilder.clearChunks(); + for (FilerProto.FileChunk chunk : entry.getChunksList()) { + FilerProto.FileChunk.Builder chunkBuilder = chunk.toBuilder(); + chunkBuilder.setFileId(toFileId(chunk.getFid())); + String sourceFileId = toFileId(chunk.getSourceFid()); + if (sourceFileId != null) { + chunkBuilder.setSourceFileId(sourceFileId); + } + entryBuilder.addChunks(chunkBuilder); + } + return entryBuilder.build(); } public boolean mkdirs(String path, int mode) { @@ -38,9 +93,9 @@ public class FilerClient { if ("/".equals(path)) { return true; } - Path pathObject = Paths.get(path); - String parent = pathObject.getParent().toString(); - String name = pathObject.getFileName().toString(); + File pathFile = new File(path); + String parent = pathFile.getParent().replace('\\','/'); + String name = pathFile.getName(); mkdirs(parent, mode, uid, gid, userName, groupNames); @@ -59,13 +114,13 @@ public class FilerClient { public boolean mv(String oldPath, String newPath) { - Path oldPathObject = Paths.get(oldPath); - String oldParent = oldPathObject.getParent().toString(); - String oldName = oldPathObject.getFileName().toString(); + File oldPathFile = new File(oldPath); + String oldParent = oldPathFile.getParent().replace('\\','/'); + String oldName = oldPathFile.getName(); - Path newPathObject = Paths.get(newPath); - String newParent = newPathObject.getParent().toString(); - String newName = newPathObject.getFileName().toString(); + File newPathFile = new File(newPath); + String newParent = newPathFile.getParent().replace('\\','/'); + String newName = newPathFile.getName(); return atomicRenameEntry(oldParent, oldName, newParent, newName); @@ -73,9 +128,9 @@ public class FilerClient { public boolean rm(String path, boolean isRecursive, boolean ignoreRecusiveError) { - Path pathObject = Paths.get(path); - String parent = pathObject.getParent().toString(); - String name = pathObject.getFileName().toString(); + File pathFile = new File(path); + String parent = pathFile.getParent().replace('\\','/'); + String name = pathFile.getName(); 
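Editor's note (not part of the patch): the new toFileId/toFileIdObject helpers pin down the string file-id format used throughout this change: decimal volume id, a comma, the file key in hex, then the cookie as exactly eight hex digits. A minimal round-trip sketch, using made-up id values:

```java
import seaweedfs.client.FilerClient;
import seaweedfs.client.FilerProto;

public class FileIdRoundTrip {
    public static void main(String[] args) {
        // Example values only: volume id 3, file key 0x1, cookie 0x637037d6.
        String fileIdStr = "3,1637037d6";

        FilerProto.FileId fid = FilerClient.toFileIdObject(fileIdStr);
        System.out.println(fid.getVolumeId());                  // 3
        System.out.println(Long.toHexString(fid.getFileKey())); // 1

        // Formatting the parsed id back reproduces the original string,
        // because the cookie is always padded to eight hex digits.
        System.out.println(FilerClient.toFileId(fid));          // 3,1637037d6
    }
}
```

The parse relies on the cookie occupying the last eight characters, which the %08x in toFileId guarantees; an input whose file key carries a leading zero parses to the same fields but does not reproduce the identical string.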
return deleteEntry( parent, @@ -92,9 +147,9 @@ public class FilerClient { public boolean touch(String path, int mode, int uid, int gid, String userName, String[] groupNames) { - Path pathObject = Paths.get(path); - String parent = pathObject.getParent().toString(); - String name = pathObject.getFileName().toString(); + File pathFile = new File(path); + String parent = pathFile.getParent().replace('\\','/'); + String name = pathFile.getName(); FilerProto.Entry entry = lookupEntry(parent, name); if (entry == null) { @@ -156,7 +211,7 @@ public class FilerClient { List results = new ArrayList(); String lastFileName = ""; for (int limit = Integer.MAX_VALUE; limit > 0; ) { - List t = listEntries(path, "", lastFileName, 1024); + List t = listEntries(path, "", lastFileName, 1024, false); if (t == null) { break; } @@ -173,31 +228,35 @@ public class FilerClient { return results; } - public List listEntries(String path, String entryPrefix, String lastEntryName, int limit) { - Iterator iter = filerGrpcClient.getBlockingStub().listEntries(FilerProto.ListEntriesRequest.newBuilder() + public List listEntries(String path, String entryPrefix, String lastEntryName, int limit, boolean includeLastEntry) { + Iterator iter = this.getBlockingStub().listEntries(FilerProto.ListEntriesRequest.newBuilder() .setDirectory(path) .setPrefix(entryPrefix) .setStartFromFileName(lastEntryName) + .setInclusiveStartFrom(includeLastEntry) .setLimit(limit) .build()); List entries = new ArrayList<>(); - while (iter.hasNext()){ + while (iter.hasNext()) { FilerProto.ListEntriesResponse resp = iter.next(); - entries.add(fixEntryAfterReading(resp.getEntry())); + entries.add(afterEntryDeserialization(resp.getEntry())); } return entries; } public FilerProto.Entry lookupEntry(String directory, String entryName) { try { - FilerProto.Entry entry = filerGrpcClient.getBlockingStub().lookupDirectoryEntry( + FilerProto.Entry entry = this.getBlockingStub().lookupDirectoryEntry( FilerProto.LookupDirectoryEntryRequest.newBuilder() .setDirectory(directory) .setName(entryName) .build()).getEntry(); - return fixEntryAfterReading(entry); + if (entry == null) { + return null; + } + return afterEntryDeserialization(entry); } catch (Exception e) { - if (e.getMessage().indexOf("filer: no entry is found in filer store")>0){ + if (e.getMessage().indexOf("filer: no entry is found in filer store") > 0) { return null; } LOG.warn("lookupEntry {}/{}: {}", directory, entryName, e); @@ -205,28 +264,32 @@ public class FilerClient { } } - public boolean createEntry(String parent, FilerProto.Entry entry) { try { - filerGrpcClient.getBlockingStub().createEntry(FilerProto.CreateEntryRequest.newBuilder() - .setDirectory(parent) - .setEntry(entry) - .build()); + FilerProto.CreateEntryResponse createEntryResponse = + this.getBlockingStub().createEntry(FilerProto.CreateEntryRequest.newBuilder() + .setDirectory(parent) + .setEntry(entry) + .build()); + if (Strings.isNullOrEmpty(createEntryResponse.getError())) { + return true; + } + LOG.warn("createEntry {}/{} error: {}", parent, entry.getName(), createEntryResponse.getError()); + return false; } catch (Exception e) { LOG.warn("createEntry {}/{}: {}", parent, entry.getName(), e); return false; } - return true; } public boolean updateEntry(String parent, FilerProto.Entry entry) { try { - filerGrpcClient.getBlockingStub().updateEntry(FilerProto.UpdateEntryRequest.newBuilder() + this.getBlockingStub().updateEntry(FilerProto.UpdateEntryRequest.newBuilder() .setDirectory(parent) .setEntry(entry) .build()); } catch (Exception 
e) { - LOG.warn("createEntry {}/{}: {}", parent, entry.getName(), e); + LOG.warn("updateEntry {}/{}: {}", parent, entry.getName(), e); return false; } return true; @@ -234,7 +297,7 @@ public class FilerClient { public boolean deleteEntry(String parent, String entryName, boolean isDeleteFileChunk, boolean isRecursive, boolean ignoreRecusiveError) { try { - filerGrpcClient.getBlockingStub().deleteEntry(FilerProto.DeleteEntryRequest.newBuilder() + this.getBlockingStub().deleteEntry(FilerProto.DeleteEntryRequest.newBuilder() .setDirectory(parent) .setName(entryName) .setIsDeleteData(isDeleteFileChunk) @@ -250,7 +313,7 @@ public class FilerClient { public boolean atomicRenameEntry(String oldParent, String oldName, String newParent, String newName) { try { - filerGrpcClient.getBlockingStub().atomicRenameEntry(FilerProto.AtomicRenameEntryRequest.newBuilder() + this.getBlockingStub().atomicRenameEntry(FilerProto.AtomicRenameEntryRequest.newBuilder() .setOldDirectory(oldParent) .setOldName(oldName) .setNewDirectory(newParent) @@ -263,24 +326,13 @@ public class FilerClient { return true; } - private FilerProto.Entry fixEntryAfterReading(FilerProto.Entry entry) { - if (entry.getChunksList().size() <= 0) { - return entry; - } - String fileId = entry.getChunks(0).getFileId(); - if (fileId != null && fileId.length() != 0) { - return entry; - } - FilerProto.Entry.Builder entryBuilder = entry.toBuilder(); - entryBuilder.clearChunks(); - for (FilerProto.FileChunk chunk : entry.getChunksList()) { - FilerProto.FileChunk.Builder chunkBuilder = chunk.toBuilder(); - FilerProto.FileId fid = chunk.getFid(); - fileId = String.format("%d,%d%x", fid.getVolumeId(), fid.getFileKey(), fid.getCookie()); - chunkBuilder.setFileId(fileId); - entryBuilder.addChunks(chunkBuilder); - } - return entryBuilder.build(); + public Iterator watch(String prefix, String clientName, long sinceNs) { + return this.getBlockingStub().subscribeMetadata(FilerProto.SubscribeMetadataRequest.newBuilder() + .setPathPrefix(prefix) + .setClientName(clientName) + .setSinceNs(sinceNs) + .build() + ); } } diff --git a/other/java/client/src/main/java/seaweedfs/client/FilerGrpcClient.java b/other/java/client/src/main/java/seaweedfs/client/FilerGrpcClient.java index 3626c76de..6c57e2e0d 100644 --- a/other/java/client/src/main/java/seaweedfs/client/FilerGrpcClient.java +++ b/other/java/client/src/main/java/seaweedfs/client/FilerGrpcClient.java @@ -9,17 +9,13 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; import javax.net.ssl.SSLException; +import java.util.HashMap; +import java.util.Map; import java.util.concurrent.TimeUnit; public class FilerGrpcClient { private static final Logger logger = LoggerFactory.getLogger(FilerGrpcClient.class); - - private final ManagedChannel channel; - private final SeaweedFilerGrpc.SeaweedFilerBlockingStub blockingStub; - private final SeaweedFilerGrpc.SeaweedFilerStub asyncStub; - private final SeaweedFilerGrpc.SeaweedFilerFutureStub futureStub; - static SslContext sslContext; static { @@ -30,6 +26,20 @@ public class FilerGrpcClient { } } + public final int VOLUME_SERVER_ACCESS_DIRECT = 0; + public final int VOLUME_SERVER_ACCESS_PUBLIC_URL = 1; + public final int VOLUME_SERVER_ACCESS_FILER_PROXY = 2; + public final Map vidLocations = new HashMap<>(); + private final ManagedChannel channel; + private final SeaweedFilerGrpc.SeaweedFilerBlockingStub blockingStub; + private final SeaweedFilerGrpc.SeaweedFilerStub asyncStub; + private final SeaweedFilerGrpc.SeaweedFilerFutureStub futureStub; + private boolean 
cipher = false; + private String collection = ""; + private String replication = ""; + private int volumeServerAccess = VOLUME_SERVER_ACCESS_DIRECT; + private String filerAddress; + public FilerGrpcClient(String host, int grpcPort) { this(host, grpcPort, sslContext); } @@ -37,20 +47,43 @@ public class FilerGrpcClient { public FilerGrpcClient(String host, int grpcPort, SslContext sslContext) { this(sslContext == null ? - ManagedChannelBuilder.forAddress(host, grpcPort).usePlaintext() : + ManagedChannelBuilder.forAddress(host, grpcPort).usePlaintext() + .maxInboundMessageSize(1024 * 1024 * 1024) : NettyChannelBuilder.forAddress(host, grpcPort) + .maxInboundMessageSize(1024 * 1024 * 1024) .negotiationType(NegotiationType.TLS) .sslContext(sslContext)); + filerAddress = String.format("%s:%d", host, grpcPort - 10000); + + FilerProto.GetFilerConfigurationResponse filerConfigurationResponse = + this.getBlockingStub().getFilerConfiguration( + FilerProto.GetFilerConfigurationRequest.newBuilder().build()); + cipher = filerConfigurationResponse.getCipher(); + collection = filerConfigurationResponse.getCollection(); + replication = filerConfigurationResponse.getReplication(); + } - public FilerGrpcClient(ManagedChannelBuilder channelBuilder) { + private FilerGrpcClient(ManagedChannelBuilder channelBuilder) { channel = channelBuilder.build(); blockingStub = SeaweedFilerGrpc.newBlockingStub(channel); asyncStub = SeaweedFilerGrpc.newStub(channel); futureStub = SeaweedFilerGrpc.newFutureStub(channel); } + public boolean isCipher() { + return cipher; + } + + public String getCollection() { + return collection; + } + + public String getReplication() { + return replication; + } + public void shutdown() throws InterruptedException { channel.shutdown().awaitTermination(5, TimeUnit.SECONDS); } @@ -67,4 +100,39 @@ public class FilerGrpcClient { return futureStub; } + public void setAccessVolumeServerDirectly() { + this.volumeServerAccess = VOLUME_SERVER_ACCESS_DIRECT; + } + + public boolean isAccessVolumeServerDirectly() { + return this.volumeServerAccess == VOLUME_SERVER_ACCESS_DIRECT; + } + + public void setAccessVolumeServerByPublicUrl() { + this.volumeServerAccess = VOLUME_SERVER_ACCESS_PUBLIC_URL; + } + + public boolean isAccessVolumeServerByPublicUrl() { + return this.volumeServerAccess == VOLUME_SERVER_ACCESS_PUBLIC_URL; + } + + public void setAccessVolumeServerByFilerProxy() { + this.volumeServerAccess = VOLUME_SERVER_ACCESS_FILER_PROXY; + } + + public boolean isAccessVolumeServerByFilerProxy() { + return this.volumeServerAccess == VOLUME_SERVER_ACCESS_FILER_PROXY; + } + + public String getChunkUrl(String chunkId, String url, String publicUrl) { + switch (this.volumeServerAccess) { + case VOLUME_SERVER_ACCESS_PUBLIC_URL: + return String.format("http://%s/%s", publicUrl, chunkId); + case VOLUME_SERVER_ACCESS_FILER_PROXY: + return String.format("http://%s/?proxyChunkId=%s", this.filerAddress, chunkId); + default: + return String.format("http://%s/%s", url, chunkId); + } + } + } diff --git a/other/java/client/src/main/java/seaweedfs/client/Gzip.java b/other/java/client/src/main/java/seaweedfs/client/Gzip.java new file mode 100644 index 000000000..4909094f5 --- /dev/null +++ b/other/java/client/src/main/java/seaweedfs/client/Gzip.java @@ -0,0 +1,41 @@ +package seaweedfs.client; + +import java.io.ByteArrayInputStream; +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.io.InputStream; +import java.util.zip.GZIPInputStream; +import java.util.zip.GZIPOutputStream; + +public class 
Gzip { + public static byte[] compress(byte[] data) throws IOException { + ByteArrayOutputStream bos = new ByteArrayOutputStream(data.length); + GZIPOutputStream gzip = new GZIPOutputStream(bos); + gzip.write(data); + gzip.close(); + byte[] compressed = bos.toByteArray(); + bos.close(); + return compressed; + } + + public static byte[] decompress(byte[] compressed) { + try { + ByteArrayInputStream bis = new ByteArrayInputStream(compressed); + GZIPInputStream gis = new GZIPInputStream(bis); + return readAll(gis); + } catch (Exception e) { + return compressed; + } + } + + private static byte[] readAll(InputStream input) throws IOException { + try (ByteArrayOutputStream output = new ByteArrayOutputStream()) { + byte[] buffer = new byte[4096]; + int n; + while (-1 != (n = input.read(buffer))) { + output.write(buffer, 0, n); + } + return output.toByteArray(); + } + } +} diff --git a/other/java/client/src/main/java/seaweedfs/client/SeaweedCipher.java b/other/java/client/src/main/java/seaweedfs/client/SeaweedCipher.java new file mode 100644 index 000000000..8d0ebd755 --- /dev/null +++ b/other/java/client/src/main/java/seaweedfs/client/SeaweedCipher.java @@ -0,0 +1,55 @@ +package seaweedfs.client; + +import javax.crypto.Cipher; +import javax.crypto.spec.GCMParameterSpec; +import javax.crypto.spec.SecretKeySpec; +import java.security.SecureRandom; + +public class SeaweedCipher { + // AES-GCM parameters + public static final int AES_KEY_SIZE = 256; // in bits + public static final int GCM_NONCE_LENGTH = 12; // in bytes + public static final int GCM_TAG_LENGTH = 16; // in bytes + + private static SecureRandom random = new SecureRandom(); + + public static byte[] genCipherKey() throws Exception { + byte[] key = new byte[AES_KEY_SIZE / 8]; + random.nextBytes(key); + return key; + } + + public static byte[] encrypt(byte[] clearTextbytes, byte[] cipherKey) throws Exception { + return encrypt(clearTextbytes, 0, clearTextbytes.length, cipherKey); + } + + public static byte[] encrypt(byte[] clearTextbytes, int offset, int length, byte[] cipherKey) throws Exception { + + final byte[] nonce = new byte[GCM_NONCE_LENGTH]; + random.nextBytes(nonce); + GCMParameterSpec spec = new GCMParameterSpec(GCM_TAG_LENGTH * 8, nonce); + SecretKeySpec keySpec = new SecretKeySpec(cipherKey, "AES"); + + Cipher AES_cipherInstance = Cipher.getInstance("AES/GCM/NoPadding"); + AES_cipherInstance.init(Cipher.ENCRYPT_MODE, keySpec, spec); + + byte[] encryptedText = AES_cipherInstance.doFinal(clearTextbytes, offset, length); + + byte[] iv = AES_cipherInstance.getIV(); + byte[] message = new byte[GCM_NONCE_LENGTH + clearTextbytes.length + GCM_TAG_LENGTH]; + System.arraycopy(iv, 0, message, 0, GCM_NONCE_LENGTH); + System.arraycopy(encryptedText, 0, message, GCM_NONCE_LENGTH, encryptedText.length); + + return message; + } + + public static byte[] decrypt(byte[] encryptedText, byte[] cipherKey) throws Exception { + final Cipher AES_cipherInstance = Cipher.getInstance("AES/GCM/NoPadding"); + GCMParameterSpec params = new GCMParameterSpec(GCM_TAG_LENGTH * 8, encryptedText, 0, GCM_NONCE_LENGTH); + SecretKeySpec keySpec = new SecretKeySpec(cipherKey, "AES"); + AES_cipherInstance.init(Cipher.DECRYPT_MODE, keySpec, params); + byte[] decryptedText = AES_cipherInstance.doFinal(encryptedText, GCM_NONCE_LENGTH, encryptedText.length - GCM_NONCE_LENGTH); + return decryptedText; + } + +} diff --git a/other/java/client/src/main/java/seaweedfs/client/SeaweedInputStream.java b/other/java/client/src/main/java/seaweedfs/client/SeaweedInputStream.java new 
file mode 100644 index 000000000..4e40ce1b6 --- /dev/null +++ b/other/java/client/src/main/java/seaweedfs/client/SeaweedInputStream.java @@ -0,0 +1,208 @@ +package seaweedfs.client; + +// based on org.apache.hadoop.fs.azurebfs.services.AbfsInputStream + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.EOFException; +import java.io.IOException; +import java.io.InputStream; +import java.nio.ByteBuffer; +import java.util.List; + +public class SeaweedInputStream extends InputStream { + + private static final Logger LOG = LoggerFactory.getLogger(SeaweedInputStream.class); + private static final IOException EXCEPTION_STREAM_IS_CLOSED = new IOException("Stream is closed!"); + + private final FilerClient filerClient; + private final String path; + private final FilerProto.Entry entry; + private final List visibleIntervalList; + private final long contentLength; + + private long position = 0; // cursor of the file + + private boolean closed = false; + + public SeaweedInputStream( + final FilerClient filerClient, + final String fullpath) throws IOException { + this.path = fullpath; + this.filerClient = filerClient; + this.entry = filerClient.lookupEntry( + SeaweedOutputStream.getParentDirectory(fullpath), + SeaweedOutputStream.getFileName(fullpath)); + this.contentLength = SeaweedRead.fileSize(entry); + + this.visibleIntervalList = SeaweedRead.nonOverlappingVisibleIntervals(filerClient, entry.getChunksList()); + + LOG.debug("new path:{} entry:{} visibleIntervalList:{}", path, entry, visibleIntervalList); + + } + + public SeaweedInputStream( + final FilerClient filerClient, + final String path, + final FilerProto.Entry entry) throws IOException { + this.filerClient = filerClient; + this.path = path; + this.entry = entry; + this.contentLength = SeaweedRead.fileSize(entry); + + this.visibleIntervalList = SeaweedRead.nonOverlappingVisibleIntervals(filerClient, entry.getChunksList()); + + LOG.debug("new path:{} entry:{} visibleIntervalList:{}", path, entry, visibleIntervalList); + + } + + public String getPath() { + return path; + } + + @Override + public int read() throws IOException { + byte[] b = new byte[1]; + int numberOfBytesRead = read(b, 0, 1); + if (numberOfBytesRead < 0) { + return -1; + } else { + return (b[0] & 0xFF); + } + } + + @Override + public int read(final byte[] b, final int off, final int len) throws IOException { + + if (b == null) { + throw new IllegalArgumentException("null byte array passed in to read() method"); + } + if (off >= b.length) { + throw new IllegalArgumentException("offset greater than length of array"); + } + if (len < 0) { + throw new IllegalArgumentException("requested read length is less than zero"); + } + if (len > (b.length - off)) { + throw new IllegalArgumentException("requested read length is more than will fit after requested offset in buffer"); + } + + ByteBuffer buf = ByteBuffer.wrap(b, off, len); + return read(buf); + + } + + // implement ByteBufferReadable + public synchronized int read(ByteBuffer buf) throws IOException { + + if (position < 0) { + throw new IllegalArgumentException("attempting to read from negative offset"); + } + if (position >= contentLength) { + return -1; // Hadoop prefers -1 to EOFException + } + + long bytesRead = 0; + int len = buf.remaining(); + int start = (int) this.position; + if (start+len <= entry.getContent().size()) { + entry.getContent().substring(start, start+len).copyTo(buf); + } else { + bytesRead = SeaweedRead.read(this.filerClient, this.visibleIntervalList, this.position, buf, 
SeaweedRead.fileSize(entry)); + } + + if (bytesRead > Integer.MAX_VALUE) { + throw new IOException("Unexpected Content-Length"); + } + + if (bytesRead > 0) { + this.position += bytesRead; + } + + return (int) bytesRead; + } + + public synchronized void seek(long n) throws IOException { + if (closed) { + throw EXCEPTION_STREAM_IS_CLOSED; + } + if (n < 0) { + throw new EOFException("Cannot seek to a negative offset"); + } + if (n > contentLength) { + throw new EOFException("Attempted to seek or read past the end of the file"); + } + this.position = n; + } + + @Override + public synchronized long skip(long n) throws IOException { + if (closed) { + throw EXCEPTION_STREAM_IS_CLOSED; + } + if (this.position == contentLength) { + if (n > 0) { + throw new EOFException("Attempted to seek or read past the end of the file"); + } + } + long newPos = this.position + n; + if (newPos < 0) { + newPos = 0; + n = newPos - this.position; + } + if (newPos > contentLength) { + newPos = contentLength; + n = newPos - this.position; + } + seek(newPos); + return n; + } + + /** + * Return the size of the remaining available bytes + * if the size is less than or equal to {@link Integer#MAX_VALUE}, + * otherwise, return {@link Integer#MAX_VALUE}. + *
+ * <p>
+ * This is to match the behavior of DFSInputStream.available(), + * which some clients may rely on (HBase write-ahead log reading in + * particular). + */ + @Override + public synchronized int available() throws IOException { + if (closed) { + throw EXCEPTION_STREAM_IS_CLOSED; + } + final long remaining = this.contentLength - this.position; + return remaining <= Integer.MAX_VALUE + ? (int) remaining : Integer.MAX_VALUE; + } + + /** + * Returns the length of the file that this stream refers to. Note that the length returned is the length + * as of the time the Stream was opened. Specifically, if there have been subsequent appends to the file, + * they wont be reflected in the returned length. + * + * @return length of the file. + * @throws IOException if the stream is closed + */ + public long length() throws IOException { + if (closed) { + throw EXCEPTION_STREAM_IS_CLOSED; + } + return contentLength; + } + + public synchronized long getPos() throws IOException { + if (closed) { + throw EXCEPTION_STREAM_IS_CLOSED; + } + return position; + } + + @Override + public synchronized void close() throws IOException { + closed = true; + } + +} diff --git a/other/java/hdfs2/src/main/java/seaweed/hdfs/SeaweedOutputStream.java b/other/java/client/src/main/java/seaweedfs/client/SeaweedOutputStream.java similarity index 56% rename from other/java/hdfs2/src/main/java/seaweed/hdfs/SeaweedOutputStream.java rename to other/java/client/src/main/java/seaweedfs/client/SeaweedOutputStream.java index 7b488a5da..ba298a713 100644 --- a/other/java/hdfs2/src/main/java/seaweed/hdfs/SeaweedOutputStream.java +++ b/other/java/client/src/main/java/seaweedfs/client/SeaweedOutputStream.java @@ -1,48 +1,50 @@ -package seaweed.hdfs; +package seaweedfs.client; // adapted from org.apache.hadoop.fs.azurebfs.services.AbfsOutputStream -import com.google.common.base.Preconditions; -import org.apache.hadoop.fs.FSExceptionMessages; -import org.apache.hadoop.fs.Path; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import seaweedfs.client.FilerGrpcClient; -import seaweedfs.client.FilerProto; -import seaweedfs.client.SeaweedWrite; import java.io.IOException; import java.io.InterruptedIOException; import java.io.OutputStream; +import java.nio.Buffer; +import java.nio.ByteBuffer; import java.util.concurrent.*; -import static seaweed.hdfs.SeaweedFileSystemStore.getParentDirectory; - public class SeaweedOutputStream extends OutputStream { private static final Logger LOG = LoggerFactory.getLogger(SeaweedOutputStream.class); - - private final FilerGrpcClient filerGrpcClient; - private final Path path; + protected final boolean supportFlush = true; + private final FilerClient filerClient; + private final String path; private final int bufferSize; private final int maxConcurrentRequestCount; private final ThreadPoolExecutor threadExecutor; private final ExecutorCompletionService completionService; + private final ConcurrentLinkedDeque writeOperations; + private final boolean shouldSaveMetadata = false; private FilerProto.Entry.Builder entry; private long position; private boolean closed; - private boolean supportFlush = true; private volatile IOException lastError; private long lastFlushOffset; private long lastTotalAppendOffset = 0; - private byte[] buffer; - private int bufferIndex; - private ConcurrentLinkedDeque writeOperations; + private ByteBuffer buffer; + private long outputIndex; private String replication = "000"; - public SeaweedOutputStream(FilerGrpcClient filerGrpcClient, final Path path, FilerProto.Entry.Builder entry, 
+ public SeaweedOutputStream(FilerClient filerClient, final String fullpath) { + this(filerClient, fullpath, "000"); + } + + public SeaweedOutputStream(FilerClient filerClient, final String fullpath, final String replication) { + this(filerClient, fullpath, null, 0, 8 * 1024 * 1024, "000"); + } + + public SeaweedOutputStream(FilerClient filerClient, final String path, FilerProto.Entry.Builder entry, final long position, final int bufferSize, final String replication) { - this.filerGrpcClient = filerGrpcClient; + this.filerClient = filerClient; this.replication = replication; this.path = path; this.position = position; @@ -50,30 +52,65 @@ public class SeaweedOutputStream extends OutputStream { this.lastError = null; this.lastFlushOffset = 0; this.bufferSize = bufferSize; - this.buffer = new byte[bufferSize]; - this.bufferIndex = 0; + this.buffer = ByteBufferPool.request(bufferSize); + this.outputIndex = 0; this.writeOperations = new ConcurrentLinkedDeque<>(); - this.maxConcurrentRequestCount = 4 * Runtime.getRuntime().availableProcessors(); + this.maxConcurrentRequestCount = Runtime.getRuntime().availableProcessors(); this.threadExecutor - = new ThreadPoolExecutor(maxConcurrentRequestCount, - maxConcurrentRequestCount, - 10L, - TimeUnit.SECONDS, - new LinkedBlockingQueue()); + = new ThreadPoolExecutor(maxConcurrentRequestCount, + maxConcurrentRequestCount, + 120L, + TimeUnit.SECONDS, + new LinkedBlockingQueue()); this.completionService = new ExecutorCompletionService<>(this.threadExecutor); this.entry = entry; + if (this.entry == null) { + long now = System.currentTimeMillis() / 1000L; + + this.entry = FilerProto.Entry.newBuilder() + .setName(getFileName(path)) + .setIsDirectory(false) + .setAttributes(FilerProto.FuseAttributes.newBuilder() + .setFileMode(0755) + .setReplication(replication) + .setCrtime(now) + .setMtime(now) + .clearGroupName() + ); + } } - private synchronized void flushWrittenBytesToServiceInternal(final long offset) throws IOException { + public static String getParentDirectory(String path) { + int protoIndex = path.indexOf("://"); + if (protoIndex >= 0) { + int pathStart = path.indexOf("/", protoIndex+3); + path = path.substring(pathStart); + } + if (path.equals("/")) { + return path; + } + int lastSlashIndex = path.lastIndexOf("/"); + if (lastSlashIndex == 0) { + return "/"; + } + return path.substring(0, lastSlashIndex); + } - LOG.debug("SeaweedWrite.writeMeta path: {} entry:{}", path, entry); + public static String getFileName(String path) { + if (path.indexOf("/") < 0) { + return path; + } + int lastSlashIndex = path.lastIndexOf("/"); + return path.substring(lastSlashIndex + 1); + } + private synchronized void flushWrittenBytesToServiceInternal(final long offset) throws IOException { try { - SeaweedWrite.writeMeta(filerGrpcClient, getParentDirectory(path), entry); + SeaweedWrite.writeMeta(filerClient, getParentDirectory(path), entry); } catch (Exception ex) { throw new IOException(ex); } @@ -87,34 +124,40 @@ public class SeaweedOutputStream extends OutputStream { @Override public synchronized void write(final byte[] data, final int off, final int length) - throws IOException { + throws IOException { maybeThrowLastError(); - Preconditions.checkArgument(data != null, "null data"); + if (data == null) { + return; + } if (off < 0 || length < 0 || length > data.length - off) { throw new IndexOutOfBoundsException(); } + // System.out.println(path + " write [" + (outputIndex + off) + "," + ((outputIndex + off) + length) + ")"); + int currentOffset = off; - int 
writableBytes = bufferSize - bufferIndex; + int writableBytes = bufferSize - buffer.position(); int numberOfBytesToWrite = length; while (numberOfBytesToWrite > 0) { - if (writableBytes <= numberOfBytesToWrite) { - System.arraycopy(data, currentOffset, buffer, bufferIndex, writableBytes); - bufferIndex += writableBytes; - writeCurrentBufferToService(); - currentOffset += writableBytes; - numberOfBytesToWrite = numberOfBytesToWrite - writableBytes; - } else { - System.arraycopy(data, currentOffset, buffer, bufferIndex, numberOfBytesToWrite); - bufferIndex += numberOfBytesToWrite; - numberOfBytesToWrite = 0; + + if (numberOfBytesToWrite < writableBytes) { + buffer.put(data, currentOffset, numberOfBytesToWrite); + outputIndex += numberOfBytesToWrite; + break; } - writableBytes = bufferSize - bufferIndex; + // System.out.println(path + " [" + (outputIndex + currentOffset) + "," + ((outputIndex + currentOffset) + writableBytes) + ") " + buffer.capacity()); + buffer.put(data, currentOffset, writableBytes); + outputIndex += writableBytes; + currentOffset += writableBytes; + writeCurrentBufferToService(); + numberOfBytesToWrite = numberOfBytesToWrite - writableBytes; + writableBytes = bufferSize - buffer.position(); } + } /** @@ -149,47 +192,53 @@ public class SeaweedOutputStream extends OutputStream { flushInternal(); threadExecutor.shutdown(); } finally { - lastError = new IOException(FSExceptionMessages.STREAM_IS_CLOSED); + lastError = new IOException("Stream is closed!"); + ByteBufferPool.release(buffer); buffer = null; - bufferIndex = 0; + outputIndex = 0; closed = true; writeOperations.clear(); if (!threadExecutor.isShutdown()) { threadExecutor.shutdownNow(); } } + } private synchronized void writeCurrentBufferToService() throws IOException { - if (bufferIndex == 0) { + if (buffer.position() == 0) { return; } - final byte[] bytes = buffer; - final int bytesLength = bufferIndex; + position += submitWriteBufferToService(buffer, position); - buffer = new byte[bufferSize]; - bufferIndex = 0; - final long offset = position; - position += bytesLength; + buffer = ByteBufferPool.request(bufferSize); + + } - if (threadExecutor.getQueue().size() >= maxConcurrentRequestCount * 2) { + private synchronized int submitWriteBufferToService(final ByteBuffer bufferToWrite, final long writePosition) throws IOException { + + ((Buffer)bufferToWrite).flip(); + int bytesLength = bufferToWrite.limit() - bufferToWrite.position(); + + if (threadExecutor.getQueue().size() >= maxConcurrentRequestCount) { waitForTaskToComplete(); } - - final Future job = completionService.submit(new Callable() { - @Override - public Void call() throws Exception { - // originally: client.append(path, offset, bytes, 0, bytesLength); - SeaweedWrite.writeData(entry, replication, filerGrpcClient, offset, bytes, 0, bytesLength); - return null; - } + final Future job = completionService.submit(() -> { + // System.out.println(path + " is going to save [" + (writePosition) + "," + ((writePosition) + bytesLength) + ")"); + SeaweedWrite.writeData(entry, replication, filerClient, writePosition, bufferToWrite.array(), bufferToWrite.position(), bufferToWrite.limit(), path); + // System.out.println(path + " saved [" + (writePosition) + "," + ((writePosition) + bytesLength) + ")"); + ByteBufferPool.release(bufferToWrite); + return null; }); - writeOperations.add(new WriteOperation(job, offset, bytesLength)); + writeOperations.add(new WriteOperation(job, writePosition, bytesLength)); // Try to shrink the queue shrinkWriteOperationQueue(); + + return 
bytesLength; + } private void waitForTaskToComplete() throws IOException { @@ -231,13 +280,13 @@ public class SeaweedOutputStream extends OutputStream { } } - private synchronized void flushInternal() throws IOException { + protected synchronized void flushInternal() throws IOException { maybeThrowLastError(); writeCurrentBufferToService(); flushWrittenBytesToService(); } - private synchronized void flushInternalAsync() throws IOException { + protected synchronized void flushInternalAsync() throws IOException { maybeThrowLastError(); writeCurrentBufferToService(); flushWrittenBytesToServiceAsync(); @@ -270,10 +319,6 @@ public class SeaweedOutputStream extends OutputStream { private final long length; WriteOperation(final Future task, final long startOffset, final long length) { - Preconditions.checkNotNull(task, "task"); - Preconditions.checkArgument(startOffset >= 0, "startOffset"); - Preconditions.checkArgument(length >= 0, "length"); - this.task = task; this.startOffset = startOffset; this.length = length; diff --git a/other/java/client/src/main/java/seaweedfs/client/SeaweedRead.java b/other/java/client/src/main/java/seaweedfs/client/SeaweedRead.java index 2efa64580..384636601 100644 --- a/other/java/client/src/main/java/seaweedfs/client/SeaweedRead.java +++ b/other/java/client/src/main/java/seaweedfs/client/SeaweedRead.java @@ -1,88 +1,200 @@ package seaweedfs.client; +import org.apache.http.Header; +import org.apache.http.HeaderElement; import org.apache.http.HttpEntity; import org.apache.http.HttpHeaders; -import org.apache.http.HttpResponse; -import org.apache.http.client.HttpClient; +import org.apache.http.client.entity.GzipDecompressingEntity; +import org.apache.http.client.methods.CloseableHttpResponse; import org.apache.http.client.methods.HttpGet; -import org.apache.http.impl.client.DefaultHttpClient; +import org.apache.http.util.EntityUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; -import java.io.Closeable; import java.io.IOException; -import java.io.OutputStream; import java.nio.ByteBuffer; import java.util.*; public class SeaweedRead { - // private static final Logger LOG = LoggerFactory.getLogger(SeaweedRead.class); + private static final Logger LOG = LoggerFactory.getLogger(SeaweedRead.class); + + static ChunkCache chunkCache = new ChunkCache(4); + static VolumeIdCache volumeIdCache = new VolumeIdCache(4 * 1024); // returns bytesRead - public static long read(FilerGrpcClient filerGrpcClient, List visibleIntervals, - final long position, final byte[] buffer, final int bufferOffset, - final int bufferLength) throws IOException { + public static long read(FilerClient filerClient, List visibleIntervals, + final long position, final ByteBuffer buf, final long fileSize) throws IOException { + + List chunkViews = viewFromVisibles(visibleIntervals, position, buf.remaining()); - List chunkViews = viewFromVisibles(visibleIntervals, position, bufferLength); + Map knownLocations = new HashMap<>(); FilerProto.LookupVolumeRequest.Builder lookupRequest = FilerProto.LookupVolumeRequest.newBuilder(); for (ChunkView chunkView : chunkViews) { String vid = parseVolumeId(chunkView.fileId); - lookupRequest.addVolumeIds(vid); + FilerProto.Locations locations = volumeIdCache.getLocations(vid); + if (locations == null) { + lookupRequest.addVolumeIds(vid); + } else { + knownLocations.put(vid, locations); + } } - FilerProto.LookupVolumeResponse lookupResponse = filerGrpcClient - .getBlockingStub().lookupVolume(lookupRequest.build()); - - Map vid2Locations = 
lookupResponse.getLocationsMapMap(); + if (lookupRequest.getVolumeIdsCount() > 0) { + FilerProto.LookupVolumeResponse lookupResponse = filerClient + .getBlockingStub().lookupVolume(lookupRequest.build()); + Map vid2Locations = lookupResponse.getLocationsMapMap(); + for (Map.Entry entry : vid2Locations.entrySet()) { + volumeIdCache.setLocations(entry.getKey(), entry.getValue()); + knownLocations.put(entry.getKey(), entry.getValue()); + } + } //TODO parallel this long readCount = 0; - int startOffset = bufferOffset; + long startOffset = position; for (ChunkView chunkView : chunkViews) { - FilerProto.Locations locations = vid2Locations.get(parseVolumeId(chunkView.fileId)); - if (locations.getLocationsCount() == 0) { + + if (startOffset < chunkView.logicOffset) { + long gap = chunkView.logicOffset - startOffset; + LOG.debug("zero [{},{})", startOffset, startOffset + gap); + buf.position(buf.position()+ (int)gap); + readCount += gap; + startOffset += gap; + } + + FilerProto.Locations locations = knownLocations.get(parseVolumeId(chunkView.fileId)); + if (locations == null || locations.getLocationsCount() == 0) { + LOG.error("failed to locate {}", chunkView.fileId); // log here! return 0; } - int len = readChunkView(position, buffer, startOffset, chunkView, locations); + int len = readChunkView(filerClient, startOffset, buf, chunkView, locations); + + LOG.debug("read [{},{}) {} size {}", startOffset, startOffset + len, chunkView.fileId, chunkView.size); readCount += len; startOffset += len; } + long limit = Math.min(buf.limit(), fileSize); + + if (startOffset < limit) { + long gap = limit - startOffset; + LOG.debug("zero2 [{},{})", startOffset, startOffset + gap); + buf.position(buf.position()+ (int)gap); + readCount += gap; + startOffset += gap; + } + return readCount; } - private static int readChunkView(long position, byte[] buffer, int startOffset, ChunkView chunkView, FilerProto.Locations locations) throws IOException { - HttpClient client = new DefaultHttpClient(); - HttpGet request = new HttpGet( - String.format("http://%s/%s", locations.getLocations(0).getUrl(), chunkView.fileId)); + private static int readChunkView(FilerClient filerClient, long startOffset, ByteBuffer buf, ChunkView chunkView, FilerProto.Locations locations) throws IOException { + + byte[] chunkData = chunkCache.getChunk(chunkView.fileId); + + if (chunkData == null) { + chunkData = doFetchFullChunkData(filerClient, chunkView, locations); + chunkCache.setChunk(chunkView.fileId, chunkData); + } + + int len = (int) chunkView.size; + LOG.debug("readChunkView fid:{} chunkData.length:{} chunkView.offset:{} chunkView[{};{}) startOffset:{}", + chunkView.fileId, chunkData.length, chunkView.offset, chunkView.logicOffset, chunkView.logicOffset + chunkView.size, startOffset); + buf.put(chunkData, (int) (startOffset - chunkView.logicOffset + chunkView.offset), len); + + return len; + } + + public static byte[] doFetchFullChunkData(FilerClient filerClient, ChunkView chunkView, FilerProto.Locations locations) throws IOException { + + byte[] data = null; + IOException lastException = null; + for (long waitTime = 1000L; waitTime < 10 * 1000; waitTime += waitTime / 2) { + for (FilerProto.Location location : locations.getLocationsList()) { + String url = filerClient.getChunkUrl(chunkView.fileId, location.getUrl(), location.getPublicUrl()); + try { + data = doFetchOneFullChunkData(chunkView, url); + lastException = null; + break; + } catch (IOException ioe) { + LOG.debug("doFetchFullChunkData {} :{}", url, ioe); + lastException = ioe; + } + 
} + if (data != null) { + break; + } + try { + Thread.sleep(waitTime); + } catch (InterruptedException e) { + } + } - if (!chunkView.isFullChunk) { - request.setHeader(HttpHeaders.ACCEPT_ENCODING, ""); - request.setHeader(HttpHeaders.RANGE, - String.format("bytes=%d-%d", chunkView.offset, chunkView.offset + chunkView.size)); + if (lastException != null) { + throw lastException; } + LOG.debug("doFetchFullChunkData fid:{} chunkData.length:{}", chunkView.fileId, data.length); + + return data; + + } + + private static byte[] doFetchOneFullChunkData(ChunkView chunkView, String url) throws IOException { + + HttpGet request = new HttpGet(url); + + request.setHeader(HttpHeaders.ACCEPT_ENCODING, "gzip"); + + byte[] data = null; + + CloseableHttpResponse response = SeaweedUtil.getClosableHttpClient().execute(request); + try { - HttpResponse response = client.execute(request); HttpEntity entity = response.getEntity(); - int len = (int) (chunkView.logicOffset - position + chunkView.size); - OutputStream outputStream = new ByteBufferOutputStream(ByteBuffer.wrap(buffer, startOffset, len)); - entity.writeTo(outputStream); - // LOG.debug("* read chunkView:{} startOffset:{} length:{}", chunkView, startOffset, len); + Header contentEncodingHeader = entity.getContentEncoding(); + + if (contentEncodingHeader != null) { + HeaderElement[] encodings = contentEncodingHeader.getElements(); + for (int i = 0; i < encodings.length; i++) { + if (encodings[i].getName().equalsIgnoreCase("gzip")) { + entity = new GzipDecompressingEntity(entity); + break; + } + } + } + + data = EntityUtils.toByteArray(entity); - return len; + EntityUtils.consume(entity); } finally { - if (client instanceof Closeable) { - Closeable t = (Closeable) client; - t.close(); + response.close(); + request.releaseConnection(); + } + + if (chunkView.cipherKey != null && chunkView.cipherKey.length != 0) { + try { + data = SeaweedCipher.decrypt(data, chunkView.cipherKey); + } catch (Exception e) { + throw new IOException("fail to decrypt", e); } } + + if (chunkView.isCompressed) { + data = Gzip.decompress(data); + } + + LOG.debug("doFetchOneFullChunkData url:{} chunkData.length:{}", url, data.length); + + return data; + } protected static List viewFromVisibles(List visibleIntervals, long offset, long size) { @@ -90,27 +202,40 @@ public class SeaweedRead { long stop = offset + size; for (VisibleInterval chunk : visibleIntervals) { - if (chunk.start <= offset && offset < chunk.stop && offset < stop) { + long chunkStart = Math.max(offset, chunk.start); + long chunkStop = Math.min(stop, chunk.stop); + if (chunkStart < chunkStop) { boolean isFullChunk = chunk.isFullChunk && chunk.start == offset && chunk.stop <= stop; views.add(new ChunkView( - chunk.fileId, - offset - chunk.start, - Math.min(chunk.stop, stop) - offset, - offset, - isFullChunk + chunk.fileId, + chunkStart - chunk.start + chunk.chunkOffset, + chunkStop - chunkStart, + chunkStart, + isFullChunk, + chunk.cipherKey, + chunk.isCompressed )); - offset = Math.min(chunk.stop, stop); } } return views; } - public static List nonOverlappingVisibleIntervals(List chunkList) { + public static List nonOverlappingVisibleIntervals( + final FilerClient filerClient, List chunkList) throws IOException { + + chunkList = FileChunkManifest.resolveChunkManifest(filerClient, chunkList); + FilerProto.FileChunk[] chunks = chunkList.toArray(new FilerProto.FileChunk[0]); Arrays.sort(chunks, new Comparator() { @Override public int compare(FilerProto.FileChunk a, FilerProto.FileChunk b) { - return (int) (a.getMtime() - 
b.getMtime()); + // if just a.getMtime() - b.getMtime(), it will overflow! + if (a.getMtime() < b.getMtime()) { + return -1; + } else if (a.getMtime() > b.getMtime()) { + return 1; + } + return 0; } }); @@ -127,11 +252,14 @@ public class SeaweedRead { List newVisibles, FilerProto.FileChunk chunk) { VisibleInterval newV = new VisibleInterval( - chunk.getOffset(), - chunk.getOffset() + chunk.getSize(), - chunk.getFileId(), - chunk.getMtime(), - true + chunk.getOffset(), + chunk.getOffset() + chunk.getSize(), + chunk.getFileId(), + chunk.getMtime(), + 0, + true, + chunk.getCipherKey().toByteArray(), + chunk.getIsCompressed() ); // easy cases to speed up @@ -147,21 +275,27 @@ public class SeaweedRead { for (VisibleInterval v : visibles) { if (v.start < chunk.getOffset() && chunk.getOffset() < v.stop) { newVisibles.add(new VisibleInterval( - v.start, - chunk.getOffset(), - v.fileId, - v.modifiedTime, - false + v.start, + chunk.getOffset(), + v.fileId, + v.modifiedTime, + v.chunkOffset, + false, + v.cipherKey, + v.isCompressed )); } long chunkStop = chunk.getOffset() + chunk.getSize(); if (v.start < chunkStop && chunkStop < v.stop) { newVisibles.add(new VisibleInterval( - chunkStop, - v.stop, - v.fileId, - v.modifiedTime, - false + chunkStop, + v.stop, + v.fileId, + v.modifiedTime, + v.chunkOffset + (chunkStop - v.start), + false, + v.cipherKey, + v.isCompressed )); } if (chunkStop <= v.start || v.stop <= chunk.getOffset()) { @@ -191,6 +325,10 @@ public class SeaweedRead { return fileId; } + public static long fileSize(FilerProto.Entry entry) { + return Math.max(totalSize(entry.getChunksList()), entry.getAttributes().getFileSize()); + } + public static long totalSize(List chunksList) { long size = 0; for (FilerProto.FileChunk chunk : chunksList) { @@ -207,25 +345,33 @@ public class SeaweedRead { public final long stop; public final long modifiedTime; public final String fileId; + public final long chunkOffset; public final boolean isFullChunk; + public final byte[] cipherKey; + public final boolean isCompressed; - public VisibleInterval(long start, long stop, String fileId, long modifiedTime, boolean isFullChunk) { + public VisibleInterval(long start, long stop, String fileId, long modifiedTime, long chunkOffset, boolean isFullChunk, byte[] cipherKey, boolean isCompressed) { this.start = start; this.stop = stop; this.modifiedTime = modifiedTime; this.fileId = fileId; + this.chunkOffset = chunkOffset; this.isFullChunk = isFullChunk; + this.cipherKey = cipherKey; + this.isCompressed = isCompressed; } @Override public String toString() { return "VisibleInterval{" + - "start=" + start + - ", stop=" + stop + - ", modifiedTime=" + modifiedTime + - ", fileId='" + fileId + '\'' + - ", isFullChunk=" + isFullChunk + - '}'; + "start=" + start + + ", stop=" + stop + + ", modifiedTime=" + modifiedTime + + ", fileId='" + fileId + '\'' + + ", isFullChunk=" + isFullChunk + + ", cipherKey=" + Arrays.toString(cipherKey) + + ", isCompressed=" + isCompressed + + '}'; } } @@ -235,24 +381,30 @@ public class SeaweedRead { public final long size; public final long logicOffset; public final boolean isFullChunk; + public final byte[] cipherKey; + public final boolean isCompressed; - public ChunkView(String fileId, long offset, long size, long logicOffset, boolean isFullChunk) { + public ChunkView(String fileId, long offset, long size, long logicOffset, boolean isFullChunk, byte[] cipherKey, boolean isCompressed) { this.fileId = fileId; this.offset = offset; this.size = size; this.logicOffset = logicOffset; 
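Editor's note (not part of the patch): the comparator rewrite above fixes a real overflow. Casting the long difference of two mtimes to int can flip the sign once the values differ by more than Integer.MAX_VALUE, mis-ordering chunks. A standalone sketch; Long.compare(a.getMtime(), b.getMtime()) would be the idiomatic one-liner equivalent of the three branches:

```java
public class MtimeCompareDemo {
    public static void main(String[] args) {
        long newer = 4_000_000_000L; // a larger (more recent) mtime
        long older = 1L;

        // Old comparator: the long difference 3_999_999_999 does not fit
        // in an int, so the cast wraps to a negative number and the newer
        // chunk would incorrectly sort before the older one.
        System.out.println((int) (newer - older));      // -294967297

        // Same ordering as the patched three-branch comparator:
        System.out.println(Long.compare(newer, older)); // 1
    }
}
```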
this.isFullChunk = isFullChunk; + this.cipherKey = cipherKey; + this.isCompressed = isCompressed; } @Override public String toString() { return "ChunkView{" + - "fileId='" + fileId + '\'' + - ", offset=" + offset + - ", size=" + size + - ", logicOffset=" + logicOffset + - ", isFullChunk=" + isFullChunk + - '}'; + "fileId='" + fileId + '\'' + + ", offset=" + offset + + ", size=" + size + + ", logicOffset=" + logicOffset + + ", isFullChunk=" + isFullChunk + + ", cipherKey=" + Arrays.toString(cipherKey) + + ", isCompressed=" + isCompressed + + '}'; } } diff --git a/other/java/client/src/main/java/seaweedfs/client/SeaweedUtil.java b/other/java/client/src/main/java/seaweedfs/client/SeaweedUtil.java new file mode 100644 index 000000000..c465d935f --- /dev/null +++ b/other/java/client/src/main/java/seaweedfs/client/SeaweedUtil.java @@ -0,0 +1,30 @@ +package seaweedfs.client; + +import org.apache.http.impl.DefaultConnectionReuseStrategy; +import org.apache.http.impl.client.CloseableHttpClient; +import org.apache.http.impl.client.DefaultConnectionKeepAliveStrategy; +import org.apache.http.impl.client.HttpClientBuilder; +import org.apache.http.impl.conn.PoolingHttpClientConnectionManager; + +public class SeaweedUtil { + + static PoolingHttpClientConnectionManager cm = new PoolingHttpClientConnectionManager(); + static CloseableHttpClient httpClient; + + static { + // Increase max total connection to 200 + cm.setMaxTotal(200); + // Increase default max connection per route to 20 + cm.setDefaultMaxPerRoute(20); + + httpClient = HttpClientBuilder.create() + .setConnectionManager(cm) + .setConnectionReuseStrategy(DefaultConnectionReuseStrategy.INSTANCE) + .setKeepAliveStrategy(DefaultConnectionKeepAliveStrategy.INSTANCE) + .build(); + } + + public static CloseableHttpClient getClosableHttpClient() { + return httpClient; + } +} diff --git a/other/java/client/src/main/java/seaweedfs/client/SeaweedWrite.java b/other/java/client/src/main/java/seaweedfs/client/SeaweedWrite.java index 0663e8d98..f8c0c76b6 100644 --- a/other/java/client/src/main/java/seaweedfs/client/SeaweedWrite.java +++ b/other/java/client/src/main/java/seaweedfs/client/SeaweedWrite.java @@ -1,68 +1,114 @@ package seaweedfs.client; -import org.apache.http.HttpResponse; -import org.apache.http.client.HttpClient; +import com.google.protobuf.ByteString; +import org.apache.http.client.methods.CloseableHttpResponse; import org.apache.http.client.methods.HttpPost; import org.apache.http.entity.mime.HttpMultipartMode; import org.apache.http.entity.mime.MultipartEntityBuilder; -import org.apache.http.impl.client.DefaultHttpClient; +import org.apache.http.util.EntityUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import java.io.ByteArrayInputStream; -import java.io.Closeable; import java.io.IOException; import java.io.InputStream; +import java.security.SecureRandom; +import java.util.List; public class SeaweedWrite { + private static final Logger LOG = LoggerFactory.getLogger(SeaweedWrite.class); + + private static final SecureRandom random = new SecureRandom(); + public static void writeData(FilerProto.Entry.Builder entry, final String replication, - final FilerGrpcClient filerGrpcClient, + final FilerClient filerClient, final long offset, final byte[] bytes, - final long bytesOffset, final long bytesLength) throws IOException { - FilerProto.AssignVolumeResponse response = filerGrpcClient.getBlockingStub().assignVolume( + final long bytesOffset, final long bytesLength, + final String path) throws IOException { + 
FilerProto.FileChunk.Builder chunkBuilder = writeChunk( + replication, filerClient, offset, bytes, bytesOffset, bytesLength, path); + synchronized (entry) { + entry.addChunks(chunkBuilder); + } + } + + public static FilerProto.FileChunk.Builder writeChunk(final String replication, + final FilerClient filerClient, + final long offset, + final byte[] bytes, + final long bytesOffset, + final long bytesLength, + final String path) throws IOException { + FilerProto.AssignVolumeResponse response = filerClient.getBlockingStub().assignVolume( FilerProto.AssignVolumeRequest.newBuilder() - .setCollection("") - .setReplication(replication) + .setCollection(filerClient.getCollection()) + .setReplication(replication == null ? filerClient.getReplication() : replication) .setDataCenter("") - .setReplication("") .setTtlSec(0) + .setPath(path) .build()); String fileId = response.getFileId(); - String url = response.getUrl(); String auth = response.getAuth(); - String targetUrl = String.format("http://%s/%s", url, fileId); - String etag = multipartUpload(targetUrl, auth, bytes, bytesOffset, bytesLength); + String targetUrl = filerClient.getChunkUrl(fileId, response.getUrl(), response.getPublicUrl()); - entry.addChunks(FilerProto.FileChunk.newBuilder() + ByteString cipherKeyString = com.google.protobuf.ByteString.EMPTY; + byte[] cipherKey = null; + if (filerClient.isCipher()) { + cipherKey = genCipherKey(); + cipherKeyString = ByteString.copyFrom(cipherKey); + } + + String etag = multipartUpload(targetUrl, auth, bytes, bytesOffset, bytesLength, cipherKey); + + LOG.debug("write file chunk {} size {}", targetUrl, bytesLength); + + return FilerProto.FileChunk.newBuilder() .setFileId(fileId) .setOffset(offset) .setSize(bytesLength) .setMtime(System.currentTimeMillis() / 10000L) .setETag(etag) - ); - + .setCipherKey(cipherKeyString); } - public static void writeMeta(final FilerGrpcClient filerGrpcClient, - final String parentDirectory, final FilerProto.Entry.Builder entry) { - filerGrpcClient.getBlockingStub().createEntry( - FilerProto.CreateEntryRequest.newBuilder() - .setDirectory(parentDirectory) - .setEntry(entry) - .build() - ); + public static void writeMeta(final FilerClient filerClient, + final String parentDirectory, + final FilerProto.Entry.Builder entry) throws IOException { + + synchronized (entry) { + List chunks = FileChunkManifest.maybeManifestize(filerClient, entry.getChunksList(), parentDirectory); + entry.clearChunks(); + entry.addAllChunks(chunks); + filerClient.getBlockingStub().createEntry( + FilerProto.CreateEntryRequest.newBuilder() + .setDirectory(parentDirectory) + .setEntry(entry) + .build() + ); + } } private static String multipartUpload(String targetUrl, String auth, final byte[] bytes, - final long bytesOffset, final long bytesLength) throws IOException { - - HttpClient client = new DefaultHttpClient(); - - InputStream inputStream = new ByteArrayInputStream(bytes, (int) bytesOffset, (int) bytesLength); + final long bytesOffset, final long bytesLength, + byte[] cipherKey) throws IOException { + + InputStream inputStream = null; + if (cipherKey == null || cipherKey.length == 0) { + inputStream = new ByteArrayInputStream(bytes, (int) bytesOffset, (int) bytesLength); + } else { + try { + byte[] encryptedBytes = SeaweedCipher.encrypt(bytes, (int) bytesOffset, (int) bytesLength, cipherKey); + inputStream = new ByteArrayInputStream(encryptedBytes, 0, encryptedBytes.length); + } catch (Exception e) { + throw new IOException("fail to encrypt data", e); + } + } HttpPost post = new 
HttpPost(targetUrl); if (auth != null && auth.length() != 0) { @@ -74,8 +120,9 @@ public class SeaweedWrite { .addBinaryBody("upload", inputStream) .build()); + CloseableHttpResponse response = SeaweedUtil.getClosableHttpClient().execute(post); + try { - HttpResponse response = client.execute(post); String etag = response.getLastHeader("ETag").getValue(); @@ -83,13 +130,19 @@ public class SeaweedWrite { etag = etag.substring(1, etag.length() - 1); } + EntityUtils.consume(response.getEntity()); + return etag; } finally { - if (client instanceof Closeable) { - Closeable t = (Closeable) client; - t.close(); - } + response.close(); + post.releaseConnection(); } } + + private static byte[] genCipherKey() { + byte[] b = new byte[32]; + random.nextBytes(b); + return b; + } } diff --git a/other/java/client/src/main/java/seaweedfs/client/VolumeIdCache.java b/other/java/client/src/main/java/seaweedfs/client/VolumeIdCache.java new file mode 100644 index 000000000..fd2649cc2 --- /dev/null +++ b/other/java/client/src/main/java/seaweedfs/client/VolumeIdCache.java @@ -0,0 +1,36 @@ +package seaweedfs.client; + +import com.google.common.cache.Cache; +import com.google.common.cache.CacheBuilder; + +import java.util.concurrent.TimeUnit; + +public class VolumeIdCache { + + private Cache cache = null; + + public VolumeIdCache(int maxEntries) { + if (maxEntries == 0) { + return; + } + this.cache = CacheBuilder.newBuilder() + .maximumSize(maxEntries) + .expireAfterAccess(5, TimeUnit.MINUTES) + .build(); + } + + public FilerProto.Locations getLocations(String volumeId) { + if (this.cache == null) { + return null; + } + return this.cache.getIfPresent(volumeId); + } + + public void setLocations(String volumeId, FilerProto.Locations locations) { + if (this.cache == null) { + return; + } + this.cache.put(volumeId, locations); + } + +} diff --git a/other/java/client/src/main/proto/filer.proto b/other/java/client/src/main/proto/filer.proto index ef847cbe7..ac4c9a0e7 100644 --- a/other/java/client/src/main/proto/filer.proto +++ b/other/java/client/src/main/proto/filer.proto @@ -2,6 +2,7 @@ syntax = "proto3"; package filer_pb; +option go_package = "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"; option java_package = "seaweedfs.client"; option java_outer_classname = "FilerProto"; @@ -21,6 +22,9 @@ service SeaweedFiler { rpc UpdateEntry (UpdateEntryRequest) returns (UpdateEntryResponse) { } + rpc AppendToEntry (AppendToEntryRequest) returns (AppendToEntryResponse) { + } + rpc DeleteEntry (DeleteEntryRequest) returns (DeleteEntryResponse) { } @@ -33,6 +37,9 @@ service SeaweedFiler { rpc LookupVolume (LookupVolumeRequest) returns (LookupVolumeResponse) { } + rpc CollectionList (CollectionListRequest) returns (CollectionListResponse) { + } + rpc DeleteCollection (DeleteCollectionRequest) returns (DeleteCollectionResponse) { } @@ -42,6 +49,24 @@ service SeaweedFiler { rpc GetFilerConfiguration (GetFilerConfigurationRequest) returns (GetFilerConfigurationResponse) { } + rpc SubscribeMetadata (SubscribeMetadataRequest) returns (stream SubscribeMetadataResponse) { + } + + rpc SubscribeLocalMetadata (SubscribeMetadataRequest) returns (stream SubscribeMetadataResponse) { + } + + rpc KeepConnected (stream KeepConnectedRequest) returns (stream KeepConnectedResponse) { + } + + rpc LocateBroker (LocateBrokerRequest) returns (LocateBrokerResponse) { + } + + rpc KvGet (KvGetRequest) returns (KvGetResponse) { + } + + rpc KvPut (KvPutRequest) returns (KvPutResponse) { + } + } ////////////////////////////////////////////////// @@ -73,6 
diff --git a/other/java/client/src/main/proto/filer.proto b/other/java/client/src/main/proto/filer.proto index ef847cbe7..ac4c9a0e7 100644 --- a/other/java/client/src/main/proto/filer.proto +++ b/other/java/client/src/main/proto/filer.proto @@ -2,6 +2,7 @@ syntax = "proto3"; package filer_pb; +option go_package = "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"; option java_package = "seaweedfs.client"; option java_outer_classname = "FilerProto"; @@ -21,6 +22,9 @@ service SeaweedFiler { rpc UpdateEntry (UpdateEntryRequest) returns (UpdateEntryResponse) { } + rpc AppendToEntry (AppendToEntryRequest) returns (AppendToEntryResponse) { + } + rpc DeleteEntry (DeleteEntryRequest) returns (DeleteEntryResponse) { } @@ -33,6 +37,9 @@ service SeaweedFiler { rpc LookupVolume (LookupVolumeRequest) returns (LookupVolumeResponse) { } + rpc CollectionList (CollectionListRequest) returns (CollectionListResponse) { + } + rpc DeleteCollection (DeleteCollectionRequest) returns (DeleteCollectionResponse) { } @@ -42,6 +49,24 @@ service SeaweedFiler { rpc GetFilerConfiguration (GetFilerConfigurationRequest) returns (GetFilerConfigurationResponse) { } + rpc SubscribeMetadata (SubscribeMetadataRequest) returns (stream SubscribeMetadataResponse) { + } + + rpc SubscribeLocalMetadata (SubscribeMetadataRequest) returns (stream SubscribeMetadataResponse) { + } + + rpc KeepConnected (stream KeepConnectedRequest) returns (stream KeepConnectedResponse) { + } + + rpc LocateBroker (LocateBrokerRequest) returns (LocateBrokerResponse) { + } + + rpc KvGet (KvGetRequest) returns (KvGetResponse) { + } + + rpc KvPut (KvPutRequest) returns (KvPutResponse) { + } + } ////////////////////////////////////////////////// @@ -73,6 +98,9 @@ message Entry { repeated FileChunk chunks = 3; FuseAttributes attributes = 4; map<string, bytes> extended = 5; + bytes hard_link_id = 7; + int32 hard_link_counter = 8; // only exists in hard link meta data + bytes content = 9; // if not empty, the file content } message FullEntry { @@ -85,6 +113,8 @@ message EventNotification { Entry new_entry = 2; bool delete_chunks = 3; string new_parent_path = 4; + bool is_from_other_cluster = 5; + repeated int32 signatures = 6; } message FileChunk { @@ -96,6 +126,13 @@ message FileChunk { string source_file_id = 6; // to be deprecated FileId fid = 7; FileId source_fid = 8; + bytes cipher_key = 9; + bool is_compressed = 10; + bool is_chunk_manifest = 11; // content is a list of FileChunks +} + +message FileChunkManifest { + repeated FileChunk chunks = 1; } message FileId { @@ -118,23 +155,39 @@ message FuseAttributes { string user_name = 11; // for hdfs repeated string group_name = 12; // for hdfs string symlink_target = 13; + bytes md5 = 14; + string disk_type = 15; } message CreateEntryRequest { string directory = 1; Entry entry = 2; + bool o_excl = 3; + bool is_from_other_cluster = 4; + repeated int32 signatures = 5; } message CreateEntryResponse { + string error = 1; } message UpdateEntryRequest { string directory = 1; Entry entry = 2; + bool is_from_other_cluster = 3; + repeated int32 signatures = 4; } message UpdateEntryResponse { } +message AppendToEntryRequest { + string directory = 1; + string entry_name = 2; + repeated FileChunk chunks = 3; +} +message AppendToEntryResponse { +} + message DeleteEntryRequest { string directory = 1; string name = 2; @@ -142,9 +195,12 @@ message DeleteEntryRequest { bool is_delete_data = 4; bool is_recursive = 5; bool ignore_recursive_error = 6; + bool is_from_other_cluster = 7; + repeated int32 signatures = 8; } message DeleteEntryResponse { + string error = 1; } message AtomicRenameEntryRequest { @@ -163,6 +219,9 @@ message AssignVolumeRequest { string replication = 3; int32 ttl_sec = 4; string data_center = 5; + string path = 6; + string rack = 7; + string disk_type = 8; } message AssignVolumeResponse { @@ -171,6 +230,9 @@ message AssignVolumeResponse { string public_url = 3; int32 count = 4; string auth = 5; + string collection = 6; + string replication = 7; + string error = 8; } message LookupVolumeRequest { @@ -189,6 +251,16 @@ message LookupVolumeResponse { map<string, Locations> locations_map = 1; } +message Collection { + string name = 1; +} +message CollectionListRequest { + bool include_normal_volumes = 1; + bool include_ec_volumes = 2; +} +message CollectionListResponse { + repeated Collection collections = 1; +} message DeleteCollectionRequest { string collection = 1; } @@ -200,11 +272,9 @@ message StatisticsRequest { string replication = 1; string collection = 2; string ttl = 3; + string disk_type = 4; } message StatisticsResponse { - string replication = 1; - string collection = 2; - string ttl = 3; uint64 total_size = 4; uint64 used_size = 5; uint64 file_count = 6; @@ -217,4 +287,80 @@ message GetFilerConfigurationResponse { string replication = 2; string collection = 3; uint32 max_mb = 4; + string dir_buckets = 5; + bool cipher = 7; + int32 signature = 8; + string metrics_address = 9; + int32 metrics_interval_sec = 10; +} + +message SubscribeMetadataRequest { + string client_name = 1; + string path_prefix = 2; + int64 since_ns = 3; + int32 signature = 4; +} +message SubscribeMetadataResponse { + string directory = 1; + EventNotification event_notification = 2; + int64 ts_ns = 3; +} + +message LogEntry { + int64 ts_ns = 1; + int32 partition_key_hash = 2; + bytes data = 3; +} + +message KeepConnectedRequest { + string name = 1; + uint32 grpc_port = 2; + repeated string resources = 3; +} +message KeepConnectedResponse { +} + +message LocateBrokerRequest { + string resource = 1; +} +message LocateBrokerResponse { + bool found = 1; + // if found, send the exact address + // if not found, send the full list of existing brokers + message Resource { + string grpc_addresses = 1; + int32 resource_count = 2; + } + repeated Resource resources = 2; +} + +// Key-Value operations
message KvGetRequest { + bytes key = 1; +} +message KvGetResponse { + bytes value = 1; + string error = 2; +} +message KvPutRequest { + bytes key = 1; + bytes value = 2; +} +message KvPutResponse { + string error = 1; +} + +// path-based configurations +message FilerConf { + int32 version = 1; + message PathConf { + string location_prefix = 1; + string collection = 2; + string replication = 3; + string ttl = 4; + string disk_type = 5; + bool fsync = 6; + uint32 volume_growth_count = 7; + } + repeated PathConf locations = 2; }
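Aside: the proto additions above bring metadata subscription, broker location, and a small key-value store into the filer API. A sketch of calling the new KV RPCs from Java through the generated blocking stub (the stub method names follow grpc-java's lowerCamelCase convention; key and value contents are arbitrary and error handling is elided):

// imports assumed: seaweedfs.client.*, com.google.protobuf.ByteString
FilerClient filerClient = new FilerClient("localhost", 18888);
filerClient.getBlockingStub().kvPut(FilerProto.KvPutRequest.newBuilder()
        .setKey(ByteString.copyFromUtf8("someKey"))
        .setValue(ByteString.copyFromUtf8("someValue"))
        .build());
FilerProto.KvGetResponse resp = filerClient.getBlockingStub().kvGet(
        FilerProto.KvGetRequest.newBuilder()
                .setKey(ByteString.copyFromUtf8("someKey"))
                .build());
// resp.getError() is non-empty on failure; resp.getValue() holds the stored bytes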
diff --git a/other/java/client/src/test/java/seaweedfs/client/SeaweedCipherTest.java b/other/java/client/src/test/java/seaweedfs/client/SeaweedCipherTest.java new file mode 100644 index 000000000..7b5e53e19 --- /dev/null +++ b/other/java/client/src/test/java/seaweedfs/client/SeaweedCipherTest.java @@ -0,0 +1,42 @@ +package seaweedfs.client; + +import org.junit.Test; + +import java.util.Base64; + +import static seaweedfs.client.SeaweedCipher.decrypt; +import static seaweedfs.client.SeaweedCipher.encrypt; + +public class SeaweedCipherTest { + + @Test + public void testSameAsGoImplementation() throws Exception { + byte[] secretKey = "256-bit key for AES 256 GCM encr".getBytes(); + + String plainText = "Now we need to generate a 256-bit key for AES 256 GCM"; + + System.out.println("Original Text  : " + plainText); + + byte[] cipherText = encrypt(plainText.getBytes(), secretKey); + System.out.println("Encrypted Text : " + Base64.getEncoder().encodeToString(cipherText)); + + byte[] decryptedText = decrypt(cipherText, secretKey); + System.out.println("Decrypted Text : " + new String(decryptedText)); + } + + @Test + public void testEncryptDecrypt() throws Exception { + byte[] secretKey = SeaweedCipher.genCipherKey(); + + String plainText = "Now we need to generate a 256-bit key for AES 256 GCM"; + + System.out.println("Original Text  : " + plainText); + + byte[] cipherText = encrypt(plainText.getBytes(), secretKey); + System.out.println("Encrypted Text : " + Base64.getEncoder().encodeToString(cipherText)); + + byte[] decryptedText = decrypt(cipherText, secretKey); + System.out.println("Decrypted Text : " + new String(decryptedText)); + } + +}
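Aside: SeaweedCipher, exercised by the test above, wraps AES-256-GCM; as seen in writeChunk earlier, the per-chunk key is stored alongside the chunk metadata in FileChunk.cipher_key. A round-trip sketch using only the calls shown in the test (both encrypt and decrypt declare throws Exception):

byte[] key = SeaweedCipher.genCipherKey();        // random 256-bit key
byte[] sealed = SeaweedCipher.encrypt("hello".getBytes(), key);
byte[] opened = SeaweedCipher.decrypt(sealed, key);
assert "hello".equals(new String(opened));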
diff --git a/other/java/client/src/test/java/seaweedfs/client/SeaweedReadTest.java b/other/java/client/src/test/java/seaweedfs/client/SeaweedReadTest.java index ccfcdb117..44b833c90 100644 --- a/other/java/client/src/test/java/seaweedfs/client/SeaweedReadTest.java +++ b/other/java/client/src/test/java/seaweedfs/client/SeaweedReadTest.java @@ -3,13 +3,14 @@ package seaweedfs.client; import org.junit.Assert; import org.junit.Test; +import java.io.IOException; import java.util.ArrayList; import java.util.List; public class SeaweedReadTest { @Test - public void testNonOverlappingVisibleIntervals() { + public void testNonOverlappingVisibleIntervals() throws IOException { List<FilerProto.FileChunk> chunks = new ArrayList<>(); chunks.add(FilerProto.FileChunk.newBuilder() .setFileId("aaa") @@ -24,7 +25,7 @@ public class SeaweedReadTest { .setMtime(2000) .build()); - List<SeaweedRead.VisibleInterval> visibleIntervals = SeaweedRead.nonOverlappingVisibleIntervals(chunks); + List<SeaweedRead.VisibleInterval> visibleIntervals = SeaweedRead.nonOverlappingVisibleIntervals(null, chunks); for (SeaweedRead.VisibleInterval visibleInterval : visibleIntervals) { System.out.println("visible:" + visibleInterval); } diff --git a/other/java/examples/pom.xml b/other/java/examples/pom.xml new file mode 100644 index 000000000..950a3f494 --- /dev/null +++ b/other/java/examples/pom.xml @@ -0,0 +1,32 @@ +<?xml version="1.0" encoding="UTF-8"?> +<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd"> + <modelVersion>4.0.0</modelVersion> + + <groupId>org.example</groupId> + <artifactId>unzip</artifactId> + <version>1.0-SNAPSHOT</version> + + <dependencies> + <dependency> + <groupId>com.github.chrislusf</groupId> + <artifactId>seaweedfs-client</artifactId> + <version>1.6.4</version> + <scope>compile</scope> + </dependency> + <dependency> + <groupId>com.github.chrislusf</groupId> + <artifactId>seaweedfs-hadoop2-client</artifactId> + <version>1.6.4</version> + <scope>compile</scope> + </dependency> + <dependency> + <groupId>org.apache.hadoop</groupId> + <artifactId>hadoop-common</artifactId> + <version>2.9.2</version> + <scope>compile</scope> + </dependency> + </dependencies> + +</project> \ No newline at end of file diff --git a/other/java/examples/src/main/java/com/seaweedfs/examples/ExampleReadFile.java b/other/java/examples/src/main/java/com/seaweedfs/examples/ExampleReadFile.java new file mode 100644 index 000000000..d2eb94135 --- /dev/null +++ b/other/java/examples/src/main/java/com/seaweedfs/examples/ExampleReadFile.java @@ -0,0 +1,48 @@ +package com.seaweedfs.examples; + +import seaweedfs.client.FilerClient; +import seaweedfs.client.SeaweedInputStream; + +import java.io.FileInputStream; +import java.io.IOException; +import java.io.InputStream; +import java.util.zip.ZipEntry; +import java.util.zip.ZipInputStream; + +public class ExampleReadFile { + + public static void main(String[] args) throws IOException { + + FilerClient filerClient = new FilerClient("localhost", 18888); + + long startTime = System.currentTimeMillis(); + parseZip("/Users/chris/tmp/test.zip"); + + long startTime2 = System.currentTimeMillis(); + + long localProcessTime = startTime2 - startTime; + + SeaweedInputStream seaweedInputStream = new SeaweedInputStream( + filerClient, "/test.zip"); + parseZip(seaweedInputStream); + + long swProcessTime = System.currentTimeMillis() - startTime2; + + System.out.println("Local time: " + localProcessTime); + System.out.println("SeaweedFS time: " + swProcessTime); + + } + + public static void parseZip(String filename) throws IOException { + FileInputStream fileInputStream = new FileInputStream(filename); + parseZip(fileInputStream); + } + + public static void parseZip(InputStream is) throws IOException { + ZipInputStream zin = new ZipInputStream(is); + ZipEntry ze; + while ((ze = zin.getNextEntry()) != null) { + System.out.println(ze.getName()); + } + } +} diff --git a/other/java/examples/src/main/java/com/seaweedfs/examples/ExampleWatchFileChanges.java b/other/java/examples/src/main/java/com/seaweedfs/examples/ExampleWatchFileChanges.java new file mode 100644 index 000000000..72c572d31 --- /dev/null +++ b/other/java/examples/src/main/java/com/seaweedfs/examples/ExampleWatchFileChanges.java @@ -0,0 +1,46 @@ +package com.seaweedfs.examples; + +import seaweedfs.client.FilerClient; +import seaweedfs.client.FilerProto; + +import java.io.IOException; +import java.util.Date; +import java.util.Iterator; + +public class ExampleWatchFileChanges { + + public static void main(String[] args) throws IOException { + + FilerClient filerClient = new FilerClient("localhost", 18888); + + long sinceNs = (System.currentTimeMillis() - 3600 * 1000) * 1000000L; + + Iterator<FilerProto.SubscribeMetadataResponse> watch = filerClient.watch( + "/buckets", + "exampleClientName", + sinceNs + ); + + System.out.println("Connected to filer, subscribing from " + new Date()); + + while (watch.hasNext()) { + FilerProto.SubscribeMetadataResponse event = watch.next(); 
FilerProto.EventNotification notification = event.getEventNotification(); + if (!event.getDirectory().equals(notification.getNewParentPath())) { + // move an entry to a new directory, possibly with a new name + if (notification.hasOldEntry() && notification.hasNewEntry()) { + System.out.println("moved " + event.getDirectory() + "/" + notification.getOldEntry().getName() + " to " + notification.getNewParentPath() + "/" + notification.getNewEntry().getName()); + } else { + System.out.println("this should not happen."); + } + } else if (notification.hasNewEntry() && !notification.hasOldEntry()) { + System.out.println("created entry " + event.getDirectory() + "/" + notification.getNewEntry().getName()); + } else if (!notification.hasNewEntry() && notification.hasOldEntry()) { + System.out.println("deleted entry " + event.getDirectory() + "/" + notification.getOldEntry().getName()); + } else if (notification.hasNewEntry() && notification.hasOldEntry()) { + System.out.println("updated entry " + event.getDirectory() + "/" + notification.getNewEntry().getName()); + } + } + + } +} diff --git a/other/java/examples/src/main/java/com/seaweedfs/examples/ExampleWriteFile.java b/other/java/examples/src/main/java/com/seaweedfs/examples/ExampleWriteFile.java new file mode 100644 index 000000000..26b74028f --- /dev/null +++ b/other/java/examples/src/main/java/com/seaweedfs/examples/ExampleWriteFile.java @@ -0,0 +1,47 @@ +package com.seaweedfs.examples; + +import seaweedfs.client.FilerClient; +import seaweedfs.client.SeaweedInputStream; +import seaweedfs.client.SeaweedOutputStream; + +import java.io.IOException; +import java.io.InputStream; +import java.util.zip.ZipEntry; +import java.util.zip.ZipInputStream; + +public class ExampleWriteFile { + + public static void main(String[] args) throws IOException { + + FilerClient filerClient = new FilerClient("localhost", 18888); + + SeaweedInputStream seaweedInputStream = new SeaweedInputStream(filerClient, "/test.zip"); + unZipFiles(filerClient, seaweedInputStream); + + } + + public static void unZipFiles(FilerClient filerClient, InputStream is) throws IOException { + ZipInputStream zin = new ZipInputStream(is); + ZipEntry ze; + while ((ze = zin.getNextEntry()) != null) { + + String filename = ze.getName(); + if (filename.indexOf("/") >= 0) { + filename = filename.substring(filename.lastIndexOf("/") + 1); + } + if (filename.length()==0) { + continue; + } + + SeaweedOutputStream seaweedOutputStream = new SeaweedOutputStream(filerClient, "/test/"+filename); + byte[] bytesIn = new byte[16 * 1024]; + int read = 0; + while ((read = zin.read(bytesIn))!=-1) { + seaweedOutputStream.write(bytesIn,0,read); + } + seaweedOutputStream.close(); + + System.out.println(ze.getName()); + } + } +} diff --git a/other/java/examples/src/main/java/com/seaweedfs/examples/HdfsCopyFile.java b/other/java/examples/src/main/java/com/seaweedfs/examples/HdfsCopyFile.java new file mode 100644 index 000000000..006c581c9 --- /dev/null +++ b/other/java/examples/src/main/java/com/seaweedfs/examples/HdfsCopyFile.java @@ -0,0 +1,25 @@ +package com.seaweedfs.examples; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.io.IOUtils; + +import java.io.*; + +public class HdfsCopyFile { + public static void main(String[] args) throws IOException { + Configuration configuration = new Configuration(); + + configuration.set("fs.defaultFS", "seaweedfs://localhost:8888"); + configuration.set("fs.seaweedfs.impl", 
"seaweed.hdfs.SeaweedFileSystem"); + + FileSystem fs = FileSystem.get(configuration); + String source = "/Users/chris/tmp/test.zip"; + String destination = "/buckets/spark/test01.zip"; + InputStream in = new BufferedInputStream(new FileInputStream(source)); + + OutputStream out = fs.create(new Path(destination)); + IOUtils.copyBytes(in, out, 4096, true); + } +} diff --git a/other/java/hdfs-over-ftp/pom.xml b/other/java/hdfs-over-ftp/pom.xml new file mode 100644 index 000000000..0db422db5 --- /dev/null +++ b/other/java/hdfs-over-ftp/pom.xml @@ -0,0 +1,120 @@ + + + 4.0.0 + + hdfs-over-ftp + hdfs-over-ftp + 1.0 + + + org.springframework.boot + spring-boot-starter-parent + 2.4.3 + + + + + org.springframework.boot + spring-boot-starter + + + org.springframework.boot + spring-boot-starter-web + + + io.springfox + springfox-swagger2 + 2.9.2 + + + io.springfox + springfox-swagger-ui + 2.9.2 + + + org.apache.hadoop + hadoop-common + 3.2.1 + + + org.apache.hadoop + hadoop-client + 3.2.1 + + + org.apache.ftpserver + ftpserver-core + 1.1.1 + + + com.github.chrislusf + seaweedfs-hadoop3-client + 1.6.2 + + + + + + + + org.springframework.boot + spring-boot-maven-plugin + + + + org.apache.maven.plugins + maven-compiler-plugin + 3.1 + + 1.8 + 1.8 + UTF-8 + + + ${java.home}/lib/rt.jar + + + + + + org.apache.maven.plugins + maven-jar-plugin + 2.6 + + + + org.apache.hadoop.seaweed.ftp.ApplicationServer + true + lib/ + + + ./ + + + + + + + maven-assembly-plugin + + false + + src/main/resources/assembly.xml + + + + + make-assembly + package + + single + + + + + + + + \ No newline at end of file diff --git a/other/java/hdfs-over-ftp/src/main/java/org/apache/hadoop/seaweed/ftp/ApplicationServer.java b/other/java/hdfs-over-ftp/src/main/java/org/apache/hadoop/seaweed/ftp/ApplicationServer.java new file mode 100644 index 000000000..b8ef1d840 --- /dev/null +++ b/other/java/hdfs-over-ftp/src/main/java/org/apache/hadoop/seaweed/ftp/ApplicationServer.java @@ -0,0 +1,14 @@ +package org.apache.hadoop.seaweed.ftp; + +import org.springframework.boot.SpringApplication; +import org.springframework.boot.autoconfigure.SpringBootApplication; + + +@SpringBootApplication +public class ApplicationServer { + + public static void main(String[] args) { + SpringApplication.run(ApplicationServer.class, args); + } + +} diff --git a/other/java/hdfs-over-ftp/src/main/java/org/apache/hadoop/seaweed/ftp/config/SwaggerConfig.java b/other/java/hdfs-over-ftp/src/main/java/org/apache/hadoop/seaweed/ftp/config/SwaggerConfig.java new file mode 100644 index 000000000..3c395493d --- /dev/null +++ b/other/java/hdfs-over-ftp/src/main/java/org/apache/hadoop/seaweed/ftp/config/SwaggerConfig.java @@ -0,0 +1,27 @@ +package org.apache.hadoop.seaweed.ftp.config; + +import org.springframework.context.annotation.Bean; +import org.springframework.context.annotation.Configuration; +import springfox.documentation.builders.ApiInfoBuilder; +import springfox.documentation.builders.PathSelectors; +import springfox.documentation.builders.RequestHandlerSelectors; +import springfox.documentation.spi.DocumentationType; +import springfox.documentation.spring.web.plugins.Docket; +import springfox.documentation.swagger2.annotations.EnableSwagger2; + +@Configuration +@EnableSwagger2 +public class SwaggerConfig { + @Bean + public Docket createRestApi() { + return new Docket(DocumentationType.SWAGGER_2) + .pathMapping("/") + .select() + .apis(RequestHandlerSelectors.basePackage("org.apache.hadoop.seaweed.ftp")) + .paths(PathSelectors.any()) + .build().apiInfo(new 
ApiInfoBuilder() + .title("FTP API Doc") + .version("1.0") + .build()); + } +} \ No newline at end of file diff --git a/other/java/hdfs-over-ftp/src/main/java/org/apache/hadoop/seaweed/ftp/controller/FtpManagerController.java b/other/java/hdfs-over-ftp/src/main/java/org/apache/hadoop/seaweed/ftp/controller/FtpManagerController.java new file mode 100644 index 000000000..7a5a4e74d --- /dev/null +++ b/other/java/hdfs-over-ftp/src/main/java/org/apache/hadoop/seaweed/ftp/controller/FtpManagerController.java @@ -0,0 +1,71 @@ +package org.apache.hadoop.seaweed.ftp.controller; + +import io.swagger.annotations.Api; +import io.swagger.annotations.ApiOperation; +import org.apache.hadoop.seaweed.ftp.service.HFtpService; +import org.apache.hadoop.seaweed.ftp.controller.vo.Result; +import org.apache.log4j.Logger; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.web.bind.annotation.GetMapping; +import org.springframework.web.bind.annotation.PutMapping; +import org.springframework.web.bind.annotation.RequestMapping; +import org.springframework.web.bind.annotation.RestController; + +import java.util.HashMap; +import java.util.Map; + +@RestController +@RequestMapping("/manager") +@Api(tags = "FTP server management") +public class FtpManagerController { + + private static Logger log = Logger.getLogger(FtpManagerController.class); + + @Autowired + private HFtpService hdfsOverFtpServer; + + @GetMapping("/status") + @ApiOperation("Get the FTP server status") + public Result status() { + Map map = new HashMap<>(); + try { + boolean status = hdfsOverFtpServer.statusServer(); + map.put("is_running", status); + return new Result(true, map, "FTP server status fetched successfully"); + }catch (Exception e) { + log.error(e); + map.put("is_running", false); + return new Result(true, map, "FTP server status fetched successfully"); + } + } + + @PutMapping("/start") + @ApiOperation("Start the FTP server") + public Result start() { + try { + boolean status = hdfsOverFtpServer.statusServer(); + if(!status) { + hdfsOverFtpServer.startServer(); + } + return new Result(true, "FTP server started successfully"); + }catch (Exception e) { + log.error(e); + return new Result(false, "failed to start the FTP server"); + } + } + + @PutMapping("/stop") + @ApiOperation("Stop the FTP server") + public Result stop() { + try { + boolean status = hdfsOverFtpServer.statusServer(); + if(status) { + hdfsOverFtpServer.stopServer(); + } + return new Result(true, "FTP server stopped successfully"); + }catch (Exception e) { + log.error(e); + return new Result(false, "failed to stop the FTP server"); + } + } +}
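Aside: the manager controller exposes status/start/stop over REST, each wrapping the embedded FtpServer lifecycle. A sketch of driving it with the JDK 11+ java.net.http client, assuming the Spring Boot default port 8080 from application.yml:

// imports assumed: java.net.URI, java.net.http.*; send() throws IOException/InterruptedException
HttpClient http = HttpClient.newHttpClient();
HttpRequest start = HttpRequest.newBuilder(URI.create("http://localhost:8080/manager/start"))
        .PUT(HttpRequest.BodyPublishers.noBody())
        .build();
HttpResponse<String> resp = http.send(start, HttpResponse.BodyHandlers.ofString());
System.out.println(resp.body());   // JSON-serialized Result, e.g. {"status":true,"message":"..."}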
diff --git a/other/java/hdfs-over-ftp/src/main/java/org/apache/hadoop/seaweed/ftp/controller/UserController.java b/other/java/hdfs-over-ftp/src/main/java/org/apache/hadoop/seaweed/ftp/controller/UserController.java new file mode 100644 index 000000000..c4d2261b3 --- /dev/null +++ b/other/java/hdfs-over-ftp/src/main/java/org/apache/hadoop/seaweed/ftp/controller/UserController.java @@ -0,0 +1,98 @@ +package org.apache.hadoop.seaweed.ftp.controller; + +import io.swagger.annotations.Api; +import io.swagger.annotations.ApiOperation; +import org.apache.ftpserver.ftplet.User; +import org.apache.ftpserver.usermanager.Md5PasswordEncryptor; +import org.apache.ftpserver.usermanager.UserFactory; +import org.apache.hadoop.seaweed.ftp.controller.vo.FtpUser; +import org.apache.hadoop.seaweed.ftp.controller.vo.Result; +import org.apache.hadoop.seaweed.ftp.users.HdfsUserManager; +import org.apache.log4j.Logger; +import org.springframework.web.bind.annotation.*; + +import java.io.File; + +@RestController +@RequestMapping("/user") +@Api(tags = "FTP user management") +public class UserController { + + private static Logger log = Logger.getLogger(UserController.class); + + /*** + * { + * "name": "test", + * "password": "test", + * "homeDirectory": "/buckets/test/" + * } + * @param ftpUser + * @return + */ + @PostMapping("/add") + @ApiOperation("Add or edit a user") + public Result add(@RequestBody FtpUser ftpUser) { + try { + HdfsUserManager userManagerFactory = new HdfsUserManager(); + userManagerFactory.setFile(new File(System.getProperty("user.dir") + File.separator + "users.properties")); + userManagerFactory.setPasswordEncryptor(new Md5PasswordEncryptor()); + + UserFactory userFactory = new UserFactory(); + userFactory.setHomeDirectory(ftpUser.getHomeDirectory()); + userFactory.setName(ftpUser.getName()); + userFactory.setPassword(ftpUser.getPassword()); + userFactory.setEnabled(ftpUser.isEnabled()); + userFactory.setMaxIdleTime(ftpUser.getMaxIdleTime()); + + User user = userFactory.createUser(); + userManagerFactory.save(user, ftpUser.isRenamePush()); + return new Result(true, "user created successfully"); + }catch (Exception e) { + log.error(e); + return new Result(false, "failed to create the user"); + } + } + + @DeleteMapping("/delete/{user}") + @ApiOperation("Delete a user") + public Result delete(@PathVariable(value = "user") String user) { + try { + HdfsUserManager userManagerFactory = new HdfsUserManager(); + userManagerFactory.setFile(new File(System.getProperty("user.dir") + File.separator + "users.properties")); + userManagerFactory.delete(user); + return new Result(true, "user deleted successfully"); + }catch (Exception e) { + log.error(e); + return new Result(false, "failed to delete the user"); + } + } + + @GetMapping("/show/{userName}") + @ApiOperation("Show a user") + public Result show(@PathVariable(value = "userName") String userName) { + try { + HdfsUserManager userManagerFactory = new HdfsUserManager(); + userManagerFactory.setFile(new File(System.getProperty("user.dir") + File.separator + "users.properties")); + User user = userManagerFactory.getUserByName(userName); + FtpUser ftpUser = new FtpUser(user.getHomeDirectory(), user.getPassword(), user.getEnabled(), user.getName(), user.getMaxIdleTime(), HdfsUserManager.getUserRenamePush(userName)); + return new Result(true, ftpUser, "user info fetched successfully"); + }catch (Exception e) { + log.error(e); + return new Result(false, "failed to fetch the user info"); + } + } + + @GetMapping("/list") + @ApiOperation("List users") + public Result list() { + try { + HdfsUserManager userManagerFactory = new HdfsUserManager(); + userManagerFactory.setFile(new File(System.getProperty("user.dir") + File.separator + "users.properties")); + String[] allUserNames = userManagerFactory.getAllUserNames(); + return new Result(true, allUserNames, "users listed successfully"); + }catch (Exception e) { + log.error(e); + return new Result(false, "failed to list users"); + } + } +} diff --git a/other/java/hdfs-over-ftp/src/main/java/org/apache/hadoop/seaweed/ftp/controller/vo/FtpUser.java b/other/java/hdfs-over-ftp/src/main/java/org/apache/hadoop/seaweed/ftp/controller/vo/FtpUser.java new file mode 100644 index 000000000..953d08603 --- /dev/null +++ b/other/java/hdfs-over-ftp/src/main/java/org/apache/hadoop/seaweed/ftp/controller/vo/FtpUser.java @@ -0,0 +1,71 @@ +package org.apache.hadoop.seaweed.ftp.controller.vo; + +public class FtpUser { + + private String homeDirectory; + private String password; + private boolean enabled; + private String name; + private int maxIdleTime; + private boolean renamePush; + + public FtpUser() { + } + + public FtpUser(String homeDirectory, String password, boolean enabled, String name, int maxIdleTime, boolean renamePush) { + this.homeDirectory = homeDirectory; + this.password = password; 
this.enabled = enabled; + this.name = name; + this.maxIdleTime = maxIdleTime; + this.renamePush = renamePush; + } + + public String getHomeDirectory() { + return homeDirectory; + } + + public void setHomeDirectory(String homeDirectory) { + this.homeDirectory = homeDirectory; + } + + public String getPassword() { + return password; + } + + public void setPassword(String password) { + this.password = password; + } + + public boolean isEnabled() { + return enabled; + } + + public void setEnabled(boolean enabled) { + this.enabled = enabled; + } + + public String getName() { + return name; + } + + public void setName(String name) { + this.name = name; + } + + public int getMaxIdleTime() { + return maxIdleTime; + } + + public void setMaxIdleTime(int maxIdleTime) { + this.maxIdleTime = maxIdleTime; + } + + public boolean isRenamePush() { + return renamePush; + } + + public void setRenamePush(boolean renamePush) { + this.renamePush = renamePush; + } +} diff --git a/other/java/hdfs-over-ftp/src/main/java/org/apache/hadoop/seaweed/ftp/controller/vo/Result.java b/other/java/hdfs-over-ftp/src/main/java/org/apache/hadoop/seaweed/ftp/controller/vo/Result.java new file mode 100644 index 000000000..b6a480ba7 --- /dev/null +++ b/other/java/hdfs-over-ftp/src/main/java/org/apache/hadoop/seaweed/ftp/controller/vo/Result.java @@ -0,0 +1,43 @@ +package org.apache.hadoop.seaweed.ftp.controller.vo; + +public class Result { + + private boolean status; + private Object data; + private String message; + + public Result(boolean status, String message) { + this.status = status; + this.message = message; + } + + public Result(boolean status, Object data, String message) { + this.status = status; + this.message = message; + this.data = data; + } + + public boolean isStatus() { + return status; + } + + public void setStatus(boolean status) { + this.status = status; + } + + public String getMessage() { + return message; + } + + public void setMessage(String message) { + this.message = message; + } + + public Object getData() { + return data; + } + + public void setData(Object data) { + this.data = data; + } +} diff --git a/other/java/hdfs-over-ftp/src/main/java/org/apache/hadoop/seaweed/ftp/service/HFtpService.java b/other/java/hdfs-over-ftp/src/main/java/org/apache/hadoop/seaweed/ftp/service/HFtpService.java new file mode 100644 index 000000000..c3fa31872 --- /dev/null +++ b/other/java/hdfs-over-ftp/src/main/java/org/apache/hadoop/seaweed/ftp/service/HFtpService.java @@ -0,0 +1,102 @@ +package org.apache.hadoop.seaweed.ftp.service; + +import org.apache.ftpserver.DataConnectionConfiguration; +import org.apache.ftpserver.DataConnectionConfigurationFactory; +import org.apache.ftpserver.FtpServer; +import org.apache.ftpserver.FtpServerFactory; +import org.apache.ftpserver.command.CommandFactoryFactory; +import org.apache.ftpserver.listener.ListenerFactory; +import org.apache.hadoop.seaweed.ftp.service.filesystem.HdfsFileSystemManager; +import org.apache.hadoop.seaweed.ftp.service.filesystem.HdfsOverFtpSystem; +import org.apache.hadoop.seaweed.ftp.users.HdfsUserManager; +import org.apache.log4j.Logger; +import org.springframework.beans.factory.annotation.Value; +import org.springframework.stereotype.Component; + +import java.io.File; + +/** + * reference: https://github.com/iponweb/hdfs-over-ftp + */ +@Component +public class HFtpService { + + private static Logger log = Logger.getLogger(HFtpService.class); + + @Value("${ftp.port}") + private int port = 0; + + @Value("${ftp.passive-address}") + private String passiveAddress; 
+ + @Value("${ftp.passive-ports}") + private String passivePorts; + + @Value("${hdfs.uri}") + private String hdfsUri; + + @Value("${seaweedFs.enable}") + private boolean seaweedFsEnable; + + @Value("${seaweedFs.access}") + private String seaweedFsAccess; + + @Value("${seaweedFs.replication}") + private String seaweedFsReplication; + + private FtpServer ftpServer = null; + + public void startServer() throws Exception { + log.info("Starting HDFS-Over-Ftp server. port: " + port + " passive-address: " + passiveAddress + " passive-ports: " + passivePorts + " hdfs-uri: " + hdfsUri); + + HdfsOverFtpSystem.setHdfsUri(hdfsUri); + HdfsOverFtpSystem.setSeaweedFsEnable(seaweedFsEnable); + HdfsOverFtpSystem.setSeaweedFsAccess(seaweedFsAccess); + HdfsOverFtpSystem.setSeaweedFsReplication(seaweedFsReplication); + + FtpServerFactory server = new FtpServerFactory(); + server.setFileSystem(new HdfsFileSystemManager()); + + ListenerFactory factory = new ListenerFactory(); + factory.setPort(port); + + DataConnectionConfigurationFactory dccFactory = new DataConnectionConfigurationFactory(); + dccFactory.setPassiveAddress("0.0.0.0"); + dccFactory.setPassivePorts(passivePorts); + dccFactory.setPassiveExternalAddress(passiveAddress); + DataConnectionConfiguration dcc = dccFactory.createDataConnectionConfiguration(); + factory.setDataConnectionConfiguration(dcc); + + server.addListener("default", factory.createListener()); + + HdfsUserManager userManager = new HdfsUserManager(); + final File file = loadResource("/users.properties"); + userManager.setFile(file); + server.setUserManager(userManager); + + CommandFactoryFactory cmFact = new CommandFactoryFactory(); + cmFact.setUseDefaultCommands(true); + server.setCommandFactory(cmFact.createCommandFactory()); + + // start the server + ftpServer = server.createServer(); + ftpServer.start(); + } + + public void stopServer() { + log.info("Stopping Hdfs-Over-Ftp server. 
port: " + port + " passive-address: " + passiveAddress + " passive-ports: " + passivePorts + " hdfs-uri: " + hdfsUri); + ftpServer.stop(); + } + + public boolean statusServer() { + try { + return !ftpServer.isStopped(); + }catch (Exception e) { + return false; + } + } + + private static File loadResource(String resourceName) { + return new File(System.getProperty("user.dir") + resourceName); + } +} \ No newline at end of file diff --git a/other/java/hdfs-over-ftp/src/main/java/org/apache/hadoop/seaweed/ftp/service/filesystem/HdfsFileObject.java b/other/java/hdfs-over-ftp/src/main/java/org/apache/hadoop/seaweed/ftp/service/filesystem/HdfsFileObject.java new file mode 100644 index 000000000..e97c2dc14 --- /dev/null +++ b/other/java/hdfs-over-ftp/src/main/java/org/apache/hadoop/seaweed/ftp/service/filesystem/HdfsFileObject.java @@ -0,0 +1,333 @@ +package org.apache.hadoop.seaweed.ftp.service.filesystem; + +import org.apache.ftpserver.ftplet.FtpFile; +import org.apache.ftpserver.ftplet.User; +import org.apache.hadoop.fs.*; +import org.apache.hadoop.seaweed.ftp.users.HdfsUser; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.File; +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; + +/** + * This class implements all actions to HDFS + */ +public class HdfsFileObject implements FtpFile { + + private final Logger log = LoggerFactory.getLogger(HdfsFileObject.class); + + private Path homePath; + private Path path; + private Path fullPath; + private HdfsUser user; + + /** + * Constructs HdfsFileObject from path + * + * @param path path to represent object + * @param user accessor of the object + */ + public HdfsFileObject(String homePath, String path, User user) { + this.homePath = new Path(homePath); + this.path = new Path(path); + this.fullPath = new Path(homePath + path); + this.user = (HdfsUser) user; + } + + public String getAbsolutePath() { + // strip the last '/' if necessary + String fullName = path.toString(); + int filelen = fullName.length(); + if ((filelen != 1) && (fullName.charAt(filelen - 1) == '/')) { + fullName = fullName.substring(0, filelen - 1); + } + + return fullName; + } + + public String getName() { + return path.getName(); + } + + /** + * HDFS has no hidden objects + * + * @return always false + */ + public boolean isHidden() { + return false; + } + + /** + * Checks if the object is a directory + * + * @return true if the object is a directory + */ + public boolean isDirectory() { + try { + log.debug("is directory? 
: " + fullPath); + FileSystem dfs = HdfsOverFtpSystem.getDfs(); + FileStatus fs = dfs.getFileStatus(fullPath); + return fs.isDir(); + } catch (IOException e) { + log.debug(fullPath + " is not dir", e); + return false; + } + } + + /** + * Checks if the object is a file + * + * @return true if the object is a file + */ + public boolean isFile() { + try { + FileSystem dfs = HdfsOverFtpSystem.getDfs(); + return dfs.isFile(fullPath); + } catch (IOException e) { + log.debug(fullPath + " is not file", e); + return false; + } + } + + /** + * Checks if the object does exist + * + * @return true if the object does exist + */ + public boolean doesExist() { + try { + FileSystem dfs = HdfsOverFtpSystem.getDfs(); + dfs.getFileStatus(fullPath); + return true; + } catch (IOException e) { + // log.debug(path + " does not exist", e); + return false; + } + } + + public boolean isReadable() { + return true; + } + + public boolean isWritable() { + return true; + } + + public boolean isRemovable() { + return true; + } + + /** + * Get owner of the object + * + * @return owner of the object + */ + public String getOwnerName() { + return "root"; + /* + try { + FileSystem dfs = HdfsOverFtpSystem.getDfs(); + FileStatus fs = dfs.getFileStatus(fullPath); + String owner = fs.getOwner(); + if(owner.length() == 0) { + return "root"; + } + return owner; + } catch (IOException e) { + e.printStackTrace(); + return null; + } + */ + } + + /** + * Get group of the object + * + * @return group of the object + */ + public String getGroupName() { + return "root"; + /* + try { + FileSystem dfs = HdfsOverFtpSystem.getDfs(); + FileStatus fs = dfs.getFileStatus(fullPath); + String group = fs.getGroup(); + if(group.length() == 0) { + return "root"; + } + return group; + } catch (IOException e) { + e.printStackTrace(); + return null; + } + */ + } + + /** + * Get link count + * + * @return 3 is for a directory and 1 is for a file + */ + public int getLinkCount() { + return isDirectory() ? 
3 : 1; + } + + /** + * Get last modification date + * + * @return last modification date as a long + */ + public long getLastModified() { + try { + FileSystem dfs = HdfsOverFtpSystem.getDfs(); + FileStatus fs = dfs.getFileStatus(fullPath); + return fs.getModificationTime(); + } catch (IOException e) { + e.printStackTrace(); + return 0; + } + } + + public boolean setLastModified(long l) { + return false; + } + + /** + * Get the size of the object + * + * @return size of the object in bytes + */ + public long getSize() { + try { + FileSystem dfs = HdfsOverFtpSystem.getDfs(); + FileStatus fs = dfs.getFileStatus(fullPath); + log.debug("getSize(): " + fullPath + " : " + fs.getLen()); + return fs.getLen(); + } catch (IOException e) { + e.printStackTrace(); + return 0; + } + } + + public Object getPhysicalFile() { + return null; + } + + /** + * Create a new dir from the object + * + * @return true if dir is created + */ + public boolean mkdir() { + try { + FileSystem fs = HdfsOverFtpSystem.getDfs(); + fs.mkdirs(fullPath); +// fs.setOwner(path, user.getName(), user.getMainGroup()); + return true; + } catch (IOException e) { + e.printStackTrace(); + return false; + } + } + + /** + * Delete object from the HDFS filesystem + * + * @return true if the object is deleted + */ + public boolean delete() { + try { + FileSystem dfs = HdfsOverFtpSystem.getDfs(); + dfs.delete(fullPath, true); + return true; + } catch (IOException e) { + e.printStackTrace(); + return false; + } + } + + public boolean move(FtpFile ftpFile) { + try { + FileSystem dfs = HdfsOverFtpSystem.getDfs(); + dfs.rename(fullPath, new Path(fullPath.getParent() + File.separator + ftpFile.getName())); + return true; + } catch (IOException e) { + e.printStackTrace(); + return false; + } + } + + + /** + * List files of the directory + * + * @return List of files in the directory + */ + public List<FtpFile> listFiles() { + try { + FileSystem dfs = HdfsOverFtpSystem.getDfs(); + FileStatus fileStats[] = dfs.listStatus(fullPath); + + // get the virtual name of the base directory + String virtualFileStr = getAbsolutePath(); + if (virtualFileStr.charAt(virtualFileStr.length() - 1) != '/') { + virtualFileStr += '/'; + } + + FtpFile[] virtualFiles = new FtpFile[fileStats.length]; + for (int i = 0; i < fileStats.length; i++) { + File fileObj = new File(fileStats[i].getPath().toString()); + String fileName = virtualFileStr + fileObj.getName(); + virtualFiles[i] = new HdfsFileObject(homePath.toString(), fileName, user); + } + return Collections.unmodifiableList(Arrays.asList(virtualFiles)); + } catch (IOException e) { + log.debug("", e); + return null; + } + } + + /** + * Creates output stream to write to the object + * + * @param l is not used here + * @return OutputStream + * @throws IOException + */ + public OutputStream createOutputStream(long l) { + try { + FileSystem fs = HdfsOverFtpSystem.getDfs(); + FSDataOutputStream out = fs.create(fullPath); +// fs.setOwner(fullPath, user.getName(), user.getMainGroup()); + return out; + } catch (IOException e) { + e.printStackTrace(); + return null; + } + } + + /** + * Creates input stream to read from the object + * + * @param l is not used here + * @return InputStream + * @throws IOException + */ + public InputStream createInputStream(long l) { + try { + FileSystem dfs = HdfsOverFtpSystem.getDfs(); + FSDataInputStream in = dfs.open(fullPath); + return in; + } catch (IOException e) { + e.printStackTrace(); + return null; + } + } +}
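Aside: HdfsFileObject resolves every FTP path against the user's home directory (fullPath = homePath + path), while getAbsolutePath() only ever reports the virtual path. A small illustration; the home directory value and file name are assumptions:

// user is an HdfsUser whose home directory is /buckets/test
HdfsFileObject obj = new HdfsFileObject("/buckets/test", "/dir/file.txt", user);
obj.getAbsolutePath();   // "/dir/file.txt" (a trailing '/' would be stripped)
// all HDFS/SeaweedFS calls inside the class resolve fullPath, i.e. /buckets/test/dir/file.txt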
diff --git a/other/java/hdfs-over-ftp/src/main/java/org/apache/hadoop/seaweed/ftp/service/filesystem/HdfsFileSystemManager.java b/other/java/hdfs-over-ftp/src/main/java/org/apache/hadoop/seaweed/ftp/service/filesystem/HdfsFileSystemManager.java new file mode 100644 index 000000000..533c2c3aa --- /dev/null +++ b/other/java/hdfs-over-ftp/src/main/java/org/apache/hadoop/seaweed/ftp/service/filesystem/HdfsFileSystemManager.java @@ -0,0 +1,14 @@ +package org.apache.hadoop.seaweed.ftp.service.filesystem; + +import org.apache.ftpserver.ftplet.FileSystemFactory; +import org.apache.ftpserver.ftplet.FileSystemView; +import org.apache.ftpserver.ftplet.User; + +/** + * Implemented FileSystemManager to use HdfsFileSystemView + */ +public class HdfsFileSystemManager implements FileSystemFactory { + public FileSystemView createFileSystemView(User user) { + return new HdfsFileSystemView(user); + } +}
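Aside: this factory is the single integration point with Apache FtpServer: each authenticated session asks it for a view, and every view is rooted at that user's home directory (the HdfsFileSystemView constructor below is protected, so the factory is the intended entry point). A sketch, where user stands for any ftplet User with a home directory set:

FileSystemFactory factory = new HdfsFileSystemManager();
FileSystemView view = factory.createFileSystemView(user);
FtpFile cwd = view.getWorkingDirectory();   // starts at "/" within the user's home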
diff --git a/other/java/hdfs-over-ftp/src/main/java/org/apache/hadoop/seaweed/ftp/service/filesystem/HdfsFileSystemView.java b/other/java/hdfs-over-ftp/src/main/java/org/apache/hadoop/seaweed/ftp/service/filesystem/HdfsFileSystemView.java new file mode 100644 index 000000000..8b910e775 --- /dev/null +++ b/other/java/hdfs-over-ftp/src/main/java/org/apache/hadoop/seaweed/ftp/service/filesystem/HdfsFileSystemView.java @@ -0,0 +1,104 @@ +package org.apache.hadoop.seaweed.ftp.service.filesystem; + +import org.apache.ftpserver.ftplet.FileSystemView; +import org.apache.ftpserver.ftplet.FtpFile; +import org.apache.ftpserver.ftplet.User; +import org.apache.hadoop.fs.Path; + +import java.io.File; + +/** + * Implemented FileSystemView to use HdfsFileObject + */ +public class HdfsFileSystemView implements FileSystemView { + + private String homePath; + private String currPath = File.separator; + private User user; + + /** + * Constructor - set the user object. + */ + protected HdfsFileSystemView(User user) { + if (user == null) { + throw new IllegalArgumentException("user can not be null"); + } + if (user.getHomeDirectory() == null) { + throw new IllegalArgumentException( + "User home directory can not be null"); + } + + this.homePath = user.getHomeDirectory(); + this.user = user; + } + + public FtpFile getHomeDirectory() { + return new HdfsFileObject(homePath, File.separator, user); + } + + public FtpFile getWorkingDirectory() { + FtpFile fileObj; + if (currPath.equals(File.separator)) { + fileObj = new HdfsFileObject(homePath, File.separator, user); + } else { + fileObj = new HdfsFileObject(homePath, currPath, user); + + } + return fileObj; + } + + public boolean changeWorkingDirectory(String dir) { + + Path path; + if (dir.startsWith(File.separator) || new Path(currPath).equals(new Path(dir))) { + path = new Path(dir); + } else if (currPath.length() > 1) { + path = new Path(currPath + File.separator + dir); + } else { + if(dir.startsWith("/")) { + path = new Path(dir); + }else { + path = new Path(File.separator + dir); + } + } + + // prevent backing out of the root directory + if (path.getName().equals("..")) { + path = new Path(File.separator); + } + + HdfsFileObject file = new HdfsFileObject(homePath, path.toString(), user); + if (file.isDirectory()) { + currPath = path.toString(); + return true; + } else { + return false; + } + } + + public FtpFile getFile(String file) { + String path; + if (file.startsWith(File.separator)) { + path = file; + } else if (currPath.length() > 1) { + path = currPath + File.separator + file; + } else { + path = File.separator + file; + } + return new HdfsFileObject(homePath, path, user); + } + + /** + * Is the file content random accessible? + */ + public boolean isRandomAccessible() { + return true; + } + + /** + * Dispose file system view - does nothing. 
+ */ + public void dispose() { + } + +} diff --git a/other/java/hdfs-over-ftp/src/main/java/org/apache/hadoop/seaweed/ftp/service/filesystem/HdfsOverFtpSystem.java b/other/java/hdfs-over-ftp/src/main/java/org/apache/hadoop/seaweed/ftp/service/filesystem/HdfsOverFtpSystem.java new file mode 100644 index 000000000..149fd6857 --- /dev/null +++ b/other/java/hdfs-over-ftp/src/main/java/org/apache/hadoop/seaweed/ftp/service/filesystem/HdfsOverFtpSystem.java @@ -0,0 +1,72 @@ +package org.apache.hadoop.seaweed.ftp.service.filesystem; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; + +/** + * Class to store DFS connection + */ +public class HdfsOverFtpSystem { + + private static FileSystem fs = null; + + private static String hdfsUri; + + private static boolean seaweedFsEnable; + + private static String seaweedFsAccess; + + private static String seaweedFsReplication; + + private final static Logger log = LoggerFactory.getLogger(HdfsOverFtpSystem.class); + + private static void hdfsInit() throws IOException { + Configuration configuration = new Configuration(); + + configuration.set("fs.defaultFS", hdfsUri); + if(seaweedFsEnable) { + configuration.set("fs.seaweedfs.impl", "seaweed.hdfs.SeaweedFileSystem"); + configuration.set("fs.seaweed.volume.server.access", seaweedFsAccess); + configuration.set("fs.seaweed.replication", seaweedFsReplication); + } + fs = FileSystem.get(configuration); + log.info("HDFS load success"); + } + + /** + * Get dfs + * + * @return dfs + * @throws IOException + */ + public static FileSystem getDfs() throws IOException { + if (fs == null) { + hdfsInit(); + } + return fs; + } + + public static void setHdfsUri(String hdfsUri) { + HdfsOverFtpSystem.hdfsUri = hdfsUri; + } + + public static String getHdfsUri() { + return hdfsUri; + } + + public static void setSeaweedFsEnable(boolean seaweedFsEnable) { + HdfsOverFtpSystem.seaweedFsEnable = seaweedFsEnable; + } + + public static void setSeaweedFsAccess(String seaweedFsAccess) { + HdfsOverFtpSystem.seaweedFsAccess = seaweedFsAccess; + } + + public static void setSeaweedFsReplication(String seaweedFsReplication) { + HdfsOverFtpSystem.seaweedFsReplication = seaweedFsReplication; + } +} \ No newline at end of file diff --git a/other/java/hdfs-over-ftp/src/main/java/org/apache/hadoop/seaweed/ftp/users/HdfsUser.java b/other/java/hdfs-over-ftp/src/main/java/org/apache/hadoop/seaweed/ftp/users/HdfsUser.java new file mode 100644 index 000000000..c82f6516f --- /dev/null +++ b/other/java/hdfs-over-ftp/src/main/java/org/apache/hadoop/seaweed/ftp/users/HdfsUser.java @@ -0,0 +1,239 @@ +package org.apache.hadoop.seaweed.ftp.users; + +import org.apache.ftpserver.ftplet.Authority; +import org.apache.ftpserver.ftplet.AuthorizationRequest; +import org.apache.ftpserver.ftplet.User; +import org.apache.log4j.Logger; + +import java.io.Serializable; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; + +public class HdfsUser implements User, Serializable { + + private static final long serialVersionUID = -47371353779731294L; + + private String name = null; + + private String password = null; + + private int maxIdleTimeSec = 0; // no limit + + private String homeDir = null; + + private boolean isEnabled = true; + + private List authorities = new ArrayList(); + + private ArrayList groups = new ArrayList(); + + private Logger log = Logger.getLogger(HdfsUser.class); + + /** + * Default constructor. 
+ */ + public HdfsUser() { + } + + /** + * Copy constructor. + */ + public HdfsUser(User user) { + name = user.getName(); + password = user.getPassword(); + authorities = user.getAuthorities(); + maxIdleTimeSec = user.getMaxIdleTime(); + homeDir = user.getHomeDirectory(); + isEnabled = user.getEnabled(); + } + + public ArrayList getGroups() { + return groups; + } + + /** + * Get the main group of the user + * + * @return main group of the user + */ + public String getMainGroup() { + if (groups.size() > 0) { + return groups.get(0); + } else { + log.error("User " + name + " is not a member of any group"); + return "error"; + } + } + + /** + * Checks if user is a member of the group + * + * @param group to check + * @return true if the user is a member of the group + */ + public boolean isGroupMember(String group) { + for (String userGroup : groups) { + if (userGroup.equals(group)) { + return true; + } + } + return false; + } + + /** + * Set users' groups + * + * @param groups to set + */ + public void setGroups(ArrayList groups) { + if (groups.size() < 1) { + log.error("User " + name + " is not a member of any group"); + } + this.groups = groups; + } + + /** + * Get the user name. + */ + public String getName() { + return name; + } + + /** + * Set user name. + */ + public void setName(String name) { + this.name = name; + } + + /** + * Get the user password. + */ + public String getPassword() { + return password; + } + + /** + * Set user password. + */ + public void setPassword(String pass) { + password = pass; + } + + public List getAuthorities() { + if (authorities != null) { + return Collections.unmodifiableList(authorities); + } else { + return null; + } + } + + public void setAuthorities(List authorities) { + if (authorities != null) { + this.authorities = Collections.unmodifiableList(authorities); + } else { + this.authorities = null; + } + } + + /** + * Get the maximum idle time in second. + */ + public int getMaxIdleTime() { + return maxIdleTimeSec; + } + + /** + * Set the maximum idle time in second. + */ + public void setMaxIdleTime(int idleSec) { + maxIdleTimeSec = idleSec; + if (maxIdleTimeSec < 0) { + maxIdleTimeSec = 0; + } + } + + /** + * Get the user enable status. + */ + public boolean getEnabled() { + return isEnabled; + } + + /** + * Set the user enable status. + */ + public void setEnabled(boolean enb) { + isEnabled = enb; + } + + /** + * Get the user home directory. + */ + public String getHomeDirectory() { + return homeDir; + } + + /** + * Set the user home directory. + */ + public void setHomeDirectory(String home) { + homeDir = home; + } + + /** + * String representation. 
+ */ + public String toString() { + return name; + } + + /** + * {@inheritDoc} + */ + public AuthorizationRequest authorize(AuthorizationRequest request) { + List authorities = getAuthorities(); + + // check for no authorities at all + if (authorities == null) { + return null; + } + + boolean someoneCouldAuthorize = false; + for (Authority authority : authorities) { + if (authority.canAuthorize(request)) { + someoneCouldAuthorize = true; + + request = authority.authorize(request); + + // authorization failed, return null + if (request == null) { + return null; + } + } + + } + + if (someoneCouldAuthorize) { + return request; + } else { + return null; + } + } + + /** + * {@inheritDoc} + */ + public List getAuthorities(Class clazz) { + List selected = new ArrayList(); + + for (Authority authority : authorities) { + if (authority.getClass().equals(clazz)) { + selected.add(authority); + } + } + + return selected; + } +} diff --git a/other/java/hdfs-over-ftp/src/main/java/org/apache/hadoop/seaweed/ftp/users/HdfsUserManager.java b/other/java/hdfs-over-ftp/src/main/java/org/apache/hadoop/seaweed/ftp/users/HdfsUserManager.java new file mode 100644 index 000000000..7eb296160 --- /dev/null +++ b/other/java/hdfs-over-ftp/src/main/java/org/apache/hadoop/seaweed/ftp/users/HdfsUserManager.java @@ -0,0 +1,453 @@ +package org.apache.hadoop.seaweed.ftp.users; + +import org.apache.ftpserver.FtpServerConfigurationException; +import org.apache.ftpserver.ftplet.*; +import org.apache.ftpserver.usermanager.*; +import org.apache.ftpserver.usermanager.impl.*; +import org.apache.ftpserver.util.BaseProperties; +import org.apache.ftpserver.util.IoUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.File; +import java.io.FileInputStream; +import java.io.FileOutputStream; +import java.io.IOException; +import java.util.*; + +public class HdfsUserManager extends AbstractUserManager { + + private final Logger LOG = LoggerFactory + .getLogger(HdfsUserManager.class); + + private final static String DEPRECATED_PREFIX = "FtpServer.user."; + + private final static String PREFIX = "ftpserver.user."; + + private static BaseProperties userDataProp; + + private File userDataFile = new File("users.conf"); + + private boolean isConfigured = false; + + private PasswordEncryptor passwordEncryptor = new Md5PasswordEncryptor(); + + + /** + * Retrieve the file used to load and store users + * + * @return The file + */ + public File getFile() { + return userDataFile; + } + + /** + * Set the file used to store and read users. Must be set before + * {@link #configure()} is called. + * + * @param propFile A file containing users + */ + public void setFile(File propFile) { + if (isConfigured) { + throw new IllegalStateException("Must be called before configure()"); + } + + this.userDataFile = propFile; + } + + + /** + * Retrieve the password encryptor used for this user manager + * + * @return The password encryptor. Default to {@link Md5PasswordEncryptor} + * if no other has been provided + */ + public PasswordEncryptor getPasswordEncryptor() { + return passwordEncryptor; + } + + + /** + * Set the password encryptor to use for this user manager + * + * @param passwordEncryptor The password encryptor + */ + public void setPasswordEncryptor(PasswordEncryptor passwordEncryptor) { + this.passwordEncryptor = passwordEncryptor; + } + + + /** + * Lazy init the user manager + */ + private void lazyInit() { + if (!isConfigured) { + configure(); + } + } + + /** + * Configure user manager. 
+ */ + public void configure() { + isConfigured = true; + try { + userDataProp = new BaseProperties(); + + if (userDataFile != null && userDataFile.exists()) { + FileInputStream fis = null; + try { + fis = new FileInputStream(userDataFile); + userDataProp.load(fis); + } finally { + IoUtils.close(fis); + } + } + } catch (IOException e) { + throw new FtpServerConfigurationException( + "Error loading user data file : " + + userDataFile.getAbsolutePath(), e); + } + + convertDeprecatedPropertyNames(); + } + + private void convertDeprecatedPropertyNames() { + Enumeration keys = userDataProp.propertyNames(); + + boolean doSave = false; + + while (keys.hasMoreElements()) { + String key = (String) keys.nextElement(); + + if (key.startsWith(DEPRECATED_PREFIX)) { + String newKey = PREFIX + + key.substring(DEPRECATED_PREFIX.length()); + userDataProp.setProperty(newKey, userDataProp.getProperty(key)); + userDataProp.remove(key); + + doSave = true; + } + } + + if (doSave) { + try { + saveUserData(); + } catch (FtpException e) { + throw new FtpServerConfigurationException( + "Failed to save updated user data", e); + } + } + } + + public synchronized void save(User usr, boolean renamePush) throws FtpException { + lazyInit(); + userDataProp.setProperty(PREFIX + usr.getName() + ".rename.push", renamePush); + save(usr); + } + + /** + * Save user data. Store the properties. + */ + public synchronized void save(User usr) throws FtpException { + lazyInit(); + + // null value check + if (usr.getName() == null) { + throw new NullPointerException("User name is null."); + } + String thisPrefix = PREFIX + usr.getName() + '.'; + + // set other properties + userDataProp.setProperty(thisPrefix + ATTR_PASSWORD, getPassword(usr)); + + String home = usr.getHomeDirectory(); + if (home == null) { + home = "/"; + } + userDataProp.setProperty(thisPrefix + ATTR_HOME, home); + userDataProp.setProperty(thisPrefix + ATTR_ENABLE, usr.getEnabled()); + userDataProp.setProperty(thisPrefix + ATTR_WRITE_PERM, usr + .authorize(new WriteRequest()) != null); + userDataProp.setProperty(thisPrefix + ATTR_MAX_IDLE_TIME, usr + .getMaxIdleTime()); + + TransferRateRequest transferRateRequest = new TransferRateRequest(); + transferRateRequest = (TransferRateRequest) usr + .authorize(transferRateRequest); + + if (transferRateRequest != null) { + userDataProp.setProperty(thisPrefix + ATTR_MAX_UPLOAD_RATE, + transferRateRequest.getMaxUploadRate()); + userDataProp.setProperty(thisPrefix + ATTR_MAX_DOWNLOAD_RATE, + transferRateRequest.getMaxDownloadRate()); + } else { + userDataProp.remove(thisPrefix + ATTR_MAX_UPLOAD_RATE); + userDataProp.remove(thisPrefix + ATTR_MAX_DOWNLOAD_RATE); + } + + // request that always will succeed + ConcurrentLoginRequest concurrentLoginRequest = new ConcurrentLoginRequest( + 0, 0); + concurrentLoginRequest = (ConcurrentLoginRequest) usr + .authorize(concurrentLoginRequest); + + if (concurrentLoginRequest != null) { + userDataProp.setProperty(thisPrefix + ATTR_MAX_LOGIN_NUMBER, + concurrentLoginRequest.getMaxConcurrentLogins()); + userDataProp.setProperty(thisPrefix + ATTR_MAX_LOGIN_PER_IP, + concurrentLoginRequest.getMaxConcurrentLoginsPerIP()); + } else { + userDataProp.remove(thisPrefix + ATTR_MAX_LOGIN_NUMBER); + userDataProp.remove(thisPrefix + ATTR_MAX_LOGIN_PER_IP); + } + + saveUserData(); + } + + /** + * @throws FtpException + */ + private void saveUserData() throws FtpException { + File dir = userDataFile.getAbsoluteFile().getParentFile(); + if (dir != null && !dir.exists() && !dir.mkdirs()) { + String dirName = 
dir.getAbsolutePath(); + throw new FtpServerConfigurationException( + "Cannot create directory for user data file : " + dirName); + } + + // save user data + FileOutputStream fos = null; + try { + fos = new FileOutputStream(userDataFile); + userDataProp.store(fos, "Generated file - don't edit (please)"); + } catch (IOException ex) { + LOG.error("Failed saving user data", ex); + throw new FtpException("Failed saving user data", ex); + } finally { + IoUtils.close(fos); + } + } + + + public synchronized void list() throws FtpException { + lazyInit(); + + Map dataMap = new HashMap(); + Enumeration propNames = (Enumeration) userDataProp.propertyNames(); + ArrayList a = Collections.list(propNames); + a.remove("i18nMap");//去除i18nMap + for(String attrName : a){ +// dataMap.put(attrName, propNames.); + } + + } + + /** + * Delete an user. Removes all this user entries from the properties. After + * removing the corresponding from the properties, save the data. + */ + public synchronized void delete(String usrName) throws FtpException { + lazyInit(); + + // remove entries from properties + String thisPrefix = PREFIX + usrName + '.'; + Enumeration propNames = userDataProp.propertyNames(); + ArrayList remKeys = new ArrayList(); + while (propNames.hasMoreElements()) { + String thisKey = propNames.nextElement().toString(); + if (thisKey.startsWith(thisPrefix)) { + remKeys.add(thisKey); + } + } + Iterator remKeysIt = remKeys.iterator(); + while (remKeysIt.hasNext()) { + userDataProp.remove(remKeysIt.next()); + } + + saveUserData(); + } + + /** + * Get user password. Returns the encrypted value. + *
+     * <pre>
+     * If the password value is not null
+     *    password = new password
+     * else
+     *   if user does exist
+     *     password = old password
+     *   else
+     *     password = ""
+     * </pre>
+     */
+    private String getPassword(User usr) {
+        String name = usr.getName();
+        String password = usr.getPassword();
+
+        if (password != null) {
+            password = passwordEncryptor.encrypt(password);
+        } else {
+            String blankPassword = passwordEncryptor.encrypt("");
+
+            if (doesExist(name)) {
+                String key = PREFIX + name + '.' + ATTR_PASSWORD;
+                password = userDataProp.getProperty(key, blankPassword);
+            } else {
+                password = blankPassword;
+            }
+        }
+        return password;
+    }
+
+    /**
+     * Get all user names.
+     */
+    public synchronized String[] getAllUserNames() {
+        lazyInit();
+
+        // get all user names
+        String suffix = '.' + ATTR_HOME;
+        ArrayList<String> ulst = new ArrayList<>();
+        Enumeration<?> allKeys = userDataProp.propertyNames();
+        int prefixlen = PREFIX.length();
+        int suffixlen = suffix.length();
+        while (allKeys.hasMoreElements()) {
+            String key = (String) allKeys.nextElement();
+            if (key.endsWith(suffix)) {
+                String name = key.substring(prefixlen);
+                int endIndex = name.length() - suffixlen;
+                name = name.substring(0, endIndex);
+                ulst.add(name);
+            }
+        }
+
+        Collections.sort(ulst);
+        return ulst.toArray(new String[0]);
+    }
+
+    private ArrayList<String> parseGroups(String groupsLine) {
+        String[] groupsArray = groupsLine.split(",");
+        return new ArrayList<>(Arrays.asList(groupsArray));
+    }
+
+    public static synchronized boolean getUserRenamePush(String userName) {
+        return userDataProp.getBoolean(PREFIX + userName + ".rename.push", false);
+    }
+
+    /**
+     * Load user data.
+     */
+    public synchronized User getUserByName(String userName) {
+        lazyInit();
+
+        if (!doesExist(userName)) {
+            return null;
+        }
+
+        String baseKey = PREFIX + userName + '.';
+        HdfsUser user = new HdfsUser();
+        user.setName(userName);
+        user.setEnabled(userDataProp.getBoolean(baseKey + ATTR_ENABLE, true));
+        user.setHomeDirectory(userDataProp.getProperty(baseKey + ATTR_HOME, "/"));
+
+//        user.setGroups(parseGroups(userDataProp
+//                .getProperty(baseKey + "groups")));
+
+        List<Authority> authorities = new ArrayList<>();
+
+        if (userDataProp.getBoolean(baseKey + ATTR_WRITE_PERM, false)) {
+            authorities.add(new WritePermission());
+        }
+
+        int maxLogin = userDataProp.getInteger(baseKey + ATTR_MAX_LOGIN_NUMBER, 0);
+        int maxLoginPerIP = userDataProp.getInteger(baseKey + ATTR_MAX_LOGIN_PER_IP, 0);
+
+        authorities.add(new ConcurrentLoginPermission(maxLogin, maxLoginPerIP));
+
+        int uploadRate = userDataProp.getInteger(baseKey + ATTR_MAX_UPLOAD_RATE, 0);
+        int downloadRate = userDataProp.getInteger(baseKey + ATTR_MAX_DOWNLOAD_RATE, 0);
+
+        authorities.add(new TransferRatePermission(downloadRate, uploadRate));
+
+        user.setAuthorities(authorities);
+
+        user.setMaxIdleTime(userDataProp.getInteger(baseKey + ATTR_MAX_IDLE_TIME, 0));
+
+        return user;
+    }
+
+    /**
+     * User existence check.
+     */
+    public synchronized boolean doesExist(String name) {
+        lazyInit();
+
+        String key = PREFIX + name + '.'
+ ATTR_HOME; + return userDataProp.containsKey(key); + } + + /** + * User authenticate method + */ + public synchronized User authenticate(Authentication authentication) + throws AuthenticationFailedException { + lazyInit(); + + if (authentication instanceof UsernamePasswordAuthentication) { + UsernamePasswordAuthentication upauth = (UsernamePasswordAuthentication) authentication; + + String user = upauth.getUsername(); + String password = upauth.getPassword(); + + if (user == null) { + throw new AuthenticationFailedException("Authentication failed"); + } + + if (password == null) { + password = ""; + } + + String storedPassword = userDataProp.getProperty(PREFIX + user + '.' + + ATTR_PASSWORD); + + if (storedPassword == null) { + // user does not exist + throw new AuthenticationFailedException("Authentication failed"); + } + + if (passwordEncryptor.matches(password, storedPassword)) { + return getUserByName(user); + } else { + throw new AuthenticationFailedException("Authentication failed"); + } + + } else if (authentication instanceof AnonymousAuthentication) { + if (doesExist("anonymous")) { + return getUserByName("anonymous"); + } else { + throw new AuthenticationFailedException("Authentication failed"); + } + } else { + throw new IllegalArgumentException( + "Authentication not supported by this user manager"); + } + } + + /** + * Close the user manager - remove existing entries. + */ + public synchronized void dispose() { + if (userDataProp != null) { + userDataProp.clear(); + userDataProp = null; + } + } +} diff --git a/other/java/hdfs-over-ftp/src/main/resources/application.yml b/other/java/hdfs-over-ftp/src/main/resources/application.yml new file mode 100644 index 000000000..128bab1f9 --- /dev/null +++ b/other/java/hdfs-over-ftp/src/main/resources/application.yml @@ -0,0 +1,15 @@ +server: + port: 8080 + +ftp: + port: 2222 + passive-address: localhost + passive-ports: 30000-30999 + +hdfs: + uri: seaweedfs://localhost:8888 + +seaweedFs: + enable: true + access: direct # direct/filerProxy/publicUrl + replication: "000" \ No newline at end of file diff --git a/other/java/hdfs-over-ftp/src/main/resources/assembly.xml b/other/java/hdfs-over-ftp/src/main/resources/assembly.xml new file mode 100644 index 000000000..84fef56f8 --- /dev/null +++ b/other/java/hdfs-over-ftp/src/main/resources/assembly.xml @@ -0,0 +1,39 @@ + + + + package + + + tar.gz + + false + + + + src/main/resources + / + + application.yml + logback-spring.xml + users.properties + kafka-producer.properties + + + + ${project.build.directory} + / + + *.jar + + + + + + false + lib + runtime + false + + + diff --git a/other/java/hdfs-over-ftp/src/main/resources/logback-spring.xml b/other/java/hdfs-over-ftp/src/main/resources/logback-spring.xml new file mode 100644 index 000000000..96b4c1d71 --- /dev/null +++ b/other/java/hdfs-over-ftp/src/main/resources/logback-spring.xml @@ -0,0 +1,40 @@ + + + + + + + + + + + %d{yyyy-MM-dd HH:mm:ss.SSS} [%thread] %-5level %logger{50} - %msg%n + + + + + + + ${LOG_HOME}/fileLog.log + + ${LOG_HOME}/fileLog.log.%d.%i + + 100 MB + + + + + %d %p (%file:%line\)- %m%n + + UTF-8 + + + + + + + + + + \ No newline at end of file diff --git a/other/java/hdfs-over-ftp/users.properties b/other/java/hdfs-over-ftp/users.properties new file mode 100644 index 000000000..aeeab8e35 --- /dev/null +++ b/other/java/hdfs-over-ftp/users.properties @@ -0,0 +1,12 @@ +#Generated file - don't edit (please) +#Thu Mar 11 19:11:12 CST 2021 +ftpserver.user.test.idletime=0 +ftpserver.user.test.maxloginperip=0 
+ftpserver.user.test.userpassword=44664D4D827C740293D2AA244FB60445 +ftpserver.user.test.enableflag=true +ftpserver.user.test.maxloginnumber=0 +ftpserver.user.test.rename.push=true +ftpserver.user.test.homedirectory=/buckets/test/ +ftpserver.user.test.downloadrate=0 +ftpserver.user.test.writepermission=true +ftpserver.user.test.uploadrate=0 diff --git a/other/java/hdfs2/dependency-reduced-pom.xml b/other/java/hdfs2/dependency-reduced-pom.xml index d818bc878..503e5fbdf 100644 --- a/other/java/hdfs2/dependency-reduced-pom.xml +++ b/other/java/hdfs2/dependency-reduced-pom.xml @@ -15,8 +15,8 @@ maven-compiler-plugin - 7 - 7 + 8 + 8 @@ -120,6 +120,180 @@ + + + org.apache.hadoop + hadoop-client + 2.9.2 + provided + + + hadoop-hdfs-client + org.apache.hadoop + + + hadoop-mapreduce-client-app + org.apache.hadoop + + + hadoop-yarn-api + org.apache.hadoop + + + hadoop-mapreduce-client-core + org.apache.hadoop + + + hadoop-mapreduce-client-jobclient + org.apache.hadoop + + + hadoop-annotations + org.apache.hadoop + + + + + org.apache.hadoop + hadoop-common + 2.9.2 + provided + + + commons-cli + commons-cli + + + commons-math3 + org.apache.commons + + + xmlenc + xmlenc + + + commons-io + commons-io + + + commons-net + commons-net + + + commons-collections + commons-collections + + + servlet-api + javax.servlet + + + jetty + org.mortbay.jetty + + + jetty-util + org.mortbay.jetty + + + jetty-sslengine + org.mortbay.jetty + + + jsp-api + javax.servlet.jsp + + + jersey-core + com.sun.jersey + + + jersey-json + com.sun.jersey + + + jersey-server + com.sun.jersey + + + log4j + log4j + + + jets3t + net.java.dev.jets3t + + + commons-lang + commons-lang + + + commons-configuration + commons-configuration + + + commons-lang3 + org.apache.commons + + + slf4j-log4j12 + org.slf4j + + + jackson-core-asl + org.codehaus.jackson + + + jackson-mapper-asl + org.codehaus.jackson + + + avro + org.apache.avro + + + hadoop-auth + org.apache.hadoop + + + jsch + com.jcraft + + + curator-client + org.apache.curator + + + curator-recipes + org.apache.curator + + + htrace-core4 + org.apache.htrace + + + zookeeper + org.apache.zookeeper + + + commons-compress + org.apache.commons + + + stax2-api + org.codehaus.woodstox + + + woodstox-core + com.fasterxml.woodstox + + + hadoop-annotations + org.apache.hadoop + + + + ossrh @@ -127,7 +301,7 @@ - 1.2.4 + 1.6.4 2.9.2
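The users.properties sample above shows the flat key scheme that the HdfsUserManager code earlier in this patch relies on: every user attribute is stored under `ftpserver.user.<name>.<attribute>`, and a user is considered to exist when its `homedirectory` key is present (see `doesExist()`). Below is a minimal, self-contained sketch of that lookup using plain `java.util.Properties` in place of the project's `BaseProperties`; the class name and file location are illustrative, not part of the patch.

```java
import java.io.FileInputStream;
import java.io.IOException;
import java.util.Properties;

// Illustrative sketch: mirrors the doesExist()/getUserByName() key handling
// from HdfsUserManager against the users.properties format shown above.
public class UserPropsLookupDemo {
    private static final String PREFIX = "ftpserver.user.";

    public static void main(String[] args) throws IOException {
        Properties props = new Properties();
        try (FileInputStream fis = new FileInputStream("users.properties")) {
            props.load(fis);
        }

        String name = "test";
        // A user exists when its home directory key is present.
        if (props.containsKey(PREFIX + name + ".homedirectory")) {
            String home = props.getProperty(PREFIX + name + ".homedirectory", "/");
            boolean writable = Boolean.parseBoolean(
                    props.getProperty(PREFIX + name + ".writepermission", "false"));
            boolean renamePush = Boolean.parseBoolean(
                    props.getProperty(PREFIX + name + ".rename.push", "false"));
            System.out.println(name + ": home=" + home
                    + ", writable=" + writable + ", renamePush=" + renamePush);
        }
    }
}
```

Note that the stored `userpassword` value is an encrypted digest, which is why `authenticate()` compares credentials with `passwordEncryptor.matches()` rather than string equality.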
diff --git a/other/java/hdfs2/pom.xml b/other/java/hdfs2/pom.xml index b8c8cb891..6eeba912e 100644 --- a/other/java/hdfs2/pom.xml +++ b/other/java/hdfs2/pom.xml @@ -5,7 +5,7 @@ 4.0.0 - 1.2.4 + 1.6.4 2.9.2 @@ -31,8 +31,8 @@ org.apache.maven.plugins maven-compiler-plugin - 7 - 7 + 8 + 8 @@ -147,6 +147,7 @@ org.apache.hadoop hadoop-client ${hadoop.version} + provided com.github.chrislusf @@ -157,6 +158,7 @@ org.apache.hadoop hadoop-common ${hadoop.version} + provided diff --git a/other/java/hdfs2/src/main/java/seaweed/hdfs/BufferedByteBufferReadableInputStream.java b/other/java/hdfs2/src/main/java/seaweed/hdfs/BufferedByteBufferReadableInputStream.java new file mode 100644 index 000000000..3d0b68a52 --- /dev/null +++ b/other/java/hdfs2/src/main/java/seaweed/hdfs/BufferedByteBufferReadableInputStream.java @@ -0,0 +1,25 @@ +package seaweed.hdfs; + +import org.apache.hadoop.fs.*; + +import java.io.IOException; +import java.nio.ByteBuffer; + +public class BufferedByteBufferReadableInputStream extends BufferedFSInputStream implements ByteBufferReadable { + + public BufferedByteBufferReadableInputStream(FSInputStream in, int size) { + super(in, size); + if (!(in instanceof Seekable) || !(in instanceof PositionedReadable)) { + throw new IllegalArgumentException("In is not an instance of Seekable or PositionedReadable"); + } + } + + @Override + public int read(ByteBuffer buf) throws IOException { + if (this.in instanceof ByteBufferReadable) { + return ((ByteBufferReadable)this.in).read(buf); + } else { + throw new UnsupportedOperationException("Byte-buffer read unsupported by input stream"); + } + } +} diff --git a/other/java/hdfs2/src/main/java/seaweed/hdfs/ReadBuffer.java b/other/java/hdfs2/src/main/java/seaweed/hdfs/ReadBuffer.java deleted file mode 100644 index 926d0b83b..000000000 --- a/other/java/hdfs2/src/main/java/seaweed/hdfs/ReadBuffer.java +++ /dev/null @@ -1,137 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package seaweed.hdfs; - -import java.util.concurrent.CountDownLatch; - -class ReadBuffer { - - private SeaweedInputStream stream; - private long offset; // offset within the file for the buffer - private int length; // actual length, set after the buffer is filles - private int requestedLength; // requested length of the read - private byte[] buffer; // the buffer itself - private int bufferindex = -1; // index in the buffers array in Buffer manager - private ReadBufferStatus status; // status of the buffer - private CountDownLatch latch = null; // signaled when the buffer is done reading, so any client - // waiting on this buffer gets unblocked - - // fields to help with eviction logic - private long timeStamp = 0; // tick at which buffer became available to read - private boolean isFirstByteConsumed = false; - private boolean isLastByteConsumed = false; - private boolean isAnyByteConsumed = false; - - public SeaweedInputStream getStream() { - return stream; - } - - public void setStream(SeaweedInputStream stream) { - this.stream = stream; - } - - public long getOffset() { - return offset; - } - - public void setOffset(long offset) { - this.offset = offset; - } - - public int getLength() { - return length; - } - - public void setLength(int length) { - this.length = length; - } - - public int getRequestedLength() { - return requestedLength; - } - - public void setRequestedLength(int requestedLength) { - this.requestedLength = requestedLength; - } - - public byte[] getBuffer() { - return buffer; - } - - public void setBuffer(byte[] buffer) { - this.buffer = buffer; - } - - public int getBufferindex() { - return bufferindex; - } - - public void setBufferindex(int bufferindex) { - this.bufferindex = bufferindex; - } - - public ReadBufferStatus getStatus() { - return status; - } - - public void setStatus(ReadBufferStatus status) { - this.status = status; - } - - public CountDownLatch getLatch() { - return latch; - } - - public void setLatch(CountDownLatch latch) { - this.latch = latch; - } - - public long getTimeStamp() { - return timeStamp; - } - - public void setTimeStamp(long timeStamp) { - this.timeStamp = timeStamp; - } - - public boolean isFirstByteConsumed() { - return isFirstByteConsumed; - } - - public void setFirstByteConsumed(boolean isFirstByteConsumed) { - this.isFirstByteConsumed = isFirstByteConsumed; - } - - public boolean isLastByteConsumed() { - return isLastByteConsumed; - } - - public void setLastByteConsumed(boolean isLastByteConsumed) { - this.isLastByteConsumed = isLastByteConsumed; - } - - public boolean isAnyByteConsumed() { - return isAnyByteConsumed; - } - - public void setAnyByteConsumed(boolean isAnyByteConsumed) { - this.isAnyByteConsumed = isAnyByteConsumed; - } - -} diff --git a/other/java/hdfs2/src/main/java/seaweed/hdfs/ReadBufferManager.java b/other/java/hdfs2/src/main/java/seaweed/hdfs/ReadBufferManager.java deleted file mode 100644 index 5b1e21529..000000000 --- a/other/java/hdfs2/src/main/java/seaweed/hdfs/ReadBufferManager.java +++ /dev/null @@ -1,394 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package seaweed.hdfs; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.util.Collection; -import java.util.LinkedList; -import java.util.Queue; -import java.util.Stack; -import java.util.concurrent.CountDownLatch; - -/** - * The Read Buffer Manager for Rest AbfsClient. - */ -final class ReadBufferManager { - private static final Logger LOGGER = LoggerFactory.getLogger(ReadBufferManager.class); - - private static final int NUM_BUFFERS = 16; - private static final int BLOCK_SIZE = 4 * 1024 * 1024; - private static final int NUM_THREADS = 8; - private static final int THRESHOLD_AGE_MILLISECONDS = 3000; // have to see if 3 seconds is a good threshold - - private Thread[] threads = new Thread[NUM_THREADS]; - private byte[][] buffers; // array of byte[] buffers, to hold the data that is read - private Stack freeList = new Stack<>(); // indices in buffers[] array that are available - - private Queue readAheadQueue = new LinkedList<>(); // queue of requests that are not picked up by any worker thread yet - private LinkedList inProgressList = new LinkedList<>(); // requests being processed by worker threads - private LinkedList completedReadList = new LinkedList<>(); // buffers available for reading - private static final ReadBufferManager BUFFER_MANAGER; // singleton, initialized in static initialization block - - static { - BUFFER_MANAGER = new ReadBufferManager(); - BUFFER_MANAGER.init(); - } - - static ReadBufferManager getBufferManager() { - return BUFFER_MANAGER; - } - - private void init() { - buffers = new byte[NUM_BUFFERS][]; - for (int i = 0; i < NUM_BUFFERS; i++) { - buffers[i] = new byte[BLOCK_SIZE]; // same buffers are reused. The byte array never goes back to GC - freeList.add(i); - } - for (int i = 0; i < NUM_THREADS; i++) { - Thread t = new Thread(new ReadBufferWorker(i)); - t.setDaemon(true); - threads[i] = t; - t.setName("SeaweedFS-prefetch-" + i); - t.start(); - } - ReadBufferWorker.UNLEASH_WORKERS.countDown(); - } - - // hide instance constructor - private ReadBufferManager() { - } - - - /* - * - * SeaweedInputStream-facing methods - * - */ - - - /** - * {@link SeaweedInputStream} calls this method to queue read-aheads. 
- * - * @param stream The {@link SeaweedInputStream} for which to do the read-ahead - * @param requestedOffset The offset in the file which shoukd be read - * @param requestedLength The length to read - */ - void queueReadAhead(final SeaweedInputStream stream, final long requestedOffset, final int requestedLength) { - if (LOGGER.isTraceEnabled()) { - LOGGER.trace("Start Queueing readAhead for {} offset {} length {}", - stream.getPath(), requestedOffset, requestedLength); - } - ReadBuffer buffer; - synchronized (this) { - if (isAlreadyQueued(stream, requestedOffset)) { - return; // already queued, do not queue again - } - if (freeList.isEmpty() && !tryEvict()) { - return; // no buffers available, cannot queue anything - } - - buffer = new ReadBuffer(); - buffer.setStream(stream); - buffer.setOffset(requestedOffset); - buffer.setLength(0); - buffer.setRequestedLength(requestedLength); - buffer.setStatus(ReadBufferStatus.NOT_AVAILABLE); - buffer.setLatch(new CountDownLatch(1)); - - Integer bufferIndex = freeList.pop(); // will return a value, since we have checked size > 0 already - - buffer.setBuffer(buffers[bufferIndex]); - buffer.setBufferindex(bufferIndex); - readAheadQueue.add(buffer); - notifyAll(); - } - if (LOGGER.isTraceEnabled()) { - LOGGER.trace("Done q-ing readAhead for file {} offset {} buffer idx {}", - stream.getPath(), requestedOffset, buffer.getBufferindex()); - } - } - - - /** - * {@link SeaweedInputStream} calls this method read any bytes already available in a buffer (thereby saving a - * remote read). This returns the bytes if the data already exists in buffer. If there is a buffer that is reading - * the requested offset, then this method blocks until that read completes. If the data is queued in a read-ahead - * but not picked up by a worker thread yet, then it cancels that read-ahead and reports cache miss. This is because - * depending on worker thread availability, the read-ahead may take a while - the calling thread can do it's own - * read to get the data faster (copmared to the read waiting in queue for an indeterminate amount of time). - * - * @param stream the file to read bytes for - * @param position the offset in the file to do a read for - * @param length the length to read - * @param buffer the buffer to read data into. Note that the buffer will be written into from offset 0. 
- * @return the number of bytes read - */ - int getBlock(final SeaweedInputStream stream, final long position, final int length, final byte[] buffer) { - // not synchronized, so have to be careful with locking - if (LOGGER.isTraceEnabled()) { - LOGGER.trace("getBlock for file {} position {} thread {}", - stream.getPath(), position, Thread.currentThread().getName()); - } - - waitForProcess(stream, position); - - int bytesRead = 0; - synchronized (this) { - bytesRead = getBlockFromCompletedQueue(stream, position, length, buffer); - } - if (bytesRead > 0) { - if (LOGGER.isTraceEnabled()) { - LOGGER.trace("Done read from Cache for {} position {} length {}", - stream.getPath(), position, bytesRead); - } - return bytesRead; - } - - // otherwise, just say we got nothing - calling thread can do its own read - return 0; - } - - /* - * - * Internal methods - * - */ - - private void waitForProcess(final SeaweedInputStream stream, final long position) { - ReadBuffer readBuf; - synchronized (this) { - clearFromReadAheadQueue(stream, position); - readBuf = getFromList(inProgressList, stream, position); - } - if (readBuf != null) { // if in in-progress queue, then block for it - try { - if (LOGGER.isTraceEnabled()) { - LOGGER.trace("got a relevant read buffer for file {} offset {} buffer idx {}", - stream.getPath(), readBuf.getOffset(), readBuf.getBufferindex()); - } - readBuf.getLatch().await(); // blocking wait on the caller stream's thread - // Note on correctness: readBuf gets out of inProgressList only in 1 place: after worker thread - // is done processing it (in doneReading). There, the latch is set after removing the buffer from - // inProgressList. So this latch is safe to be outside the synchronized block. - // Putting it in synchronized would result in a deadlock, since this thread would be holding the lock - // while waiting, so no one will be able to change any state. If this becomes more complex in the future, - // then the latch cane be removed and replaced with wait/notify whenever inProgressList is touched. - } catch (InterruptedException ex) { - Thread.currentThread().interrupt(); - } - if (LOGGER.isTraceEnabled()) { - LOGGER.trace("latch done for file {} buffer idx {} length {}", - stream.getPath(), readBuf.getBufferindex(), readBuf.getLength()); - } - } - } - - /** - * If any buffer in the completedlist can be reclaimed then reclaim it and return the buffer to free list. - * The objective is to find just one buffer - there is no advantage to evicting more than one. - * - * @return whether the eviction succeeeded - i.e., were we able to free up one buffer - */ - private synchronized boolean tryEvict() { - ReadBuffer nodeToEvict = null; - if (completedReadList.size() <= 0) { - return false; // there are no evict-able buffers - } - - // first, try buffers where all bytes have been consumed (approximated as first and last bytes consumed) - for (ReadBuffer buf : completedReadList) { - if (buf.isFirstByteConsumed() && buf.isLastByteConsumed()) { - nodeToEvict = buf; - break; - } - } - if (nodeToEvict != null) { - return evict(nodeToEvict); - } - - // next, try buffers where any bytes have been consumed (may be a bad idea? 
have to experiment and see) - for (ReadBuffer buf : completedReadList) { - if (buf.isAnyByteConsumed()) { - nodeToEvict = buf; - break; - } - } - - if (nodeToEvict != null) { - return evict(nodeToEvict); - } - - // next, try any old nodes that have not been consumed - long earliestBirthday = Long.MAX_VALUE; - for (ReadBuffer buf : completedReadList) { - if (buf.getTimeStamp() < earliestBirthday) { - nodeToEvict = buf; - earliestBirthday = buf.getTimeStamp(); - } - } - if ((currentTimeMillis() - earliestBirthday > THRESHOLD_AGE_MILLISECONDS) && (nodeToEvict != null)) { - return evict(nodeToEvict); - } - - // nothing can be evicted - return false; - } - - private boolean evict(final ReadBuffer buf) { - freeList.push(buf.getBufferindex()); - completedReadList.remove(buf); - if (LOGGER.isTraceEnabled()) { - LOGGER.trace("Evicting buffer idx {}; was used for file {} offset {} length {}", - buf.getBufferindex(), buf.getStream().getPath(), buf.getOffset(), buf.getLength()); - } - return true; - } - - private boolean isAlreadyQueued(final SeaweedInputStream stream, final long requestedOffset) { - // returns true if any part of the buffer is already queued - return (isInList(readAheadQueue, stream, requestedOffset) - || isInList(inProgressList, stream, requestedOffset) - || isInList(completedReadList, stream, requestedOffset)); - } - - private boolean isInList(final Collection list, final SeaweedInputStream stream, final long requestedOffset) { - return (getFromList(list, stream, requestedOffset) != null); - } - - private ReadBuffer getFromList(final Collection list, final SeaweedInputStream stream, final long requestedOffset) { - for (ReadBuffer buffer : list) { - if (buffer.getStream() == stream) { - if (buffer.getStatus() == ReadBufferStatus.AVAILABLE - && requestedOffset >= buffer.getOffset() - && requestedOffset < buffer.getOffset() + buffer.getLength()) { - return buffer; - } else if (requestedOffset >= buffer.getOffset() - && requestedOffset < buffer.getOffset() + buffer.getRequestedLength()) { - return buffer; - } - } - } - return null; - } - - private void clearFromReadAheadQueue(final SeaweedInputStream stream, final long requestedOffset) { - ReadBuffer buffer = getFromList(readAheadQueue, stream, requestedOffset); - if (buffer != null) { - readAheadQueue.remove(buffer); - notifyAll(); // lock is held in calling method - freeList.push(buffer.getBufferindex()); - } - } - - private int getBlockFromCompletedQueue(final SeaweedInputStream stream, final long position, final int length, - final byte[] buffer) { - ReadBuffer buf = getFromList(completedReadList, stream, position); - if (buf == null || position >= buf.getOffset() + buf.getLength()) { - return 0; - } - int cursor = (int) (position - buf.getOffset()); - int availableLengthInBuffer = buf.getLength() - cursor; - int lengthToCopy = Math.min(length, availableLengthInBuffer); - System.arraycopy(buf.getBuffer(), cursor, buffer, 0, lengthToCopy); - if (cursor == 0) { - buf.setFirstByteConsumed(true); - } - if (cursor + lengthToCopy == buf.getLength()) { - buf.setLastByteConsumed(true); - } - buf.setAnyByteConsumed(true); - return lengthToCopy; - } - - /* - * - * ReadBufferWorker-thread-facing methods - * - */ - - /** - * ReadBufferWorker thread calls this to get the next buffer that it should work on. 
- * - * @return {@link ReadBuffer} - * @throws InterruptedException if thread is interrupted - */ - ReadBuffer getNextBlockToRead() throws InterruptedException { - ReadBuffer buffer = null; - synchronized (this) { - //buffer = readAheadQueue.take(); // blocking method - while (readAheadQueue.size() == 0) { - wait(); - } - buffer = readAheadQueue.remove(); - notifyAll(); - if (buffer == null) { - return null; // should never happen - } - buffer.setStatus(ReadBufferStatus.READING_IN_PROGRESS); - inProgressList.add(buffer); - } - if (LOGGER.isTraceEnabled()) { - LOGGER.trace("ReadBufferWorker picked file {} for offset {}", - buffer.getStream().getPath(), buffer.getOffset()); - } - return buffer; - } - - /** - * ReadBufferWorker thread calls this method to post completion. - * - * @param buffer the buffer whose read was completed - * @param result the {@link ReadBufferStatus} after the read operation in the worker thread - * @param bytesActuallyRead the number of bytes that the worker thread was actually able to read - */ - void doneReading(final ReadBuffer buffer, final ReadBufferStatus result, final int bytesActuallyRead) { - if (LOGGER.isTraceEnabled()) { - LOGGER.trace("ReadBufferWorker completed file {} for offset {} bytes {}", - buffer.getStream().getPath(), buffer.getOffset(), bytesActuallyRead); - } - synchronized (this) { - inProgressList.remove(buffer); - if (result == ReadBufferStatus.AVAILABLE && bytesActuallyRead > 0) { - buffer.setStatus(ReadBufferStatus.AVAILABLE); - buffer.setTimeStamp(currentTimeMillis()); - buffer.setLength(bytesActuallyRead); - completedReadList.add(buffer); - } else { - freeList.push(buffer.getBufferindex()); - // buffer should go out of scope after the end of the calling method in ReadBufferWorker, and eligible for GC - } - } - //outside the synchronized, since anyone receiving a wake-up from the latch must see safe-published results - buffer.getLatch().countDown(); // wake up waiting threads (if any) - } - - /** - * Similar to System.currentTimeMillis, except implemented with System.nanoTime(). - * System.currentTimeMillis can go backwards when system clock is changed (e.g., with NTP time synchronization), - * making it unsuitable for measuring time intervals. nanotime is strictly monotonically increasing per CPU core. - * Note: it is not monotonic across Sockets, and even within a CPU, its only the - * more recent parts which share a clock across all cores. - * - * @return current time in milliseconds - */ - private long currentTimeMillis() { - return System.nanoTime() / 1000 / 1000; - } -} diff --git a/other/java/hdfs2/src/main/java/seaweed/hdfs/ReadBufferWorker.java b/other/java/hdfs2/src/main/java/seaweed/hdfs/ReadBufferWorker.java deleted file mode 100644 index 6ffbc4644..000000000 --- a/other/java/hdfs2/src/main/java/seaweed/hdfs/ReadBufferWorker.java +++ /dev/null @@ -1,70 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package seaweed.hdfs; - -import java.util.concurrent.CountDownLatch; - -class ReadBufferWorker implements Runnable { - - protected static final CountDownLatch UNLEASH_WORKERS = new CountDownLatch(1); - private int id; - - ReadBufferWorker(final int id) { - this.id = id; - } - - /** - * return the ID of ReadBufferWorker. - */ - public int getId() { - return this.id; - } - - /** - * Waits until a buffer becomes available in ReadAheadQueue. - * Once a buffer becomes available, reads the file specified in it and then posts results back to buffer manager. - * Rinse and repeat. Forever. - */ - public void run() { - try { - UNLEASH_WORKERS.await(); - } catch (InterruptedException ex) { - Thread.currentThread().interrupt(); - } - ReadBufferManager bufferManager = ReadBufferManager.getBufferManager(); - ReadBuffer buffer; - while (true) { - try { - buffer = bufferManager.getNextBlockToRead(); // blocks, until a buffer is available for this thread - } catch (InterruptedException ex) { - Thread.currentThread().interrupt(); - return; - } - if (buffer != null) { - try { - // do the actual read, from the file. - int bytesRead = buffer.getStream().readRemote(buffer.getOffset(), buffer.getBuffer(), 0, buffer.getRequestedLength()); - bufferManager.doneReading(buffer, ReadBufferStatus.AVAILABLE, bytesRead); // post result back to ReadBufferManager - } catch (Exception ex) { - bufferManager.doneReading(buffer, ReadBufferStatus.READ_FAILED, 0); - } - } - } - } -} diff --git a/other/java/hdfs3/src/main/java/seaweed/hdfs/ReadBufferStatus.java b/other/java/hdfs2/src/main/java/seaweed/hdfs/SeaweedAbstractFileSystem.java similarity index 64% rename from other/java/hdfs3/src/main/java/seaweed/hdfs/ReadBufferStatus.java rename to other/java/hdfs2/src/main/java/seaweed/hdfs/SeaweedAbstractFileSystem.java index d63674977..e021401aa 100644 --- a/other/java/hdfs3/src/main/java/seaweed/hdfs/ReadBufferStatus.java +++ b/other/java/hdfs2/src/main/java/seaweed/hdfs/SeaweedAbstractFileSystem.java @@ -18,12 +18,18 @@ package seaweed.hdfs; -/** - * The ReadBufferStatus for Rest AbfsClient - */ -public enum ReadBufferStatus { - NOT_AVAILABLE, // buffers sitting in readaheadqueue have this stats - READING_IN_PROGRESS, // reading is in progress on this buffer. Buffer should be in inProgressList - AVAILABLE, // data is available in buffer. It should be in completedList - READ_FAILED // read completed, but failed. 
+import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.DelegateToFileSystem; + +import java.io.IOException; +import java.net.URI; +import java.net.URISyntaxException; + +public class SeaweedAbstractFileSystem extends DelegateToFileSystem { + + SeaweedAbstractFileSystem(final URI uri, final Configuration conf) + throws IOException, URISyntaxException { + super(uri, new SeaweedFileSystem(), conf, "seaweedfs", false); + } + } diff --git a/other/java/hdfs2/src/main/java/seaweed/hdfs/SeaweedFileSystem.java b/other/java/hdfs2/src/main/java/seaweed/hdfs/SeaweedFileSystem.java index d471d8440..25395db7a 100644 --- a/other/java/hdfs2/src/main/java/seaweed/hdfs/SeaweedFileSystem.java +++ b/other/java/hdfs2/src/main/java/seaweed/hdfs/SeaweedFileSystem.java @@ -5,31 +5,31 @@ import org.apache.hadoop.fs.*; import org.apache.hadoop.fs.permission.AclEntry; import org.apache.hadoop.fs.permission.AclStatus; import org.apache.hadoop.fs.permission.FsPermission; -import org.apache.hadoop.security.AccessControlException; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.util.Progressable; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import seaweedfs.client.FilerProto; import java.io.FileNotFoundException; import java.io.IOException; -import java.io.InputStream; import java.io.OutputStream; import java.net.URI; import java.util.EnumSet; import java.util.List; import java.util.Map; -import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY; +public class SeaweedFileSystem extends FileSystem { -public class SeaweedFileSystem extends org.apache.hadoop.fs.FileSystem { - - public static final int FS_SEAWEED_DEFAULT_PORT = 8888; public static final String FS_SEAWEED_FILER_HOST = "fs.seaweed.filer.host"; public static final String FS_SEAWEED_FILER_PORT = "fs.seaweed.filer.port"; + public static final int FS_SEAWEED_DEFAULT_PORT = 8888; + public static final String FS_SEAWEED_BUFFER_SIZE = "fs.seaweed.buffer.size"; + public static final String FS_SEAWEED_REPLICATION = "fs.seaweed.replication"; + public static final String FS_SEAWEED_VOLUME_SERVER_ACCESS = "fs.seaweed.volume.server.access"; + public static final int FS_SEAWEED_DEFAULT_BUFFER_SIZE = 4 * 1024 * 1024; private static final Logger LOG = LoggerFactory.getLogger(SeaweedFileSystem.class); - private static int BUFFER_SIZE = 16 * 1024 * 1024; private URI uri; private Path workingDirectory = new Path("/"); @@ -60,15 +60,19 @@ public class SeaweedFileSystem extends org.apache.hadoop.fs.FileSystem { port = (port == -1) ? 
FS_SEAWEED_DEFAULT_PORT : port; conf.setInt(FS_SEAWEED_FILER_PORT, port); - conf.setInt(IO_FILE_BUFFER_SIZE_KEY, BUFFER_SIZE); - setConf(conf); this.uri = uri; - seaweedFileSystemStore = new SeaweedFileSystemStore(host, port); + seaweedFileSystemStore = new SeaweedFileSystemStore(host, port, conf); } + @Override + public void close() throws IOException { + super.close(); + this.seaweedFileSystemStore.close(); + } + @Override public FSDataInputStream open(Path path, int bufferSize) throws IOException { @@ -77,8 +81,9 @@ public class SeaweedFileSystem extends org.apache.hadoop.fs.FileSystem { path = qualify(path); try { - InputStream inputStream = seaweedFileSystemStore.openFileForRead(path, statistics, bufferSize); - return new FSDataInputStream(inputStream); + int seaweedBufferSize = this.getConf().getInt(FS_SEAWEED_BUFFER_SIZE, FS_SEAWEED_DEFAULT_BUFFER_SIZE); + FSInputStream inputStream = seaweedFileSystemStore.openFileForRead(path, statistics); + return new FSDataInputStream(new BufferedByteBufferReadableInputStream(inputStream, 4 * seaweedBufferSize)); } catch (Exception ex) { LOG.warn("open path: {} bufferSize:{}", path, bufferSize, ex); return null; @@ -94,8 +99,9 @@ public class SeaweedFileSystem extends org.apache.hadoop.fs.FileSystem { path = qualify(path); try { - String replicaPlacement = String.format("%03d", replication - 1); - OutputStream outputStream = seaweedFileSystemStore.createFile(path, overwrite, permission, bufferSize, replicaPlacement); + String replicaPlacement = this.getConf().get(FS_SEAWEED_REPLICATION, String.format("%03d", replication - 1)); + int seaweedBufferSize = this.getConf().getInt(FS_SEAWEED_BUFFER_SIZE, FS_SEAWEED_DEFAULT_BUFFER_SIZE); + OutputStream outputStream = seaweedFileSystemStore.createFile(path, overwrite, permission, seaweedBufferSize, replicaPlacement); return new FSDataOutputStream(outputStream, statistics); } catch (Exception ex) { LOG.warn("create path: {} bufferSize:{} blockSize:{}", path, bufferSize, blockSize, ex); @@ -105,8 +111,9 @@ public class SeaweedFileSystem extends org.apache.hadoop.fs.FileSystem { /** * {@inheritDoc} + * * @throws FileNotFoundException if the parent directory is not present -or - * is not a directory. + * is not a directory. 
*/ @Override public FSDataOutputStream createNonRecursive(Path path, @@ -123,9 +130,10 @@ public class SeaweedFileSystem extends org.apache.hadoop.fs.FileSystem { throw new FileAlreadyExistsException("Not a directory: " + parent); } } + int seaweedBufferSize = this.getConf().getInt(FS_SEAWEED_BUFFER_SIZE, FS_SEAWEED_DEFAULT_BUFFER_SIZE); return create(path, permission, flags.contains(CreateFlag.OVERWRITE), bufferSize, - replication, blockSize, progress); + replication, seaweedBufferSize, progress); } @Override @@ -135,7 +143,8 @@ public class SeaweedFileSystem extends org.apache.hadoop.fs.FileSystem { path = qualify(path); try { - OutputStream outputStream = seaweedFileSystemStore.createFile(path, false, null, bufferSize, ""); + int seaweedBufferSize = this.getConf().getInt(FS_SEAWEED_BUFFER_SIZE, FS_SEAWEED_DEFAULT_BUFFER_SIZE); + OutputStream outputStream = seaweedFileSystemStore.createFile(path, false, null, seaweedBufferSize, ""); return new FSDataOutputStream(outputStream, statistics); } catch (Exception ex) { LOG.warn("append path: {} bufferSize:{}", path, bufferSize, ex); @@ -144,7 +153,7 @@ public class SeaweedFileSystem extends org.apache.hadoop.fs.FileSystem { } @Override - public boolean rename(Path src, Path dst) { + public boolean rename(Path src, Path dst) throws IOException { LOG.debug("rename path: {} => {}", src, dst); @@ -155,12 +164,13 @@ public class SeaweedFileSystem extends org.apache.hadoop.fs.FileSystem { if (src.equals(dst)) { return true; } - FileStatus dstFileStatus = getFileStatus(dst); + FilerProto.Entry entry = seaweedFileSystemStore.lookupEntry(dst); - String sourceFileName = src.getName(); Path adjustedDst = dst; - if (dstFileStatus != null) { + if (entry != null) { + FileStatus dstFileStatus = getFileStatus(dst); + String sourceFileName = src.getName(); if (!dstFileStatus.isDirectory()) { return false; } @@ -175,18 +185,20 @@ public class SeaweedFileSystem extends org.apache.hadoop.fs.FileSystem { } @Override - public boolean delete(Path path, boolean recursive) { + public boolean delete(Path path, boolean recursive) throws IOException { LOG.debug("delete path: {} recursive:{}", path, recursive); path = qualify(path); - FileStatus fileStatus = getFileStatus(path); + FilerProto.Entry entry = seaweedFileSystemStore.lookupEntry(path); - if (fileStatus == null) { + if (entry == null) { return true; } + FileStatus fileStatus = getFileStatus(path); + return seaweedFileSystemStore.deleteEntries(path, fileStatus.isDirectory(), recursive); } @@ -222,9 +234,9 @@ public class SeaweedFileSystem extends org.apache.hadoop.fs.FileSystem { path = qualify(path); - FileStatus fileStatus = getFileStatus(path); + FilerProto.Entry entry = seaweedFileSystemStore.lookupEntry(path); - if (fileStatus == null) { + if (entry == null) { UserGroupInformation currentUser = UserGroupInformation.getCurrentUser(); return seaweedFileSystemStore.createDirectory(path, currentUser, @@ -233,6 +245,8 @@ public class SeaweedFileSystem extends org.apache.hadoop.fs.FileSystem { } + FileStatus fileStatus = getFileStatus(path); + if (fileStatus.isDirectory()) { return true; } else { @@ -241,7 +255,7 @@ public class SeaweedFileSystem extends org.apache.hadoop.fs.FileSystem { } @Override - public FileStatus getFileStatus(Path path) { + public FileStatus getFileStatus(Path path) throws IOException { LOG.debug("getFileStatus path: {}", path); @@ -335,9 +349,7 @@ public class SeaweedFileSystem extends org.apache.hadoop.fs.FileSystem { @Override public void createSymlink(final Path target, final Path link, 
- final boolean createParent) throws AccessControlException, - FileAlreadyExistsException, FileNotFoundException, - ParentNotDirectoryException, UnsupportedFileSystemException, + final boolean createParent) throws IOException { // Supporting filesystems should override this method throw new UnsupportedOperationException( diff --git a/other/java/hdfs2/src/main/java/seaweed/hdfs/SeaweedFileSystemStore.java b/other/java/hdfs2/src/main/java/seaweed/hdfs/SeaweedFileSystemStore.java index 774c090e8..f4e8c9349 100644 --- a/other/java/hdfs2/src/main/java/seaweed/hdfs/SeaweedFileSystemStore.java +++ b/other/java/hdfs2/src/main/java/seaweed/hdfs/SeaweedFileSystemStore.java @@ -1,5 +1,7 @@ package seaweed.hdfs; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FSInputStream; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; @@ -7,30 +9,43 @@ import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.security.UserGroupInformation; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import seaweedfs.client.FilerClient; -import seaweedfs.client.FilerGrpcClient; -import seaweedfs.client.FilerProto; -import seaweedfs.client.SeaweedRead; +import seaweedfs.client.*; import java.io.FileNotFoundException; import java.io.IOException; -import java.io.InputStream; import java.io.OutputStream; import java.util.ArrayList; import java.util.Arrays; import java.util.List; +import static seaweed.hdfs.SeaweedFileSystem.*; + public class SeaweedFileSystemStore { private static final Logger LOG = LoggerFactory.getLogger(SeaweedFileSystemStore.class); - private FilerGrpcClient filerGrpcClient; private FilerClient filerClient; + private Configuration conf; - public SeaweedFileSystemStore(String host, int port) { + public SeaweedFileSystemStore(String host, int port, Configuration conf) { int grpcPort = 10000 + port; - filerGrpcClient = new FilerGrpcClient(host, grpcPort); - filerClient = new FilerClient(filerGrpcClient); + filerClient = new FilerClient(host, grpcPort); + this.conf = conf; + String volumeServerAccessMode = this.conf.get(FS_SEAWEED_VOLUME_SERVER_ACCESS, "direct"); + if (volumeServerAccessMode.equals("publicUrl")) { + filerClient.setAccessVolumeServerByPublicUrl(); + } else if (volumeServerAccessMode.equals("filerProxy")) { + filerClient.setAccessVolumeServerByFilerProxy(); + } + + } + + public void close() { + try { + this.filerClient.shutdown(); + } catch (InterruptedException e) { + e.printStackTrace(); + } } public static String getParentDirectory(Path path) { @@ -61,9 +76,19 @@ public class SeaweedFileSystemStore { ); } - public FileStatus[] listEntries(final Path path) { + public FileStatus[] listEntries(final Path path) throws IOException { LOG.debug("listEntries path: {}", path); + FileStatus pathStatus = getFileStatus(path); + + if (pathStatus == null) { + return new FileStatus[0]; + } + + if (!pathStatus.isDirectory()) { + return new FileStatus[]{pathStatus}; + } + List fileStatuses = new ArrayList(); List entries = filerClient.listEntries(path.toUri().getPath()); @@ -74,14 +99,16 @@ public class SeaweedFileSystemStore { fileStatuses.add(fileStatus); } + LOG.debug("listEntries path: {} size {}", fileStatuses, fileStatuses.size()); return fileStatuses.toArray(new FileStatus[0]); + } - public FileStatus getFileStatus(final Path path) { + public FileStatus getFileStatus(final Path path) throws IOException { FilerProto.Entry entry = lookupEntry(path); if (entry == null) { - return null; + 
throw new FileNotFoundException("File does not exist: " + path); } LOG.debug("doGetFileStatus path:{} entry:{}", path, entry); @@ -111,10 +138,10 @@ public class SeaweedFileSystemStore { private FileStatus doGetFileStatus(Path path, FilerProto.Entry entry) { FilerProto.FuseAttributes attributes = entry.getAttributes(); - long length = SeaweedRead.totalSize(entry.getChunksList()); + long length = SeaweedRead.fileSize(entry); boolean isDir = entry.getIsDirectory(); int block_replication = 1; - int blocksize = 512; + int blocksize = this.conf.getInt(FS_SEAWEED_BUFFER_SIZE, FS_SEAWEED_DEFAULT_BUFFER_SIZE); long modification_time = attributes.getMtime() * 1000; // milliseconds long access_time = 0; FsPermission permission = FsPermission.createImmutable((short) attributes.getFileMode()); @@ -124,7 +151,7 @@ public class SeaweedFileSystemStore { modification_time, access_time, permission, owner, group, null, path); } - private FilerProto.Entry lookupEntry(Path path) { + public FilerProto.Entry lookupEntry(Path path) { return filerClient.lookupEntry(getParentDirectory(path), path.getName()); @@ -170,9 +197,10 @@ public class SeaweedFileSystemStore { if (existingEntry != null) { entry = FilerProto.Entry.newBuilder(); entry.mergeFrom(existingEntry); + entry.clearContent(); entry.getAttributesBuilder().setMtime(now); LOG.debug("createFile merged entry path:{} entry:{} from:{}", path, entry, existingEntry); - writePosition = SeaweedRead.totalSize(existingEntry.getChunksList()); + writePosition = SeaweedRead.fileSize(existingEntry); replication = existingEntry.getAttributes().getReplication(); } } @@ -189,30 +217,27 @@ public class SeaweedFileSystemStore { .clearGroupName() .addAllGroupName(Arrays.asList(userGroupInformation.getGroupNames())) ); + SeaweedWrite.writeMeta(filerClient, getParentDirectory(path), entry); } - return new SeaweedOutputStream(filerGrpcClient, path, entry, writePosition, bufferSize, replication); + return new SeaweedHadoopOutputStream(filerClient, path.toString(), entry, writePosition, bufferSize, replication); } - public InputStream openFileForRead(final Path path, FileSystem.Statistics statistics, - int bufferSize) throws IOException { + public FSInputStream openFileForRead(final Path path, FileSystem.Statistics statistics) throws IOException { - LOG.debug("openFileForRead path:{} bufferSize:{}", path, bufferSize); + LOG.debug("openFileForRead path:{}", path); - int readAheadQueueDepth = 2; FilerProto.Entry entry = lookupEntry(path); if (entry == null) { throw new FileNotFoundException("read non-exist file " + path); } - return new SeaweedInputStream(filerGrpcClient, + return new SeaweedHadoopInputStream(filerClient, statistics, path.toUri().getPath(), - entry, - bufferSize, - readAheadQueueDepth); + entry); } public void setOwner(Path path, String owner, String group) { diff --git a/other/java/hdfs2/src/main/java/seaweed/hdfs/SeaweedHadoopInputStream.java b/other/java/hdfs2/src/main/java/seaweed/hdfs/SeaweedHadoopInputStream.java new file mode 100644 index 000000000..f26eae597 --- /dev/null +++ b/other/java/hdfs2/src/main/java/seaweed/hdfs/SeaweedHadoopInputStream.java @@ -0,0 +1,150 @@ +package seaweed.hdfs; + +// based on org.apache.hadoop.fs.azurebfs.services.AbfsInputStream + +import org.apache.hadoop.fs.ByteBufferReadable; +import org.apache.hadoop.fs.FSInputStream; +import org.apache.hadoop.fs.FileSystem.Statistics; +import seaweedfs.client.FilerClient; +import seaweedfs.client.FilerProto; +import seaweedfs.client.SeaweedInputStream; + +import java.io.EOFException; 
+import java.io.IOException; +import java.nio.ByteBuffer; + +public class SeaweedHadoopInputStream extends FSInputStream implements ByteBufferReadable { + + private final SeaweedInputStream seaweedInputStream; + private final Statistics statistics; + + public SeaweedHadoopInputStream( + final FilerClient filerClient, + final Statistics statistics, + final String path, + final FilerProto.Entry entry) throws IOException { + this.seaweedInputStream = new SeaweedInputStream(filerClient, path, entry); + this.statistics = statistics; + } + + @Override + public int read() throws IOException { + return seaweedInputStream.read(); + } + + @Override + public int read(final byte[] b, final int off, final int len) throws IOException { + return seaweedInputStream.read(b, off, len); + } + + // implement ByteBufferReadable + @Override + public synchronized int read(ByteBuffer buf) throws IOException { + int bytesRead = seaweedInputStream.read(buf); + + if (bytesRead > 0) { + if (statistics != null) { + statistics.incrementBytesRead(bytesRead); + } + } + + return bytesRead; + } + + /** + * Seek to given position in stream. + * + * @param n position to seek to + * @throws IOException if there is an error + * @throws EOFException if attempting to seek past end of file + */ + @Override + public synchronized void seek(long n) throws IOException { + seaweedInputStream.seek(n); + } + + @Override + public synchronized long skip(long n) throws IOException { + return seaweedInputStream.skip(n); + } + + /** + * Return the size of the remaining available bytes + * if the size is less than or equal to {@link Integer#MAX_VALUE}, + * otherwise, return {@link Integer#MAX_VALUE}. + *
+     * <p>
+ * This is to match the behavior of DFSInputStream.available(), + * which some clients may rely on (HBase write-ahead log reading in + * particular). + */ + @Override + public synchronized int available() throws IOException { + return seaweedInputStream.available(); + } + + /** + * Returns the length of the file that this stream refers to. Note that the length returned is the length + * as of the time the Stream was opened. Specifically, if there have been subsequent appends to the file, + * they wont be reflected in the returned length. + * + * @return length of the file. + * @throws IOException if the stream is closed + */ + public long length() throws IOException { + return seaweedInputStream.length(); + } + + /** + * Return the current offset from the start of the file + * + * @throws IOException throws {@link IOException} if there is an error + */ + @Override + public synchronized long getPos() throws IOException { + return seaweedInputStream.getPos(); + } + + /** + * Seeks a different copy of the data. Returns true if + * found a new source, false otherwise. + * + * @throws IOException throws {@link IOException} if there is an error + */ + @Override + public boolean seekToNewSource(long l) throws IOException { + return false; + } + + @Override + public synchronized void close() throws IOException { + seaweedInputStream.close(); + } + + /** + * Not supported by this stream. Throws {@link UnsupportedOperationException} + * + * @param readlimit ignored + */ + @Override + public synchronized void mark(int readlimit) { + throw new UnsupportedOperationException("mark()/reset() not supported on this stream"); + } + + /** + * Not supported by this stream. Throws {@link UnsupportedOperationException} + */ + @Override + public synchronized void reset() throws IOException { + throw new UnsupportedOperationException("mark()/reset() not supported on this stream"); + } + + /** + * gets whether mark and reset are supported by {@code ADLFileInputStream}. Always returns false. 
+ * + * @return always {@code false} + */ + @Override + public boolean markSupported() { + return false; + } +} diff --git a/other/java/hdfs2/src/main/java/seaweed/hdfs/SeaweedHadoopOutputStream.java b/other/java/hdfs2/src/main/java/seaweed/hdfs/SeaweedHadoopOutputStream.java new file mode 100644 index 000000000..da5b56bbc --- /dev/null +++ b/other/java/hdfs2/src/main/java/seaweed/hdfs/SeaweedHadoopOutputStream.java @@ -0,0 +1,16 @@ +package seaweed.hdfs; + +// adapted from org.apache.hadoop.fs.azurebfs.services.AbfsOutputStream + +import seaweedfs.client.FilerClient; +import seaweedfs.client.FilerProto; +import seaweedfs.client.SeaweedOutputStream; + +public class SeaweedHadoopOutputStream extends SeaweedOutputStream { + + public SeaweedHadoopOutputStream(FilerClient filerClient, final String path, FilerProto.Entry.Builder entry, + final long position, final int bufferSize, final String replication) { + super(filerClient, path, entry, position, bufferSize, replication); + } + +} diff --git a/other/java/hdfs2/src/main/java/seaweed/hdfs/SeaweedInputStream.java b/other/java/hdfs2/src/main/java/seaweed/hdfs/SeaweedInputStream.java deleted file mode 100644 index 90c14c772..000000000 --- a/other/java/hdfs2/src/main/java/seaweed/hdfs/SeaweedInputStream.java +++ /dev/null @@ -1,371 +0,0 @@ -package seaweed.hdfs; - -// based on org.apache.hadoop.fs.azurebfs.services.AbfsInputStream - -import com.google.common.base.Preconditions; -import org.apache.hadoop.fs.FSExceptionMessages; -import org.apache.hadoop.fs.FSInputStream; -import org.apache.hadoop.fs.FileSystem.Statistics; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import seaweedfs.client.FilerGrpcClient; -import seaweedfs.client.FilerProto; -import seaweedfs.client.SeaweedRead; - -import java.io.EOFException; -import java.io.IOException; -import java.util.List; - -public class SeaweedInputStream extends FSInputStream { - - private static final Logger LOG = LoggerFactory.getLogger(SeaweedInputStream.class); - - private final FilerGrpcClient filerGrpcClient; - private final Statistics statistics; - private final String path; - private final FilerProto.Entry entry; - private final List visibleIntervalList; - private final long contentLength; - private final int bufferSize; // default buffer size - private final int readAheadQueueDepth; // initialized in constructor - private final boolean readAheadEnabled; // whether enable readAhead; - - private byte[] buffer = null; // will be initialized on first use - - private long fCursor = 0; // cursor of buffer within file - offset of next byte to read from remote server - private long fCursorAfterLastRead = -1; - private int bCursor = 0; // cursor of read within buffer - offset of next byte to be returned from buffer - private int limit = 0; // offset of next byte to be read into buffer from service (i.e., upper marker+1 - // of valid bytes in buffer) - private boolean closed = false; - - public SeaweedInputStream( - final FilerGrpcClient filerGrpcClient, - final Statistics statistics, - final String path, - final FilerProto.Entry entry, - final int bufferSize, - final int readAheadQueueDepth) { - this.filerGrpcClient = filerGrpcClient; - this.statistics = statistics; - this.path = path; - this.entry = entry; - this.contentLength = SeaweedRead.totalSize(entry.getChunksList()); - this.bufferSize = bufferSize; - this.readAheadQueueDepth = (readAheadQueueDepth >= 0) ? 
readAheadQueueDepth : Runtime.getRuntime().availableProcessors(); - this.readAheadEnabled = true; - - this.visibleIntervalList = SeaweedRead.nonOverlappingVisibleIntervals(entry.getChunksList()); - - LOG.debug("new path:{} entry:{} visibleIntervalList:{}", path, entry, visibleIntervalList); - - } - - public String getPath() { - return path; - } - - @Override - public int read() throws IOException { - byte[] b = new byte[1]; - int numberOfBytesRead = read(b, 0, 1); - if (numberOfBytesRead < 0) { - return -1; - } else { - return (b[0] & 0xFF); - } - } - - @Override - public synchronized int read(final byte[] b, final int off, final int len) throws IOException { - int currentOff = off; - int currentLen = len; - int lastReadBytes; - int totalReadBytes = 0; - do { - lastReadBytes = readOneBlock(b, currentOff, currentLen); - if (lastReadBytes > 0) { - currentOff += lastReadBytes; - currentLen -= lastReadBytes; - totalReadBytes += lastReadBytes; - } - if (currentLen <= 0 || currentLen > b.length - currentOff) { - break; - } - } while (lastReadBytes > 0); - return totalReadBytes > 0 ? totalReadBytes : lastReadBytes; - } - - private int readOneBlock(final byte[] b, final int off, final int len) throws IOException { - if (closed) { - throw new IOException(FSExceptionMessages.STREAM_IS_CLOSED); - } - - Preconditions.checkNotNull(b); - - if (len == 0) { - return 0; - } - - if (this.available() == 0) { - return -1; - } - - if (off < 0 || len < 0 || len > b.length - off) { - throw new IndexOutOfBoundsException(); - } - - //If buffer is empty, then fill the buffer. - if (bCursor == limit) { - //If EOF, then return -1 - if (fCursor >= contentLength) { - return -1; - } - - long bytesRead = 0; - //reset buffer to initial state - i.e., throw away existing data - bCursor = 0; - limit = 0; - if (buffer == null) { - buffer = new byte[bufferSize]; - } - - // Enable readAhead when reading sequentially - if (-1 == fCursorAfterLastRead || fCursorAfterLastRead == fCursor || b.length >= bufferSize) { - bytesRead = readInternal(fCursor, buffer, 0, bufferSize, false); - } else { - bytesRead = readInternal(fCursor, buffer, 0, b.length, true); - } - - if (bytesRead == -1) { - return -1; - } - - limit += bytesRead; - fCursor += bytesRead; - fCursorAfterLastRead = fCursor; - } - - //If there is anything in the buffer, then return lesser of (requested bytes) and (bytes in buffer) - //(bytes returned may be less than requested) - int bytesRemaining = limit - bCursor; - int bytesToRead = Math.min(len, bytesRemaining); - System.arraycopy(buffer, bCursor, b, off, bytesToRead); - bCursor += bytesToRead; - if (statistics != null) { - statistics.incrementBytesRead(bytesToRead); - } - return bytesToRead; - } - - - private int readInternal(final long position, final byte[] b, final int offset, final int length, - final boolean bypassReadAhead) throws IOException { - if (readAheadEnabled && !bypassReadAhead) { - // try reading from read-ahead - if (offset != 0) { - throw new IllegalArgumentException("readahead buffers cannot have non-zero buffer offsets"); - } - int receivedBytes; - - // queue read-aheads - int numReadAheads = this.readAheadQueueDepth; - long nextSize; - long nextOffset = position; - while (numReadAheads > 0 && nextOffset < contentLength) { - nextSize = Math.min((long) bufferSize, contentLength - nextOffset); - ReadBufferManager.getBufferManager().queueReadAhead(this, nextOffset, (int) nextSize); - nextOffset = nextOffset + nextSize; - numReadAheads--; - } - - // try reading from buffers first - receivedBytes = 
ReadBufferManager.getBufferManager().getBlock(this, position, length, b); - if (receivedBytes > 0) { - return receivedBytes; - } - - // got nothing from read-ahead, do our own read now - receivedBytes = readRemote(position, b, offset, length); - return receivedBytes; - } else { - return readRemote(position, b, offset, length); - } - } - - int readRemote(long position, byte[] b, int offset, int length) throws IOException { - if (position < 0) { - throw new IllegalArgumentException("attempting to read from negative offset"); - } - if (position >= contentLength) { - return -1; // Hadoop prefers -1 to EOFException - } - if (b == null) { - throw new IllegalArgumentException("null byte array passed in to read() method"); - } - if (offset >= b.length) { - throw new IllegalArgumentException("offset greater than length of array"); - } - if (length < 0) { - throw new IllegalArgumentException("requested read length is less than zero"); - } - if (length > (b.length - offset)) { - throw new IllegalArgumentException("requested read length is more than will fit after requested offset in buffer"); - } - - long bytesRead = SeaweedRead.read(filerGrpcClient, visibleIntervalList, position, b, offset, length); - if (bytesRead > Integer.MAX_VALUE) { - throw new IOException("Unexpected Content-Length"); - } - return (int) bytesRead; - } - - /** - * Seek to given position in stream. - * - * @param n position to seek to - * @throws IOException if there is an error - * @throws EOFException if attempting to seek past end of file - */ - @Override - public synchronized void seek(long n) throws IOException { - if (closed) { - throw new IOException(FSExceptionMessages.STREAM_IS_CLOSED); - } - if (n < 0) { - throw new EOFException(FSExceptionMessages.NEGATIVE_SEEK); - } - if (n > contentLength) { - throw new EOFException(FSExceptionMessages.CANNOT_SEEK_PAST_EOF); - } - - if (n >= fCursor - limit && n <= fCursor) { // within buffer - bCursor = (int) (n - (fCursor - limit)); - return; - } - - // next read will read from here - fCursor = n; - - //invalidate buffer - limit = 0; - bCursor = 0; - } - - @Override - public synchronized long skip(long n) throws IOException { - if (closed) { - throw new IOException(FSExceptionMessages.STREAM_IS_CLOSED); - } - long currentPos = getPos(); - if (currentPos == contentLength) { - if (n > 0) { - throw new EOFException(FSExceptionMessages.CANNOT_SEEK_PAST_EOF); - } - } - long newPos = currentPos + n; - if (newPos < 0) { - newPos = 0; - n = newPos - currentPos; - } - if (newPos > contentLength) { - newPos = contentLength; - n = newPos - currentPos; - } - seek(newPos); - return n; - } - - /** - * Return the size of the remaining available bytes - * if the size is less than or equal to {@link Integer#MAX_VALUE}, - * otherwise, return {@link Integer#MAX_VALUE}. - *

- * This is to match the behavior of DFSInputStream.available(), - * which some clients may rely on (HBase write-ahead log reading in - * particular). - */ - @Override - public synchronized int available() throws IOException { - if (closed) { - throw new IOException( - FSExceptionMessages.STREAM_IS_CLOSED); - } - final long remaining = this.contentLength - this.getPos(); - return remaining <= Integer.MAX_VALUE - ? (int) remaining : Integer.MAX_VALUE; - } - - /** - * Returns the length of the file that this stream refers to. Note that the length returned is the length - * as of the time the Stream was opened. Specifically, if there have been subsequent appends to the file, - * they wont be reflected in the returned length. - * - * @return length of the file. - * @throws IOException if the stream is closed - */ - public long length() throws IOException { - if (closed) { - throw new IOException(FSExceptionMessages.STREAM_IS_CLOSED); - } - return contentLength; - } - - /** - * Return the current offset from the start of the file - * - * @throws IOException throws {@link IOException} if there is an error - */ - @Override - public synchronized long getPos() throws IOException { - if (closed) { - throw new IOException(FSExceptionMessages.STREAM_IS_CLOSED); - } - return fCursor - limit + bCursor; - } - - /** - * Seeks a different copy of the data. Returns true if - * found a new source, false otherwise. - * - * @throws IOException throws {@link IOException} if there is an error - */ - @Override - public boolean seekToNewSource(long l) throws IOException { - return false; - } - - @Override - public synchronized void close() throws IOException { - closed = true; - buffer = null; // de-reference the buffer so it can be GC'ed sooner - } - - /** - * Not supported by this stream. Throws {@link UnsupportedOperationException} - * - * @param readlimit ignored - */ - @Override - public synchronized void mark(int readlimit) { - throw new UnsupportedOperationException("mark()/reset() not supported on this stream"); - } - - /** - * Not supported by this stream. Throws {@link UnsupportedOperationException} - */ - @Override - public synchronized void reset() throws IOException { - throw new UnsupportedOperationException("mark()/reset() not supported on this stream"); - } - - /** - * gets whether mark and reset are supported by {@code ADLFileInputStream}. Always returns false. 
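Before this hdfs2 copy of the stream is removed, one contract above is easy to miss: available() reports the remaining bytes clamped to Integer.MAX_VALUE, so callers must not use it to detect EOF on files larger than 2 GB. A minimal restatement of the clamp (illustrative helper, not part of this diff):

    // contentLength and pos stand in for the stream's internal state.
    static int clampedAvailable(long contentLength, long pos) {
        long remaining = contentLength - pos;
        // Matches DFSInputStream.available(): never report more than Integer.MAX_VALUE.
        return remaining <= Integer.MAX_VALUE ? (int) remaining : Integer.MAX_VALUE;
    }
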
- * - * @return always {@code false} - */ - @Override - public boolean markSupported() { - return false; - } -} diff --git a/other/java/hdfs3/dependency-reduced-pom.xml b/other/java/hdfs3/dependency-reduced-pom.xml index ca53ffd22..590d725d0 100644 --- a/other/java/hdfs3/dependency-reduced-pom.xml +++ b/other/java/hdfs3/dependency-reduced-pom.xml @@ -15,8 +15,8 @@ maven-compiler-plugin - 7 - 7 + 8 + 8 @@ -120,6 +120,188 @@ + + + org.apache.hadoop + hadoop-client + 3.1.1 + provided + + + hadoop-hdfs-client + org.apache.hadoop + + + hadoop-yarn-api + org.apache.hadoop + + + hadoop-yarn-client + org.apache.hadoop + + + hadoop-mapreduce-client-core + org.apache.hadoop + + + hadoop-mapreduce-client-jobclient + org.apache.hadoop + + + hadoop-annotations + org.apache.hadoop + + + + + org.apache.hadoop + hadoop-common + 3.1.1 + provided + + + commons-cli + commons-cli + + + commons-math3 + org.apache.commons + + + commons-io + commons-io + + + commons-net + commons-net + + + commons-collections + commons-collections + + + javax.servlet-api + javax.servlet + + + jetty-server + org.eclipse.jetty + + + jetty-util + org.eclipse.jetty + + + jetty-servlet + org.eclipse.jetty + + + jetty-webapp + org.eclipse.jetty + + + jsp-api + javax.servlet.jsp + + + jersey-core + com.sun.jersey + + + jersey-servlet + com.sun.jersey + + + jersey-json + com.sun.jersey + + + jersey-server + com.sun.jersey + + + log4j + log4j + + + commons-lang + commons-lang + + + commons-beanutils + commons-beanutils + + + commons-configuration2 + org.apache.commons + + + commons-lang3 + org.apache.commons + + + slf4j-log4j12 + org.slf4j + + + avro + org.apache.avro + + + re2j + com.google.re2j + + + hadoop-auth + org.apache.hadoop + + + jsch + com.jcraft + + + curator-client + org.apache.curator + + + curator-recipes + org.apache.curator + + + htrace-core4 + org.apache.htrace + + + zookeeper + org.apache.zookeeper + + + commons-compress + org.apache.commons + + + kerb-simplekdc + org.apache.kerby + + + jackson-databind + com.fasterxml.jackson.core + + + stax2-api + org.codehaus.woodstox + + + woodstox-core + com.fasterxml.woodstox + + + hadoop-annotations + org.apache.hadoop + + + + ossrh @@ -127,7 +309,7 @@ - 1.2.4 + 1.6.4 3.1.1 diff --git a/other/java/hdfs3/pom.xml b/other/java/hdfs3/pom.xml index f5207213c..2b11035f5 100644 --- a/other/java/hdfs3/pom.xml +++ b/other/java/hdfs3/pom.xml @@ -5,7 +5,7 @@ 4.0.0 - 1.2.4 + 1.6.4 3.1.1 @@ -31,8 +31,8 @@ org.apache.maven.plugins maven-compiler-plugin - 7 - 7 + 8 + 8 @@ -147,6 +147,7 @@ org.apache.hadoop hadoop-client ${hadoop.version} + provided com.github.chrislusf @@ -157,6 +158,7 @@ org.apache.hadoop hadoop-common ${hadoop.version} + provided diff --git a/other/java/hdfs3/src/main/java/seaweed/hdfs/BufferedByteBufferReadableInputStream.java b/other/java/hdfs3/src/main/java/seaweed/hdfs/BufferedByteBufferReadableInputStream.java new file mode 100644 index 000000000..3d0b68a52 --- /dev/null +++ b/other/java/hdfs3/src/main/java/seaweed/hdfs/BufferedByteBufferReadableInputStream.java @@ -0,0 +1,25 @@ +package seaweed.hdfs; + +import org.apache.hadoop.fs.*; + +import java.io.IOException; +import java.nio.ByteBuffer; + +public class BufferedByteBufferReadableInputStream extends BufferedFSInputStream implements ByteBufferReadable { + + public BufferedByteBufferReadableInputStream(FSInputStream in, int size) { + super(in, size); + if (!(in instanceof Seekable) || !(in instanceof PositionedReadable)) { + throw new IllegalArgumentException("In is not an instance of Seekable or 
PositionedReadable"); + } + } + + @Override + public int read(ByteBuffer buf) throws IOException { + if (this.in instanceof ByteBufferReadable) { + return ((ByteBufferReadable)this.in).read(buf); + } else { + throw new UnsupportedOperationException("Byte-buffer read unsupported by input stream"); + } + } +} diff --git a/other/java/hdfs3/src/main/java/seaweed/hdfs/ReadBuffer.java b/other/java/hdfs3/src/main/java/seaweed/hdfs/ReadBuffer.java deleted file mode 100644 index 926d0b83b..000000000 --- a/other/java/hdfs3/src/main/java/seaweed/hdfs/ReadBuffer.java +++ /dev/null @@ -1,137 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package seaweed.hdfs; - -import java.util.concurrent.CountDownLatch; - -class ReadBuffer { - - private SeaweedInputStream stream; - private long offset; // offset within the file for the buffer - private int length; // actual length, set after the buffer is filles - private int requestedLength; // requested length of the read - private byte[] buffer; // the buffer itself - private int bufferindex = -1; // index in the buffers array in Buffer manager - private ReadBufferStatus status; // status of the buffer - private CountDownLatch latch = null; // signaled when the buffer is done reading, so any client - // waiting on this buffer gets unblocked - - // fields to help with eviction logic - private long timeStamp = 0; // tick at which buffer became available to read - private boolean isFirstByteConsumed = false; - private boolean isLastByteConsumed = false; - private boolean isAnyByteConsumed = false; - - public SeaweedInputStream getStream() { - return stream; - } - - public void setStream(SeaweedInputStream stream) { - this.stream = stream; - } - - public long getOffset() { - return offset; - } - - public void setOffset(long offset) { - this.offset = offset; - } - - public int getLength() { - return length; - } - - public void setLength(int length) { - this.length = length; - } - - public int getRequestedLength() { - return requestedLength; - } - - public void setRequestedLength(int requestedLength) { - this.requestedLength = requestedLength; - } - - public byte[] getBuffer() { - return buffer; - } - - public void setBuffer(byte[] buffer) { - this.buffer = buffer; - } - - public int getBufferindex() { - return bufferindex; - } - - public void setBufferindex(int bufferindex) { - this.bufferindex = bufferindex; - } - - public ReadBufferStatus getStatus() { - return status; - } - - public void setStatus(ReadBufferStatus status) { - this.status = status; - } - - public CountDownLatch getLatch() { - return latch; - } - - public void setLatch(CountDownLatch latch) { - this.latch = latch; - } - - public long getTimeStamp() { - return timeStamp; - } - - public void setTimeStamp(long timeStamp) { - 
this.timeStamp = timeStamp; - } - - public boolean isFirstByteConsumed() { - return isFirstByteConsumed; - } - - public void setFirstByteConsumed(boolean isFirstByteConsumed) { - this.isFirstByteConsumed = isFirstByteConsumed; - } - - public boolean isLastByteConsumed() { - return isLastByteConsumed; - } - - public void setLastByteConsumed(boolean isLastByteConsumed) { - this.isLastByteConsumed = isLastByteConsumed; - } - - public boolean isAnyByteConsumed() { - return isAnyByteConsumed; - } - - public void setAnyByteConsumed(boolean isAnyByteConsumed) { - this.isAnyByteConsumed = isAnyByteConsumed; - } - -} diff --git a/other/java/hdfs3/src/main/java/seaweed/hdfs/ReadBufferManager.java b/other/java/hdfs3/src/main/java/seaweed/hdfs/ReadBufferManager.java deleted file mode 100644 index 5b1e21529..000000000 --- a/other/java/hdfs3/src/main/java/seaweed/hdfs/ReadBufferManager.java +++ /dev/null @@ -1,394 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package seaweed.hdfs; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.util.Collection; -import java.util.LinkedList; -import java.util.Queue; -import java.util.Stack; -import java.util.concurrent.CountDownLatch; - -/** - * The Read Buffer Manager for Rest AbfsClient. - */ -final class ReadBufferManager { - private static final Logger LOGGER = LoggerFactory.getLogger(ReadBufferManager.class); - - private static final int NUM_BUFFERS = 16; - private static final int BLOCK_SIZE = 4 * 1024 * 1024; - private static final int NUM_THREADS = 8; - private static final int THRESHOLD_AGE_MILLISECONDS = 3000; // have to see if 3 seconds is a good threshold - - private Thread[] threads = new Thread[NUM_THREADS]; - private byte[][] buffers; // array of byte[] buffers, to hold the data that is read - private Stack freeList = new Stack<>(); // indices in buffers[] array that are available - - private Queue readAheadQueue = new LinkedList<>(); // queue of requests that are not picked up by any worker thread yet - private LinkedList inProgressList = new LinkedList<>(); // requests being processed by worker threads - private LinkedList completedReadList = new LinkedList<>(); // buffers available for reading - private static final ReadBufferManager BUFFER_MANAGER; // singleton, initialized in static initialization block - - static { - BUFFER_MANAGER = new ReadBufferManager(); - BUFFER_MANAGER.init(); - } - - static ReadBufferManager getBufferManager() { - return BUFFER_MANAGER; - } - - private void init() { - buffers = new byte[NUM_BUFFERS][]; - for (int i = 0; i < NUM_BUFFERS; i++) { - buffers[i] = new byte[BLOCK_SIZE]; // same buffers are reused. The byte array never goes back to GC - freeList.add(i); - } - for (int i = 0; i < NUM_THREADS; i++) { - Thread t = new Thread(new ReadBufferWorker(i)); - t.setDaemon(true); - threads[i] = t; - t.setName("SeaweedFS-prefetch-" + i); - t.start(); - } - ReadBufferWorker.UNLEASH_WORKERS.countDown(); - } - - // hide instance constructor - private ReadBufferManager() { - } - - - /* - * - * SeaweedInputStream-facing methods - * - */ - - - /** - * {@link SeaweedInputStream} calls this method to queue read-aheads. 
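The init() above pins NUM_BUFFERS block-sized arrays for the lifetime of the process and recycles them through freeList, so the prefetch path never allocates on the hot path; the worker threads are daemons so they do not block JVM shutdown. The pooling pattern in isolation (names are illustrative, not the SeaweedFS API):

    import java.util.ArrayDeque;
    import java.util.Deque;

    // Minimal fixed buffer pool: allocate once, hand out indices, recycle forever.
    class FixedBufferPool {
        private final byte[][] buffers;
        private final Deque<Integer> freeList = new ArrayDeque<>();

        FixedBufferPool(int numBuffers, int blockSize) {
            buffers = new byte[numBuffers][];
            for (int i = 0; i < numBuffers; i++) {
                buffers[i] = new byte[blockSize]; // reused; never handed back to GC
                freeList.push(i);
            }
        }

        synchronized Integer acquire() { // null when exhausted; caller may try to evict
            return freeList.isEmpty() ? null : freeList.pop();
        }

        synchronized void release(int index) {
            freeList.push(index);
        }

        byte[] get(int index) {
            return buffers[index];
        }
    }
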
- * - * @param stream The {@link SeaweedInputStream} for which to do the read-ahead - * @param requestedOffset The offset in the file which shoukd be read - * @param requestedLength The length to read - */ - void queueReadAhead(final SeaweedInputStream stream, final long requestedOffset, final int requestedLength) { - if (LOGGER.isTraceEnabled()) { - LOGGER.trace("Start Queueing readAhead for {} offset {} length {}", - stream.getPath(), requestedOffset, requestedLength); - } - ReadBuffer buffer; - synchronized (this) { - if (isAlreadyQueued(stream, requestedOffset)) { - return; // already queued, do not queue again - } - if (freeList.isEmpty() && !tryEvict()) { - return; // no buffers available, cannot queue anything - } - - buffer = new ReadBuffer(); - buffer.setStream(stream); - buffer.setOffset(requestedOffset); - buffer.setLength(0); - buffer.setRequestedLength(requestedLength); - buffer.setStatus(ReadBufferStatus.NOT_AVAILABLE); - buffer.setLatch(new CountDownLatch(1)); - - Integer bufferIndex = freeList.pop(); // will return a value, since we have checked size > 0 already - - buffer.setBuffer(buffers[bufferIndex]); - buffer.setBufferindex(bufferIndex); - readAheadQueue.add(buffer); - notifyAll(); - } - if (LOGGER.isTraceEnabled()) { - LOGGER.trace("Done q-ing readAhead for file {} offset {} buffer idx {}", - stream.getPath(), requestedOffset, buffer.getBufferindex()); - } - } - - - /** - * {@link SeaweedInputStream} calls this method read any bytes already available in a buffer (thereby saving a - * remote read). This returns the bytes if the data already exists in buffer. If there is a buffer that is reading - * the requested offset, then this method blocks until that read completes. If the data is queued in a read-ahead - * but not picked up by a worker thread yet, then it cancels that read-ahead and reports cache miss. This is because - * depending on worker thread availability, the read-ahead may take a while - the calling thread can do it's own - * read to get the data faster (copmared to the read waiting in queue for an indeterminate amount of time). - * - * @param stream the file to read bytes for - * @param position the offset in the file to do a read for - * @param length the length to read - * @param buffer the buffer to read data into. Note that the buffer will be written into from offset 0. 
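The producer side of queueReadAhead(...) above is driven by the loop in the stream's readInternal; restated compactly against the same signatures (a sketch, not a new API):

    // Enqueue up to 'depth' sequential block-sized prefetches starting at 'position'.
    static void queueSequentialReadAheads(SeaweedInputStream stream, long position,
                                          long contentLength, int blockSize, int depth) {
        long nextOffset = position;
        while (depth > 0 && nextOffset < contentLength) {
            int nextSize = (int) Math.min((long) blockSize, contentLength - nextOffset);
            ReadBufferManager.getBufferManager().queueReadAhead(stream, nextOffset, nextSize);
            nextOffset += nextSize;
            depth--;
        }
    }
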
- * @return the number of bytes read - */ - int getBlock(final SeaweedInputStream stream, final long position, final int length, final byte[] buffer) { - // not synchronized, so have to be careful with locking - if (LOGGER.isTraceEnabled()) { - LOGGER.trace("getBlock for file {} position {} thread {}", - stream.getPath(), position, Thread.currentThread().getName()); - } - - waitForProcess(stream, position); - - int bytesRead = 0; - synchronized (this) { - bytesRead = getBlockFromCompletedQueue(stream, position, length, buffer); - } - if (bytesRead > 0) { - if (LOGGER.isTraceEnabled()) { - LOGGER.trace("Done read from Cache for {} position {} length {}", - stream.getPath(), position, bytesRead); - } - return bytesRead; - } - - // otherwise, just say we got nothing - calling thread can do its own read - return 0; - } - - /* - * - * Internal methods - * - */ - - private void waitForProcess(final SeaweedInputStream stream, final long position) { - ReadBuffer readBuf; - synchronized (this) { - clearFromReadAheadQueue(stream, position); - readBuf = getFromList(inProgressList, stream, position); - } - if (readBuf != null) { // if in in-progress queue, then block for it - try { - if (LOGGER.isTraceEnabled()) { - LOGGER.trace("got a relevant read buffer for file {} offset {} buffer idx {}", - stream.getPath(), readBuf.getOffset(), readBuf.getBufferindex()); - } - readBuf.getLatch().await(); // blocking wait on the caller stream's thread - // Note on correctness: readBuf gets out of inProgressList only in 1 place: after worker thread - // is done processing it (in doneReading). There, the latch is set after removing the buffer from - // inProgressList. So this latch is safe to be outside the synchronized block. - // Putting it in synchronized would result in a deadlock, since this thread would be holding the lock - // while waiting, so no one will be able to change any state. If this becomes more complex in the future, - // then the latch cane be removed and replaced with wait/notify whenever inProgressList is touched. - } catch (InterruptedException ex) { - Thread.currentThread().interrupt(); - } - if (LOGGER.isTraceEnabled()) { - LOGGER.trace("latch done for file {} buffer idx {} length {}", - stream.getPath(), readBuf.getBufferindex(), readBuf.getLength()); - } - } - } - - /** - * If any buffer in the completedlist can be reclaimed then reclaim it and return the buffer to free list. - * The objective is to find just one buffer - there is no advantage to evicting more than one. - * - * @return whether the eviction succeeeded - i.e., were we able to free up one buffer - */ - private synchronized boolean tryEvict() { - ReadBuffer nodeToEvict = null; - if (completedReadList.size() <= 0) { - return false; // there are no evict-able buffers - } - - // first, try buffers where all bytes have been consumed (approximated as first and last bytes consumed) - for (ReadBuffer buf : completedReadList) { - if (buf.isFirstByteConsumed() && buf.isLastByteConsumed()) { - nodeToEvict = buf; - break; - } - } - if (nodeToEvict != null) { - return evict(nodeToEvict); - } - - // next, try buffers where any bytes have been consumed (may be a bad idea? 
have to experiment and see) - for (ReadBuffer buf : completedReadList) { - if (buf.isAnyByteConsumed()) { - nodeToEvict = buf; - break; - } - } - - if (nodeToEvict != null) { - return evict(nodeToEvict); - } - - // next, try any old nodes that have not been consumed - long earliestBirthday = Long.MAX_VALUE; - for (ReadBuffer buf : completedReadList) { - if (buf.getTimeStamp() < earliestBirthday) { - nodeToEvict = buf; - earliestBirthday = buf.getTimeStamp(); - } - } - if ((currentTimeMillis() - earliestBirthday > THRESHOLD_AGE_MILLISECONDS) && (nodeToEvict != null)) { - return evict(nodeToEvict); - } - - // nothing can be evicted - return false; - } - - private boolean evict(final ReadBuffer buf) { - freeList.push(buf.getBufferindex()); - completedReadList.remove(buf); - if (LOGGER.isTraceEnabled()) { - LOGGER.trace("Evicting buffer idx {}; was used for file {} offset {} length {}", - buf.getBufferindex(), buf.getStream().getPath(), buf.getOffset(), buf.getLength()); - } - return true; - } - - private boolean isAlreadyQueued(final SeaweedInputStream stream, final long requestedOffset) { - // returns true if any part of the buffer is already queued - return (isInList(readAheadQueue, stream, requestedOffset) - || isInList(inProgressList, stream, requestedOffset) - || isInList(completedReadList, stream, requestedOffset)); - } - - private boolean isInList(final Collection list, final SeaweedInputStream stream, final long requestedOffset) { - return (getFromList(list, stream, requestedOffset) != null); - } - - private ReadBuffer getFromList(final Collection list, final SeaweedInputStream stream, final long requestedOffset) { - for (ReadBuffer buffer : list) { - if (buffer.getStream() == stream) { - if (buffer.getStatus() == ReadBufferStatus.AVAILABLE - && requestedOffset >= buffer.getOffset() - && requestedOffset < buffer.getOffset() + buffer.getLength()) { - return buffer; - } else if (requestedOffset >= buffer.getOffset() - && requestedOffset < buffer.getOffset() + buffer.getRequestedLength()) { - return buffer; - } - } - } - return null; - } - - private void clearFromReadAheadQueue(final SeaweedInputStream stream, final long requestedOffset) { - ReadBuffer buffer = getFromList(readAheadQueue, stream, requestedOffset); - if (buffer != null) { - readAheadQueue.remove(buffer); - notifyAll(); // lock is held in calling method - freeList.push(buffer.getBufferindex()); - } - } - - private int getBlockFromCompletedQueue(final SeaweedInputStream stream, final long position, final int length, - final byte[] buffer) { - ReadBuffer buf = getFromList(completedReadList, stream, position); - if (buf == null || position >= buf.getOffset() + buf.getLength()) { - return 0; - } - int cursor = (int) (position - buf.getOffset()); - int availableLengthInBuffer = buf.getLength() - cursor; - int lengthToCopy = Math.min(length, availableLengthInBuffer); - System.arraycopy(buf.getBuffer(), cursor, buffer, 0, lengthToCopy); - if (cursor == 0) { - buf.setFirstByteConsumed(true); - } - if (cursor + lengthToCopy == buf.getLength()) { - buf.setLastByteConsumed(true); - } - buf.setAnyByteConsumed(true); - return lengthToCopy; - } - - /* - * - * ReadBufferWorker-thread-facing methods - * - */ - - /** - * ReadBufferWorker thread calls this to get the next buffer that it should work on. 
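tryEvict() above prefers, in order: buffers whose first and last bytes were both consumed, then any partially consumed buffer, then the oldest unconsumed buffer once it passes THRESHOLD_AGE_MILLISECONDS. The selection condensed into one helper (same ReadBuffer accessors; a sketch only):

    import java.util.List;

    // Returns the buffer to evict, or null if nothing qualifies yet.
    static ReadBuffer pickEvictionCandidate(List<ReadBuffer> completed, long nowMs, long thresholdMs) {
        for (ReadBuffer buf : completed) {                 // tier 1: fully consumed
            if (buf.isFirstByteConsumed() && buf.isLastByteConsumed()) return buf;
        }
        for (ReadBuffer buf : completed) {                 // tier 2: partially consumed
            if (buf.isAnyByteConsumed()) return buf;
        }
        ReadBuffer oldest = null;                          // tier 3: oldest, if aged out
        for (ReadBuffer buf : completed) {
            if (oldest == null || buf.getTimeStamp() < oldest.getTimeStamp()) oldest = buf;
        }
        return (oldest != null && nowMs - oldest.getTimeStamp() > thresholdMs) ? oldest : null;
    }
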
- * - * @return {@link ReadBuffer} - * @throws InterruptedException if thread is interrupted - */ - ReadBuffer getNextBlockToRead() throws InterruptedException { - ReadBuffer buffer = null; - synchronized (this) { - //buffer = readAheadQueue.take(); // blocking method - while (readAheadQueue.size() == 0) { - wait(); - } - buffer = readAheadQueue.remove(); - notifyAll(); - if (buffer == null) { - return null; // should never happen - } - buffer.setStatus(ReadBufferStatus.READING_IN_PROGRESS); - inProgressList.add(buffer); - } - if (LOGGER.isTraceEnabled()) { - LOGGER.trace("ReadBufferWorker picked file {} for offset {}", - buffer.getStream().getPath(), buffer.getOffset()); - } - return buffer; - } - - /** - * ReadBufferWorker thread calls this method to post completion. - * - * @param buffer the buffer whose read was completed - * @param result the {@link ReadBufferStatus} after the read operation in the worker thread - * @param bytesActuallyRead the number of bytes that the worker thread was actually able to read - */ - void doneReading(final ReadBuffer buffer, final ReadBufferStatus result, final int bytesActuallyRead) { - if (LOGGER.isTraceEnabled()) { - LOGGER.trace("ReadBufferWorker completed file {} for offset {} bytes {}", - buffer.getStream().getPath(), buffer.getOffset(), bytesActuallyRead); - } - synchronized (this) { - inProgressList.remove(buffer); - if (result == ReadBufferStatus.AVAILABLE && bytesActuallyRead > 0) { - buffer.setStatus(ReadBufferStatus.AVAILABLE); - buffer.setTimeStamp(currentTimeMillis()); - buffer.setLength(bytesActuallyRead); - completedReadList.add(buffer); - } else { - freeList.push(buffer.getBufferindex()); - // buffer should go out of scope after the end of the calling method in ReadBufferWorker, and eligible for GC - } - } - //outside the synchronized, since anyone receiving a wake-up from the latch must see safe-published results - buffer.getLatch().countDown(); // wake up waiting threads (if any) - } - - /** - * Similar to System.currentTimeMillis, except implemented with System.nanoTime(). - * System.currentTimeMillis can go backwards when system clock is changed (e.g., with NTP time synchronization), - * making it unsuitable for measuring time intervals. nanotime is strictly monotonically increasing per CPU core. - * Note: it is not monotonic across Sockets, and even within a CPU, its only the - * more recent parts which share a clock across all cores. - * - * @return current time in milliseconds - */ - private long currentTimeMillis() { - return System.nanoTime() / 1000 / 1000; - } -} diff --git a/other/java/hdfs3/src/main/java/seaweed/hdfs/ReadBufferWorker.java b/other/java/hdfs3/src/main/java/seaweed/hdfs/ReadBufferWorker.java deleted file mode 100644 index 6ffbc4644..000000000 --- a/other/java/hdfs3/src/main/java/seaweed/hdfs/ReadBufferWorker.java +++ /dev/null @@ -1,70 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
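The currentTimeMillis() helper above exists because buffer ages must be measured with a monotonic clock: System.currentTimeMillis() can jump backwards under NTP adjustment, while System.nanoTime() cannot. The idiom in two lines (illustrative):

    long startMs = System.nanoTime() / 1_000_000;          // monotonic "now" in ms
    // ... read, prefetch, etc. ...
    long ageMs = System.nanoTime() / 1_000_000 - startMs;  // safe interval measurement
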
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package seaweed.hdfs; - -import java.util.concurrent.CountDownLatch; - -class ReadBufferWorker implements Runnable { - - protected static final CountDownLatch UNLEASH_WORKERS = new CountDownLatch(1); - private int id; - - ReadBufferWorker(final int id) { - this.id = id; - } - - /** - * return the ID of ReadBufferWorker. - */ - public int getId() { - return this.id; - } - - /** - * Waits until a buffer becomes available in ReadAheadQueue. - * Once a buffer becomes available, reads the file specified in it and then posts results back to buffer manager. - * Rinse and repeat. Forever. - */ - public void run() { - try { - UNLEASH_WORKERS.await(); - } catch (InterruptedException ex) { - Thread.currentThread().interrupt(); - } - ReadBufferManager bufferManager = ReadBufferManager.getBufferManager(); - ReadBuffer buffer; - while (true) { - try { - buffer = bufferManager.getNextBlockToRead(); // blocks, until a buffer is available for this thread - } catch (InterruptedException ex) { - Thread.currentThread().interrupt(); - return; - } - if (buffer != null) { - try { - // do the actual read, from the file. - int bytesRead = buffer.getStream().readRemote(buffer.getOffset(), buffer.getBuffer(), 0, buffer.getRequestedLength()); - bufferManager.doneReading(buffer, ReadBufferStatus.AVAILABLE, bytesRead); // post result back to ReadBufferManager - } catch (Exception ex) { - bufferManager.doneReading(buffer, ReadBufferStatus.READ_FAILED, 0); - } - } - } - } -} diff --git a/other/java/hdfs2/src/main/java/seaweed/hdfs/ReadBufferStatus.java b/other/java/hdfs3/src/main/java/seaweed/hdfs/SeaweedAbstractFileSystem.java similarity index 64% rename from other/java/hdfs2/src/main/java/seaweed/hdfs/ReadBufferStatus.java rename to other/java/hdfs3/src/main/java/seaweed/hdfs/SeaweedAbstractFileSystem.java index d63674977..e021401aa 100644 --- a/other/java/hdfs2/src/main/java/seaweed/hdfs/ReadBufferStatus.java +++ b/other/java/hdfs3/src/main/java/seaweed/hdfs/SeaweedAbstractFileSystem.java @@ -18,12 +18,18 @@ package seaweed.hdfs; -/** - * The ReadBufferStatus for Rest AbfsClient - */ -public enum ReadBufferStatus { - NOT_AVAILABLE, // buffers sitting in readaheadqueue have this stats - READING_IN_PROGRESS, // reading is in progress on this buffer. Buffer should be in inProgressList - AVAILABLE, // data is available in buffer. It should be in completedList - READ_FAILED // read completed, but failed. 
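The new SeaweedAbstractFileSystem delegates to SeaweedFileSystem so the newer FileContext API can reach SeaweedFS. Hadoop discovers both implementations through configuration keys following its fs.&lt;scheme&gt;.impl / fs.AbstractFileSystem.&lt;scheme&gt;.impl convention; a wiring sketch (the "seaweedfs" scheme comes from the constructor above, the key names from Hadoop's convention):

    import org.apache.hadoop.conf.Configuration;

    Configuration conf = new Configuration();
    conf.set("fs.seaweedfs.impl", "seaweed.hdfs.SeaweedFileSystem");
    conf.set("fs.AbstractFileSystem.seaweedfs.impl", "seaweed.hdfs.SeaweedAbstractFileSystem");
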
+import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.DelegateToFileSystem; + +import java.io.IOException; +import java.net.URI; +import java.net.URISyntaxException; + +public class SeaweedAbstractFileSystem extends DelegateToFileSystem { + + SeaweedAbstractFileSystem(final URI uri, final Configuration conf) + throws IOException, URISyntaxException { + super(uri, new SeaweedFileSystem(), conf, "seaweedfs", false); + } + } diff --git a/other/java/hdfs3/src/main/java/seaweed/hdfs/SeaweedFileSystem.java b/other/java/hdfs3/src/main/java/seaweed/hdfs/SeaweedFileSystem.java index c12da8261..25395db7a 100644 --- a/other/java/hdfs3/src/main/java/seaweed/hdfs/SeaweedFileSystem.java +++ b/other/java/hdfs3/src/main/java/seaweed/hdfs/SeaweedFileSystem.java @@ -5,31 +5,31 @@ import org.apache.hadoop.fs.*; import org.apache.hadoop.fs.permission.AclEntry; import org.apache.hadoop.fs.permission.AclStatus; import org.apache.hadoop.fs.permission.FsPermission; -import org.apache.hadoop.security.AccessControlException; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.util.Progressable; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import seaweedfs.client.FilerProto; import java.io.FileNotFoundException; import java.io.IOException; -import java.io.InputStream; import java.io.OutputStream; import java.net.URI; import java.util.EnumSet; import java.util.List; import java.util.Map; -import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY; - public class SeaweedFileSystem extends FileSystem { - public static final int FS_SEAWEED_DEFAULT_PORT = 8888; public static final String FS_SEAWEED_FILER_HOST = "fs.seaweed.filer.host"; public static final String FS_SEAWEED_FILER_PORT = "fs.seaweed.filer.port"; + public static final int FS_SEAWEED_DEFAULT_PORT = 8888; + public static final String FS_SEAWEED_BUFFER_SIZE = "fs.seaweed.buffer.size"; + public static final String FS_SEAWEED_REPLICATION = "fs.seaweed.replication"; + public static final String FS_SEAWEED_VOLUME_SERVER_ACCESS = "fs.seaweed.volume.server.access"; + public static final int FS_SEAWEED_DEFAULT_BUFFER_SIZE = 4 * 1024 * 1024; private static final Logger LOG = LoggerFactory.getLogger(SeaweedFileSystem.class); - private static int BUFFER_SIZE = 16 * 1024 * 1024; private URI uri; private Path workingDirectory = new Path("/"); @@ -60,15 +60,19 @@ public class SeaweedFileSystem extends FileSystem { port = (port == -1) ? 
FS_SEAWEED_DEFAULT_PORT : port; conf.setInt(FS_SEAWEED_FILER_PORT, port); - conf.setInt(IO_FILE_BUFFER_SIZE_KEY, BUFFER_SIZE); - setConf(conf); this.uri = uri; - seaweedFileSystemStore = new SeaweedFileSystemStore(host, port); + seaweedFileSystemStore = new SeaweedFileSystemStore(host, port, conf); } + @Override + public void close() throws IOException { + super.close(); + this.seaweedFileSystemStore.close(); + } + @Override public FSDataInputStream open(Path path, int bufferSize) throws IOException { @@ -77,8 +81,9 @@ public class SeaweedFileSystem extends FileSystem { path = qualify(path); try { - InputStream inputStream = seaweedFileSystemStore.openFileForRead(path, statistics, bufferSize); - return new FSDataInputStream(inputStream); + int seaweedBufferSize = this.getConf().getInt(FS_SEAWEED_BUFFER_SIZE, FS_SEAWEED_DEFAULT_BUFFER_SIZE); + FSInputStream inputStream = seaweedFileSystemStore.openFileForRead(path, statistics); + return new FSDataInputStream(new BufferedByteBufferReadableInputStream(inputStream, 4 * seaweedBufferSize)); } catch (Exception ex) { LOG.warn("open path: {} bufferSize:{}", path, bufferSize, ex); return null; @@ -94,8 +99,9 @@ public class SeaweedFileSystem extends FileSystem { path = qualify(path); try { - String replicaPlacement = String.format("%03d", replication - 1); - OutputStream outputStream = seaweedFileSystemStore.createFile(path, overwrite, permission, bufferSize, replicaPlacement); + String replicaPlacement = this.getConf().get(FS_SEAWEED_REPLICATION, String.format("%03d", replication - 1)); + int seaweedBufferSize = this.getConf().getInt(FS_SEAWEED_BUFFER_SIZE, FS_SEAWEED_DEFAULT_BUFFER_SIZE); + OutputStream outputStream = seaweedFileSystemStore.createFile(path, overwrite, permission, seaweedBufferSize, replicaPlacement); return new FSDataOutputStream(outputStream, statistics); } catch (Exception ex) { LOG.warn("create path: {} bufferSize:{} blockSize:{}", path, bufferSize, blockSize, ex); @@ -105,8 +111,9 @@ public class SeaweedFileSystem extends FileSystem { /** * {@inheritDoc} + * * @throws FileNotFoundException if the parent directory is not present -or - * is not a directory. + * is not a directory. 
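open() and create() now take their buffer size and replica placement from the configuration rather than hard-coded values, and the store honors a volume-server access mode. A usage sketch with illustrative values (the key strings are the constants introduced above):

    import org.apache.hadoop.conf.Configuration;

    Configuration conf = new Configuration();
    conf.setInt("fs.seaweed.buffer.size", 8 * 1024 * 1024);    // default is 4 MB
    conf.set("fs.seaweed.replication", "001");                 // SeaweedFS placement string; overrides the Hadoop replication factor
    conf.set("fs.seaweed.volume.server.access", "filerProxy"); // or "direct" (default) / "publicUrl"
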
*/ @Override public FSDataOutputStream createNonRecursive(Path path, @@ -123,9 +130,10 @@ public class SeaweedFileSystem extends FileSystem { throw new FileAlreadyExistsException("Not a directory: " + parent); } } + int seaweedBufferSize = this.getConf().getInt(FS_SEAWEED_BUFFER_SIZE, FS_SEAWEED_DEFAULT_BUFFER_SIZE); return create(path, permission, flags.contains(CreateFlag.OVERWRITE), bufferSize, - replication, blockSize, progress); + replication, seaweedBufferSize, progress); } @Override @@ -135,7 +143,8 @@ public class SeaweedFileSystem extends FileSystem { path = qualify(path); try { - OutputStream outputStream = seaweedFileSystemStore.createFile(path, false, null, bufferSize, ""); + int seaweedBufferSize = this.getConf().getInt(FS_SEAWEED_BUFFER_SIZE, FS_SEAWEED_DEFAULT_BUFFER_SIZE); + OutputStream outputStream = seaweedFileSystemStore.createFile(path, false, null, seaweedBufferSize, ""); return new FSDataOutputStream(outputStream, statistics); } catch (Exception ex) { LOG.warn("append path: {} bufferSize:{}", path, bufferSize, ex); @@ -144,7 +153,7 @@ public class SeaweedFileSystem extends FileSystem { } @Override - public boolean rename(Path src, Path dst) { + public boolean rename(Path src, Path dst) throws IOException { LOG.debug("rename path: {} => {}", src, dst); @@ -155,12 +164,13 @@ public class SeaweedFileSystem extends FileSystem { if (src.equals(dst)) { return true; } - FileStatus dstFileStatus = getFileStatus(dst); + FilerProto.Entry entry = seaweedFileSystemStore.lookupEntry(dst); - String sourceFileName = src.getName(); Path adjustedDst = dst; - if (dstFileStatus != null) { + if (entry != null) { + FileStatus dstFileStatus = getFileStatus(dst); + String sourceFileName = src.getName(); if (!dstFileStatus.isDirectory()) { return false; } @@ -175,18 +185,20 @@ public class SeaweedFileSystem extends FileSystem { } @Override - public boolean delete(Path path, boolean recursive) { + public boolean delete(Path path, boolean recursive) throws IOException { LOG.debug("delete path: {} recursive:{}", path, recursive); path = qualify(path); - FileStatus fileStatus = getFileStatus(path); + FilerProto.Entry entry = seaweedFileSystemStore.lookupEntry(path); - if (fileStatus == null) { + if (entry == null) { return true; } + FileStatus fileStatus = getFileStatus(path); + return seaweedFileSystemStore.deleteEntries(path, fileStatus.isDirectory(), recursive); } @@ -222,9 +234,9 @@ public class SeaweedFileSystem extends FileSystem { path = qualify(path); - FileStatus fileStatus = getFileStatus(path); + FilerProto.Entry entry = seaweedFileSystemStore.lookupEntry(path); - if (fileStatus == null) { + if (entry == null) { UserGroupInformation currentUser = UserGroupInformation.getCurrentUser(); return seaweedFileSystemStore.createDirectory(path, currentUser, @@ -233,6 +245,8 @@ public class SeaweedFileSystem extends FileSystem { } + FileStatus fileStatus = getFileStatus(path); + if (fileStatus.isDirectory()) { return true; } else { @@ -241,7 +255,7 @@ public class SeaweedFileSystem extends FileSystem { } @Override - public FileStatus getFileStatus(Path path) { + public FileStatus getFileStatus(Path path) throws IOException { LOG.debug("getFileStatus path: {}", path); @@ -335,9 +349,7 @@ public class SeaweedFileSystem extends FileSystem { @Override public void createSymlink(final Path target, final Path link, - final boolean createParent) throws AccessControlException, - FileAlreadyExistsException, FileNotFoundException, - ParentNotDirectoryException, UnsupportedFileSystemException, + final 
boolean createParent) throws IOException { // Supporting filesystems should override this method throw new UnsupportedOperationException( diff --git a/other/java/hdfs3/src/main/java/seaweed/hdfs/SeaweedFileSystemStore.java b/other/java/hdfs3/src/main/java/seaweed/hdfs/SeaweedFileSystemStore.java index 774c090e8..f4e8c9349 100644 --- a/other/java/hdfs3/src/main/java/seaweed/hdfs/SeaweedFileSystemStore.java +++ b/other/java/hdfs3/src/main/java/seaweed/hdfs/SeaweedFileSystemStore.java @@ -1,5 +1,7 @@ package seaweed.hdfs; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FSInputStream; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; @@ -7,30 +9,43 @@ import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.security.UserGroupInformation; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import seaweedfs.client.FilerClient; -import seaweedfs.client.FilerGrpcClient; -import seaweedfs.client.FilerProto; -import seaweedfs.client.SeaweedRead; +import seaweedfs.client.*; import java.io.FileNotFoundException; import java.io.IOException; -import java.io.InputStream; import java.io.OutputStream; import java.util.ArrayList; import java.util.Arrays; import java.util.List; +import static seaweed.hdfs.SeaweedFileSystem.*; + public class SeaweedFileSystemStore { private static final Logger LOG = LoggerFactory.getLogger(SeaweedFileSystemStore.class); - private FilerGrpcClient filerGrpcClient; private FilerClient filerClient; + private Configuration conf; - public SeaweedFileSystemStore(String host, int port) { + public SeaweedFileSystemStore(String host, int port, Configuration conf) { int grpcPort = 10000 + port; - filerGrpcClient = new FilerGrpcClient(host, grpcPort); - filerClient = new FilerClient(filerGrpcClient); + filerClient = new FilerClient(host, grpcPort); + this.conf = conf; + String volumeServerAccessMode = this.conf.get(FS_SEAWEED_VOLUME_SERVER_ACCESS, "direct"); + if (volumeServerAccessMode.equals("publicUrl")) { + filerClient.setAccessVolumeServerByPublicUrl(); + } else if (volumeServerAccessMode.equals("filerProxy")) { + filerClient.setAccessVolumeServerByFilerProxy(); + } + + } + + public void close() { + try { + this.filerClient.shutdown(); + } catch (InterruptedException e) { + e.printStackTrace(); + } } public static String getParentDirectory(Path path) { @@ -61,9 +76,19 @@ public class SeaweedFileSystemStore { ); } - public FileStatus[] listEntries(final Path path) { + public FileStatus[] listEntries(final Path path) throws IOException { LOG.debug("listEntries path: {}", path); + FileStatus pathStatus = getFileStatus(path); + + if (pathStatus == null) { + return new FileStatus[0]; + } + + if (!pathStatus.isDirectory()) { + return new FileStatus[]{pathStatus}; + } + List fileStatuses = new ArrayList(); List entries = filerClient.listEntries(path.toUri().getPath()); @@ -74,14 +99,16 @@ public class SeaweedFileSystemStore { fileStatuses.add(fileStatus); } + LOG.debug("listEntries path: {} size {}", fileStatuses, fileStatuses.size()); return fileStatuses.toArray(new FileStatus[0]); + } - public FileStatus getFileStatus(final Path path) { + public FileStatus getFileStatus(final Path path) throws IOException { FilerProto.Entry entry = lookupEntry(path); if (entry == null) { - return null; + throw new FileNotFoundException("File does not exist: " + path); } LOG.debug("doGetFileStatus path:{} entry:{}", path, entry); @@ -111,10 +138,10 @@ public class SeaweedFileSystemStore { 
private FileStatus doGetFileStatus(Path path, FilerProto.Entry entry) { FilerProto.FuseAttributes attributes = entry.getAttributes(); - long length = SeaweedRead.totalSize(entry.getChunksList()); + long length = SeaweedRead.fileSize(entry); boolean isDir = entry.getIsDirectory(); int block_replication = 1; - int blocksize = 512; + int blocksize = this.conf.getInt(FS_SEAWEED_BUFFER_SIZE, FS_SEAWEED_DEFAULT_BUFFER_SIZE); long modification_time = attributes.getMtime() * 1000; // milliseconds long access_time = 0; FsPermission permission = FsPermission.createImmutable((short) attributes.getFileMode()); @@ -124,7 +151,7 @@ public class SeaweedFileSystemStore { modification_time, access_time, permission, owner, group, null, path); } - private FilerProto.Entry lookupEntry(Path path) { + public FilerProto.Entry lookupEntry(Path path) { return filerClient.lookupEntry(getParentDirectory(path), path.getName()); @@ -170,9 +197,10 @@ public class SeaweedFileSystemStore { if (existingEntry != null) { entry = FilerProto.Entry.newBuilder(); entry.mergeFrom(existingEntry); + entry.clearContent(); entry.getAttributesBuilder().setMtime(now); LOG.debug("createFile merged entry path:{} entry:{} from:{}", path, entry, existingEntry); - writePosition = SeaweedRead.totalSize(existingEntry.getChunksList()); + writePosition = SeaweedRead.fileSize(existingEntry); replication = existingEntry.getAttributes().getReplication(); } } @@ -189,30 +217,27 @@ public class SeaweedFileSystemStore { .clearGroupName() .addAllGroupName(Arrays.asList(userGroupInformation.getGroupNames())) ); + SeaweedWrite.writeMeta(filerClient, getParentDirectory(path), entry); } - return new SeaweedOutputStream(filerGrpcClient, path, entry, writePosition, bufferSize, replication); + return new SeaweedHadoopOutputStream(filerClient, path.toString(), entry, writePosition, bufferSize, replication); } - public InputStream openFileForRead(final Path path, FileSystem.Statistics statistics, - int bufferSize) throws IOException { + public FSInputStream openFileForRead(final Path path, FileSystem.Statistics statistics) throws IOException { - LOG.debug("openFileForRead path:{} bufferSize:{}", path, bufferSize); + LOG.debug("openFileForRead path:{}", path); - int readAheadQueueDepth = 2; FilerProto.Entry entry = lookupEntry(path); if (entry == null) { throw new FileNotFoundException("read non-exist file " + path); } - return new SeaweedInputStream(filerGrpcClient, + return new SeaweedHadoopInputStream(filerClient, statistics, path.toUri().getPath(), - entry, - bufferSize, - readAheadQueueDepth); + entry); } public void setOwner(Path path, String owner, String group) { diff --git a/other/java/hdfs3/src/main/java/seaweed/hdfs/SeaweedHadoopInputStream.java b/other/java/hdfs3/src/main/java/seaweed/hdfs/SeaweedHadoopInputStream.java new file mode 100644 index 000000000..f26eae597 --- /dev/null +++ b/other/java/hdfs3/src/main/java/seaweed/hdfs/SeaweedHadoopInputStream.java @@ -0,0 +1,150 @@ +package seaweed.hdfs; + +// based on org.apache.hadoop.fs.azurebfs.services.AbfsInputStream + +import org.apache.hadoop.fs.ByteBufferReadable; +import org.apache.hadoop.fs.FSInputStream; +import org.apache.hadoop.fs.FileSystem.Statistics; +import seaweedfs.client.FilerClient; +import seaweedfs.client.FilerProto; +import seaweedfs.client.SeaweedInputStream; + +import java.io.EOFException; +import java.io.IOException; +import java.nio.ByteBuffer; + +public class SeaweedHadoopInputStream extends FSInputStream implements ByteBufferReadable { + + private final SeaweedInputStream 
seaweedInputStream; + private final Statistics statistics; + + public SeaweedHadoopInputStream( + final FilerClient filerClient, + final Statistics statistics, + final String path, + final FilerProto.Entry entry) throws IOException { + this.seaweedInputStream = new SeaweedInputStream(filerClient, path, entry); + this.statistics = statistics; + } + + @Override + public int read() throws IOException { + return seaweedInputStream.read(); + } + + @Override + public int read(final byte[] b, final int off, final int len) throws IOException { + return seaweedInputStream.read(b, off, len); + } + + // implement ByteBufferReadable + @Override + public synchronized int read(ByteBuffer buf) throws IOException { + int bytesRead = seaweedInputStream.read(buf); + + if (bytesRead > 0) { + if (statistics != null) { + statistics.incrementBytesRead(bytesRead); + } + } + + return bytesRead; + } + + /** + * Seek to given position in stream. + * + * @param n position to seek to + * @throws IOException if there is an error + * @throws EOFException if attempting to seek past end of file + */ + @Override + public synchronized void seek(long n) throws IOException { + seaweedInputStream.seek(n); + } + + @Override + public synchronized long skip(long n) throws IOException { + return seaweedInputStream.skip(n); + } + + /** + * Return the size of the remaining available bytes + * if the size is less than or equal to {@link Integer#MAX_VALUE}, + * otherwise, return {@link Integer#MAX_VALUE}. + *
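Because this stream implements ByteBufferReadable, FSDataInputStream can forward read(ByteBuffer) to it, which buffer-oriented readers rely on. A hypothetical caller (path and sizes are placeholders):

    import org.apache.hadoop.fs.FSDataInputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import java.io.IOException;
    import java.nio.ByteBuffer;

    static int readLeadingBytes(FileSystem fs, String file) throws IOException {
        try (FSDataInputStream in = fs.open(new Path(file))) {
            ByteBuffer buf = ByteBuffer.allocate(64 * 1024);
            return in.read(buf); // dispatches to SeaweedHadoopInputStream.read(ByteBuffer)
        }
    }
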

+     * This is to match the behavior of DFSInputStream.available(),
+     * which some clients may rely on (HBase write-ahead log reading in
+     * particular).
+     */
+    @Override
+    public synchronized int available() throws IOException {
+        return seaweedInputStream.available();
+    }
+
+    /**
+     * Returns the length of the file that this stream refers to. Note that the length returned is the length
+     * as of the time the Stream was opened. Specifically, if there have been subsequent appends to the file,
+     * they won't be reflected in the returned length.
+     *
+     * @return length of the file.
+     * @throws IOException if the stream is closed
+     */
+    public long length() throws IOException {
+        return seaweedInputStream.length();
+    }
+
+    /**
+     * Return the current offset from the start of the file.
+     *
+     * @throws IOException throws {@link IOException} if there is an error
+     */
+    @Override
+    public synchronized long getPos() throws IOException {
+        return seaweedInputStream.getPos();
+    }
+
+    /**
+     * Seeks a different copy of the data. Returns true if
+     * found a new source, false otherwise.
+     *
+     * @throws IOException throws {@link IOException} if there is an error
+     */
+    @Override
+    public boolean seekToNewSource(long l) throws IOException {
+        return false;
+    }
+
+    @Override
+    public synchronized void close() throws IOException {
+        seaweedInputStream.close();
+    }
+
+    /**
+     * Not supported by this stream. Throws {@link UnsupportedOperationException}
+     *
+     * @param readlimit ignored
+     */
+    @Override
+    public synchronized void mark(int readlimit) {
+        throw new UnsupportedOperationException("mark()/reset() not supported on this stream");
+    }
+
+    /**
+     * Not supported by this stream. Throws {@link UnsupportedOperationException}
+     */
+    @Override
+    public synchronized void reset() throws IOException {
+        throw new UnsupportedOperationException("mark()/reset() not supported on this stream");
+    }
+
+    /**
+     * Gets whether mark and reset are supported by this stream. Always returns false.
+     *
+     * @return always {@code false}
+     */
+    @Override
+    public boolean markSupported() {
+        return false;
+    }
+}
diff --git a/other/java/hdfs3/src/main/java/seaweed/hdfs/SeaweedHadoopOutputStream.java b/other/java/hdfs3/src/main/java/seaweed/hdfs/SeaweedHadoopOutputStream.java
new file mode 100644
index 000000000..1740312fe
--- /dev/null
+++ b/other/java/hdfs3/src/main/java/seaweed/hdfs/SeaweedHadoopOutputStream.java
@@ -0,0 +1,64 @@
+package seaweed.hdfs;
+
+// adapted from org.apache.hadoop.fs.azurebfs.services.AbfsOutputStream
+
+import org.apache.hadoop.fs.StreamCapabilities;
+import org.apache.hadoop.fs.Syncable;
+import seaweedfs.client.FilerClient;
+import seaweedfs.client.FilerProto;
+import seaweedfs.client.SeaweedOutputStream;
+
+import java.io.IOException;
+import java.util.Locale;
+
+public class SeaweedHadoopOutputStream extends SeaweedOutputStream implements Syncable, StreamCapabilities {
+
+    public SeaweedHadoopOutputStream(FilerClient filerClient, final String path, FilerProto.Entry.Builder entry,
+                                     final long position, final int bufferSize, final String replication) {
+        super(filerClient, path, entry, position, bufferSize, replication);
+    }
+
+    /**
+     * Similar to POSIX fsync, flush out the data in client's user buffer
+     * all the way to the disk device (but the disk may have it in its cache).
+     *
+     * @throws IOException if error occurs
+     */
+    @Override
+    public void hsync() throws IOException {
+        if (supportFlush) {
+            flushInternal();
+        }
+    }
+
+    /**
+     * Flush out the data in client's user buffer.
After the return of + * this call, new readers will see the data. + * + * @throws IOException if any error occurs + */ + @Override + public void hflush() throws IOException { + if (supportFlush) { + flushInternal(); + } + } + + /** + * Query the stream for a specific capability. + * + * @param capability string to query the stream support for. + * @return true for hsync and hflush. + */ + @Override + public boolean hasCapability(String capability) { + switch (capability.toLowerCase(Locale.ENGLISH)) { + case StreamCapabilities.HSYNC: + case StreamCapabilities.HFLUSH: + return supportFlush; + default: + return false; + } + } + +} diff --git a/other/java/hdfs3/src/main/java/seaweed/hdfs/SeaweedInputStream.java b/other/java/hdfs3/src/main/java/seaweed/hdfs/SeaweedInputStream.java deleted file mode 100644 index 90c14c772..000000000 --- a/other/java/hdfs3/src/main/java/seaweed/hdfs/SeaweedInputStream.java +++ /dev/null @@ -1,371 +0,0 @@ -package seaweed.hdfs; - -// based on org.apache.hadoop.fs.azurebfs.services.AbfsInputStream - -import com.google.common.base.Preconditions; -import org.apache.hadoop.fs.FSExceptionMessages; -import org.apache.hadoop.fs.FSInputStream; -import org.apache.hadoop.fs.FileSystem.Statistics; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import seaweedfs.client.FilerGrpcClient; -import seaweedfs.client.FilerProto; -import seaweedfs.client.SeaweedRead; - -import java.io.EOFException; -import java.io.IOException; -import java.util.List; - -public class SeaweedInputStream extends FSInputStream { - - private static final Logger LOG = LoggerFactory.getLogger(SeaweedInputStream.class); - - private final FilerGrpcClient filerGrpcClient; - private final Statistics statistics; - private final String path; - private final FilerProto.Entry entry; - private final List visibleIntervalList; - private final long contentLength; - private final int bufferSize; // default buffer size - private final int readAheadQueueDepth; // initialized in constructor - private final boolean readAheadEnabled; // whether enable readAhead; - - private byte[] buffer = null; // will be initialized on first use - - private long fCursor = 0; // cursor of buffer within file - offset of next byte to read from remote server - private long fCursorAfterLastRead = -1; - private int bCursor = 0; // cursor of read within buffer - offset of next byte to be returned from buffer - private int limit = 0; // offset of next byte to be read into buffer from service (i.e., upper marker+1 - // of valid bytes in buffer) - private boolean closed = false; - - public SeaweedInputStream( - final FilerGrpcClient filerGrpcClient, - final Statistics statistics, - final String path, - final FilerProto.Entry entry, - final int bufferSize, - final int readAheadQueueDepth) { - this.filerGrpcClient = filerGrpcClient; - this.statistics = statistics; - this.path = path; - this.entry = entry; - this.contentLength = SeaweedRead.totalSize(entry.getChunksList()); - this.bufferSize = bufferSize; - this.readAheadQueueDepth = (readAheadQueueDepth >= 0) ? 
readAheadQueueDepth : Runtime.getRuntime().availableProcessors(); - this.readAheadEnabled = true; - - this.visibleIntervalList = SeaweedRead.nonOverlappingVisibleIntervals(entry.getChunksList()); - - LOG.debug("new path:{} entry:{} visibleIntervalList:{}", path, entry, visibleIntervalList); - - } - - public String getPath() { - return path; - } - - @Override - public int read() throws IOException { - byte[] b = new byte[1]; - int numberOfBytesRead = read(b, 0, 1); - if (numberOfBytesRead < 0) { - return -1; - } else { - return (b[0] & 0xFF); - } - } - - @Override - public synchronized int read(final byte[] b, final int off, final int len) throws IOException { - int currentOff = off; - int currentLen = len; - int lastReadBytes; - int totalReadBytes = 0; - do { - lastReadBytes = readOneBlock(b, currentOff, currentLen); - if (lastReadBytes > 0) { - currentOff += lastReadBytes; - currentLen -= lastReadBytes; - totalReadBytes += lastReadBytes; - } - if (currentLen <= 0 || currentLen > b.length - currentOff) { - break; - } - } while (lastReadBytes > 0); - return totalReadBytes > 0 ? totalReadBytes : lastReadBytes; - } - - private int readOneBlock(final byte[] b, final int off, final int len) throws IOException { - if (closed) { - throw new IOException(FSExceptionMessages.STREAM_IS_CLOSED); - } - - Preconditions.checkNotNull(b); - - if (len == 0) { - return 0; - } - - if (this.available() == 0) { - return -1; - } - - if (off < 0 || len < 0 || len > b.length - off) { - throw new IndexOutOfBoundsException(); - } - - //If buffer is empty, then fill the buffer. - if (bCursor == limit) { - //If EOF, then return -1 - if (fCursor >= contentLength) { - return -1; - } - - long bytesRead = 0; - //reset buffer to initial state - i.e., throw away existing data - bCursor = 0; - limit = 0; - if (buffer == null) { - buffer = new byte[bufferSize]; - } - - // Enable readAhead when reading sequentially - if (-1 == fCursorAfterLastRead || fCursorAfterLastRead == fCursor || b.length >= bufferSize) { - bytesRead = readInternal(fCursor, buffer, 0, bufferSize, false); - } else { - bytesRead = readInternal(fCursor, buffer, 0, b.length, true); - } - - if (bytesRead == -1) { - return -1; - } - - limit += bytesRead; - fCursor += bytesRead; - fCursorAfterLastRead = fCursor; - } - - //If there is anything in the buffer, then return lesser of (requested bytes) and (bytes in buffer) - //(bytes returned may be less than requested) - int bytesRemaining = limit - bCursor; - int bytesToRead = Math.min(len, bytesRemaining); - System.arraycopy(buffer, bCursor, b, off, bytesToRead); - bCursor += bytesToRead; - if (statistics != null) { - statistics.incrementBytesRead(bytesToRead); - } - return bytesToRead; - } - - - private int readInternal(final long position, final byte[] b, final int offset, final int length, - final boolean bypassReadAhead) throws IOException { - if (readAheadEnabled && !bypassReadAhead) { - // try reading from read-ahead - if (offset != 0) { - throw new IllegalArgumentException("readahead buffers cannot have non-zero buffer offsets"); - } - int receivedBytes; - - // queue read-aheads - int numReadAheads = this.readAheadQueueDepth; - long nextSize; - long nextOffset = position; - while (numReadAheads > 0 && nextOffset < contentLength) { - nextSize = Math.min((long) bufferSize, contentLength - nextOffset); - ReadBufferManager.getBufferManager().queueReadAhead(this, nextOffset, (int) nextSize); - nextOffset = nextOffset + nextSize; - numReadAheads--; - } - - // try reading from buffers first - receivedBytes = 
ReadBufferManager.getBufferManager().getBlock(this, position, length, b); - if (receivedBytes > 0) { - return receivedBytes; - } - - // got nothing from read-ahead, do our own read now - receivedBytes = readRemote(position, b, offset, length); - return receivedBytes; - } else { - return readRemote(position, b, offset, length); - } - } - - int readRemote(long position, byte[] b, int offset, int length) throws IOException { - if (position < 0) { - throw new IllegalArgumentException("attempting to read from negative offset"); - } - if (position >= contentLength) { - return -1; // Hadoop prefers -1 to EOFException - } - if (b == null) { - throw new IllegalArgumentException("null byte array passed in to read() method"); - } - if (offset >= b.length) { - throw new IllegalArgumentException("offset greater than length of array"); - } - if (length < 0) { - throw new IllegalArgumentException("requested read length is less than zero"); - } - if (length > (b.length - offset)) { - throw new IllegalArgumentException("requested read length is more than will fit after requested offset in buffer"); - } - - long bytesRead = SeaweedRead.read(filerGrpcClient, visibleIntervalList, position, b, offset, length); - if (bytesRead > Integer.MAX_VALUE) { - throw new IOException("Unexpected Content-Length"); - } - return (int) bytesRead; - } - - /** - * Seek to given position in stream. - * - * @param n position to seek to - * @throws IOException if there is an error - * @throws EOFException if attempting to seek past end of file - */ - @Override - public synchronized void seek(long n) throws IOException { - if (closed) { - throw new IOException(FSExceptionMessages.STREAM_IS_CLOSED); - } - if (n < 0) { - throw new EOFException(FSExceptionMessages.NEGATIVE_SEEK); - } - if (n > contentLength) { - throw new EOFException(FSExceptionMessages.CANNOT_SEEK_PAST_EOF); - } - - if (n >= fCursor - limit && n <= fCursor) { // within buffer - bCursor = (int) (n - (fCursor - limit)); - return; - } - - // next read will read from here - fCursor = n; - - //invalidate buffer - limit = 0; - bCursor = 0; - } - - @Override - public synchronized long skip(long n) throws IOException { - if (closed) { - throw new IOException(FSExceptionMessages.STREAM_IS_CLOSED); - } - long currentPos = getPos(); - if (currentPos == contentLength) { - if (n > 0) { - throw new EOFException(FSExceptionMessages.CANNOT_SEEK_PAST_EOF); - } - } - long newPos = currentPos + n; - if (newPos < 0) { - newPos = 0; - n = newPos - currentPos; - } - if (newPos > contentLength) { - newPos = contentLength; - n = newPos - currentPos; - } - seek(newPos); - return n; - } - - /** - * Return the size of the remaining available bytes - * if the size is less than or equal to {@link Integer#MAX_VALUE}, - * otherwise, return {@link Integer#MAX_VALUE}. - *

- * This is to match the behavior of DFSInputStream.available(), - * which some clients may rely on (HBase write-ahead log reading in - * particular). - */ - @Override - public synchronized int available() throws IOException { - if (closed) { - throw new IOException( - FSExceptionMessages.STREAM_IS_CLOSED); - } - final long remaining = this.contentLength - this.getPos(); - return remaining <= Integer.MAX_VALUE - ? (int) remaining : Integer.MAX_VALUE; - } - - /** - * Returns the length of the file that this stream refers to. Note that the length returned is the length - * as of the time the stream was opened. Specifically, if there have been subsequent appends to the file, - * they won't be reflected in the returned length. - * - * @return length of the file. - * @throws IOException if the stream is closed - */ - public long length() throws IOException { - if (closed) { - throw new IOException(FSExceptionMessages.STREAM_IS_CLOSED); - } - return contentLength; - } - - /** - * Return the current offset from the start of the file. - * - * @throws IOException throws {@link IOException} if there is an error - */ - @Override - public synchronized long getPos() throws IOException { - if (closed) { - throw new IOException(FSExceptionMessages.STREAM_IS_CLOSED); - } - return fCursor - limit + bCursor; - } - - /** - * Seeks a different copy of the data. Returns true if - * found a new source, false otherwise. - * - * @throws IOException throws {@link IOException} if there is an error - */ - @Override - public boolean seekToNewSource(long l) throws IOException { - return false; - } - - @Override - public synchronized void close() throws IOException { - closed = true; - buffer = null; // de-reference the buffer so it can be GC'ed sooner - } - - /** - * Not supported by this stream. Throws {@link UnsupportedOperationException} - * - * @param readlimit ignored - */ - @Override - public synchronized void mark(int readlimit) { - throw new UnsupportedOperationException("mark()/reset() not supported on this stream"); - } - - /** - * Not supported by this stream. Throws {@link UnsupportedOperationException} - */ - @Override - public synchronized void reset() throws IOException { - throw new UnsupportedOperationException("mark()/reset() not supported on this stream"); - } - - /** - * Gets whether mark and reset are supported by {@code SeaweedInputStream}. Always returns false. 
- * - * @return always {@code false} - */ - @Override - public boolean markSupported() { - return false; - } -} diff --git a/other/java/hdfs3/src/main/java/seaweed/hdfs/SeaweedOutputStream.java b/other/java/hdfs3/src/main/java/seaweed/hdfs/SeaweedOutputStream.java deleted file mode 100644 index 4f307ff96..000000000 --- a/other/java/hdfs3/src/main/java/seaweed/hdfs/SeaweedOutputStream.java +++ /dev/null @@ -1,335 +0,0 @@ -package seaweed.hdfs; - -// adapted from org.apache.hadoop.fs.azurebfs.services.AbfsOutputStream - -import com.google.common.base.Preconditions; -import org.apache.hadoop.fs.FSExceptionMessages; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.fs.StreamCapabilities; -import org.apache.hadoop.fs.Syncable; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import seaweedfs.client.FilerGrpcClient; -import seaweedfs.client.FilerProto; -import seaweedfs.client.SeaweedWrite; - -import java.io.IOException; -import java.io.InterruptedIOException; -import java.io.OutputStream; -import java.util.Locale; -import java.util.concurrent.Callable; -import java.util.concurrent.ConcurrentLinkedDeque; -import java.util.concurrent.ExecutorCompletionService; -import java.util.concurrent.Future; -import java.util.concurrent.LinkedBlockingQueue; -import java.util.concurrent.ThreadPoolExecutor; -import java.util.concurrent.TimeUnit; - -import static seaweed.hdfs.SeaweedFileSystemStore.getParentDirectory; - -public class SeaweedOutputStream extends OutputStream implements Syncable, StreamCapabilities { - - private static final Logger LOG = LoggerFactory.getLogger(SeaweedOutputStream.class); - - private final FilerGrpcClient filerGrpcClient; - private final Path path; - private final int bufferSize; - private final int maxConcurrentRequestCount; - private final ThreadPoolExecutor threadExecutor; - private final ExecutorCompletionService completionService; - private FilerProto.Entry.Builder entry; - private long position; - private boolean closed; - private boolean supportFlush = true; - private volatile IOException lastError; - private long lastFlushOffset; - private long lastTotalAppendOffset = 0; - private byte[] buffer; - private int bufferIndex; - private ConcurrentLinkedDeque writeOperations; - private String replication = "000"; - - public SeaweedOutputStream(FilerGrpcClient filerGrpcClient, final Path path, FilerProto.Entry.Builder entry, - final long position, final int bufferSize, final String replication) { - this.filerGrpcClient = filerGrpcClient; - this.replication = replication; - this.path = path; - this.position = position; - this.closed = false; - this.lastError = null; - this.lastFlushOffset = 0; - this.bufferSize = bufferSize; - this.buffer = new byte[bufferSize]; - this.bufferIndex = 0; - this.writeOperations = new ConcurrentLinkedDeque<>(); - - this.maxConcurrentRequestCount = 4 * Runtime.getRuntime().availableProcessors(); - - this.threadExecutor - = new ThreadPoolExecutor(maxConcurrentRequestCount, - maxConcurrentRequestCount, - 10L, - TimeUnit.SECONDS, - new LinkedBlockingQueue()); - this.completionService = new ExecutorCompletionService<>(this.threadExecutor); - - this.entry = entry; - - } - - private synchronized void flushWrittenBytesToServiceInternal(final long offset) throws IOException { - - LOG.debug("SeaweedWrite.writeMeta path: {} entry:{}", path, entry); - - try { - SeaweedWrite.writeMeta(filerGrpcClient, getParentDirectory(path), entry); - } catch (Exception ex) { - throw new IOException(ex); - } - this.lastFlushOffset = offset; - } - - 
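SeaweedOutputStream implements Hadoop's Syncable and StreamCapabilities contracts (hflush, hsync, and hasCapability, shown in the methods below). Downstream Hadoop clients are expected to probe hasCapability before relying on flush durability. The following is a minimal sketch of that calling pattern, assuming a Hadoop 3 FileSystem already wired to the SeaweedFS connector; the fs, path, and payload names here are hypothetical:

    import java.io.IOException;

    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.StreamCapabilities;

    public class HflushSketch {
        // Hypothetical helper: write a payload and, only if the stream
        // advertises hflush support (as SeaweedOutputStream.hasCapability
        // does when supportFlush is true), force it to be visible to
        // new readers before the stream is closed.
        static void writeVisibly(FileSystem fs, Path path, byte[] payload) throws IOException {
            try (FSDataOutputStream out = fs.create(path)) {
                out.write(payload);
                if (out.hasCapability(StreamCapabilities.HFLUSH)) {
                    out.hflush(); // drains the write buffer and commits filer metadata
                }
            }
        }
    }

Note the asymmetry in the class below: hflush() and hsync() map to flushInternal(), a synchronous buffer drain plus a metadata write, whereas plain flush() only queues the buffer asynchronously via flushInternalAsync().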
@Override - public void write(final int byteVal) throws IOException { - write(new byte[]{(byte) (byteVal & 0xFF)}); - } - - @Override - public synchronized void write(final byte[] data, final int off, final int length) - throws IOException { - maybeThrowLastError(); - - Preconditions.checkArgument(data != null, "null data"); - - if (off < 0 || length < 0 || length > data.length - off) { - throw new IndexOutOfBoundsException(); - } - - int currentOffset = off; - int writableBytes = bufferSize - bufferIndex; - int numberOfBytesToWrite = length; - - while (numberOfBytesToWrite > 0) { - if (writableBytes <= numberOfBytesToWrite) { - System.arraycopy(data, currentOffset, buffer, bufferIndex, writableBytes); - bufferIndex += writableBytes; - writeCurrentBufferToService(); - currentOffset += writableBytes; - numberOfBytesToWrite = numberOfBytesToWrite - writableBytes; - } else { - System.arraycopy(data, currentOffset, buffer, bufferIndex, numberOfBytesToWrite); - bufferIndex += numberOfBytesToWrite; - numberOfBytesToWrite = 0; - } - - writableBytes = bufferSize - bufferIndex; - } - } - - /** - * Flushes this output stream and forces any buffered output bytes to be - * written out. If any data remains in the payload it is committed to the - * service. Data is queued for writing and forced out to the service - * before the call returns. - */ - @Override - public void flush() throws IOException { - if (supportFlush) { - flushInternalAsync(); - } - } - - /** - * Similar to POSIX fsync, flush out the data in the client's user buffer - * all the way to the disk device (but the disk may have it in its cache). - * - * @throws IOException if an error occurs - */ - @Override - public void hsync() throws IOException { - if (supportFlush) { - flushInternal(); - } - } - - /** - * Flush out the data in the client's user buffer. After the return of - * this call, new readers will see the data. - * - * @throws IOException if any error occurs - */ - @Override - public void hflush() throws IOException { - if (supportFlush) { - flushInternal(); - } - } - - /** - * Query the stream for a specific capability. - * - * @param capability string to query the stream support for. - * @return true for hsync and hflush. - */ - @Override - public boolean hasCapability(String capability) { - switch (capability.toLowerCase(Locale.ENGLISH)) { - case StreamCapabilities.HSYNC: - case StreamCapabilities.HFLUSH: - return supportFlush; - default: - return false; - } - } - - /** - * Force all data in the output stream to be written to SeaweedFS storage. - * Wait to return until this is complete. Close the access to the stream and - * shut down the upload thread pool. - * Any error caught in the worker threads and stored will be rethrown here - * after cleanup. 
- */ - @Override - public synchronized void close() throws IOException { - if (closed) { - return; - } - - LOG.debug("close path: {}", path); - try { - flushInternal(); - threadExecutor.shutdown(); - } finally { - lastError = new IOException(FSExceptionMessages.STREAM_IS_CLOSED); - buffer = null; - bufferIndex = 0; - closed = true; - writeOperations.clear(); - if (!threadExecutor.isShutdown()) { - threadExecutor.shutdownNow(); - } - } - } - - private synchronized void writeCurrentBufferToService() throws IOException { - if (bufferIndex == 0) { - return; - } - - final byte[] bytes = buffer; - final int bytesLength = bufferIndex; - - buffer = new byte[bufferSize]; - bufferIndex = 0; - final long offset = position; - position += bytesLength; - - if (threadExecutor.getQueue().size() >= maxConcurrentRequestCount * 2) { - waitForTaskToComplete(); - } - - final Future job = completionService.submit(new Callable() { - @Override - public Void call() throws Exception { - // originally: client.append(path, offset, bytes, 0, bytesLength); - SeaweedWrite.writeData(entry, replication, filerGrpcClient, offset, bytes, 0, bytesLength); - return null; - } - }); - - writeOperations.add(new WriteOperation(job, offset, bytesLength)); - - // Try to shrink the queue - shrinkWriteOperationQueue(); - } - - private void waitForTaskToComplete() throws IOException { - boolean completed; - for (completed = false; completionService.poll() != null; completed = true) { - // keep polling until there is no data - } - - if (!completed) { - try { - completionService.take(); - } catch (InterruptedException e) { - lastError = (IOException) new InterruptedIOException(e.toString()).initCause(e); - throw lastError; - } - } - } - - private void maybeThrowLastError() throws IOException { - if (lastError != null) { - throw lastError; - } - } - - /** - * Try to remove the completed write operations from the beginning of write - * operation FIFO queue. 
- */ - private synchronized void shrinkWriteOperationQueue() throws IOException { - try { - while (writeOperations.peek() != null && writeOperations.peek().task.isDone()) { - writeOperations.peek().task.get(); - lastTotalAppendOffset += writeOperations.peek().length; - writeOperations.remove(); - } - } catch (Exception e) { - lastError = new IOException(e); - throw lastError; - } - } - - private synchronized void flushInternal() throws IOException { - maybeThrowLastError(); - writeCurrentBufferToService(); - flushWrittenBytesToService(); - } - - private synchronized void flushInternalAsync() throws IOException { - maybeThrowLastError(); - writeCurrentBufferToService(); - flushWrittenBytesToServiceAsync(); - } - - private synchronized void flushWrittenBytesToService() throws IOException { - for (WriteOperation writeOperation : writeOperations) { - try { - writeOperation.task.get(); - } catch (Exception ex) { - lastError = new IOException(ex); - throw lastError; - } - } - LOG.debug("flushWrittenBytesToService: {} position:{}", path, position); - flushWrittenBytesToServiceInternal(position); - } - - private synchronized void flushWrittenBytesToServiceAsync() throws IOException { - shrinkWriteOperationQueue(); - - if (this.lastTotalAppendOffset > this.lastFlushOffset) { - this.flushWrittenBytesToServiceInternal(this.lastTotalAppendOffset); - } - } - - private static class WriteOperation { - private final Future task; - private final long startOffset; - private final long length; - - WriteOperation(final Future task, final long startOffset, final long length) { - Preconditions.checkNotNull(task, "task"); - Preconditions.checkArgument(startOffset >= 0, "startOffset"); - Preconditions.checkArgument(length >= 0, "length"); - - this.task = task; - this.startOffset = startOffset; - this.length = length; - } - } - -} diff --git a/other/java/s3copier/pom.xml b/other/java/s3copier/pom.xml index f8cb9e91c..c3ff30932 100644 --- a/other/java/s3copier/pom.xml +++ b/other/java/s3copier/pom.xml @@ -28,7 +28,7 @@ junit junit - 3.8.1 + 4.13.1 test diff --git a/other/metrics/grafana_seaweedfs.json b/other/metrics/grafana_seaweedfs.json new file mode 100644 index 000000000..074a3531f --- /dev/null +++ b/other/metrics/grafana_seaweedfs.json @@ -0,0 +1,1856 @@ +{ + "__inputs": [ + { + "name": "DS_PROMETHEUS-DEV", + "label": "prometheus-dev", + "description": "", + "type": "datasource", + "pluginId": "prometheus", + "pluginName": "Prometheus" + } + ], + "__requires": [ + { + "type": "grafana", + "id": "grafana", + "name": "Grafana", + "version": "4.6.2" + }, + { + "type": "panel", + "id": "graph", + "name": "Graph", + "version": "" + }, + { + "type": "datasource", + "id": "prometheus", + "name": "Prometheus", + "version": "1.0.0" + } + ], + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": "${DS_PROMETHEUS-DEV}", + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "limit": 100, + "name": "Annotations & Alerts", + "showIn": 0, + "type": "dashboard" + } + ] + }, + "editable": true, + "gnetId": 10423, + "graphTooltip": 0, + "hideControls": false, + "id": null, + "links": [], + "refresh": "30s", + "rows": [ + { + "collapse": true, + "height": 251, + "panels": [ + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_PROMETHEUS-DEV}", + "editable": true, + "error": false, + "fill": 1, + "grid": {}, + "id": 46, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": 
false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 4, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "histogram_quantile(0.90, sum(rate(SeaweedFS_filer_request_seconds_bucket[1m])) by (le))", + "format": "time_series", + "hide": false, + "intervalFactor": 2, + "legendFormat": "average", + "refId": "A", + "step": 60 + }, + { + "expr": "histogram_quantile(0.90, sum(rate(SeaweedFS_filer_request_seconds_bucket[1m])) by (le, type))", + "format": "time_series", + "hide": false, + "intervalFactor": 2, + "legendFormat": "{{type}}", + "refId": "B", + "step": 60 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Filer Request Duration 90th percentile", + "tooltip": { + "msResolution": true, + "shared": true, + "sort": 0, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "s", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_PROMETHEUS-DEV}", + "editable": true, + "error": false, + "fill": 1, + "grid": {}, + "id": 49, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 4, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "histogram_quantile(0.95, sum(rate(SeaweedFS_filer_request_seconds_bucket[1m])) by (le))", + "format": "time_series", + "hide": false, + "intervalFactor": 2, + "legendFormat": "average", + "refId": "A", + "step": 60 + }, + { + "expr": "histogram_quantile(0.95, sum(rate(SeaweedFS_filer_request_seconds_bucket[1m])) by (le, type))", + "format": "time_series", + "hide": false, + "intervalFactor": 2, + "legendFormat": "{{type}}", + "refId": "B", + "step": 60 + }, + { + "expr": "", + "format": "time_series", + "intervalFactor": 2, + "refId": "C" + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Filer Request Duration 95th percentile", + "tooltip": { + "msResolution": true, + "shared": true, + "sort": 0, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "s", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_PROMETHEUS-DEV}", + "editable": true, + "error": false, + "fill": 1, + "grid": {}, + "id": 45, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null as zero", + "percentage": false, + 
"pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 4, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "histogram_quantile(0.99, sum(rate(SeaweedFS_filer_request_seconds_bucket[1m])) by (le))", + "format": "time_series", + "hide": false, + "intervalFactor": 2, + "legendFormat": "average", + "refId": "A", + "step": 60 + }, + { + "expr": "histogram_quantile(0.99, sum(rate(SeaweedFS_filer_request_seconds_bucket[1m])) by (le, type))", + "format": "time_series", + "hide": false, + "intervalFactor": 2, + "legendFormat": "{{type}}", + "refId": "B", + "step": 60 + }, + { + "expr": "", + "format": "time_series", + "intervalFactor": 2, + "refId": "C" + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Filer Request Duration 99th percentile", + "tooltip": { + "msResolution": true, + "shared": true, + "sort": 0, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "s", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_PROMETHEUS-DEV}", + "editable": true, + "error": false, + "fill": 0, + "grid": {}, + "id": 2, + "legend": { + "alignAsTable": true, + "avg": false, + "current": true, + "hideEmpty": true, + "hideZero": true, + "max": true, + "min": false, + "rightSide": true, + "show": true, + "sideWidth": 250, + "sort": "max", + "sortDesc": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "links": [], + "minSpan": 12, + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + { + "alias": "total", + "lines": false + } + ], + "spaceLength": 10, + "span": 12, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "rate(SeaweedFS_filer_request_total[1m])", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{type}}", + "refId": "A", + "step": 30 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Filer QPS", + "tooltip": { + "msResolution": true, + "shared": true, + "sort": 2, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "Filer", + "titleSize": "h6" + }, + { + "collapse": false, + "height": 250, + "panels": [ + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_PROMETHEUS-DEV}", + "editable": true, + "error": false, + "fill": 1, + "grid": {}, + "id": 56, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": 
"flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 4, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "histogram_quantile(0.90, sum(rate(SeaweedFS_s3_request_seconds_bucket[1m])) by (le))", + "format": "time_series", + "hide": false, + "intervalFactor": 2, + "legendFormat": "average", + "refId": "A", + "step": 60 + }, + { + "expr": "histogram_quantile(0.90, sum(rate(SeaweedFS_s3_request_seconds_bucket[1m])) by (le, type))", + "format": "time_series", + "hide": false, + "intervalFactor": 2, + "legendFormat": "{{type}}", + "refId": "B", + "step": 60 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "S3 Request Duration 90th percentile", + "tooltip": { + "msResolution": true, + "shared": true, + "sort": 0, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "s", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_PROMETHEUS-DEV}", + "editable": true, + "error": false, + "fill": 1, + "grid": {}, + "id": 57, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 4, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "histogram_quantile(0.95, sum(rate(SeaweedFS_s3_request_seconds_bucket[1m])) by (le))", + "format": "time_series", + "hide": false, + "intervalFactor": 2, + "legendFormat": "average", + "refId": "A", + "step": 60 + }, + { + "expr": "histogram_quantile(0.95, sum(rate(SeaweedFS_s3_request_seconds_bucket[1m])) by (le, type))", + "format": "time_series", + "hide": false, + "intervalFactor": 2, + "legendFormat": "{{type}}", + "refId": "B", + "step": 60 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "S3 Request Duration 95th percentile", + "tooltip": { + "msResolution": true, + "shared": true, + "sort": 0, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "s", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_PROMETHEUS-DEV}", + "editable": true, + "error": false, + "fill": 1, + "grid": {}, + "id": 58, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 4, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "histogram_quantile(0.99, sum(rate(SeaweedFS_s3_request_seconds_bucket[1m])) by (le))", 
+ "format": "time_series", + "hide": false, + "intervalFactor": 2, + "legendFormat": "average", + "refId": "A", + "step": 60 + }, + { + "expr": "histogram_quantile(0.99, sum(rate(SeaweedFS_s3_request_seconds_bucket[1m])) by (le, type))", + "format": "time_series", + "hide": false, + "intervalFactor": 2, + "legendFormat": "{{type}}", + "refId": "B", + "step": 60 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "S3 Request Duration 99th percentile", + "tooltip": { + "msResolution": true, + "shared": true, + "sort": 0, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "s", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_PROMETHEUS-DEV}", + "editable": true, + "error": false, + "fill": 0, + "grid": {}, + "id": 55, + "legend": { + "alignAsTable": true, + "avg": false, + "current": true, + "hideEmpty": true, + "hideZero": true, + "max": true, + "min": false, + "rightSide": true, + "show": true, + "sideWidth": 250, + "sort": "max", + "sortDesc": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "links": [], + "minSpan": 12, + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + { + "alias": "total", + "lines": false + } + ], + "spaceLength": 10, + "span": 12, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "rate(SeaweedFS_s3_request_total[1m])", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{type}}", + "refId": "A", + "step": 30 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "S3 API QPS", + "tooltip": { + "msResolution": true, + "shared": true, + "sort": 2, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_PROMETHEUS-DEV}", + "editable": true, + "error": false, + "fill": 0, + "grid": {}, + "hideTimeOverride": false, + "id": 59, + "legend": { + "alignAsTable": true, + "avg": false, + "current": true, + "hideEmpty": true, + "hideZero": true, + "max": true, + "min": false, + "rightSide": true, + "show": true, + "sideWidth": 250, + "sort": "max", + "sortDesc": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "links": [], + "minSpan": 12, + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + { + "alias": "total", + "lines": false + } + ], + "spaceLength": 10, + "span": 12, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum by (type) (SeaweedFS_s3_request_total{type=~'PUT|COPY|POST|LIST'})*0.000005", + "format": "time_series", + "hide": false, + "intervalFactor": 2, + "legendFormat": "{{type}} requests", + 
"refId": "A", + "step": 30 + }, + { + "expr": "sum (SeaweedFS_s3_request_total{type=~'PUT|COPY|POST|LIST'})*0.000005", + "format": "time_series", + "hide": false, + "intervalFactor": 2, + "legendFormat": "All PUT, COPY, POST, LIST", + "refId": "C", + "step": 30 + }, + { + "expr": "sum (SeaweedFS_s3_request_total{type!~'PUT|COPY|POST|LIST'})*0.0000004", + "format": "time_series", + "hide": false, + "intervalFactor": 2, + "legendFormat": "GET and all other", + "refId": "B" + }, + { + "expr": "sum by (type) (SeaweedFS_s3_request_total{type!~'PUT|COPY|POST|LIST'})*0.0000004", + "format": "time_series", + "hide": false, + "intervalFactor": 2, + "legendFormat": "{{type}} requests", + "refId": "D" + } + ], + "thresholds": [], + "timeFrom": "1M", + "timeShift": null, + "title": "S3 API Monthly Cost if on AWS", + "tooltip": { + "msResolution": true, + "shared": true, + "sort": 2, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "currencyUSD", + "label": "Cost in US$", + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "currencyUSD", + "label": "Write Cost", + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "S3 Gateway", + "titleSize": "h6" + }, + { + "collapse": true, + "height": 252, + "panels": [ + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_PROMETHEUS-DEV}", + "editable": true, + "error": false, + "fill": 1, + "grid": {}, + "id": 47, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 6, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "histogram_quantile(0.99, sum(rate(SeaweedFS_volumeServer_request_seconds_bucket[1m])) by (le, exported_instance))", + "format": "time_series", + "hide": false, + "intervalFactor": 2, + "legendFormat": "{{exported_instance}}", + "refId": "B" + }, + { + "expr": "histogram_quantile(0.99, sum(rate(SeaweedFS_volumeServer_request_seconds_bucket[1m])) by (le))", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "average", + "refId": "C" + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Volume Server Request Duration 99th percentile", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "s", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_PROMETHEUS-DEV}", + "editable": true, + "error": false, + "fill": 1, + "grid": {}, + "id": 40, + "legend": { + "alignAsTable": true, + "avg": false, + "current": false, + "hideEmpty": true, + "hideZero": true, + "max": false, + "min": false, + 
"rightSide": true, + "show": true, + "sort": "total", + "sortDesc": true, + "total": true, + "values": true + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 6, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(rate(SeaweedFS_volumeServer_request_total[1m])) by (type)", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{type}}", + "refId": "A", + "step": 4 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Volume Server QPS", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_PROMETHEUS-DEV}", + "fill": 1, + "id": 48, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 12, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(SeaweedFS_volumeServer_volumes) by (collection, type)", + "format": "time_series", + "hide": false, + "intervalFactor": 2, + "legendFormat": "{{collection}} {{type}}", + "refId": "A" + }, + { + "expr": "sum(SeaweedFS_volumeServer_max_volumes)", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "Total", + "refId": "B" + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Volume Count", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_PROMETHEUS-DEV}", + "fill": 1, + "id": 50, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 12, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(SeaweedFS_volumeServer_total_disk_size) by (collection, type)", + "format": "time_series", + "hide": false, + "intervalFactor": 2, + "legendFormat": "{{collection}} {{type}}", + "refId": "A" + }, + { + "expr": "sum(SeaweedFS_volumeServer_total_disk_size)", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "Total", + "refId": "B" + } + ], + 
"thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Used Disk Space by Collection and Type", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "bytes", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_PROMETHEUS-DEV}", + "fill": 1, + "id": 51, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 12, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(SeaweedFS_volumeServer_total_disk_size) by (exported_instance)", + "format": "time_series", + "hide": false, + "intervalFactor": 2, + "legendFormat": "{{exported_instance}}", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Used Disk Space by Host", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "bytes", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "Volume Server", + "titleSize": "h6" + }, + { + "collapse": true, + "height": 251, + "panels": [ + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_PROMETHEUS-DEV}", + "editable": true, + "error": false, + "fill": 1, + "grid": {}, + "id": 12, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 6, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "histogram_quantile(0.99, sum(rate(SeaweedFS_filerStore_request_seconds_bucket[1m])) by (le, type))", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{type}}", + "refId": "B" + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Filer Store Request Duration 99th percentile", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "s", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + }, + { + "aliasColors": {}, + "bars": false, 
+ "dashLength": 10, + "dashes": false, + "datasource": "${DS_PROMETHEUS-DEV}", + "editable": true, + "error": false, + "fill": 1, + "grid": {}, + "id": 14, + "legend": { + "alignAsTable": true, + "avg": true, + "current": true, + "hideEmpty": false, + "hideZero": false, + "max": false, + "min": false, + "rightSide": true, + "show": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 6, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(rate(SeaweedFS_filerStore_request_total [1m])) by (type)", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{type}}", + "refId": "B" + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Filer Store QPS", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "Filer Store", + "titleSize": "h6" + }, + { + "collapse": true, + "height": 242, + "panels": [ + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_PROMETHEUS-DEV}", + "editable": true, + "error": false, + "fill": 1, + "grid": {}, + "id": 52, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 6, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "go_memstats_alloc_bytes{exported_job=\"filer\"}", + "format": "time_series", + "hide": false, + "intervalFactor": 2, + "legendFormat": "bytes allocated", + "refId": "B" + }, + { + "expr": "rate(go_memstats_alloc_bytes_total{exported_job=\"filer\"}[30s])", + "format": "time_series", + "hide": false, + "intervalFactor": 2, + "legendFormat": "alloc rate", + "refId": "A" + }, + { + "expr": "go_memstats_stack_inuse_bytes{exported_job=\"filer\"}", + "format": "time_series", + "hide": false, + "intervalFactor": 2, + "legendFormat": "stack inuse", + "refId": "C" + }, + { + "expr": "go_memstats_heap_inuse_bytes{exported_job=\"filer\"}", + "format": "time_series", + "hide": false, + "intervalFactor": 2, + "legendFormat": "heap inuse", + "refId": "D" + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Filer Go Memory Stats", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "bytes", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": null, + 
"show": true + } + ] + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_PROMETHEUS-DEV}", + "editable": true, + "error": false, + "fill": 1, + "grid": {}, + "id": 54, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 6, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "go_gc_duration_seconds{exported_job=\"filer\"}", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{quantile}}", + "refId": "B" + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Filer Go GC duration quantiles", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "s", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_PROMETHEUS-DEV}", + "editable": true, + "error": false, + "fill": 1, + "grid": {}, + "id": 53, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "span": 12, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "go_goroutines{exported_job=\"filer\"}", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{exported_instance}}", + "refId": "B" + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Filer Go Routines", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "none", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "Filer Instances", + "titleSize": "h6" + } + ], + "schemaVersion": 14, + "style": "dark", + "tags": [], + "templating": { + "list": [] + }, + "time": { + "from": "now-30d", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "5s", + "10s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ], + "time_options": [ + "5m", + "15m", + "1h", + "6h", + "12h", + "24h", + "2d", + "7d", + "30d" + ] + }, + "timezone": "browser", + "title": "SeaweedFS", + "version": 2 +} diff --git a/other/metrics/grafana_seaweedfs_k8s.json b/other/metrics/grafana_seaweedfs_k8s.json new file mode 100644 index 000000000..348198e52 --- /dev/null +++ 
b/other/metrics/grafana_seaweedfs_k8s.json @@ -0,0 +1,2362 @@ +{ + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": "$DS_PROMETHEUS", + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "limit": 100, + "name": "Annotations & Alerts", + "showIn": 0, + "type": "dashboard" + } + ] + }, + "editable": true, + "gnetId": 10423, + "graphTooltip": 0, + "id": 3690, + "iteration": 1602763266349, + "links": [], + "panels": [ + { + "collapsed": false, + "datasource": null, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 0 + }, + "id": 60, + "panels": [], + "title": "S3 api", + "type": "row" + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$DS_PROMETHEUS", + "editable": true, + "error": false, + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, + "fill": 0, + "fillGradient": 0, + "grid": {}, + "gridPos": { + "h": 7, + "w": 12, + "x": 0, + "y": 1 + }, + "hiddenSeries": false, + "id": 63, + "legend": { + "alignAsTable": false, + "avg": false, + "current": true, + "hideEmpty": true, + "hideZero": true, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sideWidth": 250, + "sort": "max", + "sortDesc": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "links": [], + "maxPerRow": 1, + "nullPointMode": "null as zero", + "percentage": false, + "pluginVersion": "7.1.0", + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + { + "alias": "total", + "lines": false + } + ], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(rate(SeaweedFS_s3_request_total{namespace=\"$namespace\",service=~\"$service-api\",type=~\"$method\"}[1m])) by (code)", + "format": "time_series", + "interval": "", + "intervalFactor": 2, + "legendFormat": "{{code}}", + "refId": "A", + "step": 30 + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "S3 QPS by statusCode", + "tooltip": { + "msResolution": true, + "shared": true, + "sort": 2, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$DS_PROMETHEUS", + "editable": true, + "error": false, + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, + "fill": 0, + "fillGradient": 0, + "grid": {}, + "gridPos": { + "h": 7, + "w": 12, + "x": 12, + "y": 1 + }, + "hiddenSeries": false, + "id": 62, + "legend": { + "alignAsTable": false, + "avg": false, + "current": true, + "hideEmpty": true, + "hideZero": true, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sideWidth": 250, + "sort": "max", + "sortDesc": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "links": [], + "maxPerRow": 1, + "nullPointMode": "null as zero", + "percentage": false, + "pluginVersion": "7.1.0", + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + { + "alias": "total", + "lines": false + } + ], + "spaceLength": 10, + "stack": false, 
+ "steppedLine": false, + "targets": [ + { + "expr": "sum(rate(SeaweedFS_s3_request_total{namespace=\"$namespace\",service=~\"$service-api\"}[1m])) by (type)", + "format": "time_series", + "interval": "", + "intervalFactor": 2, + "legendFormat": "{{type}}", + "refId": "A", + "step": 30 + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "S3 QPS by method", + "tooltip": { + "msResolution": true, + "shared": true, + "sort": 2, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$DS_PROMETHEUS", + "editable": true, + "error": false, + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "grid": {}, + "gridPos": { + "h": 7, + "w": 8, + "x": 0, + "y": 8 + }, + "hiddenSeries": false, + "id": 68, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null as zero", + "percentage": false, + "pluginVersion": "7.1.0", + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "histogram_quantile(0.80, sum(rate(SeaweedFS_s3_request_seconds_bucket{namespace=\"$namespace\",service=~\"$service-api\"}[1m])) by (le))", + "format": "time_series", + "hide": false, + "interval": "", + "intervalFactor": 2, + "legendFormat": "average", + "refId": "A", + "step": 60 + }, + { + "expr": "histogram_quantile(0.80, sum(rate(SeaweedFS_s3_request_seconds_bucket{namespace=\"$namespace\",service=~\"$service-api\",type=~\"$method\"}[1m])) by (le, type))", + "format": "time_series", + "hide": false, + "interval": "", + "intervalFactor": 2, + "legendFormat": "{{type}}", + "refId": "B", + "step": 60 + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "S3 Request Duration 80th percentile", + "tooltip": { + "msResolution": true, + "shared": true, + "sort": 0, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "s", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$DS_PROMETHEUS", + "editable": true, + "error": false, + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "grid": {}, + "gridPos": { + "h": 7, + "w": 8, + "x": 8, + "y": 8 + }, + "hiddenSeries": false, + "id": 67, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + 
"linewidth": 1, + "links": [], + "nullPointMode": "null as zero", + "percentage": false, + "pluginVersion": "7.1.0", + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "histogram_quantile(0.95, sum(rate(SeaweedFS_s3_request_seconds_bucket{namespace=\"$namespace\",service=~\"$service-api\"}[1m])) by (le))", + "format": "time_series", + "hide": false, + "interval": "", + "intervalFactor": 2, + "legendFormat": "average", + "refId": "A", + "step": 60 + }, + { + "expr": "histogram_quantile(0.95, sum(rate(SeaweedFS_s3_request_seconds_bucket{namespace=\"$namespace\",service=~\"$service-api\", type=~\"$method\"}[1m])) by (le, type))", + "format": "time_series", + "hide": false, + "interval": "", + "intervalFactor": 2, + "legendFormat": "{{type}}", + "refId": "B", + "step": 60 + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "S3 Request Duration 95th percentile", + "tooltip": { + "msResolution": true, + "shared": true, + "sort": 0, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "s", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$DS_PROMETHEUS", + "editable": true, + "error": false, + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "grid": {}, + "gridPos": { + "h": 7, + "w": 8, + "x": 16, + "y": 8 + }, + "hiddenSeries": false, + "id": 65, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null as zero", + "percentage": false, + "pluginVersion": "7.1.0", + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "histogram_quantile(0.99, sum(rate(SeaweedFS_s3_request_seconds_bucket{namespace=\"$namespace\",service=~\"$service-api\"}[1m])) by (le))", + "format": "time_series", + "hide": false, + "interval": "", + "intervalFactor": 2, + "legendFormat": "average", + "refId": "A", + "step": 60 + }, + { + "expr": "histogram_quantile(0.99, sum(rate(SeaweedFS_s3_request_seconds_bucket{namespace=\"$namespace\",service=~\"$service-api\", type=~\"$method\"}[1m])) by (le, type))", + "format": "time_series", + "hide": false, + "interval": "", + "intervalFactor": 2, + "legendFormat": "{{type}}", + "refId": "B", + "step": 60 + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "S3 Request Duration 99th percentile", + "tooltip": { + "msResolution": true, + "shared": true, + "sort": 0, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "s", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + 
"min": null, + "show": false + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "collapsed": false, + "datasource": null, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 15 + }, + "id": 55, + "panels": [], + "repeat": null, + "title": "Filer", + "type": "row" + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$DS_PROMETHEUS", + "editable": true, + "error": false, + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "grid": {}, + "gridPos": { + "h": 7, + "w": 8, + "x": 0, + "y": 16 + }, + "hiddenSeries": false, + "id": 46, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null as zero", + "percentage": false, + "pluginVersion": "7.1.0", + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "histogram_quantile(0.80, sum(rate(SeaweedFS_filer_request_seconds_bucket{namespace=\"$namespace\",service=~\"$service-api\"}[1m])) by (le))", + "format": "time_series", + "hide": false, + "interval": "", + "intervalFactor": 2, + "legendFormat": "average", + "refId": "A", + "step": 60 + }, + { + "expr": "histogram_quantile(0.80, sum(rate(SeaweedFS_filer_request_seconds_bucket{namespace=\"$namespace\",service=~\"$service-api\"}[1m])) by (le, type))", + "format": "time_series", + "hide": true, + "interval": "", + "intervalFactor": 2, + "legendFormat": "{{type}}", + "refId": "B", + "step": 60 + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Filer Request Duration 80th percentile", + "tooltip": { + "msResolution": true, + "shared": true, + "sort": 0, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "s", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$DS_PROMETHEUS", + "editable": true, + "error": false, + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "grid": {}, + "gridPos": { + "h": 7, + "w": 8, + "x": 8, + "y": 16 + }, + "hiddenSeries": false, + "id": 49, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null as zero", + "percentage": false, + "pluginVersion": "7.1.0", + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "histogram_quantile(0.95, sum(rate(SeaweedFS_filer_request_seconds_bucket{namespace=\"$namespace\",service=~\"$service-api\"}[1m])) by (le))", + "format": "time_series", + "hide": false, + "intervalFactor": 2, + "legendFormat": "average", + "refId": "A", + "step": 60 + }, + { + "expr": "histogram_quantile(0.95, 
sum(rate(SeaweedFS_filer_request_seconds_bucket{namespace=\"$namespace\",service=~\"$service-api\"}[1m])) by (le, type))", + "format": "time_series", + "hide": false, + "interval": "", + "intervalFactor": 2, + "legendFormat": "{{type}}", + "refId": "B", + "step": 60 + }, + { + "expr": "", + "format": "time_series", + "intervalFactor": 2, + "refId": "C" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Filer Request Duration 95th percentile", + "tooltip": { + "msResolution": true, + "shared": true, + "sort": 0, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "s", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$DS_PROMETHEUS", + "editable": true, + "error": false, + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "grid": {}, + "gridPos": { + "h": 7, + "w": 8, + "x": 16, + "y": 16 + }, + "hiddenSeries": false, + "id": 66, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null as zero", + "percentage": false, + "pluginVersion": "7.1.0", + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "histogram_quantile(0.95, sum(rate(SeaweedFS_filer_request_seconds_bucket{namespace=\"$namespace\",service=~\"$service-api\"}[1m])) by (le))", + "format": "time_series", + "hide": false, + "intervalFactor": 2, + "legendFormat": "average", + "refId": "A", + "step": 60 + }, + { + "expr": "histogram_quantile(0.95, sum(rate(SeaweedFS_filer_request_seconds_bucket{namespace=\"$namespace\",service=~\"$service-api\"}[1m])) by (le, type))", + "format": "time_series", + "hide": false, + "interval": "", + "intervalFactor": 2, + "legendFormat": "{{type}}", + "refId": "B", + "step": 60 + }, + { + "expr": "", + "format": "time_series", + "intervalFactor": 2, + "refId": "C" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Filer Request Duration 95th percentile", + "tooltip": { + "msResolution": true, + "shared": true, + "sort": 0, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "s", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$DS_PROMETHEUS", + "editable": true, + "error": false, + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, + "fill": 0, + "fillGradient": 0, + "grid": {}, + "gridPos": { + "h": 7, + "w": 24, + "x": 0, + "y": 23 + }, + "hiddenSeries": false, + "id": 2, + "legend": 
{ + "alignAsTable": true, + "avg": false, + "current": true, + "hideEmpty": true, + "hideZero": true, + "max": true, + "min": false, + "rightSide": true, + "show": true, + "sideWidth": 250, + "sort": "max", + "sortDesc": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "links": [], + "maxPerRow": 1, + "nullPointMode": "null as zero", + "percentage": false, + "pluginVersion": "7.1.0", + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + { + "alias": "total", + "lines": false + } + ], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "rate(SeaweedFS_filer_request_total{namespace=\"$namespace\",service=~\"$service-api\"}[1m])", + "format": "time_series", + "interval": "", + "intervalFactor": 2, + "legendFormat": "{{type}}", + "refId": "A", + "step": 30 + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Filer QPS", + "tooltip": { + "msResolution": true, + "shared": true, + "sort": 2, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "collapsed": false, + "datasource": null, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 30 + }, + "id": 56, + "panels": [], + "repeat": null, + "title": "Volume Server", + "type": "row" + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$DS_PROMETHEUS", + "editable": true, + "error": false, + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "grid": {}, + "gridPos": { + "h": 7, + "w": 12, + "x": 0, + "y": 31 + }, + "hiddenSeries": false, + "id": 47, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "null as zero", + "percentage": false, + "pluginVersion": "7.1.0", + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "histogram_quantile(0.99, sum(rate(SeaweedFS_volumeServer_request_seconds_bucket{namespace=\"$namespace\",service=~\"$service-volume\"}[1m])) by (le, exported_instance))", + "format": "time_series", + "hide": false, + "interval": "", + "intervalFactor": 2, + "legendFormat": "{{exported_instance}}", + "refId": "B" + }, + { + "expr": "histogram_quantile(0.99, sum(rate(SeaweedFS_volumeServer_request_seconds_bucket{namespace=\"$namespace\",service=~\"$service-volume\"}[1m])) by (le))", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "average", + "refId": "C" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Volume Server Request Duration 99th percentile", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "s", 
+ "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$DS_PROMETHEUS", + "editable": true, + "error": false, + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "grid": {}, + "gridPos": { + "h": 7, + "w": 12, + "x": 12, + "y": 31 + }, + "hiddenSeries": false, + "id": 40, + "legend": { + "alignAsTable": true, + "avg": false, + "current": false, + "hideEmpty": true, + "hideZero": true, + "max": false, + "min": false, + "rightSide": true, + "show": true, + "sort": "total", + "sortDesc": true, + "total": true, + "values": true + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "null as zero", + "percentage": false, + "pluginVersion": "7.1.0", + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(rate(SeaweedFS_volumeServer_request_total{namespace=\"$namespace\",service=~\"$service-volume\"}[1m])) by (type)", + "format": "time_series", + "interval": "", + "intervalFactor": 2, + "legendFormat": "{{type}}", + "refId": "A", + "step": 4 + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Volume Server QPS", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$DS_PROMETHEUS", + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 24, + "x": 0, + "y": 38 + }, + "hiddenSeries": false, + "id": 48, + "legend": { + "avg": false, + "current": true, + "max": false, + "min": false, + "show": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null as zero", + "percentage": true, + "pluginVersion": "7.1.0", + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "sum(SeaweedFS_volumeServer_volumes{namespace=\"$namespace\",service=~\"$service-volume\"}) by (collection, type)", + "format": "time_series", + "hide": false, + "interval": "", + "intervalFactor": 2, + "legendFormat": "{{collection}} {{type}}", + "refId": "A" + }, + { + "expr": "sum(max(SeaweedFS_volumeServer_max_volumes{namespace=\"$namespace\",service=~\"$service-volume\"}) by (pod))", + "format": "time_series", + "hide": false, + "instant": false, + "interval": "", + "intervalFactor": 2, + "legendFormat": "Total", + "refId": "B" + }, + { + "expr": "sum(max(SeaweedFS_volumeServer_read_only_volumes{namespace=\"$namespace\",service=~\"$service-volume\"}) by (pod))", + 
"interval": "", + "legendFormat": "Read only", + "refId": "C" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Volume Count", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "transformations": [], + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$DS_PROMETHEUS", + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 24, + "x": 0, + "y": 45 + }, + "hiddenSeries": false, + "id": 50, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pluginVersion": "7.1.0", + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(SeaweedFS_volumeServer_total_disk_size{namespace=\"$namespace\",service=~\"$service-volume\"}) by (collection, type)", + "format": "time_series", + "hide": false, + "interval": "", + "intervalFactor": 2, + "legendFormat": "{{collection}} {{type}}", + "refId": "A" + }, + { + "expr": "sum(SeaweedFS_volumeServer_total_disk_size{namespace=\"$namespace\",service=~\"$service-volume\"})", + "format": "time_series", + "hide": true, + "interval": "", + "intervalFactor": 2, + "legendFormat": "Total", + "refId": "B" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Used Disk Space by Collection and Type", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "bytes", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$DS_PROMETHEUS", + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 24, + "x": 0, + "y": 52 + }, + "hiddenSeries": false, + "id": 51, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pluginVersion": "7.1.0", + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(max(SeaweedFS_volumeServer_total_disk_size{namespace=\"$namespace\",service=~\"$service-volume\"}) by (collection,pod)) by (pod)", + "format": 
"time_series", + "hide": false, + "interval": "", + "intervalFactor": 2, + "legendFormat": "{{pod}}", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Used Disk Space by Host", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "bytes", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "collapsed": false, + "datasource": null, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 59 + }, + "id": 57, + "panels": [], + "repeat": null, + "title": "Filer Store", + "type": "row" + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$DS_PROMETHEUS", + "editable": true, + "error": false, + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "grid": {}, + "gridPos": { + "h": 7, + "w": 12, + "x": 0, + "y": 60 + }, + "hiddenSeries": false, + "id": 12, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "null as zero", + "percentage": false, + "pluginVersion": "7.1.0", + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "histogram_quantile(0.99, sum(rate(SeaweedFS_filerStore_request_seconds_bucket{namespace=\"$namespace\",service=~\"$service-api\"}[1m])) by (le, type))", + "format": "time_series", + "interval": "", + "intervalFactor": 2, + "legendFormat": "{{type}}", + "refId": "B" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Filer Store Request Duration 99th percentile", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "s", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$DS_PROMETHEUS", + "editable": true, + "error": false, + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "grid": {}, + "gridPos": { + "h": 7, + "w": 12, + "x": 12, + "y": 60 + }, + "hiddenSeries": false, + "id": 14, + "legend": { + "alignAsTable": true, + "avg": true, + "current": true, + "hideEmpty": false, + "hideZero": false, + "max": false, + "min": false, + "rightSide": true, + "show": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "null as zero", + "percentage": false, + "pluginVersion": "7.1.0", + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + 
"spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(rate(SeaweedFS_filerStore_request_total{namespace=\"$namespace\",service=~\"$service-api\"}[1m])) by (type)", + "format": "time_series", + "interval": "", + "intervalFactor": 2, + "legendFormat": "{{type}}", + "refId": "B" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Filer Store QPS", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "collapsed": false, + "datasource": null, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 67 + }, + "id": 58, + "panels": [], + "repeat": null, + "title": "Filer Instances", + "type": "row" + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$DS_PROMETHEUS", + "editable": true, + "error": false, + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "grid": {}, + "gridPos": { + "h": 7, + "w": 12, + "x": 0, + "y": 68 + }, + "hiddenSeries": false, + "id": 52, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "null as zero", + "percentage": false, + "pluginVersion": "7.1.0", + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "go_memstats_alloc_bytes{namespace=~\"$namespace\", endpoint=\"swfs-filer-metrics\"}", + "format": "time_series", + "hide": false, + "interval": "", + "intervalFactor": 2, + "legendFormat": "bytes allocated", + "refId": "B" + }, + { + "expr": "rate(go_memstats_alloc_bytes_total{namespace=~\"$namespace\", endpoint=\"swfs-.*-metrics\"}[30s])", + "format": "time_series", + "hide": false, + "interval": "", + "intervalFactor": 2, + "legendFormat": "alloc rate", + "refId": "A" + }, + { + "expr": "go_memstats_stack_inuse_bytes{namespace=~\"$namespace\", endpoint=\"swfs-filer-metrics\"}", + "format": "time_series", + "hide": true, + "interval": "", + "intervalFactor": 2, + "legendFormat": "stack inuse", + "refId": "C" + }, + { + "expr": "go_memstats_heap_inuse_bytes{namespace=~\"$namespace\", endpoint=\"swfs-filer-metrics\"}", + "format": "time_series", + "hide": true, + "interval": "", + "intervalFactor": 2, + "legendFormat": "heap inuse", + "refId": "D" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Filer Go Memory Stats", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "bytes", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + 
} + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$DS_PROMETHEUS", + "editable": true, + "error": false, + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "grid": {}, + "gridPos": { + "h": 7, + "w": 12, + "x": 12, + "y": 68 + }, + "hiddenSeries": false, + "id": 54, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "null as zero", + "percentage": false, + "pluginVersion": "7.1.0", + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "go_gc_duration_seconds{namespace=~\"$namespace\", endpoint=\"swfs-filer-metrics\"}", + "format": "time_series", + "interval": "", + "intervalFactor": 2, + "legendFormat": "{{quantile}}", + "refId": "B" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Filer Go GC duration quantiles", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "s", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$DS_PROMETHEUS", + "editable": true, + "error": false, + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "grid": {}, + "gridPos": { + "h": 7, + "w": 24, + "x": 0, + "y": 75 + }, + "hiddenSeries": false, + "id": 53, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "null as zero", + "percentage": false, + "pluginVersion": "7.1.0", + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "go_goroutines{namespace=~\"$namespace\", endpoint=\"swfs-filer-metrics\"}", + "format": "time_series", + "interval": "", + "intervalFactor": 2, + "legendFormat": "{{pod}}", + "refId": "B" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Filer Go Routines", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "none", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + } + ], + "refresh": "30s", + "schemaVersion": 26, + "style": "dark", + "tags": [], + "templating": 
{ + "list": [ + { + "current": { + "selected": true, + "text": "clickhouse-prom", + "value": "clickhouse-prom" + }, + "hide": 0, + "includeAll": false, + "label": "Datasource", + "multi": false, + "name": "DS_PROMETHEUS", + "options": [], + "query": "prometheus", + "queryValue": "", + "refresh": 1, + "regex": "", + "skipUrlSync": false, + "type": "datasource" + }, + { + "allValue": null, + "current": { + "selected": false, + "text": "s3", + "value": "s3" + }, + "datasource": "$DS_PROMETHEUS", + "definition": "label_values({endpoint=\"swfs-filer-metrics\"}, namespace)", + "hide": 0, + "includeAll": false, + "label": "namespace", + "multi": false, + "name": "namespace", + "options": [ + { + "selected": true, + "text": "s3", + "value": "s3" + } + ], + "query": "label_values({endpoint=\"swfs-filer-metrics\"}, namespace)", + "refresh": 0, + "regex": "", + "skipUrlSync": false, + "sort": 1, + "tagValuesQuery": "", + "tags": [], + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "allValue": "", + "current": { + "selected": true, + "text": "fast", + "value": "fast" + }, + "datasource": "$DS_PROMETHEUS", + "definition": "label_values({namespace=\"$namespace\"}, service)", + "hide": 0, + "includeAll": true, + "label": "service", + "multi": false, + "name": "service", + "options": [ + { + "selected": false, + "text": "All", + "value": "$__all" + }, + { + "selected": true, + "text": "fast", + "value": "fast" + }, + { + "selected": false, + "text": "slow", + "value": "slow" + } + ], + "query": "label_values({namespace=\"$namespace\"}, service)", + "refresh": 0, + "regex": "/(\\w+)-master/", + "skipUrlSync": false, + "sort": 1, + "tagValuesQuery": "", + "tags": [], + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "allValue": "", + "current": { + "selected": false, + "text": "All", + "value": "$__all" + }, + "datasource": "$DS_PROMETHEUS", + "definition": "label_values(SeaweedFS_s3_request_total{namespace=\"$namespace\"}, type)", + "hide": 0, + "includeAll": true, + "label": "method", + "multi": false, + "name": "method", + "options": [ + { + "selected": true, + "text": "All", + "value": "$__all" + }, + { + "selected": false, + "text": "DELETE", + "value": "DELETE" + }, + { + "selected": false, + "text": "GET", + "value": "GET" + }, + { + "selected": false, + "text": "LIST", + "value": "LIST" + }, + { + "selected": false, + "text": "POST", + "value": "POST" + }, + { + "selected": false, + "text": "PUT", + "value": "PUT" + } + ], + "query": "label_values(SeaweedFS_s3_request_total{namespace=\"$namespace\"}, type)", + "refresh": 0, + "regex": "", + "skipUrlSync": false, + "sort": 1, + "tagValuesQuery": "", + "tags": [], + "tagsQuery": "", + "type": "query", + "useTags": false + } + ] + }, + "time": { + "from": "now-12h", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "10s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ], + "time_options": [ + "5m", + "15m", + "1h", + "6h", + "12h", + "24h", + "2d", + "7d", + "30d" + ] + }, + "timezone": "browser", + "title": "SeaweedFS", + "version": 2 +} diff --git a/snap/README.md b/snap/README.md new file mode 100644 index 000000000..5752bd4af --- /dev/null +++ b/snap/README.md @@ -0,0 +1,49 @@ +Hi + +This PR adds support for building a snap package of seaweedfs. Snaps are cross distro Linux software packages. One snap can be installed on Ubuntu all supported LTS and non LTS releases from 14.04 onward. Additionally they can installed on Debian, Manjaro, Fedora, OpenSUSE and others. 
Making a snap of seaweedfs enables you to provide automatic updates on your schedule to your users via the snap store. + +If accepted, you can use snapcraft locally, a CI system such as travis or circle-ci, or our free build system (build.snapcraft.io) to create snaps and upload to the store (snapcraft.io/store). The store supports + +To test this PR locally, I used an Ubuntu 16.04 VM, with the following steps. + +``` +snap install snapcraft --classic +git clone https://github.com/popey/seaweedfs +cd seaweedfs +git checkout add-snapcraft +snapcraft +``` + +The generated a .snap file from the tip of master (I could have checked out a stable release instead). It can be installed with:- + + snap install seaweedfs_0.99+git30.79371c0-dirty_amd64.snap --dangerous + +(the --dangerous is necessary because we’re installing an app which hasn’t gone through the snap store review process) + +Once installed, the (namespaced) weed command can be executed. If you accept this and land the snap in the store, we can request an ‘alias’ so users can use the ‘weed’ command rather than the namespaced ‘seaweedfs.weed’ + +- Run the command +- Create sample config. Snaps are securely confined so their home directory is in a different place + mkdir ~/snap/seaweedfs/current/.seaweedfs + seaweedfs.weed scaffold > ~/snap/seaweed/current/.seaweedfs/filer.toml +- Run a server + seaweedfs.weed server +- Run a benchmark + seaweedfs.weed benchmark + +Results from my test run: https://paste.ubuntu.com/p/95Xk8zFQ7w/ + +If landed, you will need to:- + +- Register an account in the snap store https://snapcraft.io/account +- Register the ‘seaweedfs’ name in the store + - snapcraft login + - snapcraft register seaweedfs +- Upload a built snap to the store + - snapcraft push seaweedfs_0.99+git30.79371c0-dirty_amd64.snap --release edge +- Test installing on a clean Ubuntu 16.04 machine + - snap install seaweedfs --edge + +The store supports multiple risk levels as “channels” with the edge channel typically used to host the latest build from git master. Stable is where stable releases are pushed. Optionally beta and candidate channels can also be used if needed. + +Once you are happy, you can push a stable release to the stable channel, update the store page, and promote the application online (we can help there). diff --git a/snap/snapcraft.yaml b/snap/snapcraft.yaml new file mode 100644 index 000000000..6449e9bfb --- /dev/null +++ b/snap/snapcraft.yaml @@ -0,0 +1,53 @@ +# Name of snap as registered in the store +name: seaweedfs +# Automatically derive snap version from git tags +version: git +# Short human readable name as seen in 'snap find $SNAPNAME' +summary: SeaweedFS +# Longer multi-line description found in 'snap info $SNAPNAME' +description: | + SeaweedFS is a simple and highly scalable distributed file system, + to store and serve billions of files fast! + SeaweedFS implements an object store with O(1) disk seek, + transparent cloud integration, and an optional Filer with POSIX interface, + supporting S3 API, Rack-Aware Erasure Coding for warm storage, + FUSE mount, Hadoop compatible, WebDAV. 
+
+# Grade is stable for snaps expected to land in the stable channel
+grade: stable
+# Uses the strict confinement model and uses interfaces to open up access to
+# resources on the target host
+confinement: strict
+
+# List of parts which comprise the snap
+parts:
+  # The main part which defines how to build the application in the snap
+  seaweedfs:
+    # This part needs a newer version of golang, so we use a separate part
+    # which defines how to get a newer golang during the build
+    after: [go]
+    # The go plugin knows how to build go applications into a snap
+    plugin: go
+    # Snapcraft will look in this location for the source of the application
+    source: .
+    go-importpath: github.com/chrislusf/seaweedfs
+  go:
+    # Defines the version of golang which will be bootstrapped into the snap
+    source-tag: go1.14
+
+# Apps exposes the binaries inside the snap to the host system once installed
+apps:
+  # We expose the weed command.
+  # This differs from the snap name, so it will be namespaced as seaweedfs.weed
+  # An alias can be added to expose this as 'weed' if requested in the snapcraft forum
+  weed:
+    # The path to the binary inside the snap, relative to the $SNAP home
+    command: bin/weed
+    # Plugs connect the snap to resources on the host system. We enable network connectivity
+    # We also add home and removable-media (latter not autoconnected by default)
+    # so users can access files in their home or on removable disks
+    plugs:
+      - network
+      - network-bind
+      - home
+      - removable-media
diff --git a/test/sample.idx b/test/data/sample.idx
similarity index 100%
rename from test/sample.idx
rename to test/data/sample.idx
diff --git a/test/random_access/pom.xml b/test/random_access/pom.xml
new file mode 100644
index 000000000..36fe6b256
--- /dev/null
+++ b/test/random_access/pom.xml
@@ -0,0 +1,58 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+    <modelVersion>4.0.0</modelVersion>
+    <groupId>com.seaweedfs.test</groupId>
+    <artifactId>random_access</artifactId>
+    <packaging>jar</packaging>
+    <version>1.0-SNAPSHOT</version>
+
+    <properties>
+        <guava.version>30.0-jre</guava.version>
+    </properties>
+
+    <dependencies>
+        <dependency>
+            <groupId>com.google.guava</groupId>
+            <artifactId>guava</artifactId>
+            <version>${guava.version}</version>
+        </dependency>
+        <dependency>
+            <groupId>org.slf4j</groupId>
+            <artifactId>slf4j-api</artifactId>
+            <version>1.7.25</version>
+        </dependency>
+        <dependency>
+            <groupId>junit</groupId>
+            <artifactId>junit</artifactId>
+            <version>4.13.1</version>
+            <scope>test</scope>
+        </dependency>
+        <dependency>
+            <groupId>com.esotericsoftware.kryo</groupId>
+            <artifactId>kryo</artifactId>
+            <version>2.24.0</version>
+        </dependency>
+    </dependencies>
+
+    <build>
+        <extensions>
+            <extension>
+                <groupId>kr.motd.maven</groupId>
+                <artifactId>os-maven-plugin</artifactId>
+                <version>1.6.2</version>
+            </extension>
+        </extensions>
+        <plugins>
+            <plugin>
+                <groupId>org.apache.maven.plugins</groupId>
+                <artifactId>maven-compiler-plugin</artifactId>
+                <configuration>
+                    <source>8</source>
+                    <target>8</target>
+                </configuration>
+            </plugin>
+        </plugins>
+    </build>
+</project>
diff --git a/test/random_access/src/main/java/seaweedfs/client/btree/BTreePersistentIndexedCache.java b/test/random_access/src/main/java/seaweedfs/client/btree/BTreePersistentIndexedCache.java
new file mode 100644
index 000000000..8409c40b3
--- /dev/null
+++ b/test/random_access/src/main/java/seaweedfs/client/btree/BTreePersistentIndexedCache.java
@@ -0,0 +1,753 @@
+/*
+ * Copyright 2010 the original author or authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package seaweedfs.client.btree;
+
+import com.google.common.collect.ImmutableSet;
+import seaweedfs.client.btree.serialize.Serializer;
+import seaweedfs.client.btree.serialize.kryo.KryoBackedDecoder;
+import seaweedfs.client.btree.serialize.kryo.KryoBackedEncoder;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.DataInputStream;
+import java.io.DataOutputStream;
+import java.io.File;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.Comparator;
+import java.util.List;
+
+// todo - stream serialised value to file
+// todo - handle hash collisions (properly, this time)
+// todo - don't store null links to child blocks in leaf index blocks
+// todo - align block boundaries
+// todo - thread safety control
+// todo - merge small values into a single data block
+// todo - discard when file corrupt
+// todo - include data directly in index entry when serializer can guarantee small fixed sized data
+// todo - free list leaks disk space
+// todo - merge adjacent free blocks
+// todo - use more efficient lookup for free block with nearest size
+@SuppressWarnings("unchecked")
+public class BTreePersistentIndexedCache<K, V> {
+    private static final Logger LOGGER = LoggerFactory.getLogger(BTreePersistentIndexedCache.class);
+    private final File cacheFile;
+    private final KeyHasher<K> keyHasher;
+    private final Serializer<V> serializer;
+    private final short maxChildIndexEntries;
+    private final int minIndexChildNodes;
+    private final StateCheckBlockStore store;
+    private HeaderBlock header;
+
+    public BTreePersistentIndexedCache(File cacheFile, Serializer<K> keySerializer, Serializer<V> valueSerializer) {
+        this(cacheFile, keySerializer, valueSerializer, (short) 512, 512);
+    }
+
+    public BTreePersistentIndexedCache(File cacheFile, Serializer<K> keySerializer, Serializer<V> valueSerializer,
+                                       short maxChildIndexEntries, int maxFreeListEntries) {
+        this.cacheFile = cacheFile;
+        this.keyHasher = new KeyHasher<K>(keySerializer);
+        this.serializer = valueSerializer;
+        this.maxChildIndexEntries = maxChildIndexEntries;
+        this.minIndexChildNodes = maxChildIndexEntries / 2;
+        BlockStore cachingStore = new CachingBlockStore(new FileBackedBlockStore(cacheFile), ImmutableSet.of(IndexBlock.class, FreeListBlockStore.FreeListBlock.class));
+        this.store = new StateCheckBlockStore(new FreeListBlockStore(cachingStore, maxFreeListEntries));
+        try {
+            open();
+        } catch (Exception e) {
+            throw new UncheckedIOException(String.format("Could not open %s.", this), e);
+        }
+    }
+
+    @Override
+    public String toString() {
+        return "cache " + cacheFile.getName() + " (" + cacheFile + ")";
+    }
+
+    private void open() throws Exception {
+        LOGGER.debug("Opening {}", this);
+        try {
+            doOpen();
+        } catch (CorruptedCacheException e) {
+            rebuild();
+        }
+    }
+
+    private void doOpen() throws Exception {
+        BlockStore.Factory factory = new BlockStore.Factory() {
+            @Override
+            public Object create(Class<? extends BlockPayload> type) {
+                if (type == HeaderBlock.class) {
+                    return new HeaderBlock();
+                }
+                if (type == IndexBlock.class) {
+                    return new IndexBlock();
+                }
+                if (type == DataBlock.class) {
+                    return new DataBlock();
+                }
+                throw new UnsupportedOperationException();
+            }
+        };
+        Runnable initAction = new Runnable() {
+            @Override
+            public void run() {
+                header = new HeaderBlock();
+                store.write(header);
+                header.index.newRoot();
+                store.flush();
+            }
+        };
+
+        store.open(initAction, factory);
+        header = store.readFirst(HeaderBlock.class);
+    }
+
+    public V get(K key) {
+        try {
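+            // Two levels of error handling: the inner try (below) catches
+            // CorruptedCacheException and recovers by rebuilding the cache,
+            // while this outer try wraps any other failure in an UncheckedIOException.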
+ try { + DataBlock block = header.getRoot().get(key); + if (block != null) { + return block.getValue(); + } + return null; + } catch (CorruptedCacheException e) { + rebuild(); + return null; + } + } catch (Exception e) { + throw new UncheckedIOException(String.format("Could not read entry '%s' from %s.", key, this), e); + } + } + + public void put(K key, V value) { + try { + long hashCode = keyHasher.getHashCode(key); + Lookup lookup = header.getRoot().find(hashCode); + DataBlock newBlock = null; + if (lookup.entry != null) { + DataBlock block = store.read(lookup.entry.dataBlock, DataBlock.class); + DataBlockUpdateResult updateResult = block.useNewValue(value); + if (updateResult.isFailed()) { + store.remove(block); + newBlock = new DataBlock(value, updateResult.getSerializedValue()); + } + } else { + newBlock = new DataBlock(value); + } + if (newBlock != null) { + store.write(newBlock); + lookup.indexBlock.put(hashCode, newBlock.getPos()); + } + store.flush(); + } catch (Exception e) { + throw new UncheckedIOException(String.format("Could not add entry '%s' to %s.", key, this), e); + } + } + + public void remove(K key) { + try { + Lookup lookup = header.getRoot().find(key); + if (lookup.entry == null) { + return; + } + lookup.indexBlock.remove(lookup.entry); + DataBlock block = store.read(lookup.entry.dataBlock, DataBlock.class); + store.remove(block); + store.flush(); + } catch (Exception e) { + throw new UncheckedIOException(String.format("Could not remove entry '%s' from %s.", key, this), e); + } + } + + private IndexBlock load(BlockPointer pos, IndexRoot root, IndexBlock parent, int index) { + IndexBlock block = store.read(pos, IndexBlock.class); + block.root = root; + block.parent = parent; + block.parentEntryIndex = index; + return block; + } + + public void reset() { + close(); + try { + open(); + } catch (Exception e) { + throw new UncheckedIOException(e); + } + } + + public void close() { + LOGGER.debug("Closing {}", this); + try { + store.close(); + } catch (Exception e) { + throw new UncheckedIOException(e); + } + } + + public boolean isOpen() { + return store.isOpen(); + } + + private void rebuild() { + LOGGER.warn("{} is corrupt. Discarding.", this); + try { + clear(); + } catch (Exception e) { + LOGGER.warn("{} couldn't be rebuilt. 
Closing.", this); + close(); + } + } + + public void verify() { + try { + doVerify(); + } catch (Exception e) { + throw new UncheckedIOException(String.format("Some problems were found when checking the integrity of %s.", + this), e); + } + } + + private void doVerify() throws Exception { + List blocks = new ArrayList(); + + HeaderBlock header = store.readFirst(HeaderBlock.class); + blocks.add(header); + verifyTree(header.getRoot(), "", blocks, Long.MAX_VALUE, true); + + Collections.sort(blocks, new Comparator() { + @Override + public int compare(BlockPayload block, BlockPayload block1) { + return block.getPos().compareTo(block1.getPos()); + } + }); + + for (int i = 0; i < blocks.size() - 1; i++) { + Block b1 = blocks.get(i).getBlock(); + Block b2 = blocks.get(i + 1).getBlock(); + if (b1.getPos().getPos() + b1.getSize() > b2.getPos().getPos()) { + throw new IOException(String.format("%s overlaps with %s", b1, b2)); + } + } + } + + private void verifyTree(IndexBlock current, String prefix, Collection blocks, long maxValue, + boolean loadData) throws Exception { + blocks.add(current); + + if (!prefix.equals("") && current.entries.size() < maxChildIndexEntries / 2) { + throw new IOException(String.format("Too few entries found in %s", current)); + } + if (current.entries.size() > maxChildIndexEntries) { + throw new IOException(String.format("Too many entries found in %s", current)); + } + + boolean isLeaf = current.entries.size() == 0 || current.entries.get(0).childIndexBlock.isNull(); + if (isLeaf ^ current.tailPos.isNull()) { + throw new IOException(String.format("Mismatched leaf/tail-node in %s", current)); + } + + long min = Long.MIN_VALUE; + for (IndexEntry entry : current.entries) { + if (isLeaf ^ entry.childIndexBlock.isNull()) { + throw new IOException(String.format("Mismatched leaf/non-leaf entry in %s", current)); + } + if (entry.hashCode >= maxValue || entry.hashCode <= min) { + throw new IOException(String.format("Out-of-order key in %s", current)); + } + min = entry.hashCode; + if (!entry.childIndexBlock.isNull()) { + IndexBlock child = store.read(entry.childIndexBlock, IndexBlock.class); + verifyTree(child, " " + prefix, blocks, entry.hashCode, loadData); + } + if (loadData) { + DataBlock block = store.read(entry.dataBlock, DataBlock.class); + blocks.add(block); + } + } + if (!current.tailPos.isNull()) { + IndexBlock tail = store.read(current.tailPos, IndexBlock.class); + verifyTree(tail, " " + prefix, blocks, maxValue, loadData); + } + } + + public void clear() { + store.clear(); + close(); + try { + doOpen(); + } catch (Exception e) { + throw new UncheckedIOException(e); + } + } + + private class IndexRoot { + private BlockPointer rootPos = BlockPointer.start(); + private HeaderBlock owner; + + private IndexRoot(HeaderBlock owner) { + this.owner = owner; + } + + public void setRootPos(BlockPointer rootPos) { + this.rootPos = rootPos; + store.write(owner); + } + + public IndexBlock getRoot() { + return load(rootPos, this, null, 0); + } + + public IndexBlock newRoot() { + IndexBlock block = new IndexBlock(); + store.write(block); + setRootPos(block.getPos()); + return block; + } + } + + private class HeaderBlock extends BlockPayload { + private IndexRoot index; + + private HeaderBlock() { + index = new IndexRoot(this); + } + + @Override + protected byte getType() { + return 0x55; + } + + @Override + protected int getSize() { + return Block.LONG_SIZE + Block.SHORT_SIZE; + } + + @Override + protected void read(DataInputStream instr) throws Exception { + index.rootPos = 
BlockPointer.pos(instr.readLong()); + + short actualChildIndexEntries = instr.readShort(); + if (actualChildIndexEntries != maxChildIndexEntries) { + throw blockCorruptedException(); + } + } + + @Override + protected void write(DataOutputStream outstr) throws Exception { + outstr.writeLong(index.rootPos.getPos()); + outstr.writeShort(maxChildIndexEntries); + } + + public IndexBlock getRoot() throws Exception { + return index.getRoot(); + } + } + + private class IndexBlock extends BlockPayload { + private final List entries = new ArrayList(); + private BlockPointer tailPos = BlockPointer.start(); + // Transient fields + private IndexBlock parent; + private int parentEntryIndex; + private IndexRoot root; + + @Override + protected byte getType() { + return 0x77; + } + + @Override + protected int getSize() { + return Block.INT_SIZE + Block.LONG_SIZE + (3 * Block.LONG_SIZE) * maxChildIndexEntries; + } + + @Override + public void read(DataInputStream instr) throws IOException { + int count = instr.readInt(); + entries.clear(); + for (int i = 0; i < count; i++) { + IndexEntry entry = new IndexEntry(); + entry.hashCode = instr.readLong(); + entry.dataBlock = BlockPointer.pos(instr.readLong()); + entry.childIndexBlock = BlockPointer.pos(instr.readLong()); + entries.add(entry); + } + tailPos = BlockPointer.pos(instr.readLong()); + } + + @Override + public void write(DataOutputStream outstr) throws IOException { + outstr.writeInt(entries.size()); + for (IndexEntry entry : entries) { + outstr.writeLong(entry.hashCode); + outstr.writeLong(entry.dataBlock.getPos()); + outstr.writeLong(entry.childIndexBlock.getPos()); + } + outstr.writeLong(tailPos.getPos()); + } + + public void put(long hashCode, BlockPointer pos) throws Exception { + int index = Collections.binarySearch(entries, new IndexEntry(hashCode)); + IndexEntry entry; + if (index >= 0) { + entry = entries.get(index); + } else { + assert tailPos.isNull(); + entry = new IndexEntry(); + entry.hashCode = hashCode; + entry.childIndexBlock = BlockPointer.start(); + index = -index - 1; + entries.add(index, entry); + } + + entry.dataBlock = pos; + store.write(this); + + maybeSplit(); + } + + private void maybeSplit() throws Exception { + if (entries.size() > maxChildIndexEntries) { + int splitPos = entries.size() / 2; + IndexEntry splitEntry = entries.remove(splitPos); + if (parent == null) { + parent = root.newRoot(); + } + IndexBlock sibling = new IndexBlock(); + store.write(sibling); + List siblingEntries = entries.subList(splitPos, entries.size()); + sibling.entries.addAll(siblingEntries); + siblingEntries.clear(); + sibling.tailPos = tailPos; + tailPos = splitEntry.childIndexBlock; + splitEntry.childIndexBlock = BlockPointer.start(); + parent.add(this, splitEntry, sibling); + } + } + + private void add(IndexBlock left, IndexEntry entry, IndexBlock right) throws Exception { + int index = left.parentEntryIndex; + if (index < entries.size()) { + IndexEntry parentEntry = entries.get(index); + assert parentEntry.childIndexBlock.equals(left.getPos()); + parentEntry.childIndexBlock = right.getPos(); + } else { + assert index == entries.size() && (tailPos.isNull() || tailPos.equals(left.getPos())); + tailPos = right.getPos(); + } + entries.add(index, entry); + entry.childIndexBlock = left.getPos(); + store.write(this); + + maybeSplit(); + } + + public DataBlock get(K key) throws Exception { + Lookup lookup = find(key); + if (lookup.entry == null) { + return null; + } + + return store.read(lookup.entry.dataBlock, DataBlock.class); + } + + public Lookup 
find(K key) throws Exception { + long checksum = keyHasher.getHashCode(key); + return find(checksum); + } + + private Lookup find(long hashCode) throws Exception { + int index = Collections.binarySearch(entries, new IndexEntry(hashCode)); + if (index >= 0) { + return new Lookup(this, entries.get(index)); + } + + index = -index - 1; + BlockPointer childBlockPos; + if (index == entries.size()) { + childBlockPos = tailPos; + } else { + childBlockPos = entries.get(index).childIndexBlock; + } + if (childBlockPos.isNull()) { + return new Lookup(this, null); + } + + IndexBlock childBlock = load(childBlockPos, root, this, index); + return childBlock.find(hashCode); + } + + public void remove(IndexEntry entry) throws Exception { + int index = entries.indexOf(entry); + assert index >= 0; + entries.remove(index); + store.write(this); + + if (entry.childIndexBlock.isNull()) { + maybeMerge(); + } else { + // Not a leaf node. Move up an entry from a leaf node, then possibly merge the leaf node + IndexBlock leafBlock = load(entry.childIndexBlock, root, this, index); + leafBlock = leafBlock.findHighestLeaf(); + IndexEntry highestEntry = leafBlock.entries.remove(leafBlock.entries.size() - 1); + highestEntry.childIndexBlock = entry.childIndexBlock; + entries.add(index, highestEntry); + store.write(leafBlock); + leafBlock.maybeMerge(); + } + } + + private void maybeMerge() throws Exception { + if (parent == null) { + // This is the root block. Can have any number of children <= maxChildIndexEntries + if (entries.size() == 0 && !tailPos.isNull()) { + // This is an empty root block, discard it + header.index.setRootPos(tailPos); + store.remove(this); + } + return; + } + + // This is not the root block. Must have children >= minIndexChildNodes + if (entries.size() >= minIndexChildNodes) { + return; + } + + // Attempt to merge with the left sibling + IndexBlock left = parent.getPrevious(this); + if (left != null) { + assert entries.size() + left.entries.size() <= maxChildIndexEntries * 2; + if (left.entries.size() > minIndexChildNodes) { + // There are enough entries in this block and the left sibling to make up 2 blocks, so redistribute + // the entries evenly between them + left.mergeFrom(this); + left.maybeSplit(); + return; + } else { + // There are only enough entries to make up 1 block, so move the entries of the left sibling into + // this block and discard the left sibling. Might also need to merge the parent + left.mergeFrom(this); + parent.maybeMerge(); + return; + } + } + + // Attempt to merge with the right sibling + IndexBlock right = parent.getNext(this); + if (right != null) { + assert entries.size() + right.entries.size() <= maxChildIndexEntries * 2; + if (right.entries.size() > minIndexChildNodes) { + // There are enough entries in this block and the right sibling to make up 2 blocks, so redistribute + // the entries evenly between them + mergeFrom(right); + maybeSplit(); + return; + } else { + // There are only enough entries to make up 1 block, so move the entries of the right sibling into + // this block and discard this block. 
Might also need to merge the parent
+                    mergeFrom(right);
+                    parent.maybeMerge();
+                    return;
+                }
+            }
+
+            // Should not happen
+            throw new IllegalStateException(String.format("%s does not have any siblings.", getBlock()));
+        }
+
+        private void mergeFrom(IndexBlock right) throws Exception {
+            IndexEntry newChildEntry = parent.entries.remove(parentEntryIndex);
+            if (right.getPos().equals(parent.tailPos)) {
+                parent.tailPos = getPos();
+            } else {
+                IndexEntry newParentEntry = parent.entries.get(parentEntryIndex);
+                assert newParentEntry.childIndexBlock.equals(right.getPos());
+                newParentEntry.childIndexBlock = getPos();
+            }
+            entries.add(newChildEntry);
+            entries.addAll(right.entries);
+            newChildEntry.childIndexBlock = tailPos;
+            tailPos = right.tailPos;
+            store.write(parent);
+            store.write(this);
+            store.remove(right);
+        }
+
+        private IndexBlock getNext(IndexBlock indexBlock) throws Exception {
+            int index = indexBlock.parentEntryIndex + 1;
+            if (index > entries.size()) {
+                return null;
+            }
+            if (index == entries.size()) {
+                return load(tailPos, root, this, index);
+            }
+            return load(entries.get(index).childIndexBlock, root, this, index);
+        }
+
+        private IndexBlock getPrevious(IndexBlock indexBlock) throws Exception {
+            int index = indexBlock.parentEntryIndex - 1;
+            if (index < 0) {
+                return null;
+            }
+            return load(entries.get(index).childIndexBlock, root, this, index);
+        }
+
+        private IndexBlock findHighestLeaf() throws Exception {
+            if (tailPos.isNull()) {
+                return this;
+            }
+            return load(tailPos, root, this, entries.size()).findHighestLeaf();
+        }
+    }
+
+    private static class IndexEntry implements Comparable<IndexEntry> {
+        long hashCode;
+        BlockPointer dataBlock;
+        BlockPointer childIndexBlock;
+
+        private IndexEntry() {
+        }
+
+        private IndexEntry(long hashCode) {
+            this.hashCode = hashCode;
+        }
+
+        @Override
+        public int compareTo(IndexEntry indexEntry) {
+            if (hashCode > indexEntry.hashCode) {
+                return 1;
+            }
+            if (hashCode < indexEntry.hashCode) {
+                return -1;
+            }
+            return 0;
+        }
+    }
+
+    private class Lookup {
+        final IndexBlock indexBlock;
+        final IndexEntry entry;
+
+        private Lookup(IndexBlock indexBlock, IndexEntry entry) {
+            this.indexBlock = indexBlock;
+            this.entry = entry;
+        }
+    }
+
+    private class DataBlock extends BlockPayload {
+        private int size;
+        private StreamByteBuffer buffer;
+        private V value;
+
+        private DataBlock() {
+        }
+
+        public DataBlock(V value) throws Exception {
+            this.value = value;
+            setValue(value);
+            size = buffer.totalBytesUnread();
+        }
+
+        public DataBlock(V value, StreamByteBuffer buffer) throws Exception {
+            this.value = value;
+            this.buffer = buffer;
+            size = buffer.totalBytesUnread();
+        }
+
+        public void setValue(V value) throws Exception {
+            buffer = StreamByteBuffer.createWithChunkSizeInDefaultRange(size);
+            KryoBackedEncoder encoder = new KryoBackedEncoder(buffer.getOutputStream());
+            serializer.write(encoder, value);
+            encoder.flush();
+        }
+
+        public V getValue() throws Exception {
+            if (value == null) {
+                value = serializer.read(new KryoBackedDecoder(buffer.getInputStream()));
+                buffer = null;
+            }
+            return value;
+        }
+
+        @Override
+        protected byte getType() {
+            return 0x33;
+        }
+
+        @Override
+        protected int getSize() {
+            return 2 * Block.INT_SIZE + size;
+        }
+
+        @Override
+        public void read(DataInputStream instr) throws Exception {
+            size = instr.readInt();
+            int bytes = instr.readInt();
+            buffer = StreamByteBuffer.of(instr, bytes);
+        }
+
+        @Override
+        public void write(DataOutputStream outstr) throws Exception {
+            outstr.writeInt(size);
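+            // On-disk layout: the first int is the value's allocated size (used by
+            // useNewValue to decide whether an updated value still fits in place);
+            // the next int records how many serialized bytes actually follow.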
outstr.writeInt(buffer.totalBytesUnread()); + buffer.writeTo(outstr); + buffer = null; + } + + public DataBlockUpdateResult useNewValue(V value) throws Exception { + setValue(value); + boolean ok = buffer.totalBytesUnread() <= size; + if (ok) { + this.value = value; + store.write(this); + return DataBlockUpdateResult.success(); + } else { + return DataBlockUpdateResult.failed(buffer); + } + } + } + + private static class DataBlockUpdateResult { + private static final DataBlockUpdateResult SUCCESS = new DataBlockUpdateResult(true, null); + private final boolean success; + private final StreamByteBuffer serializedValue; + + private DataBlockUpdateResult(boolean success, StreamByteBuffer serializedValue) { + this.success = success; + this.serializedValue = serializedValue; + } + + static DataBlockUpdateResult success() { + return SUCCESS; + } + + static DataBlockUpdateResult failed(StreamByteBuffer serializedValue) { + return new DataBlockUpdateResult(false, serializedValue); + } + + public boolean isFailed() { + return !success; + } + + public StreamByteBuffer getSerializedValue() { + return serializedValue; + } + } +} diff --git a/test/random_access/src/main/java/seaweedfs/client/btree/Block.java b/test/random_access/src/main/java/seaweedfs/client/btree/Block.java new file mode 100644 index 000000000..f3ecb2421 --- /dev/null +++ b/test/random_access/src/main/java/seaweedfs/client/btree/Block.java @@ -0,0 +1,59 @@ +/* + * Copyright 2009 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package seaweedfs.client.btree; + +public abstract class Block { + static final int LONG_SIZE = 8; + static final int INT_SIZE = 4; + static final int SHORT_SIZE = 2; + + private BlockPayload payload; + + protected Block(BlockPayload payload) { + this.payload = payload; + payload.setBlock(this); + } + + public BlockPayload getPayload() { + return payload; + } + + protected void detach() { + payload.setBlock(null); + payload = null; + } + + public abstract BlockPointer getPos(); + + public abstract int getSize(); + + public abstract RuntimeException blockCorruptedException(); + + @Override + public String toString() { + return payload.getClass().getSimpleName() + " " + getPos(); + } + + public BlockPointer getNextPos() { + return BlockPointer.pos(getPos().getPos() + getSize()); + } + + public abstract boolean hasPos(); + + public abstract void setPos(BlockPointer pos); + + public abstract void setSize(int size); +} diff --git a/test/random_access/src/main/java/seaweedfs/client/btree/BlockPayload.java b/test/random_access/src/main/java/seaweedfs/client/btree/BlockPayload.java new file mode 100644 index 000000000..d14af26c7 --- /dev/null +++ b/test/random_access/src/main/java/seaweedfs/client/btree/BlockPayload.java @@ -0,0 +1,51 @@ +/* + * Copyright 2009 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
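useNewValue() above is the only in-place update path: the value is re-serialized, and the write succeeds only if the new bytes still fit in the size recorded when the block was first written; otherwise the serialized form is handed back in DataBlockUpdateResult so the caller can place it in a fresh block. A minimal sketch of that size-gated rule, with an invented Slot class standing in for DataBlock:

    // Sketch only: a slot remembers the capacity it was created with, and an
    // update succeeds only when the new bytes still fit, mirroring useNewValue().
    class Slot {
        private final int capacity;   // size recorded when first written
        private byte[] data;

        Slot(byte[] initial) {
            this.capacity = initial.length;
            this.data = initial;
        }

        // true: updated in place; false: caller must allocate a new block.
        boolean tryUpdate(byte[] newData) {
            if (newData.length <= capacity) {
                data = newData;
                return true;
            }
            return false;
        }
    }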
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package seaweedfs.client.btree; + +import java.io.DataInputStream; +import java.io.DataOutputStream; + +public abstract class BlockPayload { + private Block block; + + public Block getBlock() { + return block; + } + + public void setBlock(Block block) { + this.block = block; + } + + public BlockPointer getPos() { + return getBlock().getPos(); + } + + public BlockPointer getNextPos() { + return getBlock().getNextPos(); + } + + protected abstract int getSize(); + + protected abstract byte getType(); + + protected abstract void read(DataInputStream inputStream) throws Exception; + + protected abstract void write(DataOutputStream outputStream) throws Exception; + + protected RuntimeException blockCorruptedException() { + return getBlock().blockCorruptedException(); + } +} diff --git a/test/random_access/src/main/java/seaweedfs/client/btree/BlockPointer.java b/test/random_access/src/main/java/seaweedfs/client/btree/BlockPointer.java new file mode 100644 index 000000000..38bff7d97 --- /dev/null +++ b/test/random_access/src/main/java/seaweedfs/client/btree/BlockPointer.java @@ -0,0 +1,75 @@ +/* + * Copyright 2009 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
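BlockPayload is the unit every store serializes: a subclass declares a one-byte type tag, its body size, and symmetric read/write methods, while the attached Block tracks where those bytes live in the file. A minimal conforming payload might look like the sketch below; CounterBlock and the 0x55 tag are invented for illustration, and the class is assumed to sit in the same package so Block.LONG_SIZE and the protected hooks are accessible.

    import java.io.DataInputStream;
    import java.io.DataOutputStream;

    // Illustrative payload holding a single long counter.
    class CounterBlock extends BlockPayload {
        private long count;

        @Override
        protected byte getType() {
            return 0x55; // must be unique among payload types in the store
        }

        @Override
        protected int getSize() {
            return Block.LONG_SIZE; // fixed 8-byte body
        }

        @Override
        protected void read(DataInputStream in) throws Exception {
            count = in.readLong();
        }

        @Override
        protected void write(DataOutputStream out) throws Exception {
            out.writeLong(count);
        }
    }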
+ */ +package seaweedfs.client.btree; + +import com.google.common.primitives.Longs; + +public class BlockPointer implements Comparable { + + private static final BlockPointer NULL = new BlockPointer(-1); + + public static BlockPointer start() { + return NULL; + } + + public static BlockPointer pos(long pos) { + if (pos < -1) { + throw new CorruptedCacheException("block pointer must be >= -1, but was" + pos); + } + if (pos == -1) { + return NULL; + } + return new BlockPointer(pos); + } + + private final long pos; + + private BlockPointer(long pos) { + this.pos = pos; + } + + public boolean isNull() { + return pos < 0; + } + + public long getPos() { + return pos; + } + + @Override + public String toString() { + return String.valueOf(pos); + } + + @Override + public boolean equals(Object obj) { + if (obj == null || obj.getClass() != getClass()) { + return false; + } + BlockPointer other = (BlockPointer) obj; + return pos == other.pos; + } + + @Override + public int hashCode() { + return Longs.hashCode(pos); + } + + @Override + public int compareTo(BlockPointer o) { + return Longs.compare(pos, o.pos); + } +} diff --git a/test/random_access/src/main/java/seaweedfs/client/btree/BlockStore.java b/test/random_access/src/main/java/seaweedfs/client/btree/BlockStore.java new file mode 100644 index 000000000..141eb70fe --- /dev/null +++ b/test/random_access/src/main/java/seaweedfs/client/btree/BlockStore.java @@ -0,0 +1,68 @@ +/* + * Copyright 2009 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package seaweedfs.client.btree; + +public interface BlockStore { + /** + * Opens this store, calling the given action if the store is empty. + */ + void open(Runnable initAction, Factory factory); + + /** + * Closes this store. + */ + void close(); + + /** + * Discards all blocks from this store. + */ + void clear(); + + /** + * Removes the given block from this store. + */ + void remove(BlockPayload block); + + /** + * Reads the first block from this store. + */ + T readFirst(Class payloadType); + + /** + * Reads a block from this store. + */ + T read(BlockPointer pos, Class payloadType); + + /** + * Writes a block to this store, adding the block if required. + */ + void write(BlockPayload block); + + /** + * Adds a new block to this store. Allocates space for the block, but does not write the contents of the block + * until {@link #write(BlockPayload)} is called. + */ + void attach(BlockPayload block); + + /** + * Flushes any pending updates for this store. + */ + void flush(); + + interface Factory { + Object create(Class type); + } +} diff --git a/test/random_access/src/main/java/seaweedfs/client/btree/BufferCaster.java b/test/random_access/src/main/java/seaweedfs/client/btree/BufferCaster.java new file mode 100644 index 000000000..a43160211 --- /dev/null +++ b/test/random_access/src/main/java/seaweedfs/client/btree/BufferCaster.java @@ -0,0 +1,30 @@ +/* + * Copyright 2018 the original author or authors. 
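BlockPointer reserves -1 as its single null sentinel: start() returns it, pos(-1) normalizes to it, and anything below -1 is treated as corruption. A short sketch of that contract (the wrapper class is only for illustration; run with -ea to check the asserts):

    public class BlockPointerDemo {
        public static void main(String[] args) {
            BlockPointer nil = BlockPointer.start();  // the null pointer, pos == -1
            assert nil.isNull();
            assert BlockPointer.pos(-1).isNull();     // -1 normalizes to the sentinel

            BlockPointer p = BlockPointer.pos(128L);  // a real file offset
            assert !p.isNull() && p.getPos() == 128L;

            // BlockPointer.pos(-2L);                 // would throw CorruptedCacheException
        }
    }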
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package seaweedfs.client.btree; + +import java.nio.Buffer; + +public class BufferCaster { + /** + * Without this cast, when the code compiled by Java 9+ is executed on Java 8, it will throw + * java.lang.NoSuchMethodError: Method flip()Ljava/nio/ByteBuffer; does not exist in class java.nio.ByteBuffer + */ + @SuppressWarnings("RedundantCast") + public static Buffer cast(T byteBuffer) { + return (Buffer) byteBuffer; + } +} diff --git a/test/random_access/src/main/java/seaweedfs/client/btree/ByteInput.java b/test/random_access/src/main/java/seaweedfs/client/btree/ByteInput.java new file mode 100644 index 000000000..2030a8cde --- /dev/null +++ b/test/random_access/src/main/java/seaweedfs/client/btree/ByteInput.java @@ -0,0 +1,74 @@ +/* + * Copyright 2014 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package seaweedfs.client.btree; + +import com.google.common.io.CountingInputStream; + +import java.io.BufferedInputStream; +import java.io.DataInputStream; +import java.io.IOException; +import java.io.InputStream; +import java.io.RandomAccessFile; + +/** + * Allows a stream of bytes to be read from a particular location of some backing byte stream. + */ +class ByteInput { + private final RandomAccessFile file; + private final ResettableBufferedInputStream bufferedInputStream; + private CountingInputStream countingInputStream; + + public ByteInput(RandomAccessFile file) { + this.file = file; + bufferedInputStream = new ResettableBufferedInputStream(new RandomAccessFileInputStream(file)); + } + + /** + * Starts reading from the given offset. + */ + public DataInputStream start(long offset) throws IOException { + file.seek(offset); + bufferedInputStream.clear(); + countingInputStream = new CountingInputStream(bufferedInputStream); + return new DataInputStream(countingInputStream); + } + + /** + * Returns the number of bytes read since {@link #start(long)} was called. + */ + public long getBytesRead() { + return countingInputStream.getCount(); + } + + /** + * Finishes reading, resetting any buffered state. 
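The cast in BufferCaster works around the JDK 9 covariant overrides on ByteBuffer: flip(), position(), and similar methods started returning ByteBuffer instead of Buffer, so bytecode compiled on JDK 9+ fails with NoSuchMethodError on Java 8 unless the call goes through the Buffer supertype. A small usage sketch:

    import java.nio.ByteBuffer;

    public class BufferCasterDemo {
        public static void main(String[] args) {
            ByteBuffer buf = ByteBuffer.allocate(16);
            buf.put((byte) 1);
            // Compiles against Buffer.flip() regardless of the JDK used to
            // build, so the same class file runs on Java 8 and Java 9+.
            BufferCaster.cast(buf).flip();
            System.out.println(buf.remaining()); // 1
        }
    }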
+ */ + public void done() { + countingInputStream = null; + } + + private static class ResettableBufferedInputStream extends BufferedInputStream { + ResettableBufferedInputStream(InputStream input) { + super(input); + } + + void clear() { + count = 0; + pos = 0; + } + } +} diff --git a/test/random_access/src/main/java/seaweedfs/client/btree/ByteOutput.java b/test/random_access/src/main/java/seaweedfs/client/btree/ByteOutput.java new file mode 100644 index 000000000..dfb24cfd0 --- /dev/null +++ b/test/random_access/src/main/java/seaweedfs/client/btree/ByteOutput.java @@ -0,0 +1,74 @@ +/* + * Copyright 2014 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package seaweedfs.client.btree; + +import com.google.common.io.CountingOutputStream; + +import java.io.BufferedOutputStream; +import java.io.DataOutputStream; +import java.io.IOException; +import java.io.OutputStream; +import java.io.RandomAccessFile; + +/** + * Allows a stream of bytes to be written to a particular location of some backing byte stream. + */ +class ByteOutput { + private final RandomAccessFile file; + private final ResettableBufferedOutputStream bufferedOutputStream; + private CountingOutputStream countingOutputStream; + + public ByteOutput(RandomAccessFile file) { + this.file = file; + bufferedOutputStream = new ResettableBufferedOutputStream(new RandomAccessFileOutputStream(file)); + } + + /** + * Starts writing to the given offset. Can be beyond the current length of the file. + */ + public DataOutputStream start(long offset) throws IOException { + file.seek(offset); + bufferedOutputStream.clear(); + countingOutputStream = new CountingOutputStream(bufferedOutputStream); + return new DataOutputStream(countingOutputStream); + } + + /** + * Returns the number of byte written since {@link #start(long)} was called. + */ + public long getBytesWritten() { + return countingOutputStream.getCount(); + } + + /** + * Finishes writing, flushing and resetting any buffered state + */ + public void done() throws IOException { + countingOutputStream.flush(); + countingOutputStream = null; + } + + private static class ResettableBufferedOutputStream extends BufferedOutputStream { + ResettableBufferedOutputStream(OutputStream output) { + super(output); + } + + void clear() { + count = 0; + } + } +} diff --git a/test/random_access/src/main/java/seaweedfs/client/btree/CachingBlockStore.java b/test/random_access/src/main/java/seaweedfs/client/btree/CachingBlockStore.java new file mode 100644 index 000000000..308838b1d --- /dev/null +++ b/test/random_access/src/main/java/seaweedfs/client/btree/CachingBlockStore.java @@ -0,0 +1,129 @@ +/* + * Copyright 2009 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
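ByteOutput and ByteInput share a strict session protocol: start(offset) seeks and resets the buffer, the returned Data*Stream carries the actual I/O, the byte counters report how much moved, and done() ends the session. A hedged round-trip sketch (the temp-file path is illustrative):

    import java.io.DataInputStream;
    import java.io.DataOutputStream;
    import java.io.File;
    import java.io.IOException;
    import java.io.RandomAccessFile;

    public class ByteIoDemo {
        public static void main(String[] args) throws IOException {
            File f = File.createTempFile("byteio", ".bin");
            try (RandomAccessFile raf = new RandomAccessFile(f, "rw")) {
                ByteOutput out = new ByteOutput(raf);
                DataOutputStream os = out.start(0);   // seek to offset 0
                os.writeInt(42);
                long written = out.getBytesWritten(); // 4, read before done()
                out.done();                           // flush buffered bytes

                ByteInput in = new ByteInput(raf);
                DataInputStream is = in.start(0);
                int value = is.readInt();             // 42
                in.done();
                System.out.println(value + " (" + written + " bytes)");
            }
        }
    }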
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package seaweedfs.client.btree; + +import com.google.common.cache.Cache; +import com.google.common.cache.CacheBuilder; +import com.google.common.collect.ImmutableSet; + +import javax.annotation.Nullable; +import java.util.Collection; +import java.util.Iterator; +import java.util.LinkedHashMap; +import java.util.Map; + +public class CachingBlockStore implements BlockStore { + private final BlockStore store; + private final Map dirty = new LinkedHashMap(); + private final Cache indexBlockCache = CacheBuilder.newBuilder().maximumSize(100).concurrencyLevel(1).build(); + private final ImmutableSet> cacheableBlockTypes; + + public CachingBlockStore(BlockStore store, Collection> cacheableBlockTypes) { + this.store = store; + this.cacheableBlockTypes = ImmutableSet.copyOf(cacheableBlockTypes); + } + + @Override + public void open(Runnable initAction, Factory factory) { + store.open(initAction, factory); + } + + @Override + public void close() { + flush(); + indexBlockCache.invalidateAll(); + store.close(); + } + + @Override + public void clear() { + dirty.clear(); + indexBlockCache.invalidateAll(); + store.clear(); + } + + @Override + public void flush() { + Iterator iterator = dirty.values().iterator(); + while (iterator.hasNext()) { + BlockPayload block = iterator.next(); + iterator.remove(); + store.write(block); + } + store.flush(); + } + + @Override + public void attach(BlockPayload block) { + store.attach(block); + } + + @Override + public void remove(BlockPayload block) { + dirty.remove(block.getPos()); + if (isCacheable(block)) { + indexBlockCache.invalidate(block.getPos()); + } + store.remove(block); + } + + @Override + public T readFirst(Class payloadType) { + T block = store.readFirst(payloadType); + maybeCache(block); + return block; + } + + @Override + public T read(BlockPointer pos, Class payloadType) { + T block = payloadType.cast(dirty.get(pos)); + if (block != null) { + return block; + } + block = maybeGetFromCache(pos, payloadType); + if (block != null) { + return block; + } + block = store.read(pos, payloadType); + maybeCache(block); + return block; + } + + @Nullable + private T maybeGetFromCache(BlockPointer pos, Class payloadType) { + if (cacheableBlockTypes.contains(payloadType)) { + return payloadType.cast(indexBlockCache.getIfPresent(pos)); + } + return null; + } + + @Override + public void write(BlockPayload block) { + store.attach(block); + maybeCache(block); + dirty.put(block.getPos(), block); + } + + private void maybeCache(T block) { + if (isCacheable(block)) { + indexBlockCache.put(block.getPos(), block); + } + } + + private boolean isCacheable(T block) { + return cacheableBlockTypes.contains(block.getClass()); + } +} diff --git a/test/random_access/src/main/java/seaweedfs/client/btree/CorruptedCacheException.java b/test/random_access/src/main/java/seaweedfs/client/btree/CorruptedCacheException.java new file mode 100644 index 000000000..8f9ac1240 --- /dev/null +++ b/test/random_access/src/main/java/seaweedfs/client/btree/CorruptedCacheException.java @@ -0,0 +1,22 @@ +/* + * Copyright 2009 the original author or authors. 
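CachingBlockStore is a write-back decorator: write() only attaches the block and parks it in an insertion-ordered dirty map, and the backing store sees the bytes on flush() or close(), in write order. A stripped-down sketch of that buffering discipline (the names are invented; the real class additionally keeps the read cache for index blocks shown above):

    import java.util.LinkedHashMap;
    import java.util.Map;
    import java.util.function.BiConsumer;

    // Minimal write-back buffer: mutations accumulate in insertion order and
    // are replayed against the slow store only when flushed.
    class WriteBackBuffer<K, V> {
        private final Map<K, V> dirty = new LinkedHashMap<>();

        void write(K key, V value) {
            dirty.put(key, value);          // no I/O yet
        }

        void flush(BiConsumer<K, V> backingWrite) {
            for (Map.Entry<K, V> e : dirty.entrySet()) {
                backingWrite.accept(e.getKey(), e.getValue());
            }
            dirty.clear();
        }
    }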
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package seaweedfs.client.btree; + +class CorruptedCacheException extends RuntimeException { + CorruptedCacheException(String message) { + super(message); + } +} diff --git a/test/random_access/src/main/java/seaweedfs/client/btree/FileBackedBlockStore.java b/test/random_access/src/main/java/seaweedfs/client/btree/FileBackedBlockStore.java new file mode 100644 index 000000000..556db3647 --- /dev/null +++ b/test/random_access/src/main/java/seaweedfs/client/btree/FileBackedBlockStore.java @@ -0,0 +1,274 @@ +/* + * Copyright 2009 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package seaweedfs.client.btree; + +import java.io.DataInputStream; +import java.io.DataOutputStream; +import java.io.File; +import java.io.FileNotFoundException; +import java.io.IOException; +import java.io.RandomAccessFile; + +public class FileBackedBlockStore implements BlockStore { + private final File cacheFile; + private RandomAccessFile file; + private ByteOutput output; + private ByteInput input; + private long nextBlock; + private Factory factory; + private long currentFileSize; + + public FileBackedBlockStore(File cacheFile) { + this.cacheFile = cacheFile; + } + + @Override + public String toString() { + return "cache '" + cacheFile + "'"; + } + + @Override + public void open(Runnable runnable, Factory factory) { + this.factory = factory; + try { + cacheFile.getParentFile().mkdirs(); + file = openRandomAccessFile(); + output = new ByteOutput(file); + input = new ByteInput(file); + currentFileSize = file.length(); + nextBlock = currentFileSize; + if (currentFileSize == 0) { + runnable.run(); + } + } catch (IOException e) { + throw new UncheckedIOException(e); + } + } + + private RandomAccessFile openRandomAccessFile() throws FileNotFoundException { + try { + return randomAccessFile("rw"); + } catch (FileNotFoundException e) { + return randomAccessFile("r"); + } + } + + private RandomAccessFile randomAccessFile(String mode) throws FileNotFoundException { + return new RandomAccessFile(cacheFile, mode); + } + + @Override + public void close() { + try { + file.close(); + } catch (IOException e) { + throw new UncheckedIOException(e); + } + } + + @Override + public void clear() { + try { + file.setLength(0); + currentFileSize = 0; + } catch (IOException e) { + throw new UncheckedIOException(e); + } + nextBlock = 0; + } + + @Override + public void attach(BlockPayload block) { + if (block.getBlock() == null) { + 
block.setBlock(new BlockImpl(block)); + } + } + + @Override + public void remove(BlockPayload block) { + BlockImpl blockImpl = (BlockImpl) block.getBlock(); + blockImpl.detach(); + } + + @Override + public void flush() { + } + + @Override + public T readFirst(Class payloadType) { + return read(BlockPointer.pos(0), payloadType); + } + + @Override + public T read(BlockPointer pos, Class payloadType) { + assert !pos.isNull(); + try { + T payload = payloadType.cast(factory.create(payloadType)); + BlockImpl block = new BlockImpl(payload, pos); + block.read(); + return payload; + } catch (CorruptedCacheException e) { + throw e; + } catch (Exception e) { + throw new UncheckedIOException(e); + } + } + + @Override + public void write(BlockPayload block) { + BlockImpl blockImpl = (BlockImpl) block.getBlock(); + try { + blockImpl.write(); + } catch (CorruptedCacheException e) { + throw e; + } catch (Exception e) { + throw new UncheckedIOException(e); + } + } + + private long alloc(long length) { + long pos = nextBlock; + nextBlock += length; + return pos; + } + + private final class BlockImpl extends Block { + private static final int HEADER_SIZE = 1 + INT_SIZE; // type, payload size + private static final int TAIL_SIZE = INT_SIZE; + + private BlockPointer pos; + private int payloadSize; + + private BlockImpl(BlockPayload payload, BlockPointer pos) { + this(payload); + setPos(pos); + } + + public BlockImpl(BlockPayload payload) { + super(payload); + pos = null; + payloadSize = -1; + } + + @Override + public boolean hasPos() { + return pos != null; + } + + @Override + public BlockPointer getPos() { + if (pos == null) { + pos = BlockPointer.pos(alloc(getSize())); + } + return pos; + } + + @Override + public void setPos(BlockPointer pos) { + assert this.pos == null && !pos.isNull(); + this.pos = pos; + } + + @Override + public int getSize() { + if (payloadSize < 0) { + payloadSize = getPayload().getSize(); + } + return payloadSize + HEADER_SIZE + TAIL_SIZE; + } + + @Override + public void setSize(int size) { + int newPayloadSize = size - HEADER_SIZE - TAIL_SIZE; + assert newPayloadSize >= payloadSize; + payloadSize = newPayloadSize; + } + + public void write() throws Exception { + long pos = getPos().getPos(); + + DataOutputStream outputStream = output.start(pos); + + BlockPayload payload = getPayload(); + + // Write header + outputStream.writeByte(payload.getType()); + outputStream.writeInt(payloadSize); + long finalSize = pos + HEADER_SIZE + TAIL_SIZE + payloadSize; + + // Write body + payload.write(outputStream); + + // Write count + long bytesWritten = output.getBytesWritten(); + if (bytesWritten > Integer.MAX_VALUE) { + throw new IllegalArgumentException("Block payload exceeds maximum size"); + } + outputStream.writeInt((int) bytesWritten); + output.done(); + + // System.out.println(String.format("wrote [%d,%d)", pos, pos + bytesWritten + 4)); + + // Pad + if (currentFileSize < finalSize) { + // System.out.println(String.format("pad length %d => %d", currentFileSize, finalSize)); + file.setLength(finalSize); + currentFileSize = finalSize; + } + } + + public void read() throws Exception { + long pos = getPos().getPos(); + assert pos >= 0; + if (pos + HEADER_SIZE >= currentFileSize) { + throw blockCorruptedException(); + } + + DataInputStream inputStream = input.start(pos); + + BlockPayload payload = getPayload(); + + // Read header + byte type = inputStream.readByte(); + if (type != payload.getType()) { + throw blockCorruptedException(); + } + + // Read body + payloadSize = inputStream.readInt(); + 
if (pos + HEADER_SIZE + TAIL_SIZE + payloadSize > currentFileSize) { + throw blockCorruptedException(); + } + payload.read(inputStream); + + // Read and verify count + long actualCount = input.getBytesRead(); + long count = inputStream.readInt(); + if (actualCount != count) { + System.out.println(String.format("read expected %d actual %d, pos %d payloadSize %d currentFileSize %d", count, actualCount, pos, payloadSize, currentFileSize)); + throw blockCorruptedException(); + } + input.done(); + } + + @Override + public RuntimeException blockCorruptedException() { + return new CorruptedCacheException(String.format("Corrupted %s found in %s.", this, + FileBackedBlockStore.this)); + } + } + +} diff --git a/test/random_access/src/main/java/seaweedfs/client/btree/FreeListBlockStore.java b/test/random_access/src/main/java/seaweedfs/client/btree/FreeListBlockStore.java new file mode 100644 index 000000000..c2cd640f9 --- /dev/null +++ b/test/random_access/src/main/java/seaweedfs/client/btree/FreeListBlockStore.java @@ -0,0 +1,283 @@ +/* + * Copyright 2009 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package seaweedfs.client.btree; + +import java.io.DataInputStream; +import java.io.DataOutputStream; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; + +public class FreeListBlockStore implements BlockStore { + private final BlockStore store; + private final BlockStore freeListStore; + private final int maxBlockEntries; + private FreeListBlock freeListBlock; + + public FreeListBlockStore(BlockStore store, int maxBlockEntries) { + this.store = store; + freeListStore = this; + this.maxBlockEntries = maxBlockEntries; + } + + @Override + public void open(final Runnable initAction, final Factory factory) { + Runnable freeListInitAction = new Runnable() { + @Override + public void run() { + freeListBlock = new FreeListBlock(); + store.write(freeListBlock); + store.flush(); + initAction.run(); + } + }; + Factory freeListFactory = new Factory() { + @Override + public Object create(Class type) { + if (type == FreeListBlock.class) { + return new FreeListBlock(); + } + return factory.create(type); + } + }; + + store.open(freeListInitAction, freeListFactory); + freeListBlock = store.readFirst(FreeListBlock.class); + } + + @Override + public void close() { + freeListBlock = null; + store.close(); + } + + @Override + public void clear() { + store.clear(); + } + + @Override + public void remove(BlockPayload block) { + Block container = block.getBlock(); + store.remove(block); + freeListBlock.add(container.getPos(), container.getSize()); + } + + @Override + public T readFirst(Class payloadType) { + return store.read(freeListBlock.getNextPos(), payloadType); + } + + @Override + public T read(BlockPointer pos, Class payloadType) { + return store.read(pos, payloadType); + } + + @Override + public void write(BlockPayload block) { + attach(block); + store.write(block); + } + + @Override + public void attach(BlockPayload block) { + 
store.attach(block); + freeListBlock.alloc(block.getBlock()); + } + + @Override + public void flush() { + store.flush(); + } + + private void verify() { + FreeListBlock block = store.readFirst(FreeListBlock.class); + verify(block, Integer.MAX_VALUE); + } + + private void verify(FreeListBlock block, int maxValue) { + if (block.largestInNextBlock > maxValue) { + throw new RuntimeException("corrupt free list"); + } + int current = 0; + for (FreeListEntry entry : block.entries) { + if (entry.size > maxValue) { + throw new RuntimeException("corrupt free list"); + } + if (entry.size < block.largestInNextBlock) { + throw new RuntimeException("corrupt free list"); + } + if (entry.size < current) { + throw new RuntimeException("corrupt free list"); + } + current = entry.size; + } + if (!block.nextBlock.isNull()) { + verify(store.read(block.nextBlock, FreeListBlock.class), block.largestInNextBlock); + } + } + + public class FreeListBlock extends BlockPayload { + private List entries = new ArrayList(); + private int largestInNextBlock; + private BlockPointer nextBlock = BlockPointer.start(); + // Transient fields + private FreeListBlock prev; + private FreeListBlock next; + + @Override + protected int getSize() { + return Block.LONG_SIZE + Block.INT_SIZE + Block.INT_SIZE + maxBlockEntries * (Block.LONG_SIZE + + Block.INT_SIZE); + } + + @Override + protected byte getType() { + return 0x44; + } + + @Override + protected void read(DataInputStream inputStream) throws Exception { + nextBlock = BlockPointer.pos(inputStream.readLong()); + largestInNextBlock = inputStream.readInt(); + int count = inputStream.readInt(); + for (int i = 0; i < count; i++) { + BlockPointer pos = BlockPointer.pos(inputStream.readLong()); + int size = inputStream.readInt(); + entries.add(new FreeListEntry(pos, size)); + } + } + + @Override + protected void write(DataOutputStream outputStream) throws Exception { + outputStream.writeLong(nextBlock.getPos()); + outputStream.writeInt(largestInNextBlock); + outputStream.writeInt(entries.size()); + for (FreeListEntry entry : entries) { + outputStream.writeLong(entry.pos.getPos()); + outputStream.writeInt(entry.size); + } + } + + public void add(BlockPointer pos, int size) { + assert !pos.isNull() && size >= 0; + if (size == 0) { + return; + } + + if (size < largestInNextBlock) { + FreeListBlock next = getNextBlock(); + next.add(pos, size); + return; + } + + FreeListEntry entry = new FreeListEntry(pos, size); + int index = Collections.binarySearch(entries, entry); + if (index < 0) { + index = -index - 1; + } + entries.add(index, entry); + + if (entries.size() > maxBlockEntries) { + FreeListBlock newBlock = new FreeListBlock(); + newBlock.largestInNextBlock = largestInNextBlock; + newBlock.nextBlock = nextBlock; + newBlock.prev = this; + newBlock.next = next; + next = newBlock; + + List newBlockEntries = entries.subList(0, entries.size() / 2); + newBlock.entries.addAll(newBlockEntries); + newBlockEntries.clear(); + largestInNextBlock = newBlock.entries.get(newBlock.entries.size() - 1).size; + freeListStore.write(newBlock); + nextBlock = newBlock.getPos(); + } + + freeListStore.write(this); + } + + private FreeListBlock getNextBlock() { + if (next == null) { + next = freeListStore.read(nextBlock, FreeListBlock.class); + next.prev = this; + } + return next; + } + + public void alloc(Block block) { + if (block.hasPos()) { + return; + } + + int requiredSize = block.getSize(); + + if (entries.isEmpty() || requiredSize <= largestInNextBlock) { + if (nextBlock.isNull()) { + return; + } + 
getNextBlock().alloc(block); + return; + } + + int index = Collections.binarySearch(entries, new FreeListEntry(null, requiredSize)); + if (index < 0) { + index = -index - 1; + } + if (index == entries.size()) { + // Largest free block is too small + return; + } + + FreeListEntry entry = entries.remove(index); + block.setPos(entry.pos); + block.setSize(entry.size); + freeListStore.write(this); + + if (entries.size() == 0 && prev != null) { + prev.nextBlock = nextBlock; + prev.largestInNextBlock = largestInNextBlock; + prev.next = next; + if (next != null) { + next.prev = prev; + } + freeListStore.write(prev); + freeListStore.remove(this); + } + } + } + + private static class FreeListEntry implements Comparable { + final BlockPointer pos; + final int size; + + private FreeListEntry(BlockPointer pos, int size) { + this.pos = pos; + this.size = size; + } + + @Override + public int compareTo(FreeListEntry o) { + if (size > o.size) { + return 1; + } + if (size < o.size) { + return -1; + } + return 0; + } + } +} diff --git a/test/random_access/src/main/java/seaweedfs/client/btree/KeyHasher.java b/test/random_access/src/main/java/seaweedfs/client/btree/KeyHasher.java new file mode 100644 index 000000000..bdc78dde2 --- /dev/null +++ b/test/random_access/src/main/java/seaweedfs/client/btree/KeyHasher.java @@ -0,0 +1,75 @@ +/* + * Copyright 2014 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
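alloc() above relies on the entries list staying sorted by size: Collections.binarySearch either finds an exact size match or returns (-(insertionPoint) - 1), and negating that yields the smallest free entry that is large enough, a best-fit policy. The same -index - 1 idiom drives IndexBlock.find() when choosing a child to descend into. A self-contained sketch:

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.Collections;
    import java.util.List;

    public class BestFitDemo {
        public static void main(String[] args) {
            // Free entries kept sorted by size, as FreeListBlock maintains them.
            List<Integer> sizes = new ArrayList<>(Arrays.asList(8, 16, 64, 256));
            int required = 50;

            int index = Collections.binarySearch(sizes, required);
            if (index < 0) {
                index = -index - 1;   // first entry with size >= required
            }
            if (index == sizes.size()) {
                System.out.println("largest free block too small");
            } else {
                int granted = sizes.remove(index);
                System.out.println("allocated " + granted); // allocated 64
            }
        }
    }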
+ */ + +package seaweedfs.client.btree; + +import seaweedfs.client.btree.serialize.Serializer; +import seaweedfs.client.btree.serialize.kryo.KryoBackedEncoder; + +import java.io.IOException; +import java.io.OutputStream; +import java.math.BigInteger; +import java.security.MessageDigest; +import java.security.NoSuchAlgorithmException; + +class KeyHasher { + private final Serializer serializer; + private final MessageDigestStream digestStream = new MessageDigestStream(); + private final KryoBackedEncoder encoder = new KryoBackedEncoder(digestStream); + + public KeyHasher(Serializer serializer) { + this.serializer = serializer; + } + + long getHashCode(K key) throws Exception { + serializer.write(encoder, key); + encoder.flush(); + return digestStream.getChecksum(); + } + + private static class MessageDigestStream extends OutputStream { + MessageDigest messageDigest; + + private MessageDigestStream() { + try { + messageDigest = MessageDigest.getInstance("MD5"); + } catch (NoSuchAlgorithmException e) { + throw UncheckedException.throwAsUncheckedException(e); + } + } + + @Override + public void write(int b) throws IOException { + messageDigest.update((byte) b); + } + + @Override + public void write(byte[] b) throws IOException { + messageDigest.update(b); + } + + @Override + public void write(byte[] b, int off, int len) throws IOException { + messageDigest.update(b, off, len); + } + + long getChecksum() { + byte[] digest = messageDigest.digest(); + assert digest.length == 16; + return new BigInteger(digest).longValue(); + } + } +} diff --git a/test/random_access/src/main/java/seaweedfs/client/btree/RandomAccessFileInputStream.java b/test/random_access/src/main/java/seaweedfs/client/btree/RandomAccessFileInputStream.java new file mode 100644 index 000000000..5f876989f --- /dev/null +++ b/test/random_access/src/main/java/seaweedfs/client/btree/RandomAccessFileInputStream.java @@ -0,0 +1,54 @@ +/* + * Copyright 2013 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package seaweedfs.client.btree; + +import java.io.IOException; +import java.io.InputStream; +import java.io.RandomAccessFile; + +/** + * Reads from a {@link RandomAccessFile}. Each operation reads from and advances the current position of the file. + * + *
Closing this stream does not close the underlying file. + */ +public class RandomAccessFileInputStream extends InputStream { + private final RandomAccessFile file; + + public RandomAccessFileInputStream(RandomAccessFile file) { + this.file = file; + } + + @Override + public long skip(long n) throws IOException { + file.seek(file.getFilePointer() + n); + return n; + } + + @Override + public int read(byte[] bytes) throws IOException { + return file.read(bytes); + } + + @Override + public int read() throws IOException { + return file.read(); + } + + @Override + public int read(byte[] bytes, int offset, int length) throws IOException { + return file.read(bytes, offset, length); + } +} diff --git a/test/random_access/src/main/java/seaweedfs/client/btree/RandomAccessFileOutputStream.java b/test/random_access/src/main/java/seaweedfs/client/btree/RandomAccessFileOutputStream.java new file mode 100644 index 000000000..3327fe3c6 --- /dev/null +++ b/test/random_access/src/main/java/seaweedfs/client/btree/RandomAccessFileOutputStream.java @@ -0,0 +1,48 @@ +/* + * Copyright 2013 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package seaweedfs.client.btree; + +import java.io.IOException; +import java.io.OutputStream; +import java.io.RandomAccessFile; + +/** + * Writes to a {@link RandomAccessFile}. Each operation writes to and advances the current position of the file. + * + *
Closing this stream does not close the underlying file. Flushing this stream does nothing. + */ +public class RandomAccessFileOutputStream extends OutputStream { + private final RandomAccessFile file; + + public RandomAccessFileOutputStream(RandomAccessFile file) { + this.file = file; + } + + @Override + public void write(int i) throws IOException { + file.write(i); + } + + @Override + public void write(byte[] bytes) throws IOException { + file.write(bytes); + } + + @Override + public void write(byte[] bytes, int offset, int length) throws IOException { + file.write(bytes, offset, length); + } +} diff --git a/test/random_access/src/main/java/seaweedfs/client/btree/StateCheckBlockStore.java b/test/random_access/src/main/java/seaweedfs/client/btree/StateCheckBlockStore.java new file mode 100644 index 000000000..f720ebb2e --- /dev/null +++ b/test/random_access/src/main/java/seaweedfs/client/btree/StateCheckBlockStore.java @@ -0,0 +1,87 @@ +/* + * Copyright 2009 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package seaweedfs.client.btree; + +public class StateCheckBlockStore implements BlockStore { + private final BlockStore blockStore; + private boolean open; + + public StateCheckBlockStore(BlockStore blockStore) { + this.blockStore = blockStore; + } + + @Override + public void open(Runnable initAction, Factory factory) { + assert !open; + open = true; + blockStore.open(initAction, factory); + } + + public boolean isOpen() { + return open; + } + + @Override + public void close() { + if (!open) { + return; + } + open = false; + blockStore.close(); + } + + @Override + public void clear() { + assert open; + blockStore.clear(); + } + + @Override + public void remove(BlockPayload block) { + assert open; + blockStore.remove(block); + } + + @Override + public T readFirst(Class payloadType) { + assert open; + return blockStore.readFirst(payloadType); + } + + @Override + public T read(BlockPointer pos, Class payloadType) { + assert open; + return blockStore.read(pos, payloadType); + } + + @Override + public void write(BlockPayload block) { + assert open; + blockStore.write(block); + } + + @Override + public void attach(BlockPayload block) { + assert open; + blockStore.attach(block); + } + + @Override + public void flush() { + assert open; + blockStore.flush(); + } +} diff --git a/test/random_access/src/main/java/seaweedfs/client/btree/StreamByteBuffer.java b/test/random_access/src/main/java/seaweedfs/client/btree/StreamByteBuffer.java new file mode 100644 index 000000000..8af6e14d8 --- /dev/null +++ b/test/random_access/src/main/java/seaweedfs/client/btree/StreamByteBuffer.java @@ -0,0 +1,526 @@ +/* + * Copyright 2016 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
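The store classes in this patch are designed to nest: FileBackedBlockStore does the raw file I/O, FreeListBlockStore recycles freed regions, StateCheckBlockStore asserts open/close discipline, and CachingBlockStore buffers writes and caches hot index blocks. The exact stack used by the B-tree cache is not part of this excerpt, so the wiring below is an assumption-labeled sketch (the file name, maxBlockEntries value, and empty cacheable-type list are all illustrative):

    import java.io.File;
    import java.util.Collections;

    public class StoreStackDemo {
        public static void main(String[] args) {
            // Innermost first: raw file, free-list reuse, state assertions,
            // then write-back caching on the outside.
            BlockStore store =
                new CachingBlockStore(
                    new StateCheckBlockStore(
                        new FreeListBlockStore(
                            new FileBackedBlockStore(new File("demo-cache.bin")), 100)),
                    Collections.<Class<? extends BlockPayload>>emptyList());

            store.open(() -> { /* initialize first blocks of an empty store */ },
                       type -> { throw new UnsupportedOperationException("factory stub"); });
            store.close();
        }
    }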
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package seaweedfs.client.btree; + +import java.io.EOFException; +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.nio.ByteBuffer; +import java.nio.CharBuffer; +import java.nio.charset.CharacterCodingException; +import java.nio.charset.Charset; +import java.nio.charset.CharsetDecoder; +import java.nio.charset.CoderResult; +import java.nio.charset.CodingErrorAction; +import java.util.ArrayList; +import java.util.LinkedList; +import java.util.List; + + +/** + * An in-memory buffer that provides OutputStream and InputStream interfaces. + * + * This is more efficient than using ByteArrayOutputStream/ByteArrayInputStream + * + * Reading the buffer will clear the buffer. + * This is not thread-safe, it is intended to be used by a single Thread. + */ +public class StreamByteBuffer { + private static final int DEFAULT_CHUNK_SIZE = 4096; + private static final int MAX_CHUNK_SIZE = 1024 * 1024; + private LinkedList chunks = new LinkedList(); + private StreamByteBufferChunk currentWriteChunk; + private StreamByteBufferChunk currentReadChunk; + private int chunkSize; + private int nextChunkSize; + private int maxChunkSize; + private StreamByteBufferOutputStream output; + private StreamByteBufferInputStream input; + private int totalBytesUnreadInList; + + public StreamByteBuffer() { + this(DEFAULT_CHUNK_SIZE); + } + + public StreamByteBuffer(int chunkSize) { + this.chunkSize = chunkSize; + this.nextChunkSize = chunkSize; + this.maxChunkSize = Math.max(chunkSize, MAX_CHUNK_SIZE); + currentWriteChunk = new StreamByteBufferChunk(nextChunkSize); + output = new StreamByteBufferOutputStream(); + input = new StreamByteBufferInputStream(); + } + + public static StreamByteBuffer of(InputStream inputStream) throws IOException { + StreamByteBuffer buffer = new StreamByteBuffer(chunkSizeInDefaultRange(inputStream.available())); + buffer.readFully(inputStream); + return buffer; + } + + public static StreamByteBuffer of(InputStream inputStream, int len) throws IOException { + StreamByteBuffer buffer = new StreamByteBuffer(chunkSizeInDefaultRange(len)); + buffer.readFrom(inputStream, len); + return buffer; + } + + public static StreamByteBuffer createWithChunkSizeInDefaultRange(int value) { + return new StreamByteBuffer(chunkSizeInDefaultRange(value)); + } + + static int chunkSizeInDefaultRange(int value) { + return valueInRange(value, DEFAULT_CHUNK_SIZE, MAX_CHUNK_SIZE); + } + + private static int valueInRange(int value, int min, int max) { + return Math.min(Math.max(value, min), max); + } + + public OutputStream getOutputStream() { + return output; + } + + public InputStream getInputStream() { + return input; + } + + public void writeTo(OutputStream target) throws IOException { + while (prepareRead() != -1) { + currentReadChunk.writeTo(target); + } + } + + public void readFrom(InputStream inputStream, int len) throws IOException { + int bytesLeft = len; + while (bytesLeft > 0) { + int spaceLeft = allocateSpace(); + int limit = Math.min(spaceLeft, bytesLeft); + int readBytes = currentWriteChunk.readFrom(inputStream, limit); + if (readBytes == 
-1) { + throw new EOFException("Unexpected EOF"); + } + bytesLeft -= readBytes; + } + } + + public void readFully(InputStream inputStream) throws IOException { + while (true) { + int len = allocateSpace(); + int readBytes = currentWriteChunk.readFrom(inputStream, len); + if (readBytes == -1) { + break; + } + } + } + + public byte[] readAsByteArray() { + byte[] buf = new byte[totalBytesUnread()]; + input.readImpl(buf, 0, buf.length); + return buf; + } + + public List readAsListOfByteArrays() { + List listOfByteArrays = new ArrayList(chunks.size() + 1); + byte[] buf; + while ((buf = input.readNextBuffer()) != null) { + if (buf.length > 0) { + listOfByteArrays.add(buf); + } + } + return listOfByteArrays; + } + + public String readAsString(String encoding) { + Charset charset = Charset.forName(encoding); + return readAsString(charset); + } + + public String readAsString() { + return readAsString(Charset.defaultCharset()); + } + + public String readAsString(Charset charset) { + try { + return doReadAsString(charset); + } catch (CharacterCodingException e) { + throw new UncheckedIOException(e); + } + } + + private String doReadAsString(Charset charset) throws CharacterCodingException { + int unreadSize = totalBytesUnread(); + if (unreadSize > 0) { + return readAsCharBuffer(charset).toString(); + } + return ""; + } + + private CharBuffer readAsCharBuffer(Charset charset) throws CharacterCodingException { + CharsetDecoder decoder = charset.newDecoder().onMalformedInput( + CodingErrorAction.REPLACE).onUnmappableCharacter( + CodingErrorAction.REPLACE); + CharBuffer charbuffer = CharBuffer.allocate(totalBytesUnread()); + ByteBuffer buf = null; + boolean wasUnderflow = false; + ByteBuffer nextBuf = null; + boolean needsFlush = false; + while (hasRemaining(nextBuf) || hasRemaining(buf) || prepareRead() != -1) { + if (hasRemaining(buf)) { + // handle decoding underflow, multi-byte unicode character at buffer chunk boundary + if (!wasUnderflow) { + throw new IllegalStateException("Unexpected state. Buffer has remaining bytes without underflow in decoding."); + } + if (!hasRemaining(nextBuf) && prepareRead() != -1) { + nextBuf = currentReadChunk.readToNioBuffer(); + } + // copy one by one until the underflow has been resolved + buf = ByteBuffer.allocate(buf.remaining() + 1).put(buf); + buf.put(nextBuf.get()); + BufferCaster.cast(buf).flip(); + } else { + if (hasRemaining(nextBuf)) { + buf = nextBuf; + } else if (prepareRead() != -1) { + buf = currentReadChunk.readToNioBuffer(); + if (!hasRemaining(buf)) { + throw new IllegalStateException("Unexpected state. 
Buffer is empty."); + } + } + nextBuf = null; + } + boolean endOfInput = !hasRemaining(nextBuf) && prepareRead() == -1; + int bufRemainingBefore = buf.remaining(); + CoderResult result = decoder.decode(buf, charbuffer, false); + if (bufRemainingBefore > buf.remaining()) { + needsFlush = true; + } + if (endOfInput) { + result = decoder.decode(ByteBuffer.allocate(0), charbuffer, true); + if (!result.isUnderflow()) { + result.throwException(); + } + break; + } + wasUnderflow = result.isUnderflow(); + } + if (needsFlush) { + CoderResult result = decoder.flush(charbuffer); + if (!result.isUnderflow()) { + result.throwException(); + } + } + clear(); + // push back remaining bytes of multi-byte unicode character + while (hasRemaining(buf)) { + byte b = buf.get(); + try { + getOutputStream().write(b); + } catch (IOException e) { + throw new UncheckedIOException(e); + } + } + BufferCaster.cast(charbuffer).flip(); + return charbuffer; + } + + private boolean hasRemaining(ByteBuffer nextBuf) { + return nextBuf != null && nextBuf.hasRemaining(); + } + + public int totalBytesUnread() { + int total = totalBytesUnreadInList; + if (currentReadChunk != null) { + total += currentReadChunk.bytesUnread(); + } + if (currentWriteChunk != currentReadChunk && currentWriteChunk != null) { + total += currentWriteChunk.bytesUnread(); + } + return total; + } + + protected int allocateSpace() { + int spaceLeft = currentWriteChunk.spaceLeft(); + if (spaceLeft == 0) { + addChunk(currentWriteChunk); + currentWriteChunk = new StreamByteBufferChunk(nextChunkSize); + if (nextChunkSize < maxChunkSize) { + nextChunkSize = Math.min(nextChunkSize * 2, maxChunkSize); + } + spaceLeft = currentWriteChunk.spaceLeft(); + } + return spaceLeft; + } + + protected int prepareRead() { + int bytesUnread = (currentReadChunk != null) ? 
currentReadChunk.bytesUnread() : 0; + if (bytesUnread == 0) { + if (!chunks.isEmpty()) { + currentReadChunk = chunks.removeFirst(); + bytesUnread = currentReadChunk.bytesUnread(); + totalBytesUnreadInList -= bytesUnread; + } else if (currentReadChunk != currentWriteChunk) { + currentReadChunk = currentWriteChunk; + bytesUnread = currentReadChunk.bytesUnread(); + } else { + bytesUnread = -1; + } + } + return bytesUnread; + } + + public static StreamByteBuffer of(List listOfByteArrays) { + StreamByteBuffer buffer = new StreamByteBuffer(); + buffer.addChunks(listOfByteArrays); + return buffer; + } + + private void addChunks(List listOfByteArrays) { + for (byte[] buf : listOfByteArrays) { + addChunk(new StreamByteBufferChunk(buf)); + } + } + + private void addChunk(StreamByteBufferChunk chunk) { + chunks.add(chunk); + totalBytesUnreadInList += chunk.bytesUnread(); + } + + static class StreamByteBufferChunk { + private int pointer; + private byte[] buffer; + private int size; + private int used; + + public StreamByteBufferChunk(int size) { + this.size = size; + buffer = new byte[size]; + } + + public StreamByteBufferChunk(byte[] buf) { + this.size = buf.length; + this.buffer = buf; + this.used = buf.length; + } + + public ByteBuffer readToNioBuffer() { + if (pointer < used) { + ByteBuffer result; + if (pointer > 0 || used < size) { + result = ByteBuffer.wrap(buffer, pointer, used - pointer); + } else { + result = ByteBuffer.wrap(buffer); + } + pointer = used; + return result; + } + + return null; + } + + public boolean write(byte b) { + if (used < size) { + buffer[used++] = b; + return true; + } + + return false; + } + + public void write(byte[] b, int off, int len) { + System.arraycopy(b, off, buffer, used, len); + used = used + len; + } + + public void read(byte[] b, int off, int len) { + System.arraycopy(buffer, pointer, b, off, len); + pointer = pointer + len; + } + + public void writeTo(OutputStream target) throws IOException { + if (pointer < used) { + target.write(buffer, pointer, used - pointer); + pointer = used; + } + } + + public void reset() { + pointer = 0; + } + + public int bytesUsed() { + return used; + } + + public int bytesUnread() { + return used - pointer; + } + + public int read() { + if (pointer < used) { + return buffer[pointer++] & 0xff; + } + + return -1; + } + + public int spaceLeft() { + return size - used; + } + + public int readFrom(InputStream inputStream, int len) throws IOException { + int readBytes = inputStream.read(buffer, used, len); + if(readBytes > 0) { + used += readBytes; + } + return readBytes; + } + + public void clear() { + used = pointer = 0; + } + + public byte[] readBuffer() { + if (used == buffer.length && pointer == 0) { + pointer = used; + return buffer; + } else if (pointer < used) { + byte[] buf = new byte[used - pointer]; + read(buf, 0, used - pointer); + return buf; + } else { + return new byte[0]; + } + } + } + + class StreamByteBufferOutputStream extends OutputStream { + private boolean closed; + + @Override + public void write(byte[] b, int off, int len) throws IOException { + if (b == null) { + throw new NullPointerException(); + } + + if ((off < 0) || (off > b.length) || (len < 0) + || ((off + len) > b.length) || ((off + len) < 0)) { + throw new IndexOutOfBoundsException(); + } + + if (len == 0) { + return; + } + + int bytesLeft = len; + int currentOffset = off; + while (bytesLeft > 0) { + int spaceLeft = allocateSpace(); + int writeBytes = Math.min(spaceLeft, bytesLeft); + currentWriteChunk.write(b, currentOffset, writeBytes); + 
bytesLeft -= writeBytes; + currentOffset += writeBytes; + } + } + + @Override + public void close() throws IOException { + closed = true; + } + + public boolean isClosed() { + return closed; + } + + @Override + public void write(int b) throws IOException { + allocateSpace(); + currentWriteChunk.write((byte) b); + } + + public StreamByteBuffer getBuffer() { + return StreamByteBuffer.this; + } + } + + class StreamByteBufferInputStream extends InputStream { + @Override + public int read() throws IOException { + prepareRead(); + return currentReadChunk.read(); + } + + @Override + public int read(byte[] b, int off, int len) throws IOException { + return readImpl(b, off, len); + } + + int readImpl(byte[] b, int off, int len) { + if (b == null) { + throw new NullPointerException(); + } + + if ((off < 0) || (off > b.length) || (len < 0) + || ((off + len) > b.length) || ((off + len) < 0)) { + throw new IndexOutOfBoundsException(); + } + + if (len == 0) { + return 0; + } + + int bytesLeft = len; + int currentOffset = off; + int bytesUnread = prepareRead(); + int totalBytesRead = 0; + while (bytesLeft > 0 && bytesUnread != -1) { + int readBytes = Math.min(bytesUnread, bytesLeft); + currentReadChunk.read(b, currentOffset, readBytes); + bytesLeft -= readBytes; + currentOffset += readBytes; + totalBytesRead += readBytes; + bytesUnread = prepareRead(); + } + if (totalBytesRead > 0) { + return totalBytesRead; + } + + return -1; + } + + @Override + public int available() throws IOException { + return totalBytesUnread(); + } + + public StreamByteBuffer getBuffer() { + return StreamByteBuffer.this; + } + + public byte[] readNextBuffer() { + if (prepareRead() != -1) { + return currentReadChunk.readBuffer(); + } + return null; + } + } + + public void clear() { + chunks.clear(); + currentReadChunk = null; + totalBytesUnreadInList = 0; + currentWriteChunk.clear(); + } +} diff --git a/test/random_access/src/main/java/seaweedfs/client/btree/UncheckedException.java b/test/random_access/src/main/java/seaweedfs/client/btree/UncheckedException.java new file mode 100644 index 000000000..ab57d8c95 --- /dev/null +++ b/test/random_access/src/main/java/seaweedfs/client/btree/UncheckedException.java @@ -0,0 +1,88 @@ +/* + * Copyright 2010 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package seaweedfs.client.btree; + +import java.io.IOException; +import java.lang.reflect.InvocationTargetException; +import java.util.concurrent.Callable; + +/** + * Wraps a checked exception. Carries no other context. + */ +public final class UncheckedException extends RuntimeException { + private UncheckedException(Throwable cause) { + super(cause); + } + + private UncheckedException(String message, Throwable cause) { + super(message, cause); + } + + /** + * Note: always throws the failure in some form. The return value is to keep the compiler happy. 
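StreamByteBuffer grows by chaining chunks (doubling the chunk size up to 1 MiB), serves writes through getOutputStream() and reads through getInputStream(), and, as its javadoc warns, reading consumes the buffer. A short round-trip sketch:

    import java.io.IOException;

    public class StreamByteBufferDemo {
        public static void main(String[] args) throws IOException {
            StreamByteBuffer buffer = new StreamByteBuffer();
            buffer.getOutputStream().write("hello".getBytes("UTF-8"));

            System.out.println(buffer.totalBytesUnread());    // 5
            System.out.println(buffer.readAsString("UTF-8")); // hello
            System.out.println(buffer.totalBytesUnread());    // 0: reading clears
        }
    }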
+ */ + public static RuntimeException throwAsUncheckedException(Throwable t) { + return throwAsUncheckedException(t, false); + } + + /** + * Note: always throws the failure in some form. The return value is to keep the compiler happy. + */ + public static RuntimeException throwAsUncheckedException(Throwable t, boolean preserveMessage) { + if (t instanceof InterruptedException) { + Thread.currentThread().interrupt(); + } + if (t instanceof RuntimeException) { + throw (RuntimeException) t; + } + if (t instanceof Error) { + throw (Error) t; + } + if (t instanceof IOException) { + if (preserveMessage) { + throw new UncheckedIOException(t.getMessage(), t); + } else { + throw new UncheckedIOException(t); + } + } + if (preserveMessage) { + throw new UncheckedException(t.getMessage(), t); + } else { + throw new UncheckedException(t); + } + } + + public static <T> T callUnchecked(Callable<T> callable) { + try { + return callable.call(); + } catch (Exception e) { + throw throwAsUncheckedException(e); + } + } + + /** + * Unwraps the passed InvocationTargetException, making the stack of exceptions cleaner without losing information. + * + * Note: always throws the failure in some form. The return value is to keep the compiler happy. + * + * @param e to be unwrapped + * @return an instance of RuntimeException based on the target exception of the parameter. + */ + public static RuntimeException unwrapAndRethrow(InvocationTargetException e) { + return UncheckedException.throwAsUncheckedException(e.getTargetException()); + } +} diff --git a/test/random_access/src/main/java/seaweedfs/client/btree/UncheckedIOException.java b/test/random_access/src/main/java/seaweedfs/client/btree/UncheckedIOException.java new file mode 100644 index 000000000..1cf30df7a --- /dev/null +++ b/test/random_access/src/main/java/seaweedfs/client/btree/UncheckedIOException.java @@ -0,0 +1,36 @@ +/* + * Copyright 2012 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package seaweedfs.client.btree; + +/** + * UncheckedIOException is used to wrap an {@link java.io.IOException} into an unchecked exception. + */ +public class UncheckedIOException extends RuntimeException { + public UncheckedIOException() { + } + + public UncheckedIOException(String message) { + super(message); + } + + public UncheckedIOException(String message, Throwable cause) { + super(message, cause); + } + + public UncheckedIOException(Throwable cause) { + super(cause); + } +} diff --git a/test/random_access/src/main/java/seaweedfs/client/btree/serialize/AbstractDecoder.java b/test/random_access/src/main/java/seaweedfs/client/btree/serialize/AbstractDecoder.java new file mode 100644 index 000000000..d805f4654 --- /dev/null +++ b/test/random_access/src/main/java/seaweedfs/client/btree/serialize/AbstractDecoder.java @@ -0,0 +1,133 @@ +/* + * Copyright 2013 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License.
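A statement-level usage sketch for the helpers above: callUnchecked() runs the Callable and rethrows any checked failure via throwAsUncheckedException(), so the caller needs no throws clause; an IOException would surface as the UncheckedIOException wrapper. The file name is illustrative:

    byte[] bytes = UncheckedException.callUnchecked(new java.util.concurrent.Callable<byte[]>() {
        @Override
        public byte[] call() throws Exception {
            // may throw IOException; rethrown unchecked by callUnchecked()
            return java.nio.file.Files.readAllBytes(java.nio.file.Paths.get("cache.bin"));
        }
    });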
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package seaweedfs.client.btree.serialize; + +import javax.annotation.Nullable; +import java.io.EOFException; +import java.io.IOException; +import java.io.InputStream; + +public abstract class AbstractDecoder implements Decoder { + private DecoderStream stream; + + @Override + public InputStream getInputStream() { + if (stream == null) { + stream = new DecoderStream(); + } + return stream; + } + + @Override + public void readBytes(byte[] buffer) throws IOException { + readBytes(buffer, 0, buffer.length); + } + + @Override + public byte[] readBinary() throws EOFException, IOException { + int size = readSmallInt(); + byte[] result = new byte[size]; + readBytes(result); + return result; + } + + @Override + public int readSmallInt() throws EOFException, IOException { + return readInt(); + } + + @Override + public long readSmallLong() throws EOFException, IOException { + return readLong(); + } + + @Nullable + @Override + public Integer readNullableSmallInt() throws IOException { + if (readBoolean()) { + return readSmallInt(); + } else { + return null; + } + } + + @Override + public String readNullableString() throws EOFException, IOException { + if (readBoolean()) { + return readString(); + } else { + return null; + } + } + + @Override + public void skipBytes(long count) throws EOFException, IOException { + long remaining = count; + while (remaining > 0) { + long skipped = maybeSkip(remaining); + if (skipped <= 0) { + break; + } + remaining -= skipped; + } + if (remaining > 0) { + throw new EOFException(); + } + } + + @Override + public T decodeChunked(DecodeAction decodeAction) throws EOFException, Exception { + throw new UnsupportedOperationException(); + } + + @Override + public void skipChunked() throws EOFException, IOException { + throw new UnsupportedOperationException(); + } + + protected abstract int maybeReadBytes(byte[] buffer, int offset, int count) throws IOException; + + protected abstract long maybeSkip(long count) throws IOException; + + private class DecoderStream extends InputStream { + byte[] buffer = new byte[1]; + + @Override + public long skip(long n) throws IOException { + return maybeSkip(n); + } + + @Override + public int read() throws IOException { + int read = maybeReadBytes(buffer, 0, 1); + if (read <= 0) { + return read; + } + return buffer[0] & 0xff; + } + + @Override + public int read(byte[] buffer) throws IOException { + return maybeReadBytes(buffer, 0, buffer.length); + } + + @Override + public int read(byte[] buffer, int offset, int count) throws IOException { + return maybeReadBytes(buffer, offset, count); + } + } +} diff --git a/test/random_access/src/main/java/seaweedfs/client/btree/serialize/AbstractEncoder.java b/test/random_access/src/main/java/seaweedfs/client/btree/serialize/AbstractEncoder.java new file mode 100644 index 000000000..4caf3461d --- /dev/null +++ b/test/random_access/src/main/java/seaweedfs/client/btree/serialize/AbstractEncoder.java @@ -0,0 +1,101 @@ +/* + * Copyright 2013 the original author or authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package seaweedfs.client.btree.serialize; + +import javax.annotation.Nullable; +import java.io.IOException; +import java.io.OutputStream; + +public abstract class AbstractEncoder implements Encoder { + private EncoderStream stream; + + @Override + public OutputStream getOutputStream() { + if (stream == null) { + stream = new EncoderStream(); + } + return stream; + } + + @Override + public void writeBytes(byte[] bytes) throws IOException { + writeBytes(bytes, 0, bytes.length); + } + + @Override + public void writeBinary(byte[] bytes) throws IOException { + writeBinary(bytes, 0, bytes.length); + } + + @Override + public void writeBinary(byte[] bytes, int offset, int count) throws IOException { + writeSmallInt(count); + writeBytes(bytes, offset, count); + } + + @Override + public void encodeChunked(EncodeAction writeAction) throws Exception { + throw new UnsupportedOperationException(); + } + + @Override + public void writeSmallInt(int value) throws IOException { + writeInt(value); + } + + @Override + public void writeSmallLong(long value) throws IOException { + writeLong(value); + } + + @Override + public void writeNullableSmallInt(@Nullable Integer value) throws IOException { + if (value == null) { + writeBoolean(false); + } else { + writeBoolean(true); + writeSmallInt(value); + } + } + + @Override + public void writeNullableString(@Nullable CharSequence value) throws IOException { + if (value == null) { + writeBoolean(false); + } else { + writeBoolean(true); + writeString(value.toString()); + } + } + + private class EncoderStream extends OutputStream { + @Override + public void write(byte[] buffer) throws IOException { + writeBytes(buffer); + } + + @Override + public void write(byte[] buffer, int offset, int length) throws IOException { + writeBytes(buffer, offset, length); + } + + @Override + public void write(int b) throws IOException { + writeByte((byte) b); + } + } +} diff --git a/test/random_access/src/main/java/seaweedfs/client/btree/serialize/AbstractSerializer.java b/test/random_access/src/main/java/seaweedfs/client/btree/serialize/AbstractSerializer.java new file mode 100644 index 000000000..a60980354 --- /dev/null +++ b/test/random_access/src/main/java/seaweedfs/client/btree/serialize/AbstractSerializer.java @@ -0,0 +1,40 @@ +/* + * Copyright 2016 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
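The nullable-value convention in AbstractEncoder/AbstractDecoder is a presence boolean followed by the value itself. A statement-level sketch using the Kryo-backed implementations that appear later in this change (any concrete Encoder/Decoder pair behaves the same for these inherited methods):

    ByteArrayOutputStream bytes = new ByteArrayOutputStream();
    KryoBackedEncoder encoder = new KryoBackedEncoder(bytes);
    encoder.writeNullableSmallInt(null);         // inherited default: a single false boolean
    encoder.writeNullableSmallInt(42);           // true boolean, then the value itself
    encoder.flush();                             // push buffered output to the backing stream

    KryoBackedDecoder decoder = new KryoBackedDecoder(new ByteArrayInputStream(bytes.toByteArray()));
    Integer absent = decoder.readNullableSmallInt();    // null
    Integer present = decoder.readNullableSmallInt();   // 42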
+ */ + +package seaweedfs.client.btree.serialize; + +import com.google.common.base.Objects; + +/** + * This abstract class provides a sensible default implementation for {@code Serializer} equality. This equality + * implementation is required to enable cache instance reuse within the same Gradle runtime. Serializers are used + * as cache parameters, which need to be compared to determine cache compatibility. + */ +public abstract class AbstractSerializer<T> implements Serializer<T> { + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + + return Objects.equal(obj.getClass(), getClass()); + } + + @Override + public int hashCode() { + return Objects.hashCode(getClass()); + } +} diff --git a/test/random_access/src/main/java/seaweedfs/client/btree/serialize/Cast.java b/test/random_access/src/main/java/seaweedfs/client/btree/serialize/Cast.java new file mode 100644 index 000000000..4f962cea6 --- /dev/null +++ b/test/random_access/src/main/java/seaweedfs/client/btree/serialize/Cast.java @@ -0,0 +1,79 @@ +/* + * Copyright 2012 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package seaweedfs.client.btree.serialize; + +import javax.annotation.Nullable; + +public abstract class Cast { + + /** + * Casts the given object to the given type, providing a better error message than the default. + * + * The standard {@link Class#cast(Object)} method produces unsatisfactory error messages on some platforms + * when it fails. All this method does is provide a better, consistent, error message. + * + * This should be used whenever there is a chance the cast could fail. If in doubt, use this. + * + * @param outputType The type to cast the input to + * @param object The object to be cast (must not be {@code null}) + * @param <O> The type to be cast to + * @param <I> The type of the object to be cast + * @return The input object, cast to the output type + */ + public static <O, I> O cast(Class<O> outputType, I object) { + try { + return outputType.cast(object); + } catch (ClassCastException e) { + throw new ClassCastException(String.format( + "Failed to cast object %s of type %s to target type %s", object, object.getClass().getName(), outputType.getName() + )); + } + } + + /** + * Casts the given object to the given type, providing a better error message than the default. + * + * The standard {@link Class#cast(Object)} method produces unsatisfactory error messages on some platforms + * when it fails. All this method does is provide a better, consistent, error message. + * + * This should be used whenever there is a chance the cast could fail. If in doubt, use this.
+ * + * @param outputType The type to cast the input to + * @param object The object to be cast + * @param <O> The type to be cast to + * @param <I> The type of the object to be cast + * @return The input object, cast to the output type + */ + @Nullable + public static <O, I> O castNullable(Class<O> outputType, @Nullable I object) { + if (object == null) { + return null; + } + return cast(outputType, object); + } + + @SuppressWarnings("unchecked") + @Nullable + public static <T> T uncheckedCast(@Nullable Object object) { + return (T) object; + } + + @SuppressWarnings("unchecked") + public static <T> T uncheckedNonnullCast(Object object) { + return (T) object; + } +} diff --git a/test/random_access/src/main/java/seaweedfs/client/btree/serialize/ClassLoaderObjectInputStream.java b/test/random_access/src/main/java/seaweedfs/client/btree/serialize/ClassLoaderObjectInputStream.java new file mode 100644 index 000000000..5f9cb3052 --- /dev/null +++ b/test/random_access/src/main/java/seaweedfs/client/btree/serialize/ClassLoaderObjectInputStream.java @@ -0,0 +1,43 @@ +/* + * Copyright 2010 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package seaweedfs.client.btree.serialize; + +import java.io.IOException; +import java.io.InputStream; +import java.io.ObjectInputStream; +import java.io.ObjectStreamClass; + +public class ClassLoaderObjectInputStream extends ObjectInputStream { + private final ClassLoader loader; + + public ClassLoaderObjectInputStream(InputStream in, ClassLoader loader) throws IOException { + super(in); + this.loader = loader; + } + + public ClassLoader getClassLoader() { + return loader; + } + + @Override + protected Class<?> resolveClass(ObjectStreamClass desc) throws IOException, ClassNotFoundException { + try { + return Class.forName(desc.getName(), false, loader); + } catch (ClassNotFoundException e) { + return super.resolveClass(desc); + } + } +} diff --git a/test/random_access/src/main/java/seaweedfs/client/btree/serialize/Decoder.java b/test/random_access/src/main/java/seaweedfs/client/btree/serialize/Decoder.java new file mode 100644 index 000000000..e5251b8c2 --- /dev/null +++ b/test/random_access/src/main/java/seaweedfs/client/btree/serialize/Decoder.java @@ -0,0 +1,140 @@ +/* + * Copyright 2013 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
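A quick statement-level sketch of the Cast helpers above; the variable names are illustrative. cast() fails with a consistent, descriptive ClassCastException, while the unchecked variants trade that safety away entirely (the target type is inferred at the call site and never verified at runtime):

    Object value = "volume-index";
    String s = Cast.cast(String.class, value);      // descriptive message on failure
    java.util.List<String> names =
            Cast.uncheckedNonnullCast(new java.util.ArrayList<String>());  // no runtime check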
+ */ + +package seaweedfs.client.btree.serialize; + +import javax.annotation.Nullable; +import java.io.EOFException; +import java.io.IOException; +import java.io.InputStream; + +/** + * Provides a way to decode structured data from a backing byte stream. Implementations may buffer incoming bytes read + * from the backing stream prior to decoding. + */ +public interface Decoder { + /** + * Returns an InputStream which can be used to read raw bytes. + */ + InputStream getInputStream(); + + /** + * Reads a signed 64 bit long value. Can read any value that was written using {@link Encoder#writeLong(long)}. + * + * @throws EOFException when the end of the byte stream is reached before the long value can be fully read. + */ + long readLong() throws EOFException, IOException; + + /** + * Reads a signed 64 bit long value. Can read any value that was written using {@link Encoder#writeSmallLong(long)}. + * + * @throws EOFException when the end of the byte stream is reached before the long value can be fully read. + */ + long readSmallLong() throws EOFException, IOException; + + /** + * Reads a signed 32 bit int value. Can read any value that was written using {@link Encoder#writeInt(int)}. + * + * @throws EOFException when the end of the byte stream is reached before the int value can be fully read. + */ + int readInt() throws EOFException, IOException; + + /** + * Reads a signed 32 bit int value. Can read any value that was written using {@link Encoder#writeSmallInt(int)}. + * + * @throws EOFException when the end of the byte stream is reached before the int value can be fully read. + */ + int readSmallInt() throws EOFException, IOException; + + /** + * Reads a nullable signed 32 bit int value. + * + * @see #readSmallInt() + */ + @Nullable + Integer readNullableSmallInt() throws EOFException, IOException; + + /** + * Reads a boolean value. Can read any value that was written using {@link Encoder#writeBoolean(boolean)}. + * + * @throws EOFException when the end of the byte stream is reached before the boolean value can be fully read. + */ + boolean readBoolean() throws EOFException, IOException; + + /** + * Reads a non-null string value. Can read any value that was written using {@link Encoder#writeString(CharSequence)}. + * + * @throws EOFException when the end of the byte stream is reached before the string can be fully read. + */ + String readString() throws EOFException, IOException; + + /** + * Reads a nullable string value. Can read any value that was written using {@link Encoder#writeNullableString(CharSequence)}. + * + * @throws EOFException when the end of the byte stream is reached before the string can be fully read. + */ + @Nullable + String readNullableString() throws EOFException, IOException; + + /** + * Reads a byte value. Can read any byte value that was written using one of the raw byte methods on {@link Encoder}, such as {@link Encoder#writeByte(byte)} or {@link Encoder#getOutputStream()} + * + * @throws EOFException when the end of the byte stream is reached. + */ + byte readByte() throws EOFException, IOException; + + /** + * Reads bytes into the given buffer, filling the buffer. Can read any byte values that were written using one of the raw byte methods on {@link Encoder}, such as {@link + * Encoder#writeBytes(byte[])} or {@link Encoder#getOutputStream()} + * + * @throws EOFException when the end of the byte stream is reached before the buffer is full.
+ */ + void readBytes(byte[] buffer) throws EOFException, IOException; + + /** + * Reads the specified number of bytes into the given buffer. Can read any byte values that were written using one of the raw byte methods on {@link Encoder}, such as {@link + * Encoder#writeBytes(byte[])} or {@link Encoder#getOutputStream()} + * + * @throws EOFException when the end of the byte stream is reached before the specified number of bytes were read. + */ + void readBytes(byte[] buffer, int offset, int count) throws EOFException, IOException; + + /** + * Reads a byte array. Can read any byte array written using {@link Encoder#writeBinary(byte[])} or {@link Encoder#writeBinary(byte[], int, int)}. + * + * @throws EOFException when the end of the byte stream is reached before the byte array was fully read. + */ + byte[] readBinary() throws EOFException, IOException; + + /** + * Skips the given number of bytes. Can skip over any byte values that were written using one of the raw byte methods on {@link Encoder}. + */ + void skipBytes(long count) throws EOFException, IOException; + + /** + * Reads a byte stream written using {@link Encoder#encodeChunked(Encoder.EncodeAction)}. + */ + <T> T decodeChunked(DecodeAction<? super Decoder, T> decodeAction) throws EOFException, Exception; + + /** + * Skips over a byte stream written using {@link Encoder#encodeChunked(Encoder.EncodeAction)}, discarding its content. + */ + void skipChunked() throws EOFException, IOException; + + interface DecodeAction<IN, OUT> { + OUT read(IN source) throws Exception; + } +} diff --git a/test/random_access/src/main/java/seaweedfs/client/btree/serialize/DefaultSerializer.java b/test/random_access/src/main/java/seaweedfs/client/btree/serialize/DefaultSerializer.java new file mode 100644 index 000000000..15ba1c592 --- /dev/null +++ b/test/random_access/src/main/java/seaweedfs/client/btree/serialize/DefaultSerializer.java @@ -0,0 +1,73 @@ +/* + * Copyright 2009 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package seaweedfs.client.btree.serialize; + +import com.google.common.base.Objects; + +import java.io.IOException; +import java.io.ObjectOutputStream; +import java.io.StreamCorruptedException; + +public class DefaultSerializer<T> extends AbstractSerializer<T> { + private ClassLoader classLoader; + + public DefaultSerializer() { + classLoader = getClass().getClassLoader(); + } + + public DefaultSerializer(ClassLoader classLoader) { + this.classLoader = classLoader != null ?
classLoader : getClass().getClassLoader(); + } + + public ClassLoader getClassLoader() { + return classLoader; + } + + public void setClassLoader(ClassLoader classLoader) { + this.classLoader = classLoader; + } + + @Override + public T read(Decoder decoder) throws Exception { + try { + return Cast.uncheckedNonnullCast(new ClassLoaderObjectInputStream(decoder.getInputStream(), classLoader).readObject()); + } catch (StreamCorruptedException e) { + return null; + } + } + + @Override + public void write(Encoder encoder, T value) throws IOException { + ObjectOutputStream objectStr = new ObjectOutputStream(encoder.getOutputStream()); + objectStr.writeObject(value); + objectStr.flush(); + } + + @Override + public boolean equals(Object obj) { + if (!super.equals(obj)) { + return false; + } + + DefaultSerializer<T> rhs = (DefaultSerializer<T>) obj; + return Objects.equal(classLoader, rhs.classLoader); + } + + @Override + public int hashCode() { + return Objects.hashCode(super.hashCode(), classLoader); + } +} diff --git a/test/random_access/src/main/java/seaweedfs/client/btree/serialize/Encoder.java b/test/random_access/src/main/java/seaweedfs/client/btree/serialize/Encoder.java new file mode 100644 index 000000000..1cdea10af --- /dev/null +++ b/test/random_access/src/main/java/seaweedfs/client/btree/serialize/Encoder.java @@ -0,0 +1,110 @@ +/* + * Copyright 2013 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package seaweedfs.client.btree.serialize; + +import javax.annotation.Nullable; +import java.io.IOException; +import java.io.OutputStream; + +/** + * Provides a way to encode structured data to a backing byte stream. Implementations may buffer outgoing encoded bytes prior + * to writing to the backing byte stream. + */ +public interface Encoder { + /** + * Returns an {@link OutputStream} that can be used to write raw bytes to the stream. + */ + OutputStream getOutputStream(); + + /** + * Writes a raw byte value to the stream. + */ + void writeByte(byte value) throws IOException; + + /** + * Writes the given raw bytes to the stream. Does not encode any length information. + */ + void writeBytes(byte[] bytes) throws IOException; + + /** + * Writes the given raw bytes to the stream. Does not encode any length information. + */ + void writeBytes(byte[] bytes, int offset, int count) throws IOException; + + /** + * Writes the given byte array to the stream. Encodes the bytes and length information. + */ + void writeBinary(byte[] bytes) throws IOException; + + /** + * Writes the given byte array to the stream. Encodes the bytes and length information. + */ + void writeBinary(byte[] bytes, int offset, int count) throws IOException; + + /** + * Appends an encoded stream to this stream. Encodes the stream as a series of chunks with length information. + */ + void encodeChunked(EncodeAction<Encoder> writeAction) throws Exception; + + /** + * Writes a signed 64 bit long value. The implementation may encode the value as a variable number of bytes, not necessarily as 8 bytes.
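To make the DefaultSerializer above concrete, a statement-level round-trip sketch: it rides on plain java.io serialization, so any Serializable value travels through an Encoder/Decoder pair. The Kryo-backed implementations used here are the ones defined later in this change; any pair would do:

    ByteArrayOutputStream bytes = new ByteArrayOutputStream();
    KryoBackedEncoder encoder = new KryoBackedEncoder(bytes);

    DefaultSerializer<String> serializer = new DefaultSerializer<String>();
    serializer.write(encoder, "persisted value");   // ObjectOutputStream over encoder.getOutputStream()
    encoder.flush();

    KryoBackedDecoder decoder = new KryoBackedDecoder(new ByteArrayInputStream(bytes.toByteArray()));
    String back = serializer.read(decoder);         // ClassLoaderObjectInputStream over decoder.getInputStream()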
+ */ + void writeLong(long value) throws IOException; + + /** + * Writes a signed 64 bit long value whose value is likely to be small and positive but may not be. The implementation may encode the value in a way that is more efficient for small positive + * values. + */ + void writeSmallLong(long value) throws IOException; + + /** + * Writes a signed 32 bit int value. The implementation may encode the value as a variable number of bytes, not necessarily as 4 bytes. + */ + void writeInt(int value) throws IOException; + + /** + * Writes a signed 32 bit int value whose value is likely to be small and positive but may not be. The implementation may encode the value in a way that + * is more efficient for small positive values. + */ + void writeSmallInt(int value) throws IOException; + + /** + * Writes a nullable signed 32 bit int value whose value is likely to be small and positive but may not be. + * + * @see #writeSmallInt(int) + */ + void writeNullableSmallInt(@Nullable Integer value) throws IOException; + + /** + * Writes a boolean value. + */ + void writeBoolean(boolean value) throws IOException; + + /** + * Writes a non-null string value. + */ + void writeString(CharSequence value) throws IOException; + + /** + * Writes a nullable string value. + */ + void writeNullableString(@Nullable CharSequence value) throws IOException; + + interface EncodeAction { + void write(T target) throws Exception; + } +} diff --git a/test/random_access/src/main/java/seaweedfs/client/btree/serialize/FlushableEncoder.java b/test/random_access/src/main/java/seaweedfs/client/btree/serialize/FlushableEncoder.java new file mode 100644 index 000000000..ddef9f5c6 --- /dev/null +++ b/test/random_access/src/main/java/seaweedfs/client/btree/serialize/FlushableEncoder.java @@ -0,0 +1,31 @@ +/* + * Copyright 2013 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package seaweedfs.client.btree.serialize; + +import java.io.Flushable; +import java.io.IOException; + +/** + * Represents an {@link Encoder} that buffers encoded data prior to writing to the backing stream. + */ +public interface FlushableEncoder extends Encoder, Flushable { + /** + * Ensures that all buffered data has been written to the backing stream. Does not flush the backing stream. + */ + @Override + void flush() throws IOException; +} diff --git a/test/random_access/src/main/java/seaweedfs/client/btree/serialize/ObjectReader.java b/test/random_access/src/main/java/seaweedfs/client/btree/serialize/ObjectReader.java new file mode 100644 index 000000000..fdea08191 --- /dev/null +++ b/test/random_access/src/main/java/seaweedfs/client/btree/serialize/ObjectReader.java @@ -0,0 +1,28 @@ +/* + * Copyright 2012 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package seaweedfs.client.btree.serialize; + +import java.io.EOFException; + +public interface ObjectReader<T> { + /** + * Reads the next object from the stream. + * + * @throws EOFException When the next object cannot be fully read due to reaching the end of stream. + */ + T read() throws EOFException, Exception; +} diff --git a/test/random_access/src/main/java/seaweedfs/client/btree/serialize/ObjectWriter.java b/test/random_access/src/main/java/seaweedfs/client/btree/serialize/ObjectWriter.java new file mode 100644 index 000000000..482bdd0f8 --- /dev/null +++ b/test/random_access/src/main/java/seaweedfs/client/btree/serialize/ObjectWriter.java @@ -0,0 +1,21 @@ +/* + * Copyright 2012 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package seaweedfs.client.btree.serialize; + +public interface ObjectWriter<T> { + void write(T value) throws Exception; +} diff --git a/test/random_access/src/main/java/seaweedfs/client/btree/serialize/Serializer.java b/test/random_access/src/main/java/seaweedfs/client/btree/serialize/Serializer.java new file mode 100644 index 000000000..b474ba3ac --- /dev/null +++ b/test/random_access/src/main/java/seaweedfs/client/btree/serialize/Serializer.java @@ -0,0 +1,33 @@ +/* + * Copyright 2009 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package seaweedfs.client.btree.serialize; + +import java.io.EOFException; + +public interface Serializer<T> { + /** + * Reads the next object from the given stream. The implementation must not perform any buffering, so that it reads only those bytes from the input stream that are + * required to deserialize the next object. + * + * @throws EOFException When the next object cannot be fully read due to reaching the end of stream. + */ + T read(Decoder decoder) throws EOFException, Exception; + + /** + * Writes the given object to the given stream. The implementation must not perform any buffering.
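The Serializer contract is easiest to see in a hand-rolled implementation. A hypothetical IntSerializer (not part of this change) that honours the no-buffering rule by delegating straight to the decoder/encoder, and inherits the class-based equality from AbstractSerializer above:

    public class IntSerializer extends AbstractSerializer<Integer> {
        @Override
        public Integer read(Decoder decoder) throws Exception {
            return decoder.readInt();            // consumes only the bytes of one encoded int
        }

        @Override
        public void write(Encoder encoder, Integer value) throws Exception {
            encoder.writeInt(value);             // no buffering beyond what the Encoder does
        }
    }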
+ */ + void write(Encoder encoder, T value) throws Exception; +} diff --git a/test/random_access/src/main/java/seaweedfs/client/btree/serialize/StatefulSerializer.java b/test/random_access/src/main/java/seaweedfs/client/btree/serialize/StatefulSerializer.java new file mode 100644 index 000000000..ea677d2c0 --- /dev/null +++ b/test/random_access/src/main/java/seaweedfs/client/btree/serialize/StatefulSerializer.java @@ -0,0 +1,33 @@ +/* + * Copyright 2012 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package seaweedfs.client.btree.serialize; + +/** + * Implementations must allow concurrent reading and writing, so that a thread can read and a thread can write at the same time. + * Implementations do not need to support multiple read threads or multiple write threads. + */ +public interface StatefulSerializer<T> { + /** + * Should not perform any buffering + */ + ObjectReader<T> newReader(Decoder decoder); + + /** + * Should not perform any buffering + */ + ObjectWriter<T> newWriter(Encoder encoder); +} diff --git a/test/random_access/src/main/java/seaweedfs/client/btree/serialize/kryo/KryoBackedDecoder.java b/test/random_access/src/main/java/seaweedfs/client/btree/serialize/kryo/KryoBackedDecoder.java new file mode 100644 index 000000000..d8e44a0dc --- /dev/null +++ b/test/random_access/src/main/java/seaweedfs/client/btree/serialize/kryo/KryoBackedDecoder.java @@ -0,0 +1,210 @@ +/* + * Copyright 2013 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package seaweedfs.client.btree.serialize.kryo; + +import com.esotericsoftware.kryo.KryoException; +import com.esotericsoftware.kryo.io.Input; +import seaweedfs.client.btree.serialize.AbstractDecoder; +import seaweedfs.client.btree.serialize.Decoder; + +import java.io.Closeable; +import java.io.EOFException; +import java.io.IOException; +import java.io.InputStream; + +/** + * Note that this decoder uses buffering, so will attempt to read beyond the end of the encoded data. This means you should use this type only when this decoder will be used to decode the entire + * stream.
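Putting the Encoder and Decoder interfaces together with the Kryo-backed implementations defined next, a small runnable round trip; the class and variable names are illustrative, not part of this change:

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import seaweedfs.client.btree.serialize.kryo.KryoBackedDecoder;
    import seaweedfs.client.btree.serialize.kryo.KryoBackedEncoder;

    public class RoundTripDemo {                         // demo class name is made up
        public static void main(String[] args) throws Exception {
            ByteArrayOutputStream bytes = new ByteArrayOutputStream();
            KryoBackedEncoder encoder = new KryoBackedEncoder(bytes);
            encoder.writeString("volume");               // Kryo-encoded string
            encoder.writeSmallInt(30001);                // variable-length, cheap for small positives
            encoder.writeBinary(new byte[]{1, 2, 3});    // writeSmallInt(length) then the raw bytes
            encoder.flush();                             // push buffered output to the backing stream

            KryoBackedDecoder decoder = new KryoBackedDecoder(new ByteArrayInputStream(bytes.toByteArray()));
            String name = decoder.readString();          // "volume"
            int id = decoder.readSmallInt();             // 30001
            byte[] payload = decoder.readBinary();       // {1, 2, 3}
        }
    }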
+ */ +public class KryoBackedDecoder extends AbstractDecoder implements Decoder, Closeable { + private final Input input; + private final InputStream inputStream; + private long extraSkipped; + private KryoBackedDecoder nested; + + public KryoBackedDecoder(InputStream inputStream) { + this(inputStream, 4096); + } + + public KryoBackedDecoder(InputStream inputStream, int bufferSize) { + this.inputStream = inputStream; + input = new Input(this.inputStream, bufferSize); + } + + @Override + protected int maybeReadBytes(byte[] buffer, int offset, int count) { + return input.read(buffer, offset, count); + } + + @Override + protected long maybeSkip(long count) throws IOException { + // Work around some bugs in Input.skip() + int remaining = input.limit() - input.position(); + if (remaining == 0) { + long skipped = inputStream.skip(count); + if (skipped > 0) { + extraSkipped += skipped; + } + return skipped; + } else if (count <= remaining) { + input.setPosition(input.position() + (int) count); + return count; + } else { + input.setPosition(input.limit()); + return remaining; + } + } + + private RuntimeException maybeEndOfStream(KryoException e) throws EOFException { + if (e.getMessage().equals("Buffer underflow.")) { + throw (EOFException) (new EOFException().initCause(e)); + } + throw e; + } + + @Override + public byte readByte() throws EOFException { + try { + return input.readByte(); + } catch (KryoException e) { + throw maybeEndOfStream(e); + } + } + + @Override + public void readBytes(byte[] buffer, int offset, int count) throws EOFException { + try { + input.readBytes(buffer, offset, count); + } catch (KryoException e) { + throw maybeEndOfStream(e); + } + } + + @Override + public long readLong() throws EOFException { + try { + return input.readLong(); + } catch (KryoException e) { + throw maybeEndOfStream(e); + } + } + + @Override + public long readSmallLong() throws EOFException, IOException { + try { + return input.readLong(true); + } catch (KryoException e) { + throw maybeEndOfStream(e); + } + } + + @Override + public int readInt() throws EOFException { + try { + return input.readInt(); + } catch (KryoException e) { + throw maybeEndOfStream(e); + } + } + + @Override + public int readSmallInt() throws EOFException { + try { + return input.readInt(true); + } catch (KryoException e) { + throw maybeEndOfStream(e); + } + } + + @Override + public boolean readBoolean() throws EOFException { + try { + return input.readBoolean(); + } catch (KryoException e) { + throw maybeEndOfStream(e); + } + } + + @Override + public String readString() throws EOFException { + return readNullableString(); + } + + @Override + public String readNullableString() throws EOFException { + try { + return input.readString(); + } catch (KryoException e) { + throw maybeEndOfStream(e); + } + } + + @Override + public void skipChunked() throws EOFException, IOException { + while (true) { + int count = readSmallInt(); + if (count == 0) { + break; + } + skipBytes(count); + } + } + + @Override + public T decodeChunked(DecodeAction decodeAction) throws EOFException, Exception { + if (nested == null) { + nested = new KryoBackedDecoder(new InputStream() { + @Override + public int read() throws IOException { + throw new UnsupportedOperationException(); + } + + @Override + public int read(byte[] buffer, int offset, int length) throws IOException { + int count = readSmallInt(); + if (count == 0) { + // End of stream has been reached + return -1; + } + if (count > length) { + // For now, assume same size buffers used to read and write 
+ throw new UnsupportedOperationException(); + } + readBytes(buffer, offset, count); + return count; + } + }); + } + T value = decodeAction.read(nested); + if (readSmallInt() != 0) { + throw new IllegalStateException("Expecting the end of nested stream."); + } + return value; + } + + /** + * Returns the total number of bytes consumed by this decoder. Some additional bytes may also be buffered by this decoder but have not been consumed. + */ + public long getReadPosition() { + return input.total() + extraSkipped; + } + + @Override + public void close() throws IOException { + input.close(); + } +} diff --git a/test/random_access/src/main/java/seaweedfs/client/btree/serialize/kryo/KryoBackedEncoder.java b/test/random_access/src/main/java/seaweedfs/client/btree/serialize/kryo/KryoBackedEncoder.java new file mode 100644 index 000000000..6de3c4db5 --- /dev/null +++ b/test/random_access/src/main/java/seaweedfs/client/btree/serialize/kryo/KryoBackedEncoder.java @@ -0,0 +1,134 @@ +/* + * Copyright 2013 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package seaweedfs.client.btree.serialize.kryo; + +import com.esotericsoftware.kryo.io.Output; +import seaweedfs.client.btree.serialize.AbstractEncoder; +import seaweedfs.client.btree.serialize.Encoder; +import seaweedfs.client.btree.serialize.FlushableEncoder; + +import javax.annotation.Nullable; +import java.io.Closeable; +import java.io.IOException; +import java.io.OutputStream; + +public class KryoBackedEncoder extends AbstractEncoder implements FlushableEncoder, Closeable { + private final Output output; + private KryoBackedEncoder nested; + + public KryoBackedEncoder(OutputStream outputStream) { + this(outputStream, 4096); + } + + public KryoBackedEncoder(OutputStream outputStream, int bufferSize) { + output = new Output(outputStream, bufferSize); + } + + @Override + public void writeByte(byte value) { + output.writeByte(value); + } + + @Override + public void writeBytes(byte[] bytes, int offset, int count) { + output.writeBytes(bytes, offset, count); + } + + @Override + public void writeLong(long value) { + output.writeLong(value); + } + + @Override + public void writeSmallLong(long value) { + output.writeLong(value, true); + } + + @Override + public void writeInt(int value) { + output.writeInt(value); + } + + @Override + public void writeSmallInt(int value) { + output.writeInt(value, true); + } + + @Override + public void writeBoolean(boolean value) { + output.writeBoolean(value); + } + + @Override + public void writeString(CharSequence value) { + if (value == null) { + throw new IllegalArgumentException("Cannot encode a null string."); + } + output.writeString(value); + } + + @Override + public void writeNullableString(@Nullable CharSequence value) { + output.writeString(value); + } + + @Override + public void encodeChunked(EncodeAction writeAction) throws Exception { + if (nested == null) { + nested = new KryoBackedEncoder(new OutputStream() { + @Override + public void write(byte[] buffer, int 
offset, int length) { + if (length == 0) { + return; + } + writeSmallInt(length); + writeBytes(buffer, offset, length); + } + + @Override + public void write(byte[] buffer) throws IOException { + write(buffer, 0, buffer.length); + } + + @Override + public void write(int b) { + throw new UnsupportedOperationException(); + } + }); + } + writeAction.write(nested); + nested.flush(); + writeSmallInt(0); + } + + /** + * Returns the total number of bytes written by this encoder, some of which may still be buffered. + */ + public long getWritePosition() { + return output.total(); + } + + @Override + public void flush() { + output.flush(); + } + + @Override + public void close() { + output.close(); + } +} diff --git a/test/random_access/src/main/java/seaweedfs/client/btree/serialize/kryo/StringDeduplicatingKryoBackedDecoder.java b/test/random_access/src/main/java/seaweedfs/client/btree/serialize/kryo/StringDeduplicatingKryoBackedDecoder.java new file mode 100644 index 000000000..f323daf43 --- /dev/null +++ b/test/random_access/src/main/java/seaweedfs/client/btree/serialize/kryo/StringDeduplicatingKryoBackedDecoder.java @@ -0,0 +1,188 @@ +/* + * Copyright 2018 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package seaweedfs.client.btree.serialize.kryo; + +import com.esotericsoftware.kryo.KryoException; +import com.esotericsoftware.kryo.io.Input; +import seaweedfs.client.btree.serialize.AbstractDecoder; +import seaweedfs.client.btree.serialize.Decoder; + +import java.io.Closeable; +import java.io.EOFException; +import java.io.IOException; +import java.io.InputStream; + +/** + * Note that this decoder uses buffering, so will attempt to read beyond the end of the encoded data. This means you should use this type only when this decoder will be used to decode the entire + * stream. 
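The encodeChunked()/decodeChunked() pair above frames a nested stream as writeSmallInt-length-prefixed chunks terminated by a zero length, which is what lets skipChunked() jump over it without decoding. A sketch continuing the RoundTripDemo above (same encoder/decoder pair); note the decoder's read path currently assumes matching buffer sizes on both sides, per the comment in the nested InputStream:

    encoder.encodeChunked(new Encoder.EncodeAction<Encoder>() {
        @Override
        public void write(Encoder nested) throws Exception {
            nested.writeString("chunked payload");       // buffered, emitted as length-prefixed chunks
        }
    });
    encoder.flush();

    String payload = decoder.decodeChunked(new Decoder.DecodeAction<Decoder, String>() {
        @Override
        public String read(Decoder nested) throws Exception {
            return nested.readString();                  // reads from the reassembled nested stream
        }
    });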
+ */ +public class StringDeduplicatingKryoBackedDecoder extends AbstractDecoder implements Decoder, Closeable { + public static final int INITIAL_CAPACITY = 32; + private final Input input; + private final InputStream inputStream; + private String[] strings; + private long extraSkipped; + + public StringDeduplicatingKryoBackedDecoder(InputStream inputStream) { + this(inputStream, 4096); + } + + public StringDeduplicatingKryoBackedDecoder(InputStream inputStream, int bufferSize) { + this.inputStream = inputStream; + input = new Input(this.inputStream, bufferSize); + } + + @Override + protected int maybeReadBytes(byte[] buffer, int offset, int count) { + return input.read(buffer, offset, count); + } + + @Override + protected long maybeSkip(long count) throws IOException { + // Work around some bugs in Input.skip() + int remaining = input.limit() - input.position(); + if (remaining == 0) { + long skipped = inputStream.skip(count); + if (skipped > 0) { + extraSkipped += skipped; + } + return skipped; + } else if (count <= remaining) { + input.setPosition(input.position() + (int) count); + return count; + } else { + input.setPosition(input.limit()); + return remaining; + } + } + + private RuntimeException maybeEndOfStream(KryoException e) throws EOFException { + if (e.getMessage().equals("Buffer underflow.")) { + throw (EOFException) (new EOFException().initCause(e)); + } + throw e; + } + + @Override + public byte readByte() throws EOFException { + try { + return input.readByte(); + } catch (KryoException e) { + throw maybeEndOfStream(e); + } + } + + @Override + public void readBytes(byte[] buffer, int offset, int count) throws EOFException { + try { + input.readBytes(buffer, offset, count); + } catch (KryoException e) { + throw maybeEndOfStream(e); + } + } + + @Override + public long readLong() throws EOFException { + try { + return input.readLong(); + } catch (KryoException e) { + throw maybeEndOfStream(e); + } + } + + @Override + public long readSmallLong() throws EOFException, IOException { + try { + return input.readLong(true); + } catch (KryoException e) { + throw maybeEndOfStream(e); + } + } + + @Override + public int readInt() throws EOFException { + try { + return input.readInt(); + } catch (KryoException e) { + throw maybeEndOfStream(e); + } + } + + @Override + public int readSmallInt() throws EOFException { + try { + return input.readInt(true); + } catch (KryoException e) { + throw maybeEndOfStream(e); + } + } + + @Override + public boolean readBoolean() throws EOFException { + try { + return input.readBoolean(); + } catch (KryoException e) { + throw maybeEndOfStream(e); + } + } + + @Override + public String readString() throws EOFException { + return readNullableString(); + } + + @Override + public String readNullableString() throws EOFException { + try { + int idx = readInt(); + if (idx == -1) { + return null; + } + if (strings == null) { + strings = new String[INITIAL_CAPACITY]; + } + String string = null; + if (idx >= strings.length) { + String[] grow = new String[strings.length * 3 / 2]; + System.arraycopy(strings, 0, grow, 0, strings.length); + strings = grow; + } else { + string = strings[idx]; + } + if (string == null) { + string = input.readString(); + strings[idx] = string; + } + return string; + } catch (KryoException e) { + throw maybeEndOfStream(e); + } + } + + /** + * Returns the total number of bytes consumed by this decoder. Some additional bytes may also be buffered by this decoder but have not been consumed. 
+ */ + public long getReadPosition() { + return input.total() + extraSkipped; + } + + @Override + public void close() throws IOException { + strings = null; + input.close(); + } +} diff --git a/test/random_access/src/main/java/seaweedfs/client/btree/serialize/kryo/StringDeduplicatingKryoBackedEncoder.java b/test/random_access/src/main/java/seaweedfs/client/btree/serialize/kryo/StringDeduplicatingKryoBackedEncoder.java new file mode 100644 index 000000000..140933660 --- /dev/null +++ b/test/random_access/src/main/java/seaweedfs/client/btree/serialize/kryo/StringDeduplicatingKryoBackedEncoder.java @@ -0,0 +1,128 @@ +/* + * Copyright 2018 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package seaweedfs.client.btree.serialize.kryo; + +import com.esotericsoftware.kryo.io.Output; +import com.google.common.collect.Maps; +import seaweedfs.client.btree.serialize.AbstractEncoder; +import seaweedfs.client.btree.serialize.FlushableEncoder; + +import javax.annotation.Nullable; +import java.io.Closeable; +import java.io.OutputStream; +import java.util.Map; + +public class StringDeduplicatingKryoBackedEncoder extends AbstractEncoder implements FlushableEncoder, Closeable { + private Map strings; + + private final Output output; + + public StringDeduplicatingKryoBackedEncoder(OutputStream outputStream) { + this(outputStream, 4096); + } + + public StringDeduplicatingKryoBackedEncoder(OutputStream outputStream, int bufferSize) { + output = new Output(outputStream, bufferSize); + } + + @Override + public void writeByte(byte value) { + output.writeByte(value); + } + + @Override + public void writeBytes(byte[] bytes, int offset, int count) { + output.writeBytes(bytes, offset, count); + } + + @Override + public void writeLong(long value) { + output.writeLong(value); + } + + @Override + public void writeSmallLong(long value) { + output.writeLong(value, true); + } + + @Override + public void writeInt(int value) { + output.writeInt(value); + } + + @Override + public void writeSmallInt(int value) { + output.writeInt(value, true); + } + + @Override + public void writeBoolean(boolean value) { + output.writeBoolean(value); + } + + @Override + public void writeString(CharSequence value) { + if (value == null) { + throw new IllegalArgumentException("Cannot encode a null string."); + } + writeNullableString(value); + } + + @Override + public void writeNullableString(@Nullable CharSequence value) { + if (value == null) { + output.writeInt(-1); + return; + } else { + if (strings == null) { + strings = Maps.newHashMapWithExpectedSize(1024); + } + } + String key = value.toString(); + Integer index = strings.get(key); + if (index == null) { + index = strings.size(); + output.writeInt(index); + strings.put(key, index); + output.writeString(key); + } else { + output.writeInt(index); + } + } + + /** + * Returns the total number of bytes written by this encoder, some of which may still be buffered. 
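The deduplicating pair replaces each distinct string with a growing integer index: the first occurrence writes the index plus the literal, later occurrences write only the fixed-width index, and the decoder above rebuilds the same table on read. A runnable sketch (class name is made up):

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import seaweedfs.client.btree.serialize.kryo.StringDeduplicatingKryoBackedDecoder;
    import seaweedfs.client.btree.serialize.kryo.StringDeduplicatingKryoBackedEncoder;

    public class DedupDemo {
        public static void main(String[] args) throws Exception {
            ByteArrayOutputStream bytes = new ByteArrayOutputStream();
            StringDeduplicatingKryoBackedEncoder encoder = new StringDeduplicatingKryoBackedEncoder(bytes);
            encoder.writeString("seaweedfs");            // first occurrence: index 0 plus the literal
            encoder.writeString("seaweedfs");            // repeat: the int index only
            encoder.flush();

            StringDeduplicatingKryoBackedDecoder decoder =
                    new StringDeduplicatingKryoBackedDecoder(new ByteArrayInputStream(bytes.toByteArray()));
            String first = decoder.readString();         // reads and caches the string at index 0
            String second = decoder.readString();        // served from the decoder-side table
        }
    }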
+ */ + public long getWritePosition() { + return output.total(); + } + + @Override + public void flush() { + output.flush(); + } + + @Override + public void close() { + output.close(); + } + + public void done() { + strings = null; + } + +} diff --git a/test/random_access/src/main/java/seaweedfs/client/btree/serialize/kryo/TypeSafeSerializer.java b/test/random_access/src/main/java/seaweedfs/client/btree/serialize/kryo/TypeSafeSerializer.java new file mode 100644 index 000000000..16c00cdf4 --- /dev/null +++ b/test/random_access/src/main/java/seaweedfs/client/btree/serialize/kryo/TypeSafeSerializer.java @@ -0,0 +1,51 @@ +/* + * Copyright 2012 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package seaweedfs.client.btree.serialize.kryo; + +import seaweedfs.client.btree.serialize.*; + +public class TypeSafeSerializer<T> implements StatefulSerializer<Object> { + private final Class<T> type; + private final StatefulSerializer<T> serializer; + + public TypeSafeSerializer(Class<T> type, StatefulSerializer<T> serializer) { + this.type = type; + this.serializer = serializer; + } + + @Override + public ObjectReader<Object> newReader(Decoder decoder) { + final ObjectReader<T> reader = serializer.newReader(decoder); + return new ObjectReader<Object>() { + @Override + public Object read() throws Exception { + return reader.read(); + } + }; + } + + @Override + public ObjectWriter<Object> newWriter(Encoder encoder) { + final ObjectWriter<T> writer = serializer.newWriter(encoder); + return new ObjectWriter<Object>() { + @Override + public void write(Object value) throws Exception { + writer.write(type.cast(value)); + } + }; + } +} diff --git a/test/random_access/src/test/java/seaweedfs/client/btree/BTreePersistentIndexedCacheTest.java b/test/random_access/src/test/java/seaweedfs/client/btree/BTreePersistentIndexedCacheTest.java new file mode 100644 index 000000000..796c7f0f5 --- /dev/null +++ b/test/random_access/src/test/java/seaweedfs/client/btree/BTreePersistentIndexedCacheTest.java @@ -0,0 +1,476 @@ +/* + * Copyright 2010 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ +package seaweedfs.client.btree; + +import seaweedfs.client.btree.serialize.DefaultSerializer; +import seaweedfs.client.btree.serialize.Serializer; +import org.junit.Before; +import org.junit.Rule; +import org.junit.Test; + +import java.io.File; +import java.io.IOException; +import java.io.RandomAccessFile; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.Comparator; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Map; + +import static org.hamcrest.CoreMatchers.*; +import static org.junit.Assert.assertNull; +import static org.hamcrest.MatcherAssert.assertThat; +import static org.junit.Assert.assertTrue; + +public class BTreePersistentIndexedCacheTest { + private final Serializer stringSerializer = new DefaultSerializer(); + private final Serializer integerSerializer = new DefaultSerializer(); + private BTreePersistentIndexedCache cache; + private File cacheFile; + + @Before + public void setup() { + cacheFile = tmpDirFile("cache.bin"); + } + + public File tmpDirFile(String filename) { + File f = new File("/Users/chris/tmp/mm/dev/btree_test"); + // File f = new File("/tmp/btree_test"); + f.mkdirs(); + return new File(f, filename); + } + + private void createCache() { + cache = new BTreePersistentIndexedCache(cacheFile, stringSerializer, integerSerializer, (short) 4, 100); + } + + private void verifyAndCloseCache() { + cache.verify(); + cache.close(); + } + + @Test + public void getReturnsNullWhenEntryDoesNotExist() { + createCache(); + assertNull(cache.get("unknown")); + verifyAndCloseCache(); + } + + @Test + public void persistsAddedEntries() { + createCache(); + checkAdds(1, 2, 3, 4, 5); + verifyAndCloseCache(); + } + + @Test + public void persistsAddedEntriesInReverseOrder() { + createCache(); + checkAdds(5, 4, 3, 2, 1); + verifyAndCloseCache(); + } + + @Test + public void persistsAddedEntriesOverMultipleIndexBlocks() { + createCache(); + checkAdds(3, 2, 11, 5, 7, 1, 10, 8, 9, 4, 6, 0); + verifyAndCloseCache(); + } + + @Test + public void persistsUpdates() { + createCache(); + checkUpdates(3, 2, 11, 5, 7, 1, 10, 8, 9, 4, 6, 0); + verifyAndCloseCache(); + } + + @Test + public void handlesUpdatesWhenBlockSizeDecreases() { + BTreePersistentIndexedCache> cache = + new BTreePersistentIndexedCache>( + tmpDirFile("listcache.bin"), stringSerializer, + new DefaultSerializer>(), (short) 4, 100); + + List values = Arrays.asList(3, 2, 11, 5, 7, 1, 10, 8, 9, 4, 6, 0); + Map> updated = new LinkedHashMap>(); + + for (int i = 10; i > 0; i--) { + for (Integer value : values) { + String key = String.format("key_%d", value); + List newValue = new ArrayList(i); + for (int j = 0; j < i * 2; j++) { + newValue.add(j); + } + cache.put(key, newValue); + updated.put(value, newValue); + } + + checkListEntries(cache, updated); + } + + cache.reset(); + + checkListEntries(cache, updated); + + cache.verify(); + cache.close(); + } + + private void checkListEntries(BTreePersistentIndexedCache> cache, Map> updated) { + for (Map.Entry> entry : updated.entrySet()) { + String key = String.format("key_%d", entry.getKey()); + assertThat(cache.get(key), equalTo(entry.getValue())); + } + } + + @Test + public void handlesUpdatesWhenBlockSizeIncreases() { + BTreePersistentIndexedCache> cache = + new BTreePersistentIndexedCache>( + tmpDirFile("listcache.bin"), stringSerializer, + new DefaultSerializer>(), (short) 4, 100); + + List values = Arrays.asList(3, 2, 11, 5, 7, 1, 10, 8, 9, 4, 6, 0); + Map> updated = new 
+ @Test + public void handlesUpdatesWhenBlockSizeIncreases() { + BTreePersistentIndexedCache<String, List<Integer>> cache = + new BTreePersistentIndexedCache<String, List<Integer>>( + tmpDirFile("listcache.bin"), stringSerializer, + new DefaultSerializer<List<Integer>>(), (short) 4, 100); + + List<Integer> values = Arrays.asList(3, 2, 11, 5, 7, 1, 10, 8, 9, 4, 6, 0); + Map<Integer, List<Integer>> updated = new LinkedHashMap<Integer, List<Integer>>(); + + for (int i = 1; i < 10; i++) { + for (Integer value : values) { + String key = String.format("key_%d", value); + List<Integer> newValue = new ArrayList<Integer>(i); + for (int j = 0; j < i * 2; j++) { + newValue.add(j); + } + cache.put(key, newValue); + updated.put(value, newValue); + } + + checkListEntries(cache, updated); + } + + cache.reset(); + + checkListEntries(cache, updated); + + cache.verify(); + cache.close(); + } + + @Test + public void persistsAddedEntriesAfterReopen() { + createCache(); + + checkAdds(1, 2, 3, 4); + + cache.reset(); + + checkAdds(5, 6, 7, 8); + verifyAndCloseCache(); + } + + @Test + public void persistsReplacedEntries() { + createCache(); + + cache.put("key_1", 1); + cache.put("key_2", 2); + cache.put("key_3", 3); + cache.put("key_4", 4); + cache.put("key_5", 5); + + cache.put("key_1", 1); + cache.put("key_4", 12); + + assertThat(cache.get("key_1"), equalTo(1)); + assertThat(cache.get("key_2"), equalTo(2)); + assertThat(cache.get("key_3"), equalTo(3)); + assertThat(cache.get("key_4"), equalTo(12)); + assertThat(cache.get("key_5"), equalTo(5)); + + cache.reset(); + + assertThat(cache.get("key_1"), equalTo(1)); + assertThat(cache.get("key_2"), equalTo(2)); + assertThat(cache.get("key_3"), equalTo(3)); + assertThat(cache.get("key_4"), equalTo(12)); + assertThat(cache.get("key_5"), equalTo(5)); + + verifyAndCloseCache(); + } + + @Test + public void reusesEmptySpaceWhenPuttingEntries() { + BTreePersistentIndexedCache<String, String> cache = new BTreePersistentIndexedCache<String, String>(cacheFile, stringSerializer, stringSerializer, (short) 4, 100); + + long beforeLen = cacheFile.length(); + if (beforeLen>0){ + System.out.println(String.format("cache %s: %s", "key_new", cache.get("key_new"))); + } + + cache.put("key_1", "abcd"); + cache.put("key_2", "abcd"); + cache.put("key_3", "abcd"); + cache.put("key_4", "abcd"); + cache.put("key_5", "abcd"); + + long len = cacheFile.length(); + assertTrue(len > 0L); + + System.out.println(String.format("cache file size %d => %d", beforeLen, len)); + + cache.put("key_1", "1234"); + assertThat(cacheFile.length(), equalTo(len)); + + cache.remove("key_1"); + cache.put("key_new", "a1b2"); + assertThat(cacheFile.length(), equalTo(len)); + + cache.put("key_new", "longer value assertThat(cacheFile.length(), equalTo(len))"); + System.out.println(String.format("cache file size %d beforeLen %d", cacheFile.length(), len)); + // assertTrue(cacheFile.length() > len); + len = cacheFile.length(); + + cache.put("key_1", "1234"); + assertThat(cacheFile.length(), equalTo(len)); + + cache.close(); + } + + @Test + public void canHandleLargeNumberOfEntries() { + createCache(); + int count = 2000; + List<Integer> values = new ArrayList<Integer>(); + for (int i = 0; i < count; i++) { + values.add(i); + } + + checkAddsAndRemoves(null, values); + + long len = cacheFile.length(); + + checkAddsAndRemoves(Collections.reverseOrder(), values); + + // need to make this better + assertTrue(cacheFile.length() < (long)(1.4 * len)); + + checkAdds(values); + + // need to make this better + assertTrue(cacheFile.length() < (long) (1.4 * 1.4 * len)); + + cache.close(); + }
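+ + // The removal tests below delete keys in different orders to push the b-tree through its + // rebalancing paths; the "Ends up with" comments sketch the expected leaf layout just before + // the remove.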
+ + @Test + public void persistsRemovalOfEntries() { + createCache(); + checkAddsAndRemoves(1, 2, 3, 4, 5); + verifyAndCloseCache(); + } + + @Test + public void persistsRemovalOfEntriesInReverse() { + createCache(); + checkAddsAndRemoves(Collections.reverseOrder(), 1, 2, 3, 4, 5); + verifyAndCloseCache(); + } + + @Test + public void persistsRemovalOfEntriesOverMultipleIndexBlocks() { + createCache(); + checkAddsAndRemoves(4, 12, 9, 1, 3, 10, 11, 7, 8, 2, 5, 6); + verifyAndCloseCache(); + } + + @Test + public void removalRedistributesRemainingEntriesWithLeftSibling() { + createCache(); + // Ends up with: 1 2 3 -> 4 <- 5 6 + checkAdds(1, 2, 5, 6, 4, 3); + cache.verify(); + cache.remove("key_5"); + verifyAndCloseCache(); + } + + @Test + public void removalMergesRemainingEntriesIntoLeftSibling() { + createCache(); + // Ends up with: 1 2 -> 3 <- 4 5 + checkAdds(1, 2, 4, 5, 3); + cache.verify(); + cache.remove("key_4"); + verifyAndCloseCache(); + } + + @Test + public void removalRedistributesRemainingEntriesWithRightSibling() { + createCache(); + // Ends up with: 1 2 -> 3 <- 4 5 6 + checkAdds(1, 2, 4, 5, 3, 6); + cache.verify(); + cache.remove("key_2"); + verifyAndCloseCache(); + } + + @Test + public void removalMergesRemainingEntriesIntoRightSibling() { + createCache(); + // Ends up with: 1 2 -> 3 <- 4 5 + checkAdds(1, 2, 4, 5, 3); + cache.verify(); + cache.remove("key_2"); + verifyAndCloseCache(); + } + + @Test + public void handlesOpeningATruncatedCacheFile() throws IOException { + BTreePersistentIndexedCache<String, Integer> cache = new BTreePersistentIndexedCache<String, Integer>(cacheFile, stringSerializer, integerSerializer); + + assertNull(cache.get("key_1")); + cache.put("key_1", 99); + + RandomAccessFile file = new RandomAccessFile(cacheFile, "rw"); + file.setLength(file.length() - 10); + file.close(); + + cache.reset(); + + assertNull(cache.get("key_1")); + cache.verify(); + + cache.close(); + } + + @Test + public void canUseFileAsKey() { + BTreePersistentIndexedCache<File, Integer> cache = new BTreePersistentIndexedCache<File, Integer>(cacheFile, new DefaultSerializer<File>(), integerSerializer); + + cache.put(new File("file"), 1); + cache.put(new File("dir/file"), 2); + cache.put(new File("File"), 3); + + assertThat(cache.get(new File("file")), equalTo(1)); + assertThat(cache.get(new File("dir/file")), equalTo(2)); + assertThat(cache.get(new File("File")), equalTo(3)); + + cache.close(); + } + + @Test + public void handlesKeysWithSameHashCode() { + createCache(); + + String key1 = new String(new byte[]{2, 31}); + String key2 = new String(new byte[]{1, 62}); + cache.put(key1, 1); + cache.put(key2, 2); + + assertThat(cache.get(key1), equalTo(1)); + assertThat(cache.get(key2), equalTo(2)); + + cache.close(); + } + + private void checkAdds(Integer... values) { + checkAdds(Arrays.asList(values)); + } + + private Map<String, Integer> checkAdds(Iterable<Integer> values) { + Map<String, Integer> added = new LinkedHashMap<String, Integer>(); + + for (Integer value : values) { + String key = String.format("key_%d", value); + cache.put(key, value); + added.put(String.format("key_%d", value), value); + } + + for (Map.Entry<String, Integer> entry : added.entrySet()) { + assertThat(cache.get(entry.getKey()), equalTo(entry.getValue())); + } + + cache.reset(); + + for (Map.Entry<String, Integer> entry : added.entrySet()) { + assertThat(cache.get(entry.getKey()), equalTo(entry.getValue())); + } + + return added; + }
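+ + // Like checkAdds above, the remaining helpers assert every entry twice: once right after the + // writes and once more after cache.reset(), so each scenario is verified both in memory and + // as persisted in the cache file.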
+ + private void checkUpdates(Integer... values) { + checkUpdates(Arrays.asList(values)); + } + + private Map<Integer, Integer> checkUpdates(Iterable<Integer> values) { + Map<Integer, Integer> updated = new LinkedHashMap<Integer, Integer>(); + + for (int i = 0; i < 10; i++) { + for (Integer value : values) { + String key = String.format("key_%d", value); + int newValue = value + (i * 100); + cache.put(key, newValue); + updated.put(value, newValue); + } + + for (Map.Entry<Integer, Integer> entry : updated.entrySet()) { + String key = String.format("key_%d", entry.getKey()); + assertThat(cache.get(key), equalTo(entry.getValue())); + } + } + + cache.reset(); + + for (Map.Entry<Integer, Integer> entry : updated.entrySet()) { + String key = String.format("key_%d", entry.getKey()); + assertThat(cache.get(key), equalTo(entry.getValue())); + } + + return updated; + } + + private void checkAddsAndRemoves(Integer... values) { + checkAddsAndRemoves(null, values); + } + + private void checkAddsAndRemoves(Comparator<Integer> comparator, Integer... values) { + checkAddsAndRemoves(comparator, Arrays.asList(values)); + } + + private void checkAddsAndRemoves(Comparator<Integer> comparator, Collection<Integer> values) { + checkAdds(values); + + List<Integer> deleteValues = new ArrayList<Integer>(values); + Collections.sort(deleteValues, comparator); + for (Integer value : deleteValues) { + String key = String.format("key_%d", value); + assertThat(cache.get(key), notNullValue()); + cache.remove(key); + assertThat(cache.get(key), nullValue()); + } + + cache.reset(); + cache.verify(); + + for (Integer value : deleteValues) { + String key = String.format("key_%d", value); + assertThat(cache.get(key), nullValue()); + } + } + +} diff --git a/test/random_access/src/test/java/seaweedfs/file/MmapFileTest.java b/test/random_access/src/test/java/seaweedfs/file/MmapFileTest.java new file mode 100644 index 000000000..1d741ee2f --- /dev/null +++ b/test/random_access/src/test/java/seaweedfs/file/MmapFileTest.java @@ -0,0 +1,143 @@ +package seaweedfs.file; + +import org.junit.Test; + +import java.io.File; +import java.io.FileOutputStream; +import java.io.IOException; +import java.io.RandomAccessFile; +import java.nio.MappedByteBuffer; +import java.nio.channels.FileChannel; + +public class MmapFileTest { + + static File dir = new File("/Users/chris/tmp/mm/dev"); + + @Test + public void testMmap() { + try { + System.out.println("starting ..."); + + File f = new File(dir, "mmap_file.txt"); + RandomAccessFile raf = new RandomAccessFile(f, "rw"); + FileChannel fc = raf.getChannel(); + MappedByteBuffer mbf = fc.map(FileChannel.MapMode.READ_ONLY, 0, fc.size()); + fc.close(); + raf.close(); + + FileOutputStream fos = new FileOutputStream(f); + fos.write("abcdefg".getBytes()); + fos.close(); + System.out.println("completed!"); + } catch (Exception e) { + e.printStackTrace(); + } + } + + @Test + public void testBigMmap() throws IOException { + /* + +// new file +I0817 09:48:02 25175 dir.go:147] create /dev/mmap_big.txt: OpenReadWrite+OpenCreate +I0817 09:48:02 25175 wfs.go:116] AcquireHandle /dev/mmap_big.txt uid=502 gid=20 +I0817 09:48:02 25175 file.go:62] file Attr /dev/mmap_big.txt, open:1, size: 0 +I0817 09:48:02 25175 meta_cache_subscribe.go:32] creating /dev/mmap_big.txt + +//get channel +I0817 09:48:26 25175 file.go:62] file Attr /dev/mmap_big.txt, open:1, size: 0 + +I0817 09:48:32 25175 file.go:62] file Attr /dev/mmap_big.txt, open:1, size: 0 +I0817 09:48:32 25175 wfs.go:116] AcquireHandle /dev/mmap_big.txt uid=0 gid=0 +I0817 09:48:32 25175 filehandle.go:160] Release /dev/mmap_big.txt fh 14968871991130164560 + +//fileChannel.map +I0817 09:49:18 25175 file.go:62] file Attr /dev/mmap_big.txt, open:1, size: 0
+I0817 09:49:18 25175 file.go:112] /dev/mmap_big.txt file setattr set size=262144 chunks=0 +I0817 09:49:18 25175 file.go:62] file Attr /dev/mmap_big.txt, open:1, size: 262144 +I0817 09:49:18 25175 file.go:62] file Attr /dev/mmap_big.txt, open:1, size: 262144 +I0817 09:49:18 25175 file.go:62] file Attr /dev/mmap_big.txt, open:1, size: 262144 + +// buffer.put +I0817 09:49:49 25175 filehandle.go:57] /dev/mmap_big.txt read fh 14968871991130164560: [0,32768) size 32768 resp.Data len=0 cap=32768 +I0817 09:49:49 25175 reader_at.go:113] zero2 [0,32768) +I0817 09:49:50 25175 file.go:62] file Attr /dev/mmap_big.txt, open:1, size: 262144 + +I0817 09:49:53 25175 file.go:233] /dev/mmap_big.txt fsync file Fsync [ID=0x4 Node=0xe Uid=0 Gid=0 Pid=0] Handle 0x2 Flags 1 + +//close +I0817 09:50:14 25175 file.go:62] file Attr /dev/mmap_big.txt, open:1, size: 262144 +I0817 09:50:14 25175 dirty_page.go:130] saveToStorage /dev/mmap_big.txt 1,315b69812039e5 [0,4096) of 262144 bytes +I0817 09:50:14 25175 file.go:274] /dev/mmap_big.txt existing 0 chunks adds 1 more +I0817 09:50:14 25175 filehandle.go:218] /dev/mmap_big.txt set chunks: 1 +I0817 09:50:14 25175 filehandle.go:220] /dev/mmap_big.txt chunks 0: 1,315b69812039e5 [0,4096) +I0817 09:50:14 25175 meta_cache_subscribe.go:23] deleting /dev/mmap_big.txt +I0817 09:50:14 25175 meta_cache_subscribe.go:32] creating /dev/mmap_big.txt + +// end of test +I0817 09:50:41 25175 file.go:62] file Attr /dev/mmap_big.txt, open:1, size: 262144 +I0817 09:50:41 25175 filehandle.go:160] Release /dev/mmap_big.txt fh 14968871991130164560 + + */ + // Create file object + File file = new File(dir, "mmap_big.txt"); + + try (RandomAccessFile randomAccessFile = new RandomAccessFile(file, "rw")) { + // Get file channel in read-write mode + FileChannel fileChannel = randomAccessFile.getChannel(); + + // Get direct byte buffer access using channel.map() operation + MappedByteBuffer buffer = fileChannel.map(FileChannel.MapMode.READ_WRITE, 0, 4096 * 8 * 8); + + //Write the content using put methods + buffer.put("howtodoinjava.com".getBytes()); + } + +/* +> meta.cat /dev/mmap_big.txt +{ + "name": "mmap_big.txt", + "isDirectory": false, + "chunks": [ + { + "fileId": "1,315b69812039e5", + "offset": "0", + "size": "4096", + "mtime": "1597683014026365000", + "eTag": "985ab0ac", + "sourceFileId": "", + "fid": { + "volumeId": 1, + "fileKey": "3234665", + "cookie": 2166372837 + }, + "sourceFid": null, + "cipherKey": null, + "isCompressed": true, + "isChunkManifest": false + } + ], + "attributes": { + "fileSize": "262144", + "mtime": "1597683014", + "fileMode": 420, + "uid": 502, + "gid": 20, + "crtime": "1597682882", + "mime": "application/octet-stream", + "replication": "", + "collection": "", + "ttlSec": 0, + "userName": "", + "groupName": [ + ], + "symlinkTarget": "", + "md5": null + }, + "extended": { + } +} + */ + + } +} diff --git a/test/random_access/src/test/java/seaweedfs/file/RandomeAccessFileTest.java b/test/random_access/src/test/java/seaweedfs/file/RandomeAccessFileTest.java new file mode 100644 index 000000000..cb5847567 --- /dev/null +++ b/test/random_access/src/test/java/seaweedfs/file/RandomeAccessFileTest.java @@ -0,0 +1,70 @@ +package seaweedfs.file; + +import org.junit.Assert; +import org.junit.Test; + +import java.io.File; +import java.io.IOException; +import java.io.RandomAccessFile; +import java.nio.ByteBuffer; +import java.util.Random; + +public class RandomeAccessFileTest { + + @Test + public void testRandomWriteAndRead() throws IOException { + + File f = new 
File(MmapFileTest.dir, "mmap_file.txt"); + + RandomAccessFile af = new RandomAccessFile(f, "rw"); + af.setLength(0); + af.close(); + + Random r = new Random(); + + int maxLength = 5000; + + byte[] data = new byte[maxLength]; + byte[] readData = new byte[maxLength]; + + for (int i = 4096; i < maxLength; i++) { + + RandomAccessFile raf = new RandomAccessFile(f, "rw"); + long fileSize = raf.length(); + + raf.readFully(readData, 0, (int)fileSize); + + for (int x=0;x<fileSize;x++){ + Assert.assertEquals(data[x], readData[x]); + } + + int start = r.nextInt(i); + int stop = r.nextInt(i); + if (start > stop) { + int t = stop; + stop = start; + start = t; + } + if (stop > fileSize) { + fileSize = stop; + raf.setLength(fileSize); + } + + randomize(r, data, start, stop); + raf.seek(start); + raf.write(data, start, stop-start); + + raf.close(); + } + + } + + private static void randomize(Random r, byte[] bytes, int start, int stop) { + for (int i = start; i < stop; i++) { + int rnd = r.nextInt(); + bytes[i] = (byte) rnd; + } + } + + +} diff --git a/test/s3/basic/basic_test.go b/test/s3/basic/basic_test.go new file mode 100644 index 000000000..653fa1237 --- /dev/null +++ b/test/s3/basic/basic_test.go @@ -0,0 +1,226 @@ +package basic + +import ( + "fmt" + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/s3" + "io/ioutil" + "os" + "strings" + "testing" +) + +var ( + svc *s3.S3 +) + +func init() { + // Initialize a session in us-west-2 that the SDK will use to load + // credentials from the shared credentials file ~/.aws/credentials. + sess, err := session.NewSession(&aws.Config{ + Region: aws.String("us-west-2"), + Endpoint: aws.String("localhost:8333"), + DisableSSL: aws.Bool(true), + }) + if err != nil { + exitErrorf("create session, %v", err) + } + + // Create S3 service client + svc = s3.New(sess) +} + +func TestCreateBucket(t *testing.T) { + + input := &s3.CreateBucketInput{ + Bucket: aws.String("theBucket"), + } + + result, err := svc.CreateBucket(input) + if err != nil { + if aerr, ok := err.(awserr.Error); ok { + switch aerr.Code() { + case s3.ErrCodeBucketAlreadyExists: + fmt.Println(s3.ErrCodeBucketAlreadyExists, aerr.Error()) + case s3.ErrCodeBucketAlreadyOwnedByYou: + fmt.Println(s3.ErrCodeBucketAlreadyOwnedByYou, aerr.Error()) + default: + fmt.Println(aerr.Error()) + } + } else { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error. + fmt.Println(err.Error()) + } + return + } + + fmt.Println(result) + +} + +func TestPutObject(t *testing.T) { + + input := &s3.PutObjectInput{ + ACL: aws.String("authenticated-read"), + Body: aws.ReadSeekCloser(strings.NewReader("filetoupload")), + Bucket: aws.String("theBucket"), + Key: aws.String("exampleobject"), + } + + result, err := svc.PutObject(input) + if err != nil { + if aerr, ok := err.(awserr.Error); ok { + switch aerr.Code() { + default: + fmt.Println(aerr.Error()) + } + } else { + // Print the error, cast err to awserr.Error to get the Code and + // Message from an error.
+ fmt.Println(err.Error()) + } + return + } + + fmt.Println(result) + +} + +func TestListBucket(t *testing.T) { + + result, err := svc.ListBuckets(nil) + if err != nil { + exitErrorf("Unable to list buckets, %v", err) + } + + fmt.Println("Buckets:") + + for _, b := range result.Buckets { + fmt.Printf("* %s created on %s\n", + aws.StringValue(b.Name), aws.TimeValue(b.CreationDate)) + } + +} + +func TestListObjectV2(t *testing.T) { + + listObj, err := svc.ListObjectsV2(&s3.ListObjectsV2Input{ + Bucket: aws.String(Bucket), + Prefix: aws.String("foo"), + Delimiter: aws.String("/"), + }) + if err != nil { + exitErrorf("Unable to list objects, %v", err) + } + for _, content := range listObj.Contents { + fmt.Println(aws.StringValue(content.Key)) + } + fmt.Printf("list: %s\n", listObj) + +} + +func exitErrorf(msg string, args ...interface{}) { + fmt.Fprintf(os.Stderr, msg+"\n", args...) + os.Exit(1) +} + +const ( + Bucket = "theBucket" + object = "foo/bar" + Data = "" +) + +func TestObjectOp(t *testing.T) { + _, err := svc.CreateBucket(&s3.CreateBucketInput{ + Bucket: aws.String(Bucket), + }) + if err != nil { + exitErrorf("Unable to create bucket, %v", err) + } + + _, err = svc.PutObject(&s3.PutObjectInput{ + Bucket: aws.String(Bucket), + Key: aws.String(object), + Body: strings.NewReader(Data), + }) + if err != nil { + exitErrorf("Unable to put object, %v", err) + } + + dest := fmt.Sprintf("%s_bak", object) + copyObj, err := svc.CopyObject(&s3.CopyObjectInput{ + Bucket: aws.String(Bucket), + CopySource: aws.String(fmt.Sprintf("%s/%s", Bucket, object)), + Key: aws.String(dest), + }) + if err != nil { + exitErrorf("Unable to copy object, %v", err) + } + t.Log("copy object result -> ", copyObj.CopyObjectResult) + + getObj, err := svc.GetObject(&s3.GetObjectInput{ + Bucket: aws.String(Bucket), + Key: aws.String(dest), + }) + if err != nil { + exitErrorf("Unable to get copy object, %v", err) + } + + data, err := ioutil.ReadAll(getObj.Body) + if err != nil { + exitErrorf("Unable to read object data, %v", err) + } + if string(data) != Data { + t.Error("object data -> ", string(data)) + } + + listObj, err := svc.ListObjectsV2(&s3.ListObjectsV2Input{ + Bucket: aws.String(Bucket), + Prefix: aws.String("foo/"), + }) + if err != nil { + exitErrorf("Unable to list objects, %v", err) + } + count := 0 + for _, content := range listObj.Contents { + key := aws.StringValue(content.Key) + if key == dest { + count++ + } else if key == object { + count++ + } + if count == 2 { + break + } + } + if count != 2 { + exitErrorf("Unable to find two objects, %v", listObj.Contents) + } + + _, err = svc.DeleteObject(&s3.DeleteObjectInput{ + Bucket: aws.String(Bucket), + Key: aws.String(object), + }) + if err != nil { + exitErrorf("Unable to delete source object, %v", err) + } + + _, err = svc.DeleteObject(&s3.DeleteObjectInput{ + Bucket: aws.String(Bucket), + Key: aws.String(dest), + }) + if err != nil { + exitErrorf("Unable to delete object, %v", err) + } + + _, err = svc.DeleteBucket(&s3.DeleteBucketInput{ + Bucket: aws.String(Bucket), + }) + + if err != nil { + exitErrorf("Unable to delete bucket, %v", err) + } +} diff --git a/test/s3/basic/object_tagging_test.go b/test/s3/basic/object_tagging_test.go new file mode 100644 index 000000000..2b9b7e5aa --- /dev/null +++ b/test/s3/basic/object_tagging_test.go @@ -0,0 +1,82 @@ +package basic + +import ( + "fmt" + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/s3" + "testing" +) + +func TestObjectTagging(t *testing.T) { + + input := &s3.PutObjectInput{ + 
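// The object written here is the target for the GetObjectTagging/PutObjectTagging/DeleteObjectTagging calls below. + 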
Bucket: aws.String("theBucket"), + Key: aws.String("testDir/testObject"), + } + + svc.PutObject(input) + + printTags() + + setTags() + + printTags() + + clearTags() + + printTags() + +} + +func printTags() { + response, err := svc.GetObjectTagging( + &s3.GetObjectTaggingInput{ + Bucket: aws.String("theBucket"), + Key: aws.String("testDir/testObject"), + }) + + fmt.Println("printTags") + if err != nil { + fmt.Println(err.Error()) + } + + fmt.Println(response.TagSet) +} + +func setTags() { + + response, err := svc.PutObjectTagging(&s3.PutObjectTaggingInput{ + Bucket: aws.String("theBucket"), + Key: aws.String("testDir/testObject"), + Tagging: &s3.Tagging{ + TagSet: []*s3.Tag{ + { + Key: aws.String("kye2"), + Value: aws.String("value2"), + }, + }, + }, + }) + + fmt.Println("setTags") + if err != nil { + fmt.Println(err.Error()) + } + + fmt.Println(response.String()) +} + +func clearTags() { + + response, err := svc.DeleteObjectTagging(&s3.DeleteObjectTaggingInput{ + Bucket: aws.String("theBucket"), + Key: aws.String("testDir/testObject"), + }) + + fmt.Println("clearTags") + if err != nil { + fmt.Println(err.Error()) + } + + fmt.Println(response.String()) +} diff --git a/test/s3/multipart/aws_upload.go b/test/s3/multipart/aws_upload.go new file mode 100644 index 000000000..8c15cf6ed --- /dev/null +++ b/test/s3/multipart/aws_upload.go @@ -0,0 +1,175 @@ +package main + +// copied from https://github.com/apoorvam/aws-s3-multipart-upload + +import ( + "bytes" + "flag" + "fmt" + "net/http" + "os" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/s3" +) + +const ( + maxPartSize = int64(5 * 1024 * 1024) + maxRetries = 3 + awsAccessKeyID = "Your access key" + awsSecretAccessKey = "Your secret key" + awsBucketRegion = "S3 bucket region" + awsBucketName = "newBucket" +) + +var ( + filename = flag.String("f", "", "the file name") +) + +func main() { + flag.Parse() + + creds := credentials.NewStaticCredentials(awsAccessKeyID, awsSecretAccessKey, "") + _, err := creds.Get() + if err != nil { + fmt.Printf("bad credentials: %s", err) + } + cfg := aws.NewConfig().WithRegion(awsBucketRegion).WithCredentials(creds).WithDisableSSL(true).WithEndpoint("localhost:8333") + svc := s3.New(session.New(), cfg) + + file, err := os.Open(*filename) + if err != nil { + fmt.Printf("err opening file: %s", err) + return + } + defer file.Close() + fileInfo, _ := file.Stat() + size := fileInfo.Size() + buffer := make([]byte, size) + fileType := http.DetectContentType(buffer) + file.Read(buffer) + + path := "/media/" + file.Name() + input := &s3.CreateMultipartUploadInput{ + Bucket: aws.String(awsBucketName), + Key: aws.String(path), + ContentType: aws.String(fileType), + } + + resp, err := svc.CreateMultipartUpload(input) + if err != nil { + fmt.Println(err.Error()) + return + } + fmt.Println("Created multipart upload request") + + var curr, partLength int64 + var remaining = size + var completedParts []*s3.CompletedPart + partNumber := 1 + for curr = 0; remaining != 0; curr += partLength { + if remaining < maxPartSize { + partLength = remaining + } else { + partLength = maxPartSize + } + completedPart, err := uploadPart(svc, resp, buffer[curr:curr+partLength], partNumber) + if err != nil { + fmt.Println(err.Error()) + err := abortMultipartUpload(svc, resp) + if err != nil { + fmt.Println(err.Error()) + } + return + } + remaining -= partLength + partNumber++ + completedParts = 
append(completedParts, completedPart) + } + + // list parts + parts, err := svc.ListParts(&s3.ListPartsInput{ + Bucket: input.Bucket, + Key: input.Key, + MaxParts: nil, + PartNumberMarker: nil, + RequestPayer: nil, + UploadId: resp.UploadId, + }) + if err != nil { + fmt.Println(err.Error()) + return + } + fmt.Printf("list parts: %d\n", len(parts.Parts)) + for i, part := range parts.Parts { + fmt.Printf("part %d: %v\n", i, part) + } + + + completeResponse, err := completeMultipartUpload(svc, resp, completedParts) + if err != nil { + fmt.Println(err.Error()) + return + } + + fmt.Printf("Successfully uploaded file: %s\n", completeResponse.String()) +} + +func completeMultipartUpload(svc *s3.S3, resp *s3.CreateMultipartUploadOutput, completedParts []*s3.CompletedPart) (*s3.CompleteMultipartUploadOutput, error) { + completeInput := &s3.CompleteMultipartUploadInput{ + Bucket: resp.Bucket, + Key: resp.Key, + UploadId: resp.UploadId, + MultipartUpload: &s3.CompletedMultipartUpload{ + Parts: completedParts, + }, + } + return svc.CompleteMultipartUpload(completeInput) +} + +func uploadPart(svc *s3.S3, resp *s3.CreateMultipartUploadOutput, fileBytes []byte, partNumber int) (*s3.CompletedPart, error) { + tryNum := 1 + partInput := &s3.UploadPartInput{ + Body: bytes.NewReader(fileBytes), + Bucket: resp.Bucket, + Key: resp.Key, + PartNumber: aws.Int64(int64(partNumber)), + UploadId: resp.UploadId, + ContentLength: aws.Int64(int64(len(fileBytes))), + } + + for tryNum <= maxRetries { + uploadResult, err := svc.UploadPart(partInput) + if err != nil { + if tryNum == maxRetries { + if aerr, ok := err.(awserr.Error); ok { + return nil, aerr + } + return nil, err + } + fmt.Printf("Retrying to upload part #%v\n", partNumber) + tryNum++ + } else { + fmt.Printf("Uploaded part #%v\n", partNumber) + return &s3.CompletedPart{ + ETag: uploadResult.ETag, + PartNumber: aws.Int64(int64(partNumber)), + }, nil + } + } + return nil, nil +} + +func abortMultipartUpload(svc *s3.S3, resp *s3.CreateMultipartUploadOutput) error { + fmt.Println("Aborting multipart upload for UploadId#" + *resp.UploadId) + abortInput := &s3.AbortMultipartUploadInput{ + Bucket: resp.Bucket, + Key: resp.Key, + UploadId: resp.UploadId, + } + _, err := svc.AbortMultipartUpload(abortInput) + return err +} diff --git a/unmaintained/change_superblock/change_superblock.go b/unmaintained/change_superblock/change_superblock.go index afe651c4e..56342a0cb 100644 --- a/unmaintained/change_superblock/change_superblock.go +++ b/unmaintained/change_superblock/change_superblock.go @@ -92,7 +92,7 @@ func main() { header := superBlock.Bytes() - if n, e := datFile.WriteAt(header, 0); n == 0 || e != nil { + if n, e := datBackend.WriteAt(header, 0); n == 0 || e != nil { glog.Fatalf("cannot write super block: %v", e) } diff --git a/unmaintained/check_disk_size/check_disk_size.go b/unmaintained/check_disk_size/check_disk_size.go new file mode 100644 index 000000000..4a8b92b88 --- /dev/null +++ b/unmaintained/check_disk_size/check_disk_size.go @@ -0,0 +1,42 @@ +package main + +import ( + "flag" + "fmt" + "runtime" + "syscall" +) + +var ( + dir = flag.String("dir", ".", "the directory which uses a disk") +) + +func main() { + flag.Parse() + + fillInDiskStatus(*dir) + + fmt.Printf("OS: %v\n", runtime.GOOS) + fmt.Printf("Arch: %v\n", runtime.GOARCH) + +} + +func fillInDiskStatus(dir string) { + fs := syscall.Statfs_t{} + err := syscall.Statfs(dir, &fs) + if err != nil { + fmt.Printf("failed to statfs on %s: %v\n", dir, err) + return + } + fmt.Printf("statfs: %+v\n", fs) + 
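// Statfs reports capacity in filesystem blocks, not bytes; the totals below multiply the block counts (Blocks, Bfree) by the block size (Bsize). + 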
fmt.Println() + + total := fs.Blocks * uint64(fs.Bsize) + free := fs.Bfree * uint64(fs.Bsize) + fmt.Printf("Total: %d blocks x %d block size = %d bytes\n", fs.Blocks, uint64(fs.Bsize), total) + fmt.Printf("Free : %d blocks x %d block size = %d bytes\n", fs.Bfree, uint64(fs.Bsize), free) + fmt.Printf("Used : %d blocks x %d block size = %d bytes\n", fs.Blocks-fs.Bfree, uint64(fs.Bsize), total-free) + fmt.Printf("Free Percentage : %.2f%%\n", float32((float64(free)/float64(total))*100)) + fmt.Printf("Used Percentage : %.2f%%\n", float32((float64(total-free)/float64(total))*100)) + return +} diff --git a/unmaintained/compact_leveldb/compact_leveldb.go b/unmaintained/compact_leveldb/compact_leveldb.go index 317356c3f..9be5697de 100644 --- a/unmaintained/compact_leveldb/compact_leveldb.go +++ b/unmaintained/compact_leveldb/compact_leveldb.go @@ -5,6 +5,7 @@ import ( "log" "github.com/syndtr/goleveldb/leveldb" + "github.com/syndtr/goleveldb/leveldb/errors" "github.com/syndtr/goleveldb/leveldb/opt" "github.com/syndtr/goleveldb/leveldb/util" ) @@ -25,6 +26,9 @@ func main() { } db, err := leveldb.OpenFile(*dir, opts) + if errors.IsCorrupted(err) { + db, err = leveldb.RecoverFile(*dir, opts) + } if err != nil { log.Fatal(err) } diff --git a/unmaintained/diff_volume_servers/diff_volume_servers.go b/unmaintained/diff_volume_servers/diff_volume_servers.go new file mode 100644 index 000000000..27a537617 --- /dev/null +++ b/unmaintained/diff_volume_servers/diff_volume_servers.go @@ -0,0 +1,196 @@ +package main + +import ( + "bytes" + "context" + "errors" + "flag" + "fmt" + "io" + "math" + "os" + "strings" + + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/operation" + "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb" + "github.com/chrislusf/seaweedfs/weed/security" + "github.com/chrislusf/seaweedfs/weed/storage/idx" + "github.com/chrislusf/seaweedfs/weed/storage/needle" + "github.com/chrislusf/seaweedfs/weed/storage/types" + "github.com/chrislusf/seaweedfs/weed/util" + "google.golang.org/grpc" +) + +var ( + serversStr = flag.String("volumeServers", "", "comma-delimited list of volume servers to diff the volume against") + volumeId = flag.Int("volumeId", -1, "a volume id to diff from servers") + volumeCollection = flag.String("collection", "", "the volume collection name") + grpcDialOption grpc.DialOption +) + +/* + Diff the volume's files across multiple volume servers. 
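+ Each server's .idx index is copied over gRPC and compared needle by needle against the server whose index covers the largest offset (the reference). + Differences are reported as missing, deleted, notDeleted, or wrongSize. +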
+ diff_volume_servers -volumeServers 127.0.0.1:8080,127.0.0.1:8081 -volumeId 5 + + Example Output: + reference 127.0.0.1:8081 + fileId volumeServer message + 5,01617c3f61 127.0.0.1:8080 wrongSize +*/ +func main() { + flag.Parse() + + util.LoadConfiguration("security", false) + grpcDialOption = security.LoadClientTLS(util.GetViper(), "grpc.client") + + vid := uint32(*volumeId) + servers := strings.Split(*serversStr, ",") + if len(servers) < 2 { + glog.Fatalf("You must specify more than 1 server\n") + } + var referenceServer string + var maxOffset int64 + allFiles := map[string]map[types.NeedleId]needleState{} + for _, addr := range servers { + files, offset, err := getVolumeFiles(vid, addr) + if err != nil { + glog.Fatalf("Failed to copy idx from volume server %s\n", err) + } + allFiles[addr] = files + if offset > maxOffset { + maxOffset = offset + referenceServer = addr + } + } + + same := true + fmt.Println("reference", referenceServer) + fmt.Println("fileId volumeServer message") + for nid, n := range allFiles[referenceServer] { + for addr, files := range allFiles { + if addr == referenceServer { + continue + } + var diffMsg string + n2, ok := files[nid] + if !ok { + if n.state == stateDeleted { + continue + } + diffMsg = "missing" + } else if n2.state != n.state { + switch n.state { + case stateDeleted: + diffMsg = "notDeleted" + case statePresent: + diffMsg = "deleted" + } + } else if n2.size != n.size { + diffMsg = "wrongSize" + } else { + continue + } + same = false + + // fetch the needle details + var id string + var err error + if n.state == statePresent { + id, err = getNeedleFileId(vid, nid, referenceServer) + } else { + id, err = getNeedleFileId(vid, nid, addr) + } + if err != nil { + glog.Fatalf("Failed to get needle info %d from volume server %s\n", nid, err) + } + fmt.Println(id, addr, diffMsg) + } + } + if !same { + os.Exit(1) + } +} + +const ( + stateDeleted uint8 = 1 + statePresent uint8 = 2 +) + +type needleState struct { + state uint8 + size types.Size +} + +func getVolumeFiles(v uint32, addr string) (map[types.NeedleId]needleState, int64, error) { + var idxFile *bytes.Reader + err := operation.WithVolumeServerClient(addr, grpcDialOption, func(vs volume_server_pb.VolumeServerClient) error { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + copyFileClient, err := vs.CopyFile(ctx, &volume_server_pb.CopyFileRequest{ + VolumeId: v, + Ext: ".idx", + CompactionRevision: math.MaxUint32, + StopOffset: math.MaxInt64, + Collection: *volumeCollection, + }) + if err != nil { + return err + } + var buf bytes.Buffer + for { + resp, err := copyFileClient.Recv() + if errors.Is(err, io.EOF) { + break + } + if err != nil { + return err + } + buf.Write(resp.FileContent) + } + idxFile = bytes.NewReader(buf.Bytes()) + return nil + }) + if err != nil { + return nil, 0, err + } + + var maxOffset int64 + files := map[types.NeedleId]needleState{} + err = idx.WalkIndexFile(idxFile, func(key types.NeedleId, offset types.Offset, size types.Size) error { + if offset.IsZero() || size.IsDeleted() { + files[key] = needleState{ + state: stateDeleted, + size: size, + } + } else { + files[key] = needleState{ + state: statePresent, + size: size, + } + } + if actual := offset.ToActualOffset(); actual > maxOffset { + maxOffset = actual + } + return nil + }) + if err != nil { + return nil, 0, err + } + return files, maxOffset, nil +} + +func getNeedleFileId(v uint32, nid types.NeedleId, addr string) (string, error) { + var id string + err := operation.WithVolumeServerClient(addr, grpcDialOption, 
func(vs volume_server_pb.VolumeServerClient) error { + resp, err := vs.VolumeNeedleStatus(context.Background(), &volume_server_pb.VolumeNeedleStatusRequest{ + VolumeId: v, + NeedleId: uint64(nid), + }) + if err != nil { + return err + } + id = needle.NewFileId(needle.VolumeId(v), resp.NeedleId, resp.Cookie).String() + return nil + }) + return id, err +} diff --git a/unmaintained/fix_dat/fix_dat.go b/unmaintained/fix_dat/fix_dat.go index d6110d870..70bce3bf9 100644 --- a/unmaintained/fix_dat/fix_dat.go +++ b/unmaintained/fix_dat/fix_dat.go @@ -98,7 +98,7 @@ func iterateEntries(datBackend backend.BackendStorageFile, idxFile *os.File, vis // parse index file entry key := util.BytesToUint64(bytes[0:8]) offsetFromIndex := util.BytesToUint32(bytes[8:12]) - sizeFromIndex := util.BytesToUint32(bytes[12:16]) + sizeFromIndex := types.BytesToSize(bytes[12:16]) count, _ = idxFile.ReadAt(bytes, readerOffset) readerOffset += int64(count) @@ -123,7 +123,7 @@ func iterateEntries(datBackend backend.BackendStorageFile, idxFile *os.File, vis } }() - if n.Size <= n.DataSize { + if n.Size <= types.Size(n.DataSize) { continue } visitNeedle(n, offset) diff --git a/unmaintained/repeated_vacuum/repeated_vacuum.go b/unmaintained/repeated_vacuum/repeated_vacuum.go index 28bcabb9b..bff5becc1 100644 --- a/unmaintained/repeated_vacuum/repeated_vacuum.go +++ b/unmaintained/repeated_vacuum/repeated_vacuum.go @@ -1,51 +1,73 @@ package main import ( - "bytes" "flag" "fmt" "log" "math/rand" + "time" - "github.com/chrislusf/seaweedfs/weed/security" - "github.com/spf13/viper" + "google.golang.org/grpc" "github.com/chrislusf/seaweedfs/weed/operation" + "github.com/chrislusf/seaweedfs/weed/security" "github.com/chrislusf/seaweedfs/weed/util" ) var ( - master = flag.String("master", "127.0.0.1:9333", "the master server") - repeat = flag.Int("n", 5, "repeat how many times") + master = flag.String("master", "127.0.0.1:9333", "the master server") + repeat = flag.Int("n", 5, "repeat how many times") + garbageThreshold = flag.Float64("garbageThreshold", 0.3, "garbageThreshold") + replication = flag.String("replication", "", "replication 000, 001, 002, etc") ) func main() { flag.Parse() util.LoadConfiguration("security", false) - grpcDialOption := security.LoadClientTLS(viper.Sub("grpc"), "client") + grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.client") - for i := 0; i < *repeat; i++ { - assignResult, err := operation.Assign(*master, grpcDialOption, &operation.VolumeAssignRequest{Count: 1}) - if err != nil { - log.Fatalf("assign: %v", err) - } + genFile(grpcDialOption, 0) - data := make([]byte, 1024) - rand.Read(data) - reader := bytes.NewReader(data) + go func() { + for { + println("vacuum threshold", *garbageThreshold) + _, _, err := util.Get(fmt.Sprintf("http://%s/vol/vacuum?garbageThreshold=%f", *master, *garbageThreshold)) + if err != nil { + log.Fatalf("vacuum: %v", err) + } + time.Sleep(time.Second) + } + }() - targetUrl := fmt.Sprintf("http://%s/%s", assignResult.Url, assignResult.Fid) + for i := 0; i < *repeat; i++ { + // create 2 files, and delete one of them - _, err = operation.Upload(targetUrl, fmt.Sprintf("test%d", i), reader, false, "", nil, assignResult.Auth) - if err != nil { - log.Fatalf("upload: %v", err) - } + assignResult, targetUrl := genFile(grpcDialOption, i) util.Delete(targetUrl, string(assignResult.Auth)) - util.Get(fmt.Sprintf("http://%s/vol/vacuum", *master)) + } + +} +func genFile(grpcDialOption grpc.DialOption, i int) (*operation.AssignResult, string) { + assignResult, err := 
operation.Assign(*master, grpcDialOption, &operation.VolumeAssignRequest{ + Count: 1, + Replication: *replication, + }) + if err != nil { + log.Fatalf("assign: %v", err) + } + data := make([]byte, 1024) + rand.Read(data) + + targetUrl := fmt.Sprintf("http://%s/%s", assignResult.Url, assignResult.Fid) + + _, err = operation.UploadData(targetUrl, fmt.Sprintf("test%d", i), false, data, false, "bench/test", nil, assignResult.Auth) + if err != nil { + log.Fatalf("upload: %v", err) + } + return assignResult, targetUrl } diff --git a/unmaintained/s3/benchmark/hsbench.sh b/unmaintained/s3/benchmark/hsbench.sh new file mode 100755 index 000000000..285b51405 --- /dev/null +++ b/unmaintained/s3/benchmark/hsbench.sh @@ -0,0 +1,3 @@ +#!/bin/bash + +hsbench -a accesstoken -s secret -z 4K -d 10 -t 10 -b 10 -u http://localhost:8333 -m "cxipgdx" -bp "hsbench-" diff --git a/unmaintained/s3/presigned_put/presigned_put.go b/unmaintained/s3/presigned_put/presigned_put.go new file mode 100644 index 000000000..e8368d124 --- /dev/null +++ b/unmaintained/s3/presigned_put/presigned_put.go @@ -0,0 +1,73 @@ +package main + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/s3" + "encoding/base64" + "fmt" + "crypto/md5" + "strings" + "time" + "net/http" +) + +// Generates a pre-signed URL for an S3 PUT operation and uploads content through it, +// using the region configured in the shared config or AWS_REGION environment variable. +// +// Usage: +// go run presigned_put.go +// For this example to work, the domainName is needed: +// weed s3 -domainName=localhost +func main() { + h := md5.New() + content := strings.NewReader(stringContent) + content.WriteTo(h) + + // Initialize a session in us-east-1 that the SDK will use to load + // credentials from the shared credentials file ~/.aws/credentials. + sess, err := session.NewSession(&aws.Config{ + Region: aws.String("us-east-1"), + Endpoint: aws.String("http://localhost:8333"), + }) + + // Create S3 service client + svc := s3.New(sess) + + putRequest, output := svc.PutObjectRequest(&s3.PutObjectInput{ + Bucket: aws.String("dev"), + Key: aws.String("testKey"), + }) + fmt.Printf("output: %+v\n", output) + + md5s := base64.StdEncoding.EncodeToString(h.Sum(nil)) + putRequest.HTTPRequest.Header.Set("Content-MD5", md5s) + + url, err := putRequest.Presign(15 * time.Minute) + if err != nil { + fmt.Println("error presigning request", err) + return + } + + fmt.Println(url) + + req, err := http.NewRequest("PUT", url, strings.NewReader(stringContent)) + if err != nil { + fmt.Println("error creating request", url) + return + } + req.Header.Set("Content-MD5", md5s) + + resp, err := http.DefaultClient.Do(req) + if err != nil { + fmt.Printf("error put request: %v\n", err) + return + } + fmt.Printf("response: %+v\n", resp) +} + +var stringContent = `Generate a Pre-Signed URL for an Amazon S3 PUT Operation with a Specific Payload +You can generate a pre-signed URL for a PUT operation that checks whether users upload the correct content. When the SDK pre-signs a request, it computes the checksum of the request body and generates an MD5 checksum that is included in the pre-signed URL. Users must upload the same content that produces the same MD5 checksum generated by the SDK; otherwise, the operation fails. This is not the Content-MD5, but the signature. To enforce Content-MD5, simply add the header to the request. + +The following example adds a Body field to generate a pre-signed PUT operation that requires a specific payload to be uploaded by users. 
+` \ No newline at end of file diff --git a/unmaintained/see_dat/see_dat.go b/unmaintained/see_dat/see_dat.go index efc58e751..17c494841 100644 --- a/unmaintained/see_dat/see_dat.go +++ b/unmaintained/see_dat/see_dat.go @@ -2,6 +2,7 @@ package main import ( "flag" + "github.com/chrislusf/seaweedfs/weed/util" "time" "github.com/chrislusf/seaweedfs/weed/glog" @@ -31,7 +32,8 @@ func (scanner *VolumeFileScanner4SeeDat) ReadNeedleBody() bool { func (scanner *VolumeFileScanner4SeeDat) VisitNeedle(n *needle.Needle, offset int64, needleHeader, needleBody []byte) error { t := time.Unix(int64(n.AppendAtNs)/int64(time.Second), int64(n.AppendAtNs)%int64(time.Second)) - glog.V(0).Infof("%d,%s%x offset %d size %d cookie %x appendedAt %v", *volumeId, n.Id, n.Cookie, offset, n.Size, n.Cookie, t) + glog.V(0).Infof("%d,%s%x offset %d size %d(%s) cookie %x appendedAt %v", + *volumeId, n.Id, n.Cookie, offset, n.Size, util.BytesToHumanReadable(uint64(n.Size)), n.Cookie, t) return nil } diff --git a/unmaintained/see_idx/see_idx.go b/unmaintained/see_idx/see_idx.go index 777af1821..22c659351 100644 --- a/unmaintained/see_idx/see_idx.go +++ b/unmaintained/see_idx/see_idx.go @@ -3,6 +3,7 @@ package main import ( "flag" "fmt" + "github.com/chrislusf/seaweedfs/weed/util" "os" "path" "strconv" @@ -35,8 +36,8 @@ func main() { } defer indexFile.Close() - idx.WalkIndexFile(indexFile, func(key types.NeedleId, offset types.Offset, size uint32) error { - fmt.Printf("key:%v offset:%v size:%v\n", key, offset, size) + idx.WalkIndexFile(indexFile, func(key types.NeedleId, offset types.Offset, size types.Size) error { + fmt.Printf("key:%v offset:%v size:%v(%v)\n", key, offset, size, util.BytesToHumanReadable(uint64(size))) return nil }) diff --git a/unmaintained/see_log_entry/see_log_entry.go b/unmaintained/see_log_entry/see_log_entry.go new file mode 100644 index 000000000..45480d4dc --- /dev/null +++ b/unmaintained/see_log_entry/see_log_entry.go @@ -0,0 +1,75 @@ +package main + +import ( + "flag" + "fmt" + "io" + "log" + "os" + + "github.com/golang/protobuf/proto" + + "github.com/chrislusf/seaweedfs/weed/filer" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/util" +) + +var ( + logdataFile = flag.String("logdata", "", "log data file saved under "+ filer.SystemLogDir) +) + +func main() { + flag.Parse() + + dst, err := os.OpenFile(*logdataFile, os.O_RDONLY, 0644) + if err != nil { + log.Fatalf("failed to open %s: %v", *logdataFile, err) + } + defer dst.Close() + + err = walkLogEntryFile(dst) + if err != nil { + log.Fatalf("failed to visit %s: %v", *logdataFile, err) + } + +} + +func walkLogEntryFile(dst *os.File) error { + + sizeBuf := make([]byte, 4) + + for { + if n, err := dst.Read(sizeBuf); n != 4 { + if err == io.EOF { + return nil + } + return err + } + + size := util.BytesToUint32(sizeBuf) + + data := make([]byte, int(size)) + + if n, err := dst.Read(data); n != len(data) { + return err + } + + logEntry := &filer_pb.LogEntry{} + err := proto.Unmarshal(data, logEntry) + if err != nil { + log.Printf("unexpected unmarshal filer_pb.LogEntry: %v", err) + return nil + } + + event := &filer_pb.SubscribeMetadataResponse{} + err = proto.Unmarshal(logEntry.Data, event) + if err != nil { + log.Printf("unexpected unmarshal filer_pb.SubscribeMetadataResponse: %v", err) + return nil + } + + fmt.Printf("event: %+v\n", event) + + } + +} diff --git a/unmaintained/see_meta/see_meta.go b/unmaintained/see_meta/see_meta.go index 0d2ac8de1..452badfd6 100644 --- a/unmaintained/see_meta/see_meta.go +++ 
b/unmaintained/see_meta/see_meta.go @@ -7,10 +7,10 @@ import ( "log" "os" - "github.com/chrislusf/seaweedfs/weed/filer2" + "github.com/golang/protobuf/proto" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" "github.com/chrislusf/seaweedfs/weed/util" - "github.com/golang/protobuf/proto" ) var ( @@ -58,7 +58,7 @@ func walkMetaFile(dst *os.File) error { return err } - fmt.Fprintf(os.Stdout, "file %s %v\n", filer2.FullPath(fullEntry.Dir).Child(fullEntry.Entry.Name), fullEntry.Entry.Attributes.String()) + fmt.Fprintf(os.Stdout, "file %s %v\n", util.FullPath(fullEntry.Dir).Child(fullEntry.Entry.Name), fullEntry.Entry.Attributes.String()) for i, chunk := range fullEntry.Entry.Chunks { fmt.Fprintf(os.Stdout, " chunk %d %v\n", i+1, chunk.String()) } diff --git a/unmaintained/stress_filer_upload/bench_filer_upload/bench_filer_upload.go b/unmaintained/stress_filer_upload/bench_filer_upload/bench_filer_upload.go index b2e4b28c6..2ee8028f2 100644 --- a/unmaintained/stress_filer_upload/bench_filer_upload/bench_filer_upload.go +++ b/unmaintained/stress_filer_upload/bench_filer_upload/bench_filer_upload.go @@ -45,7 +45,7 @@ func main() { defer wg.Done() client := &http.Client{Transport: &http.Transport{ - MaxConnsPerHost: 1024, + MaxIdleConns: 1024, MaxIdleConnsPerHost: 1024, }} r := rand.New(rand.NewSource(time.Now().UnixNano() + int64(x))) diff --git a/unmaintained/stress_filer_upload/write_files/write_files.go b/unmaintained/stress_filer_upload/write_files/write_files.go new file mode 100644 index 000000000..508e37d14 --- /dev/null +++ b/unmaintained/stress_filer_upload/write_files/write_files.go @@ -0,0 +1,54 @@ +package main + +import ( + "flag" + "fmt" + "math/rand" + "os" + "time" +) + +var ( + minSize = flag.Int("minSize", 1024, "min file size") + maxSize = flag.Int("maxSize", 3*1024*1024, "max file size") + fileCount = flag.Int("n", 1, "number of files to write") + blockSize = flag.Int("blockSizeKB", 4, "write block size") + toDir = flag.String("dir", ".", "destination directory") +) + +func check(e error) { + if e != nil { + panic(e) + } +} + +func main() { + + flag.Parse() + + block := make([]byte, *blockSize*1024) + + for i := 0; i < *fileCount; i++ { + + f, err := os.Create(fmt.Sprintf("%s/file%05d", *toDir, i)) + check(err) + + fileSize := *minSize + rand.Intn(*maxSize-*minSize) + startTime := time.Now() + + fmt.Printf("write %s %d bytes: ", f.Name(), fileSize) + + for x := 0; x < fileSize; { + rand.Read(block) + _, err = f.Write(block) + check(err) + x += len(block) + } + + err = f.Close() + check(err) + + fmt.Printf("%.02f MB/sec\n", float64(fileSize)*float64(time.Second)/float64(time.Now().Sub(startTime)*1024*1024)) + } + +} diff --git a/unmaintained/volume_tailer/volume_tailer.go b/unmaintained/volume_tailer/volume_tailer.go index f0ef51c09..32da2e6ab 100644 --- a/unmaintained/volume_tailer/volume_tailer.go +++ b/unmaintained/volume_tailer/volume_tailer.go @@ -9,7 +9,6 @@ import ( "github.com/chrislusf/seaweedfs/weed/security" "github.com/chrislusf/seaweedfs/weed/storage/needle" util2 "github.com/chrislusf/seaweedfs/weed/util" - "github.com/spf13/viper" "golang.org/x/tools/godoc/util" ) @@ -25,7 +24,7 @@ func main() { flag.Parse() util2.LoadConfiguration("security", false) - grpcDialOption := security.LoadClientTLS(viper.Sub("grpc"), "client") + grpcDialOption := security.LoadClientTLS(util2.GetViper(), "grpc.client") vid := needle.VolumeId(*volumeId) @@ -38,7 +37,7 @@ func main() { sinceTimeNs = time.Now().Add(-*rewindDuration).UnixNano() } - err := operation.TailVolume(*master, 
grpcDialOption, vid, uint64(sinceTimeNs), *timeoutSeconds, func(n *needle.Needle) (err error) { + err := operation.TailVolume(func()string{return *master}, grpcDialOption, vid, uint64(sinceTimeNs), *timeoutSeconds, func(n *needle.Needle) (err error) { if n.Size == 0 { println("-", n.String()) return nil @@ -49,8 +48,8 @@ func main() { if *showTextFile { data := n.Data - if n.IsGzipped() { - if data, err = util2.UnGzipData(data); err != nil { + if n.IsCompressed() { + if data, err = util2.DecompressData(data); err != nil { return err } } @@ -58,7 +57,7 @@ func main() { println(string(data)) } - println("-", n.String(), "compressed", n.IsGzipped(), "original size", len(data)) + println("-", n.String(), "compressed", n.IsCompressed(), "original size", len(data)) } return nil }) diff --git a/weed/Makefile b/weed/Makefile new file mode 100644 index 000000000..8f1257d09 --- /dev/null +++ b/weed/Makefile @@ -0,0 +1,39 @@ +BINARY = weed + +SOURCE_DIR = . + +all: debug_mount + +.PHONY : clean debug_mount + +clean: + go clean $(SOURCE_DIR) + rm -f $(BINARY) + +debug_shell: + go build -gcflags="all=-N -l" + dlv --listen=:2345 --headless=true --api-version=2 --accept-multiclient exec weed -- shell + +debug_mount: + go build -gcflags="all=-N -l" + dlv --listen=:2345 --headless=true --api-version=2 --accept-multiclient exec weed -- mount -dir=~/tmp/mm -cacheCapacityMB=0 -filer.path=/buckets + +debug_server: + go build -gcflags="all=-N -l" + dlv --listen=:2345 --headless=true --api-version=2 --accept-multiclient exec weed -- server -dir=/Volumes/mobile_disk/99 -filer -volume.port=8343 -s3 -volume.max=0 -master.volumeSizeLimitMB=1024 -volume.preStopSeconds=1 + +debug_volume: + go build -gcflags="all=-N -l" + dlv --listen=:2345 --headless=true --api-version=2 --accept-multiclient exec weed -- volume -dir=/Volumes/mobile_disk/100 -port 8564 -max=30 -preStopSeconds=2 + +debug_webdav: + go build -gcflags="all=-N -l" + dlv --listen=:2345 --headless=true --api-version=2 --accept-multiclient exec weed -- -v=4 webdav + +debug_s3: + go build -gcflags="all=-N -l" + dlv --listen=:2345 --headless=true --api-version=2 --accept-multiclient exec weed -- -v=4 s3 + +debug_filer_copy: + go build -gcflags="all=-N -l" + dlv --listen=:2345 --headless=true --api-version=2 --accept-multiclient exec weed -- -v=4 filer.backup -filer=localhost:8888 -filerProxy -timeAgo=10h diff --git a/weed/command/backup.go b/weed/command/backup.go index 0f6bed225..207df770b 100644 --- a/weed/command/backup.go +++ b/weed/command/backup.go @@ -3,8 +3,6 @@ package command import ( "fmt" - "github.com/spf13/viper" - "github.com/chrislusf/seaweedfs/weed/security" "github.com/chrislusf/seaweedfs/weed/storage/needle" "github.com/chrislusf/seaweedfs/weed/storage/super_block" @@ -66,7 +64,7 @@ var cmdBackup = &Command{ func runBackup(cmd *Command, args []string) bool { util.LoadConfiguration("security", false) - grpcDialOption := security.LoadClientTLS(viper.Sub("grpc"), "client") + grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.client") if *s.volumeId == -1 { return false @@ -74,7 +72,7 @@ func runBackup(cmd *Command, args []string) bool { vid := needle.VolumeId(*s.volumeId) // find volume location, replication, ttl info - lookup, err := operation.Lookup(*s.master, vid.String()) + lookup, err := operation.Lookup(func() string { return *s.master }, vid.String()) if err != nil { fmt.Printf("Error looking up volume %d: %v\n", vid, err) return true @@ -114,14 +112,14 @@ func runBackup(cmd *Command, args []string) bool { return true } } - v, 
err := storage.NewVolume(*s.dir, *s.collection, vid, storage.NeedleMapInMemory, replication, ttl, 0, 0) + v, err := storage.NewVolume(util.ResolvePath(*s.dir), util.ResolvePath(*s.dir), *s.collection, vid, storage.NeedleMapInMemory, replication, ttl, 0, 0) if err != nil { fmt.Printf("Error creating or reading from volume %d: %v\n", vid, err) return true } if v.SuperBlock.CompactionRevision < uint16(stats.CompactRevision) { - if err = v.Compact2(30 * 1024 * 1024 * 1024); err != nil { + if err = v.Compact2(30*1024*1024*1024, 0); err != nil { fmt.Printf("Compact Volume before synchronizing %v\n", err) return true } @@ -139,7 +137,7 @@ func runBackup(cmd *Command, args []string) bool { // remove the old data v.Destroy() // recreate an empty volume - v, err = storage.NewVolume(*s.dir, *s.collection, vid, storage.NeedleMapInMemory, replication, ttl, 0, 0) + v, err = storage.NewVolume(util.ResolvePath(*s.dir), util.ResolvePath(*s.dir), *s.collection, vid, storage.NeedleMapInMemory, replication, ttl, 0, 0) if err != nil { fmt.Printf("Error creating or reading from volume %d: %v\n", vid, err) return true diff --git a/weed/command/benchmark.go b/weed/command/benchmark.go index 26be1fe3a..4fedb55f1 100644 --- a/weed/command/benchmark.go +++ b/weed/command/benchmark.go @@ -2,7 +2,6 @@ package command import ( "bufio" - "context" "fmt" "io" "math" @@ -15,7 +14,6 @@ import ( "sync" "time" - "github.com/spf13/viper" "google.golang.org/grpc" "github.com/chrislusf/seaweedfs/weed/glog" @@ -37,10 +35,13 @@ type BenchmarkOptions struct { sequentialRead *bool collection *string replication *string + diskType *string cpuprofile *string maxCpu *int grpcDialOption grpc.DialOption masterClient *wdclient.MasterClient + fsync *bool + useTcp *bool } var ( @@ -63,8 +64,11 @@ func init() { b.sequentialRead = cmdBenchmark.Flag.Bool("readSequentially", false, "randomly read by ids from \"-list\" specified file") b.collection = cmdBenchmark.Flag.String("collection", "benchmark", "write data to this collection") b.replication = cmdBenchmark.Flag.String("replication", "000", "replication type") + b.diskType = cmdBenchmark.Flag.String("disk", "", "[hdd|ssd|] hard drive or solid state drive or any tag") b.cpuprofile = cmdBenchmark.Flag.String("cpuprofile", "", "cpu profile output file") b.maxCpu = cmdBenchmark.Flag.Int("maxCpu", 0, "maximum number of CPUs. 
0 means all available CPUs") + b.fsync = cmdBenchmark.Flag.Bool("fsync", false, "flush data to disk after write") + b.useTcp = cmdBenchmark.Flag.Bool("useTcp", false, "send data via tcp") sharedBytes = make([]byte, 1024) } @@ -109,9 +113,9 @@ var ( func runBenchmark(cmd *Command, args []string) bool { util.LoadConfiguration("security", false) - b.grpcDialOption = security.LoadClientTLS(viper.Sub("grpc"), "client") + b.grpcDialOption = security.LoadClientTLS(util.GetViper(), "grpc.client") - fmt.Printf("This is SeaweedFS version %s %s %s\n", util.VERSION, runtime.GOOS, runtime.GOARCH) + fmt.Printf("This is SeaweedFS version %s %s %s\n", util.Version(), runtime.GOOS, runtime.GOARCH) if *b.maxCpu < 1 { *b.maxCpu = runtime.NumCPU() } @@ -125,7 +129,7 @@ func runBenchmark(cmd *Command, args []string) bool { defer pprof.StopCPUProfile() } - b.masterClient = wdclient.NewMasterClient(context.Background(), b.grpcDialOption, "client", strings.Split(*b.masters, ",")) + b.masterClient = wdclient.NewMasterClient(b.grpcDialOption, "client", "", 0, "", strings.Split(*b.masters, ",")) go b.masterClient.KeepConnectedToMaster() b.masterClient.WaitUntilConnected() @@ -221,25 +225,37 @@ func writeFiles(idChan chan int, fileIdLineChan chan string, s *stat) { random := rand.New(rand.NewSource(time.Now().UnixNano())) + volumeTcpClient := wdclient.NewVolumeTcpClient() + for id := range idChan { start := time.Now() fileSize := int64(*b.fileSize + random.Intn(64)) fp := &operation.FilePart{ - Reader: &FakeReader{id: uint64(id), size: fileSize}, + Reader: &FakeReader{id: uint64(id), size: fileSize, random: random}, FileSize: fileSize, MimeType: "image/bench", // prevent gzip benchmark content + Fsync: *b.fsync, } ar := &operation.VolumeAssignRequest{ Count: 1, Collection: *b.collection, Replication: *b.replication, + DiskType: *b.diskType, } - if assignResult, err := operation.Assign(b.masterClient.GetMaster(), b.grpcDialOption, ar); err == nil { + if assignResult, err := operation.Assign(b.masterClient.GetMaster, b.grpcDialOption, ar); err == nil { fp.Server, fp.Fid, fp.Collection = assignResult.Url, assignResult.Fid, *b.collection if !isSecure && assignResult.Auth != "" { isSecure = true } - if _, err := fp.Upload(0, b.masterClient.GetMaster(), assignResult.Auth, b.grpcDialOption); err == nil { + if *b.useTcp { + if uploadByTcp(volumeTcpClient, fp) { + fileIdLineChan <- fp.Fid + s.completed++ + s.transferred += fileSize + } else { + s.failed++ + } + } else if _, err := fp.Upload(0, b.masterClient.GetMaster, false, assignResult.Auth, b.grpcDialOption); err == nil { if random.Intn(100) < *b.deletePercentage { s.total++ delayedDeleteChan <- &delayedFile{time.Now().Add(time.Second), fp} @@ -279,19 +295,29 @@ func readFiles(fileIdLineChan chan string, s *stat) { fmt.Printf("reading file %s\n", fid) } start := time.Now() - url, err := b.masterClient.LookupFileId(fid) + var bytesRead int + var err error + urls, err := b.masterClient.LookupFileId(fid) if err != nil { s.failed++ println("!!!! 
", fid, " location not found!!!!!") continue } - if bytesRead, err := util.Get(url); err == nil { + var bytes []byte + for _, url := range urls { + bytes, _, err = util.Get(url) + if err == nil { + break + } + } + bytesRead = len(bytes) + if err == nil { s.completed++ - s.transferred += int64(len(bytesRead)) + s.transferred += int64(bytesRead) readStats.addSample(time.Now().Sub(start)) } else { s.failed++ - fmt.Printf("Failed to read %s error:%v\n", url, err) + fmt.Printf("Failed to read %s error:%v\n", fid, err) } } } @@ -315,6 +341,17 @@ func writeFileIds(fileName string, fileIdLineChan chan string, finishChan chan b } } +func uploadByTcp(volumeTcpClient *wdclient.VolumeTcpClient, fp *operation.FilePart) bool { + + err := volumeTcpClient.PutFileChunk(fp.Server, fp.Fid, uint32(fp.FileSize), fp.Reader) + if err != nil { + glog.Errorf("upload chunk err: %v", err) + return false + } + + return true +} + func readFileIds(fileName string, fileIdLineChan chan string) { file, err := os.Open(fileName) // For read access. if err != nil { @@ -353,7 +390,7 @@ func readFileIds(fileName string, fileIdLineChan chan string) { } const ( - benchResolution = 10000 //0.1 microsecond + benchResolution = 10000 // 0.1 microsecond benchBucket = 1000000000 / benchResolution ) @@ -476,7 +513,7 @@ func (s *stats) printStats() { fmt.Printf("\nConnection Times (ms)\n") fmt.Printf(" min avg max std\n") fmt.Printf("Total: %2.1f %3.1f %3.1f %3.1f\n", float32(min)/10, float32(avg)/10, float32(max)/10, std/10) - //printing percentiles + // printing percentiles fmt.Printf("\nPercentage of the requests served within a certain time (ms)\n") percentiles := make([]int, len(percentages)) for i := 0; i < len(percentages); i++ { @@ -510,8 +547,9 @@ func (s *stats) printStats() { // a fake reader to generate content to upload type FakeReader struct { - id uint64 // an id number - size int64 // max bytes + id uint64 // an id number + size int64 // max bytes + random *rand.Rand } func (l *FakeReader) Read(p []byte) (n int, err error) { @@ -527,6 +565,7 @@ func (l *FakeReader) Read(p []byte) (n int, err error) { for i := 0; i < 8; i++ { p[i] = byte(l.id >> uint(i*8)) } + l.random.Read(p[8:]) } l.size -= int64(n) return diff --git a/weed/command/command.go b/weed/command/command.go index 79c00d4cd..b6efcead2 100644 --- a/weed/command/command.go +++ b/weed/command/command.go @@ -1,8 +1,8 @@ package command import ( - "flag" "fmt" + flag "github.com/chrislusf/seaweedfs/weed/util/fla9" "os" "strings" ) @@ -12,20 +12,28 @@ var Commands = []*Command{ cmdBackup, cmdCompact, cmdCopy, - cmdFix, + cmdDownload, + cmdExport, + cmdFiler, + cmdFilerBackup, + cmdFilerCat, + cmdFilerMetaBackup, + cmdFilerMetaTail, cmdFilerReplicate, - cmdServer, + cmdFilerSynchronize, + cmdFix, + cmdGateway, cmdMaster, - cmdFiler, + cmdMount, cmdS3, - cmdUpload, - cmdDownload, + cmdIam, + cmdMsgBroker, cmdScaffold, + cmdServer, cmdShell, + cmdUpload, cmdVersion, cmdVolume, - cmdExport, - cmdMount, cmdWebDav, } diff --git a/weed/command/compact.go b/weed/command/compact.go index 85313b749..92e25f474 100644 --- a/weed/command/compact.go +++ b/weed/command/compact.go @@ -4,6 +4,7 @@ import ( "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/storage" "github.com/chrislusf/seaweedfs/weed/storage/needle" + "github.com/chrislusf/seaweedfs/weed/util" ) func init() { @@ -40,8 +41,7 @@ func runCompact(cmd *Command, args []string) bool { preallocate := *compactVolumePreallocate * (1 << 20) vid := needle.VolumeId(*compactVolumeId) - v, err := 
storage.NewVolume(*compactVolumePath, *compactVolumeCollection, vid, - storage.NeedleMapInMemory, nil, nil, preallocate, 0) + v, err := storage.NewVolume(util.ResolvePath(*compactVolumePath), util.ResolvePath(*compactVolumePath), *compactVolumeCollection, vid, storage.NeedleMapInMemory, nil, nil, preallocate, 0) if err != nil { glog.Fatalf("Load Volume [ERROR] %s\n", err) } @@ -50,7 +50,7 @@ func runCompact(cmd *Command, args []string) bool { glog.Fatalf("Compact Volume [ERROR] %s\n", err) } } else { - if err = v.Compact2(preallocate); err != nil { + if err = v.Compact2(preallocate, 0); err != nil { glog.Fatalf("Compact Volume [ERROR] %s\n", err) } } diff --git a/weed/command/download.go b/weed/command/download.go index b3e33defd..7bbff9448 100644 --- a/weed/command/download.go +++ b/weed/command/download.go @@ -4,6 +4,7 @@ import ( "fmt" "io" "io/ioutil" + "net/http" "os" "path" "strings" @@ -43,15 +44,15 @@ var cmdDownload = &Command{ func runDownload(cmd *Command, args []string) bool { for _, fid := range args { - if e := downloadToFile(*d.server, fid, *d.dir); e != nil { + if e := downloadToFile(func() string { return *d.server }, fid, util.ResolvePath(*d.dir)); e != nil { fmt.Println("Download Error: ", fid, e) } } return true } -func downloadToFile(server, fileId, saveDir string) error { - fileUrl, lookupError := operation.LookupFileId(server, fileId) +func downloadToFile(masterFn operation.GetMasterFn, fileId, saveDir string) error { + fileUrl, lookupError := operation.LookupFileId(masterFn, fileId) if lookupError != nil { return lookupError } @@ -59,7 +60,7 @@ func downloadToFile(server, fileId, saveDir string) error { if err != nil { return err } - defer rc.Close() + defer util.CloseResponse(rc) if filename == "" { filename = fileId } @@ -75,14 +76,14 @@ func downloadToFile(server, fileId, saveDir string) error { } defer f.Close() if isFileList { - content, err := ioutil.ReadAll(rc) + content, err := ioutil.ReadAll(rc.Body) if err != nil { return err } fids := strings.Split(string(content), "\n") for _, partId := range fids { var n int - _, part, err := fetchContent(*d.server, partId) + _, part, err := fetchContent(masterFn, partId) if err == nil { n, err = f.Write(part) } @@ -94,7 +95,7 @@ func downloadToFile(server, fileId, saveDir string) error { } } } else { - if _, err = io.Copy(f, rc); err != nil { + if _, err = io.Copy(f, rc.Body); err != nil { return err } @@ -102,17 +103,17 @@ func downloadToFile(server, fileId, saveDir string) error { return nil } -func fetchContent(server string, fileId string) (filename string, content []byte, e error) { - fileUrl, lookupError := operation.LookupFileId(server, fileId) +func fetchContent(masterFn operation.GetMasterFn, fileId string) (filename string, content []byte, e error) { + fileUrl, lookupError := operation.LookupFileId(masterFn, fileId) if lookupError != nil { return "", nil, lookupError } - var rc io.ReadCloser + var rc *http.Response if filename, _, rc, e = util.DownloadFile(fileUrl); e != nil { return "", nil, e } - content, e = ioutil.ReadAll(rc) - rc.Close() + defer util.CloseResponse(rc) + content, e = ioutil.ReadAll(rc.Body) return } diff --git a/weed/command/export.go b/weed/command/export.go index 8d664ad3b..1c32e1050 100644 --- a/weed/command/export.go +++ b/weed/command/export.go @@ -19,10 +19,11 @@ import ( "github.com/chrislusf/seaweedfs/weed/storage/needle_map" "github.com/chrislusf/seaweedfs/weed/storage/super_block" "github.com/chrislusf/seaweedfs/weed/storage/types" + "github.com/chrislusf/seaweedfs/weed/util" ) 
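The download.go hunks above replace a fixed master address string with operation.GetMasterFn, a function value that is re-evaluated on every lookup, so a long-running command keeps working after the master leadership changes. A minimal sketch of the pattern, assuming only that GetMasterFn is a plain func() string; the helper below is illustrative, not the actual SeaweedFS API:

    package main

    import "fmt"

    // GetMasterFn resolves the master address lazily, at call time.
    type GetMasterFn func() string

    // lookupFileId builds a lookup URL against whichever master is current
    // when the call happens, not the one known when the closure was created.
    func lookupFileId(masterFn GetMasterFn, fileId string) string {
        return fmt.Sprintf("http://%s/dir/lookup?volumeId=%s", masterFn(), fileId)
    }

    func main() {
        master := "localhost:9333" // may be re-assigned after a leader election
        fmt.Println(lookupFileId(func() string { return master }, "3,01637037d6"))
    }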
const ( - defaultFnFormat = `{{.Mime}}/{{.Id}}:{{.Name}}` + defaultFnFormat = `{{.Id}}_{{.Name}}{{.Ext}}` timeFormat = "2006-01-02T15:04:05" ) @@ -55,7 +56,7 @@ func init() { var ( output = cmdExport.Flag.String("o", "", "output tar file name, must end with .tar, or just a \"-\" for stdout") - format = cmdExport.Flag.String("fileNameFormat", defaultFnFormat, "filename formatted with {{.Mime}} {{.Id}} {{.Name}} {{.Ext}}") + format = cmdExport.Flag.String("fileNameFormat", defaultFnFormat, "filename formatted with {{.Id}} {{.Name}} {{.Ext}}") newer = cmdExport.Flag.String("newer", "", "export only files newer than this time, default is all files. Must be specified in RFC3339 without timezone, e.g. 2006-01-02T15:04:05") showDeleted = cmdExport.Flag.Bool("deleted", false, "export deleted files. only applies if -o is not specified") limit = cmdExport.Flag.Int("limit", 0, "only show first n entries if specified") @@ -69,21 +70,23 @@ var ( localLocation, _ = time.LoadLocation("Local") ) -func printNeedle(vid needle.VolumeId, n *needle.Needle, version needle.Version, deleted bool) { +func printNeedle(vid needle.VolumeId, n *needle.Needle, version needle.Version, deleted bool, offset int64, onDiskSize int64) { key := needle.NewFileIdFromNeedle(vid, n).String() - size := n.DataSize + size := int32(n.DataSize) if version == needle.Version1 { - size = n.Size + size = int32(n.Size) } - fmt.Printf("%s\t%s\t%d\t%t\t%s\t%s\t%s\t%t\n", + fmt.Printf("%s\t%s\t%d\t%t\t%s\t%s\t%s\t%t\t%d\t%d\n", key, n.Name, size, - n.IsGzipped(), + n.IsCompressed(), n.Mime, n.LastModifiedString(), n.Ttl.String(), deleted, + offset, + offset+onDiskSize, ) } @@ -108,9 +111,9 @@ func (scanner *VolumeFileScanner4Export) VisitNeedle(n *needle.Needle, offset in vid := scanner.vid nv, ok := needleMap.Get(n.Id) - glog.V(3).Infof("key %d offset %d size %d disk_size %d gzip %v ok %v nv %+v", - n.Id, offset, n.Size, n.DiskSize(scanner.version), n.IsGzipped(), ok, nv) + glog.V(3).Infof("key %d offset %d size %d disk_size %d compressed %v ok %v nv %+v", + n.Id, offset, n.Size, n.DiskSize(scanner.version), n.IsCompressed(), ok, nv) + if *showDeleted && n.Size > 0 || ok && nv.Size.IsValid() && nv.Offset.ToActualOffset() == offset { if newerThanUnix >= 0 && n.HasLastModifiedDate() && n.LastModified < uint64(newerThanUnix) { glog.V(3).Infof("Skipping this file, as it's old enough: LastModified %d vs %d", n.LastModified, newerThanUnix) @@ -123,17 +126,17 @@ func (scanner *VolumeFileScanner4Export) VisitNeedle(n *needle.Needle, offset in if tarOutputFile != nil { return writeFile(vid, n) } else { - printNeedle(vid, n, scanner.version, false) + printNeedle(vid, n, scanner.version, false, offset, n.DiskSize(scanner.version)) return nil } } if !ok { if *showDeleted && tarOutputFile == nil { if n.DataSize > 0 { - printNeedle(vid, n, scanner.version, true) + printNeedle(vid, n, scanner.version, true, offset, n.DiskSize(scanner.version)) } else { n.Name = []byte("*tombstone") - printNeedle(vid, n, scanner.version, true) + printNeedle(vid, n, scanner.version, true, offset, n.DiskSize(scanner.version)) } } glog.V(2).Infof("This seems deleted %d size %d", n.Id, n.Size) @@ -195,7 +198,9 @@ func runExport(cmd *Command, args []string) bool { vid := needle.VolumeId(*export.volumeId) needleMap := needle_map.NewMemDb() - if err := needleMap.LoadFromIdx(path.Join(*export.dir, fileName+".idx")); err != nil { + defer needleMap.Close() + + if err :=
needleMap.LoadFromIdx(path.Join(util.ResolvePath(*export.dir), fileName+".idx")); err != nil { glog.Fatalf("cannot load needle map from %s.idx: %s", fileName, err) } @@ -205,12 +210,12 @@ func runExport(cmd *Command, args []string) bool { } if tarOutputFile == nil { - fmt.Printf("key\tname\tsize\tgzip\tmime\tmodified\tttl\tdeleted\n") + fmt.Printf("key\tname\tsize\tgzip\tmime\tmodified\tttl\tdeleted\tstart\tstop\n") } - err = storage.ScanVolumeFile(*export.dir, *export.collection, vid, storage.NeedleMapInMemory, volumeFileScanner) + err = storage.ScanVolumeFile(util.ResolvePath(*export.dir), *export.collection, vid, storage.NeedleMapInMemory, volumeFileScanner) if err != nil && err != io.EOF { - glog.Fatalf("Export Volume File [ERROR] %s\n", err) + glog.Errorf("Export Volume File [ERROR] %s\n", err) } return true } @@ -240,8 +245,11 @@ func writeFile(vid needle.VolumeId, n *needle.Needle) (err error) { fileName := fileNameTemplateBuffer.String() - if n.IsGzipped() && path.Ext(fileName) != ".gz" { - fileName = fileName + ".gz" + if n.IsCompressed() { + if util.IsGzippedContent(n.Data) && path.Ext(fileName) != ".gz" { + fileName = fileName + ".gz" + } + // TODO other compression method } tarHeader.Name, tarHeader.Size = fileName, int64(len(n.Data)) diff --git a/weed/command/filer.go b/weed/command/filer.go index b1ceb46f5..a723b4d8a 100644 --- a/weed/command/filer.go +++ b/weed/command/filer.go @@ -1,58 +1,102 @@ package command import ( + "fmt" "net/http" + "os" "strconv" "strings" "time" - "github.com/chrislusf/seaweedfs/weed/security" - "github.com/spf13/viper" + "google.golang.org/grpc/reflection" "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/security" "github.com/chrislusf/seaweedfs/weed/server" + stats_collect "github.com/chrislusf/seaweedfs/weed/stats" "github.com/chrislusf/seaweedfs/weed/util" - "google.golang.org/grpc/reflection" ) var ( - f FilerOptions + f FilerOptions + filerStartS3 *bool + filerS3Options S3Options + filerStartWebDav *bool + filerWebDavOptions WebDavOption + filerStartIam *bool + filerIamOptions IamOptions ) type FilerOptions struct { masters *string ip *string + bindIp *string port *int publicPort *int collection *string defaultReplicaPlacement *string - redirectOnRead *bool disableDirListing *bool maxMB *int dirListingLimit *int dataCenter *string + rack *string enableNotification *bool disableHttp *bool - - // default leveldb directory, used in "weed server" mode + cipher *bool + peers *string + metricsHttpPort *int + saveToFilerLimit *int defaultLevelDbDirectory *string + concurrentUploadLimitMB *int } func init() { cmdFiler.Run = runFiler // break init cycle f.masters = cmdFiler.Flag.String("master", "localhost:9333", "comma-separated master servers") - f.collection = cmdFiler.Flag.String("collection", "", "all data will be stored in this collection") - f.ip = cmdFiler.Flag.String("ip", "", "filer server http listen ip address") + f.collection = cmdFiler.Flag.String("collection", "", "all data will be stored in this default collection") + f.ip = cmdFiler.Flag.String("ip", util.DetectedHostAddress(), "filer server http listen ip address") + f.bindIp = cmdFiler.Flag.String("ip.bind", "", "ip address to bind to") f.port = cmdFiler.Flag.Int("port", 8888, "filer server http listen port") f.publicPort = cmdFiler.Flag.Int("port.readonly", 0, "readonly port opened to public") - f.defaultReplicaPlacement = 
cmdFiler.Flag.String("defaultReplicaPlacement", "000", "default replication type if not specified") - f.redirectOnRead = cmdFiler.Flag.Bool("redirectOnRead", false, "whether proxy or redirect to volume server during file GET request") + f.defaultReplicaPlacement = cmdFiler.Flag.String("defaultReplicaPlacement", "", "default replication type. If not specified, use master setting.") f.disableDirListing = cmdFiler.Flag.Bool("disableDirListing", false, "turn off directory listing") - f.maxMB = cmdFiler.Flag.Int("maxMB", 32, "split files larger than the limit") + f.maxMB = cmdFiler.Flag.Int("maxMB", 4, "split files larger than the limit") f.dirListingLimit = cmdFiler.Flag.Int("dirListLimit", 100000, "limit sub dir listing size") - f.dataCenter = cmdFiler.Flag.String("dataCenter", "", "prefer to write to volumes in this data center") + f.dataCenter = cmdFiler.Flag.String("dataCenter", "", "prefer to read and write to volumes in this data center") + f.rack = cmdFiler.Flag.String("rack", "", "prefer to write to volumes in this rack") f.disableHttp = cmdFiler.Flag.Bool("disableHttp", false, "disable http request, only gRpc operations are allowed") + f.cipher = cmdFiler.Flag.Bool("encryptVolumeData", false, "encrypt data on volume servers") + f.peers = cmdFiler.Flag.String("peers", "", "all filers sharing the same filer store in comma separated ip:port list") + f.metricsHttpPort = cmdFiler.Flag.Int("metricsPort", 0, "Prometheus metrics listen port") + f.saveToFilerLimit = cmdFiler.Flag.Int("saveToFilerLimit", 0, "files smaller than this limit will be saved in filer store") + f.defaultLevelDbDirectory = cmdFiler.Flag.String("defaultStoreDir", ".", "if filer.toml is empty, use an embedded filer store in the directory") + f.concurrentUploadLimitMB = cmdFiler.Flag.Int("concurrentUploadLimitMB", 128, "limit total concurrent upload size") + + // start s3 on filer + filerStartS3 = cmdFiler.Flag.Bool("s3", false, "whether to start S3 gateway") + filerS3Options.port = cmdFiler.Flag.Int("s3.port", 8333, "s3 server http listen port") + filerS3Options.domainName = cmdFiler.Flag.String("s3.domainName", "", "suffix of the host name in comma separated list, {bucket}.{domainName}") + filerS3Options.tlsPrivateKey = cmdFiler.Flag.String("s3.key.file", "", "path to the TLS private key file") + filerS3Options.tlsCertificate = cmdFiler.Flag.String("s3.cert.file", "", "path to the TLS certificate file") + filerS3Options.config = cmdFiler.Flag.String("s3.config", "", "path to the config file") + filerS3Options.allowEmptyFolder = cmdFiler.Flag.Bool("s3.allowEmptyFolder", false, "allow empty folders") + + // start webdav on filer + filerStartWebDav = cmdFiler.Flag.Bool("webdav", false, "whether to start webdav gateway") + filerWebDavOptions.port = cmdFiler.Flag.Int("webdav.port", 7333, "webdav server http listen port") + filerWebDavOptions.collection = cmdFiler.Flag.String("webdav.collection", "", "collection to create the files") + filerWebDavOptions.replication = cmdFiler.Flag.String("webdav.replication", "", "replication to create the files") + filerWebDavOptions.disk = cmdFiler.Flag.String("webdav.disk", "", "[hdd|ssd|] hard drive or solid state drive or any tag") + filerWebDavOptions.tlsPrivateKey = cmdFiler.Flag.String("webdav.key.file", "", "path to the TLS private key file") + filerWebDavOptions.tlsCertificate = cmdFiler.Flag.String("webdav.cert.file", "", "path to the TLS certificate file") + filerWebDavOptions.cacheDir = cmdFiler.Flag.String("webdav.cacheDir", os.TempDir(), "local cache directory for file chunks") 
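+ // note: these webdav.* flags fill the shared WebDavOption struct declared above, presumably the same options exposed by the standalone "weed webdav" command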
+ filerWebDavOptions.cacheSizeMB = cmdFiler.Flag.Int64("webdav.cacheCapacityMB", 1000, "local cache capacity in MB") + + // start iam on filer + filerStartIam = cmdFiler.Flag.Bool("iam", false, "whether to start IAM service") + filerIamOptions.port = cmdFiler.Flag.Int("iam.port", 8111, "iam server http listen port") } var cmdFiler = &Command{ @@ -69,7 +113,8 @@ var cmdFiler = &Command{ //return a json format subdirectory and files listing GET /path/to/ - The configuration file "filer.toml" is read from ".", "$HOME/.seaweedfs/", or "/etc/seaweedfs/", in that order. + The configuration file "filer.toml" is read from ".", "$HOME/.seaweedfs/", "/usr/local/etc/seaweedfs/", or "/etc/seaweedfs/", in that order. + If the "filer.toml" is not found, an embedded filer store will be created under "-defaultStoreDir". The example filer.toml configuration file can be generated by "weed scaffold -config=filer" @@ -80,6 +125,37 @@ func runFiler(cmd *Command, args []string) bool { util.LoadConfiguration("security", false) + go stats_collect.StartMetricsServer(*f.metricsHttpPort) + + filerAddress := fmt.Sprintf("%s:%d", *f.ip, *f.port) + startDelay := time.Duration(2) + if *filerStartS3 { + filerS3Options.filer = &filerAddress + go func() { + time.Sleep(startDelay * time.Second) + filerS3Options.startS3Server() + }() + startDelay++ + } + + if *filerStartWebDav { + filerWebDavOptions.filer = &filerAddress + go func() { + time.Sleep(startDelay * time.Second) + filerWebDavOptions.startWebDav() + }() + startDelay++ + } + + if *filerStartIam { + filerIamOptions.filer = &filerAddress + filerIamOptions.masters = f.masters + go func() { + time.Sleep(startDelay * time.Second) + filerIamOptions.startIamServer() + }() + } + f.startFiler() return true @@ -94,31 +170,38 @@ func (fo *FilerOptions) startFiler() { publicVolumeMux = http.NewServeMux() } - defaultLevelDbDirectory := "./filerldb2" - if fo.defaultLevelDbDirectory != nil { - defaultLevelDbDirectory = *fo.defaultLevelDbDirectory + "/filerldb2" + defaultLevelDbDirectory := util.ResolvePath(*fo.defaultLevelDbDirectory + "/filerldb2") + + var peers []string + if *fo.peers != "" { + peers = strings.Split(*fo.peers, ",") } fs, nfs_err := weed_server.NewFilerServer(defaultMux, publicVolumeMux, &weed_server.FilerOption{ - Masters: strings.Split(*fo.masters, ","), - Collection: *fo.collection, - DefaultReplication: *fo.defaultReplicaPlacement, - RedirectOnRead: *fo.redirectOnRead, - DisableDirListing: *fo.disableDirListing, - MaxMB: *fo.maxMB, - DirListingLimit: *fo.dirListingLimit, - DataCenter: *fo.dataCenter, - DefaultLevelDbDir: defaultLevelDbDirectory, - DisableHttp: *fo.disableHttp, - Port: *fo.port, + Masters: strings.Split(*fo.masters, ","), + Collection: *fo.collection, + DefaultReplication: *fo.defaultReplicaPlacement, + DisableDirListing: *fo.disableDirListing, + MaxMB: *fo.maxMB, + DirListingLimit: *fo.dirListingLimit, + DataCenter: *fo.dataCenter, + Rack: *fo.rack, + DefaultLevelDbDir: defaultLevelDbDirectory, + DisableHttp: *fo.disableHttp, + Host: *fo.ip, + Port: uint32(*fo.port), + Cipher: *fo.cipher, + SaveToFilerLimit: int64(*fo.saveToFilerLimit), + Filers: peers, + ConcurrentUploadLimit: int64(*fo.concurrentUploadLimitMB) * 1024 * 1024, }) if nfs_err != nil { glog.Fatalf("Filer startup error: %v", nfs_err) } if *fo.publicPort != 0 { - publicListeningAddress := *fo.ip + ":" + strconv.Itoa(*fo.publicPort) - glog.V(0).Infoln("Start Seaweed filer server", util.VERSION, "public at", publicListeningAddress) + publicListeningAddress := *fo.bindIp + ":" + 
strconv.Itoa(*fo.publicPort) + glog.V(0).Infoln("Start Seaweed filer server", util.Version(), "public at", publicListeningAddress) publicListener, e := util.NewListener(publicListeningAddress, 0) if e != nil { glog.Fatalf("Filer server public listener error on port %d:%v", *fo.publicPort, e) @@ -130,9 +213,9 @@ func (fo *FilerOptions) startFiler() { }() } - glog.V(0).Infof("Start Seaweed Filer %s at %s:%d", util.VERSION, *fo.ip, *fo.port) + glog.V(0).Infof("Start Seaweed Filer %s at %s:%d", util.Version(), *fo.ip, *fo.port) filerListener, e := util.NewListener( - *fo.ip+":"+strconv.Itoa(*fo.port), + *fo.bindIp+":"+strconv.Itoa(*fo.port), time.Duration(10)*time.Second, ) if e != nil { @@ -141,11 +224,11 @@ func (fo *FilerOptions) startFiler() { // starting grpc server grpcPort := *fo.port + 10000 - grpcL, err := util.NewListener(":"+strconv.Itoa(grpcPort), 0) + grpcL, err := util.NewListener(*fo.bindIp+":"+strconv.Itoa(grpcPort), 0) if err != nil { glog.Fatalf("failed to listen on grpc port %d: %v", grpcPort, err) } - grpcS := util.NewGrpcServer(security.LoadServerTLS(viper.Sub("grpc"), "filer")) + grpcS := pb.NewGrpcServer(security.LoadServerTLS(util.GetViper(), "grpc.filer")) filer_pb.RegisterSeaweedFilerServer(grpcS, fs) reflection.Register(grpcS) go grpcS.Serve(grpcL) diff --git a/weed/command/filer_backup.go b/weed/command/filer_backup.go new file mode 100644 index 000000000..888b46fe7 --- /dev/null +++ b/weed/command/filer_backup.go @@ -0,0 +1,157 @@ +package command + +import ( + "context" + "fmt" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/replication/source" + "github.com/chrislusf/seaweedfs/weed/security" + "github.com/chrislusf/seaweedfs/weed/util" + "google.golang.org/grpc" + "io" + "time" +) + +type FilerBackupOptions struct { + isActivePassive *bool + filer *string + path *string + debug *bool + proxyByFiler *bool + timeAgo *time.Duration +} + +var ( + filerBackupOptions FilerBackupOptions +) + +func init() { + cmdFilerBackup.Run = runFilerBackup // break init cycle + filerBackupOptions.filer = cmdFilerBackup.Flag.String("filer", "localhost:8888", "filer of one SeaweedFS cluster") + filerBackupOptions.path = cmdFilerBackup.Flag.String("filerPath", "/", "directory to sync on filer") + filerBackupOptions.proxyByFiler = cmdFilerBackup.Flag.Bool("filerProxy", false, "read and write file chunks by filer instead of volume servers") + filerBackupOptions.debug = cmdFilerBackup.Flag.Bool("debug", false, "debug mode to print out received files") + filerBackupOptions.timeAgo = cmdFilerBackup.Flag.Duration("timeAgo", 0, "start time before now. \"300ms\", \"1.5h\" or \"2h45m\". Valid time units are \"ns\", \"us\" (or \"µs\"), \"ms\", \"s\", \"m\", \"h\"") +} + +var cmdFilerBackup = &Command{ + UsageLine: "filer.backup -filer=: ", + Short: "resume-able continuously replicate files from a SeaweedFS cluster to another location defined in replication.toml", + Long: `resume-able continuously replicate files from a SeaweedFS cluster to another location defined in replication.toml + + filer.backup listens on filer notifications. If any file is updated, it will fetch the updated content, + and write to the destination. This is to replace filer.replicate command since additional message queue is not needed. + + If restarted and "-timeAgo" is not set, the synchronization will resume from the previous checkpoints, persisted every minute. 
+ A fresh sync will start from the earliest metadata logs. To reset the checkpoints, just set "-timeAgo" to a high value. + +`, +} + +func runFilerBackup(cmd *Command, args []string) bool { + + util.LoadConfiguration("security", false) + util.LoadConfiguration("replication", true) + + grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.client") + + for { + err := doFilerBackup(grpcDialOption, &filerBackupOptions) + if err != nil { + glog.Errorf("backup from %s: %v", *filerBackupOptions.filer, err) + time.Sleep(1747 * time.Millisecond) + } + } + + return true +} + +const ( + BackupKeyPrefix = "backup." +) + +func doFilerBackup(grpcDialOption grpc.DialOption, backupOption *FilerBackupOptions) error { + + // find data sink + config := util.GetViper() + dataSink := findSink(config) + if dataSink == nil { + return fmt.Errorf("no data sink configured in replication.toml") + } + + sourceFiler := *backupOption.filer + sourcePath := *backupOption.path + timeAgo := *backupOption.timeAgo + targetPath := dataSink.GetSinkToDirectory() + debug := *backupOption.debug + + // get start time for the data sink + startFrom := time.Unix(0, 0) + sinkId := util.HashStringToLong(dataSink.GetName() + dataSink.GetSinkToDirectory()) + if timeAgo.Milliseconds() == 0 { + lastOffsetTsNs, err := getOffset(grpcDialOption, sourceFiler, BackupKeyPrefix, int32(sinkId)) + if err != nil { + glog.V(0).Infof("starting from %v", startFrom) + } else { + startFrom = time.Unix(0, lastOffsetTsNs) + glog.V(0).Infof("resuming from %v", startFrom) + } + } else { + startFrom = time.Now().Add(-timeAgo) + glog.V(0).Infof("start time is set to %v", startFrom) + } + + // create filer sink + filerSource := &source.FilerSource{} + filerSource.DoInitialize(sourceFiler, pb.ServerToGrpcAddress(sourceFiler), sourcePath, *backupOption.proxyByFiler) + dataSink.SetSourceFiler(filerSource) + + processEventFn := genProcessFunction(sourcePath, targetPath, dataSink, debug) + + return pb.WithFilerClient(sourceFiler, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error { + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + stream, err := client.SubscribeMetadata(ctx, &filer_pb.SubscribeMetadataRequest{ + ClientName: "backup_" + dataSink.GetName(), + PathPrefix: sourcePath, + SinceNs: startFrom.UnixNano(), + }) + if err != nil { + return fmt.Errorf("listen: %v", err) + } + + var counter int64 + var lastWriteTime time.Time + for { + resp, listenErr := stream.Recv() + + if listenErr == io.EOF { + return nil + } + if listenErr != nil { + return listenErr + } + + if err := processEventFn(resp); err != nil { + return fmt.Errorf("processEventFn: %v", err) + } + + counter++ + if lastWriteTime.Add(3 * time.Second).Before(time.Now()) { + glog.V(0).Infof("backup %s progressed to %v %0.2f/sec", sourceFiler, time.Unix(0, resp.TsNs), float64(counter)/float64(3)) + counter = 0 + lastWriteTime = time.Now() + if err := setOffset(grpcDialOption, sourceFiler, BackupKeyPrefix, int32(sinkId), resp.TsNs); err != nil { + return fmt.Errorf("setOffset: %v", err) + } + } + + } + + }) + +} diff --git a/weed/command/filer_cat.go b/weed/command/filer_cat.go new file mode 100644 index 000000000..c4281feba --- /dev/null +++ b/weed/command/filer_cat.go @@ -0,0 +1,118 @@ +package command + +import ( + "context" + "fmt" + "github.com/chrislusf/seaweedfs/weed/filer" + "github.com/chrislusf/seaweedfs/weed/pb" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/wdclient" + "google.golang.org/grpc" +
"math" + "net/url" + "os" + "strings" + + "github.com/chrislusf/seaweedfs/weed/security" + "github.com/chrislusf/seaweedfs/weed/util" +) + +var ( + filerCat FilerCatOptions +) + +type FilerCatOptions struct { + grpcDialOption grpc.DialOption + filerAddress string + filerClient filer_pb.SeaweedFilerClient + output *string +} + +func (fco *FilerCatOptions) GetLookupFileIdFunction() wdclient.LookupFileIdFunctionType { + return func(fileId string) (targetUrls []string, err error) { + vid := filer.VolumeId(fileId) + resp, err := fco.filerClient.LookupVolume(context.Background(), &filer_pb.LookupVolumeRequest{ + VolumeIds: []string{vid}, + }) + if err != nil { + return nil, err + } + locations := resp.LocationsMap[vid] + for _, loc := range locations.Locations { + targetUrls = append(targetUrls, fmt.Sprintf("http://%s/%s", loc.Url, fileId)) + } + return + } +} + +func init() { + cmdFilerCat.Run = runFilerCat // break init cycle + filerCat.output = cmdFilerCat.Flag.String("o", "", "write to file instead of stdout") +} + +var cmdFilerCat = &Command{ + UsageLine: "filer.cat [-o ] http://localhost:8888/path/to/file", + Short: "copy one file to local", + Long: `read one file to stdout or write to a file + +`, +} + +func runFilerCat(cmd *Command, args []string) bool { + + util.LoadConfiguration("security", false) + + if len(args) == 0 { + return false + } + filerSource := args[len(args)-1] + + filerUrl, err := url.Parse(filerSource) + if err != nil { + fmt.Printf("The last argument should be a URL on filer: %v\n", err) + return false + } + urlPath := filerUrl.Path + if strings.HasSuffix(urlPath, "/") { + fmt.Printf("The last argument should be a file: %v\n", err) + return false + } + + filerCat.filerAddress = filerUrl.Host + filerCat.grpcDialOption = security.LoadClientTLS(util.GetViper(), "grpc.client") + + dir, name := util.FullPath(urlPath).DirAndName() + + writer := os.Stdout + if *filerCat.output != "" { + + fmt.Printf("saving %s to %s\n", filerSource, *filerCat.output) + + f, err := os.OpenFile(*filerCat.output, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0755) + if err != nil { + fmt.Printf("open file %s: %v\n", *filerCat.output, err) + return false + } + defer f.Close() + writer = f + } + + pb.WithFilerClient(filerCat.filerAddress, filerCat.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error { + + request := &filer_pb.LookupDirectoryEntryRequest{ + Name: name, + Directory: dir, + } + respLookupEntry, err := filer_pb.LookupEntry(client, request) + if err != nil { + return err + } + + filerCat.filerClient = client + + return filer.StreamContent(&filerCat, writer, respLookupEntry.Entry.Chunks, 0, math.MaxInt64, false) + + }) + + return true +} diff --git a/weed/command/filer_copy.go b/weed/command/filer_copy.go index f14d18c52..e7a9b107f 100644 --- a/weed/command/filer_copy.go +++ b/weed/command/filer_copy.go @@ -14,13 +14,17 @@ import ( "sync" "time" + "google.golang.org/grpc" + + "github.com/chrislusf/seaweedfs/weed/util/grace" + "github.com/chrislusf/seaweedfs/weed/operation" + "github.com/chrislusf/seaweedfs/weed/pb" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" "github.com/chrislusf/seaweedfs/weed/security" + "github.com/chrislusf/seaweedfs/weed/storage/needle" "github.com/chrislusf/seaweedfs/weed/util" "github.com/chrislusf/seaweedfs/weed/wdclient" - "github.com/spf13/viper" - "google.golang.org/grpc" ) var ( @@ -33,13 +37,15 @@ type CopyOptions struct { replication *string collection *string ttl *string + diskType *string maxMB *int masterClient *wdclient.MasterClient 
concurrenctFiles *int concurrenctChunks *int - compressionLevel *int grpcDialOption grpc.DialOption masters []string + cipher bool + ttlSec int32 } func init() { @@ -49,10 +55,10 @@ func init() { copy.replication = cmdCopy.Flag.String("replication", "", "replication type") copy.collection = cmdCopy.Flag.String("collection", "", "optional collection name") copy.ttl = cmdCopy.Flag.String("ttl", "", "time to live, e.g.: 1m, 1h, 1d, 1M, 1y") - copy.maxMB = cmdCopy.Flag.Int("maxMB", 32, "split files larger than the limit") + copy.diskType = cmdCopy.Flag.String("disk", "", "[hdd|ssd|] hard drive or solid state drive or any tag") + copy.maxMB = cmdCopy.Flag.Int("maxMB", 4, "split files larger than the limit") copy.concurrenctFiles = cmdCopy.Flag.Int("c", 8, "concurrent file copy goroutines") copy.concurrenctChunks = cmdCopy.Flag.Int("concurrentChunks", 8, "concurrent chunk copy goroutines for each file") - copy.compressionLevel = cmdCopy.Flag.Int("compressionLevel", 9, "local file compression level 1 ~ 9") } var cmdCopy = &Command{ @@ -68,7 +74,7 @@ var cmdCopy = &Command{ If "maxMB" is set to a positive number, files larger than it would be split into chunks. - `, +`, } func runCopy(cmd *Command, args []string) bool { @@ -88,7 +94,7 @@ func runCopy(cmd *Command, args []string) bool { } urlPath := filerUrl.Path if !strings.HasSuffix(urlPath, "/") { - fmt.Printf("The last argument should be a folder and end with \"/\": %v\n", err) + fmt.Printf("The last argument should be a folder and end with \"/\"\n") return false } @@ -105,15 +111,25 @@ func runCopy(cmd *Command, args []string) bool { filerGrpcPort := filerPort + 10000 filerGrpcAddress := fmt.Sprintf("%s:%d", filerUrl.Hostname(), filerGrpcPort) - copy.grpcDialOption = security.LoadClientTLS(viper.Sub("grpc"), "client") - - ctx := context.Background() + copy.grpcDialOption = security.LoadClientTLS(util.GetViper(), "grpc.client") - masters, collection, replication, maxMB, err := readFilerConfiguration(ctx, copy.grpcDialOption, filerGrpcAddress) + masters, collection, replication, dirBuckets, maxMB, cipher, err := readFilerConfiguration(copy.grpcDialOption, filerGrpcAddress) if err != nil { fmt.Printf("read from filer %s: %v\n", filerGrpcAddress, err) return false } + if strings.HasPrefix(urlPath, dirBuckets+"/") { + restPath := urlPath[len(dirBuckets)+1:] + if strings.Index(restPath, "/") > 0 { + expectedBucket := restPath[:strings.Index(restPath, "/")] + if *copy.collection == "" { + *copy.collection = expectedBucket + } else if *copy.collection != expectedBucket { + fmt.Printf("destination %s uses collection \"%s\": unexpected collection \"%v\"\n", urlPath, expectedBucket, *copy.collection) + return true + } + } + } if *copy.collection == "" { *copy.collection = collection } @@ -124,13 +140,17 @@ func runCopy(cmd *Command, args []string) bool { *copy.maxMB = int(maxMB) } copy.masters = masters + copy.cipher = cipher - copy.masterClient = wdclient.NewMasterClient(ctx, copy.grpcDialOption, "client", copy.masters) - go copy.masterClient.KeepConnectedToMaster() - copy.masterClient.WaitUntilConnected() + ttl, err := needle.ReadTTL(*copy.ttl) + if err != nil { + fmt.Printf("parsing ttl %s: %v\n", *copy.ttl, err) + return false + } + copy.ttlSec = int32(ttl.Minutes()) * 60 if *cmdCopy.IsDebug { - util.SetupProfiling("filer.copy.cpu.pprof", "filer.copy.mem.pprof") + grace.SetupProfiling("filer.copy.cpu.pprof", "filer.copy.mem.pprof") } fileCopyTaskChan := make(chan FileCopyTask, *copy.concurrenctFiles) @@ -139,7 +159,7 @@ func runCopy(cmd *Command, 
args []string) bool { defer close(fileCopyTaskChan) for _, fileOrDir := range fileOrDirs { if err := genFileCopyTask(fileOrDir, urlPath, fileCopyTaskChan); err != nil { - fmt.Fprintf(os.Stderr, "gen file list error: %v\n", err) + fmt.Fprintf(os.Stderr, "genFileCopyTask : %v\n", err) break } } @@ -153,7 +173,7 @@ func runCopy(cmd *Command, args []string) bool { filerHost: filerUrl.Host, filerGrpcAddress: filerGrpcAddress, } - if err := worker.copyFiles(ctx, fileCopyTaskChan); err != nil { + if err := worker.copyFiles(fileCopyTaskChan); err != nil { fmt.Fprintf(os.Stderr, "copy file error: %v\n", err) return } @@ -164,13 +184,15 @@ func runCopy(cmd *Command, args []string) bool { return true } -func readFilerConfiguration(ctx context.Context, grpcDialOption grpc.DialOption, filerGrpcAddress string) (masters []string, collection, replication string, maxMB uint32, err error) { - err = withFilerClient(ctx, filerGrpcAddress, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error { - resp, err := client.GetFilerConfiguration(ctx, &filer_pb.GetFilerConfigurationRequest{}) +func readFilerConfiguration(grpcDialOption grpc.DialOption, filerGrpcAddress string) (masters []string, collection, replication string, dirBuckets string, maxMB uint32, cipher bool, err error) { + err = pb.WithGrpcFilerClient(filerGrpcAddress, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error { + resp, err := client.GetFilerConfiguration(context.Background(), &filer_pb.GetFilerConfigurationRequest{}) if err != nil { return fmt.Errorf("get filer %s configuration: %v", filerGrpcAddress, err) } masters, collection, replication, maxMB = resp.Masters, resp.Collection, resp.Replication, resp.MaxMb + dirBuckets = resp.DirBuckets + cipher = resp.Cipher return nil }) return @@ -180,21 +202,11 @@ func genFileCopyTask(fileOrDir string, destPath string, fileCopyTaskChan chan Fi fi, err := os.Stat(fileOrDir) if err != nil { - fmt.Fprintf(os.Stderr, "Failed to get stat for file %s: %v\n", fileOrDir, err) + fmt.Fprintf(os.Stderr, "Error: read file %s: %v\n", fileOrDir, err) return nil } mode := fi.Mode() - if mode.IsDir() { - files, _ := ioutil.ReadDir(fileOrDir) - for _, subFileOrDir := range files { - if err = genFileCopyTask(fileOrDir+"/"+subFileOrDir.Name(), destPath+fi.Name()+"/", fileCopyTaskChan); err != nil { - return err - } - } - return nil - } - uid, gid := util.GetFileUidGid(fi) fileCopyTaskChan <- FileCopyTask{ @@ -206,6 +218,16 @@ func genFileCopyTask(fileOrDir string, destPath string, fileCopyTaskChan chan Fi gid: gid, } + if mode.IsDir() { + files, _ := ioutil.ReadDir(fileOrDir) + println("checking directory", fileOrDir) + for _, subFileOrDir := range files { + if err = genFileCopyTask(fileOrDir+"/"+subFileOrDir.Name(), destPath+fi.Name()+"/", fileCopyTaskChan); err != nil { + return err + } + } + } + return nil } @@ -215,9 +237,9 @@ type FileCopyWorker struct { filerGrpcAddress string } -func (worker *FileCopyWorker) copyFiles(ctx context.Context, fileCopyTaskChan chan FileCopyTask) error { +func (worker *FileCopyWorker) copyFiles(fileCopyTaskChan chan FileCopyTask) error { for task := range fileCopyTaskChan { - if err := worker.doEachCopy(ctx, task); err != nil { + if err := worker.doEachCopy(task); err != nil { return err } } @@ -233,7 +255,7 @@ type FileCopyTask struct { gid uint32 } -func (worker *FileCopyWorker) doEachCopy(ctx context.Context, task FileCopyTask) error { +func (worker *FileCopyWorker) doEachCopy(task FileCopyTask) error { f, err := os.Open(task.sourceLocation) if err != nil { @@ 
-261,36 +283,58 @@ func (worker *FileCopyWorker) doEachCopy(ctx context.Context, task FileCopyTask) } if chunkCount == 1 { - return worker.uploadFileAsOne(ctx, task, f) + return worker.uploadFileAsOne(task, f) } - return worker.uploadFileInChunks(ctx, task, f, chunkCount, chunkSize) + return worker.uploadFileInChunks(task, f, chunkCount, chunkSize) } -func (worker *FileCopyWorker) uploadFileAsOne(ctx context.Context, task FileCopyTask, f *os.File) error { +func (worker *FileCopyWorker) uploadFileAsOne(task FileCopyTask, f *os.File) error { // upload the file content fileName := filepath.Base(f.Name()) - mimeType := detectMimeType(f) + var mimeType string var chunks []*filer_pb.FileChunk + var assignResult *filer_pb.AssignVolumeResponse + var assignError error - if task.fileSize > 0 { + if task.fileMode & os.ModeDir == 0 && task.fileSize > 0 { + + mimeType = detectMimeType(f) + data, err := ioutil.ReadAll(f) + if err != nil { + return err + } // assign a volume - assignResult, err := operation.Assign(worker.options.masterClient.GetMaster(), worker.options.grpcDialOption, &operation.VolumeAssignRequest{ - Count: 1, - Replication: *worker.options.replication, - Collection: *worker.options.collection, - Ttl: *worker.options.ttl, + err = pb.WithGrpcFilerClient(worker.filerGrpcAddress, worker.options.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error { + + request := &filer_pb.AssignVolumeRequest{ + Count: 1, + Replication: *worker.options.replication, + Collection: *worker.options.collection, + TtlSec: worker.options.ttlSec, + DiskType: *worker.options.diskType, + Path: task.destinationUrlPath, + } + + assignResult, assignError = client.AssignVolume(context.Background(), request) + if assignError != nil { + return fmt.Errorf("assign volume failure %v: %v", request, assignError) + } + if assignResult.Error != "" { + return fmt.Errorf("assign volume failure %v: %v", request, assignResult.Error) + } + return nil }) if err != nil { - fmt.Printf("Failed to assign from %v: %v\n", worker.options.masters, err) + return fmt.Errorf("Failed to assign from %v: %v\n", worker.options.masters, err) } - targetUrl := "http://" + assignResult.Url + "/" + assignResult.Fid + targetUrl := "http://" + assignResult.Url + "/" + assignResult.FileId - uploadResult, err := operation.UploadWithLocalCompressionLevel(targetUrl, fileName, f, false, mimeType, nil, assignResult.Auth, *worker.options.compressionLevel) + uploadResult, err := operation.UploadData(targetUrl, fileName, worker.options.cipher, data, false, mimeType, nil, security.EncodedJwt(assignResult.Auth)) if err != nil { return fmt.Errorf("upload data %v to %s: %v\n", fileName, targetUrl, err) } @@ -299,18 +343,12 @@ func (worker *FileCopyWorker) uploadFileAsOne(ctx context.Context, task FileCopy } fmt.Printf("uploaded %s to %s\n", fileName, targetUrl) - chunks = append(chunks, &filer_pb.FileChunk{ - FileId: assignResult.Fid, - Offset: 0, - Size: uint64(uploadResult.Size), - Mtime: time.Now().UnixNano(), - ETag: uploadResult.ETag, - }) + chunks = append(chunks, uploadResult.ToPbFileChunk(assignResult.FileId, 0)) fmt.Printf("copied %s => http://%s%s%s\n", fileName, worker.filerHost, task.destinationUrlPath, fileName) } - if err := withFilerClient(ctx, worker.filerGrpcAddress, worker.options.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error { + if err := pb.WithGrpcFilerClient(worker.filerGrpcAddress, worker.options.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error { request := &filer_pb.CreateEntryRequest{ Directory: 
task.destinationUrlPath, Entry: &filer_pb.Entry{ @@ -325,13 +363,13 @@ func (worker *FileCopyWorker) uploadFileAsOne(ctx context.Context, task FileCopy Mime: mimeType, Replication: *worker.options.replication, Collection: *worker.options.collection, - TtlSec: int32(util.ParseInt(*worker.options.ttl, 0)), + TtlSec: worker.options.ttlSec, }, Chunks: chunks, }, } - if _, err := client.CreateEntry(ctx, request); err != nil { + if err := filer_pb.CreateEntry(client, request); err != nil { return fmt.Errorf("update fh: %v", err) } return nil @@ -342,7 +380,7 @@ func (worker *FileCopyWorker) uploadFileAsOne(ctx context.Context, task FileCopy return nil } -func (worker *FileCopyWorker) uploadFileInChunks(ctx context.Context, task FileCopyTask, f *os.File, chunkCount int, chunkSize int64) error { +func (worker *FileCopyWorker) uploadFileInChunks(task FileCopyTask, f *os.File, chunkCount int, chunkSize int64) error { fileName := filepath.Base(f.Name()) mimeType := detectMimeType(f) @@ -352,6 +390,7 @@ func (worker *FileCopyWorker) uploadFileInChunks(ctx context.Context, task FileC concurrentChunks := make(chan struct{}, *worker.options.concurrenctChunks) var wg sync.WaitGroup var uploadError error + var collection, replication string fmt.Printf("uploading %s in %d chunks ...\n", fileName, chunkCount) for i := int64(0); i < int64(chunkCount) && uploadError == nil; i++ { @@ -363,22 +402,43 @@ func (worker *FileCopyWorker) uploadFileInChunks(ctx context.Context, task FileC <-concurrentChunks }() // assign a volume - assignResult, err := operation.Assign(worker.options.masterClient.GetMaster(), worker.options.grpcDialOption, &operation.VolumeAssignRequest{ - Count: 1, - Replication: *worker.options.replication, - Collection: *worker.options.collection, - Ttl: *worker.options.ttl, + var assignResult *filer_pb.AssignVolumeResponse + var assignError error + err := pb.WithGrpcFilerClient(worker.filerGrpcAddress, worker.options.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error { + request := &filer_pb.AssignVolumeRequest{ + Count: 1, + Replication: *worker.options.replication, + Collection: *worker.options.collection, + TtlSec: worker.options.ttlSec, + DiskType: *worker.options.diskType, + Path: task.destinationUrlPath + fileName, + } + + assignResult, assignError = client.AssignVolume(context.Background(), request) + if assignError != nil { + return fmt.Errorf("assign volume failure %v: %v", request, assignError) + } + if assignResult.Error != "" { + return fmt.Errorf("assign volume failure %v: %v", request, assignResult.Error) + } + return nil }) if err != nil { fmt.Printf("Failed to assign from %v: %v\n", worker.options.masters, err) } - targetUrl := "http://" + assignResult.Url + "/" + assignResult.Fid + targetUrl := "http://" + assignResult.Url + "/" + assignResult.FileId + if collection == "" { + collection = assignResult.Collection + } + if replication == "" { + replication = assignResult.Replication + } - uploadResult, err := operation.Upload(targetUrl, - fileName+"-"+strconv.FormatInt(i+1, 10), - io.NewSectionReader(f, i*chunkSize, chunkSize), - false, "", nil, assignResult.Auth) + uploadResult, err, _ := operation.Upload(targetUrl, fileName+"-"+strconv.FormatInt(i+1, 10), worker.options.cipher, io.NewSectionReader(f, i*chunkSize, chunkSize), false, "", nil, security.EncodedJwt(assignResult.Auth)) if err != nil { uploadError = fmt.Errorf("upload data %v to %s: %v\n", fileName, targetUrl,
err) return @@ -387,13 +447,8 @@ func (worker *FileCopyWorker) uploadFileInChunks(ctx context.Context, task FileC uploadError = fmt.Errorf("upload %v to %s result: %v\n", fileName, targetUrl, uploadResult.Error) return } - chunksChan <- &filer_pb.FileChunk{ - FileId: assignResult.Fid, - Offset: i * chunkSize, - Size: uint64(uploadResult.Size), - Mtime: time.Now().UnixNano(), - ETag: uploadResult.ETag, - } + chunksChan <- uploadResult.ToPbFileChunk(assignResult.FileId, i*chunkSize) + fmt.Printf("uploaded %s-%d to %s [%d,%d)\n", fileName, i+1, targetUrl, i*chunkSize, i*chunkSize+int64(uploadResult.Size)) }(i) } @@ -410,11 +465,13 @@ func (worker *FileCopyWorker) uploadFileInChunks(ctx context.Context, task FileC for _, chunk := range chunks { fileIds = append(fileIds, chunk.FileId) } - operation.DeleteFiles(worker.options.masterClient.GetMaster(), worker.options.grpcDialOption, fileIds) + operation.DeleteFiles(func() string { + return copy.masters[0] + }, false, worker.options.grpcDialOption, fileIds) return uploadError } - if err := withFilerClient(ctx, worker.filerGrpcAddress, worker.options.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error { + if err := pb.WithGrpcFilerClient(worker.filerGrpcAddress, worker.options.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error { request := &filer_pb.CreateEntryRequest{ Directory: task.destinationUrlPath, Entry: &filer_pb.Entry{ @@ -427,15 +484,15 @@ func (worker *FileCopyWorker) uploadFileInChunks(ctx context.Context, task FileC FileSize: uint64(task.fileSize), FileMode: uint32(task.fileMode), Mime: mimeType, - Replication: *worker.options.replication, - Collection: *worker.options.collection, - TtlSec: int32(util.ParseInt(*worker.options.ttl, 0)), + Replication: replication, + Collection: collection, + TtlSec: worker.options.ttlSec, }, Chunks: chunks, }, } - if _, err := client.CreateEntry(ctx, request); err != nil { + if err := filer_pb.CreateEntry(client, request); err != nil { return fmt.Errorf("update fh: %v", err) } return nil @@ -457,18 +514,12 @@ func detectMimeType(f *os.File) string { } if err != nil { fmt.Printf("read head of %v: %v\n", f.Name(), err) - return "application/octet-stream" + return "" } f.Seek(0, io.SeekStart) mimeType := http.DetectContentType(head[:n]) + if mimeType == "application/octet-stream" { + return "" + } return mimeType } - -func withFilerClient(ctx context.Context, filerAddress string, grpcDialOption grpc.DialOption, fn func(filer_pb.SeaweedFilerClient) error) error { - - return util.WithCachedGrpcClient(ctx, func(clientConn *grpc.ClientConn) error { - client := filer_pb.NewSeaweedFilerClient(clientConn) - return fn(client) - }, filerAddress, grpcDialOption) - -} diff --git a/weed/command/filer_meta_backup.go b/weed/command/filer_meta_backup.go new file mode 100644 index 000000000..ba0b44659 --- /dev/null +++ b/weed/command/filer_meta_backup.go @@ -0,0 +1,268 @@ +package command + +import ( + "context" + "fmt" + "github.com/chrislusf/seaweedfs/weed/filer" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/spf13/viper" + "google.golang.org/grpc" + "io" + "reflect" + "time" + + "github.com/chrislusf/seaweedfs/weed/pb" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/security" + "github.com/chrislusf/seaweedfs/weed/util" +) + +var ( + metaBackup FilerMetaBackupOptions +) + +type FilerMetaBackupOptions struct { + grpcDialOption grpc.DialOption + filerAddress *string + filerDirectory *string + restart *bool + backupFilerConfig *string + + store 
filer.FilerStore +} + +func init() { + cmdFilerMetaBackup.Run = runFilerMetaBackup // break init cycle + metaBackup.filerAddress = cmdFilerMetaBackup.Flag.String("filer", "localhost:8888", "filer hostname:port") + metaBackup.filerDirectory = cmdFilerMetaBackup.Flag.String("filerDir", "/", "a folder on the filer") + metaBackup.restart = cmdFilerMetaBackup.Flag.Bool("restart", false, "copy the full metadata before async incremental backup") + metaBackup.backupFilerConfig = cmdFilerMetaBackup.Flag.String("config", "", "path to filer.toml specifying backup filer store") +} + +var cmdFilerMetaBackup = &Command{ + UsageLine: "filer.meta.backup [-filer=localhost:8888] [-filerDir=/] [-restart] -config=/path/to/backup_filer.toml", + Short: "continuously backup filer meta data changes to anther filer store specified in a backup_filer.toml", + Long: `continuously backup filer meta data changes. +The backup writes to another filer store specified in a backup_filer.toml. + + weed filer.meta.backup -config=/path/to/backup_filer.toml -filer="localhost:8888" + weed filer.meta.backup -config=/path/to/backup_filer.toml -filer="localhost:8888" -restart + + `, +} + +func runFilerMetaBackup(cmd *Command, args []string) bool { + + metaBackup.grpcDialOption = security.LoadClientTLS(util.GetViper(), "grpc.client") + + // load backup_filer.toml + v := viper.New() + v.SetConfigFile(*metaBackup.backupFilerConfig) + + if err := v.ReadInConfig(); err != nil { // Handle errors reading the config file + glog.Fatalf("Failed to load %s file.\nPlease use this command to generate the a %s.toml file\n"+ + " weed scaffold -config=%s -output=.\n\n\n", + *metaBackup.backupFilerConfig, "backup_filer", "filer") + } + + if err := metaBackup.initStore(v); err != nil { + glog.V(0).Infof("init backup filer store: %v", err) + return true + } + + missingPreviousBackup := false + _, err := metaBackup.getOffset() + if err != nil { + missingPreviousBackup = true + } + + if *metaBackup.restart || missingPreviousBackup { + glog.V(0).Infof("traversing metadata tree...") + startTime := time.Now() + if err := metaBackup.traverseMetadata(); err != nil { + glog.Errorf("traverse meta data: %v", err) + return true + } + glog.V(0).Infof("metadata copied up to %v", startTime) + if err := metaBackup.setOffset(startTime); err != nil { + startTime = time.Now() + } + } + + for { + err := metaBackup.streamMetadataBackup() + if err != nil { + glog.Errorf("filer meta backup from %s: %v", *metaBackup.filerAddress, err) + time.Sleep(1747 * time.Millisecond) + } + } + + return true +} + +func (metaBackup *FilerMetaBackupOptions) initStore(v *viper.Viper) error { + // load configuration for default filer store + hasDefaultStoreConfigured := false + for _, store := range filer.Stores { + if v.GetBool(store.GetName() + ".enabled") { + store = reflect.New(reflect.ValueOf(store).Elem().Type()).Interface().(filer.FilerStore) + if err := store.Initialize(v, store.GetName()+"."); err != nil { + glog.Fatalf("failed to initialize store for %s: %+v", store.GetName(), err) + } + glog.V(0).Infof("configured filer store to %s", store.GetName()) + hasDefaultStoreConfigured = true + metaBackup.store = filer.NewFilerStoreWrapper(store) + break + } + } + if !hasDefaultStoreConfigured { + return fmt.Errorf("no filer store enabled in %s", v.ConfigFileUsed()) + } + + return nil +} + +func (metaBackup *FilerMetaBackupOptions) traverseMetadata() (err error) { + var saveErr error + + traverseErr := filer_pb.TraverseBfs(metaBackup, util.FullPath(*metaBackup.filerDirectory), 
func(parentPath util.FullPath, entry *filer_pb.Entry) { + + println("+", parentPath.Child(entry.Name)) + if err := metaBackup.store.InsertEntry(context.Background(), filer.FromPbEntry(string(parentPath), entry)); err != nil { + saveErr = fmt.Errorf("insert entry error: %v\n", err) + return + } + + }) + + if traverseErr != nil { + return fmt.Errorf("traverse: %v", traverseErr) + } + return saveErr +} + +var ( + MetaBackupKey = []byte("metaBackup") +) + +func (metaBackup *FilerMetaBackupOptions) streamMetadataBackup() error { + + startTime, err := metaBackup.getOffset() + if err != nil { + startTime = time.Now() + } + glog.V(0).Infof("streaming from %v", startTime) + + store := metaBackup.store + + eachEntryFunc := func(resp *filer_pb.SubscribeMetadataResponse) error { + + ctx := context.Background() + message := resp.EventNotification + + if message.OldEntry == nil && message.NewEntry == nil { + return nil + } + if message.OldEntry == nil && message.NewEntry != nil { + println("+", util.FullPath(message.NewParentPath).Child(message.NewEntry.Name)) + entry := filer.FromPbEntry(message.NewParentPath, message.NewEntry) + return store.InsertEntry(ctx, entry) + } + if message.OldEntry != nil && message.NewEntry == nil { + println("-", util.FullPath(resp.Directory).Child(message.OldEntry.Name)) + return store.DeleteEntry(ctx, util.FullPath(resp.Directory).Child(message.OldEntry.Name)) + } + if message.OldEntry != nil && message.NewEntry != nil { + if resp.Directory == message.NewParentPath && message.OldEntry.Name == message.NewEntry.Name { + println("~", util.FullPath(message.NewParentPath).Child(message.NewEntry.Name)) + entry := filer.FromPbEntry(message.NewParentPath, message.NewEntry) + return store.UpdateEntry(ctx, entry) + } + println("-", util.FullPath(resp.Directory).Child(message.OldEntry.Name)) + if err := store.DeleteEntry(ctx, util.FullPath(resp.Directory).Child(message.OldEntry.Name)); err != nil { + return err + } + println("+", util.FullPath(message.NewParentPath).Child(message.NewEntry.Name)) + return store.InsertEntry(ctx, filer.FromPbEntry(message.NewParentPath, message.NewEntry)) + } + + return nil + } + + tailErr := pb.WithFilerClient(*metaBackup.filerAddress, metaBackup.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error { + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + stream, err := client.SubscribeMetadata(ctx, &filer_pb.SubscribeMetadataRequest{ + ClientName: "meta_backup", + PathPrefix: *metaBackup.filerDirectory, + SinceNs: startTime.UnixNano(), + }) + if err != nil { + return fmt.Errorf("listen: %v", err) + } + + var counter int64 + var lastWriteTime time.Time + for { + resp, listenErr := stream.Recv() + if listenErr == io.EOF { + return nil + } + if listenErr != nil { + return listenErr + } + if err = eachEntryFunc(resp); err != nil { + return err + } + + counter++ + if lastWriteTime.Add(3 * time.Second).Before(time.Now()) { + glog.V(0).Infof("meta backup %s progressed to %v %0.2f/sec", *metaBackup.filerAddress, time.Unix(0, resp.TsNs), float64(counter)/float64(3)) + counter = 0 + lastWriteTime = time.Now() + if err2 := metaBackup.setOffset(lastWriteTime); err2 != nil { + return err2 + } + } + + } + + }) + return tailErr +} + +func (metaBackup *FilerMetaBackupOptions) getOffset() (lastWriteTime time.Time, err error) { + value, err := metaBackup.store.KvGet(context.Background(), MetaBackupKey) + if err != nil { + return + } + tsNs := util.BytesToUint64(value) + + return time.Unix(0, int64(tsNs)), nil +} + +func (metaBackup 
*FilerMetaBackupOptions) setOffset(lastWriteTime time.Time) error { + valueBuf := make([]byte, 8) + util.Uint64toBytes(valueBuf, uint64(lastWriteTime.UnixNano())) + + if err := metaBackup.store.KvPut(context.Background(), MetaBackupKey, valueBuf); err != nil { + return err + } + return nil +} + +var _ = filer_pb.FilerClient(&FilerMetaBackupOptions{}) + +func (metaBackup *FilerMetaBackupOptions) WithFilerClient(fn func(filer_pb.SeaweedFilerClient) error) error { + + return pb.WithFilerClient(*metaBackup.filerAddress, metaBackup.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error { + return fn(client) + }) + +} + +func (metaBackup *FilerMetaBackupOptions) AdjustedUrl(location *filer_pb.Location) string { + return location.Url +} diff --git a/weed/command/filer_meta_tail.go b/weed/command/filer_meta_tail.go new file mode 100644 index 000000000..8451ffd78 --- /dev/null +++ b/weed/command/filer_meta_tail.go @@ -0,0 +1,211 @@ +package command + +import ( + "context" + "fmt" + "github.com/golang/protobuf/jsonpb" + jsoniter "github.com/json-iterator/go" + "github.com/olivere/elastic/v7" + "io" + "os" + "path/filepath" + "strings" + "time" + + "github.com/chrislusf/seaweedfs/weed/pb" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/security" + "github.com/chrislusf/seaweedfs/weed/util" +) + +func init() { + cmdFilerMetaTail.Run = runFilerMetaTail // break init cycle +} + +var cmdFilerMetaTail = &Command{ + UsageLine: "filer.meta.tail [-filer=localhost:8888] [-pathPrefix=/]", + Short: "see continuous changes on a filer", + Long: `See continuous changes on a filer. + + weed filer.meta.tail -timeAgo=30h | grep truncate + weed filer.meta.tail -timeAgo=30h | jq . + weed filer.meta.tail -timeAgo=30h | jq .eventNotification.newEntry.name + + `, +} + +var ( + tailFiler = cmdFilerMetaTail.Flag.String("filer", "localhost:8888", "filer hostname:port") + tailTarget = cmdFilerMetaTail.Flag.String("pathPrefix", "/", "path to a folder or common prefix for the folders or files on filer") + tailStart = cmdFilerMetaTail.Flag.Duration("timeAgo", 0, "start time before now. \"300ms\", \"1.5h\" or \"2h45m\". 
Valid time units are \"ns\", \"us\" (or \"µs\"), \"ms\", \"s\", \"m\", \"h\"") + tailPattern = cmdFilerMetaTail.Flag.String("pattern", "", "full path or just filename pattern, ex: \"/home/?opher\", \"*.pdf\", see https://golang.org/pkg/path/filepath/#Match ") + esServers = cmdFilerMetaTail.Flag.String("es", "", "comma-separated elastic servers http://") + esIndex = cmdFilerMetaTail.Flag.String("es.index", "seaweedfs", "ES index name") +) + +func runFilerMetaTail(cmd *Command, args []string) bool { + + grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.client") + + var filterFunc func(dir, fname string) bool + if *tailPattern != "" { + if strings.Contains(*tailPattern, "/") { + println("watch path pattern", *tailPattern) + filterFunc = func(dir, fname string) bool { + matched, err := filepath.Match(*tailPattern, dir+"/"+fname) + if err != nil { + fmt.Printf("error: %v", err) + } + return matched + } + } else { + println("watch file pattern", *tailPattern) + filterFunc = func(dir, fname string) bool { + matched, err := filepath.Match(*tailPattern, fname) + if err != nil { + fmt.Printf("error: %v", err) + } + return matched + } + } + } + + shouldPrint := func(resp *filer_pb.SubscribeMetadataResponse) bool { + if filterFunc == nil { + return true + } + if resp.EventNotification.OldEntry == nil && resp.EventNotification.NewEntry == nil { + return false + } + if resp.EventNotification.OldEntry != nil && filterFunc(resp.Directory, resp.EventNotification.OldEntry.Name) { + return true + } + if resp.EventNotification.NewEntry != nil && filterFunc(resp.EventNotification.NewParentPath, resp.EventNotification.NewEntry.Name) { + return true + } + return false + } + + jsonpbMarshaler := jsonpb.Marshaler{ + EmitDefaults: false, + } + eachEntryFunc := func(resp *filer_pb.SubscribeMetadataResponse) error { + jsonpbMarshaler.Marshal(os.Stdout, resp) + fmt.Fprintln(os.Stdout) + return nil + } + if *esServers != "" { + var err error + eachEntryFunc, err = sendToElasticSearchFunc(*esServers, *esIndex) + if err != nil { + fmt.Printf("create elastic search client to %s: %+v\n", *esServers, err) + return false + } + } + + tailErr := pb.WithFilerClient(*tailFiler, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error { + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + stream, err := client.SubscribeMetadata(ctx, &filer_pb.SubscribeMetadataRequest{ + ClientName: "tail", + PathPrefix: *tailTarget, + SinceNs: time.Now().Add(-*tailStart).UnixNano(), + }) + if err != nil { + return fmt.Errorf("listen: %v", err) + } + + for { + resp, listenErr := stream.Recv() + if listenErr == io.EOF { + return nil + } + if listenErr != nil { + return listenErr + } + if !shouldPrint(resp) { + continue + } + if err = eachEntryFunc(resp); err != nil { + return err + } + } + + }) + if tailErr != nil { + fmt.Printf("tail %s: %v\n", *tailFiler, tailErr) + } + + return true +} + +type EsDocument struct { + Dir string `json:"dir,omitempty"` + Name string `json:"name,omitempty"` + IsDirectory bool `json:"isDir,omitempty"` + Size uint64 `json:"size,omitempty"` + Uid uint32 `json:"uid,omitempty"` + Gid uint32 `json:"gid,omitempty"` + UserName string `json:"userName,omitempty"` + Collection string `json:"collection,omitempty"` + Crtime int64 `json:"crtime,omitempty"` + Mtime int64 `json:"mtime,omitempty"` + Mime string `json:"mime,omitempty"` +} + +func toEsEntry(event *filer_pb.EventNotification) (*EsDocument, string) { + entry := event.NewEntry + dir, name := event.NewParentPath, entry.Name + 
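+ // the document id is an MD5 hash of the entry's full path, so re-indexing
+ // the same file overwrites one Elasticsearch document and a later delete
+ // can address it deterministically (see the Delete call below)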
id := util.Md5String([]byte(util.NewFullPath(dir, name))) + esEntry := &EsDocument{ + Dir: dir, + Name: name, + IsDirectory: entry.IsDirectory, + Size: entry.Attributes.FileSize, + Uid: entry.Attributes.Uid, + Gid: entry.Attributes.Gid, + UserName: entry.Attributes.UserName, + Collection: entry.Attributes.Collection, + Crtime: entry.Attributes.Crtime, + Mtime: entry.Attributes.Mtime, + Mime: entry.Attributes.Mime, + } + return esEntry, id +} + +func sendToElasticSearchFunc(servers string, esIndex string) (func(resp *filer_pb.SubscribeMetadataResponse) error, error) { + options := []elastic.ClientOptionFunc{} + options = append(options, elastic.SetURL(strings.Split(servers, ",")...)) + options = append(options, elastic.SetSniff(false)) + options = append(options, elastic.SetHealthcheck(false)) + client, err := elastic.NewClient(options...) + if err != nil { + return nil, err + } + return func(resp *filer_pb.SubscribeMetadataResponse) error { + event := resp.EventNotification + if event.OldEntry != nil && + (event.NewEntry == nil || resp.Directory != event.NewParentPath || event.OldEntry.Name != event.NewEntry.Name) { + // delete or not update the same file + dir, name := resp.Directory, event.OldEntry.Name + id := util.Md5String([]byte(util.NewFullPath(dir, name))) + println("delete", id) + _, err := client.Delete().Index(esIndex).Id(id).Do(context.Background()) + return err + } + if event.NewEntry != nil { + // add a new file or update the same file + esEntry, id := toEsEntry(event) + value, err := jsoniter.Marshal(esEntry) + if err != nil { + return err + } + println(string(value)) + _, err = client.Index().Index(esIndex).Id(id).BodyJson(string(value)).Do(context.Background()) + return err + } + return nil + }, nil +} diff --git a/weed/command/filer_replication.go b/weed/command/filer_replication.go index c6e7f5dba..885c95540 100644 --- a/weed/command/filer_replication.go +++ b/weed/command/filer_replication.go @@ -11,10 +11,10 @@ import ( _ "github.com/chrislusf/seaweedfs/weed/replication/sink/b2sink" _ "github.com/chrislusf/seaweedfs/weed/replication/sink/filersink" _ "github.com/chrislusf/seaweedfs/weed/replication/sink/gcssink" + _ "github.com/chrislusf/seaweedfs/weed/replication/sink/localsink" _ "github.com/chrislusf/seaweedfs/weed/replication/sink/s3sink" "github.com/chrislusf/seaweedfs/weed/replication/sub" "github.com/chrislusf/seaweedfs/weed/util" - "github.com/spf13/viper" ) func init() { @@ -39,7 +39,7 @@ func runFilerReplicate(cmd *Command, args []string) bool { util.LoadConfiguration("security", false) util.LoadConfiguration("replication", true) util.LoadConfiguration("notification", true) - config := viper.GetViper() + config := util.GetViper() var notificationInput sub.NotificationInput @@ -47,8 +47,7 @@ func runFilerReplicate(cmd *Command, args []string) bool { for _, input := range sub.NotificationInputs { if config.GetBool("notification." + input.GetName() + ".enabled") { - viperSub := config.Sub("notification." 
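The Elasticsearch document id above is derived from the entry's full path, so creates, updates, and deletes all address the same document. A sketch of the idea, assuming util.Md5String is a hex-encoded MD5 and util.NewFullPath joins dir and name (both assumptions; the path is hypothetical):

package main

import (
	"crypto/md5"
	"fmt"
)

func main() {
	fullPath := "/home/gopher/report.pdf" // hypothetical entry path
	id := fmt.Sprintf("%x", md5.Sum([]byte(fullPath)))
	fmt.Println(id) // stable id: index, update, and delete all hit the same doc
}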
+ input.GetName()) - if err := input.Initialize(viperSub); err != nil { + if err := input.Initialize(config, "notification."+input.GetName()+"."); err != nil { glog.Fatalf("Failed to initialize notification input for %s: %+v", input.GetName(), err) } @@ -66,29 +65,16 @@ func runFilerReplicate(cmd *Command, args []string) bool { // avoid recursive replication if config.GetBool("notification.source.filer.enabled") && config.GetBool("notification.sink.filer.enabled") { - sourceConfig, sinkConfig := config.Sub("source.filer"), config.Sub("sink.filer") - if sourceConfig.GetString("grpcAddress") == sinkConfig.GetString("grpcAddress") { - fromDir := sourceConfig.GetString("directory") - toDir := sinkConfig.GetString("directory") + if config.GetString("source.filer.grpcAddress") == config.GetString("sink.filer.grpcAddress") { + fromDir := config.GetString("source.filer.directory") + toDir := config.GetString("sink.filer.directory") if strings.HasPrefix(toDir, fromDir) { glog.Fatalf("recursive replication! source directory %s includes the sink directory %s", fromDir, toDir) } } } - var dataSink sink.ReplicationSink - for _, sk := range sink.Sinks { - if config.GetBool("sink." + sk.GetName() + ".enabled") { - viperSub := config.Sub("sink." + sk.GetName()) - if err := sk.Initialize(viperSub); err != nil { - glog.Fatalf("Failed to initialize sink for %s: %+v", - sk.GetName(), err) - } - glog.V(0).Infof("Configure sink to %s", sk.GetName()) - dataSink = sk - break - } - } + dataSink := findSink(config) if dataSink == nil { println("no data sink configured in replication.toml:") @@ -98,16 +84,22 @@ func runFilerReplicate(cmd *Command, args []string) bool { return true } - replicator := replication.NewReplicator(config.Sub("source.filer"), dataSink) + replicator := replication.NewReplicator(config, "source.filer.", dataSink) for { - key, m, err := notificationInput.ReceiveMessage() + key, m, onSuccessFn, onFailureFn, err := notificationInput.ReceiveMessage() if err != nil { glog.Errorf("receive %s: %+v", key, err) + if onFailureFn != nil { + onFailureFn() + } continue } if key == "" { // long poll received no messages + if onSuccessFn != nil { + onSuccessFn() + } continue } if m.OldEntry != nil && m.NewEntry == nil { @@ -119,15 +111,36 @@ func runFilerReplicate(cmd *Command, args []string) bool { } if err = replicator.Replicate(context.Background(), key, m); err != nil { glog.Errorf("replicate %s: %+v", key, err) + if onFailureFn != nil { + onFailureFn() + } } else { glog.V(1).Infof("replicated %s", key) + if onSuccessFn != nil { + onSuccessFn() + } } } - return true } -func validateOneEnabledInput(config *viper.Viper) { +func findSink(config *util.ViperProxy) sink.ReplicationSink { + var dataSink sink.ReplicationSink + for _, sk := range sink.Sinks { + if config.GetBool("sink." + sk.GetName() + ".enabled") { + if err := sk.Initialize(config, "sink."+sk.GetName()+"."); err != nil { + glog.Fatalf("Failed to initialize sink for %s: %+v", + sk.GetName(), err) + } + glog.V(0).Infof("Configure sink to %s", sk.GetName()) + dataSink = sk + break + } + } + return dataSink +} + +func validateOneEnabledInput(config *util.ViperProxy) { enabledInput := "" for _, input := range sub.NotificationInputs { if config.GetBool("notification." 
+ input.GetName() + ".enabled") { diff --git a/weed/command/filer_sync.go b/weed/command/filer_sync.go new file mode 100644 index 000000000..0f34e5701 --- /dev/null +++ b/weed/command/filer_sync.go @@ -0,0 +1,374 @@ +package command + +import ( + "context" + "errors" + "fmt" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/replication" + "github.com/chrislusf/seaweedfs/weed/replication/sink" + "github.com/chrislusf/seaweedfs/weed/replication/sink/filersink" + "github.com/chrislusf/seaweedfs/weed/replication/source" + "github.com/chrislusf/seaweedfs/weed/security" + "github.com/chrislusf/seaweedfs/weed/util" + "github.com/chrislusf/seaweedfs/weed/util/grace" + "google.golang.org/grpc" + "io" + "strings" + "time" +) + +type SyncOptions struct { + isActivePassive *bool + filerA *string + filerB *string + aPath *string + bPath *string + aReplication *string + bReplication *string + aCollection *string + bCollection *string + aTtlSec *int + bTtlSec *int + aDiskType *string + bDiskType *string + aDebug *bool + bDebug *bool + aProxyByFiler *bool + bProxyByFiler *bool +} + +var ( + syncOptions SyncOptions + syncCpuProfile *string + syncMemProfile *string +) + +func init() { + cmdFilerSynchronize.Run = runFilerSynchronize // break init cycle + syncOptions.isActivePassive = cmdFilerSynchronize.Flag.Bool("isActivePassive", false, "one directional follow from A to B if true") + syncOptions.filerA = cmdFilerSynchronize.Flag.String("a", "", "filer A in one SeaweedFS cluster") + syncOptions.filerB = cmdFilerSynchronize.Flag.String("b", "", "filer B in the other SeaweedFS cluster") + syncOptions.aPath = cmdFilerSynchronize.Flag.String("a.path", "/", "directory to sync on filer A") + syncOptions.bPath = cmdFilerSynchronize.Flag.String("b.path", "/", "directory to sync on filer B") + syncOptions.aReplication = cmdFilerSynchronize.Flag.String("a.replication", "", "replication on filer A") + syncOptions.bReplication = cmdFilerSynchronize.Flag.String("b.replication", "", "replication on filer B") + syncOptions.aCollection = cmdFilerSynchronize.Flag.String("a.collection", "", "collection on filer A") + syncOptions.bCollection = cmdFilerSynchronize.Flag.String("b.collection", "", "collection on filer B") + syncOptions.aTtlSec = cmdFilerSynchronize.Flag.Int("a.ttlSec", 0, "ttl in seconds on filer A") + syncOptions.bTtlSec = cmdFilerSynchronize.Flag.Int("b.ttlSec", 0, "ttl in seconds on filer B") + syncOptions.aDiskType = cmdFilerSynchronize.Flag.String("a.disk", "", "[hdd|ssd|] hard drive or solid state drive or any tag on filer A") + syncOptions.bDiskType = cmdFilerSynchronize.Flag.String("b.disk", "", "[hdd|ssd|] hard drive or solid state drive or any tag on filer B") + syncOptions.aProxyByFiler = cmdFilerSynchronize.Flag.Bool("a.filerProxy", false, "read and write file chunks by filer A instead of volume servers") + syncOptions.bProxyByFiler = cmdFilerSynchronize.Flag.Bool("b.filerProxy", false, "read and write file chunks by filer B instead of volume servers") + syncOptions.aDebug = cmdFilerSynchronize.Flag.Bool("a.debug", false, "debug mode to print out filer A received files") + syncOptions.bDebug = cmdFilerSynchronize.Flag.Bool("b.debug", false, "debug mode to print out filer B received files") + syncCpuProfile = cmdFilerSynchronize.Flag.String("cpuprofile", "", "cpu profile output file") + syncMemProfile = cmdFilerSynchronize.Flag.String("memprofile", "", "memory profile output 
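The notification-input and sink initialization above now passes the whole config plus a key prefix instead of a viper.Sub sub-tree. A rough sketch of that prefix-based lookup (the key names here are hypothetical):

package main

import (
	"fmt"

	"github.com/spf13/viper"
)

func main() {
	v := viper.New()
	v.Set("notification.kafka.hosts", "localhost:9092") // hypothetical key

	prefix := "notification.kafka."
	// instead of v.Sub("notification.kafka").GetString("hosts"):
	fmt.Println(v.GetString(prefix + "hosts"))
}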
file") +} + +var cmdFilerSynchronize = &Command{ + UsageLine: "filer.sync -a=: -b=:", + Short: "resumeable continuous synchronization between two active-active or active-passive SeaweedFS clusters", + Long: `resumeable continuous synchronization for file changes between two active-active or active-passive filers + + filer.sync listens on filer notifications. If any file is updated, it will fetch the updated content, + and write to the other destination. Different from filer.replicate: + + * filer.sync only works between two filers. + * filer.sync does not need any special message queue setup. + * filer.sync supports both active-active and active-passive modes. + + If restarted, the synchronization will resume from the previous checkpoints, persisted every minute. + A fresh sync will start from the earliest metadata logs. + +`, +} + +func runFilerSynchronize(cmd *Command, args []string) bool { + + grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.client") + + grace.SetupProfiling(*syncCpuProfile, *syncMemProfile) + + go func() { + for { + err := doSubscribeFilerMetaChanges(grpcDialOption, *syncOptions.filerA, *syncOptions.aPath, *syncOptions.aProxyByFiler, *syncOptions.filerB, + *syncOptions.bPath, *syncOptions.bReplication, *syncOptions.bCollection, *syncOptions.bTtlSec, *syncOptions.bProxyByFiler, *syncOptions.bDiskType, *syncOptions.bDebug) + if err != nil { + glog.Errorf("sync from %s to %s: %v", *syncOptions.filerA, *syncOptions.filerB, err) + time.Sleep(1747 * time.Millisecond) + } + } + }() + + if !*syncOptions.isActivePassive { + go func() { + for { + err := doSubscribeFilerMetaChanges(grpcDialOption, *syncOptions.filerB, *syncOptions.bPath, *syncOptions.bProxyByFiler, *syncOptions.filerA, + *syncOptions.aPath, *syncOptions.aReplication, *syncOptions.aCollection, *syncOptions.aTtlSec, *syncOptions.aProxyByFiler, *syncOptions.aDiskType, *syncOptions.aDebug) + if err != nil { + glog.Errorf("sync from %s to %s: %v", *syncOptions.filerB, *syncOptions.filerA, err) + time.Sleep(2147 * time.Millisecond) + } + } + }() + } + + select {} + + return true +} + +func doSubscribeFilerMetaChanges(grpcDialOption grpc.DialOption, sourceFiler, sourcePath string, sourceReadChunkFromFiler bool, targetFiler, targetPath string, + replicationStr, collection string, ttlSec int, sinkWriteChunkByFiler bool, diskType string, debug bool) error { + + // read source filer signature + sourceFilerSignature, sourceErr := replication.ReadFilerSignature(grpcDialOption, sourceFiler) + if sourceErr != nil { + return sourceErr + } + // read target filer signature + targetFilerSignature, targetErr := replication.ReadFilerSignature(grpcDialOption, targetFiler) + if targetErr != nil { + return targetErr + } + + // if first time, start from now + // if has previously synced, resume from that point of time + sourceFilerOffsetTsNs, err := getOffset(grpcDialOption, targetFiler, SyncKeyPrefix, sourceFilerSignature) + if err != nil { + return err + } + + glog.V(0).Infof("start sync %s(%d) => %s(%d) from %v(%d)", sourceFiler, sourceFilerSignature, targetFiler, targetFilerSignature, time.Unix(0, sourceFilerOffsetTsNs), sourceFilerOffsetTsNs) + + // create filer sink + filerSource := &source.FilerSource{} + filerSource.DoInitialize(sourceFiler, pb.ServerToGrpcAddress(sourceFiler), sourcePath, sourceReadChunkFromFiler) + filerSink := &filersink.FilerSink{} + filerSink.DoInitialize(targetFiler, pb.ServerToGrpcAddress(targetFiler), targetPath, replicationStr, collection, ttlSec, diskType, grpcDialOption, 
sinkWriteChunkByFiler) + filerSink.SetSourceFiler(filerSource) + + persistEventFn := genProcessFunction(sourcePath, targetPath, filerSink, debug) + + processEventFn := func(resp *filer_pb.SubscribeMetadataResponse) error { + message := resp.EventNotification + for _, sig := range message.Signatures { + if sig == targetFilerSignature && targetFilerSignature != 0 { + fmt.Printf("%s skipping %s change to %v\n", targetFiler, sourceFiler, message) + return nil + } + } + return persistEventFn(resp) + } + + return pb.WithFilerClient(sourceFiler, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error { + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + stream, err := client.SubscribeMetadata(ctx, &filer_pb.SubscribeMetadataRequest{ + ClientName: "syncTo_" + targetFiler, + PathPrefix: sourcePath, + SinceNs: sourceFilerOffsetTsNs, + Signature: targetFilerSignature, + }) + if err != nil { + return fmt.Errorf("listen: %v", err) + } + + var counter int64 + var lastWriteTime time.Time + for { + resp, listenErr := stream.Recv() + if listenErr == io.EOF { + return nil + } + if listenErr != nil { + return listenErr + } + + if err := processEventFn(resp); err != nil { + return err + } + + counter++ + if lastWriteTime.Add(3 * time.Second).Before(time.Now()) { + glog.V(0).Infof("sync %s => %s progressed to %v %0.2f/sec", sourceFiler, targetFiler, time.Unix(0, resp.TsNs), float64(counter)/float64(3)) + counter = 0 + lastWriteTime = time.Now() + if err := setOffset(grpcDialOption, targetFiler, SyncKeyPrefix, sourceFilerSignature, resp.TsNs); err != nil { + return err + } + } + + } + + }) + +} + +const ( + SyncKeyPrefix = "sync." +) + +func getOffset(grpcDialOption grpc.DialOption, filer string, signaturePrefix string, signature int32) (lastOffsetTsNs int64, readErr error) { + + readErr = pb.WithFilerClient(filer, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error { + syncKey := []byte(signaturePrefix + "____") + util.Uint32toBytes(syncKey[len(signaturePrefix):len(signaturePrefix)+4], uint32(signature)) + + resp, err := client.KvGet(context.Background(), &filer_pb.KvGetRequest{Key: syncKey}) + if err != nil { + return err + } + + if len(resp.Error) != 0 { + return errors.New(resp.Error) + } + if len(resp.Value) < 8 { + return nil + } + + lastOffsetTsNs = int64(util.BytesToUint64(resp.Value)) + + return nil + }) + + return + +} + +func setOffset(grpcDialOption grpc.DialOption, filer string, signaturePrefix string, signature int32, offsetTsNs int64) error { + return pb.WithFilerClient(filer, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error { + + syncKey := []byte(signaturePrefix + "____") + util.Uint32toBytes(syncKey[len(signaturePrefix):len(signaturePrefix)+4], uint32(signature)) + + valueBuf := make([]byte, 8) + util.Uint64toBytes(valueBuf, uint64(offsetTsNs)) + + resp, err := client.KvPut(context.Background(), &filer_pb.KvPutRequest{ + Key: syncKey, + Value: valueBuf, + }) + if err != nil { + return err + } + + if len(resp.Error) != 0 { + return errors.New(resp.Error) + } + + return nil + + }) + +} + +func genProcessFunction(sourcePath string, targetPath string, dataSink sink.ReplicationSink, debug bool) func(resp *filer_pb.SubscribeMetadataResponse) error { + // process function + processEventFn := func(resp *filer_pb.SubscribeMetadataResponse) error { + message := resp.EventNotification + + var sourceOldKey, sourceNewKey util.FullPath + if message.OldEntry != nil { + sourceOldKey = util.FullPath(resp.Directory).Child(message.OldEntry.Name) + } + 
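A sketch of the checkpoint layout used by getOffset/setOffset above: the key is the "sync." prefix followed by the 4-byte source-filer signature, and the value is an 8-byte timestamp in nanoseconds. Big-endian encoding is assumed here, to match the util.BytesToUint64 read path:

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	const prefix = "sync."
	signature := uint32(123456789) // hypothetical source filer signature

	key := []byte(prefix + "____") // 4 placeholder bytes for the signature
	binary.BigEndian.PutUint32(key[len(prefix):], signature)

	value := make([]byte, 8) // last synced timestamp, in ns
	binary.BigEndian.PutUint64(value, uint64(1_600_000_000_000_000_000))

	fmt.Printf("key=%q value=%x\n", key, value)
}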
if message.NewEntry != nil { + sourceNewKey = util.FullPath(message.NewParentPath).Child(message.NewEntry.Name) + } + + if debug { + glog.V(0).Infof("received %v", resp) + } + + if !strings.HasPrefix(resp.Directory, sourcePath) { + return nil + } + + // handle deletions + if message.OldEntry != nil && message.NewEntry == nil { + if !strings.HasPrefix(string(sourceOldKey), sourcePath) { + return nil + } + key := buildKey(dataSink, message, targetPath, sourceOldKey, sourcePath) + return dataSink.DeleteEntry(key, message.OldEntry.IsDirectory, message.DeleteChunks, message.Signatures) + } + + // handle new entries + if message.OldEntry == nil && message.NewEntry != nil { + if !strings.HasPrefix(string(sourceNewKey), sourcePath) { + return nil + } + key := buildKey(dataSink, message, targetPath, sourceNewKey, sourcePath) + return dataSink.CreateEntry(key, message.NewEntry, message.Signatures) + } + + // this is something special? + if message.OldEntry == nil && message.NewEntry == nil { + return nil + } + + // handle updates + if strings.HasPrefix(string(sourceOldKey), sourcePath) { + // old key is in the watched directory + if strings.HasPrefix(string(sourceNewKey), sourcePath) { + // new key is also in the watched directory + if !dataSink.IsIncremental() { + oldKey := util.Join(targetPath, string(sourceOldKey)[len(sourcePath):]) + message.NewParentPath = util.Join(targetPath, message.NewParentPath[len(sourcePath):]) + foundExisting, err := dataSink.UpdateEntry(string(oldKey), message.OldEntry, message.NewParentPath, message.NewEntry, message.DeleteChunks, message.Signatures) + if foundExisting { + return err + } + + // not able to find old entry + if err = dataSink.DeleteEntry(string(oldKey), message.OldEntry.IsDirectory, false, message.Signatures); err != nil { + return fmt.Errorf("delete old entry %v: %v", oldKey, err) + } + } + // create the new entry + newKey := buildKey(dataSink, message, targetPath, sourceNewKey, sourcePath) + return dataSink.CreateEntry(newKey, message.NewEntry, message.Signatures) + + } else { + // new key is outside of the watched directory + if !dataSink.IsIncremental() { + key := buildKey(dataSink, message, targetPath, sourceOldKey, sourcePath) + return dataSink.DeleteEntry(key, message.OldEntry.IsDirectory, message.DeleteChunks, message.Signatures) + } + } + } else { + // old key is outside of the watched directory + if strings.HasPrefix(string(sourceNewKey), sourcePath) { + // new key is in the watched directory + key := buildKey(dataSink, message, targetPath, sourceNewKey, sourcePath) + return dataSink.CreateEntry(key, message.NewEntry, message.Signatures) + } else { + // new key is also outside of the watched directory + // skip + } + } + + return nil + } + return processEventFn +} + +func buildKey(dataSink sink.ReplicationSink, message *filer_pb.EventNotification, targetPath string, sourceKey util.FullPath, sourcePath string) string { + if !dataSink.IsIncremental() { + return util.Join(targetPath, string(sourceKey)[len(sourcePath):]) + } + var mTime int64 + if message.NewEntry != nil { + mTime = message.NewEntry.Attributes.Mtime + } else if message.OldEntry != nil { + mTime = message.OldEntry.Attributes.Mtime + } + dateKey := time.Unix(mTime, 0).Format("2006-01-02") + return util.Join(targetPath, dateKey, string(sourceKey)[len(sourcePath):]) +} diff --git a/weed/command/fix.go b/weed/command/fix.go index 76bc19f7e..ae9a051b8 100644 --- a/weed/command/fix.go +++ b/weed/command/fix.go @@ -11,6 +11,7 @@ import ( 
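The incremental-sink branch of buildKey above groups entries under a per-day folder derived from the entry's mtime. A small sketch (paths are hypothetical; the printed date depends on the local time zone):

package main

import (
	"fmt"
	"path/filepath"
	"time"
)

func main() {
	targetPath := "/backup"           // sink root
	sourcePath := "/data"             // watched source prefix
	sourceKey := "/data/logs/app.log" // full source path of the entry
	mtime := time.Unix(1_600_000_000, 0)

	dateKey := mtime.Format("2006-01-02")
	key := filepath.Join(targetPath, dateKey, sourceKey[len(sourcePath):])
	fmt.Println(key) // e.g. /backup/2020-09-13/logs/app.log
}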
"github.com/chrislusf/seaweedfs/weed/storage/needle_map" "github.com/chrislusf/seaweedfs/weed/storage/super_block" "github.com/chrislusf/seaweedfs/weed/storage/types" + "github.com/chrislusf/seaweedfs/weed/util" ) func init() { @@ -46,8 +47,8 @@ func (scanner *VolumeFileScanner4Fix) ReadNeedleBody() bool { } func (scanner *VolumeFileScanner4Fix) VisitNeedle(n *needle.Needle, offset int64, needleHeader, needleBody []byte) error { - glog.V(2).Infof("key %d offset %d size %d disk_size %d gzip %v", n.Id, offset, n.Size, n.DiskSize(scanner.version), n.IsGzipped()) - if n.Size > 0 && n.Size != types.TombstoneFileSize { + glog.V(2).Infof("key %d offset %d size %d disk_size %d compressed %v", n.Id, offset, n.Size, n.DiskSize(scanner.version), n.IsCompressed()) + if n.Size.IsValid() { pe := scanner.nm.Set(n.Id, types.ToOffset(offset), n.Size) glog.V(2).Infof("saved %d with error %v", n.Size, pe) } else { @@ -67,23 +68,23 @@ func runFix(cmd *Command, args []string) bool { if *fixVolumeCollection != "" { baseFileName = *fixVolumeCollection + "_" + baseFileName } - indexFileName := path.Join(*fixVolumePath, baseFileName+".idx") - indexFile, err := os.OpenFile(indexFileName, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644) - if err != nil { - glog.Fatalf("Create Volume Index [ERROR] %s\n", err) - } - defer indexFile.Close() + indexFileName := path.Join(util.ResolvePath(*fixVolumePath), baseFileName+".idx") nm := needle_map.NewMemDb() + defer nm.Close() vid := needle.VolumeId(*fixVolumeId) scanner := &VolumeFileScanner4Fix{ nm: nm, } - err = storage.ScanVolumeFile(*fixVolumePath, *fixVolumeCollection, vid, storage.NeedleMapInMemory, scanner) - if err != nil { - glog.Fatalf("Export Volume File [ERROR] %s\n", err) + if err := storage.ScanVolumeFile(util.ResolvePath(*fixVolumePath), *fixVolumeCollection, vid, storage.NeedleMapInMemory, scanner); err != nil { + glog.Fatalf("scan .dat File: %v", err) + os.Remove(indexFileName) + } + + if err := nm.SaveToIdx(indexFileName); err != nil { + glog.Fatalf("save to .idx File: %v", err) os.Remove(indexFileName) } diff --git a/weed/command/gateway.go b/weed/command/gateway.go new file mode 100644 index 000000000..8a6f852a5 --- /dev/null +++ b/weed/command/gateway.go @@ -0,0 +1,93 @@ +package command + +import ( + "net/http" + "strconv" + "strings" + "time" + + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/server" + "github.com/chrislusf/seaweedfs/weed/util" +) + +var ( + gatewayOptions GatewayOptions +) + +type GatewayOptions struct { + masters *string + filers *string + bindIp *string + port *int + maxMB *int +} + +func init() { + cmdGateway.Run = runGateway // break init cycle + gatewayOptions.masters = cmdGateway.Flag.String("master", "localhost:9333", "comma-separated master servers") + gatewayOptions.filers = cmdGateway.Flag.String("filer", "localhost:8888", "comma-separated filer servers") + gatewayOptions.bindIp = cmdGateway.Flag.String("ip.bind", "localhost", "ip address to bind to") + gatewayOptions.port = cmdGateway.Flag.Int("port", 5647, "gateway http listen port") + gatewayOptions.maxMB = cmdGateway.Flag.Int("maxMB", 4, "split files larger than the limit") +} + +var cmdGateway = &Command{ + UsageLine: "gateway -port=8888 -master=[,]* -filer=[,]*", + Short: "start a gateway server that points to a list of master servers or a list of filers", + Long: `start a gateway server which accepts REST operation to write any blobs, files, or topic messages. 
+
+ POST /blobs/
+ upload the blob and return a chunk id
+ DELETE /blobs/
+ delete a chunk id
+
+ /*
+ POST /files/path/to/a/file
+ save /path/to/a/file on filer
+ DELETE /files/path/to/a/file
+ delete /path/to/a/file on filer
+
+ POST /topics/topicName
+ save on filer to /topics/topicName//ts.json
+ */
+`,
+}
+
+func runGateway(cmd *Command, args []string) bool {
+
+ util.LoadConfiguration("security", false)
+
+ gatewayOptions.startGateway()
+
+ return true
+}
+
+func (gw *GatewayOptions) startGateway() {
+
+ defaultMux := http.NewServeMux()
+
+ _, gws_err := weed_server.NewGatewayServer(defaultMux, &weed_server.GatewayOption{
+ Masters: strings.Split(*gw.masters, ","),
+ Filers: strings.Split(*gw.filers, ","),
+ MaxMB: *gw.maxMB,
+ })
+ if gws_err != nil {
+ glog.Fatalf("Gateway startup error: %v", gws_err)
+ }
+
+ glog.V(0).Infof("Start Seaweed Gateway %s at %s:%d", util.Version(), *gw.bindIp, *gw.port)
+ gatewayListener, e := util.NewListener(
+ *gw.bindIp+":"+strconv.Itoa(*gw.port),
+ time.Duration(10)*time.Second,
+ )
+ if e != nil {
+ glog.Fatalf("Gateway listener error: %v", e)
+ }
+
+ httpS := &http.Server{Handler: defaultMux}
+ if err := httpS.Serve(gatewayListener); err != nil {
+ glog.Fatalf("Gateway Fail to serve: %v", err)
+ }
+
+}
diff --git a/weed/command/iam.go b/weed/command/iam.go
new file mode 100644
index 000000000..17d0832cb
--- /dev/null
+++ b/weed/command/iam.go
@@ -0,0 +1,97 @@
+package command
+
+import (
+ "context"
+ "fmt"
+ "net/http"
+
+ "github.com/chrislusf/seaweedfs/weed/glog"
+ "github.com/chrislusf/seaweedfs/weed/iamapi"
+ "github.com/chrislusf/seaweedfs/weed/pb"
+ "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+ "github.com/chrislusf/seaweedfs/weed/security"
+ "github.com/chrislusf/seaweedfs/weed/util"
+ "github.com/gorilla/mux"
+ "time"
+)
+
+var (
+ iamStandaloneOptions IamOptions
+)
+
+type IamOptions struct {
+ filer *string
+ masters *string
+ port *int
+}
+
+func init() {
+ cmdIam.Run = runIam // break init cycle
+ iamStandaloneOptions.filer = cmdIam.Flag.String("filer", "localhost:8888", "filer server address")
+ iamStandaloneOptions.masters = cmdIam.Flag.String("master", "localhost:9333", "comma-separated master servers")
+ iamStandaloneOptions.port = cmdIam.Flag.Int("port", 8111, "iam server http listen port")
+}
+
+var cmdIam = &Command{
+ UsageLine: "iam [-port=8111] [-filer=] [-master=,]",
+ Short: "start an IAM API compatible server",
+ Long: "start an IAM API compatible server.",
+}
+
+func runIam(cmd *Command, args []string) bool {
+ return iamStandaloneOptions.startIamServer()
+}
+
+func (iamopt *IamOptions) startIamServer() bool {
+ filerGrpcAddress, err := pb.ParseServerToGrpcAddress(*iamopt.filer)
+ if err != nil {
+ glog.Fatal(err)
+ return false
+ }
+
+ grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.client")
+ for {
+ err = pb.WithGrpcFilerClient(filerGrpcAddress, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
+ resp, err := client.GetFilerConfiguration(context.Background(), &filer_pb.GetFilerConfigurationRequest{})
+ if err != nil {
+ return fmt.Errorf("get filer %s configuration: %v", filerGrpcAddress, err)
+ }
+ glog.V(0).Infof("IAM read filer configuration: %s", resp)
+ return nil
+ })
+ if err != nil {
+ glog.V(0).Infof("wait to connect to filer %s grpc address %s", *iamopt.filer, filerGrpcAddress)
+ time.Sleep(time.Second)
+ } else {
+ glog.V(0).Infof("connected to filer %s grpc address %s", *iamopt.filer, filerGrpcAddress)
+ break
+ }
+ }
+
+ router := mux.NewRouter().SkipClean(true)
+ _,
iamApiServer_err := iamapi.NewIamApiServer(router, &iamapi.IamServerOption{ + Filer: *iamopt.filer, + Port: *iamopt.port, + FilerGrpcAddress: filerGrpcAddress, + GrpcDialOption: grpcDialOption, + }) + glog.V(0).Info("NewIamApiServer created") + if iamApiServer_err != nil { + glog.Fatalf("IAM API Server startup error: %v", iamApiServer_err) + } + + httpS := &http.Server{Handler: router} + + listenAddress := fmt.Sprintf(":%d", *iamopt.port) + iamApiListener, err := util.NewListener(listenAddress, time.Duration(10)*time.Second) + if err != nil { + glog.Fatalf("IAM API Server listener on %s error: %v", listenAddress, err) + } + + glog.V(0).Infof("Start Seaweed IAM API Server %s at http port %d", util.Version(), *iamopt.port) + if err = httpS.Serve(iamApiListener); err != nil { + glog.Fatalf("IAM API Server Fail to serve: %v", err) + } + + return true +} diff --git a/weed/command/master.go b/weed/command/master.go index 8d0a3289c..0f5e2156d 100644 --- a/weed/command/master.go +++ b/weed/command/master.go @@ -1,22 +1,25 @@ package command import ( + "github.com/chrislusf/raft/protobuf" + "github.com/gorilla/mux" + "google.golang.org/grpc/reflection" "net/http" "os" - "runtime" + "sort" "strconv" "strings" + "time" + + "github.com/chrislusf/seaweedfs/weed/util/grace" - "github.com/chrislusf/raft/protobuf" "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb" "github.com/chrislusf/seaweedfs/weed/pb/master_pb" "github.com/chrislusf/seaweedfs/weed/security" "github.com/chrislusf/seaweedfs/weed/server" "github.com/chrislusf/seaweedfs/weed/storage/backend" "github.com/chrislusf/seaweedfs/weed/util" - "github.com/gorilla/mux" - "github.com/spf13/viper" - "google.golang.org/grpc/reflection" ) var ( @@ -24,38 +27,40 @@ var ( ) type MasterOptions struct { - port *int - ip *string - ipBind *string - metaFolder *string - peers *string - volumeSizeLimitMB *uint - volumePreallocate *bool - pulseSeconds *int + port *int + ip *string + ipBind *string + metaFolder *string + peers *string + volumeSizeLimitMB *uint + volumePreallocate *bool + // pulseSeconds *int defaultReplication *string garbageThreshold *float64 whiteList *string disableHttp *bool metricsAddress *string metricsIntervalSec *int + raftResumeState *bool } func init() { cmdMaster.Run = runMaster // break init cycle m.port = cmdMaster.Flag.Int("port", 9333, "http listen port") - m.ip = cmdMaster.Flag.String("ip", "localhost", "master | address") - m.ipBind = cmdMaster.Flag.String("ip.bind", "0.0.0.0", "ip address to bind to") + m.ip = cmdMaster.Flag.String("ip", util.DetectedHostAddress(), "master | address, also used as identifier") + m.ipBind = cmdMaster.Flag.String("ip.bind", "", "ip address to bind to") m.metaFolder = cmdMaster.Flag.String("mdir", os.TempDir(), "data directory to store meta data") - m.peers = cmdMaster.Flag.String("peers", "", "all master nodes in comma separated ip:port list, example: 127.0.0.1:9093,127.0.0.1:9094") + m.peers = cmdMaster.Flag.String("peers", "", "all master nodes in comma separated ip:port list, example: 127.0.0.1:9093,127.0.0.1:9094,127.0.0.1:9095") m.volumeSizeLimitMB = cmdMaster.Flag.Uint("volumeSizeLimitMB", 30*1000, "Master stops directing writes to oversized volumes.") m.volumePreallocate = cmdMaster.Flag.Bool("volumePreallocate", false, "Preallocate disk space for volumes.") - m.pulseSeconds = cmdMaster.Flag.Int("pulseSeconds", 5, "number of seconds between heartbeats") + // m.pulseSeconds = cmdMaster.Flag.Int("pulseSeconds", 5, "number of seconds between heartbeats") 
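pb.ParseServerToGrpcAddress above maps a filer's HTTP address to its gRPC address; judging from the removed parseFilerGrpcAddress helper in the mount.go hunk further below, the convention is HTTP port + 10000. A sketch of that assumption:

package main

import (
	"fmt"
	"strconv"
	"strings"
)

func toGrpcAddress(server string) (string, error) {
	parts := strings.Split(server, ":")
	if len(parts) != 2 {
		return "", fmt.Errorf("expected host:port, got %q", server)
	}
	port, err := strconv.Atoi(parts[1])
	if err != nil {
		return "", fmt.Errorf("parse port: %v", err)
	}
	// gRPC listens 10000 ports above the HTTP port (assumed convention)
	return fmt.Sprintf("%s:%d", parts[0], port+10000), nil
}

func main() {
	addr, _ := toGrpcAddress("localhost:8888")
	fmt.Println(addr) // localhost:18888
}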
m.defaultReplication = cmdMaster.Flag.String("defaultReplication", "000", "Default replication type if not specified.") m.garbageThreshold = cmdMaster.Flag.Float64("garbageThreshold", 0.3, "threshold to vacuum and reclaim spaces") m.whiteList = cmdMaster.Flag.String("whiteList", "", "comma separated Ip addresses having write permission. No limit if empty.") m.disableHttp = cmdMaster.Flag.Bool("disableHttp", false, "disable http requests, only gRPC operations are allowed.") - m.metricsAddress = cmdMaster.Flag.String("metrics.address", "", "Prometheus gateway address") + m.metricsAddress = cmdMaster.Flag.String("metrics.address", "", "Prometheus gateway address :") m.metricsIntervalSec = cmdMaster.Flag.Int("metrics.intervalSeconds", 15, "Prometheus push interval in seconds") + m.raftResumeState = cmdMaster.Flag.Bool("resumeState", false, "resume previous state on start master server") } var cmdMaster = &Command{ @@ -63,7 +68,7 @@ var cmdMaster = &Command{ Short: "start a master server", Long: `start a master server to provide volume=>location mapping service and sequence number of file ids - The configuration file "security.toml" is read from ".", "$HOME/.seaweedfs/", or "/etc/seaweedfs/", in that order. + The configuration file "security.toml" is read from ".", "$HOME/.seaweedfs/", "/usr/local/etc/seaweedfs/", or "/etc/seaweedfs/", in that order. The example security.toml configuration file can be generated by "weed scaffold -config=security" @@ -80,10 +85,13 @@ func runMaster(cmd *Command, args []string) bool { util.LoadConfiguration("security", false) util.LoadConfiguration("master", false) - runtime.GOMAXPROCS(runtime.NumCPU()) - util.SetupProfiling(*masterCpuProfile, *masterMemProfile) + grace.SetupProfiling(*masterCpuProfile, *masterMemProfile) - if err := util.TestFolderWritable(*m.metaFolder); err != nil { + parent, _ := util.FullPath(*m.metaFolder).DirAndName() + if util.FileExists(string(parent)) && !util.FileExists(*m.metaFolder) { + os.MkdirAll(*m.metaFolder, 0755) + } + if err := util.TestFolderWritable(util.ResolvePath(*m.metaFolder)); err != nil { glog.Fatalf("Check Meta Folder (-mdir) Writable %s : %s", *m.metaFolder, err) } @@ -102,23 +110,23 @@ func runMaster(cmd *Command, args []string) bool { func startMaster(masterOption MasterOptions, masterWhiteList []string) { - backend.LoadConfiguration(viper.GetViper()) + backend.LoadConfiguration(util.GetViper()) myMasterAddress, peers := checkPeers(*masterOption.ip, *masterOption.port, *masterOption.peers) r := mux.NewRouter() ms := weed_server.NewMasterServer(r, masterOption.toMasterOption(masterWhiteList), peers) listeningAddress := *masterOption.ipBind + ":" + strconv.Itoa(*masterOption.port) - glog.V(0).Infof("Start Seaweed Master %s at %s", util.VERSION, listeningAddress) + glog.V(0).Infof("Start Seaweed Master %s at %s", util.Version(), listeningAddress) masterListener, e := util.NewListener(listeningAddress, 0) if e != nil { glog.Fatalf("Master startup error: %v", e) } // start raftServer - raftServer := weed_server.NewRaftServer(security.LoadClientTLS(viper.Sub("grpc"), "master"), - peers, myMasterAddress, *masterOption.metaFolder, ms.Topo, *masterOption.pulseSeconds) + raftServer, err := weed_server.NewRaftServer(security.LoadClientTLS(util.GetViper(), "grpc.master"), + peers, myMasterAddress, util.ResolvePath(*masterOption.metaFolder), ms.Topo, *masterOption.raftResumeState) if raftServer == nil { - glog.Fatalf("please verify %s is writable, see https://github.com/chrislusf/seaweedfs/issues/717", 
*masterOption.metaFolder) + glog.Fatalf("please verify %s is writable, see https://github.com/chrislusf/seaweedfs/issues/717: %s", *masterOption.metaFolder, err) } ms.SetRaftServer(raftServer) r.HandleFunc("/cluster/status", raftServer.StatusHandler).Methods("GET") @@ -128,14 +136,22 @@ func startMaster(masterOption MasterOptions, masterWhiteList []string) { if err != nil { glog.Fatalf("master failed to listen on grpc port %d: %v", grpcPort, err) } - // Create your protocol servers. - grpcS := util.NewGrpcServer(security.LoadServerTLS(viper.Sub("grpc"), "master")) + grpcS := pb.NewGrpcServer(security.LoadServerTLS(util.GetViper(), "grpc.master")) master_pb.RegisterSeaweedServer(grpcS, ms) protobuf.RegisterRaftServer(grpcS, raftServer) reflection.Register(grpcS) - glog.V(0).Infof("Start Seaweed Master %s grpc server at %s:%d", util.VERSION, *masterOption.ipBind, grpcPort) + glog.V(0).Infof("Start Seaweed Master %s grpc server at %s:%d", util.Version(), *masterOption.ipBind, grpcPort) go grpcS.Serve(grpcL) + go func() { + time.Sleep(1500 * time.Millisecond) + if ms.Topo.RaftServer.Leader() == "" && ms.Topo.RaftServer.IsLogEmpty() && isTheFirstOne(myMasterAddress, peers) { + if ms.MasterClient.FindLeaderFromOtherPeers(myMasterAddress) == "" { + raftServer.DoJoinCommand() + } + } + }() + go ms.MasterClient.KeepConnectedToMaster() // start http server @@ -146,6 +162,7 @@ func startMaster(masterOption MasterOptions, masterWhiteList []string) { } func checkPeers(masterIp string, masterPort int, peers string) (masterAddress string, cleanedPeers []string) { + glog.V(0).Infof("current: %s:%d peers:%s", masterIp, masterPort, peers) masterAddress = masterIp + ":" + strconv.Itoa(masterPort) if peers != "" { cleanedPeers = strings.Split(peers, ",") @@ -168,13 +185,22 @@ func checkPeers(masterIp string, masterPort int, peers string) (masterAddress st return } +func isTheFirstOne(self string, peers []string) bool { + sort.Strings(peers) + if len(peers) <= 0 { + return true + } + return self == peers[0] +} + func (m *MasterOptions) toMasterOption(whiteList []string) *weed_server.MasterOption { return &weed_server.MasterOption{ - Port: *m.port, - MetaFolder: *m.metaFolder, - VolumeSizeLimitMB: *m.volumeSizeLimitMB, - VolumePreallocate: *m.volumePreallocate, - PulseSeconds: *m.pulseSeconds, + Host: *m.ip, + Port: *m.port, + MetaFolder: *m.metaFolder, + VolumeSizeLimitMB: *m.volumeSizeLimitMB, + VolumePreallocate: *m.volumePreallocate, + // PulseSeconds: *m.pulseSeconds, DefaultReplicaPlacement: *m.defaultReplication, GarbageThreshold: *m.garbageThreshold, WhiteList: whiteList, diff --git a/weed/command/mount.go b/weed/command/mount.go index f09b285f7..5811f0b99 100644 --- a/weed/command/mount.go +++ b/weed/command/mount.go @@ -1,29 +1,38 @@ package command import ( - "fmt" - "strconv" - "strings" + "os" + "time" ) type MountOptions struct { filer *string filerMountRootPath *string dir *string - dirListCacheLimit *int64 + dirAutoCreate *bool collection *string replication *string + diskType *string ttlSec *int chunkSizeLimitMB *int + concurrentWriters *int + cacheDir *string + cacheSizeMB *int64 dataCenter *string allowOthers *bool umaskString *string + nonempty *bool + volumeServerAccess *string + uidMap *string + gidMap *string + readOnly *bool } var ( - mountOptions MountOptions - mountCpuProfile *string - mountMemProfile *string + mountOptions MountOptions + mountCpuProfile *string + mountMemProfile *string + mountReadRetryTime *time.Duration ) func init() { @@ -31,16 +40,27 @@ func init() { 
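The bootstrap check above lets only one node self-elect into an empty raft cluster: peers are sorted, and the lexicographically smallest address wins. A usage sketch with hypothetical peer addresses:

package main

import (
	"fmt"
	"sort"
)

func isTheFirstOne(self string, peers []string) bool {
	sort.Strings(peers)
	if len(peers) <= 0 {
		return true // a single-node cluster bootstraps itself
	}
	return self == peers[0]
}

func main() {
	peers := []string{"192.168.1.3:9333", "192.168.1.1:9333", "192.168.1.2:9333"}
	fmt.Println(isTheFirstOne("192.168.1.1:9333", peers)) // true
	fmt.Println(isTheFirstOne("192.168.1.2:9333", peers)) // false
}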
mountOptions.filer = cmdMount.Flag.String("filer", "localhost:8888", "weed filer location")
mountOptions.filerMountRootPath = cmdMount.Flag.String("filer.path", "/", "mount this remote path from filer server")
mountOptions.dir = cmdMount.Flag.String("dir", ".", "mount weed filer to this directory")
- mountOptions.dirListCacheLimit = cmdMount.Flag.Int64("dirListCacheLimit", 1000000, "limit cache size to speed up directory long format listing")
+ mountOptions.dirAutoCreate = cmdMount.Flag.Bool("dirAutoCreate", false, "auto create the directory to mount to")
mountOptions.collection = cmdMount.Flag.String("collection", "", "collection to create the files")
mountOptions.replication = cmdMount.Flag.String("replication", "", "replication(e.g. 000, 001) to create the files. If empty, let the filer decide.")
+ mountOptions.diskType = cmdMount.Flag.String("disk", "", "[hdd|ssd|] hard drive or solid state drive or any tag")
mountOptions.ttlSec = cmdMount.Flag.Int("ttl", 0, "file ttl in seconds")
- mountOptions.chunkSizeLimitMB = cmdMount.Flag.Int("chunkSizeLimitMB", 4, "local write buffer size, also chunk large files")
+ mountOptions.chunkSizeLimitMB = cmdMount.Flag.Int("chunkSizeLimitMB", 2, "local write buffer size, also chunk large files")
+ mountOptions.concurrentWriters = cmdMount.Flag.Int("concurrentWriters", 32, "limit concurrent goroutine writers if not 0")
+ mountOptions.cacheDir = cmdMount.Flag.String("cacheDir", os.TempDir(), "local cache directory for file chunks and meta data")
+ mountOptions.cacheSizeMB = cmdMount.Flag.Int64("cacheCapacityMB", 1000, "local file chunk cache capacity in MB (0 will disable cache)")
mountOptions.dataCenter = cmdMount.Flag.String("dataCenter", "", "prefer to write to the data center")
mountOptions.allowOthers = cmdMount.Flag.Bool("allowOthers", true, "allows other users to access the file system")
mountOptions.umaskString = cmdMount.Flag.String("umask", "022", "octal umask, e.g., 022, 0111")
+ mountOptions.nonempty = cmdMount.Flag.Bool("nonempty", false, "allows the mounting over a non-empty directory")
+ mountOptions.volumeServerAccess = cmdMount.Flag.String("volumeServerAccess", "direct", "access volume servers by [direct|publicUrl|filerProxy]")
+ mountOptions.uidMap = cmdMount.Flag.String("map.uid", "", "map local uid to uid on filer, comma-separated :")
+ mountOptions.gidMap = cmdMount.Flag.String("map.gid", "", "map local gid to gid on filer, comma-separated :")
+ mountOptions.readOnly = cmdMount.Flag.Bool("readOnly", false, "read only")
+
mountCpuProfile = cmdMount.Flag.String("cpuprofile", "", "cpu profile output file")
mountMemProfile = cmdMount.Flag.String("memprofile", "", "memory profile output file")
+ mountReadRetryTime = cmdMount.Flag.Duration("readRetryTime", 6*time.Second, "maximum read retry wait time")
}

var cmdMount = &Command{
@@ -60,19 +80,3 @@ var cmdMount = &Command{
`,
}
-
-func parseFilerGrpcAddress(filer string) (filerGrpcAddress string, err error) {
- hostnameAndPort := strings.Split(filer, ":")
- if len(hostnameAndPort) != 2 {
- return "", fmt.Errorf("filer should have hostname:port format: %v", hostnameAndPort)
- }
-
- filerPort, parseErr := strconv.ParseUint(hostnameAndPort[1], 10, 64)
- if parseErr != nil {
- return "", fmt.Errorf("filer port parse error: %v", parseErr)
- }
-
- filerGrpcPort := int(filerPort) + 10000
-
- return fmt.Sprintf("%s:%d", hostnameAndPort[0], filerGrpcPort), nil
-}
diff --git a/weed/command/mount_linux.go b/weed/command/mount_linux.go
index 80a5f9da4..25c4f72cf 100644
--- a/weed/command/mount_linux.go
+++
b/weed/command/mount_linux.go @@ -138,9 +138,7 @@ func parseInfoFile(r io.Reader) ([]*Info, error) { } func osSpecificMountOptions() []fuse.MountOption { - return []fuse.MountOption{ - fuse.AllowNonEmptyMount(), - } + return []fuse.MountOption{} } func checkMountPointAvailable(dir string) bool { diff --git a/weed/command/mount_std.go b/weed/command/mount_std.go index 453531d00..2474cf7dd 100644 --- a/weed/command/mount_std.go +++ b/weed/command/mount_std.go @@ -3,7 +3,9 @@ package command import ( + "context" "fmt" + "github.com/chrislusf/seaweedfs/weed/storage/types" "os" "os/user" "path" @@ -12,20 +14,27 @@ import ( "strings" "time" - "github.com/jacobsa/daemonize" - "github.com/spf13/viper" + "github.com/chrislusf/seaweedfs/weed/filesys/meta_cache" + + "github.com/seaweedfs/fuse" + "github.com/seaweedfs/fuse/fs" "github.com/chrislusf/seaweedfs/weed/filesys" "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" "github.com/chrislusf/seaweedfs/weed/security" "github.com/chrislusf/seaweedfs/weed/util" - "github.com/seaweedfs/fuse" - "github.com/seaweedfs/fuse/fs" + "github.com/chrislusf/seaweedfs/weed/util/grace" ) func runMount(cmd *Command, args []string) bool { - util.SetupProfiling(*mountCpuProfile, *mountMemProfile) + grace.SetupProfiling(*mountCpuProfile, *mountMemProfile) + if *mountReadRetryTime < time.Second { + *mountReadRetryTime = time.Second + } + util.RetryWaitTime = *mountReadRetryTime umask, umaskErr := strconv.ParseUint(*mountOptions.umaskString, 8, 64) if umaskErr != nil { @@ -33,27 +42,52 @@ func runMount(cmd *Command, args []string) bool { return false } - return RunMount( - *mountOptions.filer, - *mountOptions.filerMountRootPath, - *mountOptions.dir, - *mountOptions.collection, - *mountOptions.replication, - *mountOptions.dataCenter, - *mountOptions.chunkSizeLimitMB, - *mountOptions.allowOthers, - *mountOptions.ttlSec, - *mountOptions.dirListCacheLimit, - os.FileMode(umask), - ) + if len(args) > 0 { + return false + } + + return RunMount(&mountOptions, os.FileMode(umask)) } -func RunMount(filer, filerMountRootPath, dir, collection, replication, dataCenter string, chunkSizeLimitMB int, - allowOthers bool, ttlSec int, dirListCacheLimit int64, umask os.FileMode) bool { +func RunMount(option *MountOptions, umask os.FileMode) bool { + + filer := *option.filer + // parse filer grpc address + filerGrpcAddress, err := pb.ParseServerToGrpcAddress(filer) + if err != nil { + glog.V(0).Infof("ParseFilerGrpcAddress: %v", err) + return true + } util.LoadConfiguration("security", false) + // try to connect to filer, filerBucketsPath may be useful later + grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.client") + var cipher bool + for i := 0; i < 10; i++ { + err = pb.WithGrpcFilerClient(filerGrpcAddress, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error { + resp, err := client.GetFilerConfiguration(context.Background(), &filer_pb.GetFilerConfigurationRequest{}) + if err != nil { + return fmt.Errorf("get filer grpc address %s configuration: %v", filerGrpcAddress, err) + } + cipher = resp.Cipher + return nil + }) + if err != nil { + glog.V(0).Infof("failed to talk to filer %s: %v", filerGrpcAddress, err) + glog.V(0).Infof("wait for %d seconds ...", i+1) + time.Sleep(time.Duration(i+1) * time.Second) + } + } + if err != nil { + glog.Errorf("failed to talk to filer %s: %v", filerGrpcAddress, err) + return true + } + + filerMountRootPath := *option.filerMountRootPath + dir := 
util.ResolvePath(*option.dir) + chunkSizeLimitMB := *mountOptions.chunkSizeLimitMB - fmt.Printf("This is SeaweedFS version %s %s %s\n", util.VERSION, runtime.GOOS, runtime.GOARCH) + fmt.Printf("This is SeaweedFS version %s %s %s\n", util.Version(), runtime.GOOS, runtime.GOARCH) if dir == "" { fmt.Printf("Please specify the mount directory via \"-dir\"") return false @@ -65,15 +99,21 @@ func RunMount(filer, filerMountRootPath, dir, collection, replication, dataCente fuse.Unmount(dir) - uid, gid := uint32(0), uint32(0) - // detect mount folder mode - mountMode := os.ModeDir | 0755 + if *option.dirAutoCreate { + os.MkdirAll(dir, os.FileMode(0777)&^umask) + } fileInfo, err := os.Stat(dir) + + uid, gid := uint32(0), uint32(0) + mountMode := os.ModeDir | 0777 if err == nil { - mountMode = os.ModeDir | fileInfo.Mode() + mountMode = os.ModeDir | os.FileMode(0777)&^umask uid, gid = util.GetFileUidGid(fileInfo) - fmt.Printf("mount point owner uid=%d gid=%d mode=%s\n", uid, gid, fileInfo.Mode()) + fmt.Printf("mount point owner uid=%d gid=%d mode=%s\n", uid, gid, mountMode) + } else { + fmt.Printf("can not stat %s\n", dir) + return false } if uid == 0 { @@ -88,10 +128,17 @@ func RunMount(filer, filerMountRootPath, dir, collection, replication, dataCente } } + // mapping uid, gid + uidGidMapper, err := meta_cache.NewUidGidMapper(*option.uidMap, *option.gidMap) + if err != nil { + fmt.Printf("failed to parse %s %s: %v\n", *option.uidMap, *option.gidMap, err) + return false + } + // Ensure target mount point availability if isValid := checkMountPointAvailable(dir); !isValid { glog.Fatalf("Expected mount to still be active, target mount point: %s, please check!", dir) - return false + return true } mountName := path.Base(dir) @@ -100,10 +147,8 @@ func RunMount(filer, filerMountRootPath, dir, collection, replication, dataCente fuse.VolumeName(mountName), fuse.FSName(filer + ":" + filerMountRootPath), fuse.Subtype("seaweedfs"), - fuse.NoAppleDouble(), + // fuse.NoAppleDouble(), // include .DS_Store, otherwise can not delete non-empty folders fuse.NoAppleXattr(), - fuse.NoBrowse(), - fuse.AutoXattr(), fuse.ExclCreate(), fuse.DaemonTimeout("3600"), fuse.AllowSUID(), @@ -111,68 +156,77 @@ func RunMount(filer, filerMountRootPath, dir, collection, replication, dataCente fuse.MaxReadahead(1024 * 128), fuse.AsyncRead(), fuse.WritebackCache(), - fuse.AllowNonEmptyMount(), + fuse.MaxBackground(128), + fuse.CongestionThreshold(128), } options = append(options, osSpecificMountOptions()...) - - if allowOthers { + if *option.allowOthers { options = append(options, fuse.AllowOther()) } - - c, err := fuse.Mount(dir, options...) 
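The mount-point mode above is 0777 masked by the -umask flag (parsed as octal), rather than the directory's current mode. A minimal sketch:

package main

import (
	"fmt"
	"os"
	"strconv"
)

func main() {
	umask, err := strconv.ParseUint("022", 8, 64) // -umask=022
	if err != nil {
		panic(err)
	}
	// &^ clears the umask bits out of 0777
	mountMode := os.ModeDir | os.FileMode(0777)&^os.FileMode(umask)
	fmt.Println(mountMode) // drwxr-xr-x
}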
- if err != nil { - glog.V(0).Infof("mount: %v", err) - daemonize.SignalOutcome(err) - return true + if *option.nonempty { + options = append(options, fuse.AllowNonEmptyMount()) } - - util.OnInterrupt(func() { - fuse.Unmount(dir) - c.Close() - }) - - filerGrpcAddress, err := parseFilerGrpcAddress(filer) - if err != nil { - glog.V(0).Infof("parseFilerGrpcAddress: %v", err) - daemonize.SignalOutcome(err) - return true + if *option.readOnly { + options = append(options, fuse.ReadOnly()) } + // find mount point mountRoot := filerMountRootPath if mountRoot != "/" && strings.HasSuffix(mountRoot, "/") { mountRoot = mountRoot[0 : len(mountRoot)-1] } - daemonize.SignalOutcome(nil) + diskType := types.ToDiskType(*option.diskType) - err = fs.Serve(c, filesys.NewSeaweedFileSystem(&filesys.Option{ + seaweedFileSystem := filesys.NewSeaweedFileSystem(&filesys.Option{ + MountDirectory: dir, + FilerAddress: filer, FilerGrpcAddress: filerGrpcAddress, - GrpcDialOption: security.LoadClientTLS(viper.Sub("grpc"), "client"), + GrpcDialOption: grpcDialOption, FilerMountRootPath: mountRoot, - Collection: collection, - Replication: replication, - TtlSec: int32(ttlSec), + Collection: *option.collection, + Replication: *option.replication, + TtlSec: int32(*option.ttlSec), + DiskType: diskType, ChunkSizeLimit: int64(chunkSizeLimitMB) * 1024 * 1024, - DataCenter: dataCenter, - DirListCacheLimit: dirListCacheLimit, - EntryCacheTtl: 3 * time.Second, + ConcurrentWriters: *option.concurrentWriters, + CacheDir: *option.cacheDir, + CacheSizeMB: *option.cacheSizeMB, + DataCenter: *option.dataCenter, MountUid: uid, MountGid: gid, MountMode: mountMode, MountCtime: fileInfo.ModTime(), MountMtime: time.Now(), Umask: umask, - })) + VolumeServerAccess: *mountOptions.volumeServerAccess, + Cipher: cipher, + UidGidMapper: uidGidMapper, + }) + + // mount + c, err := fuse.Mount(dir, options...) 
if err != nil { - fuse.Unmount(dir) + glog.V(0).Infof("mount: %v", err) + return true } + defer fuse.Unmount(dir) + + grace.OnInterrupt(func() { + fuse.Unmount(dir) + c.Close() + }) + + glog.V(0).Infof("mounted %s%s to %s", filer, mountRoot, dir) + server := fs.New(c, nil) + seaweedFileSystem.Server = server + err = server.Serve(seaweedFileSystem) // check if the mount process has an error to report <-c.Ready if err := c.MountError; err != nil { glog.V(0).Infof("mount process: %v", err) - daemonize.SignalOutcome(err) return true } diff --git a/weed/command/msg_broker.go b/weed/command/msg_broker.go new file mode 100644 index 000000000..db0b4148d --- /dev/null +++ b/weed/command/msg_broker.go @@ -0,0 +1,114 @@ +package command + +import ( + "context" + "fmt" + "strconv" + "time" + + "google.golang.org/grpc/reflection" + + "github.com/chrislusf/seaweedfs/weed/util/grace" + + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/messaging/broker" + "github.com/chrislusf/seaweedfs/weed/pb" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/pb/messaging_pb" + "github.com/chrislusf/seaweedfs/weed/security" + "github.com/chrislusf/seaweedfs/weed/util" +) + +var ( + messageBrokerStandaloneOptions MessageBrokerOptions +) + +type MessageBrokerOptions struct { + filer *string + ip *string + port *int + cpuprofile *string + memprofile *string +} + +func init() { + cmdMsgBroker.Run = runMsgBroker // break init cycle + messageBrokerStandaloneOptions.filer = cmdMsgBroker.Flag.String("filer", "localhost:8888", "filer server address") + messageBrokerStandaloneOptions.ip = cmdMsgBroker.Flag.String("ip", util.DetectedHostAddress(), "broker host address") + messageBrokerStandaloneOptions.port = cmdMsgBroker.Flag.Int("port", 17777, "broker gRPC listen port") + messageBrokerStandaloneOptions.cpuprofile = cmdMsgBroker.Flag.String("cpuprofile", "", "cpu profile output file") + messageBrokerStandaloneOptions.memprofile = cmdMsgBroker.Flag.String("memprofile", "", "memory profile output file") +} + +var cmdMsgBroker = &Command{ + UsageLine: "msgBroker [-port=17777] [-filer=]", + Short: "start a message queue broker", + Long: `start a message queue broker + + The broker can accept gRPC calls to write or read messages. The messages are stored via filer. + The brokers are stateless. To scale up, just add more brokers. 
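The broker startup just below follows the common gRPC bootstrap shape: TCP listener, server, service registration, then reflection so tools like grpcurl can introspect it. A stripped-down sketch, with the SeaweedFS-specific registration left as a comment:

package main

import (
	"log"
	"net"

	"google.golang.org/grpc"
	"google.golang.org/grpc/reflection"
)

func main() {
	lis, err := net.Listen("tcp", ":17777") // default broker port
	if err != nil {
		log.Fatalf("listen: %v", err)
	}
	s := grpc.NewServer()
	// messaging_pb.RegisterSeaweedMessagingServer(s, broker) would go here
	reflection.Register(s)
	log.Fatal(s.Serve(lis))
}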
+ +`, +} + +func runMsgBroker(cmd *Command, args []string) bool { + + util.LoadConfiguration("security", false) + + return messageBrokerStandaloneOptions.startQueueServer() + +} + +func (msgBrokerOpt *MessageBrokerOptions) startQueueServer() bool { + + grace.SetupProfiling(*messageBrokerStandaloneOptions.cpuprofile, *messageBrokerStandaloneOptions.memprofile) + + filerGrpcAddress, err := pb.ParseServerToGrpcAddress(*msgBrokerOpt.filer) + if err != nil { + glog.Fatal(err) + return false + } + + grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.msg_broker") + cipher := false + + for { + err = pb.WithGrpcFilerClient(filerGrpcAddress, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error { + resp, err := client.GetFilerConfiguration(context.Background(), &filer_pb.GetFilerConfigurationRequest{}) + if err != nil { + return fmt.Errorf("get filer %s configuration: %v", filerGrpcAddress, err) + } + cipher = resp.Cipher + return nil + }) + if err != nil { + glog.V(0).Infof("wait to connect to filer %s grpc address %s", *msgBrokerOpt.filer, filerGrpcAddress) + time.Sleep(time.Second) + } else { + glog.V(0).Infof("connected to filer %s grpc address %s", *msgBrokerOpt.filer, filerGrpcAddress) + break + } + } + + qs, err := broker.NewMessageBroker(&broker.MessageBrokerOption{ + Filers: []string{*msgBrokerOpt.filer}, + DefaultReplication: "", + MaxMB: 0, + Ip: *msgBrokerOpt.ip, + Port: *msgBrokerOpt.port, + Cipher: cipher, + }, grpcDialOption) + + // start grpc listener + grpcL, err := util.NewListener(":"+strconv.Itoa(*msgBrokerOpt.port), 0) + if err != nil { + glog.Fatalf("failed to listen on grpc port %d: %v", *msgBrokerOpt.port, err) + } + grpcS := pb.NewGrpcServer(security.LoadServerTLS(util.GetViper(), "grpc.msg_broker")) + messaging_pb.RegisterSeaweedMessagingServer(grpcS, qs) + reflection.Register(grpcS) + grpcS.Serve(grpcL) + + return true + +} diff --git a/weed/command/s3.go b/weed/command/s3.go index e004bb066..c8292a7d5 100644 --- a/weed/command/s3.go +++ b/weed/command/s3.go @@ -1,18 +1,21 @@ package command import ( + "context" + "fmt" "net/http" "time" + "github.com/chrislusf/seaweedfs/weed/pb" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" "github.com/chrislusf/seaweedfs/weed/security" - "github.com/spf13/viper" - "fmt" + "github.com/gorilla/mux" "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/s3api" + stats_collect "github.com/chrislusf/seaweedfs/weed/stats" "github.com/chrislusf/seaweedfs/weed/util" - "github.com/gorilla/mux" ) var ( @@ -21,28 +24,104 @@ var ( type S3Options struct { filer *string - filerBucketsPath *string port *int + config *string domainName *string tlsPrivateKey *string tlsCertificate *string + metricsHttpPort *int + allowEmptyFolder *bool } func init() { cmdS3.Run = runS3 // break init cycle s3StandaloneOptions.filer = cmdS3.Flag.String("filer", "localhost:8888", "filer server address") - s3StandaloneOptions.filerBucketsPath = cmdS3.Flag.String("filer.dir.buckets", "/buckets", "folder on filer to store all buckets") s3StandaloneOptions.port = cmdS3.Flag.Int("port", 8333, "s3 server http listen port") - s3StandaloneOptions.domainName = cmdS3.Flag.String("domainName", "", "suffix of the host name, {bucket}.{domainName}") + s3StandaloneOptions.domainName = cmdS3.Flag.String("domainName", "", "suffix of the host name in comma separated list, {bucket}.{domainName}") + s3StandaloneOptions.config = cmdS3.Flag.String("config", "", "path to the config file") s3StandaloneOptions.tlsPrivateKey = 
cmdS3.Flag.String("key.file", "", "path to the TLS private key file") s3StandaloneOptions.tlsCertificate = cmdS3.Flag.String("cert.file", "", "path to the TLS certificate file") + s3StandaloneOptions.metricsHttpPort = cmdS3.Flag.Int("metricsPort", 0, "Prometheus metrics listen port") + s3StandaloneOptions.allowEmptyFolder = cmdS3.Flag.Bool("allowEmptyFolder", false, "allow empty folders") } var cmdS3 = &Command{ - UsageLine: "s3 -port=8333 -filer=", + UsageLine: "s3 [-port=8333] [-filer=] [-config=]", Short: "start a s3 API compatible server that is backed by a filer", Long: `start a s3 API compatible server that is backed by a filer. + By default, you can use any access key and secret key to access the S3 APIs. + To enable credential based access, create a config.json file similar to this: + +{ + "identities": [ + { + "name": "anonymous", + "actions": [ + "Read" + ] + }, + { + "name": "some_admin_user", + "credentials": [ + { + "accessKey": "some_access_key1", + "secretKey": "some_secret_key1" + } + ], + "actions": [ + "Admin", + "Read", + "List", + "Tagging", + "Write" + ] + }, + { + "name": "some_read_only_user", + "credentials": [ + { + "accessKey": "some_access_key2", + "secretKey": "some_secret_key2" + } + ], + "actions": [ + "Read" + ] + }, + { + "name": "some_normal_user", + "credentials": [ + { + "accessKey": "some_access_key3", + "secretKey": "some_secret_key3" + } + ], + "actions": [ + "Read", + "List", + "Tagging", + "Write" + ] + }, + { + "name": "user_limited_to_bucket1", + "credentials": [ + { + "accessKey": "some_access_key4", + "secretKey": "some_secret_key4" + } + ], + "actions": [ + "Read:bucket1", + "List:bucket1", + "Tagging:bucket1", + "Write:bucket1" + ] + } + ] +} + `, } @@ -50,26 +129,61 @@ func runS3(cmd *Command, args []string) bool { util.LoadConfiguration("security", false) + go stats_collect.StartMetricsServer(*s3StandaloneOptions.metricsHttpPort) + return s3StandaloneOptions.startS3Server() } func (s3opt *S3Options) startS3Server() bool { - filerGrpcAddress, err := parseFilerGrpcAddress(*s3opt.filer) + filerGrpcAddress, err := pb.ParseServerToGrpcAddress(*s3opt.filer) if err != nil { glog.Fatal(err) return false } + filerBucketsPath := "/buckets" + + grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.client") + + // metrics read from the filer + var metricsAddress string + var metricsIntervalSec int + + for { + err = pb.WithGrpcFilerClient(filerGrpcAddress, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error { + resp, err := client.GetFilerConfiguration(context.Background(), &filer_pb.GetFilerConfigurationRequest{}) + if err != nil { + return fmt.Errorf("get filer %s configuration: %v", filerGrpcAddress, err) + } + filerBucketsPath = resp.DirBuckets + metricsAddress, metricsIntervalSec = resp.MetricsAddress, int(resp.MetricsIntervalSec) + glog.V(0).Infof("S3 read filer buckets dir: %s", filerBucketsPath) + return nil + }) + if err != nil { + glog.V(0).Infof("wait to connect to filer %s grpc address %s", *s3opt.filer, filerGrpcAddress) + time.Sleep(time.Second) + } else { + glog.V(0).Infof("connected to filer %s grpc address %s", *s3opt.filer, filerGrpcAddress) + break + } + } + + go stats_collect.LoopPushingMetric("s3", stats_collect.SourceName(uint32(*s3opt.port)), metricsAddress, metricsIntervalSec) + router := mux.NewRouter().SkipClean(true) _, s3ApiServer_err := s3api.NewS3ApiServer(router, &s3api.S3ApiServerOption{ Filer: *s3opt.filer, + Port: *s3opt.port, FilerGrpcAddress: filerGrpcAddress, + Config: *s3opt.config, DomainName: 
*s3opt.domainName, - BucketsPath: *s3opt.filerBucketsPath, - GrpcDialOption: security.LoadClientTLS(viper.Sub("grpc"), "client"), + BucketsPath: filerBucketsPath, + GrpcDialOption: grpcDialOption, + AllowEmptyFolder: *s3opt.allowEmptyFolder, }) if s3ApiServer_err != nil { glog.Fatalf("S3 API Server startup error: %v", s3ApiServer_err) @@ -84,12 +198,12 @@ func (s3opt *S3Options) startS3Server() bool { } if *s3opt.tlsPrivateKey != "" { - glog.V(0).Infof("Start Seaweed S3 API Server %s at https port %d", util.VERSION, *s3opt.port) + glog.V(0).Infof("Start Seaweed S3 API Server %s at https port %d", util.Version(), *s3opt.port) if err = httpS.ServeTLS(s3ApiListener, *s3opt.tlsCertificate, *s3opt.tlsPrivateKey); err != nil { glog.Fatalf("S3 API Server Fail to serve: %v", err) } } else { - glog.V(0).Infof("Start Seaweed S3 API Server %s at http port %d", util.VERSION, *s3opt.port) + glog.V(0).Infof("Start Seaweed S3 API Server %s at http port %d", util.Version(), *s3opt.port) if err = httpS.Serve(s3ApiListener); err != nil { glog.Fatalf("S3 API Server Fail to serve: %v", err) } diff --git a/weed/command/scaffold.go b/weed/command/scaffold.go index a76466ed6..88dc94df1 100644 --- a/weed/command/scaffold.go +++ b/weed/command/scaffold.go @@ -14,6 +14,14 @@ var cmdScaffold = &Command{ Short: "generate basic configuration files", Long: `Generate filer.toml with all possible configurations for you to customize. + The options can also be overridden by environment variables. + For example, the filer.toml mysql password can be overridden by the environment variable + export WEED_MYSQL_PASSWORD=some_password + Environment variable rules: + * Prefix the variable name with "WEED_" + * Uppercase the rest of the variable name. + * Replace '.' with '_' + `, } @@ -36,6 +44,8 @@ func runScaffold(cmd *Command, args []string) bool { content = SECURITY_TOML_EXAMPLE case "master": content = MASTER_TOML_EXAMPLE + case "shell": + content = SHELL_TOML_EXAMPLE } if content == "" { println("need a valid -config option") @@ -59,21 +69,43 @@ const ( # $HOME/.seaweedfs/filer.toml # /etc/seaweedfs/filer.toml +#################################################### +# Customizable filer server options +#################################################### +[filer.options] +# with http DELETE, by default the filer would check whether a folder is empty. +# recursive_delete will delete all sub folders and files, similar to "rm -Rf" +recursive_delete = false +# each directory under this folder automatically becomes a separate bucket +buckets_folder = "/buckets" + +#################################################### +# The following are filer store options +#################################################### + [leveldb2] # local on disk, mostly for simple single-machine setup, fairly scalable # faster than previous leveldb, recommended. enabled = true -dir = "." # directory to store level db files +dir = "./filerldb2" # directory to store level db files -#################################################### -# multiple filers on shared storage, fairly scalable -#################################################### +[leveldb3] +# similar to leveldb2. +# each bucket has its own meta store. 
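+# (as an example of the environment variable rules above, export WEED_LEVELDB3_DIR=/data/filerldb3 would override the dir setting below: "WEED_" prefix, uppercase, '.' replaced by '_')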
+enabled = false +dir = "./filerldb3" # directory to store level db files -[mysql] # or tidb +[rocksdb] +# local on disk, similar to leveldb +# since it is using a C wrapper, you need to install rocksdb and build it by yourself +enabled = false +dir = "./filerrdb" # directory to store rocksdb files + +[mysql] # or memsql, tidb # CREATE TABLE IF NOT EXISTS filemeta ( -# dirhash BIGINT COMMENT 'first 64 bits of MD5 hash value of directory field', -# name VARCHAR(1000) COMMENT 'directory or file name', -# directory TEXT COMMENT 'full path to parent directory', +# dirhash BIGINT COMMENT 'first 64 bits of MD5 hash value of directory field', +# name VARCHAR(1000) BINARY COMMENT 'directory or file name', +# directory TEXT COMMENT 'full path to parent directory', # meta LONGBLOB, # PRIMARY KEY (dirhash, name) # ) DEFAULT CHARSET=utf8; @@ -86,9 +118,37 @@ password = "" database = "" # create or use an existing database connection_max_idle = 2 connection_max_open = 100 +connection_max_lifetime_seconds = 0 +interpolateParams = false +# if insert/upsert failing, you can disable upsert or update query syntax to match your RDBMS syntax: +enableUpsert = true +upsertQuery = """INSERT INTO ` + "`%s`" + ` (dirhash,name,directory,meta) VALUES(?,?,?,?) ON DUPLICATE KEY UPDATE meta = VALUES(meta)""" + +[mysql2] # or memsql, tidb +enabled = false +createTable = """ + CREATE TABLE IF NOT EXISTS ` + "`%s`" + ` ( + dirhash BIGINT, + name VARCHAR(1000) BINARY, + directory TEXT, + meta LONGBLOB, + PRIMARY KEY (dirhash, name) + ) DEFAULT CHARSET=utf8; +""" +hostname = "localhost" +port = 3306 +username = "root" +password = "" +database = "" # create or use an existing database +connection_max_idle = 2 +connection_max_open = 100 +connection_max_lifetime_seconds = 0 interpolateParams = false +# if insert/upsert failing, you can disable upsert or update query syntax to match your RDBMS syntax: +enableUpsert = true +upsertQuery = """INSERT INTO ` + "`%s`" + ` (dirhash,name,directory,meta) VALUES(?,?,?,?) 
ON DUPLICATE KEY UPDATE meta = VALUES(meta)""" -[postgres] # or cockroachdb +[postgres] # or cockroachdb, YugabyteDB # CREATE TABLE IF NOT EXISTS filemeta ( # dirhash BIGINT, # name VARCHAR(65535), @@ -101,10 +161,40 @@ hostname = "localhost" port = 5432 username = "postgres" password = "" -database = "" # create or use an existing database +database = "postgres" # create or use an existing database +schema = "" +sslmode = "disable" +connection_max_idle = 100 +connection_max_open = 100 +connection_max_lifetime_seconds = 0 +# if insert/upsert failing, you can disable upsert or update query syntax to match your RDBMS syntax: +enableUpsert = true +upsertQuery = """INSERT INTO "%[1]s" (dirhash,name,directory,meta) VALUES($1,$2,$3,$4) ON CONFLICT (dirhash,name) DO UPDATE SET meta = EXCLUDED.meta WHERE "%[1]s".meta != EXCLUDED.meta""" + +[postgres2] +enabled = false +createTable = """ + CREATE TABLE IF NOT EXISTS "%s" ( + dirhash BIGINT, + name VARCHAR(65535), + directory VARCHAR(65535), + meta bytea, + PRIMARY KEY (dirhash, name) + ); +""" +hostname = "localhost" +port = 5432 +username = "postgres" +password = "" +database = "postgres" # create or use an existing database +schema = "" sslmode = "disable" connection_max_idle = 100 connection_max_open = 100 +connection_max_lifetime_seconds = 0 +# if insert/upsert failing, you can disable upsert or update query syntax to match your RDBMS syntax: +enableUpsert = true +upsertQuery = """INSERT INTO "%[1]s" (dirhash,name,directory,meta) VALUES($1,$2,$3,$4) ON CONFLICT (dirhash,name) DO UPDATE SET meta = EXCLUDED.meta WHERE "%[1]s".meta != EXCLUDED.meta""" [cassandra] # CREATE TABLE filemeta ( @@ -118,14 +208,25 @@ keyspace="seaweedfs" hosts=[ "localhost:9042", ] +username="" +password="" +# This changes the data layout. Only add new directories. Removing/Updating will cause data loss. +superLargeDirectories = [] + +[hbase] +enabled = false +zkquorum = "" +table = "seaweedfs" -[redis] +[redis2] enabled = false address = "localhost:6379" password = "" database = 0 +# This changes the data layout. Only add new directories. Removing/Updating will cause data loss. +superLargeDirectories = [] -[redis_cluster] +[redis_cluster2] enabled = false addresses = [ "localhost:30001", @@ -136,20 +237,58 @@ addresses = [ "localhost:30006", ] password = "" -// allows reads from slave servers or the master, but all writes still go to the master -readOnly = true -// automatically use the closest Redis server for reads -routeByLatency = true +# allows reads from slave servers or the master, but all writes still go to the master +readOnly = false +# automatically use the closest Redis server for reads +routeByLatency = false +# This changes the data layout. Only add new directories. Removing/Updating will cause data loss. +superLargeDirectories = [] [etcd] enabled = false servers = "localhost:2379" timeout = "3s" -[tikv] +[mongodb] enabled = false -pdAddress = "192.168.199.113:2379" +uri = "mongodb://localhost:27017" +option_pool_size = 0 +database = "seaweedfs" +[elastic7] +enabled = false +servers = [ + "http://localhost1:9200", + "http://localhost2:9200", + "http://localhost3:9200", +] +username = "" +password = "" +sniff_enabled = false +healthcheck_enabled = false +# increasing the value is recommended; be sure the value in Elastic is greater than or equal to this +index.max_result_window = 10000 + + + +########################## +########################## +# To add a path-specific filer store: +# +# 1. Add a name following the store type separated by a dot ".". 
E.g., cassandra.tmp +# 2. Add a location configuration. E.g., location = "/tmp/" +# 3. Copy and customize all other configurations. +# Make sure they are not the same if using the same store type! +# 4. Set enabled to true +# +# The following just uses redis as an example ########################## +[redis2.tmp] +enabled = false +location = "/tmp/" +address = "localhost:6379" +password = "" +database = 1 ` @@ -204,7 +343,8 @@ enabled = false # This URL will Dial the RabbitMQ server at the URL in the environment # variable RABBIT_SERVER_URL and open the exchange "myexchange". # The exchange must have already been created by some other means, like -# the RabbitMQ management plugin. +# the RabbitMQ management plugin. Create myexchange of type fanout and myqueue, then +# create a binding myexchange => myqueue topic_url = "rabbit://myexchange" sub_url = "rabbit://myqueue" ` @@ -225,6 +365,19 @@ grpcAddress = "localhost:18888" # i.e., all files with this "prefix" are sent to notification message queue. directory = "/buckets" +[sink.local] +enabled = false +directory = "/data" +# all replicated files are stored under yyyy-mm-dd directories named by modified time, +# so each date directory contains all new and updated files. +is_incremental = false + +[sink.local_incremental] +# all replicated files are stored under yyyy-mm-dd directories named by modified time, +# so each date directory contains all new and updated files. +enabled = false +directory = "/backup" + [sink.filer] enabled = false grpcAddress = "localhost:18888" @@ -235,6 +388,7 @@ directory = "/backup" replication = "" collection = "" ttlSec = 0 +is_incremental = false [sink.s3] # read credentials doc at https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/sessions.html @@ -245,6 +399,8 @@ aws_secret_access_key = "" # if empty, loads from the shared credentials fil region = "us-east-2" bucket = "your_bucket_name" # an existing bucket directory = "/" # destination directory +endpoint = "" +is_incremental = false [sink.google_cloud_storage] # read credentials doc at https://cloud.google.com/docs/authentication/getting-started @@ -252,6 +408,7 @@ enabled = false google_application_credentials = "/path/to/x.json" # path to json credential file bucket = "your_bucket_seaweedfs" # an existing bucket directory = "/" # destination directory +is_incremental = false [sink.azure] # experimental, let me know if it works @@ -260,6 +417,7 @@ account_name = "" account_key = "" container = "mycontainer" # an existing container directory = "/" # destination directory +is_incremental = false [sink.backblaze] enabled = false @@ -267,6 +425,7 @@ b2_account_id = "" b2_master_application_key = "" bucket = "mybucket" # an existing bucket directory = "/" # destination directory +is_incremental = false ` @@ -293,18 +452,28 @@ expires_after_seconds = 10 # seconds # the host name is not checked, so the PERM files can be shared. 
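+# A hypothetical mutual-TLS setup, for illustration only (the file paths and common names below are made-up examples): give each component its own certificate and list the peer certificate common names it should accept, e.g.
+# [grpc.volume]
+# cert = "/etc/seaweedfs/volume01.crt"
+# key = "/etc/seaweedfs/volume01.key"
+# allowed_commonNames = "master01.mycompany.com,filer01.mycompany.com"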
[grpc] ca = "" +# Set a wildcard domain to enable TLS authentication by common names +allowed_wildcard_domain = "" # .mycompany.com [grpc.volume] cert = "" key = "" +allowed_commonNames = "" # comma-separated SSL certificate common names [grpc.master] cert = "" key = "" +allowed_commonNames = "" # comma-separated SSL certificate common names [grpc.filer] cert = "" key = "" +allowed_commonNames = "" # comma-separated SSL certificate common names + +[grpc.msg_broker] +cert = "" +key = "" +allowed_commonNames = "" # comma-separated SSL certificate common names # use this for any place needs a grpc client # i.e., "weed backup|benchmark|filer.copy|filer.replicate|mount|s3|upload" @@ -312,7 +481,6 @@ key = "" cert = "" key = "" - # volume server https options # Note: work in progress! # this does not work with other clients, e.g., "weed filer|mount" etc, yet. @@ -335,23 +503,29 @@ key = "" [master.maintenance] # periodically run these scripts are the same as running them from 'weed shell' scripts = """ + lock ec.encode -fullPercent=95 -quietFor=1h ec.rebuild -force ec.balance -force volume.balance -force + volume.fix.replication + unlock """ sleep_minutes = 17 # sleep minutes between each script execution [master.filer] -default_filer_url = "http://localhost:8888/" +default = "localhost:8888" # used by maintenance scripts if the scripts need to use fs related commands + [master.sequencer] -type = "memory" # Choose [memory|etcd] type for storing the file id sequence +type = "raft" # Choose [raft|etcd|snowflake] type for storing the file id sequence # when sequencer.type = etcd, set listen client urls of etcd cluster that store file id sequence # example : http://127.0.0.1:2379,http://127.0.0.1:2389 sequencer_etcd_urls = "http://127.0.0.1:2379" +# configurations for tiered cloud storage +# old volumes are transparently moved to cloud for cost efficiency [storage.backend] [storage.backend.s3.default] enabled = false @@ -359,6 +533,41 @@ sequencer_etcd_urls = "http://127.0.0.1:2379" aws_secret_access_key = "" # if empty, loads from the shared credentials file (~/.aws/credentials). region = "us-east-2" bucket = "your_bucket_name" # an existing bucket + endpoint = "" + +# create this number of logical volumes if there are no more writable volumes +# count_x means how many copies of data. +# e.g.: +# 000 has only one copy, copy_1 +# 010 and 001 have two copies, copy_2 +# 011 has three copies, copy_3 +[master.volume_growth] +copy_1 = 7 # create 1 x 7 = 7 actual volumes +copy_2 = 6 # create 2 x 6 = 12 actual volumes +copy_3 = 3 # create 3 x 3 = 9 actual volumes +copy_other = 1 # create n x 1 = n actual volumes + +# configuration flags for replication +[master.replication] +# any replication counts should be considered minimums. If you specify 010 and +# have 3 different racks, that's still considered writable. Writes will still +# try to replicate to all available volumes. You should only use this option +# if you are doing your own replication or periodic sync of volumes. 
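+# e.g. with treat_replication_as_minimums = true, replication 010 and three racks, the volume still counts as writable and a write will still try to reach every available replica.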
+treat_replication_as_minimums = false + +` + SHELL_TOML_EXAMPLE = ` + +[cluster] +default = "c1" + +[cluster.c1] +master = "localhost:9333" # comma-separated master servers +filer = "localhost:8888" # filer host and port + +[cluster.c2] +master = "" +filer = "" ` ) diff --git a/weed/command/server.go b/weed/command/server.go index 87f404ed3..6eb3bf97c 100644 --- a/weed/command/server.go +++ b/weed/command/server.go @@ -2,26 +2,30 @@ package command import ( "fmt" + "github.com/chrislusf/seaweedfs/weed/util/grace" "os" - "runtime" - "runtime/pprof" "strings" "time" + stats_collect "github.com/chrislusf/seaweedfs/weed/stats" + "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/util" ) type ServerOptions struct { cpuprofile *string + memprofile *string v VolumeServerOptions } var ( - serverOptions ServerOptions - masterOptions MasterOptions - filerOptions FilerOptions - s3Options S3Options + serverOptions ServerOptions + masterOptions MasterOptions + filerOptions FilerOptions + s3Options S3Options + webdavOptions WebDavOption + msgBrokerOptions MessageBrokerOptions ) func init() { @@ -29,7 +33,7 @@ func init() { } var cmdServer = &Command{ - UsageLine: "server -port=8080 -dir=/tmp -volume.max=5 -ip=server_name", + UsageLine: "server -dir=/tmp -volume.max=5 -ip=server_name", Short: "start a master server, a volume server, and optionally a filer and a S3 gateway", Long: `start both a volume server to provide storage spaces and a master server to provide volume=>location mapping service and sequence number of file ids @@ -45,24 +49,34 @@ var cmdServer = &Command{ } var ( - serverIp = cmdServer.Flag.String("ip", "localhost", "ip or server name") - serverBindIp = cmdServer.Flag.String("ip.bind", "0.0.0.0", "ip address to bind to") + serverIp = cmdServer.Flag.String("ip", util.DetectedHostAddress(), "ip or server name, also used as identifier") + serverBindIp = cmdServer.Flag.String("ip.bind", "", "ip address to bind to") serverTimeout = cmdServer.Flag.Int("idleTimeout", 30, "connection idle seconds") serverDataCenter = cmdServer.Flag.String("dataCenter", "", "current volume server's data center name") serverRack = cmdServer.Flag.String("rack", "", "current volume server's rack name") serverWhiteListOption = cmdServer.Flag.String("whiteList", "", "comma separated Ip addresses having write permission. No limit if empty.") serverDisableHttp = cmdServer.Flag.Bool("disableHttp", false, "disable http requests, only gRPC operations are allowed.") volumeDataFolders = cmdServer.Flag.String("dir", os.TempDir(), "directories to store data files. dir[,dir]...") - volumeMaxDataVolumeCounts = cmdServer.Flag.String("volume.max", "7", "maximum numbers of volumes, count[,count]...") - pulseSeconds = cmdServer.Flag.Int("pulseSeconds", 5, "number of seconds between heartbeats") - isStartingFiler = cmdServer.Flag.Bool("filer", false, "whether to start filer") - isStartingS3 = cmdServer.Flag.Bool("s3", false, "whether to start S3 gateway") + volumeMaxDataVolumeCounts = cmdServer.Flag.String("volume.max", "8", "maximum numbers of volumes, count[,count]... If set to zero, the limit will be auto configured.") + volumeMinFreeSpacePercent = cmdServer.Flag.String("volume.minFreeSpacePercent", "1", "minimum free disk space (default to 1%). 
Low disk space will mark all volumes as ReadOnly.") + serverMetricsHttpPort = cmdServer.Flag.Int("metricsPort", 0, "Prometheus metrics listen port") + + // pulseSeconds = cmdServer.Flag.Int("pulseSeconds", 5, "number of seconds between heartbeats") + isStartingMasterServer = cmdServer.Flag.Bool("master", true, "whether to start master server") + isStartingVolumeServer = cmdServer.Flag.Bool("volume", true, "whether to start volume server") + isStartingFiler = cmdServer.Flag.Bool("filer", false, "whether to start filer") + isStartingS3 = cmdServer.Flag.Bool("s3", false, "whether to start S3 gateway") + isStartingWebDav = cmdServer.Flag.Bool("webdav", false, "whether to start WebDAV gateway") + isStartingMsgBroker = cmdServer.Flag.Bool("msgBroker", false, "whether to start message broker") serverWhiteList []string + + False = false ) func init() { serverOptions.cpuprofile = cmdServer.Flag.String("cpuprofile", "", "cpu profile output file") + serverOptions.memprofile = cmdServer.Flag.String("memprofile", "", "memory profile output file") masterOptions.port = cmdServer.Flag.Int("master.port", 9333, "master server http listen port") masterOptions.metaFolder = cmdServer.Flag.String("master.dir", "", "data directory to store meta data, default to same as -dir specified") @@ -73,29 +87,52 @@ func init() { masterOptions.garbageThreshold = cmdServer.Flag.Float64("garbageThreshold", 0.3, "threshold to vacuum and reclaim spaces") masterOptions.metricsAddress = cmdServer.Flag.String("metrics.address", "", "Prometheus gateway address") masterOptions.metricsIntervalSec = cmdServer.Flag.Int("metrics.intervalSeconds", 15, "Prometheus push interval in seconds") + masterOptions.raftResumeState = cmdServer.Flag.Bool("resumeState", false, "resume previous state on start master server") filerOptions.collection = cmdServer.Flag.String("filer.collection", "", "all data will be stored in this collection") filerOptions.port = cmdServer.Flag.Int("filer.port", 8888, "filer server http listen port") filerOptions.publicPort = cmdServer.Flag.Int("filer.port.public", 0, "filer server public http listen port") - filerOptions.defaultReplicaPlacement = cmdServer.Flag.String("filer.defaultReplicaPlacement", "", "Default replication type if not specified during runtime.") - filerOptions.redirectOnRead = cmdServer.Flag.Bool("filer.redirectOnRead", false, "whether proxy or redirect to volume server during file GET request") + filerOptions.defaultReplicaPlacement = cmdServer.Flag.String("filer.defaultReplicaPlacement", "", "default replication type. 
If not specified, use master setting.") filerOptions.disableDirListing = cmdServer.Flag.Bool("filer.disableDirListing", false, "turn off directory listing") - filerOptions.maxMB = cmdServer.Flag.Int("filer.maxMB", 32, "split files larger than the limit") + filerOptions.maxMB = cmdServer.Flag.Int("filer.maxMB", 4, "split files larger than the limit") filerOptions.dirListingLimit = cmdServer.Flag.Int("filer.dirListLimit", 1000, "limit sub dir listing size") + filerOptions.cipher = cmdServer.Flag.Bool("filer.encryptVolumeData", false, "encrypt data on volume servers") + filerOptions.peers = cmdServer.Flag.String("filer.peers", "", "all filers sharing the same filer store in comma separated ip:port list") + filerOptions.saveToFilerLimit = cmdServer.Flag.Int("filer.saveToFilerLimit", 0, "Small files smaller than this limit can be cached in filer store.") + filerOptions.concurrentUploadLimitMB = cmdServer.Flag.Int("filer.concurrentUploadLimitMB", 64, "limit total concurrent upload size") serverOptions.v.port = cmdServer.Flag.Int("volume.port", 8080, "volume server http listen port") serverOptions.v.publicPort = cmdServer.Flag.Int("volume.port.public", 0, "volume server public port") serverOptions.v.indexType = cmdServer.Flag.String("volume.index", "memory", "Choose [memory|leveldb|leveldbMedium|leveldbLarge] mode for memory~performance balance.") + serverOptions.v.diskType = cmdServer.Flag.String("volume.disk", "", "[hdd|ssd|] hard drive or solid state drive or any tag") serverOptions.v.fixJpgOrientation = cmdServer.Flag.Bool("volume.images.fix.orientation", false, "Adjust jpg orientation when uploading.") serverOptions.v.readRedirect = cmdServer.Flag.Bool("volume.read.redirect", true, "Redirect moved or non-local volumes.") serverOptions.v.compactionMBPerSecond = cmdServer.Flag.Int("volume.compactionMBps", 0, "limit compaction speed in mega bytes per second") + serverOptions.v.fileSizeLimitMB = cmdServer.Flag.Int("volume.fileSizeLimitMB", 256, "limit file size to avoid out of memory") + serverOptions.v.concurrentUploadLimitMB = cmdServer.Flag.Int("volume.concurrentUploadLimitMB", 64, "limit total concurrent upload size") serverOptions.v.publicUrl = cmdServer.Flag.String("volume.publicUrl", "", "publicly accessible address") + serverOptions.v.preStopSeconds = cmdServer.Flag.Int("volume.preStopSeconds", 10, "number of seconds between stop send heartbeats and stop volume server") + serverOptions.v.pprof = cmdServer.Flag.Bool("volume.pprof", false, "enable pprof http handlers. 
precludes --memprofile and --cpuprofile") + serverOptions.v.idxFolder = cmdServer.Flag.String("volume.dir.idx", "", "directory to store .idx files") + serverOptions.v.enableTcp = cmdServer.Flag.Bool("volume.tcp", false, " enable tcp port") - s3Options.filerBucketsPath = cmdServer.Flag.String("s3.filer.dir.buckets", "/buckets", "folder on filer to store all buckets") s3Options.port = cmdServer.Flag.Int("s3.port", 8333, "s3 server http listen port") - s3Options.domainName = cmdServer.Flag.String("s3.domainName", "", "suffix of the host name, {bucket}.{domainName}") + s3Options.domainName = cmdServer.Flag.String("s3.domainName", "", "suffix of the host name in comma separated list, {bucket}.{domainName}") s3Options.tlsPrivateKey = cmdServer.Flag.String("s3.key.file", "", "path to the TLS private key file") s3Options.tlsCertificate = cmdServer.Flag.String("s3.cert.file", "", "path to the TLS certificate file") + s3Options.config = cmdServer.Flag.String("s3.config", "", "path to the config file") + s3Options.allowEmptyFolder = cmdServer.Flag.Bool("s3.allowEmptyFolder", false, "allow empty folders") + + webdavOptions.port = cmdServer.Flag.Int("webdav.port", 7333, "webdav server http listen port") + webdavOptions.collection = cmdServer.Flag.String("webdav.collection", "", "collection to create the files") + webdavOptions.replication = cmdServer.Flag.String("webdav.replication", "", "replication to create the files") + webdavOptions.disk = cmdServer.Flag.String("webdav.disk", "", "[hdd|ssd|] hard drive or solid state drive or any tag") + webdavOptions.tlsPrivateKey = cmdServer.Flag.String("webdav.key.file", "", "path to the TLS private key file") + webdavOptions.tlsCertificate = cmdServer.Flag.String("webdav.cert.file", "", "path to the TLS certificate file") + webdavOptions.cacheDir = cmdServer.Flag.String("webdav.cacheDir", os.TempDir(), "local cache directory for file chunks") + webdavOptions.cacheSizeMB = cmdServer.Flag.Int64("webdav.cacheCapacityMB", 1000, "local cache capacity in MB") + + msgBrokerOptions.port = cmdServer.Flag.Int("msgBroker.port", 17777, "broker gRPC listen port") } @@ -104,55 +141,54 @@ func runServer(cmd *Command, args []string) bool { util.LoadConfiguration("security", false) util.LoadConfiguration("master", false) - if *serverOptions.cpuprofile != "" { - f, err := os.Create(*serverOptions.cpuprofile) - if err != nil { - glog.Fatal(err) - } - pprof.StartCPUProfile(f) - defer pprof.StopCPUProfile() - } + grace.SetupProfiling(*serverOptions.cpuprofile, *serverOptions.memprofile) - if *filerOptions.redirectOnRead { + if *isStartingS3 { *isStartingFiler = true } - - if *isStartingS3 { + if *isStartingWebDav { + *isStartingFiler = true + } + if *isStartingMsgBroker { *isStartingFiler = true } - _, peerList := checkPeers(*serverIp, *masterOptions.port, *masterOptions.peers) - peers := strings.Join(peerList, ",") - masterOptions.peers = &peers + if *isStartingMasterServer { + _, peerList := checkPeers(*serverIp, *masterOptions.port, *masterOptions.peers) + peers := strings.Join(peerList, ",") + masterOptions.peers = &peers + } + // ip address masterOptions.ip = serverIp masterOptions.ipBind = serverBindIp - filerOptions.masters = &peers - filerOptions.ip = serverBindIp + filerOptions.masters = masterOptions.peers + filerOptions.ip = serverIp + filerOptions.bindIp = serverBindIp serverOptions.v.ip = serverIp serverOptions.v.bindIp = serverBindIp - serverOptions.v.masters = &peers + serverOptions.v.masters = masterOptions.peers serverOptions.v.idleConnectionTimeout = 
serverTimeout serverOptions.v.dataCenter = serverDataCenter serverOptions.v.rack = serverRack + msgBrokerOptions.ip = serverIp - serverOptions.v.pulseSeconds = pulseSeconds - masterOptions.pulseSeconds = pulseSeconds + // serverOptions.v.pulseSeconds = pulseSeconds + // masterOptions.pulseSeconds = pulseSeconds masterOptions.whiteList = serverWhiteListOption filerOptions.dataCenter = serverDataCenter + filerOptions.rack = serverRack filerOptions.disableHttp = serverDisableHttp masterOptions.disableHttp = serverDisableHttp filerAddress := fmt.Sprintf("%s:%d", *serverIp, *filerOptions.port) s3Options.filer = &filerAddress + webdavOptions.filer = &filerAddress + msgBrokerOptions.filer = &filerAddress - if *filerOptions.defaultReplicaPlacement == "" { - *filerOptions.defaultReplicaPlacement = *masterOptions.defaultReplication - } - - runtime.GOMAXPROCS(runtime.NumCPU()) + go stats_collect.StartMetricsServer(*serverMetricsHttpPort) folders := strings.Split(*volumeDataFolders, ",") @@ -163,7 +199,7 @@ func runServer(cmd *Command, args []string) bool { if *masterOptions.metaFolder == "" { *masterOptions.metaFolder = folders[0] } - if err := util.TestFolderWritable(*masterOptions.metaFolder); err != nil { + if err := util.TestFolderWritable(util.ResolvePath(*masterOptions.metaFolder)); err != nil { glog.Fatalf("Check Meta Folder (-mdir=\"%s\") Writable: %s", *masterOptions.metaFolder, err) } filerOptions.defaultLevelDbDirectory = masterOptions.metaFolder @@ -190,12 +226,33 @@ func runServer(cmd *Command, args []string) bool { }() } + if *isStartingWebDav { + go func() { + time.Sleep(2 * time.Second) + + webdavOptions.startWebDav() + + }() + } + + if *isStartingMsgBroker { + go func() { + time.Sleep(2 * time.Second) + msgBrokerOptions.startQueueServer() + }() + } + // start volume server - { - go serverOptions.v.startVolumeServer(*volumeDataFolders, *volumeMaxDataVolumeCounts, *serverWhiteListOption) + if *isStartingVolumeServer { + go serverOptions.v.startVolumeServer(*volumeDataFolders, *volumeMaxDataVolumeCounts, *serverWhiteListOption, *volumeMinFreeSpacePercent) + + } + + if *isStartingMasterServer { + go startMaster(masterOptions, serverWhiteList) } - startMaster(masterOptions, serverWhiteList) + select {} return true } diff --git a/weed/command/shell.go b/weed/command/shell.go index 34b5aef31..c9976e809 100644 --- a/weed/command/shell.go +++ b/weed/command/shell.go @@ -6,18 +6,19 @@ import ( "github.com/chrislusf/seaweedfs/weed/security" "github.com/chrislusf/seaweedfs/weed/shell" "github.com/chrislusf/seaweedfs/weed/util" - "github.com/spf13/viper" ) var ( - shellOptions shell.ShellOptions - shellInitialFilerUrl *string + shellOptions shell.ShellOptions + shellInitialFiler *string + shellCluster *string ) func init() { cmdShell.Run = runShell // break init cycle - shellOptions.Masters = cmdShell.Flag.String("master", "localhost:9333", "comma-separated master servers") - shellInitialFilerUrl = cmdShell.Flag.String("filer.url", "http://localhost:8888/", "initial filer url") + shellOptions.Masters = cmdShell.Flag.String("master", "", "comma-separated master servers, e.g. localhost:9333") + shellInitialFiler = cmdShell.Flag.String("filer", "", "filer host and port, e.g. localhost:8888") + shellCluster = cmdShell.Flag.String("cluster", "", "cluster defined in shell.toml") } var cmdShell = &Command{ @@ -25,20 +26,40 @@ var cmdShell = &Command{ Short: "run interactive administrative commands", Long: `run interactive administrative commands. 
+ Generate shell.toml via "weed scaffold -config=shell" + `, } func runShell(command *Command, args []string) bool { util.LoadConfiguration("security", false) - shellOptions.GrpcDialOption = security.LoadClientTLS(viper.Sub("grpc"), "client") + shellOptions.GrpcDialOption = security.LoadClientTLS(util.GetViper(), "grpc.client") + + if *shellOptions.Masters == "" && *shellInitialFiler == "" { + util.LoadConfiguration("shell", false) + v := util.GetViper() + cluster := v.GetString("cluster.default") + if *shellCluster != "" { + cluster = *shellCluster + } + if cluster == "" { + *shellOptions.Masters, *shellInitialFiler = "localhost:9333", "localhost:8888" + } else { + *shellOptions.Masters = v.GetString("cluster." + cluster + ".master") + *shellInitialFiler = v.GetString("cluster." + cluster + ".filer") + } + } + + fmt.Printf("master: %s filer: %s\n", *shellOptions.Masters, *shellInitialFiler) - var filerPwdErr error - shellOptions.FilerHost, shellOptions.FilerPort, shellOptions.Directory, filerPwdErr = util.ParseFilerUrl(*shellInitialFilerUrl) - if filerPwdErr != nil { - fmt.Printf("failed to parse url filer.url=%s : %v\n", *shellInitialFilerUrl, filerPwdErr) + var err error + shellOptions.FilerHost, shellOptions.FilerPort, err = util.ParseHostPort(*shellInitialFiler) + if err != nil { + fmt.Printf("failed to parse filer %s: %v\n", *shellInitialFiler, err) return false } + shellOptions.Directory = "/" shell.RunShell(shellOptions) diff --git a/weed/command/upload.go b/weed/command/upload.go index 25e938d9b..0f9361b40 100644 --- a/weed/command/upload.go +++ b/weed/command/upload.go @@ -1,16 +1,18 @@ package command import ( + "context" "encoding/json" "fmt" + "github.com/chrislusf/seaweedfs/weed/pb" + "github.com/chrislusf/seaweedfs/weed/pb/master_pb" + "google.golang.org/grpc" "os" "path/filepath" + "github.com/chrislusf/seaweedfs/weed/operation" "github.com/chrislusf/seaweedfs/weed/security" "github.com/chrislusf/seaweedfs/weed/util" - "github.com/spf13/viper" - - "github.com/chrislusf/seaweedfs/weed/operation" ) var ( @@ -18,14 +20,16 @@ var ( ) type UploadOptions struct { - master *string - dir *string - include *string - replication *string - collection *string - dataCenter *string - ttl *string - maxMB *int + master *string + dir *string + include *string + replication *string + collection *string + dataCenter *string + ttl *string + diskType *string + maxMB *int + usePublicUrl *bool } func init() { @@ -37,8 +41,10 @@ func init() { upload.replication = cmdUpload.Flag.String("replication", "", "replication type") upload.collection = cmdUpload.Flag.String("collection", "", "optional collection name") upload.dataCenter = cmdUpload.Flag.String("dataCenter", "", "optional data center name") + upload.diskType = cmdUpload.Flag.String("disk", "", "[hdd|ssd|] hard drive or solid state drive or any tag") upload.ttl = cmdUpload.Flag.String("ttl", "", "time to live, e.g.: 1m, 1h, 1d, 1M, 1y") - upload.maxMB = cmdUpload.Flag.Int("maxMB", 32, "split files larger than the limit") + upload.maxMB = cmdUpload.Flag.Int("maxMB", 4, "split files larger than the limit") + upload.usePublicUrl = cmdUpload.Flag.Bool("usePublicUrl", false, "upload to public url from volume server") } var cmdUpload = &Command{ @@ -63,13 +69,22 @@ var cmdUpload = &Command{ func runUpload(cmd *Command, args []string) bool { util.LoadConfiguration("security", false) - grpcDialOption := security.LoadClientTLS(viper.Sub("grpc"), "client") + grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.client") + + defaultCollection, 
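+ // readMasterConfiguration (defined below) returns the master's default replication, which is applied when -replication is left empty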
err := readMasterConfiguration(grpcDialOption, *upload.master) + if err != nil { + fmt.Printf("upload: %v", err) + return false + } + if *upload.replication == "" { + *upload.replication = defaultCollection + } if len(args) == 0 { if *upload.dir == "" { return false } - filepath.Walk(*upload.dir, func(path string, info os.FileInfo, err error) error { + filepath.Walk(util.ResolvePath(*upload.dir), func(path string, info os.FileInfo, err error) error { if err == nil { if !info.IsDir() { if *upload.include != "" { @@ -81,9 +96,7 @@ func runUpload(cmd *Command, args []string) bool { if e != nil { return e } - results, e := operation.SubmitFiles(*upload.master, grpcDialOption, parts, - *upload.replication, *upload.collection, *upload.dataCenter, - *upload.ttl, *upload.maxMB) + results, e := operation.SubmitFiles(func() string { return *upload.master }, grpcDialOption, parts, *upload.replication, *upload.collection, *upload.dataCenter, *upload.ttl, *upload.diskType, *upload.maxMB, *upload.usePublicUrl) bytes, _ := json.Marshal(results) fmt.Println(string(bytes)) if e != nil { @@ -100,11 +113,21 @@ func runUpload(cmd *Command, args []string) bool { if e != nil { fmt.Println(e.Error()) } - results, _ := operation.SubmitFiles(*upload.master, grpcDialOption, parts, - *upload.replication, *upload.collection, *upload.dataCenter, - *upload.ttl, *upload.maxMB) + results, _ := operation.SubmitFiles(func() string { return *upload.master }, grpcDialOption, parts, *upload.replication, *upload.collection, *upload.dataCenter, *upload.ttl, *upload.diskType, *upload.maxMB, *upload.usePublicUrl) bytes, _ := json.Marshal(results) fmt.Println(string(bytes)) } return true } + +func readMasterConfiguration(grpcDialOption grpc.DialOption, masterAddress string) (replication string, err error) { + err = pb.WithMasterClient(masterAddress, grpcDialOption, func(client master_pb.SeaweedClient) error { + resp, err := client.GetMasterConfiguration(context.Background(), &master_pb.GetMasterConfigurationRequest{}) + if err != nil { + return fmt.Errorf("get master %s configuration: %v", masterAddress, err) + } + replication = resp.DefaultReplication + return nil + }) + return +} diff --git a/weed/command/version.go b/weed/command/version.go index 8fdd68ec8..9caf7dc4e 100644 --- a/weed/command/version.go +++ b/weed/command/version.go @@ -19,6 +19,6 @@ func runVersion(cmd *Command, args []string) bool { cmd.Usage() } - fmt.Printf("version %s %s %s\n", util.VERSION, runtime.GOOS, runtime.GOARCH) + fmt.Printf("version %s %s %s\n", util.Version(), runtime.GOOS, runtime.GOARCH) return true } diff --git a/weed/command/volume.go b/weed/command/volume.go index 3e8341ef8..9df500178 100644 --- a/weed/command/volume.go +++ b/weed/command/volume.go @@ -2,25 +2,32 @@ package command import ( "fmt" + "github.com/chrislusf/seaweedfs/weed/storage/types" "net/http" + httppprof "net/http/pprof" "os" - "runtime" "runtime/pprof" "strconv" "strings" "time" - "github.com/chrislusf/seaweedfs/weed/security" - "github.com/chrislusf/seaweedfs/weed/util/httpdown" "github.com/spf13/viper" "google.golang.org/grpc" + "github.com/chrislusf/seaweedfs/weed/util/grace" + + "github.com/chrislusf/seaweedfs/weed/pb" + "github.com/chrislusf/seaweedfs/weed/security" + "github.com/chrislusf/seaweedfs/weed/util/httpdown" + + "google.golang.org/grpc/reflection" + "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb" "github.com/chrislusf/seaweedfs/weed/server" + stats_collect "github.com/chrislusf/seaweedfs/weed/stats" 
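+ // stats_collect serves the Prometheus metrics endpoint enabled by the -metricsPort flag (see stats_collect.StartMetricsServer in runVolume below)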
"github.com/chrislusf/seaweedfs/weed/storage" "github.com/chrislusf/seaweedfs/weed/util" - "google.golang.org/grpc/reflection" ) var ( @@ -28,45 +35,62 @@ var ( ) type VolumeServerOptions struct { - port *int - publicPort *int - folders []string - folderMaxLimits []int - ip *string - publicUrl *string - bindIp *string - masters *string - pulseSeconds *int - idleConnectionTimeout *int - dataCenter *string - rack *string - whiteList []string - indexType *string - fixJpgOrientation *bool - readRedirect *bool - cpuProfile *string - memProfile *string - compactionMBPerSecond *int + port *int + publicPort *int + folders []string + folderMaxLimits []int + idxFolder *string + ip *string + publicUrl *string + bindIp *string + masters *string + idleConnectionTimeout *int + dataCenter *string + rack *string + whiteList []string + indexType *string + diskType *string + fixJpgOrientation *bool + readRedirect *bool + cpuProfile *string + memProfile *string + compactionMBPerSecond *int + fileSizeLimitMB *int + concurrentUploadLimitMB *int + minFreeSpacePercents []float32 + pprof *bool + preStopSeconds *int + metricsHttpPort *int + // pulseSeconds *int + enableTcp *bool } func init() { cmdVolume.Run = runVolume // break init cycle v.port = cmdVolume.Flag.Int("port", 8080, "http listen port") v.publicPort = cmdVolume.Flag.Int("port.public", 0, "port opened to public") - v.ip = cmdVolume.Flag.String("ip", "", "ip or server name") + v.ip = cmdVolume.Flag.String("ip", util.DetectedHostAddress(), "ip or server name, also used as identifier") v.publicUrl = cmdVolume.Flag.String("publicUrl", "", "Publicly accessible address") - v.bindIp = cmdVolume.Flag.String("ip.bind", "0.0.0.0", "ip address to bind to") + v.bindIp = cmdVolume.Flag.String("ip.bind", "", "ip address to bind to") v.masters = cmdVolume.Flag.String("mserver", "localhost:9333", "comma-separated master servers") - v.pulseSeconds = cmdVolume.Flag.Int("pulseSeconds", 5, "number of seconds between heartbeats, must be smaller than or equal to the master's setting") + v.preStopSeconds = cmdVolume.Flag.Int("preStopSeconds", 10, "number of seconds between stop send heartbeats and stop volume server") + // v.pulseSeconds = cmdVolume.Flag.Int("pulseSeconds", 5, "number of seconds between heartbeats, must be smaller than or equal to the master's setting") v.idleConnectionTimeout = cmdVolume.Flag.Int("idleTimeout", 30, "connection idle seconds") v.dataCenter = cmdVolume.Flag.String("dataCenter", "", "current volume server's data center name") v.rack = cmdVolume.Flag.String("rack", "", "current volume server's rack name") v.indexType = cmdVolume.Flag.String("index", "memory", "Choose [memory|leveldb|leveldbMedium|leveldbLarge] mode for memory~performance balance.") + v.diskType = cmdVolume.Flag.String("disk", "", "[hdd|ssd|] hard drive or solid state drive or any tag") v.fixJpgOrientation = cmdVolume.Flag.Bool("images.fix.orientation", false, "Adjust jpg orientation when uploading.") v.readRedirect = cmdVolume.Flag.Bool("read.redirect", true, "Redirect moved or non-local volumes.") v.cpuProfile = cmdVolume.Flag.String("cpuprofile", "", "cpu profile output file") v.memProfile = cmdVolume.Flag.String("memprofile", "", "memory profile output file") v.compactionMBPerSecond = cmdVolume.Flag.Int("compactionMBps", 0, "limit background compaction or copying speed in mega bytes per second") + v.fileSizeLimitMB = cmdVolume.Flag.Int("fileSizeLimitMB", 256, "limit file size to avoid out of memory") + v.concurrentUploadLimitMB = cmdVolume.Flag.Int("concurrentUploadLimitMB", 
128, "limit total concurrent upload size") + v.pprof = cmdVolume.Flag.Bool("pprof", false, "enable pprof http handlers. precludes --memprofile and --cpuprofile") + v.metricsHttpPort = cmdVolume.Flag.Int("metricsPort", 0, "Prometheus metrics listen port") + v.idxFolder = cmdVolume.Flag.String("dir.idx", "", "directory to store .idx files") + v.enableTcp = cmdVolume.Flag.Bool("tcp", false, " enable tcp port") } var cmdVolume = &Command{ @@ -79,26 +103,39 @@ var cmdVolume = &Command{ var ( volumeFolders = cmdVolume.Flag.String("dir", os.TempDir(), "directories to store data files. dir[,dir]...") - maxVolumeCounts = cmdVolume.Flag.String("max", "7", "maximum numbers of volumes, count[,count]...") + maxVolumeCounts = cmdVolume.Flag.String("max", "8", "maximum numbers of volumes, count[,count]... If set to zero, the limit will be auto configured.") volumeWhiteListOption = cmdVolume.Flag.String("whiteList", "", "comma separated Ip addresses having write permission. No limit if empty.") + minFreeSpacePercent = cmdVolume.Flag.String("minFreeSpacePercent", "1", "minimum free disk space (default to 1%). Low disk space will mark all volumes as ReadOnly.") ) func runVolume(cmd *Command, args []string) bool { util.LoadConfiguration("security", false) - runtime.GOMAXPROCS(runtime.NumCPU()) - util.SetupProfiling(*v.cpuProfile, *v.memProfile) + // If --pprof is set we assume the caller wants to be able to collect + // cpu and memory profiles via go tool pprof + if !*v.pprof { + grace.SetupProfiling(*v.cpuProfile, *v.memProfile) + } + + go stats_collect.StartMetricsServer(*v.metricsHttpPort) - v.startVolumeServer(*volumeFolders, *maxVolumeCounts, *volumeWhiteListOption) + v.startVolumeServer(*volumeFolders, *maxVolumeCounts, *volumeWhiteListOption, *minFreeSpacePercent) return true } -func (v VolumeServerOptions) startVolumeServer(volumeFolders, maxVolumeCounts, volumeWhiteListOption string) { +func (v VolumeServerOptions) startVolumeServer(volumeFolders, maxVolumeCounts, volumeWhiteListOption, minFreeSpacePercent string) { // Set multiple folders and each folder's max volume count limit' v.folders = strings.Split(volumeFolders, ",") + for _, folder := range v.folders { + if err := util.TestFolderWritable(util.ResolvePath(folder)); err != nil { + glog.Fatalf("Check Data Folder(-dir) Writable %s : %s", folder, err) + } + } + + // set max maxCountStrings := strings.Split(maxVolumeCounts, ",") for _, maxString := range maxCountStrings { if max, e := strconv.Atoi(maxString); e == nil { @@ -107,14 +144,47 @@ func (v VolumeServerOptions) startVolumeServer(volumeFolders, maxVolumeCounts, v glog.Fatalf("The max specified in -max not a valid number %s", maxString) } } + if len(v.folderMaxLimits) == 1 && len(v.folders) > 1 { + for i := 0; i < len(v.folders)-1; i++ { + v.folderMaxLimits = append(v.folderMaxLimits, v.folderMaxLimits[0]) + } + } if len(v.folders) != len(v.folderMaxLimits) { glog.Fatalf("%d directories by -dir, but only %d max is set by -max", len(v.folders), len(v.folderMaxLimits)) } - for _, folder := range v.folders { - if err := util.TestFolderWritable(folder); err != nil { - glog.Fatalf("Check Data Folder(-dir) Writable %s : %s", folder, err) + + // set minFreeSpacePercent + minFreeSpacePercentStrings := strings.Split(minFreeSpacePercent, ",") + for _, freeString := range minFreeSpacePercentStrings { + if value, e := strconv.ParseFloat(freeString, 32); e == nil { + v.minFreeSpacePercents = append(v.minFreeSpacePercents, float32(value)) + } else { + glog.Fatalf("The value specified in 
-minFreeSpacePercent not a valid value %s", freeString) + } + } + if len(v.minFreeSpacePercents) == 1 && len(v.folders) > 1 { + for i := 0; i < len(v.folders)-1; i++ { + v.minFreeSpacePercents = append(v.minFreeSpacePercents, v.minFreeSpacePercents[0]) + } + } + if len(v.folders) != len(v.minFreeSpacePercents) { + glog.Fatalf("%d directories by -dir, but only %d minFreeSpacePercent is set by -minFreeSpacePercent", len(v.folders), len(v.minFreeSpacePercents)) + } + + // set disk types + var diskTypes []types.DiskType + diskTypeStrings := strings.Split(*v.diskType, ",") + for _, diskTypeString := range diskTypeStrings { + diskTypes = append(diskTypes, types.ToDiskType(diskTypeString)) + } + if len(diskTypes) == 1 && len(v.folders) > 1 { + for i := 0; i < len(v.folders)-1; i++ { + diskTypes = append(diskTypes, diskTypes[0]) } } + if len(v.folders) != len(diskTypes) { + glog.Fatalf("%d directories by -dir, but only %d disk types is set by -disk", len(v.folders), len(diskTypes)) + } // security related white list configuration if volumeWhiteListOption != "" { @@ -122,7 +192,8 @@ func (v VolumeServerOptions) startVolumeServer(volumeFolders, maxVolumeCounts, v } if *v.ip == "" { - *v.ip = "127.0.0.1" + *v.ip = util.DetectedHostAddress() + glog.V(0).Infof("detected volume server ip address: %v", *v.ip) } if *v.publicPort == 0 { @@ -138,6 +209,14 @@ func (v VolumeServerOptions) startVolumeServer(volumeFolders, maxVolumeCounts, v publicVolumeMux = http.NewServeMux() } + if *v.pprof { + volumeMux.HandleFunc("/debug/pprof/", httppprof.Index) + volumeMux.HandleFunc("/debug/pprof/cmdline", httppprof.Cmdline) + volumeMux.HandleFunc("/debug/pprof/profile", httppprof.Profile) + volumeMux.HandleFunc("/debug/pprof/symbol", httppprof.Symbol) + volumeMux.HandleFunc("/debug/pprof/trace", httppprof.Trace) + } + volumeNeedleMapKind := storage.NeedleMapInMemory switch *v.indexType { case "leveldb": @@ -152,14 +231,16 @@ func (v VolumeServerOptions) startVolumeServer(volumeFolders, maxVolumeCounts, v volumeServer := weed_server.NewVolumeServer(volumeMux, publicVolumeMux, *v.ip, *v.port, *v.publicUrl, - v.folders, v.folderMaxLimits, + v.folders, v.folderMaxLimits, v.minFreeSpacePercents, diskTypes, + *v.idxFolder, volumeNeedleMapKind, - strings.Split(masters, ","), *v.pulseSeconds, *v.dataCenter, *v.rack, + strings.Split(masters, ","), 5, *v.dataCenter, *v.rack, v.whiteList, *v.fixJpgOrientation, *v.readRedirect, *v.compactionMBPerSecond, + *v.fileSizeLimitMB, + int64(*v.concurrentUploadLimitMB)*1024*1024, ) - // starting grpc server grpcS := v.startGrpcService(volumeServer) @@ -172,50 +253,56 @@ func (v VolumeServerOptions) startVolumeServer(volumeFolders, maxVolumeCounts, v } } + // starting tcp server + if *v.enableTcp { + go v.startTcpService(volumeServer) + } + // starting the cluster http server clusterHttpServer := v.startClusterHttpService(volumeMux) - stopChain := make(chan struct{}) - util.OnInterrupt(func() { + stopChan := make(chan bool) + grace.OnInterrupt(func() { fmt.Println("volume server has be killed") - var startTime time.Time - - // firstly, stop the public http service to prevent from receiving new user request - if nil != publicHttpDown { - startTime = time.Now() - if err := publicHttpDown.Stop(); err != nil { - glog.Warningf("stop the public http server failed, %v", err) - } - delta := time.Now().Sub(startTime).Nanoseconds() / 1e6 - glog.V(0).Infof("stop public http server, elapsed %dms", delta) - } - startTime = time.Now() - if err := clusterHttpServer.Stop(); err != nil { - 
glog.Warningf("stop the cluster http server failed, %v", err) + // Stop heartbeats + if !volumeServer.StopHeartbeat() { + glog.V(0).Infof("stop send heartbeat and wait %d seconds until shutdown ...", *v.preStopSeconds) + time.Sleep(time.Duration(*v.preStopSeconds) * time.Second) } - delta := time.Now().Sub(startTime).Nanoseconds() / 1e6 - glog.V(0).Infof("graceful stop cluster http server, elapsed [%d]", delta) - startTime = time.Now() - grpcS.GracefulStop() - delta = time.Now().Sub(startTime).Nanoseconds() / 1e6 - glog.V(0).Infof("graceful stop gRPC, elapsed [%d]", delta) + shutdown(publicHttpDown, clusterHttpServer, grpcS, volumeServer) + stopChan <- true + }) - startTime = time.Now() - volumeServer.Shutdown() - delta = time.Now().Sub(startTime).Nanoseconds() / 1e6 - glog.V(0).Infof("stop volume server, elapsed [%d]", delta) + select { + case <-stopChan: + } - pprof.StopCPUProfile() +} - close(stopChain) // notify exit - }) +func shutdown(publicHttpDown httpdown.Server, clusterHttpServer httpdown.Server, grpcS *grpc.Server, volumeServer *weed_server.VolumeServer) { - select { - case <-stopChain: + // firstly, stop the public http service to prevent from receiving new user request + if nil != publicHttpDown { + glog.V(0).Infof("stop public http server ... ") + if err := publicHttpDown.Stop(); err != nil { + glog.Warningf("stop the public http server failed, %v", err) + } + } + + glog.V(0).Infof("graceful stop cluster http server ... ") + if err := clusterHttpServer.Stop(); err != nil { + glog.Warningf("stop the cluster http server failed, %v", err) } - glog.Warningf("the volume server exit.") + + glog.V(0).Infof("graceful stop gRPC ...") + grpcS.GracefulStop() + + volumeServer.Shutdown() + + pprof.StopCPUProfile() + } // check whether configure the public port @@ -229,7 +316,7 @@ func (v VolumeServerOptions) startGrpcService(vs volume_server_pb.VolumeServerSe if err != nil { glog.Fatalf("failed to listen on grpc port %d: %v", grpcPort, err) } - grpcS := util.NewGrpcServer(security.LoadServerTLS(viper.Sub("grpc"), "volume")) + grpcS := pb.NewGrpcServer(security.LoadServerTLS(util.GetViper(), "grpc.volume")) volume_server_pb.RegisterVolumeServerServer(grpcS, vs) reflection.Register(grpcS) go func() { @@ -242,7 +329,7 @@ func (v VolumeServerOptions) startGrpcService(vs volume_server_pb.VolumeServerSe func (v VolumeServerOptions) startPublicHttpService(handler http.Handler) httpdown.Server { publicListeningAddress := *v.bindIp + ":" + strconv.Itoa(*v.publicPort) - glog.V(0).Infoln("Start Seaweed volume server", util.VERSION, "public at", publicListeningAddress) + glog.V(0).Infoln("Start Seaweed volume server", util.Version(), "public at", publicListeningAddress) publicListener, e := util.NewListener(publicListeningAddress, time.Duration(*v.idleConnectionTimeout)*time.Second) if e != nil { glog.Fatalf("Volume server listener error:%v", e) @@ -269,7 +356,7 @@ func (v VolumeServerOptions) startClusterHttpService(handler http.Handler) httpd } listeningAddress := *v.bindIp + ":" + strconv.Itoa(*v.port) - glog.V(0).Infof("Start Seaweed volume server %s at %s", util.VERSION, listeningAddress) + glog.V(0).Infof("Start Seaweed volume server %s at %s", util.Version(), listeningAddress) listener, e := util.NewListener(listeningAddress, time.Duration(*v.idleConnectionTimeout)*time.Second) if e != nil { glog.Fatalf("Volume server listener error:%v", e) @@ -288,3 +375,22 @@ func (v VolumeServerOptions) startClusterHttpService(handler http.Handler) httpd }() return clusterHttpServer } + +func (v 
VolumeServerOptions) startTcpService(volumeServer *weed_server.VolumeServer) { + listeningAddress := *v.bindIp + ":" + strconv.Itoa(*v.port+20000) + glog.V(0).Infoln("Start Seaweed volume server", util.Version(), "tcp at", listeningAddress) + listener, e := util.NewListener(listeningAddress, 0) + if e != nil { + glog.Fatalf("Volume server listener error on %s:%v", listeningAddress, e) + } + defer listener.Close() + + for { + c, err := listener.Accept() + if err != nil { + fmt.Println(err) + return + } + go volumeServer.HandleTcpConnection(c) + } +} diff --git a/weed/command/webdav.go b/weed/command/webdav.go index 371c4a9ad..781ea1e36 100644 --- a/weed/command/webdav.go +++ b/weed/command/webdav.go @@ -1,17 +1,20 @@ package command import ( + "context" "fmt" "net/http" + "os" "os/user" "strconv" "time" "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" "github.com/chrislusf/seaweedfs/weed/security" "github.com/chrislusf/seaweedfs/weed/server" "github.com/chrislusf/seaweedfs/weed/util" - "github.com/spf13/viper" ) var ( @@ -22,8 +25,12 @@ type WebDavOption struct { filer *string port *int collection *string + replication *string + disk *string tlsPrivateKey *string tlsCertificate *string + cacheDir *string + cacheSizeMB *int64 } func init() { @@ -31,13 +38,17 @@ func init() { webDavStandaloneOptions.filer = cmdWebDav.Flag.String("filer", "localhost:8888", "filer server address") webDavStandaloneOptions.port = cmdWebDav.Flag.Int("port", 7333, "webdav server http listen port") webDavStandaloneOptions.collection = cmdWebDav.Flag.String("collection", "", "collection to create the files") + webDavStandaloneOptions.replication = cmdWebDav.Flag.String("replication", "", "replication to create the files") + webDavStandaloneOptions.disk = cmdWebDav.Flag.String("disk", "", "[hdd|ssd|] hard drive or solid state drive or any tag") webDavStandaloneOptions.tlsPrivateKey = cmdWebDav.Flag.String("key.file", "", "path to the TLS private key file") webDavStandaloneOptions.tlsCertificate = cmdWebDav.Flag.String("cert.file", "", "path to the TLS certificate file") + webDavStandaloneOptions.cacheDir = cmdWebDav.Flag.String("cacheDir", os.TempDir(), "local cache directory for file chunks") + webDavStandaloneOptions.cacheSizeMB = cmdWebDav.Flag.Int64("cacheCapacityMB", 1000, "local cache capacity in MB") } var cmdWebDav = &Command{ UsageLine: "webdav -port=7333 -filer=", - Short: " start a webdav server that is backed by a filer", + Short: "start a webdav server that is backed by a filer", Long: `start a webdav server that is backed by a filer. 
`, @@ -47,7 +58,7 @@ func runWebDav(cmd *Command, args []string) bool { util.LoadConfiguration("security", false) - glog.V(0).Infof("Starting Seaweed WebDav Server %s at https port %d", util.VERSION, *webDavStandaloneOptions.port) + glog.V(0).Infof("Starting Seaweed WebDav Server %s at https port %d", util.Version(), *webDavStandaloneOptions.port) return webDavStandaloneOptions.startWebDav() @@ -55,12 +66,6 @@ func runWebDav(cmd *Command, args []string) bool { func (wo *WebDavOption) startWebDav() bool { - filerGrpcAddress, err := parseFilerGrpcAddress(*wo.filer) - if err != nil { - glog.Fatal(err) - return false - } - // detect current user uid, gid := uint32(0), uint32(0) if u, err := user.Current(); err == nil { @@ -72,13 +77,47 @@ func (wo *WebDavOption) startWebDav() bool { } } + // parse filer grpc address + filerGrpcAddress, err := pb.ParseServerToGrpcAddress(*wo.filer) + if err != nil { + glog.Fatal(err) + return false + } + + grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.client") + + var cipher bool + // connect to filer + for { + err = pb.WithGrpcFilerClient(filerGrpcAddress, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error { + resp, err := client.GetFilerConfiguration(context.Background(), &filer_pb.GetFilerConfigurationRequest{}) + if err != nil { + return fmt.Errorf("get filer %s configuration: %v", filerGrpcAddress, err) + } + cipher = resp.Cipher + return nil + }) + if err != nil { + glog.V(0).Infof("wait to connect to filer %s grpc address %s", *wo.filer, filerGrpcAddress) + time.Sleep(time.Second) + } else { + glog.V(0).Infof("connected to filer %s grpc address %s", *wo.filer, filerGrpcAddress) + break + } + } + ws, webdavServer_err := weed_server.NewWebDavServer(&weed_server.WebDavOption{ Filer: *wo.filer, FilerGrpcAddress: filerGrpcAddress, - GrpcDialOption: security.LoadClientTLS(viper.Sub("grpc"), "client"), + GrpcDialOption: grpcDialOption, Collection: *wo.collection, + Replication: *wo.replication, + DiskType: *wo.disk, Uid: uid, Gid: gid, + Cipher: cipher, + CacheDir: util.ResolvePath(*wo.cacheDir), + CacheSizeMB: *wo.cacheSizeMB, }) if webdavServer_err != nil { glog.Fatalf("WebDav Server startup error: %v", webdavServer_err) @@ -93,12 +132,12 @@ func (wo *WebDavOption) startWebDav() bool { } if *wo.tlsPrivateKey != "" { - glog.V(0).Infof("Start Seaweed WebDav Server %s at https port %d", util.VERSION, *wo.port) + glog.V(0).Infof("Start Seaweed WebDav Server %s at https port %d", util.Version(), *wo.port) if err = httpS.ServeTLS(webDavListener, *wo.tlsCertificate, *wo.tlsPrivateKey); err != nil { glog.Fatalf("WebDav Server Fail to serve: %v", err) } } else { - glog.V(0).Infof("Start Seaweed WebDav Server %s at http port %d", util.VERSION, *wo.port) + glog.V(0).Infof("Start Seaweed WebDav Server %s at http port %d", util.Version(), *wo.port) if err = httpS.Serve(webDavListener); err != nil { glog.Fatalf("WebDav Server Fail to serve: %v", err) } diff --git a/weed/filer/abstract_sql/abstract_sql_store.go b/weed/filer/abstract_sql/abstract_sql_store.go new file mode 100644 index 000000000..ab8f6bcbd --- /dev/null +++ b/weed/filer/abstract_sql/abstract_sql_store.go @@ -0,0 +1,364 @@ +package abstract_sql + +import ( + "context" + "database/sql" + "fmt" + "github.com/chrislusf/seaweedfs/weed/filer" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/util" + "strings" + "sync" +) + +type SqlGenerator interface { + GetSqlInsert(tableName string) string + 
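+ // Each method returns a dialect-specific SQL statement for the given table; tableName is DEFAULT_TABLE ("filemeta") unless SupportBucketTable maps a /buckets/<name> path to its own per-bucket table (see getTxOrDB below).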
GetSqlUpdate(tableName string) string + GetSqlFind(tableName string) string + GetSqlDelete(tableName string) string + GetSqlDeleteFolderChildren(tableName string) string + GetSqlListExclusive(tableName string) string + GetSqlListInclusive(tableName string) string + GetSqlCreateTable(tableName string) string + GetSqlDropTable(tableName string) string +} + +type AbstractSqlStore struct { + SqlGenerator + DB *sql.DB + SupportBucketTable bool + dbs map[string]bool + dbsLock sync.Mutex +} + +func (store *AbstractSqlStore) OnBucketCreation(bucket string) { + store.dbsLock.Lock() + defer store.dbsLock.Unlock() + + store.CreateTable(context.Background(), bucket) + + if store.dbs == nil { + return + } + store.dbs[bucket] = true +} +func (store *AbstractSqlStore) OnBucketDeletion(bucket string) { + store.dbsLock.Lock() + defer store.dbsLock.Unlock() + + store.deleteTable(context.Background(), bucket) + + if store.dbs == nil { + return + } + delete(store.dbs, bucket) +} + +const ( + DEFAULT_TABLE = "filemeta" +) + +type TxOrDB interface { + ExecContext(ctx context.Context, query string, args ...interface{}) (sql.Result, error) + QueryRowContext(ctx context.Context, query string, args ...interface{}) *sql.Row + QueryContext(ctx context.Context, query string, args ...interface{}) (*sql.Rows, error) +} + +func (store *AbstractSqlStore) BeginTransaction(ctx context.Context) (context.Context, error) { + tx, err := store.DB.BeginTx(ctx, &sql.TxOptions{ + Isolation: sql.LevelReadCommitted, + ReadOnly: false, + }) + if err != nil { + return ctx, err + } + + return context.WithValue(ctx, "tx", tx), nil +} +func (store *AbstractSqlStore) CommitTransaction(ctx context.Context) error { + if tx, ok := ctx.Value("tx").(*sql.Tx); ok { + return tx.Commit() + } + return nil +} +func (store *AbstractSqlStore) RollbackTransaction(ctx context.Context) error { + if tx, ok := ctx.Value("tx").(*sql.Tx); ok { + return tx.Rollback() + } + return nil +} + +func (store *AbstractSqlStore) getTxOrDB(ctx context.Context, fullpath util.FullPath, isForChildren bool) (txOrDB TxOrDB, bucket string, shortPath util.FullPath, err error) { + + shortPath = fullpath + bucket = DEFAULT_TABLE + + if tx, ok := ctx.Value("tx").(*sql.Tx); ok { + txOrDB = tx + } else { + txOrDB = store.DB + } + + if !store.SupportBucketTable { + return + } + + if !strings.HasPrefix(string(fullpath), "/buckets/") { + return + } + + // detect bucket + bucketAndObjectKey := string(fullpath)[len("/buckets/"):] + t := strings.Index(bucketAndObjectKey, "/") + if t < 0 && !isForChildren { + return + } + bucket = bucketAndObjectKey + shortPath = "/" + if t > 0 { + bucket = bucketAndObjectKey[:t] + shortPath = util.FullPath(bucketAndObjectKey[t:]) + } + + if isValidBucket(bucket) { + store.dbsLock.Lock() + defer store.dbsLock.Unlock() + + if store.dbs == nil { + store.dbs = make(map[string]bool) + } + + if _, found := store.dbs[bucket]; !found { + if err = store.CreateTable(ctx, bucket); err == nil { + store.dbs[bucket] = true + } + } + + } + + return +} + +func (store *AbstractSqlStore) InsertEntry(ctx context.Context, entry *filer.Entry) (err error) { + + db, bucket, shortPath, err := store.getTxOrDB(ctx, entry.FullPath, false) + if err != nil { + return fmt.Errorf("findDB %s : %v", entry.FullPath, err) + } + + dir, name := shortPath.DirAndName() + meta, err := entry.EncodeAttributesAndChunks() + if err != nil { + return fmt.Errorf("encode %s: %s", entry.FullPath, err) + } + + if len(entry.Chunks) > 50 { + meta = util.MaybeGzipData(meta) + } + + res, err := 
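getTxOrDB above decides which table an entry lives in by string inspection alone: paths under /buckets/ map to a per-bucket table plus a bucket-relative short path, everything else stays in the shared filemeta table. A simplified standalone sketch of that mapping (it ignores the isForChildren listing case and the lazy table creation):

package main

import (
	"fmt"
	"strings"
)

const defaultTable = "filemeta"

// splitBucketPath mirrors the rules above: "/buckets/<bucket>/a/b"
// maps to table "<bucket>" and short path "/a/b"; everything else
// stays in the default table with the path unchanged.
func splitBucketPath(fullpath string) (table, shortPath string) {
	table, shortPath = defaultTable, fullpath
	if !strings.HasPrefix(fullpath, "/buckets/") {
		return
	}
	rest := fullpath[len("/buckets/"):]
	if t := strings.Index(rest, "/"); t > 0 {
		return rest[:t], rest[t:]
	}
	return
}

func main() {
	fmt.Println(splitBucketPath("/buckets/photos/2021/a.jpg")) // photos /2021/a.jpg
	fmt.Println(splitBucketPath("/tmp/x"))                     // filemeta /tmp/x
}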
db.ExecContext(ctx, store.GetSqlInsert(bucket), util.HashStringToLong(dir), name, dir, meta) + if err == nil { + return + } + + if !strings.Contains(strings.ToLower(err.Error()), "duplicate") { + // return fmt.Errorf("insert: %s", err) + // skip this since the error can be in a different language + } + + // now the insert failed possibly due to duplication constraints + glog.V(1).Infof("insert %s falls back to update: %v", entry.FullPath, err) + + res, err = db.ExecContext(ctx, store.GetSqlUpdate(bucket), meta, util.HashStringToLong(dir), name, dir) + if err != nil { + return fmt.Errorf("upsert %s: %s", entry.FullPath, err) + } + + _, err = res.RowsAffected() + if err != nil { + return fmt.Errorf("upsert %s but no rows affected: %s", entry.FullPath, err) + } + return nil + +} + +func (store *AbstractSqlStore) UpdateEntry(ctx context.Context, entry *filer.Entry) (err error) { + + db, bucket, shortPath, err := store.getTxOrDB(ctx, entry.FullPath, false) + if err != nil { + return fmt.Errorf("findDB %s : %v", entry.FullPath, err) + } + + dir, name := shortPath.DirAndName() + meta, err := entry.EncodeAttributesAndChunks() + if err != nil { + return fmt.Errorf("encode %s: %s", entry.FullPath, err) + } + + res, err := db.ExecContext(ctx, store.GetSqlUpdate(bucket), meta, util.HashStringToLong(dir), name, dir) + if err != nil { + return fmt.Errorf("update %s: %s", entry.FullPath, err) + } + + _, err = res.RowsAffected() + if err != nil { + return fmt.Errorf("update %s but no rows affected: %s", entry.FullPath, err) + } + return nil +} + +func (store *AbstractSqlStore) FindEntry(ctx context.Context, fullpath util.FullPath) (*filer.Entry, error) { + + db, bucket, shortPath, err := store.getTxOrDB(ctx, fullpath, false) + if err != nil { + return nil, fmt.Errorf("findDB %s : %v", fullpath, err) + } + + dir, name := shortPath.DirAndName() + row := db.QueryRowContext(ctx, store.GetSqlFind(bucket), util.HashStringToLong(dir), name, dir) + + var data []byte + if err := row.Scan(&data); err != nil { + if err == sql.ErrNoRows { + return nil, filer_pb.ErrNotFound + } + return nil, fmt.Errorf("find %s: %v", fullpath, err) + } + + entry := &filer.Entry{ + FullPath: fullpath, + } + if err := entry.DecodeAttributesAndChunks(util.MaybeDecompressData(data)); err != nil { + return entry, fmt.Errorf("decode %s : %v", entry.FullPath, err) + } + + return entry, nil +} + +func (store *AbstractSqlStore) DeleteEntry(ctx context.Context, fullpath util.FullPath) error { + + db, bucket, shortPath, err := store.getTxOrDB(ctx, fullpath, false) + if err != nil { + return fmt.Errorf("findDB %s : %v", fullpath, err) + } + + dir, name := shortPath.DirAndName() + + res, err := db.ExecContext(ctx, store.GetSqlDelete(bucket), util.HashStringToLong(dir), name, dir) + if err != nil { + return fmt.Errorf("delete %s: %s", fullpath, err) + } + + _, err = res.RowsAffected() + if err != nil { + return fmt.Errorf("delete %s but no rows affected: %s", fullpath, err) + } + + return nil +} + +func (store *AbstractSqlStore) DeleteFolderChildren(ctx context.Context, fullpath util.FullPath) error { + + db, bucket, shortPath, err := store.getTxOrDB(ctx, fullpath, true) + if err != nil { + return fmt.Errorf("findDB %s : %v", fullpath, err) + } + + if isValidBucket(bucket) && shortPath == "/" { + if err = store.deleteTable(ctx, bucket); err == nil { + store.dbsLock.Lock() + delete(store.dbs, bucket) + store.dbsLock.Unlock() + return nil + } else { + return err + } + } + + glog.V(4).Infof("delete %s SQL %s %d", string(shortPath), 
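InsertEntry above is a portable upsert: INSERT first, and on failure retry as an UPDATE against the same (dirhash, name, directory) key. It intentionally does not parse the error text beyond a loose "duplicate" check, because drivers localize their messages. A compact sketch of the pattern, with illustrative table and column names:

package store

import (
	"context"
	"database/sql"
)

// upsertMeta tries INSERT first and, on any failure (SeaweedFS treats
// every insert error as a possible duplicate-key violation, since the
// message text may be in another language), retries as an UPDATE.
func upsertMeta(ctx context.Context, db *sql.DB, dirHash int64, name, dir string, meta []byte) error {
	_, err := db.ExecContext(ctx,
		"INSERT INTO filemeta (dirhash,name,directory,meta) VALUES(?,?,?,?)",
		dirHash, name, dir, meta)
	if err == nil {
		return nil
	}
	// fall back to UPDATE on the same primary key
	_, err = db.ExecContext(ctx,
		"UPDATE filemeta SET meta=? WHERE dirhash=? AND name=? AND directory=?",
		meta, dirHash, name, dir)
	return err
}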
store.GetSqlDeleteFolderChildren(bucket), util.HashStringToLong(string(shortPath)))
+
+	res, err := db.ExecContext(ctx, store.GetSqlDeleteFolderChildren(bucket), util.HashStringToLong(string(shortPath)), string(shortPath))
+	if err != nil {
+		return fmt.Errorf("deleteFolderChildren %s: %s", fullpath, err)
+	}
+
+	_, err = res.RowsAffected()
+	if err != nil {
+		return fmt.Errorf("deleteFolderChildren %s but no rows affected: %s", fullpath, err)
+	}
+
+	return nil
+}
+
+func (store *AbstractSqlStore) ListDirectoryPrefixedEntries(ctx context.Context, dirPath util.FullPath, startFileName string, includeStartFile bool, limit int64, prefix string, eachEntryFunc filer.ListEachEntryFunc) (lastFileName string, err error) {
+
+	db, bucket, shortPath, err := store.getTxOrDB(ctx, dirPath, true)
+	if err != nil {
+		return lastFileName, fmt.Errorf("findDB %s : %v", dirPath, err)
+	}
+
+	sqlText := store.GetSqlListExclusive(bucket)
+	if includeStartFile {
+		sqlText = store.GetSqlListInclusive(bucket)
+	}
+
+	rows, err := db.QueryContext(ctx, sqlText, util.HashStringToLong(string(shortPath)), startFileName, string(shortPath), prefix+"%", limit+1)
+	if err != nil {
+		return lastFileName, fmt.Errorf("list %s : %v", dirPath, err)
+	}
+	defer rows.Close()
+
+	for rows.Next() {
+		var name string
+		var data []byte
+		if err = rows.Scan(&name, &data); err != nil {
+			glog.V(0).Infof("scan %s : %v", dirPath, err)
+			return lastFileName, fmt.Errorf("scan %s: %v", dirPath, err)
+		}
+		lastFileName = name
+
+		entry := &filer.Entry{
+			FullPath: util.NewFullPath(string(dirPath), name),
+		}
+		if err = entry.DecodeAttributesAndChunks(util.MaybeDecompressData(data)); err != nil {
+			glog.V(0).Infof("scan decode %s : %v", entry.FullPath, err)
+			return lastFileName, fmt.Errorf("scan decode %s : %v", entry.FullPath, err)
+		}
+
+		if !eachEntryFunc(entry) {
+			break
+		}
+
+	}
+
+	return lastFileName, nil
+}
+
+func (store *AbstractSqlStore) ListDirectoryEntries(ctx context.Context, dirPath util.FullPath, startFileName string, includeStartFile bool, limit int64, eachEntryFunc filer.ListEachEntryFunc) (lastFileName string, err error) {
+	return store.ListDirectoryPrefixedEntries(ctx, dirPath, startFileName, includeStartFile, limit, "", eachEntryFunc)
+}
+
+func (store *AbstractSqlStore) Shutdown() {
+	store.DB.Close()
+}
+
+func isValidBucket(bucket string) bool {
+	return bucket != DEFAULT_TABLE && bucket != ""
+}
+
+func (store *AbstractSqlStore) CreateTable(ctx context.Context, bucket string) error {
+	if !store.SupportBucketTable {
+		return nil
+	}
+	_, err := store.DB.ExecContext(ctx, store.SqlGenerator.GetSqlCreateTable(bucket))
+	return err
+}
+
+func (store *AbstractSqlStore) deleteTable(ctx context.Context, bucket string) error {
+	if !store.SupportBucketTable {
+		return nil
+	}
+	_, err := store.DB.ExecContext(ctx, store.SqlGenerator.GetSqlDropTable(bucket))
+	return err
+}
diff --git a/weed/filer/abstract_sql/abstract_sql_store_kv.go b/weed/filer/abstract_sql/abstract_sql_store_kv.go
new file mode 100644
index 000000000..03b016c76
--- /dev/null
+++ b/weed/filer/abstract_sql/abstract_sql_store_kv.go
@@ -0,0 +1,105 @@
+package abstract_sql
+
+import (
+	"context"
+	"database/sql"
+	"encoding/base64"
+	"fmt"
+	"github.com/chrislusf/seaweedfs/weed/filer"
+	"github.com/chrislusf/seaweedfs/weed/glog"
+	"github.com/chrislusf/seaweedfs/weed/util"
+	"strings"
+)
+
+func (store *AbstractSqlStore) KvPut(ctx context.Context, key []byte, value []byte) (err error) {
+
+	db, _, _, err := store.getTxOrDB(ctx, "", false)
+	if err != nil {
+		return
fmt.Errorf("findDB: %v", err) + } + + dirStr, dirHash, name := genDirAndName(key) + + res, err := db.ExecContext(ctx, store.GetSqlInsert(DEFAULT_TABLE), dirHash, name, dirStr, value) + if err == nil { + return + } + + if !strings.Contains(strings.ToLower(err.Error()), "duplicate") { + // return fmt.Errorf("kv insert: %s", err) + // skip this since the error can be in a different language + } + + // now the insert failed possibly due to duplication constraints + glog.V(1).Infof("kv insert falls back to update: %s", err) + + res, err = db.ExecContext(ctx, store.GetSqlUpdate(DEFAULT_TABLE), value, dirHash, name, dirStr) + if err != nil { + return fmt.Errorf("kv upsert: %s", err) + } + + _, err = res.RowsAffected() + if err != nil { + return fmt.Errorf("kv upsert no rows affected: %s", err) + } + return nil + +} + +func (store *AbstractSqlStore) KvGet(ctx context.Context, key []byte) (value []byte, err error) { + + db, _, _, err := store.getTxOrDB(ctx, "", false) + if err != nil { + return nil, fmt.Errorf("findDB: %v", err) + } + + dirStr, dirHash, name := genDirAndName(key) + row := db.QueryRowContext(ctx, store.GetSqlFind(DEFAULT_TABLE), dirHash, name, dirStr) + + err = row.Scan(&value) + + if err == sql.ErrNoRows { + return nil, filer.ErrKvNotFound + } + + if err != nil { + return nil, fmt.Errorf("kv get: %v", err) + } + + return +} + +func (store *AbstractSqlStore) KvDelete(ctx context.Context, key []byte) (err error) { + + db, _, _, err := store.getTxOrDB(ctx, "", false) + if err != nil { + return fmt.Errorf("findDB: %v", err) + } + + dirStr, dirHash, name := genDirAndName(key) + + res, err := db.ExecContext(ctx, store.GetSqlDelete(DEFAULT_TABLE), dirHash, name, dirStr) + if err != nil { + return fmt.Errorf("kv delete: %s", err) + } + + _, err = res.RowsAffected() + if err != nil { + return fmt.Errorf("kv delete no rows affected: %s", err) + } + + return nil + +} + +func genDirAndName(key []byte) (dirStr string, dirHash int64, name string) { + for len(key) < 8 { + key = append(key, 0) + } + + dirHash = int64(util.BytesToUint64(key[:8])) + dirStr = base64.StdEncoding.EncodeToString(key[:8]) + name = base64.StdEncoding.EncodeToString(key[8:]) + + return +} diff --git a/weed/filer2/cassandra/README.txt b/weed/filer/cassandra/README.txt similarity index 100% rename from weed/filer2/cassandra/README.txt rename to weed/filer/cassandra/README.txt diff --git a/weed/filer/cassandra/cassandra_store.go b/weed/filer/cassandra/cassandra_store.go new file mode 100644 index 000000000..fd2ce91a6 --- /dev/null +++ b/weed/filer/cassandra/cassandra_store.go @@ -0,0 +1,212 @@ +package cassandra + +import ( + "context" + "fmt" + "github.com/gocql/gocql" + + "github.com/chrislusf/seaweedfs/weed/filer" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/util" +) + +func init() { + filer.Stores = append(filer.Stores, &CassandraStore{}) +} + +type CassandraStore struct { + cluster *gocql.ClusterConfig + session *gocql.Session + superLargeDirectoryHash map[string]string +} + +func (store *CassandraStore) GetName() string { + return "cassandra" +} + +func (store *CassandraStore) Initialize(configuration util.Configuration, prefix string) (err error) { + return store.initialize( + configuration.GetString(prefix+"keyspace"), + configuration.GetStringSlice(prefix+"hosts"), + configuration.GetString(prefix+"username"), + configuration.GetString(prefix+"password"), + configuration.GetStringSlice(prefix+"superLargeDirectories"), + ) +} + 
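genDirAndName above folds an arbitrary KV key into the same (dirhash, directory, name) schema used for file metadata: the first 8 bytes (zero-padded if needed) yield the directory hash and, base64-encoded, the directory string, while the remaining bytes become the name. A runnable sketch, with binary.BigEndian standing in for util.BytesToUint64 (assumed big-endian):

package main

import (
	"encoding/base64"
	"encoding/binary"
	"fmt"
)

// genDirAndName mirrors the KV helper above.
func genDirAndName(key []byte) (dirStr string, dirHash int64, name string) {
	for len(key) < 8 {
		key = append(key, 0) // pad short keys with zero bytes
	}
	dirHash = int64(binary.BigEndian.Uint64(key[:8]))
	dirStr = base64.StdEncoding.EncodeToString(key[:8])
	name = base64.StdEncoding.EncodeToString(key[8:])
	return
}

func main() {
	dirStr, dirHash, name := genDirAndName([]byte("example-key"))
	fmt.Println(dirStr, dirHash, name)
}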
+func (store *CassandraStore) isSuperLargeDirectory(dir string) (dirHash string, isSuperLargeDirectory bool) { + dirHash, isSuperLargeDirectory = store.superLargeDirectoryHash[dir] + return +} + +func (store *CassandraStore) initialize(keyspace string, hosts []string, username string, password string, superLargeDirectories []string) (err error) { + store.cluster = gocql.NewCluster(hosts...) + if username != "" && password != "" { + store.cluster.Authenticator = gocql.PasswordAuthenticator{Username: username, Password: password} + } + store.cluster.Keyspace = keyspace + store.cluster.Consistency = gocql.LocalQuorum + store.session, err = store.cluster.CreateSession() + if err != nil { + glog.V(0).Infof("Failed to open cassandra store, hosts %v, keyspace %s", hosts, keyspace) + } + + // set directory hash + store.superLargeDirectoryHash = make(map[string]string) + existingHash := make(map[string]string) + for _, dir := range superLargeDirectories { + // adding dir hash to avoid duplicated names + dirHash := util.Md5String([]byte(dir))[:4] + store.superLargeDirectoryHash[dir] = dirHash + if existingDir, found := existingHash[dirHash]; found { + glog.Fatalf("directory %s has the same hash as %s", dir, existingDir) + } + existingHash[dirHash] = dir + } + return +} + +func (store *CassandraStore) BeginTransaction(ctx context.Context) (context.Context, error) { + return ctx, nil +} +func (store *CassandraStore) CommitTransaction(ctx context.Context) error { + return nil +} +func (store *CassandraStore) RollbackTransaction(ctx context.Context) error { + return nil +} + +func (store *CassandraStore) InsertEntry(ctx context.Context, entry *filer.Entry) (err error) { + + dir, name := entry.FullPath.DirAndName() + if dirHash, ok := store.isSuperLargeDirectory(dir); ok { + dir, name = dirHash+name, "" + } + + meta, err := entry.EncodeAttributesAndChunks() + if err != nil { + return fmt.Errorf("encode %s: %s", entry.FullPath, err) + } + + if len(entry.Chunks) > 50 { + meta = util.MaybeGzipData(meta) + } + + if err := store.session.Query( + "INSERT INTO filemeta (directory,name,meta) VALUES(?,?,?) USING TTL ? ", + dir, name, meta, entry.TtlSec).Exec(); err != nil { + return fmt.Errorf("insert %s: %s", entry.FullPath, err) + } + + return nil +} + +func (store *CassandraStore) UpdateEntry(ctx context.Context, entry *filer.Entry) (err error) { + + return store.InsertEntry(ctx, entry) +} + +func (store *CassandraStore) FindEntry(ctx context.Context, fullpath util.FullPath) (entry *filer.Entry, err error) { + + dir, name := fullpath.DirAndName() + if dirHash, ok := store.isSuperLargeDirectory(dir); ok { + dir, name = dirHash+name, "" + } + + var data []byte + if err := store.session.Query( + "SELECT meta FROM filemeta WHERE directory=? AND name=?", + dir, name).Consistency(gocql.One).Scan(&data); err != nil { + if err != gocql.ErrNotFound { + return nil, filer_pb.ErrNotFound + } + } + + if len(data) == 0 { + return nil, filer_pb.ErrNotFound + } + + entry = &filer.Entry{ + FullPath: fullpath, + } + err = entry.DecodeAttributesAndChunks(util.MaybeDecompressData(data)) + if err != nil { + return entry, fmt.Errorf("decode %s : %v", entry.FullPath, err) + } + + return entry, nil +} + +func (store *CassandraStore) DeleteEntry(ctx context.Context, fullpath util.FullPath) error { + + dir, name := fullpath.DirAndName() + if dirHash, ok := store.isSuperLargeDirectory(dir); ok { + dir, name = dirHash+name, "" + } + + if err := store.session.Query( + "DELETE FROM filemeta WHERE directory=? 
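For directories listed in superLargeDirectories, the store above swaps the (directory, name) key for (md5(dir)[:4]+name, ""), so one huge directory spreads across many Cassandra partitions instead of one wide row; listing such a directory is then unsupported. A sketch of the remapping, with the same four-hex-character hash truncation:

package main

import (
	"crypto/md5"
	"fmt"
)

// dirHash mirrors util.Md5String(dir)[:4].
func dirHash(dir string) string {
	return fmt.Sprintf("%x", md5.Sum([]byte(dir)))[:4]
}

// remap returns the (directory, name) pair actually written to
// Cassandra for an entry inside a super-large directory.
func remap(dir, name string) (string, string) {
	return dirHash(dir) + name, ""
}

func main() {
	fmt.Println(remap("/home/logs", "2021-02-01.log"))
}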
AND name=?", + dir, name).Exec(); err != nil { + return fmt.Errorf("delete %s : %v", fullpath, err) + } + + return nil +} + +func (store *CassandraStore) DeleteFolderChildren(ctx context.Context, fullpath util.FullPath) error { + if _, ok := store.isSuperLargeDirectory(string(fullpath)); ok { + return nil // filer.ErrUnsupportedSuperLargeDirectoryListing + } + + if err := store.session.Query( + "DELETE FROM filemeta WHERE directory=?", + fullpath).Exec(); err != nil { + return fmt.Errorf("delete %s : %v", fullpath, err) + } + + return nil +} + +func (store *CassandraStore) ListDirectoryPrefixedEntries(ctx context.Context, dirPath util.FullPath, startFileName string, includeStartFile bool, limit int64, prefix string, eachEntryFunc filer.ListEachEntryFunc) (lastFileName string, err error) { + return lastFileName, filer.ErrUnsupportedListDirectoryPrefixed +} + +func (store *CassandraStore) ListDirectoryEntries(ctx context.Context, dirPath util.FullPath, startFileName string, includeStartFile bool, limit int64, eachEntryFunc filer.ListEachEntryFunc) (lastFileName string, err error) { + + if _, ok := store.isSuperLargeDirectory(string(dirPath)); ok { + return // nil, filer.ErrUnsupportedSuperLargeDirectoryListing + } + + cqlStr := "SELECT NAME, meta FROM filemeta WHERE directory=? AND name>? ORDER BY NAME ASC LIMIT ?" + if includeStartFile { + cqlStr = "SELECT NAME, meta FROM filemeta WHERE directory=? AND name>=? ORDER BY NAME ASC LIMIT ?" + } + + var data []byte + var name string + iter := store.session.Query(cqlStr, string(dirPath), startFileName, limit+1).Iter() + for iter.Scan(&name, &data) { + entry := &filer.Entry{ + FullPath: util.NewFullPath(string(dirPath), name), + } + lastFileName = name + if decodeErr := entry.DecodeAttributesAndChunks(util.MaybeDecompressData(data)); decodeErr != nil { + err = decodeErr + glog.V(0).Infof("list %s : %v", entry.FullPath, err) + break + } + if !eachEntryFunc(entry) { + break + } + } + if err := iter.Close(); err != nil { + glog.V(0).Infof("list iterator close: %v", err) + } + + return lastFileName, err +} + +func (store *CassandraStore) Shutdown() { + store.session.Close() +} diff --git a/weed/filer/cassandra/cassandra_store_kv.go b/weed/filer/cassandra/cassandra_store_kv.go new file mode 100644 index 000000000..dafa9bb15 --- /dev/null +++ b/weed/filer/cassandra/cassandra_store_kv.go @@ -0,0 +1,62 @@ +package cassandra + +import ( + "context" + "encoding/base64" + "fmt" + "github.com/chrislusf/seaweedfs/weed/filer" + "github.com/gocql/gocql" +) + +func (store *CassandraStore) KvPut(ctx context.Context, key []byte, value []byte) (err error) { + dir, name := genDirAndName(key) + + if err := store.session.Query( + "INSERT INTO filemeta (directory,name,meta) VALUES(?,?,?) USING TTL ? ", + dir, name, value, 0).Exec(); err != nil { + return fmt.Errorf("kv insert: %s", err) + } + + return nil +} + +func (store *CassandraStore) KvGet(ctx context.Context, key []byte) (data []byte, err error) { + dir, name := genDirAndName(key) + + if err := store.session.Query( + "SELECT meta FROM filemeta WHERE directory=? AND name=?", + dir, name).Consistency(gocql.One).Scan(&data); err != nil { + if err != gocql.ErrNotFound { + return nil, filer.ErrKvNotFound + } + } + + if len(data) == 0 { + return nil, filer.ErrKvNotFound + } + + return data, nil +} + +func (store *CassandraStore) KvDelete(ctx context.Context, key []byte) (err error) { + dir, name := genDirAndName(key) + + if err := store.session.Query( + "DELETE FROM filemeta WHERE directory=? 
AND name=?", + dir, name).Exec(); err != nil { + return fmt.Errorf("kv delete: %v", err) + } + + return nil +} + +func genDirAndName(key []byte) (dir string, name string) { + for len(key) < 8 { + key = append(key, 0) + } + + dir = base64.StdEncoding.EncodeToString(key[:8]) + name = base64.StdEncoding.EncodeToString(key[8:]) + + return +} diff --git a/weed/filer/configuration.go b/weed/filer/configuration.go new file mode 100644 index 000000000..9ef2f3e0f --- /dev/null +++ b/weed/filer/configuration.go @@ -0,0 +1,93 @@ +package filer + +import ( + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/util" + "os" + "reflect" + "strings" +) + +var ( + Stores []FilerStore +) + +func (f *Filer) LoadConfiguration(config *util.ViperProxy) { + + validateOneEnabledStore(config) + + // load configuration for default filer store + hasDefaultStoreConfigured := false + for _, store := range Stores { + if config.GetBool(store.GetName() + ".enabled") { + store = reflect.New(reflect.ValueOf(store).Elem().Type()).Interface().(FilerStore) + if err := store.Initialize(config, store.GetName()+"."); err != nil { + glog.Fatalf("failed to initialize store for %s: %+v", store.GetName(), err) + } + f.SetStore(store) + glog.V(0).Infof("configured filer store to %s", store.GetName()) + hasDefaultStoreConfigured = true + break + } + } + + if !hasDefaultStoreConfigured { + println() + println("Supported filer stores are:") + for _, store := range Stores { + println(" " + store.GetName()) + } + os.Exit(-1) + } + + // load path-specific filer store here + // f.Store.AddPathSpecificStore(path, store) + storeNames := make(map[string]FilerStore) + for _, store := range Stores { + storeNames[store.GetName()] = store + } + allKeys := config.AllKeys() + for _, key := range allKeys { + if !strings.HasSuffix(key, ".enabled") { + continue + } + key = key[:len(key)-len(".enabled")] + if !strings.Contains(key, ".") { + continue + } + + parts := strings.Split(key, ".") + storeName, storeId := parts[0], parts[1] + + store, found := storeNames[storeName] + if !found { + continue + } + store = reflect.New(reflect.ValueOf(store).Elem().Type()).Interface().(FilerStore) + if err := store.Initialize(config, key+"."); err != nil { + glog.Fatalf("Failed to initialize store for %s: %+v", key, err) + } + location := config.GetString(key + ".location") + if location == "" { + glog.Errorf("path-specific filer store needs %s", key+".location") + os.Exit(-1) + } + f.Store.AddPathSpecificStore(location, storeId, store) + + glog.V(0).Infof("configure filer %s for %s", store.GetName(), location) + } + +} + +func validateOneEnabledStore(config *util.ViperProxy) { + enabledStore := "" + for _, store := range Stores { + if config.GetBool(store.GetName() + ".enabled") { + if enabledStore == "" { + enabledStore = store.GetName() + } else { + glog.Fatalf("Filer store is enabled for both %s and %s", enabledStore, store.GetName()) + } + } + } +} diff --git a/weed/filer/elastic/v7/elastic_store.go b/weed/filer/elastic/v7/elastic_store.go new file mode 100644 index 000000000..a16e5ebca --- /dev/null +++ b/weed/filer/elastic/v7/elastic_store.go @@ -0,0 +1,307 @@ +package elastic + +import ( + "context" + "fmt" + "math" + "strings" + + "github.com/chrislusf/seaweedfs/weed/filer" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + weed_util "github.com/chrislusf/seaweedfs/weed/util" + jsoniter "github.com/json-iterator/go" + elastic "github.com/olivere/elastic/v7" +) + +var ( + indexType 
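LoadConfiguration above accepts two key shapes: "<store>.enabled" picks the default filer store, while "<store>.<id>.enabled" plus a mandatory "<store>.<id>.location" mounts an extra store instance at a path prefix. A sketch of just the key parsing (store names illustrative):

package main

import (
	"fmt"
	"strings"
)

// parseStoreKey splits a config key of the form "<store>.<id>.enabled"
// into its store name and instance id; ok is false for plain
// "<store>.enabled" keys, which configure the default store instead.
func parseStoreKey(key string) (storeName, storeId string, ok bool) {
	if !strings.HasSuffix(key, ".enabled") {
		return "", "", false
	}
	key = strings.TrimSuffix(key, ".enabled")
	parts := strings.Split(key, ".")
	if len(parts) < 2 {
		return "", "", false
	}
	return parts[0], parts[1], true
}

func main() {
	fmt.Println(parseStoreKey("leveldb2.enabled"))               // default store, ok=false
	fmt.Println(parseStoreKey("redis2.persistent_logs.enabled")) // redis2 persistent_logs true
}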
= "_doc" + indexPrefix = ".seaweedfs_" + indexKV = ".seaweedfs_kv_entries" + kvMappings = ` { + "mappings": { + "enabled": false, + "properties": { + "Value":{ + "type": "binary" + } + } + } + }` +) + +type ESEntry struct { + ParentId string `json:"ParentId"` + Entry *filer.Entry +} + +type ESKVEntry struct { + Value []byte `json:"Value"` +} + +func init() { + filer.Stores = append(filer.Stores, &ElasticStore{}) +} + +type ElasticStore struct { + client *elastic.Client + maxPageSize int +} + +func (store *ElasticStore) GetName() string { + return "elastic7" +} + +func (store *ElasticStore) Initialize(configuration weed_util.Configuration, prefix string) (err error) { + options := []elastic.ClientOptionFunc{} + servers := configuration.GetStringSlice(prefix + "servers") + options = append(options, elastic.SetURL(servers...)) + username := configuration.GetString(prefix + "username") + password := configuration.GetString(prefix + "password") + if username != "" && password != "" { + options = append(options, elastic.SetBasicAuth(username, password)) + } + options = append(options, elastic.SetSniff(configuration.GetBool(prefix+"sniff_enabled"))) + options = append(options, elastic.SetHealthcheck(configuration.GetBool(prefix+"healthcheck_enabled"))) + store.maxPageSize = configuration.GetInt(prefix + "index.max_result_window") + if store.maxPageSize <= 0 { + store.maxPageSize = 10000 + } + glog.Infof("filer store elastic endpoints: %v.", servers) + return store.initialize(options) +} + +func (store *ElasticStore) initialize(options []elastic.ClientOptionFunc) (err error) { + ctx := context.Background() + store.client, err = elastic.NewClient(options...) + if err != nil { + return fmt.Errorf("init elastic %v.", err) + } + if ok, err := store.client.IndexExists(indexKV).Do(ctx); err == nil && !ok { + _, err = store.client.CreateIndex(indexKV).Body(kvMappings).Do(ctx) + if err != nil { + return fmt.Errorf("create index(%s) %v.", indexKV, err) + } + } + return nil +} + +func (store *ElasticStore) BeginTransaction(ctx context.Context) (context.Context, error) { + return ctx, nil +} +func (store *ElasticStore) CommitTransaction(ctx context.Context) error { + return nil +} +func (store *ElasticStore) RollbackTransaction(ctx context.Context) error { + return nil +} + +func (store *ElasticStore) ListDirectoryPrefixedEntries(ctx context.Context, dirPath weed_util.FullPath, startFileName string, includeStartFile bool, limit int64, prefix string, eachEntryFunc filer.ListEachEntryFunc) (lastFileName string, err error) { + return lastFileName, filer.ErrUnsupportedListDirectoryPrefixed +} + +func (store *ElasticStore) InsertEntry(ctx context.Context, entry *filer.Entry) (err error) { + index := getIndex(entry.FullPath, false) + dir, _ := entry.FullPath.DirAndName() + id := weed_util.Md5String([]byte(entry.FullPath)) + esEntry := &ESEntry{ + ParentId: weed_util.Md5String([]byte(dir)), + Entry: entry, + } + value, err := jsoniter.Marshal(esEntry) + if err != nil { + glog.Errorf("insert entry(%s) %v.", string(entry.FullPath), err) + return fmt.Errorf("insert entry %v.", err) + } + _, err = store.client.Index(). + Index(index). + Type(indexType). + Id(id). + BodyJson(string(value)). 
+ Do(ctx) + if err != nil { + glog.Errorf("insert entry(%s) %v.", string(entry.FullPath), err) + return fmt.Errorf("insert entry %v.", err) + } + return nil +} + +func (store *ElasticStore) UpdateEntry(ctx context.Context, entry *filer.Entry) (err error) { + return store.InsertEntry(ctx, entry) +} + +func (store *ElasticStore) FindEntry(ctx context.Context, fullpath weed_util.FullPath) (entry *filer.Entry, err error) { + index := getIndex(fullpath, false) + id := weed_util.Md5String([]byte(fullpath)) + searchResult, err := store.client.Get(). + Index(index). + Type(indexType). + Id(id). + Do(ctx) + if elastic.IsNotFound(err) { + return nil, filer_pb.ErrNotFound + } + if searchResult != nil && searchResult.Found { + esEntry := &ESEntry{ + ParentId: "", + Entry: &filer.Entry{}, + } + err := jsoniter.Unmarshal(searchResult.Source, esEntry) + return esEntry.Entry, err + } + glog.Errorf("find entry(%s),%v.", string(fullpath), err) + return nil, filer_pb.ErrNotFound +} + +func (store *ElasticStore) DeleteEntry(ctx context.Context, fullpath weed_util.FullPath) (err error) { + index := getIndex(fullpath, false) + id := weed_util.Md5String([]byte(fullpath)) + if strings.Count(string(fullpath), "/") == 1 { + return store.deleteIndex(ctx, index) + } + return store.deleteEntry(ctx, index, id) +} + +func (store *ElasticStore) deleteIndex(ctx context.Context, index string) (err error) { + deleteResult, err := store.client.DeleteIndex(index).Do(ctx) + if elastic.IsNotFound(err) || (err == nil && deleteResult.Acknowledged) { + return nil + } + glog.Errorf("delete index(%s) %v.", index, err) + return err +} + +func (store *ElasticStore) deleteEntry(ctx context.Context, index, id string) (err error) { + deleteResult, err := store.client.Delete(). + Index(index). + Type(indexType). + Id(id). 
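Elasticsearch documents above are keyed deterministically: the _id is the md5 of the entry's full path and ParentId is the md5 of its parent directory, which enables both point lookups and the ParentId match query used for listing. A sketch of the id scheme:

package main

import (
	"crypto/md5"
	"fmt"
	"path"
)

// md5String mirrors weed_util.Md5String.
func md5String(s string) string {
	return fmt.Sprintf("%x", md5.Sum([]byte(s)))
}

func main() {
	fullPath := "/bucket1/dir/file.txt"
	id := md5String(fullPath)                 // document _id
	parentId := md5String(path.Dir(fullPath)) // ParentId field
	fmt.Println(id, parentId)
}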
+ Do(ctx) + if err == nil { + if deleteResult.Result == "deleted" || deleteResult.Result == "not_found" { + return nil + } + } + glog.Errorf("delete entry(index:%s,_id:%s) %v.", index, id, err) + return fmt.Errorf("delete entry %v.", err) +} + +func (store *ElasticStore) DeleteFolderChildren(ctx context.Context, fullpath weed_util.FullPath) (err error) { + _, err = store.ListDirectoryEntries(ctx, fullpath, "", false, math.MaxInt32, func(entry *filer.Entry) bool { + if err := store.DeleteEntry(ctx, entry.FullPath); err != nil { + glog.Errorf("elastic delete %s: %v.", entry.FullPath, err) + return false + } + return true + }) + return +} + +func (store *ElasticStore) ListDirectoryEntries(ctx context.Context, dirPath weed_util.FullPath, startFileName string, includeStartFile bool, limit int64, eachEntryFunc filer.ListEachEntryFunc) (lastFileName string, err error) { + return store.listDirectoryEntries(ctx, dirPath, startFileName, includeStartFile, limit, eachEntryFunc) +} + +func (store *ElasticStore) listDirectoryEntries( + ctx context.Context, fullpath weed_util.FullPath, startFileName string, inclusive bool, limit int64, eachEntryFunc filer.ListEachEntryFunc) (lastFileName string, err error) { + first := true + index := getIndex(fullpath, true) + nextStart := "" + parentId := weed_util.Md5String([]byte(fullpath)) + if _, err = store.client.Refresh(index).Do(ctx); err != nil { + if elastic.IsNotFound(err) { + store.client.CreateIndex(index).Do(ctx) + return + } + } + for { + result := &elastic.SearchResult{} + if (startFileName == "" && first) || inclusive { + if result, err = store.search(ctx, index, parentId); err != nil { + glog.Errorf("search (%s,%s,%t,%d) %v.", string(fullpath), startFileName, inclusive, limit, err) + return + } + } else { + fullPath := string(fullpath) + "/" + startFileName + if !first { + fullPath = nextStart + } + after := weed_util.Md5String([]byte(fullPath)) + if result, err = store.searchAfter(ctx, index, parentId, after); err != nil { + glog.Errorf("searchAfter (%s,%s,%t,%d) %v.", string(fullpath), startFileName, inclusive, limit, err) + return + } + } + first = false + for _, hit := range result.Hits.Hits { + esEntry := &ESEntry{ + ParentId: "", + Entry: &filer.Entry{}, + } + if err := jsoniter.Unmarshal(hit.Source, esEntry); err == nil { + limit-- + if limit < 0 { + return lastFileName, nil + } + nextStart = string(esEntry.Entry.FullPath) + fileName := esEntry.Entry.FullPath.Name() + if fileName == startFileName && !inclusive { + continue + } + if !eachEntryFunc(esEntry.Entry) { + break + } + lastFileName = fileName + } + } + if len(result.Hits.Hits) < store.maxPageSize { + break + } + } + return +} + +func (store *ElasticStore) search(ctx context.Context, index, parentId string) (result *elastic.SearchResult, err error) { + if count, err := store.client.Count(index).Do(ctx); err == nil && count == 0 { + return &elastic.SearchResult{ + Hits: &elastic.SearchHits{ + Hits: make([]*elastic.SearchHit, 0)}, + }, nil + } + queryResult, err := store.client.Search(). + Index(index). + Query(elastic.NewMatchQuery("ParentId", parentId)). + Size(store.maxPageSize). + Sort("_id", false). + Do(ctx) + return queryResult, err +} + +func (store *ElasticStore) searchAfter(ctx context.Context, index, parentId, after string) (result *elastic.SearchResult, err error) { + queryResult, err := store.client.Search(). + Index(index). + Query(elastic.NewMatchQuery("ParentId", parentId)). + SearchAfter(after). + Size(store.maxPageSize). + Sort("_id", false). 
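listDirectoryEntries above pages with a cursor rather than from/size: results sort by _id, each page passes the md5 of the last path seen as search_after, and the loop ends when a page comes back shorter than max_result_window. A dependency-free sketch of the cursor loop, where fetchPage stands in for the search/searchAfter helpers:

package main

import "fmt"

const pageSize = 3

// fetchPage stands in for store.search / store.searchAfter: it returns
// up to pageSize ids strictly greater than the cursor, in sorted order.
func fetchPage(all []string, after string) []string {
	var page []string
	for _, id := range all {
		if id > after && len(page) < pageSize {
			page = append(page, id)
		}
	}
	return page
}

func main() {
	ids := []string{"a", "b", "c", "d", "e", "f", "g"} // sorted by _id
	cursor := ""
	for {
		page := fetchPage(ids, cursor)
		for _, id := range page {
			fmt.Println("entry", id)
		}
		if len(page) < pageSize { // a short page means no more results
			break
		}
		cursor = page[len(page)-1] // search_after for the next page
	}
}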
+ Do(ctx) + return queryResult, err + +} + +func (store *ElasticStore) Shutdown() { + store.client.Stop() +} + +func getIndex(fullpath weed_util.FullPath, isDirectory bool) string { + path := strings.Split(string(fullpath), "/") + if isDirectory && len(path) >= 2 { + return indexPrefix + strings.ToLower(path[1]) + } + if len(path) > 2 { + return indexPrefix + strings.ToLower(path[1]) + } + if len(path) == 2 { + return indexPrefix + } + return "" +} diff --git a/weed/filer/elastic/v7/elastic_store_kv.go b/weed/filer/elastic/v7/elastic_store_kv.go new file mode 100644 index 000000000..99c03314e --- /dev/null +++ b/weed/filer/elastic/v7/elastic_store_kv.go @@ -0,0 +1,65 @@ +package elastic + +import ( + "context" + "fmt" + + "github.com/chrislusf/seaweedfs/weed/filer" + + "github.com/chrislusf/seaweedfs/weed/glog" + jsoniter "github.com/json-iterator/go" + elastic "github.com/olivere/elastic/v7" +) + +func (store *ElasticStore) KvDelete(ctx context.Context, key []byte) (err error) { + deleteResult, err := store.client.Delete(). + Index(indexKV). + Type(indexType). + Id(string(key)). + Do(ctx) + if err == nil { + if deleteResult.Result == "deleted" || deleteResult.Result == "not_found" { + return nil + } + } + glog.Errorf("delete key(id:%s) %v.", string(key), err) + return fmt.Errorf("delete key %v.", err) +} + +func (store *ElasticStore) KvGet(ctx context.Context, key []byte) (value []byte, err error) { + searchResult, err := store.client.Get(). + Index(indexKV). + Type(indexType). + Id(string(key)). + Do(ctx) + if elastic.IsNotFound(err) { + return value, filer.ErrKvNotFound + } + if searchResult != nil && searchResult.Found { + esEntry := &ESKVEntry{} + if err := jsoniter.Unmarshal(searchResult.Source, esEntry); err == nil { + return esEntry.Value, nil + } + } + glog.Errorf("find key(%s),%v.", string(key), err) + return value, filer.ErrKvNotFound +} + +func (store *ElasticStore) KvPut(ctx context.Context, key []byte, value []byte) (err error) { + esEntry := &ESKVEntry{value} + val, err := jsoniter.Marshal(esEntry) + if err != nil { + glog.Errorf("insert key(%s) %v.", string(key), err) + return fmt.Errorf("insert key %v.", err) + } + _, err = store.client.Index(). + Index(indexKV). + Type(indexType). + Id(string(key)). + BodyJson(string(val)). 
+ Do(ctx) + if err != nil { + return fmt.Errorf("kv put: %v", err) + } + return nil +} diff --git a/weed/filer/entry.go b/weed/filer/entry.go new file mode 100644 index 000000000..b7c8370e6 --- /dev/null +++ b/weed/filer/entry.go @@ -0,0 +1,113 @@ +package filer + +import ( + "os" + "time" + + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/util" +) + +type Attr struct { + Mtime time.Time // time of last modification + Crtime time.Time // time of creation (OS X only) + Mode os.FileMode // file mode + Uid uint32 // owner uid + Gid uint32 // group gid + Mime string // mime type + Replication string // replication + Collection string // collection name + TtlSec int32 // ttl in seconds + DiskType string + UserName string + GroupNames []string + SymlinkTarget string + Md5 []byte + FileSize uint64 +} + +func (attr Attr) IsDirectory() bool { + return attr.Mode&os.ModeDir > 0 +} + +type Entry struct { + util.FullPath + + Attr + Extended map[string][]byte + + // the following is for files + Chunks []*filer_pb.FileChunk `json:"chunks,omitempty"` + + HardLinkId HardLinkId + HardLinkCounter int32 + Content []byte +} + +func (entry *Entry) Size() uint64 { + return maxUint64(maxUint64(TotalSize(entry.Chunks), entry.FileSize), uint64(len(entry.Content))) +} + +func (entry *Entry) Timestamp() time.Time { + if entry.IsDirectory() { + return entry.Crtime + } else { + return entry.Mtime + } +} + +func (entry *Entry) ToProtoEntry() *filer_pb.Entry { + if entry == nil { + return nil + } + return &filer_pb.Entry{ + Name: entry.FullPath.Name(), + IsDirectory: entry.IsDirectory(), + Attributes: EntryAttributeToPb(entry), + Chunks: entry.Chunks, + Extended: entry.Extended, + HardLinkId: entry.HardLinkId, + HardLinkCounter: entry.HardLinkCounter, + Content: entry.Content, + } +} + +func (entry *Entry) ToProtoFullEntry() *filer_pb.FullEntry { + if entry == nil { + return nil + } + dir, _ := entry.FullPath.DirAndName() + return &filer_pb.FullEntry{ + Dir: dir, + Entry: entry.ToProtoEntry(), + } +} + +func (entry *Entry) Clone() *Entry { + return &Entry{ + FullPath: entry.FullPath, + Attr: entry.Attr, + Chunks: entry.Chunks, + Extended: entry.Extended, + HardLinkId: entry.HardLinkId, + HardLinkCounter: entry.HardLinkCounter, + } +} + +func FromPbEntry(dir string, entry *filer_pb.Entry) *Entry { + return &Entry{ + FullPath: util.NewFullPath(dir, entry.Name), + Attr: PbToEntryAttribute(entry.Attributes), + Chunks: entry.Chunks, + HardLinkId: HardLinkId(entry.HardLinkId), + HardLinkCounter: entry.HardLinkCounter, + Content: entry.Content, + } +} + +func maxUint64(x, y uint64) uint64 { + if x > y { + return x + } + return y +} diff --git a/weed/filer2/entry_codec.go b/weed/filer/entry_codec.go similarity index 73% rename from weed/filer2/entry_codec.go rename to weed/filer/entry_codec.go index 3a2dc6134..4c613f068 100644 --- a/weed/filer2/entry_codec.go +++ b/weed/filer/entry_codec.go @@ -1,4 +1,4 @@ -package filer2 +package filer import ( "bytes" @@ -13,9 +13,12 @@ import ( func (entry *Entry) EncodeAttributesAndChunks() ([]byte, error) { message := &filer_pb.Entry{ - Attributes: EntryAttributeToPb(entry), - Chunks: entry.Chunks, - Extended: entry.Extended, + Attributes: EntryAttributeToPb(entry), + Chunks: entry.Chunks, + Extended: entry.Extended, + HardLinkId: entry.HardLinkId, + HardLinkCounter: entry.HardLinkCounter, + Content: entry.Content, } return proto.Marshal(message) } @@ -34,6 +37,10 @@ func (entry *Entry) DecodeAttributesAndChunks(blob []byte) error { entry.Chunks = 
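Entry.Size above is the maximum of three signals: the furthest byte reached by any chunk, the FileSize attribute, and the length of any inlined Content. A tiny worked sketch:

package main

import "fmt"

type chunk struct {
	offset int64
	size   uint64
}

// totalSize mirrors filer.TotalSize: the furthest byte any chunk reaches.
func totalSize(chunks []chunk) (size uint64) {
	for _, c := range chunks {
		if t := uint64(c.offset) + c.size; size < t {
			size = t
		}
	}
	return
}

func max64(x, y uint64) uint64 {
	if x > y {
		return x
	}
	return y
}

func main() {
	chunks := []chunk{{0, 1024}, {1024, 512}} // covers [0, 1536)
	fileSize := uint64(2048)                  // attribute claims more
	content := make([]byte, 100)              // small inlined content
	size := max64(max64(totalSize(chunks), fileSize), uint64(len(content)))
	fmt.Println(size) // 2048: the attribute wins here
}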
message.Chunks + entry.HardLinkId = message.HardLinkId + entry.HardLinkCounter = message.HardLinkCounter + entry.Content = message.Content + return nil } @@ -49,9 +56,12 @@ func EntryAttributeToPb(entry *Entry) *filer_pb.FuseAttributes { Collection: entry.Attr.Collection, Replication: entry.Attr.Replication, TtlSec: entry.Attr.TtlSec, + DiskType: entry.Attr.DiskType, UserName: entry.Attr.UserName, GroupName: entry.Attr.GroupNames, SymlinkTarget: entry.Attr.SymlinkTarget, + Md5: entry.Attr.Md5, + FileSize: entry.Attr.FileSize, } } @@ -59,6 +69,10 @@ func PbToEntryAttribute(attr *filer_pb.FuseAttributes) Attr { t := Attr{} + if attr == nil { + return t + } + t.Crtime = time.Unix(attr.Crtime, 0) t.Mtime = time.Unix(attr.Mtime, 0) t.Mode = os.FileMode(attr.FileMode) @@ -68,9 +82,12 @@ func PbToEntryAttribute(attr *filer_pb.FuseAttributes) Attr { t.Collection = attr.Collection t.Replication = attr.Replication t.TtlSec = attr.TtlSec + t.DiskType = attr.DiskType t.UserName = attr.UserName t.GroupNames = attr.GroupName t.SymlinkTarget = attr.SymlinkTarget + t.Md5 = attr.Md5 + t.FileSize = attr.FileSize return t } @@ -93,11 +110,25 @@ func EqualEntry(a, b *Entry) bool { return false } + if !bytes.Equal(a.Md5, b.Md5) { + return false + } + for i := 0; i < len(a.Chunks); i++ { if !proto.Equal(a.Chunks[i], b.Chunks[i]) { return false } } + + if !bytes.Equal(a.HardLinkId, b.HardLinkId) { + return false + } + if a.HardLinkCounter != b.HardLinkCounter { + return false + } + if !bytes.Equal(a.Content, b.Content) { + return false + } return true } diff --git a/weed/filer2/etcd/etcd_store.go b/weed/filer/etcd/etcd_store.go similarity index 60% rename from weed/filer2/etcd/etcd_store.go rename to weed/filer/etcd/etcd_store.go index 2eb9e3e86..71ed738f9 100644 --- a/weed/filer2/etcd/etcd_store.go +++ b/weed/filer/etcd/etcd_store.go @@ -1,15 +1,18 @@ package etcd import ( + "bytes" "context" "fmt" "strings" "time" - "github.com/chrislusf/seaweedfs/weed/filer2" + "go.etcd.io/etcd/clientv3" + + "github.com/chrislusf/seaweedfs/weed/filer" "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" weed_util "github.com/chrislusf/seaweedfs/weed/util" - "go.etcd.io/etcd/clientv3" ) const ( @@ -17,7 +20,7 @@ const ( ) func init() { - filer2.Stores = append(filer2.Stores, &EtcdStore{}) + filer.Stores = append(filer.Stores, &EtcdStore{}) } type EtcdStore struct { @@ -28,13 +31,13 @@ func (store *EtcdStore) GetName() string { return "etcd" } -func (store *EtcdStore) Initialize(configuration weed_util.Configuration) (err error) { - servers := configuration.GetString("servers") +func (store *EtcdStore) Initialize(configuration weed_util.Configuration, prefix string) (err error) { + servers := configuration.GetString(prefix + "servers") if servers == "" { servers = "localhost:2379" } - timeout := configuration.GetString("timeout") + timeout := configuration.GetString(prefix + "timeout") if timeout == "" { timeout = "3s" } @@ -71,41 +74,45 @@ func (store *EtcdStore) RollbackTransaction(ctx context.Context) error { return nil } -func (store *EtcdStore) InsertEntry(ctx context.Context, entry *filer2.Entry) (err error) { +func (store *EtcdStore) InsertEntry(ctx context.Context, entry *filer.Entry) (err error) { key := genKey(entry.DirAndName()) - value, err := entry.EncodeAttributesAndChunks() + meta, err := entry.EncodeAttributesAndChunks() if err != nil { return fmt.Errorf("encoding %s %+v: %v", entry.FullPath, entry.Attr, err) } - if _, err := store.client.Put(ctx, string(key), 
string(value)); err != nil { + if len(entry.Chunks) > 50 { + meta = weed_util.MaybeGzipData(meta) + } + + if _, err := store.client.Put(ctx, string(key), string(meta)); err != nil { return fmt.Errorf("persisting %s : %v", entry.FullPath, err) } return nil } -func (store *EtcdStore) UpdateEntry(ctx context.Context, entry *filer2.Entry) (err error) { +func (store *EtcdStore) UpdateEntry(ctx context.Context, entry *filer.Entry) (err error) { return store.InsertEntry(ctx, entry) } -func (store *EtcdStore) FindEntry(ctx context.Context, fullpath filer2.FullPath) (entry *filer2.Entry, err error) { +func (store *EtcdStore) FindEntry(ctx context.Context, fullpath weed_util.FullPath) (entry *filer.Entry, err error) { key := genKey(fullpath.DirAndName()) resp, err := store.client.Get(ctx, string(key)) if err != nil { - return nil, fmt.Errorf("get %s : %v", entry.FullPath, err) + return nil, fmt.Errorf("get %s : %v", fullpath, err) } if len(resp.Kvs) == 0 { - return nil, filer2.ErrNotFound + return nil, filer_pb.ErrNotFound } - entry = &filer2.Entry{ + entry = &filer.Entry{ FullPath: fullpath, } - err = entry.DecodeAttributesAndChunks(resp.Kvs[0].Value) + err = entry.DecodeAttributesAndChunks(weed_util.MaybeDecompressData(resp.Kvs[0].Value)) if err != nil { return entry, fmt.Errorf("decode %s : %v", entry.FullPath, err) } @@ -113,7 +120,7 @@ func (store *EtcdStore) FindEntry(ctx context.Context, fullpath filer2.FullPath) return entry, nil } -func (store *EtcdStore) DeleteEntry(ctx context.Context, fullpath filer2.FullPath) (err error) { +func (store *EtcdStore) DeleteEntry(ctx context.Context, fullpath weed_util.FullPath) (err error) { key := genKey(fullpath.DirAndName()) if _, err := store.client.Delete(ctx, string(key)); err != nil { @@ -123,7 +130,7 @@ func (store *EtcdStore) DeleteEntry(ctx context.Context, fullpath filer2.FullPat return nil } -func (store *EtcdStore) DeleteFolderChildren(ctx context.Context, fullpath filer2.FullPath) (err error) { +func (store *EtcdStore) DeleteFolderChildren(ctx context.Context, fullpath weed_util.FullPath) (err error) { directoryPrefix := genDirectoryKeyPrefix(fullpath, "") if _, err := store.client.Delete(ctx, string(directoryPrefix), clientv3.WithPrefix()); err != nil { @@ -133,41 +140,53 @@ func (store *EtcdStore) DeleteFolderChildren(ctx context.Context, fullpath filer return nil } -func (store *EtcdStore) ListDirectoryEntries( - ctx context.Context, fullpath filer2.FullPath, startFileName string, inclusive bool, limit int, -) (entries []*filer2.Entry, err error) { - directoryPrefix := genDirectoryKeyPrefix(fullpath, "") +func (store *EtcdStore) ListDirectoryPrefixedEntries(ctx context.Context, dirPath weed_util.FullPath, startFileName string, includeStartFile bool, limit int64, prefix string, eachEntryFunc filer.ListEachEntryFunc) (lastFileName string, err error) { + return lastFileName, filer.ErrUnsupportedListDirectoryPrefixed +} - resp, err := store.client.Get(ctx, string(directoryPrefix), +func (store *EtcdStore) ListDirectoryEntries(ctx context.Context, dirPath weed_util.FullPath, startFileName string, includeStartFile bool, limit int64, eachEntryFunc filer.ListEachEntryFunc) (lastFileName string, err error) { + directoryPrefix := genDirectoryKeyPrefix(dirPath, "") + lastFileStart := directoryPrefix + if startFileName != "" { + lastFileStart = genDirectoryKeyPrefix(dirPath, startFileName) + } + + resp, err := store.client.Get(ctx, string(lastFileStart), clientv3.WithPrefix(), clientv3.WithSort(clientv3.SortByKey, clientv3.SortDescend)) if err != nil 
{ - return nil, fmt.Errorf("list %s : %v", fullpath, err) + return lastFileName, fmt.Errorf("list %s : %v", dirPath, err) } for _, kv := range resp.Kvs { + if !bytes.HasPrefix(kv.Key, directoryPrefix) { + break + } fileName := getNameFromKey(kv.Key) if fileName == "" { continue } - if fileName == startFileName && !inclusive { + if fileName == startFileName && !includeStartFile { continue } limit-- if limit < 0 { break } - entry := &filer2.Entry{ - FullPath: filer2.NewFullPath(string(fullpath), fileName), + entry := &filer.Entry{ + FullPath: weed_util.NewFullPath(string(dirPath), fileName), } - if decodeErr := entry.DecodeAttributesAndChunks(kv.Value); decodeErr != nil { + if decodeErr := entry.DecodeAttributesAndChunks(weed_util.MaybeDecompressData(kv.Value)); decodeErr != nil { err = decodeErr glog.V(0).Infof("list %s : %v", entry.FullPath, err) break } - entries = append(entries, entry) + if !eachEntryFunc(entry) { + break + } + lastFileName = fileName } - return entries, err + return lastFileName, err } func genKey(dirPath, fileName string) (key []byte) { @@ -177,7 +196,7 @@ func genKey(dirPath, fileName string) (key []byte) { return key } -func genDirectoryKeyPrefix(fullpath filer2.FullPath, startFileName string) (keyPrefix []byte) { +func genDirectoryKeyPrefix(fullpath weed_util.FullPath, startFileName string) (keyPrefix []byte) { keyPrefix = []byte(string(fullpath)) keyPrefix = append(keyPrefix, DIR_FILE_SEPARATOR) if len(startFileName) > 0 { @@ -194,3 +213,7 @@ func getNameFromKey(key []byte) string { return string(key[sepIndex+1:]) } + +func (store *EtcdStore) Shutdown() { + store.client.Close() +} diff --git a/weed/filer/etcd/etcd_store_kv.go b/weed/filer/etcd/etcd_store_kv.go new file mode 100644 index 000000000..df252f46c --- /dev/null +++ b/weed/filer/etcd/etcd_store_kv.go @@ -0,0 +1,44 @@ +package etcd + +import ( + "context" + "fmt" + "github.com/chrislusf/seaweedfs/weed/filer" +) + +func (store *EtcdStore) KvPut(ctx context.Context, key []byte, value []byte) (err error) { + + _, err = store.client.Put(ctx, string(key), string(value)) + + if err != nil { + return fmt.Errorf("kv put: %v", err) + } + + return nil +} + +func (store *EtcdStore) KvGet(ctx context.Context, key []byte) (value []byte, err error) { + + resp, err := store.client.Get(ctx, string(key)) + + if err != nil { + return nil, fmt.Errorf("kv get: %v", err) + } + + if len(resp.Kvs) == 0 { + return nil, filer.ErrKvNotFound + } + + return resp.Kvs[0].Value, nil +} + +func (store *EtcdStore) KvDelete(ctx context.Context, key []byte) (err error) { + + _, err = store.client.Delete(ctx, string(key)) + + if err != nil { + return fmt.Errorf("kv delete: %v", err) + } + + return nil +} diff --git a/weed/filer/filechunk_manifest.go b/weed/filer/filechunk_manifest.go new file mode 100644 index 000000000..c709dc819 --- /dev/null +++ b/weed/filer/filechunk_manifest.go @@ -0,0 +1,194 @@ +package filer + +import ( + "bytes" + "fmt" + "github.com/chrislusf/seaweedfs/weed/wdclient" + "io" + "math" + "time" + + "github.com/golang/protobuf/proto" + + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/util" +) + +const ( + ManifestBatch = 1000 +) + +func HasChunkManifest(chunks []*filer_pb.FileChunk) bool { + for _, chunk := range chunks { + if chunk.IsChunkManifest { + return true + } + } + return false +} + +func SeparateManifestChunks(chunks []*filer_pb.FileChunk) (manifestChunks, nonManifestChunks []*filer_pb.FileChunk) { + for _, c := range chunks 
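The etcd store above flattens the tree into keys of the form <directory><separator><name>, so a directory listing is a prefix scan over <directory><separator>. A sketch of the scheme (the NUL separator is an assumption; the store defines its own DIR_FILE_SEPARATOR constant):

package main

import (
	"fmt"
	"strings"
)

const sep = byte(0x00) // assumed separator value

func genKey(dir, name string) []byte {
	key := append([]byte(dir), sep)
	return append(key, name...)
}

func dirPrefix(dir string) []byte {
	return append([]byte(dir), sep)
}

func main() {
	keys := []string{
		string(genKey("/home", "a.txt")),
		string(genKey("/home", "b.txt")),
		string(genKey("/home/sub", "c.txt")),
	}
	prefix := string(dirPrefix("/home"))
	for _, k := range keys {
		// the prefix check is what clientv3.WithPrefix() does server-side;
		// note "/home/sub" entries do not match, so listing stays shallow
		if strings.HasPrefix(k, prefix) {
			fmt.Printf("%q\n", k)
		}
	}
}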
{ + if c.IsChunkManifest { + manifestChunks = append(manifestChunks, c) + } else { + nonManifestChunks = append(nonManifestChunks, c) + } + } + return +} + +func ResolveChunkManifest(lookupFileIdFn wdclient.LookupFileIdFunctionType, chunks []*filer_pb.FileChunk) (dataChunks, manifestChunks []*filer_pb.FileChunk, manifestResolveErr error) { + // TODO maybe parallel this + for _, chunk := range chunks { + if !chunk.IsChunkManifest { + dataChunks = append(dataChunks, chunk) + continue + } + + resolvedChunks, err := ResolveOneChunkManifest(lookupFileIdFn, chunk) + if err != nil { + return chunks, nil, err + } + + manifestChunks = append(manifestChunks, chunk) + // recursive + dchunks, mchunks, subErr := ResolveChunkManifest(lookupFileIdFn, resolvedChunks) + if subErr != nil { + return chunks, nil, subErr + } + dataChunks = append(dataChunks, dchunks...) + manifestChunks = append(manifestChunks, mchunks...) + } + return +} + +func ResolveOneChunkManifest(lookupFileIdFn wdclient.LookupFileIdFunctionType, chunk *filer_pb.FileChunk) (dataChunks []*filer_pb.FileChunk, manifestResolveErr error) { + if !chunk.IsChunkManifest { + return + } + + // IsChunkManifest + data, err := fetchChunk(lookupFileIdFn, chunk.GetFileIdString(), chunk.CipherKey, chunk.IsCompressed) + if err != nil { + return nil, fmt.Errorf("fail to read manifest %s: %v", chunk.GetFileIdString(), err) + } + m := &filer_pb.FileChunkManifest{} + if err := proto.Unmarshal(data, m); err != nil { + return nil, fmt.Errorf("fail to unmarshal manifest %s: %v", chunk.GetFileIdString(), err) + } + + // recursive + filer_pb.AfterEntryDeserialization(m.Chunks) + return m.Chunks, nil +} + +// TODO fetch from cache for weed mount? +func fetchChunk(lookupFileIdFn wdclient.LookupFileIdFunctionType, fileId string, cipherKey []byte, isGzipped bool) ([]byte, error) { + urlStrings, err := lookupFileIdFn(fileId) + if err != nil { + glog.Errorf("operation LookupFileId %s failed, err: %v", fileId, err) + return nil, err + } + return retriedFetchChunkData(urlStrings, cipherKey, isGzipped, true, 0, 0) +} + +func retriedFetchChunkData(urlStrings []string, cipherKey []byte, isGzipped bool, isFullChunk bool, offset int64, size int) ([]byte, error) { + + var err error + var shouldRetry bool + receivedData := make([]byte, 0, size) + + for waitTime := time.Second; waitTime < util.RetryWaitTime; waitTime += waitTime / 2 { + for _, urlString := range urlStrings { + receivedData = receivedData[:0] + shouldRetry, err = util.ReadUrlAsStream(urlString+"?readDeleted=true", cipherKey, isGzipped, isFullChunk, offset, size, func(data []byte) { + receivedData = append(receivedData, data...) 
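retriedFetchChunkData above cycles through all replica URLs each round and sleeps between rounds with a delay that grows by half: 1s, 1.5s, 2.25s, and so on until util.RetryWaitTime. A sketch of that schedule (the 30-second cap is an assumed value):

package main

import (
	"fmt"
	"time"
)

func main() {
	const retryWaitTime = 30 * time.Second // stand-in for util.RetryWaitTime
	for waitTime := time.Second; waitTime < retryWaitTime; waitTime += waitTime / 2 {
		fmt.Println("next retry in", waitTime)
	}
	// 1s, 1.5s, 2.25s, ... roughly x1.5 per round until the cap
}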
+ }) + if !shouldRetry { + break + } + if err != nil { + glog.V(0).Infof("read %s failed, err: %v", urlString, err) + } else { + break + } + } + if err != nil && shouldRetry { + glog.V(0).Infof("retry reading in %v", waitTime) + time.Sleep(waitTime) + } else { + break + } + } + + return receivedData, err + +} + +func MaybeManifestize(saveFunc SaveDataAsChunkFunctionType, inputChunks []*filer_pb.FileChunk) (chunks []*filer_pb.FileChunk, err error) { + return doMaybeManifestize(saveFunc, inputChunks, ManifestBatch, mergeIntoManifest) +} + +func doMaybeManifestize(saveFunc SaveDataAsChunkFunctionType, inputChunks []*filer_pb.FileChunk, mergeFactor int, mergefn func(saveFunc SaveDataAsChunkFunctionType, dataChunks []*filer_pb.FileChunk) (manifestChunk *filer_pb.FileChunk, err error)) (chunks []*filer_pb.FileChunk, err error) { + + var dataChunks []*filer_pb.FileChunk + for _, chunk := range inputChunks { + if !chunk.IsChunkManifest { + dataChunks = append(dataChunks, chunk) + } else { + chunks = append(chunks, chunk) + } + } + + remaining := len(dataChunks) + for i := 0; i+mergeFactor <= len(dataChunks); i += mergeFactor { + chunk, err := mergefn(saveFunc, dataChunks[i:i+mergeFactor]) + if err != nil { + return dataChunks, err + } + chunks = append(chunks, chunk) + remaining -= mergeFactor + } + // remaining + for i := len(dataChunks) - remaining; i < len(dataChunks); i++ { + chunks = append(chunks, dataChunks[i]) + } + return +} + +func mergeIntoManifest(saveFunc SaveDataAsChunkFunctionType, dataChunks []*filer_pb.FileChunk) (manifestChunk *filer_pb.FileChunk, err error) { + + filer_pb.BeforeEntrySerialization(dataChunks) + + // create and serialize the manifest + data, serErr := proto.Marshal(&filer_pb.FileChunkManifest{ + Chunks: dataChunks, + }) + if serErr != nil { + return nil, fmt.Errorf("serializing manifest: %v", serErr) + } + + minOffset, maxOffset := int64(math.MaxInt64), int64(math.MinInt64) + for _, chunk := range dataChunks { + if minOffset > int64(chunk.Offset) { + minOffset = chunk.Offset + } + if maxOffset < int64(chunk.Size)+chunk.Offset { + maxOffset = int64(chunk.Size) + chunk.Offset + } + } + + manifestChunk, _, _, err = saveFunc(bytes.NewReader(data), "", 0) + if err != nil { + return nil, err + } + manifestChunk.IsChunkManifest = true + manifestChunk.Offset = minOffset + manifestChunk.Size = uint64(maxOffset - minOffset) + + return +} + +type SaveDataAsChunkFunctionType func(reader io.Reader, name string, offset int64) (chunk *filer_pb.FileChunk, collection, replication string, err error) diff --git a/weed/filer/filechunk_manifest_test.go b/weed/filer/filechunk_manifest_test.go new file mode 100644 index 000000000..ce12c5da6 --- /dev/null +++ b/weed/filer/filechunk_manifest_test.go @@ -0,0 +1,113 @@ +package filer + +import ( + "bytes" + "math" + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" +) + +func TestDoMaybeManifestize(t *testing.T) { + var manifestTests = []struct { + inputs []*filer_pb.FileChunk + expected []*filer_pb.FileChunk + }{ + { + inputs: []*filer_pb.FileChunk{ + {FileId: "1", IsChunkManifest: false}, + {FileId: "2", IsChunkManifest: false}, + {FileId: "3", IsChunkManifest: false}, + {FileId: "4", IsChunkManifest: false}, + }, + expected: []*filer_pb.FileChunk{ + {FileId: "12", IsChunkManifest: true}, + {FileId: "34", IsChunkManifest: true}, + }, + }, + { + inputs: []*filer_pb.FileChunk{ + {FileId: "1", IsChunkManifest: true}, + {FileId: "2", IsChunkManifest: false}, + {FileId: "3", 
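doMaybeManifestize above folds every full group of mergeFactor data chunks (ManifestBatch = 1000 in production, 2 in the tests below) into one manifest chunk, while leftovers and chunks that are already manifests pass through unchanged. A sketch of the batching arithmetic:

package main

import "fmt"

// batches reports how many manifest chunks and how many leftover flat
// chunks doMaybeManifestize produces for n data chunks.
func batches(n, mergeFactor int) (manifests, remainder int) {
	return n / mergeFactor, n % mergeFactor
}

func main() {
	fmt.Println(batches(4, 2))       // 2 manifests, 0 flat chunks
	fmt.Println(batches(2500, 1000)) // 2 manifests, 500 flat chunks
}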
diff --git a/weed/filer/filechunk_manifest_test.go b/weed/filer/filechunk_manifest_test.go
new file mode 100644
index 000000000..ce12c5da6
--- /dev/null
+++ b/weed/filer/filechunk_manifest_test.go
@@ -0,0 +1,113 @@
+package filer
+
+import (
+    "bytes"
+    "math"
+    "testing"
+
+    "github.com/stretchr/testify/assert"
+
+    "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+)
+
+func TestDoMaybeManifestize(t *testing.T) {
+    var manifestTests = []struct {
+        inputs   []*filer_pb.FileChunk
+        expected []*filer_pb.FileChunk
+    }{
+        {
+            inputs: []*filer_pb.FileChunk{
+                {FileId: "1", IsChunkManifest: false},
+                {FileId: "2", IsChunkManifest: false},
+                {FileId: "3", IsChunkManifest: false},
+                {FileId: "4", IsChunkManifest: false},
+            },
+            expected: []*filer_pb.FileChunk{
+                {FileId: "12", IsChunkManifest: true},
+                {FileId: "34", IsChunkManifest: true},
+            },
+        },
+        {
+            inputs: []*filer_pb.FileChunk{
+                {FileId: "1", IsChunkManifest: true},
+                {FileId: "2", IsChunkManifest: false},
+                {FileId: "3", IsChunkManifest: false},
+                {FileId: "4", IsChunkManifest: false},
+            },
+            expected: []*filer_pb.FileChunk{
+                {FileId: "1", IsChunkManifest: true},
+                {FileId: "23", IsChunkManifest: true},
+                {FileId: "4", IsChunkManifest: false},
+            },
+        },
+        {
+            inputs: []*filer_pb.FileChunk{
+                {FileId: "1", IsChunkManifest: false},
+                {FileId: "2", IsChunkManifest: true},
+                {FileId: "3", IsChunkManifest: false},
+                {FileId: "4", IsChunkManifest: false},
+            },
+            expected: []*filer_pb.FileChunk{
+                {FileId: "2", IsChunkManifest: true},
+                {FileId: "13", IsChunkManifest: true},
+                {FileId: "4", IsChunkManifest: false},
+            },
+        },
+        {
+            inputs: []*filer_pb.FileChunk{
+                {FileId: "1", IsChunkManifest: true},
+                {FileId: "2", IsChunkManifest: true},
+                {FileId: "3", IsChunkManifest: false},
+                {FileId: "4", IsChunkManifest: false},
+            },
+            expected: []*filer_pb.FileChunk{
+                {FileId: "1", IsChunkManifest: true},
+                {FileId: "2", IsChunkManifest: true},
+                {FileId: "34", IsChunkManifest: true},
+            },
+        },
+    }
+
+    for i, mtest := range manifestTests {
+        println("test", i)
+        actual, _ := doMaybeManifestize(nil, mtest.inputs, 2, mockMerge)
+        assertEqualChunks(t, mtest.expected, actual)
+    }
+
+}
+
+func assertEqualChunks(t *testing.T, expected, actual []*filer_pb.FileChunk) {
+    assert.Equal(t, len(expected), len(actual))
+    for i := 0; i < len(actual); i++ {
+        assertEqualChunk(t, actual[i], expected[i])
+    }
+}
+func assertEqualChunk(t *testing.T, expected, actual *filer_pb.FileChunk) {
+    assert.Equal(t, expected.FileId, actual.FileId)
+    assert.Equal(t, expected.IsChunkManifest, actual.IsChunkManifest)
+}
+
+func mockMerge(saveFunc SaveDataAsChunkFunctionType, dataChunks []*filer_pb.FileChunk) (manifestChunk *filer_pb.FileChunk, err error) {
+
+    var buf bytes.Buffer
+    minOffset, maxOffset := int64(math.MaxInt64), int64(math.MinInt64)
+    for k := 0; k < len(dataChunks); k++ {
+        chunk := dataChunks[k]
+        buf.WriteString(chunk.FileId)
+        if minOffset > int64(chunk.Offset) {
+            minOffset = chunk.Offset
+        }
+        if maxOffset < int64(chunk.Size)+chunk.Offset {
+            maxOffset = int64(chunk.Size) + chunk.Offset
+        }
+    }
+
+    manifestChunk = &filer_pb.FileChunk{
+        FileId: buf.String(),
+    }
+    manifestChunk.IsChunkManifest = true
+    manifestChunk.Offset = minOffset
+    manifestChunk.Size = uint64(maxOffset - minOffset)
+
+    return
+}
diff --git a/weed/filer/filechunks.go b/weed/filer/filechunks.go
new file mode 100644
index 000000000..68f308a51
--- /dev/null
+++ b/weed/filer/filechunks.go
@@ -0,0 +1,292 @@
+package filer
+
+import (
+    "bytes"
+    "encoding/hex"
+    "fmt"
+    "github.com/chrislusf/seaweedfs/weed/wdclient"
+    "math"
+    "sort"
+    "sync"
+
+    "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+    "github.com/chrislusf/seaweedfs/weed/util"
+)
+
+func TotalSize(chunks []*filer_pb.FileChunk) (size uint64) {
+    for _, c := range chunks {
+        t := uint64(c.Offset + int64(c.Size))
+        if size < t {
+            size = t
+        }
+    }
+    return
+}
+
+func FileSize(entry *filer_pb.Entry) (size uint64) {
+    return maxUint64(TotalSize(entry.Chunks), entry.Attributes.FileSize)
+}
+
+func ETag(entry *filer_pb.Entry) (etag string) {
+    if entry.Attributes == nil || entry.Attributes.Md5 == nil {
+        return ETagChunks(entry.Chunks)
+    }
+    return fmt.Sprintf("%x", entry.Attributes.Md5)
+}
+
+func ETagEntry(entry *Entry) (etag string) {
+    if entry.Attr.Md5 == nil {
+        return ETagChunks(entry.Chunks)
+    }
+    return fmt.Sprintf("%x", entry.Attr.Md5)
+}
+
+func ETagChunks(chunks []*filer_pb.FileChunk) (etag string) {
+    if len(chunks) == 1 {
+        return chunks[0].ETag
+    }
+    md5_digests := [][]byte{}
+    for _, c := range chunks {
+        md5_decoded, _ := hex.DecodeString(c.ETag)
+        md5_digests = append(md5_digests, md5_decoded)
+    }
+    return fmt.Sprintf("%x-%d", util.Md5(bytes.Join(md5_digests, nil)), len(chunks))
+}
+
+func CompactFileChunks(lookupFileIdFn wdclient.LookupFileIdFunctionType, chunks []*filer_pb.FileChunk) (compacted, garbage []*filer_pb.FileChunk) {
+
+    visibles, _ := NonOverlappingVisibleIntervals(lookupFileIdFn, chunks)
+
+    fileIds := make(map[string]bool)
+    for _, interval := range visibles {
+        fileIds[interval.fileId] = true
+    }
+    for _, chunk := range chunks {
+        if _, found := fileIds[chunk.GetFileIdString()]; found {
+            compacted = append(compacted, chunk)
+        } else {
+            garbage = append(garbage, chunk)
+        }
+    }
+
+    return
+}
+
+func MinusChunks(lookupFileIdFn wdclient.LookupFileIdFunctionType, as, bs []*filer_pb.FileChunk) (delta []*filer_pb.FileChunk, err error) {
+
+    aData, aMeta, aErr := ResolveChunkManifest(lookupFileIdFn, as)
+    if aErr != nil {
+        return nil, aErr
+    }
+    bData, bMeta, bErr := ResolveChunkManifest(lookupFileIdFn, bs)
+    if bErr != nil {
+        return nil, bErr
+    }
+
+    delta = append(delta, DoMinusChunks(aData, bData)...)
+    delta = append(delta, DoMinusChunks(aMeta, bMeta)...)
+    return
+}
+
+func DoMinusChunks(as, bs []*filer_pb.FileChunk) (delta []*filer_pb.FileChunk) {
+
+    fileIds := make(map[string]bool)
+    for _, interval := range bs {
+        fileIds[interval.GetFileIdString()] = true
+    }
+    for _, chunk := range as {
+        if _, found := fileIds[chunk.GetFileIdString()]; !found {
+            delta = append(delta, chunk)
+        }
+    }
+
+    return
+}
+
+type ChunkView struct {
+    FileId      string
+    Offset      int64
+    Size        uint64
+    LogicOffset int64 // actual offset in the file, for the data specified via [offset, offset+size) in current chunk
+    ChunkSize   uint64
+    CipherKey   []byte
+    IsGzipped   bool
+}
+
+func (cv *ChunkView) IsFullChunk() bool {
+    return cv.Size == cv.ChunkSize
+}
+
+func ViewFromChunks(lookupFileIdFn wdclient.LookupFileIdFunctionType, chunks []*filer_pb.FileChunk, offset int64, size int64) (views []*ChunkView) {
+
+    visibles, _ := NonOverlappingVisibleIntervals(lookupFileIdFn, chunks)
+
+    return ViewFromVisibleIntervals(visibles, offset, size)
+
+}
+
+func ViewFromVisibleIntervals(visibles []VisibleInterval, offset int64, size int64) (views []*ChunkView) {
+
+    stop := offset + size
+    if size == math.MaxInt64 {
+        stop = math.MaxInt64
+    }
+    if stop < offset {
+        stop = math.MaxInt64
+    }
+
+    for _, chunk := range visibles {
+
+        chunkStart, chunkStop := max(offset, chunk.start), min(stop, chunk.stop)
+
+        if chunkStart < chunkStop {
+            views = append(views, &ChunkView{
+                FileId:      chunk.fileId,
+                Offset:      chunkStart - chunk.start + chunk.chunkOffset,
+                Size:        uint64(chunkStop - chunkStart),
+                LogicOffset: chunkStart,
+                ChunkSize:   chunk.chunkSize,
+                CipherKey:   chunk.cipherKey,
+                IsGzipped:   chunk.isGzipped,
+            })
+        }
+    }
+
+    return views
+
+}
+
+func logPrintf(name string, visibles []VisibleInterval) {
+
+    /*
+        glog.V(0).Infof("%s len %d", name, len(visibles))
+        for _, v := range visibles {
+            glog.V(0).Infof("%s: [%d,%d) %s %d", name, v.start, v.stop, v.fileId, v.chunkOffset)
+        }
+    */
+}
+
+var bufPool = sync.Pool{
+    New: func() interface{} {
+        return new(VisibleInterval)
+    },
+}
+
+func MergeIntoVisibles(visibles []VisibleInterval, chunk *filer_pb.FileChunk) (newVisibles []VisibleInterval) {
+
+    newV := newVisibleInterval(chunk.Offset, chunk.Offset+int64(chunk.Size), chunk.GetFileIdString(), chunk.Mtime, 0, chunk.Size, chunk.CipherKey, chunk.IsCompressed)
+
+    length := len(visibles)
+    if length == 0 {
+        return append(visibles, newV)
+    }
+    last := visibles[length-1]
+    if last.stop <= chunk.Offset {
+        return append(visibles, newV)
+    }
+
+    logPrintf("  before", visibles)
+    // glog.V(0).Infof("newVisibles %d adding chunk [%d,%d) %s size:%d", len(newVisibles), chunk.Offset, chunk.Offset+int64(chunk.Size), chunk.GetFileIdString(), chunk.Size)
+    chunkStop := chunk.Offset + int64(chunk.Size)
+    for _, v := range visibles {
+        if v.start < chunk.Offset && chunk.Offset < v.stop {
+            t := newVisibleInterval(v.start, chunk.Offset, v.fileId, v.modifiedTime, v.chunkOffset, v.chunkSize, v.cipherKey, v.isGzipped)
+            newVisibles = append(newVisibles, t)
+            // glog.V(0).Infof("visible %d [%d,%d) =1> [%d,%d)", i, v.start, v.stop, t.start, t.stop)
+        }
+        if v.start < chunkStop && chunkStop < v.stop {
+            t := newVisibleInterval(chunkStop, v.stop, v.fileId, v.modifiedTime, v.chunkOffset+(chunkStop-v.start), v.chunkSize, v.cipherKey, v.isGzipped)
+            newVisibles = append(newVisibles, t)
+            // glog.V(0).Infof("visible %d [%d,%d) =2> [%d,%d)", i, v.start, v.stop, t.start, t.stop)
+        }
+        if chunkStop <= v.start || v.stop <= chunk.Offset {
+            newVisibles = append(newVisibles, v)
+            // glog.V(0).Infof("visible %d [%d,%d) =3> [%d,%d)", i, v.start, v.stop, v.start, v.stop)
+        }
+    }
+    newVisibles = append(newVisibles, newV)
+
+    logPrintf("  append", newVisibles)
+
+    for i := len(newVisibles) - 1; i >= 0; i-- {
+        if i > 0 && newV.start < newVisibles[i-1].start {
+            newVisibles[i] = newVisibles[i-1]
+        } else {
+            newVisibles[i] = newV
+            break
+        }
+    }
+    logPrintf("  sorted", newVisibles)
+
+    return newVisibles
+}
+
+// NonOverlappingVisibleIntervals translates the file chunks into non-overlapping visible intervals in memory.
+// If a file chunk is a chunk manifest, it is resolved into its underlying data chunks first.
+func NonOverlappingVisibleIntervals(lookupFileIdFn wdclient.LookupFileIdFunctionType, chunks []*filer_pb.FileChunk) (visibles []VisibleInterval, err error) {
+
+    chunks, _, err = ResolveChunkManifest(lookupFileIdFn, chunks)
+
+    sort.Slice(chunks, func(i, j int) bool {
+        if chunks[i].Mtime == chunks[j].Mtime {
+            filer_pb.EnsureFid(chunks[i])
+            filer_pb.EnsureFid(chunks[j])
+            if chunks[i].Fid == nil || chunks[j].Fid == nil {
+                return true
+            }
+            return chunks[i].Fid.FileKey < chunks[j].Fid.FileKey
+        }
+        return chunks[i].Mtime < chunks[j].Mtime // keep this to make tests run
+    })
+
+    for _, chunk := range chunks {
+
+        // glog.V(0).Infof("merge [%d,%d)", chunk.Offset, chunk.Offset+int64(chunk.Size))
+        visibles = MergeIntoVisibles(visibles, chunk)
+
+        logPrintf("add", visibles)
+
+    }
+
+    return
+}
+
+// find non-overlapping visible intervals
+// visible interval map to one file chunk
+
+type VisibleInterval struct {
+    start        int64
+    stop         int64
+    modifiedTime int64
+    fileId       string
+    chunkOffset  int64
+    chunkSize    uint64
+    cipherKey    []byte
+    isGzipped    bool
+}
+
+func newVisibleInterval(start, stop int64, fileId string, modifiedTime int64, chunkOffset int64, chunkSize uint64, cipherKey []byte, isGzipped bool) VisibleInterval {
+    return VisibleInterval{
+        start:        start,
+        stop:         stop,
+        fileId:       fileId,
+        modifiedTime: modifiedTime,
+        chunkOffset:  chunkOffset, // the starting position in the chunk
+        chunkSize:    chunkSize,
+        cipherKey:    cipherKey,
+        isGzipped:    isGzipped,
+    }
+}
+
+func min(x, y int64) int64 {
+    if x <= y {
+        return x
+    }
+    return y
+}
+func max(x, y int64) int64 {
+    if x <= y {
+        return y
+    }
+    return x
+}
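To make the interval merge concrete, here is a small self-contained walk-through (a sketch; the file ids are invented). A newer chunk "b" punches a hole into an older chunk "a", leaving three visible intervals; the third one reads from "a" at an in-chunk offset of 60:

package main

import (
    "fmt"

    "github.com/chrislusf/seaweedfs/weed/filer"
    "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
)

func main() {
    chunks := []*filer_pb.FileChunk{
        {FileId: "a", Offset: 0, Size: 100, Mtime: 123, Fid: &filer_pb.FileId{FileKey: 1}},
        {FileId: "b", Offset: 40, Size: 20, Mtime: 134, Fid: &filer_pb.FileId{FileKey: 2}},
    }
    // lookupFileIdFn may be nil when no chunk is a manifest, as in the tests.
    visibles, _ := filer.NonOverlappingVisibleIntervals(nil, chunks)
    views := filer.ViewFromVisibleIntervals(visibles, 0, 100)
    for _, v := range views {
        fmt.Printf("[%d,%d) from %s at chunk offset %d\n", v.LogicOffset, v.LogicOffset+int64(v.Size), v.FileId, v.Offset)
    }
    // Expected, per the merge logic above:
    // [0,40) from a at chunk offset 0
    // [40,60) from b at chunk offset 0
    // [60,100) from a at chunk offset 60
}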
diff --git a/weed/filer/filechunks2_test.go b/weed/filer/filechunks2_test.go
new file mode 100644
index 000000000..9f9566d9b
--- /dev/null
+++ b/weed/filer/filechunks2_test.go
@@ -0,0 +1,46 @@
+package filer
+
+import (
+    "sort"
+    "testing"
+
+    "github.com/chrislusf/seaweedfs/weed/glog"
+    "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+)
+
+func TestCompactFileChunksRealCase(t *testing.T) {
+
+    chunks := []*filer_pb.FileChunk{
+        {FileId: "2,512f31f2c0700a", Offset: 0, Size: 25 - 0, Mtime: 5320497},
+        {FileId: "6,512f2c2e24e9e8", Offset: 868352, Size: 917585 - 868352, Mtime: 5320492},
+        {FileId: "7,514468dd5954ca", Offset: 884736, Size: 901120 - 884736, Mtime: 5325928},
+        {FileId: "5,5144463173fe77", Offset: 917504, Size: 2297856 - 917504, Mtime: 5325894},
+        {FileId: "4,51444c7ab54e2d", Offset: 2301952, Size: 2367488 - 2301952, Mtime: 5325900},
+        {FileId: "4,514450e643ad22", Offset: 2371584, Size: 2420736 - 2371584, Mtime: 5325904},
+        {FileId: "6,514456a5e9e4d7", Offset: 2449408, Size: 2490368 - 2449408, Mtime: 5325910},
+        {FileId: "3,51444f8d53eebe", Offset: 2494464, Size: 2555904 - 2494464, Mtime: 5325903},
+        {FileId: "4,5144578b097c7e", Offset: 2560000, Size: 2596864 - 2560000, Mtime: 5325911},
+        {FileId: "3,51445500b6b4ac", Offset: 2637824, Size: 2678784 - 2637824, Mtime: 5325909},
+        {FileId: "1,51446285e52a61", Offset: 2695168, Size: 2715648 - 2695168, Mtime: 5325922},
+    }
+
+    printChunks("before", chunks)
+
+    compacted, garbage := CompactFileChunks(nil, chunks)
+
+    printChunks("compacted", compacted)
+    printChunks("garbage", garbage)
+
+}
+
+func printChunks(name string, chunks []*filer_pb.FileChunk) {
+    sort.Slice(chunks, func(i, j int) bool {
+        if chunks[i].Offset == chunks[j].Offset {
+            return chunks[i].Mtime < chunks[j].Mtime
+        }
+        return chunks[i].Offset < chunks[j].Offset
+    })
+    for _, chunk := range chunks {
+        glog.V(0).Infof("%s chunk %s [%10d,%10d)", name, chunk.GetFileIdString(), chunk.Offset, chunk.Offset+int64(chunk.Size))
+    }
+}
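The real-case test above only prints its result. This tiny sketch (invented file ids) shows the compacted/garbage split directly: a chunk fully covered by a newer write contributes no visible interval, so it is classified as garbage.

package main

import (
    "fmt"

    "github.com/chrislusf/seaweedfs/weed/filer"
    "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
)

func main() {
    chunks := []*filer_pb.FileChunk{
        {FileId: "old", Offset: 0, Size: 100, Mtime: 100, Fid: &filer_pb.FileId{FileKey: 1}},
        {FileId: "new", Offset: 0, Size: 200, Mtime: 200, Fid: &filer_pb.FileId{FileKey: 2}},
    }
    compacted, garbage := filer.CompactFileChunks(nil, chunks)
    fmt.Println(len(compacted), len(garbage)) // 1 1: "new" is kept, "old" is garbage
}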
diff --git a/weed/filer2/filechunks_test.go b/weed/filer/filechunks_test.go
similarity index 62%
rename from weed/filer2/filechunks_test.go
rename to weed/filer/filechunks_test.go
index e75e60753..699e7e298 100644
--- a/weed/filer2/filechunks_test.go
+++ b/weed/filer/filechunks_test.go
@@ -1,10 +1,15 @@
-package filer2
+package filer
 
 import (
+    "fmt"
     "log"
+    "math"
+    "math/rand"
+    "strconv"
     "testing"
 
-    "fmt"
+    "github.com/stretchr/testify/assert"
+
     "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
 )
 
@@ -16,7 +21,7 @@ func TestCompactFileChunks(t *testing.T) {
         {Offset: 110, Size: 200, FileId: "jkl", Mtime: 300},
     }
 
-    compacted, garbage := CompactFileChunks(chunks)
+    compacted, garbage := CompactFileChunks(nil, chunks)
 
     if len(compacted) != 3 {
         t.Fatalf("unexpected compacted: %d", len(compacted))
@@ -49,7 +54,7 @@ func TestCompactFileChunks2(t *testing.T) {
         })
     }
 
-    compacted, garbage := CompactFileChunks(chunks)
+    compacted, garbage := CompactFileChunks(nil, chunks)
 
     if len(compacted) != 4 {
         t.Fatalf("unexpected compacted: %d", len(compacted))
@@ -59,6 +64,42 @@ func TestCompactFileChunks2(t *testing.T) {
     }
 }
 
+func TestRandomFileChunksCompact(t *testing.T) {
+
+    data := make([]byte, 1024)
+
+    var chunks []*filer_pb.FileChunk
+    for i := 0; i < 15; i++ {
+        start, stop := rand.Intn(len(data)), rand.Intn(len(data))
+        if start > stop {
+            start, stop = stop, start
+        }
+        if start+16 < stop {
+            stop = start + 16
+        }
+        chunk := &filer_pb.FileChunk{
+            FileId: strconv.Itoa(i),
+            Offset: int64(start),
+            Size:   uint64(stop - start),
+            Mtime:  int64(i),
+            Fid:    &filer_pb.FileId{FileKey: uint64(i)},
+        }
+        chunks = append(chunks, chunk)
+        for x := start; x < stop; x++ {
+            data[x] = byte(i)
+        }
+    }
+
+    visibles, _ := NonOverlappingVisibleIntervals(nil, chunks)
+
+    for _, v := range visibles {
+        for x := v.start; x < v.stop; x++ {
+            assert.Equal(t, strconv.Itoa(int(data[x])), v.fileId)
+        }
+    }
+
+}
+
 func TestIntervalMerging(t *testing.T) {
 
     testcases := []struct {
@@ -91,12 +132,12 @@ func TestIntervalMerging(t *testing.T) {
         // case 2: updates overwrite part of previous chunks
         {
             Chunks: []*filer_pb.FileChunk{
-                {Offset: 0, Size: 100, FileId: "abc", Mtime: 123},
-                {Offset: 0, Size: 50, FileId: "asdf", Mtime: 134},
+                {Offset: 0, Size: 100, FileId: "a", Mtime: 123},
+                {Offset: 0, Size: 70, FileId: "b", Mtime: 134},
             },
             Expected: []*VisibleInterval{
-                {start: 0, stop: 50, fileId: "asdf"},
-                {start: 50, stop: 100, fileId: "abc"},
+                {start: 0, stop: 70, fileId: "b"},
+                {start: 70, stop: 100, fileId: "a", chunkOffset: 70},
             },
         },
         // case 3: updates overwrite full chunks
@@ -126,25 +167,25 @@ func TestIntervalMerging(t *testing.T) {
         // case 5: updates overwrite full chunks
         {
             Chunks: []*filer_pb.FileChunk{
-                {Offset: 0, Size: 100, FileId: "abc", Mtime: 123},
-                {Offset: 0, Size: 200, FileId: "asdf", Mtime: 184},
-                {Offset: 70, Size: 150, FileId: "abc", Mtime: 143},
-                {Offset: 80, Size: 100, FileId: "xxxx", Mtime: 134},
+                {Offset: 0, Size: 100, FileId: "a", Mtime: 123},
+                {Offset: 0, Size: 200, FileId: "d", Mtime: 184},
+                {Offset: 70, Size: 150, FileId: "c", Mtime: 143},
+                {Offset: 80, Size: 100, FileId: "b", Mtime: 134},
             },
             Expected: []*VisibleInterval{
-                {start: 0, stop: 200, fileId: "asdf"},
-                {start: 200, stop: 220, fileId: "abc"},
+                {start: 0, stop: 200, fileId: "d"},
+                {start: 200, stop: 220, fileId: "c", chunkOffset: 130},
             },
         },
         // case 6: same updates
         {
             Chunks: []*filer_pb.FileChunk{
-                {Offset: 0, Size: 100, FileId: "abc", Mtime: 123},
-                {Offset: 0, Size: 100, FileId: "abc", Mtime: 123},
-                {Offset: 0, Size: 100, FileId: "abc", Mtime: 123},
+                {Offset: 0, Size: 100, FileId: "abc", Fid: &filer_pb.FileId{FileKey: 1}, Mtime: 123},
+                {Offset: 0, Size: 100, FileId: "axf", Fid: &filer_pb.FileId{FileKey: 2}, Mtime: 123},
+                {Offset: 0, Size: 100, FileId: "xyz", Fid: &filer_pb.FileId{FileKey: 3}, Mtime: 123},
             },
             Expected: []*VisibleInterval{
-                {start: 0, stop: 100, fileId: "abc"},
+                {start: 0, stop: 100, fileId: "xyz"},
             },
         },
         // case 7: real updates
@@ -186,7 +227,7 @@ func TestIntervalMerging(t *testing.T) {
 
     for i, testcase := range testcases {
         log.Printf("++++++++++ merged test case %d ++++++++++++++++++++", i)
-        intervals := NonOverlappingVisibleIntervals(testcase.Chunks)
+        intervals, _ := NonOverlappingVisibleIntervals(nil, testcase.Chunks)
         for x, interval := range intervals {
             log.Printf("test case %d, interval %d, start=%d, stop=%d, fileId=%s",
                 i, x, interval.start, interval.stop, interval.fileId)
@@ -204,6 +245,10 @@ func TestIntervalMerging(t *testing.T) {
                 t.Fatalf("failed on test case %d, interval %d, chunkId %s, expect %s",
                     i, x, interval.fileId, testcase.Expected[x].fileId)
             }
+            if interval.chunkOffset != testcase.Expected[x].chunkOffset {
+                t.Fatalf("failed on test case %d, interval %d, chunkOffset %d, expect %d",
+                    i, x, interval.chunkOffset, testcase.Expected[x].chunkOffset)
+            }
         }
         if len(intervals) != len(testcase.Expected) {
             t.Fatalf("failed to compact test case %d, len %d expected %d", i, len(intervals), len(testcase.Expected))
@@ -218,7 +263,7 @@ func TestChunksReading(t *testing.T) {
     testcases := []struct {
         Chunks   []*filer_pb.FileChunk
         Offset   int64
-        Size     int
+        Size     int64
         Expected []*ChunkView
     }{
         // case 0: normal
@@ -251,14 +296,14 @@ func TestChunksReading(t *testing.T) {
         // case 2: updates overwrite part of previous chunks
         {
             Chunks: []*filer_pb.FileChunk{
-                {Offset: 0, Size: 100, FileId: "abc", Mtime: 123},
-                {Offset: 0, Size: 50, FileId: "asdf", Mtime: 134},
+                {Offset: 3, Size: 100, FileId: "a", Mtime: 123},
+                {Offset: 10, Size: 50, FileId: "b", Mtime: 134},
             },
-            Offset: 25,
-            Size:   50,
+            Offset: 30,
+            Size:   40,
             Expected: []*ChunkView{
-                {Offset: 25, Size: 25, FileId: "asdf", LogicOffset: 25},
-                {Offset: 0, Size: 25, FileId: "abc", LogicOffset: 50},
+                {Offset: 20, Size: 30, FileId: "b", LogicOffset: 30},
+                {Offset: 57, Size: 10, FileId: "a", LogicOffset: 60},
             },
         },
         // case 3: updates overwrite full chunks
@@ -286,35 +331,35 @@ func TestChunksReading(t *testing.T) {
             Size:   400,
             Expected: []*ChunkView{
                 {Offset: 0, Size: 200, FileId: "asdf", LogicOffset: 0},
-                // {Offset: 0, Size: 150, FileId: "xxxx"}, // missing intervals should not happen
+                {Offset: 0, Size: 150, FileId: "xxxx", LogicOffset: 250},
             },
         },
         // case 5: updates overwrite full chunks
         {
             Chunks: []*filer_pb.FileChunk{
-                {Offset: 0, Size: 100, FileId: "abc", Mtime: 123},
-                {Offset: 0, Size: 200, FileId: "asdf", Mtime: 184},
-                {Offset: 70, Size: 150, FileId: "abc", Mtime: 143},
+                {Offset: 0, Size: 100, FileId: "a", Mtime: 123},
+                {Offset: 0, Size: 200, FileId: "c", Mtime: 184},
+                {Offset: 70, Size: 150, FileId: "b", Mtime: 143},
                 {Offset: 80, Size: 100, FileId: "xxxx", Mtime: 134},
             },
             Offset: 0,
             Size:   220,
             Expected: []*ChunkView{
-                {Offset: 0, Size: 200, FileId: "asdf", LogicOffset: 0},
-                {Offset: 0, Size: 20, FileId: "abc", LogicOffset: 200},
+                {Offset: 0, Size: 200, FileId: "c", LogicOffset: 0},
+                {Offset: 130, Size: 20, FileId: "b", LogicOffset: 200},
             },
         },
         // case 6: same updates
         {
             Chunks: []*filer_pb.FileChunk{
-                {Offset: 0, Size: 100, FileId: "abc", Mtime: 123},
-                {Offset: 0, Size: 100, FileId: "abc", Mtime: 123},
-                {Offset: 0, Size: 100, FileId: "abc", Mtime: 123},
+                {Offset: 0, Size: 100, FileId: "abc", Fid: &filer_pb.FileId{FileKey: 1}, Mtime: 123},
+                {Offset: 0, Size: 100, FileId: "def", Fid: &filer_pb.FileId{FileKey: 2}, Mtime: 123},
+                {Offset: 0, Size: 100, FileId: "xyz", Fid: &filer_pb.FileId{FileKey: 3}, Mtime: 123},
            },
             Offset: 0,
             Size:   100,
             Expected: []*ChunkView{
-                {Offset: 0, Size: 100, FileId: "abc", LogicOffset: 0},
+                {Offset: 0, Size: 100, FileId: "xyz", LogicOffset: 0},
             },
         },
         // case 7: edge cases
@@ -331,21 +376,60 @@ func TestChunksReading(t *testing.T) {
                 {Offset: 0, Size: 100, FileId: "asdf", LogicOffset: 100},
             },
         },
+        // case 8: edge cases
+        {
+            Chunks: []*filer_pb.FileChunk{
+                {Offset: 0, Size: 100, FileId: "abc", Mtime: 123},
+                {Offset: 90, Size: 200, FileId: "asdf", Mtime: 134},
+                {Offset: 190, Size: 300, FileId: "fsad", Mtime: 353},
+            },
+            Offset: 0,
+            Size:   300,
+            Expected: []*ChunkView{
+                {Offset: 0, Size: 90, FileId: "abc", LogicOffset: 0},
+                {Offset: 0, Size: 100, FileId: "asdf", LogicOffset: 90},
+                {Offset: 0, Size: 110, FileId: "fsad", LogicOffset: 190},
+            },
+        },
+        // case 9: edge cases
+        {
+            Chunks: []*filer_pb.FileChunk{
+                {Offset: 0, Size: 43175947, FileId: "2,111fc2cbfac1", Mtime: 1},
+                {Offset: 43175936, Size: 52981771 - 43175936, FileId: "2,112a36ea7f85", Mtime: 2},
+                {Offset: 52981760, Size: 72564747 - 52981760, FileId: "4,112d5f31c5e7", Mtime: 3},
+                {Offset: 72564736, Size: 133255179 - 72564736, FileId: "1,113245f0cdb6", Mtime: 4},
+                {Offset: 133255168, Size: 137269259 - 133255168, FileId: "3,1141a70733b5", Mtime: 5},
+                {Offset: 137269248, Size: 153578836 - 137269248, FileId: "1,114201d5bbdb", Mtime: 6},
+            },
+            Offset: 0,
+            Size:   153578836,
+            Expected: []*ChunkView{
+                {Offset: 0, Size: 43175936, FileId: "2,111fc2cbfac1", LogicOffset: 0},
+                {Offset: 0, Size: 52981760 - 43175936, FileId: "2,112a36ea7f85", LogicOffset: 43175936},
+                {Offset: 0, Size: 72564736 - 52981760, FileId: "4,112d5f31c5e7", LogicOffset: 52981760},
+                {Offset: 0, Size: 133255168 - 72564736, FileId: "1,113245f0cdb6", LogicOffset: 72564736},
+                {Offset: 0, Size: 137269248 - 133255168, FileId: "3,1141a70733b5", LogicOffset: 133255168},
+                {Offset: 0, Size: 153578836 - 137269248, FileId: "1,114201d5bbdb", LogicOffset: 137269248},
+            },
+        },
     }
 
     for i, testcase := range testcases {
+        if i != 2 {
+            // continue
+        }
         log.Printf("++++++++++ read test case %d ++++++++++++++++++++", i)
-        chunks := ViewFromChunks(testcase.Chunks, testcase.Offset, testcase.Size)
+        chunks := ViewFromChunks(nil, testcase.Chunks, testcase.Offset, testcase.Size)
         for x, chunk := range chunks {
             log.Printf("read case %d, chunk %d, offset=%d, size=%d, fileId=%s",
                 i, x, chunk.Offset, chunk.Size, chunk.FileId)
             if chunk.Offset != testcase.Expected[x].Offset {
-                t.Fatalf("failed on read case %d, chunk %d, Offset %d, expect %d",
-                    i, x, chunk.Offset, testcase.Expected[x].Offset)
+                t.Fatalf("failed on read case %d, chunk %s, Offset %d, expect %d",
+                    i, chunk.FileId, chunk.Offset, testcase.Expected[x].Offset)
             }
             if chunk.Size != testcase.Expected[x].Size {
-                t.Fatalf("failed on read case %d, chunk %d, Size %d, expect %d",
-                    i, x, chunk.Size, testcase.Expected[x].Size)
+                t.Fatalf("failed on read case %d, chunk %s, Size %d, expect %d",
+                    i, chunk.FileId, chunk.Size, testcase.Expected[x].Size)
             }
             if chunk.FileId != testcase.Expected[x].FileId {
                 t.Fatalf("failed on read case %d, chunk %d, FileId %s, expect %s",
@@ -379,6 +463,77 @@ func BenchmarkCompactFileChunks(b *testing.B) {
     }
 
     for n := 0; n < b.N; n++ {
-        CompactFileChunks(chunks)
+        CompactFileChunks(nil, chunks)
+    }
+}
+
+func TestViewFromVisibleIntervals(t *testing.T) {
+    visibles := []VisibleInterval{
+        {
+            start:  0,
+            stop:   25,
+            fileId: "fid1",
+        },
+        {
+            start:  4096,
+            stop:   8192,
+            fileId: "fid2",
+        },
+        {
+            start:  16384,
+            stop:   18551,
+            fileId: "fid3",
+        },
+    }
+
+    views := ViewFromVisibleIntervals(visibles, 0, math.MaxInt32)
+
+    if len(views) != len(visibles) {
+        assert.Equal(t, len(visibles), len(views), "ViewFromVisibleIntervals error")
+    }
+
+}
+
+func TestViewFromVisibleIntervals2(t *testing.T) {
+    visibles := []VisibleInterval{
+        {
+            start:  344064,
+            stop:   348160,
+            fileId: "fid1",
+        },
+        {
+            start:  348160,
+            stop:   356352,
+            fileId: "fid2",
+        },
+    }
+
+    views := ViewFromVisibleIntervals(visibles, 0, math.MaxInt32)
+
+    if len(views) != len(visibles) {
+        assert.Equal(t, len(visibles), len(views), "ViewFromVisibleIntervals error")
+    }
+
+}
+
+func TestViewFromVisibleIntervals3(t *testing.T) {
+    visibles := []VisibleInterval{
+        {
+            start:  1000,
+            stop:   2000,
+            fileId: "fid1",
+        },
+        {
+            start:  3000,
+            stop:   4000,
+            fileId: "fid2",
+        },
+    }
+
+    views := ViewFromVisibleIntervals(visibles, 1700, 1500)
+
+    if len(views) != len(visibles) {
+        assert.Equal(t, len(visibles), len(views), "ViewFromVisibleIntervals error")
+    }
+
 }
diff --git a/weed/filer/filer.go b/weed/filer/filer.go
new file mode 100644
index 000000000..effdc0e4e
--- /dev/null
+++ b/weed/filer/filer.go
@@ -0,0 +1,304 @@
+package filer
+
+import (
+    "context"
+    "fmt"
+    "os"
+    "strings"
+    "time"
+
+    "google.golang.org/grpc"
+
+    "github.com/chrislusf/seaweedfs/weed/glog"
+    "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+    "github.com/chrislusf/seaweedfs/weed/util"
+    "github.com/chrislusf/seaweedfs/weed/util/log_buffer"
"github.com/chrislusf/seaweedfs/weed/wdclient" +) + +const ( + LogFlushInterval = time.Minute + PaginationSize = 1024 + FilerStoreId = "filer.store.id" +) + +var ( + OS_UID = uint32(os.Getuid()) + OS_GID = uint32(os.Getgid()) +) + +type Filer struct { + Store VirtualFilerStore + MasterClient *wdclient.MasterClient + fileIdDeletionQueue *util.UnboundedQueue + GrpcDialOption grpc.DialOption + DirBucketsPath string + FsyncBuckets []string + buckets *FilerBuckets + Cipher bool + LocalMetaLogBuffer *log_buffer.LogBuffer + metaLogCollection string + metaLogReplication string + MetaAggregator *MetaAggregator + Signature int32 + FilerConf *FilerConf +} + +func NewFiler(masters []string, grpcDialOption grpc.DialOption, + filerHost string, filerGrpcPort uint32, collection string, replication string, dataCenter string, notifyFn func()) *Filer { + f := &Filer{ + MasterClient: wdclient.NewMasterClient(grpcDialOption, "filer", filerHost, filerGrpcPort, dataCenter, masters), + fileIdDeletionQueue: util.NewUnboundedQueue(), + GrpcDialOption: grpcDialOption, + FilerConf: NewFilerConf(), + } + f.LocalMetaLogBuffer = log_buffer.NewLogBuffer(LogFlushInterval, f.logFlushFunc, notifyFn) + f.metaLogCollection = collection + f.metaLogReplication = replication + + go f.loopProcessingDeletion() + + return f +} + +func (f *Filer) AggregateFromPeers(self string, filers []string) { + + // set peers + found := false + for _, peer := range filers { + if peer == self { + found = true + } + } + if !found { + filers = append(filers, self) + } + + f.MetaAggregator = NewMetaAggregator(filers, f.GrpcDialOption) + f.MetaAggregator.StartLoopSubscribe(f, self) + +} + +func (f *Filer) SetStore(store FilerStore) { + f.Store = NewFilerStoreWrapper(store) + + f.setOrLoadFilerStoreSignature(store) + +} + +func (f *Filer) setOrLoadFilerStoreSignature(store FilerStore) { + storeIdBytes, err := store.KvGet(context.Background(), []byte(FilerStoreId)) + if err == ErrKvNotFound || err == nil && len(storeIdBytes) == 0 { + f.Signature = util.RandomInt32() + storeIdBytes = make([]byte, 4) + util.Uint32toBytes(storeIdBytes, uint32(f.Signature)) + if err = store.KvPut(context.Background(), []byte(FilerStoreId), storeIdBytes); err != nil { + glog.Fatalf("set %s=%d : %v", FilerStoreId, f.Signature, err) + } + glog.V(0).Infof("create %s to %d", FilerStoreId, f.Signature) + } else if err == nil && len(storeIdBytes) == 4 { + f.Signature = int32(util.BytesToUint32(storeIdBytes)) + glog.V(0).Infof("existing %s = %d", FilerStoreId, f.Signature) + } else { + glog.Fatalf("read %v=%v : %v", FilerStoreId, string(storeIdBytes), err) + } +} + +func (f *Filer) GetStore() (store FilerStore) { + return f.Store +} + +func (fs *Filer) GetMaster() string { + return fs.MasterClient.GetMaster() +} + +func (fs *Filer) KeepConnectedToMaster() { + fs.MasterClient.KeepConnectedToMaster() +} + +func (f *Filer) BeginTransaction(ctx context.Context) (context.Context, error) { + return f.Store.BeginTransaction(ctx) +} + +func (f *Filer) CommitTransaction(ctx context.Context) error { + return f.Store.CommitTransaction(ctx) +} + +func (f *Filer) RollbackTransaction(ctx context.Context) error { + return f.Store.RollbackTransaction(ctx) +} + +func (f *Filer) CreateEntry(ctx context.Context, entry *Entry, o_excl bool, isFromOtherCluster bool, signatures []int32) error { + + if string(entry.FullPath) == "/" { + return nil + } + + oldEntry, _ := f.FindEntry(ctx, entry.FullPath) + + /* + if !hasWritePermission(lastDirectoryEntry, entry) { + glog.V(0).Infof("directory %s: %v, entry: 
uid=%d gid=%d", + lastDirectoryEntry.FullPath, lastDirectoryEntry.Attr, entry.Uid, entry.Gid) + return fmt.Errorf("no write permission in folder %v", lastDirectoryEntry.FullPath) + } + */ + + if oldEntry == nil { + + dirParts := strings.Split(string(entry.FullPath), "/") + if err := f.ensureParentDirecotryEntry(ctx, entry, dirParts, len(dirParts)-1, isFromOtherCluster); err != nil { + return err + } + + glog.V(4).Infof("InsertEntry %s: new entry: %v", entry.FullPath, entry.Name()) + if err := f.Store.InsertEntry(ctx, entry); err != nil { + glog.Errorf("insert entry %s: %v", entry.FullPath, err) + return fmt.Errorf("insert entry %s: %v", entry.FullPath, err) + } + } else { + if o_excl { + glog.V(3).Infof("EEXIST: entry %s already exists", entry.FullPath) + return fmt.Errorf("EEXIST: entry %s already exists", entry.FullPath) + } + glog.V(4).Infof("UpdateEntry %s: old entry: %v", entry.FullPath, oldEntry.Name()) + if err := f.UpdateEntry(ctx, oldEntry, entry); err != nil { + glog.Errorf("update entry %s: %v", entry.FullPath, err) + return fmt.Errorf("update entry %s: %v", entry.FullPath, err) + } + } + + f.maybeAddBucket(entry) + f.NotifyUpdateEvent(ctx, oldEntry, entry, true, isFromOtherCluster, signatures) + + f.deleteChunksIfNotNew(oldEntry, entry) + + glog.V(4).Infof("CreateEntry %s: created", entry.FullPath) + + return nil +} + +func (f *Filer) ensureParentDirecotryEntry(ctx context.Context, entry *Entry, dirParts []string, level int, isFromOtherCluster bool) (err error) { + + if level == 0 { + return nil + } + + dirPath := "/" + util.Join(dirParts[:level]...) + // fmt.Printf("%d directory: %+v\n", i, dirPath) + + // check the store directly + glog.V(4).Infof("find uncached directory: %s", dirPath) + dirEntry, _ := f.FindEntry(ctx, util.FullPath(dirPath)) + + // no such existing directory + if dirEntry == nil { + + // ensure parent directory + if err = f.ensureParentDirecotryEntry(ctx, entry, dirParts, level-1, isFromOtherCluster); err != nil { + return err + } + + // create the directory + now := time.Now() + + dirEntry = &Entry{ + FullPath: util.FullPath(dirPath), + Attr: Attr{ + Mtime: now, + Crtime: now, + Mode: os.ModeDir | entry.Mode | 0110, + Uid: entry.Uid, + Gid: entry.Gid, + Collection: entry.Collection, + Replication: entry.Replication, + UserName: entry.UserName, + GroupNames: entry.GroupNames, + }, + } + + glog.V(2).Infof("create directory: %s %v", dirPath, dirEntry.Mode) + mkdirErr := f.Store.InsertEntry(ctx, dirEntry) + if mkdirErr != nil { + if _, err := f.FindEntry(ctx, util.FullPath(dirPath)); err == filer_pb.ErrNotFound { + glog.V(3).Infof("mkdir %s: %v", dirPath, mkdirErr) + return fmt.Errorf("mkdir %s: %v", dirPath, mkdirErr) + } + } else { + f.maybeAddBucket(dirEntry) + f.NotifyUpdateEvent(ctx, nil, dirEntry, false, isFromOtherCluster, nil) + } + + } else if !dirEntry.IsDirectory() { + glog.Errorf("CreateEntry %s: %s should be a directory", entry.FullPath, dirPath) + return fmt.Errorf("%s is a file", dirPath) + } + + return nil +} + +func (f *Filer) UpdateEntry(ctx context.Context, oldEntry, entry *Entry) (err error) { + if oldEntry != nil { + entry.Attr.Crtime = oldEntry.Attr.Crtime + if oldEntry.IsDirectory() && !entry.IsDirectory() { + glog.Errorf("existing %s is a directory", oldEntry.FullPath) + return fmt.Errorf("existing %s is a directory", oldEntry.FullPath) + } + if !oldEntry.IsDirectory() && entry.IsDirectory() { + glog.Errorf("existing %s is a file", oldEntry.FullPath) + return fmt.Errorf("existing %s is a file", oldEntry.FullPath) + } + } + return 
+
+var (
+    Root = &Entry{
+        FullPath: "/",
+        Attr: Attr{
+            Mtime:  time.Now(),
+            Crtime: time.Now(),
+            Mode:   os.ModeDir | 0755,
+            Uid:    OS_UID,
+            Gid:    OS_GID,
+        },
+    }
+)
+
+func (f *Filer) FindEntry(ctx context.Context, p util.FullPath) (entry *Entry, err error) {
+
+    if string(p) == "/" {
+        return Root, nil
+    }
+    entry, err = f.Store.FindEntry(ctx, p)
+    if entry != nil && entry.TtlSec > 0 {
+        if entry.Crtime.Add(time.Duration(entry.TtlSec) * time.Second).Before(time.Now()) {
+            f.Store.DeleteOneEntry(ctx, entry)
+            return nil, filer_pb.ErrNotFound
+        }
+    }
+    return
+
+}
+
+func (f *Filer) doListDirectoryEntries(ctx context.Context, p util.FullPath, startFileName string, inclusive bool, limit int64, prefix string, eachEntryFunc ListEachEntryFunc) (expiredCount int64, lastFileName string, err error) {
+    lastFileName, err = f.Store.ListDirectoryPrefixedEntries(ctx, p, startFileName, inclusive, limit, prefix, func(entry *Entry) bool {
+        if entry.TtlSec > 0 {
+            if entry.Crtime.Add(time.Duration(entry.TtlSec) * time.Second).Before(time.Now()) {
+                f.Store.DeleteOneEntry(ctx, entry)
+                expiredCount++
+                return true
+            }
+        }
+        return eachEntryFunc(entry)
+    })
+    if err != nil {
+        return expiredCount, lastFileName, err
+    }
+    return
+}
+
+func (f *Filer) Shutdown() {
+    f.LocalMetaLogBuffer.Shutdown()
+    f.Store.Shutdown()
+}
diff --git a/weed/filer/filer_buckets.go b/weed/filer/filer_buckets.go
new file mode 100644
index 000000000..43fb000c9
--- /dev/null
+++ b/weed/filer/filer_buckets.go
@@ -0,0 +1,121 @@
+package filer
+
+import (
+    "context"
+    "math"
+    "sync"
+
+    "github.com/chrislusf/seaweedfs/weed/glog"
+    "github.com/chrislusf/seaweedfs/weed/util"
+)
+
+type BucketName string
+type BucketOption struct {
+    Name        BucketName
+    Replication string
+    fsync       bool
+}
+type FilerBuckets struct {
+    dirBucketsPath string
+    buckets        map[BucketName]*BucketOption
+    sync.RWMutex
+}
+
+func (f *Filer) LoadBuckets() {
+
+    f.buckets = &FilerBuckets{
+        buckets: make(map[BucketName]*BucketOption),
+    }
+
+    limit := int64(math.MaxInt32)
+
+    entries, _, err := f.ListDirectoryEntries(context.Background(), util.FullPath(f.DirBucketsPath), "", false, limit, "", "", "")
+
+    if err != nil {
+        glog.V(1).Infof("no buckets found: %v", err)
+        return
+    }
+
+    shouldFsyncMap := make(map[string]bool)
+    for _, bucket := range f.FsyncBuckets {
+        shouldFsyncMap[bucket] = true
+    }
+
+    glog.V(1).Infof("buckets found: %d", len(entries))
+
+    f.buckets.Lock()
+    for _, entry := range entries {
+        _, shouldFsync := shouldFsyncMap[entry.Name()]
+        f.buckets.buckets[BucketName(entry.Name())] = &BucketOption{
+            Name:        BucketName(entry.Name()),
+            Replication: entry.Replication,
+            fsync:       shouldFsync,
+        }
+    }
+    f.buckets.Unlock()
+
+}
+
+func (f *Filer) ReadBucketOption(bucketName string) (replication string, fsync bool) {
+
+    f.buckets.RLock()
+    defer f.buckets.RUnlock()
+
+    option, found := f.buckets.buckets[BucketName(bucketName)]
+
+    if !found {
+        return "", false
+    }
+    return option.Replication, option.fsync
+
+}
+
+func (f *Filer) isBucket(entry *Entry) bool {
+    if !entry.IsDirectory() {
+        return false
+    }
+    parent, dirName := entry.FullPath.DirAndName()
+    if parent != f.DirBucketsPath {
+        return false
+    }
+
+    f.buckets.RLock()
+    defer f.buckets.RUnlock()
+
+    _, found := f.buckets.buckets[BucketName(dirName)]
+
+    return found
+
+}
+
+func (f *Filer) maybeAddBucket(entry *Entry) {
+    if !entry.IsDirectory() {
+        return
+    }
+    parent, dirName := entry.FullPath.DirAndName()
+    if parent != f.DirBucketsPath {
+        return
+    }
+    f.addBucket(dirName, &BucketOption{
+        Name:        BucketName(dirName),
+        Replication: entry.Replication,
+    })
+}
+
+func (f *Filer) addBucket(bucketName string, bucketOption *BucketOption) {
+
+    f.buckets.Lock()
+    defer f.buckets.Unlock()
+
+    f.buckets.buckets[BucketName(bucketName)] = bucketOption
+
+}
+
+func (f *Filer) deleteBucket(bucketName string) {
+
+    f.buckets.Lock()
+    defer f.buckets.Unlock()
+
+    delete(f.buckets.buckets, BucketName(bucketName))
+
+}
diff --git a/weed/filer/filer_conf.go b/weed/filer/filer_conf.go
new file mode 100644
index 000000000..ab5afc5cc
--- /dev/null
+++ b/weed/filer/filer_conf.go
@@ -0,0 +1,149 @@
+package filer
+
+import (
+    "bytes"
+    "context"
+    "io"
+
+    "github.com/chrislusf/seaweedfs/weed/glog"
+    "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+    "github.com/chrislusf/seaweedfs/weed/util"
+    "github.com/golang/protobuf/jsonpb"
+    "github.com/viant/ptrie"
+)
+
+const (
+    DirectoryEtcRoot      = "/etc"
+    DirectoryEtcSeaweedFS = "/etc/seaweedfs"
+    FilerConfName         = "filer.conf"
+    IamConfigDirecotry    = "/etc/iam"
+    IamIdentityFile       = "identity.json"
+    IamPoliciesFile       = "policies.json"
+)
+
+type FilerConf struct {
+    rules ptrie.Trie
+}
+
+func NewFilerConf() (fc *FilerConf) {
+    fc = &FilerConf{
+        rules: ptrie.New(),
+    }
+    return fc
+}
+
+func (fc *FilerConf) loadFromFiler(filer *Filer) (err error) {
+    filerConfPath := util.NewFullPath(DirectoryEtcSeaweedFS, FilerConfName)
+    entry, err := filer.FindEntry(context.Background(), filerConfPath)
+    if err != nil {
+        if err == filer_pb.ErrNotFound {
+            return nil
+        }
+        glog.Errorf("read filer conf entry %s: %v", filerConfPath, err)
+        return
+    }
+
+    if len(entry.Content) > 0 {
+        return fc.LoadFromBytes(entry.Content)
+    }
+
+    return fc.loadFromChunks(filer, entry.Content, entry.Chunks)
+}
+
+func (fc *FilerConf) loadFromChunks(filer *Filer, content []byte, chunks []*filer_pb.FileChunk) (err error) {
+    if len(content) == 0 {
+        content, err = filer.readEntry(chunks)
+        if err != nil {
+            glog.Errorf("read filer conf content: %v", err)
+            return
+        }
+    }
+
+    return fc.LoadFromBytes(content)
+}
+
+func (fc *FilerConf) LoadFromBytes(data []byte) (err error) {
+    conf := &filer_pb.FilerConf{}
+
+    if err := jsonpb.Unmarshal(bytes.NewReader(data), conf); err != nil {
+        return err
+    }
+
+    return fc.doLoadConf(conf)
+}
+
+func (fc *FilerConf) doLoadConf(conf *filer_pb.FilerConf) (err error) {
+    for _, location := range conf.Locations {
+        err = fc.AddLocationConf(location)
+        if err != nil {
+            // this is not recoverable; stop loading the remaining rules
+            return nil
+        }
+    }
+    return nil
+}
+
+func (fc *FilerConf) AddLocationConf(locConf *filer_pb.FilerConf_PathConf) (err error) {
+    err = fc.rules.Put([]byte(locConf.LocationPrefix), locConf)
+    if err != nil {
+        glog.Errorf("put location prefix: %v", err)
+    }
+    return
+}
+
+func (fc *FilerConf) DeleteLocationConf(locationPrefix string) {
+    rules := ptrie.New()
+    fc.rules.Walk(func(key []byte, value interface{}) bool {
+        if string(key) == locationPrefix {
+            return true
+        }
+        rules.Put(key, value)
+        return true
+    })
+    fc.rules = rules
+    return
+}
+
+func (fc *FilerConf) MatchStorageRule(path string) (pathConf *filer_pb.FilerConf_PathConf) {
+    pathConf = &filer_pb.FilerConf_PathConf{}
+    fc.rules.MatchPrefix([]byte(path), func(key []byte, value interface{}) bool {
+        t := value.(*filer_pb.FilerConf_PathConf)
+        mergePathConf(pathConf, t)
+        return true
+    })
+    return pathConf
+}
+
+// merge: non-empty values in b override the corresponding values in a
+func mergePathConf(a, b *filer_pb.FilerConf_PathConf) {
+    a.Collection = util.Nvl(b.Collection, a.Collection)
+    a.Replication = util.Nvl(b.Replication, a.Replication)
+    a.Ttl = util.Nvl(b.Ttl, a.Ttl)
+    if b.DiskType != "" {
+        a.DiskType = b.DiskType
+    }
+    a.Fsync = b.Fsync || a.Fsync
+    if b.VolumeGrowthCount > 0 {
+        a.VolumeGrowthCount = b.VolumeGrowthCount
+    }
+}
+
+func (fc *FilerConf) ToProto() *filer_pb.FilerConf {
+    m := &filer_pb.FilerConf{}
+    fc.rules.Walk(func(key []byte, value interface{}) bool {
+        pathConf := value.(*filer_pb.FilerConf_PathConf)
+        m.Locations = append(m.Locations, pathConf)
+        return true
+    })
+    return m
+}
+
+func (fc *FilerConf) ToText(writer io.Writer) error {
+
+    m := jsonpb.Marshaler{
+        EmitDefaults: false,
+        Indent:       "  ",
+    }
+
+    return m.Marshal(writer, fc.ToProto())
+}
diff --git a/weed/filer/filer_conf_test.go b/weed/filer/filer_conf_test.go
new file mode 100644
index 000000000..ff868a3ec
--- /dev/null
+++ b/weed/filer/filer_conf_test.go
@@ -0,0 +1,34 @@
+package filer
+
+import (
+    "testing"
+
+    "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+    "github.com/stretchr/testify/assert"
+)
+
+func TestFilerConf(t *testing.T) {
+
+    fc := NewFilerConf()
+
+    conf := &filer_pb.FilerConf{Locations: []*filer_pb.FilerConf_PathConf{
+        {
+            LocationPrefix: "/buckets/abc",
+            Collection:     "abc",
+        },
+        {
+            LocationPrefix: "/buckets/abcd",
+            Collection:     "abcd",
+        },
+        {
+            LocationPrefix: "/buckets/",
+            Replication:    "001",
+        },
+    }}
+    fc.doLoadConf(conf)
+
+    assert.Equal(t, "abc", fc.MatchStorageRule("/buckets/abc/jasdf").Collection)
+    assert.Equal(t, "abcd", fc.MatchStorageRule("/buckets/abcd/jasdf").Collection)
+    assert.Equal(t, "001", fc.MatchStorageRule("/buckets/abc/jasdf").Replication)
+
+}
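Tying this together: the configuration lives at /etc/seaweedfs/filer.conf and is parsed by jsonpb into a filer_pb.FilerConf. A minimal end-to-end sketch (the JSON content is illustrative; jsonpb accepts both the original protobuf field names used here and their camelCase forms):

package main

import (
    "fmt"

    "github.com/chrislusf/seaweedfs/weed/filer"
)

func main() {
    conf := []byte(`{
        "locations": [
            {"location_prefix": "/buckets/",    "replication": "001"},
            {"location_prefix": "/buckets/abc", "collection": "abc"}
        ]
    }`)

    fc := filer.NewFilerConf()
    if err := fc.LoadFromBytes(conf); err != nil {
        panic(err)
    }

    // Every rule along the prefix chain is merged, most specific last.
    rule := fc.MatchStorageRule("/buckets/abc/2021/01/photo.jpg")
    fmt.Println(rule.Collection, rule.Replication) // abc 001
}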
diff --git a/weed/filer/filer_delete_entry.go b/weed/filer/filer_delete_entry.go
new file mode 100644
index 000000000..3ef3cfff9
--- /dev/null
+++ b/weed/filer/filer_delete_entry.go
@@ -0,0 +1,161 @@
+package filer
+
+import (
+    "context"
+    "fmt"
+    "github.com/chrislusf/seaweedfs/weed/glog"
+    "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+    "github.com/chrislusf/seaweedfs/weed/pb/master_pb"
+    "github.com/chrislusf/seaweedfs/weed/util"
+)
+
+type HardLinkId []byte
+
+const (
+    MsgFailDelNonEmptyFolder = "fail to delete non-empty folder"
+)
+
+func (f *Filer) DeleteEntryMetaAndData(ctx context.Context, p util.FullPath, isRecursive, ignoreRecursiveError, shouldDeleteChunks, isFromOtherCluster bool, signatures []int32) (err error) {
+    if p == "/" {
+        return nil
+    }
+
+    entry, findErr := f.FindEntry(ctx, p)
+    if findErr != nil {
+        return findErr
+    }
+
+    isDeleteCollection := f.isBucket(entry)
+
+    var chunks []*filer_pb.FileChunk
+    var hardLinkIds []HardLinkId
+    chunks = append(chunks, entry.Chunks...)
+    if entry.IsDirectory() {
+        // delete the folder children, not including the folder itself
+        var dirChunks []*filer_pb.FileChunk
+        var dirHardLinkIds []HardLinkId
+        dirChunks, dirHardLinkIds, err = f.doBatchDeleteFolderMetaAndData(ctx, entry, isRecursive, ignoreRecursiveError, shouldDeleteChunks && !isDeleteCollection, isDeleteCollection, isFromOtherCluster, signatures)
+        if err != nil {
+            glog.V(0).Infof("delete directory %s: %v", p, err)
+            return fmt.Errorf("delete directory %s: %v", p, err)
+        }
+        chunks = append(chunks, dirChunks...)
+        hardLinkIds = append(hardLinkIds, dirHardLinkIds...)
+    }
+
+    // delete the file or folder
+    err = f.doDeleteEntryMetaAndData(ctx, entry, shouldDeleteChunks, isFromOtherCluster, signatures)
+    if err != nil {
+        return fmt.Errorf("delete file %s: %v", p, err)
+    }
+
+    if shouldDeleteChunks && !isDeleteCollection {
+        f.DirectDeleteChunks(chunks)
+    }
+    // A case not handled:
+    // what if the chunk is in a different collection?
+    if shouldDeleteChunks {
+        f.maybeDeleteHardLinks(hardLinkIds)
+    }
+
+    if isDeleteCollection {
+        collectionName := entry.Name()
+        f.doDeleteCollection(collectionName)
+        f.deleteBucket(collectionName)
+    }
+
+    return nil
+}
+
+func (f *Filer) doBatchDeleteFolderMetaAndData(ctx context.Context, entry *Entry, isRecursive, ignoreRecursiveError, shouldDeleteChunks, isDeletingBucket, isFromOtherCluster bool, signatures []int32) (chunks []*filer_pb.FileChunk, hardlinkIds []HardLinkId, err error) {
+
+    lastFileName := ""
+    includeLastFile := false
+    if !isDeletingBucket {
+        for {
+            entries, _, err := f.ListDirectoryEntries(ctx, entry.FullPath, lastFileName, includeLastFile, PaginationSize, "", "", "")
+            if err != nil {
+                glog.Errorf("list folder %s: %v", entry.FullPath, err)
+                return nil, nil, fmt.Errorf("list folder %s: %v", entry.FullPath, err)
+            }
+            if lastFileName == "" && !isRecursive && len(entries) > 0 {
+                // only for first iteration in the loop
+                glog.Errorf("deleting a folder %s has children: %+v ...", entry.FullPath, entries[0].Name())
+                return nil, nil, fmt.Errorf("%s: %s", MsgFailDelNonEmptyFolder, entry.FullPath)
+            }
+
+            for _, sub := range entries {
+                lastFileName = sub.Name()
+                var dirChunks []*filer_pb.FileChunk
+                var dirHardLinkIds []HardLinkId
+                if sub.IsDirectory() {
+                    subIsDeletingBucket := f.isBucket(sub)
+                    dirChunks, dirHardLinkIds, err = f.doBatchDeleteFolderMetaAndData(ctx, sub, isRecursive, ignoreRecursiveError, shouldDeleteChunks, subIsDeletingBucket, false, nil)
+                    chunks = append(chunks, dirChunks...)
+                    hardlinkIds = append(hardlinkIds, dirHardLinkIds...)
+                } else {
+                    f.NotifyUpdateEvent(ctx, sub, nil, shouldDeleteChunks, isFromOtherCluster, nil)
+                    if len(sub.HardLinkId) != 0 {
+                        // hard link chunk data are deleted separately
+                        hardlinkIds = append(hardlinkIds, sub.HardLinkId)
+                    } else {
+                        chunks = append(chunks, sub.Chunks...)
+                    }
+                }
+                if err != nil && !ignoreRecursiveError {
+                    return nil, nil, err
+                }
+            }
+
+            if len(entries) < PaginationSize {
+                break
+            }
+        }
+    }
+
+    glog.V(3).Infof("deleting directory %v delete %d chunks: %v", entry.FullPath, len(chunks), shouldDeleteChunks)
+
+    if storeDeletionErr := f.Store.DeleteFolderChildren(ctx, entry.FullPath); storeDeletionErr != nil {
+        return nil, nil, fmt.Errorf("filer store delete: %v", storeDeletionErr)
+    }
+
+    f.NotifyUpdateEvent(ctx, entry, nil, shouldDeleteChunks, isFromOtherCluster, signatures)
+
+    return chunks, hardlinkIds, nil
+}
+
+func (f *Filer) doDeleteEntryMetaAndData(ctx context.Context, entry *Entry, shouldDeleteChunks bool, isFromOtherCluster bool, signatures []int32) (err error) {
+
+    glog.V(3).Infof("deleting entry %v, delete chunks: %v", entry.FullPath, shouldDeleteChunks)
+
+    if storeDeletionErr := f.Store.DeleteOneEntry(ctx, entry); storeDeletionErr != nil {
+        return fmt.Errorf("filer store delete: %v", storeDeletionErr)
+    }
+    if !entry.IsDirectory() {
+        f.NotifyUpdateEvent(ctx, entry, nil, shouldDeleteChunks, isFromOtherCluster, signatures)
+    }
+
+    return nil
+}
+
+func (f *Filer) doDeleteCollection(collectionName string) (err error) {
+
+    return f.MasterClient.WithClient(func(client master_pb.SeaweedClient) error {
+        _, err := client.CollectionDelete(context.Background(), &master_pb.CollectionDeleteRequest{
+            Name: collectionName,
+        })
+        if err != nil {
+            glog.Infof("delete collection %s: %v", collectionName, err)
+        }
+        return err
+    })
+
+}
+
+func (f *Filer) maybeDeleteHardLinks(hardLinkIds []HardLinkId) {
+    for _, hardLinkId := range hardLinkIds {
+        if err := f.Store.DeleteHardLink(context.Background(), hardLinkId); err != nil {
+            glog.Errorf("delete hard link id %d : %v", hardLinkId, err)
+        }
+    }
+}
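For orientation, a typical call from a handler might look like this sketch (the path and flag values are illustrative):

// Recursively delete a folder: metadata is removed first while chunks are
// collected, then the chunk data is reclaimed afterwards.
err := f.DeleteEntryMetaAndData(
    context.Background(),
    util.FullPath("/buckets/photos/2020"),
    true,  // isRecursive: delete children as well
    false, // ignoreRecursiveError: stop on the first child error
    true,  // shouldDeleteChunks: also reclaim the file data
    false, // isFromOtherCluster
    nil,   // signatures
)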
diff --git a/weed/filer/filer_deletion.go b/weed/filer/filer_deletion.go
new file mode 100644
index 000000000..9eee38277
--- /dev/null
+++ b/weed/filer/filer_deletion.go
@@ -0,0 +1,153 @@
+package filer
+
+import (
+    "strings"
+    "time"
+
+    "github.com/chrislusf/seaweedfs/weed/glog"
+    "github.com/chrislusf/seaweedfs/weed/operation"
+    "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+    "github.com/chrislusf/seaweedfs/weed/wdclient"
+)
+
+func LookupByMasterClientFn(masterClient *wdclient.MasterClient) func(vids []string) (map[string]operation.LookupResult, error) {
+    return func(vids []string) (map[string]operation.LookupResult, error) {
+        m := make(map[string]operation.LookupResult)
+        for _, vid := range vids {
+            locs, _ := masterClient.GetVidLocations(vid)
+            var locations []operation.Location
+            for _, loc := range locs {
+                locations = append(locations, operation.Location{
+                    Url:       loc.Url,
+                    PublicUrl: loc.PublicUrl,
+                })
+            }
+            m[vid] = operation.LookupResult{
+                VolumeId:  vid,
+                Locations: locations,
+            }
+        }
+        return m, nil
+    }
+}
+
+func (f *Filer) loopProcessingDeletion() {
+
+    lookupFunc := LookupByMasterClientFn(f.MasterClient)
+
+    DeletionBatchSize := 100000 // roughly 20 bytes cost per file id.
+
+    var deletionCount int
+    for {
+        deletionCount = 0
+        f.fileIdDeletionQueue.Consume(func(fileIds []string) {
+            for len(fileIds) > 0 {
+                var toDeleteFileIds []string
+                if len(fileIds) > DeletionBatchSize {
+                    toDeleteFileIds = fileIds[:DeletionBatchSize]
+                    fileIds = fileIds[DeletionBatchSize:]
+                } else {
+                    toDeleteFileIds = fileIds
+                    fileIds = fileIds[:0]
+                }
+                deletionCount = len(toDeleteFileIds)
+                _, err := operation.DeleteFilesWithLookupVolumeId(f.GrpcDialOption, toDeleteFileIds, lookupFunc)
+                if err != nil {
+                    if !strings.Contains(err.Error(), "already deleted") {
+                        glog.V(0).Infof("deleting fileIds len=%d error: %v", deletionCount, err)
+                    }
+                } else {
+                    glog.V(1).Infof("deleting fileIds len=%d", deletionCount)
+                }
+            }
+        })
+
+        if deletionCount == 0 {
+            time.Sleep(1123 * time.Millisecond)
+        }
+    }
+}
+
+func (f *Filer) doDeleteFileIds(fileIds []string) {
+
+    lookupFunc := LookupByMasterClientFn(f.MasterClient)
+    DeletionBatchSize := 100000 // roughly 20 bytes cost per file id.
+
+    for len(fileIds) > 0 {
+        var toDeleteFileIds []string
+        if len(fileIds) > DeletionBatchSize {
+            toDeleteFileIds = fileIds[:DeletionBatchSize]
+            fileIds = fileIds[DeletionBatchSize:]
+        } else {
+            toDeleteFileIds = fileIds
+            fileIds = fileIds[:0]
+        }
+        deletionCount := len(toDeleteFileIds)
+        _, err := operation.DeleteFilesWithLookupVolumeId(f.GrpcDialOption, toDeleteFileIds, lookupFunc)
+        if err != nil {
+            if !strings.Contains(err.Error(), "already deleted") {
+                glog.V(0).Infof("deleting fileIds len=%d error: %v", deletionCount, err)
+            }
+        }
+    }
+}
+
+func (f *Filer) DirectDeleteChunks(chunks []*filer_pb.FileChunk) {
+    var fileIdsToDelete []string
+    for _, chunk := range chunks {
+        if !chunk.IsChunkManifest {
+            fileIdsToDelete = append(fileIdsToDelete, chunk.GetFileIdString())
+            continue
+        }
+        dataChunks, manifestResolveErr := ResolveOneChunkManifest(f.MasterClient.LookupFileId, chunk)
+        if manifestResolveErr != nil {
+            glog.V(0).Infof("failed to resolve manifest %s: %v", chunk.FileId, manifestResolveErr)
+        }
+        for _, dChunk := range dataChunks {
+            fileIdsToDelete = append(fileIdsToDelete, dChunk.GetFileIdString())
+        }
+        fileIdsToDelete = append(fileIdsToDelete, chunk.GetFileIdString())
+    }
+
+    f.doDeleteFileIds(fileIdsToDelete)
+}
+
+func (f *Filer) DeleteChunks(chunks []*filer_pb.FileChunk) {
+    for _, chunk := range chunks {
+        if !chunk.IsChunkManifest {
+            f.fileIdDeletionQueue.EnQueue(chunk.GetFileIdString())
+            continue
+        }
+        dataChunks, manifestResolveErr := ResolveOneChunkManifest(f.MasterClient.LookupFileId, chunk)
+        if manifestResolveErr != nil {
+            glog.V(0).Infof("failed to resolve manifest %s: %v", chunk.FileId, manifestResolveErr)
+        }
+        for _, dChunk := range dataChunks {
+            f.fileIdDeletionQueue.EnQueue(dChunk.GetFileIdString())
+        }
+        f.fileIdDeletionQueue.EnQueue(chunk.GetFileIdString())
+    }
+}
+
+func (f *Filer) deleteChunksIfNotNew(oldEntry, newEntry *Entry) {
+
+    if oldEntry == nil {
+        return
+    }
+    if newEntry == nil {
+        f.DeleteChunks(oldEntry.Chunks)
+        return
+    }
+
+    var toDelete []*filer_pb.FileChunk
+    newChunkIds := make(map[string]bool)
+    for _, newChunk := range newEntry.Chunks {
+        newChunkIds[newChunk.GetFileIdString()] = true
+    }
+
+    for _, oldChunk := range oldEntry.Chunks {
+        if _, found := newChunkIds[oldChunk.GetFileIdString()]; !found {
+            toDelete = append(toDelete, oldChunk)
+        }
+    }
+    f.DeleteChunks(toDelete)
+}
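The update path reclaims only chunks that the new entry no longer references. Within the package, the set difference behaves like this sketch (file ids are invented):

// In-package sketch: an update replaces chunk "b" with "c", so only "b"
// is enqueued for deletion; "a" is still referenced by the new entry.
oldEntry := &Entry{Chunks: []*filer_pb.FileChunk{{FileId: "a"}, {FileId: "b"}}}
newEntry := &Entry{Chunks: []*filer_pb.FileChunk{{FileId: "a"}, {FileId: "c"}}}
f.deleteChunksIfNotNew(oldEntry, newEntry) // enqueues just "b"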
diff --git a/weed/filer/filer_notify.go b/weed/filer/filer_notify.go
new file mode 100644
index 000000000..7ab101102
--- /dev/null
+++ b/weed/filer/filer_notify.go
@@ -0,0 +1,185 @@
+package filer
+
+import (
+    "context"
+    "fmt"
+    "io"
+    "strings"
+    "time"
+
+    "github.com/golang/protobuf/proto"
+
+    "github.com/chrislusf/seaweedfs/weed/glog"
+    "github.com/chrislusf/seaweedfs/weed/notification"
+    "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+    "github.com/chrislusf/seaweedfs/weed/util"
+)
+
+func (f *Filer) NotifyUpdateEvent(ctx context.Context, oldEntry, newEntry *Entry, deleteChunks, isFromOtherCluster bool, signatures []int32) {
+    var fullpath string
+    if oldEntry != nil {
+        fullpath = string(oldEntry.FullPath)
+    } else if newEntry != nil {
+        fullpath = string(newEntry.FullPath)
+    } else {
+        return
+    }
+
+    // println("fullpath:", fullpath)
+
+    if strings.HasPrefix(fullpath, SystemLogDir) {
+        return
+    }
+    foundSelf := false
+    for _, sig := range signatures {
+        if sig == f.Signature {
+            foundSelf = true
+        }
+    }
+    if !foundSelf {
+        signatures = append(signatures, f.Signature)
+    }
+
+    newParentPath := ""
+    if newEntry != nil {
+        newParentPath, _ = newEntry.FullPath.DirAndName()
+    }
+    eventNotification := &filer_pb.EventNotification{
+        OldEntry:           oldEntry.ToProtoEntry(),
+        NewEntry:           newEntry.ToProtoEntry(),
+        DeleteChunks:       deleteChunks,
+        NewParentPath:      newParentPath,
+        IsFromOtherCluster: isFromOtherCluster,
+        Signatures:         signatures,
+    }
+
+    if notification.Queue != nil {
+        glog.V(3).Infof("notifying entry update %v", fullpath)
+        if err := notification.Queue.SendMessage(fullpath, eventNotification); err != nil {
+            // throw message
+            glog.Error(err)
+        }
+    }
+
+    f.logMetaEvent(ctx, fullpath, eventNotification)
+
+}
+
+func (f *Filer) logMetaEvent(ctx context.Context, fullpath string, eventNotification *filer_pb.EventNotification) {
+
+    dir, _ := util.FullPath(fullpath).DirAndName()
+
+    event := &filer_pb.SubscribeMetadataResponse{
+        Directory:         dir,
+        EventNotification: eventNotification,
+        TsNs:              time.Now().UnixNano(),
+    }
+    data, err := proto.Marshal(event)
+    if err != nil {
+        glog.Errorf("failed to marshal filer_pb.SubscribeMetadataResponse %+v: %v", event, err)
+        return
+    }
+
+    f.LocalMetaLogBuffer.AddToBuffer([]byte(dir), data, event.TsNs)
+
+}
+
+func (f *Filer) logFlushFunc(startTime, stopTime time.Time, buf []byte) {
+
+    if len(buf) == 0 {
+        return
+    }
+
+    startTime, stopTime = startTime.UTC(), stopTime.UTC()
+
+    targetFile := fmt.Sprintf("%s/%04d-%02d-%02d/%02d-%02d.segment", SystemLogDir,
+        startTime.Year(), startTime.Month(), startTime.Day(), startTime.Hour(), startTime.Minute(),
+        // startTime.Second(), startTime.Nanosecond(),
+    )
+
+    for {
+        if err := f.appendToFile(targetFile, buf); err != nil {
+            glog.V(1).Infof("log write failed %s: %v", targetFile, err)
+            time.Sleep(737 * time.Millisecond)
+        } else {
+            break
+        }
+    }
+}
+
+func (f *Filer) ReadPersistedLogBuffer(startTime time.Time, eachLogEntryFn func(logEntry *filer_pb.LogEntry) error) (lastTsNs int64, err error) {
+
+    startTime = startTime.UTC()
+    startDate := fmt.Sprintf("%04d-%02d-%02d", startTime.Year(), startTime.Month(), startTime.Day())
+    startHourMinute := fmt.Sprintf("%02d-%02d.segment", startTime.Hour(), startTime.Minute())
+
+    sizeBuf := make([]byte, 4)
+    startTsNs := startTime.UnixNano()
+
+    dayEntries, _, listDayErr := f.ListDirectoryEntries(context.Background(), SystemLogDir, startDate, true, 366, "", "", "")
+    if listDayErr != nil {
+        return lastTsNs, fmt.Errorf("fail to list log by day: %v", listDayErr)
+    }
+    for _, dayEntry := range dayEntries {
+        // println("checking day", dayEntry.FullPath)
+        hourMinuteEntries, _, listHourMinuteErr := f.ListDirectoryEntries(context.Background(), util.NewFullPath(SystemLogDir, dayEntry.Name()), "", false, 24*60, "", "", "")
+        if listHourMinuteErr != nil {
+            return lastTsNs, fmt.Errorf("fail to list log %s by day: %v", dayEntry.Name(), listHourMinuteErr)
+        }
+        for _, hourMinuteEntry := range hourMinuteEntries {
+            // println("checking hh-mm", hourMinuteEntry.FullPath)
+            if dayEntry.Name() == startDate {
+                if strings.Compare(hourMinuteEntry.Name(), startHourMinute) < 0 {
+                    continue
+                }
+            }
+            // println("processing", hourMinuteEntry.FullPath)
+            chunkedFileReader := NewChunkStreamReaderFromFiler(f.MasterClient, hourMinuteEntry.Chunks)
+            if lastTsNs, err = ReadEachLogEntry(chunkedFileReader, sizeBuf, startTsNs, eachLogEntryFn); err != nil {
+                chunkedFileReader.Close()
+                if err == io.EOF {
+                    continue
+                }
+                return lastTsNs, fmt.Errorf("reading %s: %v", hourMinuteEntry.FullPath, err)
+            }
+            chunkedFileReader.Close()
+        }
+    }
+
+    return lastTsNs, nil
+}
+
+func ReadEachLogEntry(r io.Reader, sizeBuf []byte, ns int64, eachLogEntryFn func(logEntry *filer_pb.LogEntry) error) (lastTsNs int64, err error) {
+    for {
+        n, err := r.Read(sizeBuf)
+        if err != nil {
+            return lastTsNs, err
+        }
+        if n != 4 {
+            return lastTsNs, fmt.Errorf("size %d bytes, expected 4 bytes", n)
+        }
+        size := util.BytesToUint32(sizeBuf)
+        // println("entry size", size)
+        entryData := make([]byte, size)
+        n, err = r.Read(entryData)
+        if err != nil {
+            return lastTsNs, err
+        }
+        if n != int(size) {
+            return lastTsNs, fmt.Errorf("entry data %d bytes, expected %d bytes", n, size)
+        }
+        logEntry := &filer_pb.LogEntry{}
+        if err = proto.Unmarshal(entryData, logEntry); err != nil {
+            return lastTsNs, err
+        }
+        if logEntry.TsNs <= ns {
+            continue
+        }
+        // println("each log: ", logEntry.TsNs)
+        if err := eachLogEntryFn(logEntry); err != nil {
+            return lastTsNs, err
+        } else {
+            lastTsNs = logEntry.TsNs
+        }
+    }
+}
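ReadEachLogEntry expects each flushed segment to be a sequence of records framed as a 4-byte size prefix followed by a marshaled filer_pb.LogEntry. A matching writer, as a sketch that mirrors the reader's use of the util helpers (the encodeLogEntry name is invented for illustration):

package main

import (
    "bytes"

    "github.com/golang/protobuf/proto"

    "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
    "github.com/chrislusf/seaweedfs/weed/util"
)

// encodeLogEntry frames one LogEntry the way ReadEachLogEntry consumes it:
// a 4-byte size prefix (util.Uint32toBytes, read back by util.BytesToUint32)
// followed by the marshaled protobuf bytes.
func encodeLogEntry(logEntry *filer_pb.LogEntry) ([]byte, error) {
    data, err := proto.Marshal(logEntry)
    if err != nil {
        return nil, err
    }
    sizeBuf := make([]byte, 4)
    util.Uint32toBytes(sizeBuf, uint32(len(data)))
    return append(sizeBuf, data...), nil
}

func main() {
    var segment bytes.Buffer
    framed, _ := encodeLogEntry(&filer_pb.LogEntry{TsNs: 1, Data: []byte("event")})
    segment.Write(framed)
}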
diff --git a/weed/filer/filer_notify_append.go b/weed/filer/filer_notify_append.go
new file mode 100644
index 000000000..d441bbbc9
--- /dev/null
+++ b/weed/filer/filer_notify_append.go
@@ -0,0 +1,75 @@
+package filer
+
+import (
+    "context"
+    "fmt"
+    "os"
+    "time"
+
+    "github.com/chrislusf/seaweedfs/weed/operation"
+    "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+    "github.com/chrislusf/seaweedfs/weed/util"
+)
+
+func (f *Filer) appendToFile(targetFile string, data []byte) error {
+
+    assignResult, uploadResult, err2 := f.assignAndUpload(targetFile, data)
+    if err2 != nil {
+        return err2
+    }
+
+    // find out existing entry
+    fullpath := util.FullPath(targetFile)
+    entry, err := f.FindEntry(context.Background(), fullpath)
+    var offset int64 = 0
+    if err == filer_pb.ErrNotFound {
+        entry = &Entry{
+            FullPath: fullpath,
+            Attr: Attr{
+                Crtime: time.Now(),
+                Mtime:  time.Now(),
+                Mode:   os.FileMode(0644),
+                Uid:    OS_UID,
+                Gid:    OS_GID,
+            },
+        }
+    } else {
+        offset = int64(TotalSize(entry.Chunks))
+    }
+
+    // append to existing chunks
+    entry.Chunks = append(entry.Chunks, uploadResult.ToPbFileChunk(assignResult.Fid, offset))
+
+    // update the entry
+    err = f.CreateEntry(context.Background(), entry, false, false, nil)
+
+    return err
+}
+
+func (f *Filer) assignAndUpload(targetFile string, data []byte) (*operation.AssignResult, *operation.UploadResult, error) {
+    // assign a volume location
+    rule := f.FilerConf.MatchStorageRule(targetFile)
+    assignRequest := &operation.VolumeAssignRequest{
+        Count:               1,
+        Collection:          util.Nvl(f.metaLogCollection, rule.Collection),
+        Replication:         util.Nvl(f.metaLogReplication, rule.Replication),
+        WritableVolumeCount: rule.VolumeGrowthCount,
+    }
+
+    assignResult, err := operation.Assign(f.GetMaster, f.GrpcDialOption, assignRequest)
+    if err != nil {
+        return nil, nil, fmt.Errorf("AssignVolume: %v", err)
+    }
+    if assignResult.Error != "" {
+        return nil, nil, fmt.Errorf("AssignVolume error: %v", assignResult.Error)
+    }
+
+    // upload data
+    targetUrl := "http://" + assignResult.Url + "/" + assignResult.Fid
+    uploadResult, err := operation.UploadData(targetUrl, "", f.Cipher, data, false, "", nil, assignResult.Auth)
+    if err != nil {
+        return nil, nil, fmt.Errorf("upload data %s: %v", targetUrl, err)
+    }
+    // println("uploaded to", targetUrl)
+    return assignResult, uploadResult, nil
+}
diff --git a/weed/filer2/filer_notify_test.go b/weed/filer/filer_notify_test.go
similarity index 90%
rename from weed/filer2/filer_notify_test.go
rename to weed/filer/filer_notify_test.go
index b74e2ad35..6a2be8f18 100644
--- a/weed/filer2/filer_notify_test.go
+++ b/weed/filer/filer_notify_test.go
@@ -1,17 +1,19 @@
-package filer2
+package filer
 
 import (
     "testing"
     "time"
 
     "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+    "github.com/chrislusf/seaweedfs/weed/util"
+
+    "github.com/golang/protobuf/proto"
 )
 
 func TestProtoMarshalText(t *testing.T) {
 
     oldEntry := &Entry{
-        FullPath: FullPath("/this/path/to"),
+        FullPath: util.FullPath("/this/path/to"),
         Attr: Attr{
             Mtime: time.Now(),
             Mode:  0644,
diff --git a/weed/filer/filer_on_meta_event.go b/weed/filer/filer_on_meta_event.go
new file mode 100644
index 000000000..a91faeb24
--- /dev/null
+++ b/weed/filer/filer_on_meta_event.go
@@ -0,0 +1,82 @@
+package filer
+
+import (
+    "bytes"
+    "math"
+
+    "github.com/chrislusf/seaweedfs/weed/glog"
+    "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+    "github.com/chrislusf/seaweedfs/weed/util"
+)
+
+// onMetadataChangeEvent is triggered after filer processed change events from local or remote filers
+func (f *Filer) onMetadataChangeEvent(event *filer_pb.SubscribeMetadataResponse) {
+    f.maybeReloadFilerConfiguration(event)
+    f.onBucketEvents(event)
+}
+
+func (f *Filer) onBucketEvents(event *filer_pb.SubscribeMetadataResponse) {
+    message := event.EventNotification
+    for _, sig := range message.Signatures {
+        if sig == f.Signature {
+            return
+        }
+    }
+    if f.DirBucketsPath == event.Directory {
+        if message.OldEntry == nil && message.NewEntry != nil {
+            f.Store.OnBucketCreation(message.NewEntry.Name)
+        }
+        if message.OldEntry != nil && message.NewEntry == nil {
+            f.Store.OnBucketDeletion(message.OldEntry.Name)
+        }
+    }
+}
+
+func (f *Filer) maybeReloadFilerConfiguration(event *filer_pb.SubscribeMetadataResponse) {
+    if DirectoryEtcSeaweedFS != event.Directory {
+        if DirectoryEtcSeaweedFS != event.EventNotification.NewParentPath {
+            return
+        }
+    }
+
+    entry := event.EventNotification.NewEntry
+    if entry == nil {
+        return
+    }
+
+    glog.V(0).Infof("processing %v", event)
+    if entry.Name == FilerConfName {
+        f.reloadFilerConfiguration(entry)
+    }
+}
+
+func (f *Filer) readEntry(chunks []*filer_pb.FileChunk) ([]byte, error) {
+    var buf bytes.Buffer
+    err := StreamContent(f.MasterClient, &buf, chunks, 0, math.MaxInt64, false)
+    if err != nil {
+        return nil, err
+    }
+    return buf.Bytes(), nil
+}
+
+func (f *Filer) reloadFilerConfiguration(entry *filer_pb.Entry) {
+    fc := NewFilerConf()
+    err := fc.loadFromChunks(f, entry.Content, entry.Chunks)
+    if err != nil {
+        glog.Errorf("read filer conf chunks: %v", err)
+        return
+    }
+    f.FilerConf = fc
+}
+
+func (f *Filer) LoadFilerConf() {
+    fc := NewFilerConf()
+    err := util.Retry("loadFilerConf", func() error {
+        return fc.loadFromFiler(f)
+    })
+    if err != nil {
+        glog.Errorf("read filer conf: %v", err)
+        return
+    }
+    f.FilerConf = fc
+}
!= nil { + glog.Errorf("read filer conf: %v", err) + return + } + f.FilerConf = fc +} diff --git a/weed/filer/filer_rename.go b/weed/filer/filer_rename.go new file mode 100644 index 000000000..b6f0cf6de --- /dev/null +++ b/weed/filer/filer_rename.go @@ -0,0 +1,30 @@ +package filer + +import ( + "fmt" + "github.com/chrislusf/seaweedfs/weed/util" + "strings" +) + +func (f *Filer) CanRename(source, target util.FullPath) error { + sourceBucket := f.DetectBucket(source) + targetBucket := f.DetectBucket(target) + if sourceBucket != targetBucket { + return fmt.Errorf("can not move across collection %s => %s", sourceBucket, targetBucket) + } + return nil +} + +func (f *Filer) DetectBucket(source util.FullPath) (bucket string) { + if strings.HasPrefix(string(source), f.DirBucketsPath+"/") { + bucketAndObjectKey := string(source)[len(f.DirBucketsPath)+1:] + t := strings.Index(bucketAndObjectKey, "/") + if t < 0 { + bucket = bucketAndObjectKey + } + if t > 0 { + bucket = bucketAndObjectKey[:t] + } + } + return bucket +} diff --git a/weed/filer/filer_search.go b/weed/filer/filer_search.go new file mode 100644 index 000000000..f43312cfa --- /dev/null +++ b/weed/filer/filer_search.go @@ -0,0 +1,98 @@ +package filer + +import ( + "context" + "github.com/chrislusf/seaweedfs/weed/util" + "path/filepath" + "strings" +) + +func splitPattern(pattern string) (prefix string, restPattern string) { + position := strings.Index(pattern, "*") + if position >= 0 { + return pattern[:position], pattern[position:] + } + position = strings.Index(pattern, "?") + if position >= 0 { + return pattern[:position], pattern[position:] + } + return "", restPattern +} + +// For now, prefix and namePattern are mutually exclusive +func (f *Filer) ListDirectoryEntries(ctx context.Context, p util.FullPath, startFileName string, inclusive bool, limit int64, prefix string, namePattern string, namePatternExclude string) (entries []*Entry, hasMore bool, err error) { + + _, err = f.StreamListDirectoryEntries(ctx, p, startFileName, inclusive, limit+1, prefix, namePattern, namePatternExclude, func(entry *Entry) bool { + entries = append(entries, entry) + return true + }) + + hasMore = int64(len(entries)) >= limit+1 + if hasMore { + entries = entries[:limit] + } + + return entries, hasMore, err +} + +// For now, prefix and namePattern are mutually exclusive +func (f *Filer) StreamListDirectoryEntries(ctx context.Context, p util.FullPath, startFileName string, inclusive bool, limit int64, prefix string, namePattern string, namePatternExclude string, eachEntryFunc ListEachEntryFunc) (lastFileName string, err error) { + if strings.HasSuffix(string(p), "/") && len(p) > 1 { + p = p[0 : len(p)-1] + } + + prefixInNamePattern, restNamePattern := splitPattern(namePattern) + if prefixInNamePattern != "" { + prefix = prefixInNamePattern + } + var missedCount int64 + + missedCount, lastFileName, err = f.doListPatternMatchedEntries(ctx, p, startFileName, inclusive, limit, prefix, restNamePattern, namePatternExclude, eachEntryFunc) + + for missedCount > 0 && err == nil { + missedCount, lastFileName, err = f.doListPatternMatchedEntries(ctx, p, lastFileName, false, missedCount, prefix, restNamePattern, namePatternExclude, eachEntryFunc) + } + + return +} + +func (f *Filer) doListPatternMatchedEntries(ctx context.Context, p util.FullPath, startFileName string, inclusive bool, limit int64, prefix, restNamePattern string, namePatternExclude string, eachEntryFunc ListEachEntryFunc) (missedCount int64, lastFileName string, err error) { + + if len(restNamePattern) 
== 0 && len(namePatternExclude) == 0{ + lastFileName, err = f.doListValidEntries(ctx, p, startFileName, inclusive, limit, prefix, eachEntryFunc) + return 0, lastFileName, err + } + + lastFileName, err = f.doListValidEntries(ctx, p, startFileName, inclusive, limit, prefix, func(entry *Entry) bool { + nameToTest := entry.Name() + if len(namePatternExclude) > 0 { + if matched, matchErr := filepath.Match(namePatternExclude, nameToTest); matchErr == nil && matched { + missedCount++ + return true + } + } + if len(restNamePattern) > 0 { + if matched, matchErr := filepath.Match(restNamePattern, nameToTest[len(prefix):]); matchErr == nil && !matched { + missedCount++ + return true + } + } + if !eachEntryFunc(entry) { + return false + } + return true + }) + if err != nil { + return + } + return +} + +func (f *Filer) doListValidEntries(ctx context.Context, p util.FullPath, startFileName string, inclusive bool, limit int64, prefix string, eachEntryFunc ListEachEntryFunc) (lastFileName string, err error) { + var expiredCount int64 + expiredCount, lastFileName, err = f.doListDirectoryEntries(ctx, p, startFileName, inclusive, limit, prefix, eachEntryFunc) + for expiredCount > 0 && err == nil { + expiredCount, lastFileName, err = f.doListDirectoryEntries(ctx, p, lastFileName, false, expiredCount, prefix, eachEntryFunc) + } + return +} diff --git a/weed/filer/filerstore.go b/weed/filer/filerstore.go new file mode 100644 index 000000000..a5b2f25de --- /dev/null +++ b/weed/filer/filerstore.go @@ -0,0 +1,46 @@ +package filer + +import ( + "context" + "errors" + "github.com/chrislusf/seaweedfs/weed/util" +) + +var ( + ErrUnsupportedListDirectoryPrefixed = errors.New("unsupported directory prefix listing") + ErrUnsupportedSuperLargeDirectoryListing = errors.New("unsupported super large directory listing") + ErrKvNotImplemented = errors.New("kv not implemented yet") + ErrKvNotFound = errors.New("kv: not found") +) + +type ListEachEntryFunc func(entry *Entry) bool + +type FilerStore interface { + // GetName gets the name to locate the configuration in filer.toml file + GetName() string + // Initialize initializes the file store + Initialize(configuration util.Configuration, prefix string) error + InsertEntry(context.Context, *Entry) error + UpdateEntry(context.Context, *Entry) (err error) + // err == filer_pb.ErrNotFound if not found + FindEntry(context.Context, util.FullPath) (entry *Entry, err error) + DeleteEntry(context.Context, util.FullPath) (err error) + DeleteFolderChildren(context.Context, util.FullPath) (err error) + ListDirectoryEntries(ctx context.Context, dirPath util.FullPath, startFileName string, includeStartFile bool, limit int64, eachEntryFunc ListEachEntryFunc) (lastFileName string, err error) + ListDirectoryPrefixedEntries(ctx context.Context, dirPath util.FullPath, startFileName string, includeStartFile bool, limit int64, prefix string, eachEntryFunc ListEachEntryFunc) (lastFileName string, err error) + + BeginTransaction(ctx context.Context) (context.Context, error) + CommitTransaction(ctx context.Context) error + RollbackTransaction(ctx context.Context) error + + KvPut(ctx context.Context, key []byte, value []byte) (err error) + KvGet(ctx context.Context, key []byte) (value []byte, err error) + KvDelete(ctx context.Context, key []byte) (err error) + + Shutdown() +} + +type BucketAware interface { + OnBucketCreation(bucket string) + OnBucketDeletion(bucket string) +} diff --git a/weed/filer/filerstore_hardlink.go b/weed/filer/filerstore_hardlink.go new file mode 100644 index 
000000000..316c76a0c --- /dev/null +++ b/weed/filer/filerstore_hardlink.go @@ -0,0 +1,102 @@ +package filer + +import ( + "bytes" + "context" + "fmt" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" +) + +func (fsw *FilerStoreWrapper) handleUpdateToHardLinks(ctx context.Context, entry *Entry) error { + if len(entry.HardLinkId) == 0 { + return nil + } + // handle hard links + if err := fsw.setHardLink(ctx, entry); err != nil { + return fmt.Errorf("setHardLink %d: %v", entry.HardLinkId, err) + } + + // check what is existing entry + glog.V(4).Infof("handleUpdateToHardLinks FindEntry %s", entry.FullPath) + actualStore := fsw.getActualStore(entry.FullPath) + existingEntry, err := actualStore.FindEntry(ctx, entry.FullPath) + if err != nil && err != filer_pb.ErrNotFound { + return fmt.Errorf("update existing entry %s: %v", entry.FullPath, err) + } + + // remove old hard link + if err == nil && len(existingEntry.HardLinkId) != 0 && bytes.Compare(existingEntry.HardLinkId, entry.HardLinkId) != 0 { + glog.V(4).Infof("handleUpdateToHardLinks DeleteHardLink %s", entry.FullPath) + if err = fsw.DeleteHardLink(ctx, existingEntry.HardLinkId); err != nil { + return err + } + } + return nil +} + +func (fsw *FilerStoreWrapper) setHardLink(ctx context.Context, entry *Entry) error { + if len(entry.HardLinkId) == 0 { + return nil + } + key := entry.HardLinkId + + newBlob, encodeErr := entry.EncodeAttributesAndChunks() + if encodeErr != nil { + return encodeErr + } + + return fsw.KvPut(ctx, key, newBlob) +} + +func (fsw *FilerStoreWrapper) maybeReadHardLink(ctx context.Context, entry *Entry) error { + if len(entry.HardLinkId) == 0 { + return nil + } + key := entry.HardLinkId + + glog.V(4).Infof("maybeReadHardLink KvGet %v", key) + value, err := fsw.KvGet(ctx, key) + if err != nil { + glog.Errorf("read %s hardlink %d: %v", entry.FullPath, entry.HardLinkId, err) + return err + } + + if err = entry.DecodeAttributesAndChunks(value); err != nil { + glog.Errorf("decode %s hardlink %d: %v", entry.FullPath, entry.HardLinkId, err) + return err + } + + return nil +} + +func (fsw *FilerStoreWrapper) DeleteHardLink(ctx context.Context, hardLinkId HardLinkId) error { + key := hardLinkId + value, err := fsw.KvGet(ctx, key) + if err == ErrKvNotFound { + return nil + } + if err != nil { + return err + } + + entry := &Entry{} + if err = entry.DecodeAttributesAndChunks(value); err != nil { + return err + } + + entry.HardLinkCounter-- + if entry.HardLinkCounter <= 0 { + glog.V(4).Infof("DeleteHardLink KvDelete %v", key) + return fsw.KvDelete(ctx, key) + } + + newBlob, encodeErr := entry.EncodeAttributesAndChunks() + if encodeErr != nil { + return encodeErr + } + + glog.V(4).Infof("DeleteHardLink KvPut %v", key) + return fsw.KvPut(ctx, key, newBlob) + +} diff --git a/weed/filer/filerstore_translate_path.go b/weed/filer/filerstore_translate_path.go new file mode 100644 index 000000000..00bf82ed4 --- /dev/null +++ b/weed/filer/filerstore_translate_path.go @@ -0,0 +1,153 @@ +package filer + +import ( + "context" + "github.com/chrislusf/seaweedfs/weed/util" + "strings" +) + +var ( + _ = FilerStore(&FilerStorePathTranlator{}) +) + +type FilerStorePathTranlator struct { + actualStore FilerStore + storeRoot string +} + +func NewFilerStorePathTranlator(storeRoot string, store FilerStore) *FilerStorePathTranlator { + if innerStore, ok := store.(*FilerStorePathTranlator); ok { + return innerStore + } + + if !strings.HasSuffix(storeRoot, "/") { + storeRoot += "/" + } + + return 
&FilerStorePathTranlator{ + actualStore: store, + storeRoot: storeRoot, + } +} + +func (t *FilerStorePathTranlator) translatePath(fp util.FullPath) (newPath util.FullPath) { + newPath = fp + if t.storeRoot == "/" { + return + } + newPath = fp[len(t.storeRoot)-1:] + if newPath == "" { + newPath = "/" + } + return +} +func (t *FilerStorePathTranlator) changeEntryPath(entry *Entry) (previousPath util.FullPath) { + previousPath = entry.FullPath + if t.storeRoot == "/" { + return + } + entry.FullPath = t.translatePath(previousPath) + return +} +func (t *FilerStorePathTranlator) recoverEntryPath(entry *Entry, previousPath util.FullPath) { + entry.FullPath = previousPath +} + +func (t *FilerStorePathTranlator) GetName() string { + return t.actualStore.GetName() +} + +func (t *FilerStorePathTranlator) Initialize(configuration util.Configuration, prefix string) error { + return t.actualStore.Initialize(configuration, prefix) +} + +func (t *FilerStorePathTranlator) InsertEntry(ctx context.Context, entry *Entry) error { + previousPath := t.changeEntryPath(entry) + defer t.recoverEntryPath(entry, previousPath) + + return t.actualStore.InsertEntry(ctx, entry) +} + +func (t *FilerStorePathTranlator) UpdateEntry(ctx context.Context, entry *Entry) error { + previousPath := t.changeEntryPath(entry) + defer t.recoverEntryPath(entry, previousPath) + + return t.actualStore.UpdateEntry(ctx, entry) +} + +func (t *FilerStorePathTranlator) FindEntry(ctx context.Context, fp util.FullPath) (entry *Entry, err error) { + if t.storeRoot == "/" { + return t.actualStore.FindEntry(ctx, fp) + } + newFullPath := t.translatePath(fp) + entry, err = t.actualStore.FindEntry(ctx, newFullPath) + if err == nil { + entry.FullPath = fp[:len(t.storeRoot)-1] + entry.FullPath + } + return +} + +func (t *FilerStorePathTranlator) DeleteEntry(ctx context.Context, fp util.FullPath) (err error) { + newFullPath := t.translatePath(fp) + return t.actualStore.DeleteEntry(ctx, newFullPath) +} + +func (t *FilerStorePathTranlator) DeleteOneEntry(ctx context.Context, existingEntry *Entry) (err error) { + + previousPath := t.changeEntryPath(existingEntry) + defer t.recoverEntryPath(existingEntry, previousPath) + + return t.actualStore.DeleteEntry(ctx, existingEntry.FullPath) +} + +func (t *FilerStorePathTranlator) DeleteFolderChildren(ctx context.Context, fp util.FullPath) (err error) { + newFullPath := t.translatePath(fp) + + return t.actualStore.DeleteFolderChildren(ctx, newFullPath) +} + +func (t *FilerStorePathTranlator) ListDirectoryEntries(ctx context.Context, dirPath util.FullPath, startFileName string, includeStartFile bool, limit int64, eachEntryFunc ListEachEntryFunc) (string, error) { + + newFullPath := t.translatePath(dirPath) + + return t.actualStore.ListDirectoryEntries(ctx, newFullPath, startFileName, includeStartFile, limit, func(entry *Entry) bool { + entry.FullPath = dirPath[:len(t.storeRoot)-1] + entry.FullPath + return eachEntryFunc(entry) + }) +} + +func (t *FilerStorePathTranlator) ListDirectoryPrefixedEntries(ctx context.Context, dirPath util.FullPath, startFileName string, includeStartFile bool, limit int64, prefix string, eachEntryFunc ListEachEntryFunc) (string, error) { + + newFullPath := t.translatePath(dirPath) + + return t.actualStore.ListDirectoryPrefixedEntries(ctx, newFullPath, startFileName, includeStartFile, limit, prefix, func(entry *Entry) bool { + entry.FullPath = dirPath[:len(t.storeRoot)-1] + entry.FullPath + return eachEntryFunc(entry) + }) +} + +func (t *FilerStorePathTranlator) BeginTransaction(ctx 
context.Context) (context.Context, error) { + return t.actualStore.BeginTransaction(ctx) +} + +func (t *FilerStorePathTranlator) CommitTransaction(ctx context.Context) error { + return t.actualStore.CommitTransaction(ctx) +} + +func (t *FilerStorePathTranlator) RollbackTransaction(ctx context.Context) error { + return t.actualStore.RollbackTransaction(ctx) +} + +func (t *FilerStorePathTranlator) Shutdown() { + t.actualStore.Shutdown() +} + +func (t *FilerStorePathTranlator) KvPut(ctx context.Context, key []byte, value []byte) (err error) { + return t.actualStore.KvPut(ctx, key, value) +} +func (t *FilerStorePathTranlator) KvGet(ctx context.Context, key []byte) (value []byte, err error) { + return t.actualStore.KvGet(ctx, key) +} +func (t *FilerStorePathTranlator) KvDelete(ctx context.Context, key []byte) (err error) { + return t.actualStore.KvDelete(ctx, key) +} diff --git a/weed/filer/filerstore_wrapper.go b/weed/filer/filerstore_wrapper.go new file mode 100644 index 000000000..cd7c0bea3 --- /dev/null +++ b/weed/filer/filerstore_wrapper.go @@ -0,0 +1,322 @@ +package filer + +import ( + "context" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/viant/ptrie" + "strings" + "time" + + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/stats" + "github.com/chrislusf/seaweedfs/weed/util" +) + +var ( + _ = VirtualFilerStore(&FilerStoreWrapper{}) +) + +type VirtualFilerStore interface { + FilerStore + DeleteHardLink(ctx context.Context, hardLinkId HardLinkId) error + DeleteOneEntry(ctx context.Context, entry *Entry) error + AddPathSpecificStore(path string, storeId string, store FilerStore) + OnBucketCreation(bucket string) + OnBucketDeletion(bucket string) +} + +type FilerStoreWrapper struct { + defaultStore FilerStore + pathToStore ptrie.Trie + storeIdToStore map[string]FilerStore +} + +func NewFilerStoreWrapper(store FilerStore) *FilerStoreWrapper { + if innerStore, ok := store.(*FilerStoreWrapper); ok { + return innerStore + } + return &FilerStoreWrapper{ + defaultStore: store, + pathToStore: ptrie.New(), + storeIdToStore: make(map[string]FilerStore), + } +} + +func (fsw *FilerStoreWrapper) OnBucketCreation(bucket string) { + for _, store := range fsw.storeIdToStore { + if ba, ok := store.(BucketAware); ok { + ba.OnBucketCreation(bucket) + } + } + if ba, ok := fsw.defaultStore.(BucketAware); ok { + ba.OnBucketCreation(bucket) + } +} +func (fsw *FilerStoreWrapper) OnBucketDeletion(bucket string) { + for _, store := range fsw.storeIdToStore { + if ba, ok := store.(BucketAware); ok { + ba.OnBucketDeletion(bucket) + } + } + if ba, ok := fsw.defaultStore.(BucketAware); ok { + ba.OnBucketDeletion(bucket) + } +} + +func (fsw *FilerStoreWrapper) AddPathSpecificStore(path string, storeId string, store FilerStore) { + fsw.storeIdToStore[storeId] = NewFilerStorePathTranlator(path, store) + err := fsw.pathToStore.Put([]byte(path), storeId) + if err != nil { + glog.Fatalf("put path specific store: %v", err) + } +} + +func (fsw *FilerStoreWrapper) getActualStore(path util.FullPath) (store FilerStore) { + store = fsw.defaultStore + if path == "/" { + return + } + var storeId string + fsw.pathToStore.MatchPrefix([]byte(path), func(key []byte, value interface{}) bool { + storeId = value.(string) + return false + }) + if storeId != "" { + store = fsw.storeIdToStore[storeId] + } + return +} + +func (fsw *FilerStoreWrapper) getDefaultStore() (store FilerStore) { + return fsw.defaultStore +} + +func (fsw *FilerStoreWrapper) GetName() string { + return 
fsw.getDefaultStore().GetName() +} + +func (fsw *FilerStoreWrapper) Initialize(configuration util.Configuration, prefix string) error { + return fsw.getDefaultStore().Initialize(configuration, prefix) +} + +func (fsw *FilerStoreWrapper) InsertEntry(ctx context.Context, entry *Entry) error { + actualStore := fsw.getActualStore(entry.FullPath) + stats.FilerStoreCounter.WithLabelValues(actualStore.GetName(), "insert").Inc() + start := time.Now() + defer func() { + stats.FilerStoreHistogram.WithLabelValues(actualStore.GetName(), "insert").Observe(time.Since(start).Seconds()) + }() + + filer_pb.BeforeEntrySerialization(entry.Chunks) + if entry.Mime == "application/octet-stream" { + entry.Mime = "" + } + + if err := fsw.handleUpdateToHardLinks(ctx, entry); err != nil { + return err + } + + glog.V(4).Infof("InsertEntry %s", entry.FullPath) + return actualStore.InsertEntry(ctx, entry) +} + +func (fsw *FilerStoreWrapper) UpdateEntry(ctx context.Context, entry *Entry) error { + actualStore := fsw.getActualStore(entry.FullPath) + stats.FilerStoreCounter.WithLabelValues(actualStore.GetName(), "update").Inc() + start := time.Now() + defer func() { + stats.FilerStoreHistogram.WithLabelValues(actualStore.GetName(), "update").Observe(time.Since(start).Seconds()) + }() + + filer_pb.BeforeEntrySerialization(entry.Chunks) + if entry.Mime == "application/octet-stream" { + entry.Mime = "" + } + + if err := fsw.handleUpdateToHardLinks(ctx, entry); err != nil { + return err + } + + glog.V(4).Infof("UpdateEntry %s", entry.FullPath) + return actualStore.UpdateEntry(ctx, entry) +} + +func (fsw *FilerStoreWrapper) FindEntry(ctx context.Context, fp util.FullPath) (entry *Entry, err error) { + actualStore := fsw.getActualStore(fp) + stats.FilerStoreCounter.WithLabelValues(actualStore.GetName(), "find").Inc() + start := time.Now() + defer func() { + stats.FilerStoreHistogram.WithLabelValues(actualStore.GetName(), "find").Observe(time.Since(start).Seconds()) + }() + + entry, err = actualStore.FindEntry(ctx, fp) + glog.V(4).Infof("FindEntry %s: %v", fp, err) + if err != nil { + return nil, err + } + + fsw.maybeReadHardLink(ctx, entry) + + filer_pb.AfterEntryDeserialization(entry.Chunks) + return +} + +func (fsw *FilerStoreWrapper) DeleteEntry(ctx context.Context, fp util.FullPath) (err error) { + actualStore := fsw.getActualStore(fp) + stats.FilerStoreCounter.WithLabelValues(actualStore.GetName(), "delete").Inc() + start := time.Now() + defer func() { + stats.FilerStoreHistogram.WithLabelValues(actualStore.GetName(), "delete").Observe(time.Since(start).Seconds()) + }() + + existingEntry, findErr := fsw.FindEntry(ctx, fp) + if findErr == filer_pb.ErrNotFound { + return nil + } + if findErr != nil { + return findErr + } + if len(existingEntry.HardLinkId) != 0 { + // remove hard link + glog.V(4).Infof("DeleteHardLink %s", existingEntry.FullPath) + if err =
fsw.DeleteHardLink(ctx, existingEntry.HardLinkId); err != nil { + return err + } + } + + glog.V(4).Infof("DeleteOneEntry %s", existingEntry.FullPath) + return actualStore.DeleteEntry(ctx, existingEntry.FullPath) +} + +func (fsw *FilerStoreWrapper) DeleteFolderChildren(ctx context.Context, fp util.FullPath) (err error) { + actualStore := fsw.getActualStore(fp + "/") + stats.FilerStoreCounter.WithLabelValues(actualStore.GetName(), "deleteFolderChildren").Inc() + start := time.Now() + defer func() { + stats.FilerStoreHistogram.WithLabelValues(actualStore.GetName(), "deleteFolderChildren").Observe(time.Since(start).Seconds()) + }() + + glog.V(4).Infof("DeleteFolderChildren %s", fp) + return actualStore.DeleteFolderChildren(ctx, fp) +} + +func (fsw *FilerStoreWrapper) ListDirectoryEntries(ctx context.Context, dirPath util.FullPath, startFileName string, includeStartFile bool, limit int64, eachEntryFunc ListEachEntryFunc) (string, error) { + actualStore := fsw.getActualStore(dirPath + "/") + stats.FilerStoreCounter.WithLabelValues(actualStore.GetName(), "list").Inc() + start := time.Now() + defer func() { + stats.FilerStoreHistogram.WithLabelValues(actualStore.GetName(), "list").Observe(time.Since(start).Seconds()) + }() + + glog.V(4).Infof("ListDirectoryEntries %s from %s limit %d", dirPath, startFileName, limit) + return actualStore.ListDirectoryEntries(ctx, dirPath, startFileName, includeStartFile, limit, func(entry *Entry) bool { + fsw.maybeReadHardLink(ctx, entry) + filer_pb.AfterEntryDeserialization(entry.Chunks) + return eachEntryFunc(entry) + }) +} + +func (fsw *FilerStoreWrapper) ListDirectoryPrefixedEntries(ctx context.Context, dirPath util.FullPath, startFileName string, includeStartFile bool, limit int64, prefix string, eachEntryFunc ListEachEntryFunc) (lastFileName string, err error) { + actualStore := fsw.getActualStore(dirPath + "/") + stats.FilerStoreCounter.WithLabelValues(actualStore.GetName(), "prefixList").Inc() + start := time.Now() + defer func() { + stats.FilerStoreHistogram.WithLabelValues(actualStore.GetName(), "prefixList").Observe(time.Since(start).Seconds()) + }() + glog.V(4).Infof("ListDirectoryPrefixedEntries %s from %s prefix %s limit %d", dirPath, startFileName, prefix, limit) + lastFileName, err = actualStore.ListDirectoryPrefixedEntries(ctx, dirPath, startFileName, includeStartFile, limit, prefix, eachEntryFunc) + if err == ErrUnsupportedListDirectoryPrefixed { + lastFileName, err = fsw.prefixFilterEntries(ctx, dirPath, startFileName, includeStartFile, limit, prefix, func(entry *Entry) bool { + fsw.maybeReadHardLink(ctx, entry) + filer_pb.AfterEntryDeserialization(entry.Chunks) + return eachEntryFunc(entry) + }) + } + return lastFileName, err +} + +func (fsw *FilerStoreWrapper) prefixFilterEntries(ctx context.Context, dirPath util.FullPath, startFileName string, includeStartFile bool, limit int64, prefix string, eachEntryFunc ListEachEntryFunc) (lastFileName string, err error) { + actualStore := fsw.getActualStore(dirPath + "/") + + if prefix == "" { + return actualStore.ListDirectoryEntries(ctx, dirPath, startFileName, includeStartFile, limit, eachEntryFunc) + } + + var notPrefixed []*Entry + lastFileName, err = actualStore.ListDirectoryEntries(ctx, dirPath, startFileName, includeStartFile, limit, func(entry *Entry) bool { + notPrefixed = append(notPrefixed, entry) + return true + }) + if err != nil { + return + } + + count := int64(0) + for count < limit && len(notPrefixed) > 0 { + for _, entry := range notPrefixed { + if strings.HasPrefix(entry.Name(), 
prefix) { + count++ + if !eachEntryFunc(entry) { + return + } + if count >= limit { + break + } + } + } + if count < limit { + notPrefixed = notPrefixed[:0] + lastFileName, err = actualStore.ListDirectoryEntries(ctx, dirPath, lastFileName, false, limit, func(entry *Entry) bool { + notPrefixed = append(notPrefixed, entry) + return true + }) + if err != nil { + return + } + } + } + return +} + +func (fsw *FilerStoreWrapper) BeginTransaction(ctx context.Context) (context.Context, error) { + return fsw.getDefaultStore().BeginTransaction(ctx) +} + +func (fsw *FilerStoreWrapper) CommitTransaction(ctx context.Context) error { + return fsw.getDefaultStore().CommitTransaction(ctx) +} + +func (fsw *FilerStoreWrapper) RollbackTransaction(ctx context.Context) error { + return fsw.getDefaultStore().RollbackTransaction(ctx) +} + +func (fsw *FilerStoreWrapper) Shutdown() { + fsw.getDefaultStore().Shutdown() +} + +func (fsw *FilerStoreWrapper) KvPut(ctx context.Context, key []byte, value []byte) (err error) { + return fsw.getDefaultStore().KvPut(ctx, key, value) +} +func (fsw *FilerStoreWrapper) KvGet(ctx context.Context, key []byte) (value []byte, err error) { + return fsw.getDefaultStore().KvGet(ctx, key) +} +func (fsw *FilerStoreWrapper) KvDelete(ctx context.Context, key []byte) (err error) { + return fsw.getDefaultStore().KvDelete(ctx, key) +} diff --git a/weed/filer/hbase/hbase_store.go b/weed/filer/hbase/hbase_store.go new file mode 100644 index 000000000..e0d878ca7 --- /dev/null +++ b/weed/filer/hbase/hbase_store.go @@ -0,0 +1,231 @@ +package hbase + +import ( + "bytes" + "context" + "fmt" + "github.com/chrislusf/seaweedfs/weed/filer" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/util" + "github.com/tsuna/gohbase" + "github.com/tsuna/gohbase/hrpc" + "io" +) + +func init() { + filer.Stores = append(filer.Stores, &HbaseStore{}) +} + +type HbaseStore struct { + Client gohbase.Client + table []byte + cfKv string + cfMetaDir string + column string +} + +func (store *HbaseStore) GetName() string { + return "hbase" +} + +func (store *HbaseStore) Initialize(configuration util.Configuration, prefix string) (err error) { + return store.initialize( + configuration.GetString(prefix+"zkquorum"), + configuration.GetString(prefix+"table"), + ) +} + +func (store *HbaseStore) initialize(zkquorum, table string) (err error) { + store.Client = gohbase.NewClient(zkquorum) + store.table = []byte(table) + store.cfKv = "kv" + store.cfMetaDir = "meta" + store.column = "a" + + // check table exists + key := "whatever" + headers := map[string][]string{store.cfMetaDir: nil} + get, err := hrpc.NewGet(context.Background(), store.table, []byte(key), hrpc.Families(headers)) + if err != nil { + return fmt.Errorf("NewGet returned an error: %v", err) + } + _, err = store.Client.Get(get) + if err != gohbase.TableNotFound { + return nil + } + + // create table + adminClient := gohbase.NewAdminClient(zkquorum) + cFamilies := []string{store.cfKv, store.cfMetaDir} + cf := make(map[string]map[string]string, len(cFamilies)) + for _, f := range cFamilies { + cf[f] = nil + } + ct := hrpc.NewCreateTable(context.Background(), []byte(table), cf) + if err := adminClient.CreateTable(ct); err != nil { + return err + } + + return nil +} + +func (store *HbaseStore) InsertEntry(ctx context.Context, entry *filer.Entry) error { + value, err := entry.EncodeAttributesAndChunks() + if err != nil { + return fmt.Errorf("encoding %s %+v: %v", entry.FullPath, entry.Attr, err) + } +
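// values for entries with more than 50 chunks are gzipped below (util.MaybeGzipData) to keep HBase cells small; FindEntry reverses this with util.MaybeDecompressData +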
if len(entry.Chunks) > 50 { + value = util.MaybeGzipData(value) + } + + return store.doPut(ctx, store.cfMetaDir, []byte(entry.FullPath), value, entry.TtlSec) +} + +func (store *HbaseStore) UpdateEntry(ctx context.Context, entry *filer.Entry) (err error) { + return store.InsertEntry(ctx, entry) +} + +func (store *HbaseStore) FindEntry(ctx context.Context, path util.FullPath) (entry *filer.Entry, err error) { + value, err := store.doGet(ctx, store.cfMetaDir, []byte(path)) + if err != nil { + if err == filer.ErrKvNotFound { + return nil, filer_pb.ErrNotFound + } + return nil, err + } + + entry = &filer.Entry{ + FullPath: path, + } + err = entry.DecodeAttributesAndChunks(util.MaybeDecompressData(value)) + if err != nil { + return entry, fmt.Errorf("decode %s : %v", entry.FullPath, err) + } + return entry, nil +} + +func (store *HbaseStore) DeleteEntry(ctx context.Context, path util.FullPath) (err error) { + return store.doDelete(ctx, store.cfMetaDir, []byte(path)) +} + +func (store *HbaseStore) DeleteFolderChildren(ctx context.Context, path util.FullPath) (err error) { + + family := map[string][]string{store.cfMetaDir: {COLUMN_NAME}} + expectedPrefix := []byte(path.Child("")) + scan, err := hrpc.NewScanRange(ctx, store.table, expectedPrefix, nil, hrpc.Families(family)) + if err != nil { + return err + } + + scanner := store.Client.Scan(scan) + defer scanner.Close() + for { + res, err := scanner.Next() + if err == io.EOF { + break + } + if err != nil { + return err + } + if len(res.Cells) == 0 { + continue + } + cell := res.Cells[0] + + if !bytes.HasPrefix(cell.Row, expectedPrefix) { + break + } + fullpath := util.FullPath(cell.Row) + dir, _ := fullpath.DirAndName() + if dir != string(path) { + continue + } + + err = store.doDelete(ctx, store.cfMetaDir, cell.Row) + if err != nil { + return err + } + + } + return +} + +func (store *HbaseStore) ListDirectoryEntries(ctx context.Context, dirPath util.FullPath, startFileName string, includeStartFile bool, limit int64, eachEntryFunc filer.ListEachEntryFunc) (string, error) { + return store.ListDirectoryPrefixedEntries(ctx, dirPath, startFileName, includeStartFile, limit, "", eachEntryFunc) +} + +func (store *HbaseStore) ListDirectoryPrefixedEntries(ctx context.Context, dirPath util.FullPath, startFileName string, includeStartFile bool, limit int64, prefix string, eachEntryFunc filer.ListEachEntryFunc) (lastFileName string, err error) { + family := map[string][]string{store.cfMetaDir: {COLUMN_NAME}} + expectedPrefix := []byte(dirPath.Child(prefix)) + scan, err := hrpc.NewScanRange(ctx, store.table, expectedPrefix, nil, hrpc.Families(family)) + if err != nil { + return lastFileName, err + } + + scanner := store.Client.Scan(scan) + defer scanner.Close() + for { + res, err := scanner.Next() + if err == io.EOF { + break + } + if err != nil { + return lastFileName, err + } + if len(res.Cells) == 0 { + continue + } + cell := res.Cells[0] + + if !bytes.HasPrefix(cell.Row, expectedPrefix) { + break + } + + fullpath := util.FullPath(cell.Row) + dir, fileName := fullpath.DirAndName() + if dir != string(dirPath) { + continue + } + + value := cell.Value + + if fileName == startFileName && !includeStartFile { + continue + } + + limit-- + if limit < 0 { + break + } + + lastFileName = fileName + + entry := &filer.Entry{ + FullPath: fullpath, + } + if decodeErr := entry.DecodeAttributesAndChunks(util.MaybeDecompressData(value)); decodeErr != nil { + glog.V(0).Infof("list %s : %v", entry.FullPath, decodeErr) + return lastFileName, decodeErr + } + if !eachEntryFunc(entry) { + break + } + } + + return lastFileName, nil +} +
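+// transactions are deliberate no-ops for this store: HBase only guarantees per-row atomicity, so the methods below exist to satisfy the FilerStore interface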
+func (store *HbaseStore) BeginTransaction(ctx context.Context) (context.Context, error) { + return ctx, nil +} + +func (store *HbaseStore) CommitTransaction(ctx context.Context) error { + return nil +} + +func (store *HbaseStore) RollbackTransaction(ctx context.Context) error { + return nil +} + +func (store *HbaseStore) Shutdown() { + store.Client.Close() +} diff --git a/weed/filer/hbase/hbase_store_kv.go b/weed/filer/hbase/hbase_store_kv.go new file mode 100644 index 000000000..990e55a24 --- /dev/null +++ b/weed/filer/hbase/hbase_store_kv.go @@ -0,0 +1,76 @@ +package hbase + +import ( + "context" + "github.com/chrislusf/seaweedfs/weed/filer" + "github.com/tsuna/gohbase/hrpc" + "time" +) + +const ( + COLUMN_NAME = "a" +) + +func (store *HbaseStore) KvPut(ctx context.Context, key []byte, value []byte) (err error) { + return store.doPut(ctx, store.cfKv, key, value, 0) +} + +func (store *HbaseStore) KvGet(ctx context.Context, key []byte) (value []byte, err error) { + return store.doGet(ctx, store.cfKv, key) +} + +func (store *HbaseStore) KvDelete(ctx context.Context, key []byte) (err error) { + return store.doDelete(ctx, store.cfKv, key) +} + +func (store *HbaseStore) doPut(ctx context.Context, cf string, key, value []byte, ttlSecond int32) (err error) { + if ttlSecond > 0 { + return store.doPutWithOptions(ctx, cf, key, value, hrpc.Durability(hrpc.AsyncWal), hrpc.TTL(time.Duration(ttlSecond)*time.Second)) + } + return store.doPutWithOptions(ctx, cf, key, value, hrpc.Durability(hrpc.AsyncWal)) +} + +func (store *HbaseStore) doPutWithOptions(ctx context.Context, cf string, key, value []byte, options ...func(hrpc.Call) error) (err error) { + values := map[string]map[string][]byte{cf: map[string][]byte{}} + values[cf][COLUMN_NAME] = value + putRequest, err := hrpc.NewPut(ctx, store.table, key, values, options...) 
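+ // hrpc.NewPut only constructs the mutation; construction errors surface here, while RPC failures come from store.Client.Put below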
+ if err != nil { + return err + } + _, err = store.Client.Put(putRequest) + if err != nil { + return err + } + return nil +} + +func (store *HbaseStore) doGet(ctx context.Context, cf string, key []byte) (value []byte, err error) { + family := map[string][]string{cf: {COLUMN_NAME}} + getRequest, err := hrpc.NewGet(context.Background(), store.table, key, hrpc.Families(family)) + if err != nil { + return nil, err + } + getResp, err := store.Client.Get(getRequest) + if err != nil { + return nil, err + } + if len(getResp.Cells) == 0 { + return nil, filer.ErrKvNotFound + } + + return getResp.Cells[0].Value, nil +} + +func (store *HbaseStore) doDelete(ctx context.Context, cf string, key []byte) (err error) { + values := map[string]map[string][]byte{cf: map[string][]byte{}} + values[cf][COLUMN_NAME] = nil + deleteRequest, err := hrpc.NewDel(ctx, store.table, key, values, hrpc.Durability(hrpc.AsyncWal)) + if err != nil { + return err + } + _, err = store.Client.Delete(deleteRequest) + if err != nil { + return err + } + return nil +} diff --git a/weed/filer2/leveldb/leveldb_store.go b/weed/filer/leveldb/leveldb_store.go similarity index 64% rename from weed/filer2/leveldb/leveldb_store.go rename to weed/filer/leveldb/leveldb_store.go index 4952b3b3a..ce454f36a 100644 --- a/weed/filer2/leveldb/leveldb_store.go +++ b/weed/filer/leveldb/leveldb_store.go @@ -4,13 +4,15 @@ import ( "bytes" "context" "fmt" - "github.com/syndtr/goleveldb/leveldb" + leveldb_errors "github.com/syndtr/goleveldb/leveldb/errors" "github.com/syndtr/goleveldb/leveldb/opt" leveldb_util "github.com/syndtr/goleveldb/leveldb/util" + "os" - "github.com/chrislusf/seaweedfs/weed/filer2" + "github.com/chrislusf/seaweedfs/weed/filer" "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" weed_util "github.com/chrislusf/seaweedfs/weed/util" ) @@ -19,7 +21,7 @@ const ( ) func init() { - filer2.Stores = append(filer2.Stores, &LevelDBStore{}) + filer.Stores = append(filer.Stores, &LevelDBStore{}) } type LevelDBStore struct { @@ -30,13 +32,14 @@ func (store *LevelDBStore) GetName() string { return "leveldb" } -func (store *LevelDBStore) Initialize(configuration weed_util.Configuration) (err error) { - dir := configuration.GetString("dir") +func (store *LevelDBStore) Initialize(configuration weed_util.Configuration, prefix string) (err error) { + dir := configuration.GetString(prefix + "dir") return store.initialize(dir) } func (store *LevelDBStore) initialize(dir string) (err error) { glog.Infof("filer store dir: %s", dir) + os.MkdirAll(dir, 0755) if err := weed_util.TestFolderWritable(dir); err != nil { return fmt.Errorf("Check Level Folder %s Writable: %s", dir, err) } @@ -48,8 +51,13 @@ func (store *LevelDBStore) initialize(dir string) (err error) { } if store.db, err = leveldb.OpenFile(dir, opts); err != nil { - glog.Infof("filer store open dir %s: %v", dir, err) - return + if leveldb_errors.IsCorrupted(err) { + store.db, err = leveldb.RecoverFile(dir, opts) + } + if err != nil { + glog.Infof("filer store open dir %s: %v", dir, err) + return + } } return } @@ -64,7 +72,7 @@ func (store *LevelDBStore) RollbackTransaction(ctx context.Context) error { return nil } -func (store *LevelDBStore) InsertEntry(ctx context.Context, entry *filer2.Entry) (err error) { +func (store *LevelDBStore) InsertEntry(ctx context.Context, entry *filer.Entry) (err error) { key := genKey(entry.DirAndName()) value, err := entry.EncodeAttributesAndChunks() @@ -72,6 +80,10 @@ func (store *LevelDBStore) InsertEntry(ctx 
context.Context, entry *filer2.Entry) return fmt.Errorf("encoding %s %+v: %v", entry.FullPath, entry.Attr, err) } + if len(entry.Chunks) > 50 { + value = weed_util.MaybeGzipData(value) + } + err = store.db.Put(key, value, nil) if err != nil { @@ -83,27 +95,27 @@ func (store *LevelDBStore) InsertEntry(ctx context.Context, entry *filer2.Entry) return nil } -func (store *LevelDBStore) UpdateEntry(ctx context.Context, entry *filer2.Entry) (err error) { +func (store *LevelDBStore) UpdateEntry(ctx context.Context, entry *filer.Entry) (err error) { return store.InsertEntry(ctx, entry) } -func (store *LevelDBStore) FindEntry(ctx context.Context, fullpath filer2.FullPath) (entry *filer2.Entry, err error) { +func (store *LevelDBStore) FindEntry(ctx context.Context, fullpath weed_util.FullPath) (entry *filer.Entry, err error) { key := genKey(fullpath.DirAndName()) data, err := store.db.Get(key, nil) if err == leveldb.ErrNotFound { - return nil, filer2.ErrNotFound + return nil, filer_pb.ErrNotFound } if err != nil { - return nil, fmt.Errorf("get %s : %v", entry.FullPath, err) + return nil, fmt.Errorf("get %s : %v", fullpath, err) } - entry = &filer2.Entry{ + entry = &filer.Entry{ FullPath: fullpath, } - err = entry.DecodeAttributesAndChunks(data) + err = entry.DecodeAttributesAndChunks(weed_util.MaybeDecompressData((data))) if err != nil { return entry, fmt.Errorf("decode %s : %v", entry.FullPath, err) } @@ -113,7 +125,7 @@ func (store *LevelDBStore) FindEntry(ctx context.Context, fullpath filer2.FullPa return entry, nil } -func (store *LevelDBStore) DeleteEntry(ctx context.Context, fullpath filer2.FullPath) (err error) { +func (store *LevelDBStore) DeleteEntry(ctx context.Context, fullpath weed_util.FullPath) (err error) { key := genKey(fullpath.DirAndName()) err = store.db.Delete(key, nil) @@ -124,7 +136,7 @@ func (store *LevelDBStore) DeleteEntry(ctx context.Context, fullpath filer2.Full return nil } -func (store *LevelDBStore) DeleteFolderChildren(ctx context.Context, fullpath filer2.FullPath) (err error) { +func (store *LevelDBStore) DeleteFolderChildren(ctx context.Context, fullpath weed_util.FullPath) (err error) { batch := new(leveldb.Batch) @@ -152,12 +164,19 @@ func (store *LevelDBStore) DeleteFolderChildren(ctx context.Context, fullpath fi return nil } -func (store *LevelDBStore) ListDirectoryEntries(ctx context.Context, fullpath filer2.FullPath, startFileName string, inclusive bool, - limit int) (entries []*filer2.Entry, err error) { +func (store *LevelDBStore) ListDirectoryEntries(ctx context.Context, dirPath weed_util.FullPath, startFileName string, includeStartFile bool, limit int64, eachEntryFunc filer.ListEachEntryFunc) (lastFileName string, err error) { + return store.ListDirectoryPrefixedEntries(ctx, dirPath, startFileName, includeStartFile, limit, "", eachEntryFunc) +} - directoryPrefix := genDirectoryKeyPrefix(fullpath, "") +func (store *LevelDBStore) ListDirectoryPrefixedEntries(ctx context.Context, dirPath weed_util.FullPath, startFileName string, includeStartFile bool, limit int64, prefix string, eachEntryFunc filer.ListEachEntryFunc) (lastFileName string, err error) { - iter := store.db.NewIterator(&leveldb_util.Range{Start: genDirectoryKeyPrefix(fullpath, startFileName)}, nil) + directoryPrefix := genDirectoryKeyPrefix(dirPath, prefix) + lastFileStart := directoryPrefix + if startFileName != "" { + lastFileStart = genDirectoryKeyPrefix(dirPath, startFileName) + } + + iter := store.db.NewIterator(&leveldb_util.Range{Start: lastFileStart}, nil) for iter.Next() { key := 
iter.Key() if !bytes.HasPrefix(key, directoryPrefix) { @@ -167,26 +186,29 @@ func (store *LevelDBStore) ListDirectoryEntries(ctx context.Context, fullpath fi if fileName == "" { continue } - if fileName == startFileName && !inclusive { + if fileName == startFileName && !includeStartFile { continue } limit-- if limit < 0 { break } - entry := &filer2.Entry{ - FullPath: filer2.NewFullPath(string(fullpath), fileName), + lastFileName = fileName + entry := &filer.Entry{ + FullPath: weed_util.NewFullPath(string(dirPath), fileName), } - if decodeErr := entry.DecodeAttributesAndChunks(iter.Value()); decodeErr != nil { + if decodeErr := entry.DecodeAttributesAndChunks(weed_util.MaybeDecompressData(iter.Value())); decodeErr != nil { err = decodeErr glog.V(0).Infof("list %s : %v", entry.FullPath, err) break } - entries = append(entries, entry) + if !eachEntryFunc(entry) { + break + } } iter.Release() - return entries, err + return lastFileName, err } func genKey(dirPath, fileName string) (key []byte) { @@ -196,7 +218,7 @@ func genKey(dirPath, fileName string) (key []byte) { return key } -func genDirectoryKeyPrefix(fullpath filer2.FullPath, startFileName string) (keyPrefix []byte) { +func genDirectoryKeyPrefix(fullpath weed_util.FullPath, startFileName string) (keyPrefix []byte) { keyPrefix = []byte(string(fullpath)) keyPrefix = append(keyPrefix, DIR_FILE_SEPARATOR) if len(startFileName) > 0 { @@ -215,3 +237,7 @@ func getNameFromKey(key []byte) string { return string(key[sepIndex+1:]) } + +func (store *LevelDBStore) Shutdown() { + store.db.Close() +} diff --git a/weed/filer/leveldb/leveldb_store_kv.go b/weed/filer/leveldb/leveldb_store_kv.go new file mode 100644 index 000000000..f686cbf21 --- /dev/null +++ b/weed/filer/leveldb/leveldb_store_kv.go @@ -0,0 +1,45 @@ +package leveldb + +import ( + "context" + "fmt" + "github.com/chrislusf/seaweedfs/weed/filer" + "github.com/syndtr/goleveldb/leveldb" +) + +func (store *LevelDBStore) KvPut(ctx context.Context, key []byte, value []byte) (err error) { + + err = store.db.Put(key, value, nil) + + if err != nil { + return fmt.Errorf("kv put: %v", err) + } + + return nil +} + +func (store *LevelDBStore) KvGet(ctx context.Context, key []byte) (value []byte, err error) { + + value, err = store.db.Get(key, nil) + + if err == leveldb.ErrNotFound { + return nil, filer.ErrKvNotFound + } + + if err != nil { + return nil, fmt.Errorf("kv get: %v", err) + } + + return +} + +func (store *LevelDBStore) KvDelete(ctx context.Context, key []byte) (err error) { + + err = store.db.Delete(key, nil) + + if err != nil { + return fmt.Errorf("kv delete: %v", err) + } + + return nil +} diff --git a/weed/filer/leveldb/leveldb_store_test.go b/weed/filer/leveldb/leveldb_store_test.go new file mode 100644 index 000000000..d437895f5 --- /dev/null +++ b/weed/filer/leveldb/leveldb_store_test.go @@ -0,0 +1,115 @@ +package leveldb + +import ( + "context" + "fmt" + "io/ioutil" + "os" + "testing" + "time" + + "github.com/chrislusf/seaweedfs/weed/filer" + "github.com/chrislusf/seaweedfs/weed/util" +) + +func TestCreateAndFind(t *testing.T) { + testFiler := filer.NewFiler(nil, nil, "", 0, "", "", "", nil) + dir, _ := ioutil.TempDir("", "seaweedfs_filer_test") + defer os.RemoveAll(dir) + store := &LevelDBStore{} + store.initialize(dir) + testFiler.SetStore(store) + + fullpath := util.FullPath("/home/chris/this/is/one/file1.jpg") + + ctx := context.Background() + + entry1 := &filer.Entry{ + FullPath: fullpath, + Attr: filer.Attr{ + Mode: 0440, + Uid: 1234, + Gid: 5678, + }, + } + + if err := 
testFiler.CreateEntry(ctx, entry1, false, false, nil); err != nil { + t.Errorf("create entry %v: %v", entry1.FullPath, err) + return + } + + entry, err := testFiler.FindEntry(ctx, fullpath) + + if err != nil { + t.Errorf("find entry: %v", err) + return + } + + if entry.FullPath != entry1.FullPath { + t.Errorf("find wrong entry: %v", entry.FullPath) + return + } + + // checking one upper directory + entries, _, _ := testFiler.ListDirectoryEntries(ctx, util.FullPath("/home/chris/this/is/one"), "", false, 100, "", "", "") + if len(entries) != 1 { + t.Errorf("list entries count: %v", len(entries)) + return + } + + // checking one upper directory + entries, _, _ = testFiler.ListDirectoryEntries(ctx, util.FullPath("/"), "", false, 100, "", "", "") + if len(entries) != 1 { + t.Errorf("list entries count: %v", len(entries)) + return + } + +} + +func TestEmptyRoot(t *testing.T) { + testFiler := filer.NewFiler(nil, nil, "", 0, "", "", "", nil) + dir, _ := ioutil.TempDir("", "seaweedfs_filer_test2") + defer os.RemoveAll(dir) + store := &LevelDBStore{} + store.initialize(dir) + testFiler.SetStore(store) + + ctx := context.Background() + + // checking one upper directory + entries, _, err := testFiler.ListDirectoryEntries(ctx, util.FullPath("/"), "", false, 100, "", "", "") + if err != nil { + t.Errorf("list entries: %v", err) + return + } + if len(entries) != 0 { + t.Errorf("list entries count: %v", len(entries)) + return + } + +} + +func BenchmarkInsertEntry(b *testing.B) { + testFiler := filer.NewFiler(nil, nil, "", 0, "", "", "", nil) + dir, _ := ioutil.TempDir("", "seaweedfs_filer_bench") + defer os.RemoveAll(dir) + store := &LevelDBStore{} + store.initialize(dir) + testFiler.SetStore(store) + + ctx := context.Background() + + b.ReportAllocs() + + for i := 0; i < b.N; i++ { + entry := &filer.Entry{ + FullPath: util.FullPath(fmt.Sprintf("/file%d.txt", i)), + Attr: filer.Attr{ + Crtime: time.Now(), + Mtime: time.Now(), + Mode: os.FileMode(0644), + }, + } + store.InsertEntry(ctx, entry) + } +} diff --git a/weed/filer2/leveldb2/leveldb2_store.go b/weed/filer/leveldb2/leveldb2_store.go similarity index 68% rename from weed/filer2/leveldb2/leveldb2_store.go rename to weed/filer/leveldb2/leveldb2_store.go index 8a16822ab..124d61c1c 100644 --- a/weed/filer2/leveldb2/leveldb2_store.go +++ b/weed/filer/leveldb2/leveldb2_store.go @@ -5,20 +5,21 @@ import ( "context" "crypto/md5" "fmt" - "io" - "os" - "github.com/syndtr/goleveldb/leveldb" + leveldb_errors "github.com/syndtr/goleveldb/leveldb/errors" "github.com/syndtr/goleveldb/leveldb/opt" leveldb_util "github.com/syndtr/goleveldb/leveldb/util" + "io" + "os" - "github.com/chrislusf/seaweedfs/weed/filer2" + "github.com/chrislusf/seaweedfs/weed/filer" "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" weed_util "github.com/chrislusf/seaweedfs/weed/util" ) func init() { - filer2.Stores = append(filer2.Stores, &LevelDB2Store{}) + filer.Stores = append(filer.Stores, &LevelDB2Store{}) } type LevelDB2Store struct { @@ -30,13 +31,14 @@ func (store *LevelDB2Store) GetName() string { return "leveldb2" } -func (store *LevelDB2Store) Initialize(configuration weed_util.Configuration) (err error) { - dir := configuration.GetString("dir") +func (store *LevelDB2Store) Initialize(configuration weed_util.Configuration, prefix string) (err error) { + dir := configuration.GetString(prefix + "dir") return store.initialize(dir, 8) } func (store *LevelDB2Store) initialize(dir string, dbCount int) (err error) { glog.Infof("filer store 
leveldb2 dir: %s", dir) + os.MkdirAll(dir, 0755) if err := weed_util.TestFolderWritable(dir); err != nil { return fmt.Errorf("Check Level Folder %s Writable: %s", dir, err) } @@ -51,9 +53,12 @@ func (store *LevelDB2Store) initialize(dir string, dbCount int) (err error) { dbFolder := fmt.Sprintf("%s/%02d", dir, d) os.MkdirAll(dbFolder, 0755) db, dbErr := leveldb.OpenFile(dbFolder, opts) + if leveldb_errors.IsCorrupted(dbErr) { + db, dbErr = leveldb.RecoverFile(dbFolder, opts) + } if dbErr != nil { glog.Errorf("filer store open dir %s: %v", dbFolder, dbErr) - return + return dbErr } store.dbs = append(store.dbs, db) } @@ -72,7 +77,7 @@ func (store *LevelDB2Store) RollbackTransaction(ctx context.Context) error { return nil } -func (store *LevelDB2Store) InsertEntry(ctx context.Context, entry *filer2.Entry) (err error) { +func (store *LevelDB2Store) InsertEntry(ctx context.Context, entry *filer.Entry) (err error) { dir, name := entry.DirAndName() key, partitionId := genKey(dir, name, store.dbCount) @@ -81,6 +86,10 @@ func (store *LevelDB2Store) InsertEntry(ctx context.Context, entry *filer2.Entry return fmt.Errorf("encoding %s %+v: %v", entry.FullPath, entry.Attr, err) } + if len(entry.Chunks) > 50 { + value = weed_util.MaybeGzipData(value) + } + err = store.dbs[partitionId].Put(key, value, nil) if err != nil { @@ -92,28 +101,28 @@ func (store *LevelDB2Store) InsertEntry(ctx context.Context, entry *filer2.Entry return nil } -func (store *LevelDB2Store) UpdateEntry(ctx context.Context, entry *filer2.Entry) (err error) { +func (store *LevelDB2Store) UpdateEntry(ctx context.Context, entry *filer.Entry) (err error) { return store.InsertEntry(ctx, entry) } -func (store *LevelDB2Store) FindEntry(ctx context.Context, fullpath filer2.FullPath) (entry *filer2.Entry, err error) { +func (store *LevelDB2Store) FindEntry(ctx context.Context, fullpath weed_util.FullPath) (entry *filer.Entry, err error) { dir, name := fullpath.DirAndName() key, partitionId := genKey(dir, name, store.dbCount) data, err := store.dbs[partitionId].Get(key, nil) if err == leveldb.ErrNotFound { - return nil, filer2.ErrNotFound + return nil, filer_pb.ErrNotFound } if err != nil { - return nil, fmt.Errorf("get %s : %v", entry.FullPath, err) + return nil, fmt.Errorf("get %s : %v", fullpath, err) } - entry = &filer2.Entry{ + entry = &filer.Entry{ FullPath: fullpath, } - err = entry.DecodeAttributesAndChunks(data) + err = entry.DecodeAttributesAndChunks(weed_util.MaybeDecompressData(data)) if err != nil { return entry, fmt.Errorf("decode %s : %v", entry.FullPath, err) } @@ -123,7 +132,7 @@ func (store *LevelDB2Store) FindEntry(ctx context.Context, fullpath filer2.FullP return entry, nil } -func (store *LevelDB2Store) DeleteEntry(ctx context.Context, fullpath filer2.FullPath) (err error) { +func (store *LevelDB2Store) DeleteEntry(ctx context.Context, fullpath weed_util.FullPath) (err error) { dir, name := fullpath.DirAndName() key, partitionId := genKey(dir, name, store.dbCount) @@ -135,7 +144,7 @@ func (store *LevelDB2Store) DeleteEntry(ctx context.Context, fullpath filer2.Ful return nil } -func (store *LevelDB2Store) DeleteFolderChildren(ctx context.Context, fullpath filer2.FullPath) (err error) { +func (store *LevelDB2Store) DeleteFolderChildren(ctx context.Context, fullpath weed_util.FullPath) (err error) { directoryPrefix, partitionId := genDirectoryKeyPrefix(fullpath, "", store.dbCount) batch := new(leveldb.Batch) @@ -163,11 +172,17 @@ func (store *LevelDB2Store) DeleteFolderChildren(ctx context.Context, fullpath f return nil } 
-func (store *LevelDB2Store) ListDirectoryEntries(ctx context.Context, fullpath filer2.FullPath, startFileName string, inclusive bool, - limit int) (entries []*filer2.Entry, err error) { +func (store *LevelDB2Store) ListDirectoryEntries(ctx context.Context, dirPath weed_util.FullPath, startFileName string, includeStartFile bool, limit int64, eachEntryFunc filer.ListEachEntryFunc) (lastFileName string, err error) { + return store.ListDirectoryPrefixedEntries(ctx, dirPath, startFileName, includeStartFile, limit, "", eachEntryFunc) +} + +func (store *LevelDB2Store) ListDirectoryPrefixedEntries(ctx context.Context, dirPath weed_util.FullPath, startFileName string, includeStartFile bool, limit int64, prefix string, eachEntryFunc filer.ListEachEntryFunc) (lastFileName string, err error) { - directoryPrefix, partitionId := genDirectoryKeyPrefix(fullpath, "", store.dbCount) - lastFileStart, _ := genDirectoryKeyPrefix(fullpath, startFileName, store.dbCount) + directoryPrefix, partitionId := genDirectoryKeyPrefix(dirPath, prefix, store.dbCount) + lastFileStart := directoryPrefix + if startFileName != "" { + lastFileStart, _ = genDirectoryKeyPrefix(dirPath, startFileName, store.dbCount) + } iter := store.dbs[partitionId].NewIterator(&leveldb_util.Range{Start: lastFileStart}, nil) for iter.Next() { @@ -179,29 +194,31 @@ func (store *LevelDB2Store) ListDirectoryEntries(ctx context.Context, fullpath f if fileName == "" { continue } - if fileName == startFileName && !inclusive { + if fileName == startFileName && !includeStartFile { continue } limit-- if limit < 0 { break } - entry := &filer2.Entry{ - FullPath: filer2.NewFullPath(string(fullpath), fileName), + lastFileName = fileName + entry := &filer.Entry{ + FullPath: weed_util.NewFullPath(string(dirPath), fileName), } // println("list", entry.FullPath, "chunks", len(entry.Chunks)) - - if decodeErr := entry.DecodeAttributesAndChunks(iter.Value()); decodeErr != nil { + if decodeErr := entry.DecodeAttributesAndChunks(weed_util.MaybeDecompressData(iter.Value())); decodeErr != nil { err = decodeErr glog.V(0).Infof("list %s : %v", entry.FullPath, err) break } - entries = append(entries, entry) + if !eachEntryFunc(entry) { + break + } } iter.Release() - return entries, err + return lastFileName, err } func genKey(dirPath, fileName string, dbCount int) (key []byte, partitionId int) { @@ -210,7 +227,7 @@ func genKey(dirPath, fileName string, dbCount int) (key []byte, partitionId int) return key, partitionId } -func genDirectoryKeyPrefix(fullpath filer2.FullPath, startFileName string, dbCount int) (keyPrefix []byte, partitionId int) { +func genDirectoryKeyPrefix(fullpath weed_util.FullPath, startFileName string, dbCount int) (keyPrefix []byte, partitionId int) { keyPrefix, partitionId = hashToBytes(string(fullpath), dbCount) if len(startFileName) > 0 { keyPrefix = append(keyPrefix, []byte(startFileName)...) 
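+ // every key for a directory starts with md5(dir), so a directory's entries always land in the single leveldb partition chosen by hashToBytes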
@@ -235,3 +252,9 @@ func hashToBytes(dir string, dbCount int) ([]byte, int) { return b, int(x) % dbCount } + +func (store *LevelDB2Store) Shutdown() { + for d := 0; d < store.dbCount; d++ { + store.dbs[d].Close() + } +} diff --git a/weed/filer/leveldb2/leveldb2_store_kv.go b/weed/filer/leveldb2/leveldb2_store_kv.go new file mode 100644 index 000000000..b415d3c32 --- /dev/null +++ b/weed/filer/leveldb2/leveldb2_store_kv.go @@ -0,0 +1,56 @@ +package leveldb + +import ( + "context" + "fmt" + + "github.com/chrislusf/seaweedfs/weed/filer" + "github.com/syndtr/goleveldb/leveldb" +) + +func (store *LevelDB2Store) KvPut(ctx context.Context, key []byte, value []byte) (err error) { + + partitionId := bucketKvKey(key, store.dbCount) + + err = store.dbs[partitionId].Put(key, value, nil) + + if err != nil { + return fmt.Errorf("kv bucket %d put: %v", partitionId, err) + } + + return nil +} + +func (store *LevelDB2Store) KvGet(ctx context.Context, key []byte) (value []byte, err error) { + + partitionId := bucketKvKey(key, store.dbCount) + + value, err = store.dbs[partitionId].Get(key, nil) + + if err == leveldb.ErrNotFound { + return nil, filer.ErrKvNotFound + } + + if err != nil { + return nil, fmt.Errorf("kv bucket %d get: %v", partitionId, err) + } + + return +} + +func (store *LevelDB2Store) KvDelete(ctx context.Context, key []byte) (err error) { + + partitionId := bucketKvKey(key, store.dbCount) + + err = store.dbs[partitionId].Delete(key, nil) + + if err != nil { + return fmt.Errorf("kv bucket %d delete: %v", partitionId, err) + } + + return nil +} + +func bucketKvKey(key []byte, dbCount int) (partitionId int) { + return int(key[len(key)-1]) % dbCount +} diff --git a/weed/filer2/leveldb2/leveldb2_store_test.go b/weed/filer/leveldb2/leveldb2_store_test.go similarity index 59% rename from weed/filer2/leveldb2/leveldb2_store_test.go rename to weed/filer/leveldb2/leveldb2_store_test.go index e28ef7dac..fd0ad18a3 100644 --- a/weed/filer2/leveldb2/leveldb2_store_test.go +++ b/weed/filer/leveldb2/leveldb2_store_test.go @@ -2,40 +2,41 @@ package leveldb import ( "context" - "github.com/chrislusf/seaweedfs/weed/filer2" "io/ioutil" "os" "testing" + + "github.com/chrislusf/seaweedfs/weed/filer" + "github.com/chrislusf/seaweedfs/weed/util" ) func TestCreateAndFind(t *testing.T) { - filer := filer2.NewFiler(nil, nil) + testFiler := filer.NewFiler(nil, nil, "", 0, "", "", "", nil) dir, _ := ioutil.TempDir("", "seaweedfs_filer_test") defer os.RemoveAll(dir) store := &LevelDB2Store{} store.initialize(dir, 2) - filer.SetStore(store) - filer.DisableDirectoryCache() + testFiler.SetStore(store) - fullpath := filer2.FullPath("/home/chris/this/is/one/file1.jpg") + fullpath := util.FullPath("/home/chris/this/is/one/file1.jpg") ctx := context.Background() - entry1 := &filer2.Entry{ + entry1 := &filer.Entry{ FullPath: fullpath, - Attr: filer2.Attr{ + Attr: filer.Attr{ Mode: 0440, Uid: 1234, Gid: 5678, }, } - if err := filer.CreateEntry(ctx, entry1); err != nil { + if err := testFiler.CreateEntry(ctx, entry1, false, false, nil); err != nil { t.Errorf("create entry %v: %v", entry1.FullPath, err) return } - entry, err := filer.FindEntry(ctx, fullpath) + entry, err := testFiler.FindEntry(ctx, fullpath) if err != nil { t.Errorf("find entry: %v", err) @@ -48,14 +49,14 @@ func TestCreateAndFind(t *testing.T) { } // checking one upper directory - entries, _ := filer.ListDirectoryEntries(ctx, filer2.FullPath("/home/chris/this/is/one"), "", false, 100) + entries, _, _ := testFiler.ListDirectoryEntries(ctx, 
util.FullPath("/home/chris/this/is/one"), "", false, 100, "", "", "") if len(entries) != 1 { t.Errorf("list entries count: %v", len(entries)) return } // checking one upper directory - entries, _ = filer.ListDirectoryEntries(ctx, filer2.FullPath("/"), "", false, 100) + entries, _, _ = testFiler.ListDirectoryEntries(ctx, util.FullPath("/"), "", false, 100, "", "", "") if len(entries) != 1 { t.Errorf("list entries count: %v", len(entries)) return @@ -64,18 +65,17 @@ func TestCreateAndFind(t *testing.T) { } func TestEmptyRoot(t *testing.T) { - filer := filer2.NewFiler(nil, nil) + testFiler := filer.NewFiler(nil, nil, "", 0, "", "", "", nil) dir, _ := ioutil.TempDir("", "seaweedfs_filer_test2") defer os.RemoveAll(dir) store := &LevelDB2Store{} store.initialize(dir, 2) - filer.SetStore(store) - filer.DisableDirectoryCache() + testFiler.SetStore(store) ctx := context.Background() // checking one upper directory - entries, err := filer.ListDirectoryEntries(ctx, filer2.FullPath("/"), "", false, 100) + entries, _, err := testFiler.ListDirectoryEntries(ctx, util.FullPath("/"), "", false, 100, "", "", "") if err != nil { t.Errorf("list entries: %v", err) return diff --git a/weed/filer/leveldb3/leveldb3_store.go b/weed/filer/leveldb3/leveldb3_store.go new file mode 100644 index 000000000..d1cdfbbf6 --- /dev/null +++ b/weed/filer/leveldb3/leveldb3_store.go @@ -0,0 +1,376 @@ +package leveldb + +import ( + "bytes" + "context" + "crypto/md5" + "fmt" + "github.com/syndtr/goleveldb/leveldb" + leveldb_errors "github.com/syndtr/goleveldb/leveldb/errors" + "github.com/syndtr/goleveldb/leveldb/opt" + leveldb_util "github.com/syndtr/goleveldb/leveldb/util" + "io" + "os" + "strings" + "sync" + + "github.com/chrislusf/seaweedfs/weed/filer" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + weed_util "github.com/chrislusf/seaweedfs/weed/util" +) + +const ( + DEFAULT = "_main" +) + +func init() { + filer.Stores = append(filer.Stores, &LevelDB3Store{}) +} + +type LevelDB3Store struct { + dir string + dbs map[string]*leveldb.DB + dbsLock sync.RWMutex +} + +func (store *LevelDB3Store) GetName() string { + return "leveldb3" +} + +func (store *LevelDB3Store) Initialize(configuration weed_util.Configuration, prefix string) (err error) { + dir := configuration.GetString(prefix + "dir") + return store.initialize(dir) +} + +func (store *LevelDB3Store) initialize(dir string) (err error) { + glog.Infof("filer store leveldb3 dir: %s", dir) + os.MkdirAll(dir, 0755) + if err := weed_util.TestFolderWritable(dir); err != nil { + return fmt.Errorf("Check Level Folder %s Writable: %s", dir, err) + } + store.dir = dir + + db, loadDbErr := store.loadDB(DEFAULT) + if loadDbErr != nil { + return loadDbErr + } + store.dbs = make(map[string]*leveldb.DB) + store.dbs[DEFAULT] = db + + return +} + +func (store *LevelDB3Store) loadDB(name string) (*leveldb.DB, error) { + + opts := &opt.Options{ + BlockCacheCapacity: 32 * 1024 * 1024, // default value is 8MiB + WriteBuffer: 16 * 1024 * 1024, // default value is 4MiB + CompactionTableSizeMultiplier: 4, + } + if name != DEFAULT { + opts = &opt.Options{ + BlockCacheCapacity: 4 * 1024 * 1024, // default value is 8MiB + WriteBuffer: 2 * 1024 * 1024, // default value is 4MiB + CompactionTableSizeMultiplier: 4, + } + } + + dbFolder := fmt.Sprintf("%s/%s", store.dir, name) + os.MkdirAll(dbFolder, 0755) + db, dbErr := leveldb.OpenFile(dbFolder, opts) + if leveldb_errors.IsCorrupted(dbErr) { + db, dbErr = leveldb.RecoverFile(dbFolder, opts) + } + if dbErr 
!= nil { + glog.Errorf("filer store open dir %s: %v", dbFolder, dbErr) + return nil, dbErr + } + return db, nil +} + +func (store *LevelDB3Store) findDB(fullpath weed_util.FullPath, isForChildren bool) (*leveldb.DB, string, weed_util.FullPath, error) { + + store.dbsLock.RLock() + + defaultDB := store.dbs[DEFAULT] + if !strings.HasPrefix(string(fullpath), "/buckets/") { + store.dbsLock.RUnlock() + return defaultDB, DEFAULT, fullpath, nil + } + + // detect bucket + bucketAndObjectKey := string(fullpath)[len("/buckets/"):] + t := strings.Index(bucketAndObjectKey, "/") + if t < 0 && !isForChildren { + store.dbsLock.RUnlock() + return defaultDB, DEFAULT, fullpath, nil + } + bucket := bucketAndObjectKey + shortPath := weed_util.FullPath("/") + if t > 0 { + bucket = bucketAndObjectKey[:t] + shortPath = weed_util.FullPath(bucketAndObjectKey[t:]) + } + + if db, found := store.dbs[bucket]; found { + store.dbsLock.RUnlock() + return db, bucket, shortPath, nil + } + + store.dbsLock.RUnlock() + // upgrade to write lock + store.dbsLock.Lock() + defer store.dbsLock.Unlock() + + // double check after getting the write lock + if db, found := store.dbs[bucket]; found { + return db, bucket, shortPath, nil + } + + // create db + db, err := store.loadDB(bucket) + if err != nil { + return nil, bucket, shortPath, err + } + store.dbs[bucket] = db + + return db, bucket, shortPath, nil +} + +func (store *LevelDB3Store) closeDB(bucket string) { + + store.dbsLock.Lock() + defer store.dbsLock.Unlock() + + if db, found := store.dbs[bucket]; found { + db.Close() + delete(store.dbs, bucket) + } + +} + +func (store *LevelDB3Store) BeginTransaction(ctx context.Context) (context.Context, error) { + return ctx, nil +} +func (store *LevelDB3Store) CommitTransaction(ctx context.Context) error { + return nil +} +func (store *LevelDB3Store) RollbackTransaction(ctx context.Context) error { + return nil +} + +func (store *LevelDB3Store) InsertEntry(ctx context.Context, entry *filer.Entry) (err error) { + + db, _, shortPath, err := store.findDB(entry.FullPath, false) + if err != nil { + return fmt.Errorf("findDB %s : %v", entry.FullPath, err) + } + + dir, name := shortPath.DirAndName() + key := genKey(dir, name) + + value, err := entry.EncodeAttributesAndChunks() + if err != nil { + return fmt.Errorf("encoding %s %+v: %v", entry.FullPath, entry.Attr, err) + } + + if len(entry.Chunks) > 50 { + value = weed_util.MaybeGzipData(value) + } + + err = db.Put(key, value, nil) + + if err != nil { + return fmt.Errorf("persisting %s : %v", entry.FullPath, err) + } + + // println("saved", entry.FullPath, "chunks", len(entry.Chunks)) + + return nil +} + +func (store *LevelDB3Store) UpdateEntry(ctx context.Context, entry *filer.Entry) (err error) { + + return store.InsertEntry(ctx, entry) +} + +func (store *LevelDB3Store) FindEntry(ctx context.Context, fullpath weed_util.FullPath) (entry *filer.Entry, err error) { + + db, _, shortPath, err := store.findDB(fullpath, false) + if err != nil { + return nil, fmt.Errorf("findDB %s : %v", fullpath, err) + } + + dir, name := shortPath.DirAndName() + key := genKey(dir, name) + + data, err := db.Get(key, nil) + + if err == leveldb.ErrNotFound { + return nil, filer_pb.ErrNotFound + } + if err != nil { + return nil, fmt.Errorf("get %s : %v", fullpath, err) + } + + entry = &filer.Entry{ + FullPath: fullpath, + } + err = entry.DecodeAttributesAndChunks(weed_util.MaybeDecompressData(data)) + if err != nil { + return entry, fmt.Errorf("decode %s : %v", entry.FullPath, err) + } + + // println("read", 
entry.FullPath, "chunks", len(entry.Chunks), "data", len(data), string(data)) + + return entry, nil +} + +func (store *LevelDB3Store) DeleteEntry(ctx context.Context, fullpath weed_util.FullPath) (err error) { + + db, _, shortPath, err := store.findDB(fullpath, false) + if err != nil { + return fmt.Errorf("findDB %s : %v", fullpath, err) + } + + dir, name := shortPath.DirAndName() + key := genKey(dir, name) + + err = db.Delete(key, nil) + if err != nil { + return fmt.Errorf("delete %s : %v", fullpath, err) + } + + return nil +} + +func (store *LevelDB3Store) DeleteFolderChildren(ctx context.Context, fullpath weed_util.FullPath) (err error) { + + db, bucket, shortPath, err := store.findDB(fullpath, true) + if err != nil { + return fmt.Errorf("findDB %s : %v", fullpath, err) + } + + if bucket != DEFAULT && shortPath == "/" { + store.closeDB(bucket) + if bucket != "" { // just to make sure + os.RemoveAll(store.dir + "/" + bucket) + } + return nil + } + + directoryPrefix := genDirectoryKeyPrefix(shortPath, "") + + batch := new(leveldb.Batch) + + iter := db.NewIterator(&leveldb_util.Range{Start: directoryPrefix}, nil) + for iter.Next() { + key := iter.Key() + if !bytes.HasPrefix(key, directoryPrefix) { + break + } + fileName := getNameFromKey(key) + if fileName == "" { + continue + } + batch.Delete(append(directoryPrefix, []byte(fileName)...)) + } + iter.Release() + + err = db.Write(batch, nil) + + if err != nil { + return fmt.Errorf("delete %s : %v", fullpath, err) + } + + return nil +} + +func (store *LevelDB3Store) ListDirectoryEntries(ctx context.Context, dirPath weed_util.FullPath, startFileName string, includeStartFile bool, limit int64, eachEntryFunc filer.ListEachEntryFunc) (lastFileName string, err error) { + return store.ListDirectoryPrefixedEntries(ctx, dirPath, startFileName, includeStartFile, limit, "", eachEntryFunc) +} + +func (store *LevelDB3Store) ListDirectoryPrefixedEntries(ctx context.Context, dirPath weed_util.FullPath, startFileName string, includeStartFile bool, limit int64, prefix string, eachEntryFunc filer.ListEachEntryFunc) (lastFileName string, err error) { + + db, _, shortPath, err := store.findDB(dirPath, true) + if err != nil { + return lastFileName, fmt.Errorf("findDB %s : %v", dirPath, err) + } + + directoryPrefix := genDirectoryKeyPrefix(shortPath, prefix) + lastFileStart := directoryPrefix + if startFileName != "" { + lastFileStart = genDirectoryKeyPrefix(shortPath, startFileName) + } + + iter := db.NewIterator(&leveldb_util.Range{Start: lastFileStart}, nil) + for iter.Next() { + key := iter.Key() + if !bytes.HasPrefix(key, directoryPrefix) { + break + } + fileName := getNameFromKey(key) + if fileName == "" { + continue + } + if fileName == startFileName && !includeStartFile { + continue + } + limit-- + if limit < 0 { + break + } + lastFileName = fileName + entry := &filer.Entry{ + FullPath: weed_util.NewFullPath(string(dirPath), fileName), + } + + // println("list", entry.FullPath, "chunks", len(entry.Chunks)) + if decodeErr := entry.DecodeAttributesAndChunks(weed_util.MaybeDecompressData(iter.Value())); decodeErr != nil { + err = decodeErr + glog.V(0).Infof("list %s : %v", entry.FullPath, err) + break + } + if !eachEntryFunc(entry) { + break + } + } + iter.Release() + + return lastFileName, err +} + +func genKey(dirPath, fileName string) (key []byte) { + key = hashToBytes(dirPath) + key = append(key, []byte(fileName)...) 
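// Key layout note (illustration only; hashToBytes and getNameFromKey are the
// helpers defined in this file): a leveldb3 key is md5(directory) followed by
// the file name, so every entry under one directory shares a fixed
// md5.Size-byte prefix and directory listings become plain prefix scans:
//
//   key := hashToBytes("/buckets/b1/sub")      // 16 bytes (md5.Size)
//   key = append(key, []byte("file1.jpg")...)  // prefix + name
//   name := getNameFromKey(key)                // "file1.jpg" again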
+ return key +} + +func genDirectoryKeyPrefix(fullpath weed_util.FullPath, startFileName string) (keyPrefix []byte) { + keyPrefix = hashToBytes(string(fullpath)) + if len(startFileName) > 0 { + keyPrefix = append(keyPrefix, []byte(startFileName)...) + } + return keyPrefix +} + +func getNameFromKey(key []byte) string { + + return string(key[md5.Size:]) + +} + +// hash directory +func hashToBytes(dir string) []byte { + h := md5.New() + io.WriteString(h, dir) + b := h.Sum(nil) + return b +} + +func (store *LevelDB3Store) Shutdown() { + for _, db := range store.dbs { + db.Close() + } +} diff --git a/weed/filer/leveldb3/leveldb3_store_kv.go b/weed/filer/leveldb3/leveldb3_store_kv.go new file mode 100644 index 000000000..18d782b80 --- /dev/null +++ b/weed/filer/leveldb3/leveldb3_store_kv.go @@ -0,0 +1,46 @@ +package leveldb + +import ( + "context" + "fmt" + + "github.com/chrislusf/seaweedfs/weed/filer" + "github.com/syndtr/goleveldb/leveldb" +) + +func (store *LevelDB3Store) KvPut(ctx context.Context, key []byte, value []byte) (err error) { + + err = store.dbs[DEFAULT].Put(key, value, nil) + + if err != nil { + return fmt.Errorf("kv put: %v", err) + } + + return nil +} + +func (store *LevelDB3Store) KvGet(ctx context.Context, key []byte) (value []byte, err error) { + + value, err = store.dbs[DEFAULT].Get(key, nil) + + if err == leveldb.ErrNotFound { + return nil, filer.ErrKvNotFound + } + + if err != nil { + return nil, fmt.Errorf("kv get: %v", err) + } + + return +} + +func (store *LevelDB3Store) KvDelete(ctx context.Context, key []byte) (err error) { + + err = store.dbs[DEFAULT].Delete(key, nil) + + if err != nil { + return fmt.Errorf("kv delete: %v", err) + } + + return nil +} diff --git a/weed/filer2/leveldb/leveldb_store_test.go b/weed/filer/leveldb3/leveldb3_store_test.go similarity index 56% rename from weed/filer2/leveldb/leveldb_store_test.go rename to weed/filer/leveldb3/leveldb3_store_test.go index 904de8c97..0b970a539 100644 --- a/weed/filer2/leveldb/leveldb_store_test.go +++ b/weed/filer/leveldb3/leveldb3_store_test.go @@ -2,40 +2,41 @@ package leveldb import ( "context" - "github.com/chrislusf/seaweedfs/weed/filer2" "io/ioutil" "os" "testing" + + "github.com/chrislusf/seaweedfs/weed/filer" + "github.com/chrislusf/seaweedfs/weed/util" ) func TestCreateAndFind(t *testing.T) { - filer := filer2.NewFiler(nil, nil) + testFiler := filer.NewFiler(nil, nil, "", 0, "", "", "", nil) dir, _ := ioutil.TempDir("", "seaweedfs_filer_test") defer os.RemoveAll(dir) - store := &LevelDBStore{} + store := &LevelDB3Store{} store.initialize(dir) - filer.SetStore(store) - filer.DisableDirectoryCache() + testFiler.SetStore(store) - fullpath := filer2.FullPath("/home/chris/this/is/one/file1.jpg") + fullpath := util.FullPath("/home/chris/this/is/one/file1.jpg") ctx := context.Background() - entry1 := &filer2.Entry{ + entry1 := &filer.Entry{ FullPath: fullpath, - Attr: filer2.Attr{ + Attr: filer.Attr{ Mode: 0440, Uid: 1234, Gid: 5678, }, } - if err := filer.CreateEntry(ctx, entry1); err != nil { + if err := testFiler.CreateEntry(ctx, entry1, false, false, nil); err != nil { t.Errorf("create entry %v: %v", entry1.FullPath, err) return } - entry, err := filer.FindEntry(ctx, fullpath) + entry, err := testFiler.FindEntry(ctx, fullpath) if err != nil { t.Errorf("find entry: %v", err) @@ -48,14 +49,14 @@ func TestCreateAndFind(t *testing.T) { } // checking one upper directory - entries, _ := filer.ListDirectoryEntries(ctx, filer2.FullPath("/home/chris/this/is/one"), "", false, 100) + entries, _, _ := 
testFiler.ListDirectoryEntries(ctx, util.FullPath("/home/chris/this/is/one"), "", false, 100, "", "", "") if len(entries) != 1 { t.Errorf("list entries count: %v", len(entries)) return } // checking one upper directory - entries, _ = filer.ListDirectoryEntries(ctx, filer2.FullPath("/"), "", false, 100) + entries, _, _ = testFiler.ListDirectoryEntries(ctx, util.FullPath("/"), "", false, 100, "", "", "") if len(entries) != 1 { t.Errorf("list entries count: %v", len(entries)) return @@ -64,18 +65,17 @@ func TestCreateAndFind(t *testing.T) { } func TestEmptyRoot(t *testing.T) { - filer := filer2.NewFiler(nil, nil) + testFiler := filer.NewFiler(nil, nil, "", 0, "", "", "", nil) dir, _ := ioutil.TempDir("", "seaweedfs_filer_test2") defer os.RemoveAll(dir) - store := &LevelDBStore{} + store := &LevelDB3Store{} store.initialize(dir) - filer.SetStore(store) - filer.DisableDirectoryCache() + testFiler.SetStore(store) ctx := context.Background() // checking one upper directory - entries, err := filer.ListDirectoryEntries(ctx, filer2.FullPath("/"), "", false, 100) + entries, _, err := testFiler.ListDirectoryEntries(ctx, util.FullPath("/"), "", false, 100, "", "", "") if err != nil { t.Errorf("list entries: %v", err) return diff --git a/weed/filer/meta_aggregator.go b/weed/filer/meta_aggregator.go new file mode 100644 index 000000000..5c368a57e --- /dev/null +++ b/weed/filer/meta_aggregator.go @@ -0,0 +1,213 @@ +package filer + +import ( + "context" + "fmt" + "github.com/chrislusf/seaweedfs/weed/util" + "io" + "sync" + "time" + + "github.com/golang/protobuf/proto" + "google.golang.org/grpc" + + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/util/log_buffer" +) + +type MetaAggregator struct { + filers []string + grpcDialOption grpc.DialOption + MetaLogBuffer *log_buffer.LogBuffer + // notifying clients + ListenersLock sync.Mutex + ListenersCond *sync.Cond +} + +// MetaAggregator only aggregates data "on the fly". The logs are not re-persisted to disk. +// The old data comes from what each LocalMetadata persisted on disk. +func NewMetaAggregator(filers []string, grpcDialOption grpc.DialOption) *MetaAggregator { + t := &MetaAggregator{ + filers: filers, + grpcDialOption: grpcDialOption, + } + t.ListenersCond = sync.NewCond(&t.ListenersLock) + t.MetaLogBuffer = log_buffer.NewLogBuffer(LogFlushInterval, nil, func() { + t.ListenersCond.Broadcast() + }) + return t +} + +func (ma *MetaAggregator) StartLoopSubscribe(f *Filer, self string) { + for _, filer := range ma.filers { + go ma.subscribeToOneFiler(f, self, filer) + } +} + +func (ma *MetaAggregator) subscribeToOneFiler(f *Filer, self string, peer string) { + + /* + Each filer reads the "filer.store.id", which is the store's signature when filer starts. + + When reading from other filers' local meta changes: + * if the received change does not contain signature from self, apply the change to current filer store. + + Upon connecting to other filers, need to remember their signature and their offsets. 
+ + */ + + var maybeReplicateMetadataChange func(*filer_pb.SubscribeMetadataResponse) + lastPersistTime := time.Now() + lastTsNs := time.Now().Add(-LogFlushInterval).UnixNano() + + peerSignature, err := ma.readFilerStoreSignature(peer) + for err != nil { + glog.V(0).Infof("connecting to peer filer %s: %v", peer, err) + time.Sleep(1357 * time.Millisecond) + peerSignature, err = ma.readFilerStoreSignature(peer) + } + + // replicate changes only when the peer's filer store is not shared with this filer + if peerSignature != f.Signature { + if prevTsNs, err := ma.readOffset(f, peer, peerSignature); err == nil { + lastTsNs = prevTsNs + } + + glog.V(0).Infof("follow peer: %v, last %v (%d)", peer, time.Unix(0, lastTsNs), lastTsNs) + var counter int64 + var synced bool + maybeReplicateMetadataChange = func(event *filer_pb.SubscribeMetadataResponse) { + if err := Replay(f.Store, event); err != nil { + glog.Errorf("failed to replay metadata change from %v: %v", peer, err) + return + } + counter++ + if lastPersistTime.Add(time.Minute).Before(time.Now()) { + if err := ma.updateOffset(f, peer, peerSignature, event.TsNs); err == nil { + if event.TsNs < time.Now().Add(-2*time.Minute).UnixNano() { + glog.V(0).Infof("sync with %s progressed to: %v %0.2f/sec", peer, time.Unix(0, event.TsNs), float64(counter)/60.0) + } else if !synced { + synced = true + glog.V(0).Infof("synced with %s", peer) + } + lastPersistTime = time.Now() + counter = 0 + } else { + glog.V(0).Infof("failed to update offset for %v: %v", peer, err) + } + } + } + } + + processEventFn := func(event *filer_pb.SubscribeMetadataResponse) error { + data, err := proto.Marshal(event) + if err != nil { + glog.Errorf("failed to marshal subscribed filer_pb.SubscribeMetadataResponse %+v: %v", event, err) + return err + } + dir := event.Directory + // println("received meta change", dir, "size", len(data)) + ma.MetaLogBuffer.AddToBuffer([]byte(dir), data, 0) + if maybeReplicateMetadataChange != nil { + maybeReplicateMetadataChange(event) + } + return nil + } + + for { + err := pb.WithFilerClient(peer, ma.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + stream, err := client.SubscribeLocalMetadata(ctx, &filer_pb.SubscribeMetadataRequest{ + ClientName: "filer:" + self, + PathPrefix: "/", + SinceNs: lastTsNs, + }) + if err != nil { + return fmt.Errorf("subscribe: %v", err) + } + + for { + resp, listenErr := stream.Recv() + if listenErr == io.EOF { + return nil + } + if listenErr != nil { + return listenErr + } + + if err := processEventFn(resp); err != nil { + return fmt.Errorf("process %v: %v", resp, err) + } + lastTsNs = resp.TsNs + + f.onMetadataChangeEvent(resp) + + } + }) + if err != nil { + glog.V(0).Infof("subscribing remote %s meta change: %v", peer, err) + time.Sleep(1733 * time.Millisecond) + } + } +} + +func (ma *MetaAggregator) readFilerStoreSignature(peer string) (sig int32, err error) { + err = pb.WithFilerClient(peer, ma.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error { + resp, err := client.GetFilerConfiguration(context.Background(), &filer_pb.GetFilerConfigurationRequest{}) + if err != nil { + return err + } + sig = resp.Signature + return nil + }) + return +} + +const ( + MetaOffsetPrefix = "Meta" +) + +func (ma *MetaAggregator) readOffset(f *Filer, peer string, peerSignature int32) (lastTsNs int64, err error) { + + key := []byte(MetaOffsetPrefix + "xxxx") + util.Uint32toBytes(key[len(MetaOffsetPrefix):], uint32(peerSignature)) + + value, err := 
f.Store.KvGet(context.Background(), key) + + if err == ErrKvNotFound { + glog.Warningf("readOffset %s not found", peer) + return 0, nil + } + + if err != nil { + return 0, fmt.Errorf("readOffset %s : %v", peer, err) + } + + lastTsNs = int64(util.BytesToUint64(value)) + + glog.V(0).Infof("readOffset %s : %d", peer, lastTsNs) + + return +} + +func (ma *MetaAggregator) updateOffset(f *Filer, peer string, peerSignature int32, lastTsNs int64) (err error) { + + key := []byte(MetaOffsetPrefix + "xxxx") + util.Uint32toBytes(key[len(MetaOffsetPrefix):], uint32(peerSignature)) + + value := make([]byte, 8) + util.Uint64toBytes(value, uint64(lastTsNs)) + + err = f.Store.KvPut(context.Background(), key, value) + + if err != nil { + return fmt.Errorf("updateOffset %s : %v", peer, err) + } + + glog.V(4).Infof("updateOffset %s : %d", peer, lastTsNs) + + return +} diff --git a/weed/filer/meta_replay.go b/weed/filer/meta_replay.go new file mode 100644 index 000000000..feb76278b --- /dev/null +++ b/weed/filer/meta_replay.go @@ -0,0 +1,37 @@ +package filer + +import ( + "context" + + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/util" +) + +func Replay(filerStore FilerStore, resp *filer_pb.SubscribeMetadataResponse) error { + message := resp.EventNotification + var oldPath util.FullPath + var newEntry *Entry + if message.OldEntry != nil { + oldPath = util.NewFullPath(resp.Directory, message.OldEntry.Name) + glog.V(4).Infof("deleting %v", oldPath) + if err := filerStore.DeleteEntry(context.Background(), oldPath); err != nil { + return err + } + } + + if message.NewEntry != nil { + dir := resp.Directory + if message.NewParentPath != "" { + dir = message.NewParentPath + } + key := util.NewFullPath(dir, message.NewEntry.Name) + glog.V(4).Infof("creating %v", key) + newEntry = FromPbEntry(dir, message.NewEntry) + if err := filerStore.InsertEntry(context.Background(), newEntry); err != nil { + return err + } + } + + return nil +} diff --git a/weed/filer/mongodb/mongodb_store.go b/weed/filer/mongodb/mongodb_store.go new file mode 100644 index 000000000..1ef5056f4 --- /dev/null +++ b/weed/filer/mongodb/mongodb_store.go @@ -0,0 +1,229 @@ +package mongodb + +import ( + "context" + "fmt" + "github.com/chrislusf/seaweedfs/weed/filer" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/util" + "go.mongodb.org/mongo-driver/bson" + "go.mongodb.org/mongo-driver/mongo" + "go.mongodb.org/mongo-driver/mongo/options" + "go.mongodb.org/mongo-driver/x/bsonx" + "time" +) + +func init() { + filer.Stores = append(filer.Stores, &MongodbStore{}) +} + +type MongodbStore struct { + connect *mongo.Client + database string + collectionName string +} + +type Model struct { + Directory string `bson:"directory"` + Name string `bson:"name"` + Meta []byte `bson:"meta"` +} + +func (store *MongodbStore) GetName() string { + return "mongodb" +} + +func (store *MongodbStore) Initialize(configuration util.Configuration, prefix string) (err error) { + store.database = configuration.GetString(prefix + "database") + store.collectionName = "filemeta" + poolSize := configuration.GetInt(prefix + "option_pool_size") + return store.connection(configuration.GetString(prefix+"uri"), uint64(poolSize)) +} + +func (store *MongodbStore) connection(uri string, poolSize uint64) (err error) { + ctx, _ := context.WithTimeout(context.Background(), 10*time.Second) + opts := 
options.Client().ApplyURI(uri) + + if poolSize > 0 { + opts.SetMaxPoolSize(poolSize) + } + + client, err := mongo.Connect(ctx, opts) + if err != nil { + return err + } + + c := client.Database(store.database).Collection(store.collectionName) + err = store.indexUnique(c) + store.connect = client + return err +} + +func (store *MongodbStore) createIndex(c *mongo.Collection, index mongo.IndexModel, opts *options.CreateIndexesOptions) error { + _, err := c.Indexes().CreateOne(context.Background(), index, opts) + return err +} + +func (store *MongodbStore) indexUnique(c *mongo.Collection) error { + opts := options.CreateIndexes().SetMaxTime(10 * time.Second) + + unique := new(bool) + *unique = true + + index := mongo.IndexModel{ + Keys: bsonx.Doc{{Key: "directory", Value: bsonx.Int32(1)}, {Key: "name", Value: bsonx.Int32(1)}}, + Options: &options.IndexOptions{ + Unique: unique, + }, + } + + return store.createIndex(c, index, opts) +} + +func (store *MongodbStore) BeginTransaction(ctx context.Context) (context.Context, error) { + return ctx, nil +} + +func (store *MongodbStore) CommitTransaction(ctx context.Context) error { + return nil +} + +func (store *MongodbStore) RollbackTransaction(ctx context.Context) error { + return nil +} + +func (store *MongodbStore) InsertEntry(ctx context.Context, entry *filer.Entry) (err error) { + + return store.UpdateEntry(ctx, entry) + +} + +func (store *MongodbStore) UpdateEntry(ctx context.Context, entry *filer.Entry) (err error) { + + dir, name := entry.FullPath.DirAndName() + meta, err := entry.EncodeAttributesAndChunks() + if err != nil { + return fmt.Errorf("encode %s: %s", entry.FullPath, err) + } + + if len(entry.Chunks) > 50 { + meta = util.MaybeGzipData(meta) + } + + c := store.connect.Database(store.database).Collection(store.collectionName) + + opts := options.Update().SetUpsert(true) + filter := bson.D{{"directory", dir}, {"name", name}} + update := bson.D{{"$set", bson.D{{"meta", meta}}}} + + _, err = c.UpdateOne(ctx, filter, update, opts) + + if err != nil { + return fmt.Errorf("UpdateEntry %s: %v", entry.FullPath, err) + } + + return nil +} + +func (store *MongodbStore) FindEntry(ctx context.Context, fullpath util.FullPath) (entry *filer.Entry, err error) { + + dir, name := fullpath.DirAndName() + var data Model + + var where = bson.M{"directory": dir, "name": name} + err = store.connect.Database(store.database).Collection(store.collectionName).FindOne(ctx, where).Decode(&data) + if err != mongo.ErrNoDocuments && err != nil { + glog.Errorf("find %s: %v", fullpath, err) + return nil, filer_pb.ErrNotFound + } + + if len(data.Meta) == 0 { + return nil, filer_pb.ErrNotFound + } + + entry = &filer.Entry{ + FullPath: fullpath, + } + + err = entry.DecodeAttributesAndChunks(util.MaybeDecompressData(data.Meta)) + if err != nil { + return entry, fmt.Errorf("decode %s : %v", entry.FullPath, err) + } + + return entry, nil +} + +func (store *MongodbStore) DeleteEntry(ctx context.Context, fullpath util.FullPath) error { + + dir, name := fullpath.DirAndName() + + where := bson.M{"directory": dir, "name": name} + _, err := store.connect.Database(store.database).Collection(store.collectionName).DeleteOne(ctx, where) + if err != nil { + return fmt.Errorf("delete %s : %v", fullpath, err) + } + + return nil +} + +func (store *MongodbStore) DeleteFolderChildren(ctx context.Context, fullpath util.FullPath) error { + + where := bson.M{"directory": fullpath} + _, err := store.connect.Database(store.database).Collection(store.collectionName).DeleteMany(ctx, where) + if 
err != nil { + return fmt.Errorf("delete %s : %v", fullpath, err) + } + + return nil +} + +func (store *MongodbStore) ListDirectoryPrefixedEntries(ctx context.Context, dirPath util.FullPath, startFileName string, includeStartFile bool, limit int64, prefix string, eachEntryFunc filer.ListEachEntryFunc) (lastFileName string, err error) { + return lastFileName, filer.ErrUnsupportedListDirectoryPrefixed +} + +func (store *MongodbStore) ListDirectoryEntries(ctx context.Context, dirPath util.FullPath, startFileName string, includeStartFile bool, limit int64, eachEntryFunc filer.ListEachEntryFunc) (lastFileName string, err error) { + + var where = bson.M{"directory": string(dirPath), "name": bson.M{"$gt": startFileName}} + if includeStartFile { + where["name"] = bson.M{ + "$gte": startFileName, + } + } + optLimit := int64(limit) + opts := &options.FindOptions{Limit: &optLimit, Sort: bson.M{"name": 1}} + cur, err := store.connect.Database(store.database).Collection(store.collectionName).Find(ctx, where, opts) + if err != nil { + // guard against iterating a nil cursor when the query itself fails + return lastFileName, fmt.Errorf("list %s: %v", dirPath, err) + } + for cur.Next(ctx) { + var data Model + err := cur.Decode(&data) + if err != nil && err != mongo.ErrNoDocuments { + return lastFileName, err + } + + entry := &filer.Entry{ + FullPath: util.NewFullPath(string(dirPath), data.Name), + } + lastFileName = data.Name + if decodeErr := entry.DecodeAttributesAndChunks(util.MaybeDecompressData(data.Meta)); decodeErr != nil { + err = decodeErr + glog.V(0).Infof("list %s : %v", entry.FullPath, err) + break + } + + if !eachEntryFunc(entry) { + break + } + + } + + if err := cur.Close(ctx); err != nil { + glog.V(0).Infof("list iterator close: %v", err) + } + + return lastFileName, err +} + +func (store *MongodbStore) Shutdown() { + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + store.connect.Disconnect(ctx) +} diff --git a/weed/filer/mongodb/mongodb_store_kv.go b/weed/filer/mongodb/mongodb_store_kv.go new file mode 100644 index 000000000..4aa9c3a33 --- /dev/null +++ b/weed/filer/mongodb/mongodb_store_kv.go @@ -0,0 +1,76 @@ +package mongodb + +import ( + "context" + "fmt" + "github.com/chrislusf/seaweedfs/weed/filer" + "github.com/chrislusf/seaweedfs/weed/glog" + "go.mongodb.org/mongo-driver/bson" + "go.mongodb.org/mongo-driver/mongo" + "go.mongodb.org/mongo-driver/mongo/options" +) + +func (store *MongodbStore) KvPut(ctx context.Context, key []byte, value []byte) (err error) { + + dir, name := genDirAndName(key) + + c := store.connect.Database(store.database).Collection(store.collectionName) + + // upsert so that repeated puts on the same key overwrite the existing value (the unique directory+name index would reject a plain insert) + opts := options.Update().SetUpsert(true) + filter := bson.M{"directory": dir, "name": name} + update := bson.M{"$set": bson.M{"meta": value}} + _, err = c.UpdateOne(ctx, filter, update, opts) + + if err != nil { + return fmt.Errorf("kv put: %v", err) + } + + return nil +} + +func (store *MongodbStore) KvGet(ctx context.Context, key []byte) (value []byte, err error) { + dir, name := genDirAndName(key) + + var data Model + + var where = bson.M{"directory": dir, "name": name} + err = store.connect.Database(store.database).Collection(store.collectionName).FindOne(ctx, where).Decode(&data) + if err == mongo.ErrNoDocuments { + return nil, filer.ErrKvNotFound + } + if err != nil { + glog.Errorf("kv get: %v", err) + return nil, fmt.Errorf("kv get: %v", err) + } + + if len(data.Meta) == 0 { + return nil, filer.ErrKvNotFound + } + + return data.Meta, nil +} + +func (store *MongodbStore) KvDelete(ctx context.Context, key []byte) (err error) { + + dir, name := genDirAndName(key) + + where := bson.M{"directory": dir, "name": name} + _, err = store.connect.Database(store.database).Collection(store.collectionName).DeleteOne(ctx, where) + if err != nil { + return fmt.Errorf("kv delete: %v", err) + } + + return nil +} + +func genDirAndName(key []byte) (dir 
string, name string) { + for len(key) < 8 { + key = append(key, 0) + } + + dir = string(key[:8]) + name = string(key[8:]) + + return +} diff --git a/weed/filer/mysql/mysql_sql_gen.go b/weed/filer/mysql/mysql_sql_gen.go new file mode 100644 index 000000000..93d3e3f9e --- /dev/null +++ b/weed/filer/mysql/mysql_sql_gen.go @@ -0,0 +1,58 @@ +package mysql + +import ( + "fmt" + + "github.com/chrislusf/seaweedfs/weed/filer/abstract_sql" + _ "github.com/go-sql-driver/mysql" +) + +type SqlGenMysql struct { + CreateTableSqlTemplate string + DropTableSqlTemplate string + UpsertQueryTemplate string +} + +var ( + _ = abstract_sql.SqlGenerator(&SqlGenMysql{}) +) + +func (gen *SqlGenMysql) GetSqlInsert(tableName string) string { + if gen.UpsertQueryTemplate != "" { + return fmt.Sprintf(gen.UpsertQueryTemplate, tableName) + } else { + return fmt.Sprintf("INSERT INTO `%s` (dirhash,name,directory,meta) VALUES(?,?,?,?)", tableName) + } +} + +func (gen *SqlGenMysql) GetSqlUpdate(tableName string) string { + return fmt.Sprintf("UPDATE `%s` SET meta=? WHERE dirhash=? AND name=? AND directory=?", tableName) +} + +func (gen *SqlGenMysql) GetSqlFind(tableName string) string { + return fmt.Sprintf("SELECT meta FROM `%s` WHERE dirhash=? AND name=? AND directory=?", tableName) +} + +func (gen *SqlGenMysql) GetSqlDelete(tableName string) string { + return fmt.Sprintf("DELETE FROM `%s` WHERE dirhash=? AND name=? AND directory=?", tableName) +} + +func (gen *SqlGenMysql) GetSqlDeleteFolderChildren(tableName string) string { + return fmt.Sprintf("DELETE FROM `%s` WHERE dirhash=? AND directory=?", tableName) +} + +func (gen *SqlGenMysql) GetSqlListExclusive(tableName string) string { + return fmt.Sprintf("SELECT NAME, meta FROM `%s` WHERE dirhash=? AND name>? AND directory=? AND name like ? ORDER BY NAME ASC LIMIT ?", tableName) +} + +func (gen *SqlGenMysql) GetSqlListInclusive(tableName string) string { + return fmt.Sprintf("SELECT NAME, meta FROM `%s` WHERE dirhash=? AND name>=? AND directory=? AND name like ? 
ORDER BY NAME ASC LIMIT ?", tableName) +} + +func (gen *SqlGenMysql) GetSqlCreateTable(tableName string) string { + return fmt.Sprintf(gen.CreateTableSqlTemplate, tableName) +} + +func (gen *SqlGenMysql) GetSqlDropTable(tableName string) string { + return fmt.Sprintf(gen.DropTableSqlTemplate, tableName) +} diff --git a/weed/filer/mysql/mysql_store.go b/weed/filer/mysql/mysql_store.go new file mode 100644 index 000000000..fbaa4d5f9 --- /dev/null +++ b/weed/filer/mysql/mysql_store.go @@ -0,0 +1,84 @@ +package mysql + +import ( + "database/sql" + "fmt" + "time" + + "github.com/chrislusf/seaweedfs/weed/filer" + + "github.com/chrislusf/seaweedfs/weed/filer/abstract_sql" + "github.com/chrislusf/seaweedfs/weed/util" + _ "github.com/go-sql-driver/mysql" +) + +const ( + CONNECTION_URL_PATTERN = "%s:%s@tcp(%s:%d)/%s?charset=utf8" +) + +func init() { + filer.Stores = append(filer.Stores, &MysqlStore{}) +} + +type MysqlStore struct { + abstract_sql.AbstractSqlStore +} + +func (store *MysqlStore) GetName() string { + return "mysql" +} + +func (store *MysqlStore) Initialize(configuration util.Configuration, prefix string) (err error) { + return store.initialize( + configuration.GetString(prefix+"upsertQuery"), + configuration.GetBool(prefix+"enableUpsert"), + configuration.GetString(prefix+"username"), + configuration.GetString(prefix+"password"), + configuration.GetString(prefix+"hostname"), + configuration.GetInt(prefix+"port"), + configuration.GetString(prefix+"database"), + configuration.GetInt(prefix+"connection_max_idle"), + configuration.GetInt(prefix+"connection_max_open"), + configuration.GetInt(prefix+"connection_max_lifetime_seconds"), + configuration.GetBool(prefix+"interpolateParams"), + ) +} + +func (store *MysqlStore) initialize(upsertQuery string, enableUpsert bool, user, password, hostname string, port int, database string, maxIdle, maxOpen, + maxLifetimeSeconds int, interpolateParams bool) (err error) { + + store.SupportBucketTable = false + if !enableUpsert { + upsertQuery = "" + } + store.SqlGenerator = &SqlGenMysql{ + CreateTableSqlTemplate: "", + DropTableSqlTemplate: "drop table `%s`", + UpsertQueryTemplate: upsertQuery, + } + + sqlUrl := fmt.Sprintf(CONNECTION_URL_PATTERN, user, password, hostname, port, database) + adaptedSqlUrl := fmt.Sprintf(CONNECTION_URL_PATTERN, user, "", hostname, port, database) + if interpolateParams { + sqlUrl += "&interpolateParams=true" + adaptedSqlUrl += "&interpolateParams=true" + } + + var dbErr error + store.DB, dbErr = sql.Open("mysql", sqlUrl) + if dbErr != nil { + // sql.Open returns a nil *sql.DB on error, so there is nothing to close here + store.DB = nil + return fmt.Errorf("cannot connect to %s: %v", adaptedSqlUrl, dbErr) + } + + store.DB.SetMaxIdleConns(maxIdle) + store.DB.SetMaxOpenConns(maxOpen) + store.DB.SetConnMaxLifetime(time.Duration(maxLifetimeSeconds) * time.Second) + + if err = store.DB.Ping(); err != nil { + return fmt.Errorf("ping %s: %v", adaptedSqlUrl, err) + } + + return nil +} diff --git a/weed/filer/mysql2/mysql2_store.go b/weed/filer/mysql2/mysql2_store.go new file mode 100644 index 000000000..a1f54455a --- /dev/null +++ b/weed/filer/mysql2/mysql2_store.go @@ -0,0 +1,90 @@ +package mysql2 + +import ( + "context" + "database/sql" + "fmt" + "time" + + "github.com/chrislusf/seaweedfs/weed/filer" + "github.com/chrislusf/seaweedfs/weed/filer/abstract_sql" + "github.com/chrislusf/seaweedfs/weed/filer/mysql" + "github.com/chrislusf/seaweedfs/weed/util" + _ "github.com/go-sql-driver/mysql" +) + +const ( + CONNECTION_URL_PATTERN = "%s:%s@tcp(%s:%d)/%s?charset=utf8" +) + +func init() 
{ + filer.Stores = append(filer.Stores, &MysqlStore2{}) +} + +type MysqlStore2 struct { + abstract_sql.AbstractSqlStore +} + +func (store *MysqlStore2) GetName() string { + return "mysql2" +} + +func (store *MysqlStore2) Initialize(configuration util.Configuration, prefix string) (err error) { + return store.initialize( + configuration.GetString(prefix+"createTable"), + configuration.GetString(prefix+"upsertQuery"), + configuration.GetBool(prefix+"enableUpsert"), + configuration.GetString(prefix+"username"), + configuration.GetString(prefix+"password"), + configuration.GetString(prefix+"hostname"), + configuration.GetInt(prefix+"port"), + configuration.GetString(prefix+"database"), + configuration.GetInt(prefix+"connection_max_idle"), + configuration.GetInt(prefix+"connection_max_open"), + configuration.GetInt(prefix+"connection_max_lifetime_seconds"), + configuration.GetBool(prefix+"interpolateParams"), + ) +} + +func (store *MysqlStore2) initialize(createTable, upsertQuery string, enableUpsert bool, user, password, hostname string, port int, database string, maxIdle, maxOpen, + maxLifetimeSeconds int, interpolateParams bool) (err error) { + + store.SupportBucketTable = true + if !enableUpsert { + upsertQuery = "" + } + store.SqlGenerator = &mysql.SqlGenMysql{ + CreateTableSqlTemplate: createTable, + DropTableSqlTemplate: "drop table `%s`", + UpsertQueryTemplate: upsertQuery, + } + + sqlUrl := fmt.Sprintf(CONNECTION_URL_PATTERN, user, password, hostname, port, database) + adaptedSqlUrl := fmt.Sprintf(CONNECTION_URL_PATTERN, user, "", hostname, port, database) + if interpolateParams { + sqlUrl += "&interpolateParams=true" + adaptedSqlUrl += "&interpolateParams=true" + } + + var dbErr error + store.DB, dbErr = sql.Open("mysql", sqlUrl) + if dbErr != nil { + // sql.Open returns a nil *sql.DB on error, so there is nothing to close here + store.DB = nil + return fmt.Errorf("cannot connect to %s: %v", adaptedSqlUrl, dbErr) + } + + store.DB.SetMaxIdleConns(maxIdle) + store.DB.SetMaxOpenConns(maxOpen) + store.DB.SetConnMaxLifetime(time.Duration(maxLifetimeSeconds) * time.Second) + + if err = store.DB.Ping(); err != nil { + return fmt.Errorf("ping %s: %v", adaptedSqlUrl, err) + } + + if err = store.CreateTable(context.Background(), abstract_sql.DEFAULT_TABLE); err != nil { + return fmt.Errorf("init table %s: %v", abstract_sql.DEFAULT_TABLE, err) + } + + return nil +} diff --git a/weed/filer2/permission.go b/weed/filer/permission.go similarity index 95% rename from weed/filer2/permission.go rename to weed/filer/permission.go index 8a9508fbc..0d8b8292b 100644 --- a/weed/filer2/permission.go +++ b/weed/filer/permission.go @@ -1,4 +1,4 @@ -package filer2 +package filer func hasWritePermission(dir *Entry, entry *Entry) bool { diff --git a/weed/filer2/postgres/README.txt b/weed/filer/postgres/README.txt similarity index 100% rename from weed/filer2/postgres/README.txt rename to weed/filer/postgres/README.txt diff --git a/weed/filer/postgres/postgres_sql_gen.go b/weed/filer/postgres/postgres_sql_gen.go new file mode 100644 index 000000000..6cee3d2da --- /dev/null +++ b/weed/filer/postgres/postgres_sql_gen.go @@ -0,0 +1,58 @@ +package postgres + +import ( + "fmt" + + "github.com/chrislusf/seaweedfs/weed/filer/abstract_sql" + _ "github.com/lib/pq" +) + +type SqlGenPostgres struct { + CreateTableSqlTemplate string + DropTableSqlTemplate string + UpsertQueryTemplate string +} + +var ( + _ = abstract_sql.SqlGenerator(&SqlGenPostgres{}) +) + +func (gen *SqlGenPostgres) GetSqlInsert(tableName string) string { + if gen.UpsertQueryTemplate != "" { + return 
fmt.Sprintf(gen.UpsertQueryTemplate, tableName) + } else { + return fmt.Sprintf(`INSERT INTO "%s" (dirhash,name,directory,meta) VALUES($1,$2,$3,$4)`, tableName) + } +} + +func (gen *SqlGenPostgres) GetSqlUpdate(tableName string) string { + return fmt.Sprintf(`UPDATE "%s" SET meta=$1 WHERE dirhash=$2 AND name=$3 AND directory=$4`, tableName) +} + +func (gen *SqlGenPostgres) GetSqlFind(tableName string) string { + return fmt.Sprintf(`SELECT meta FROM "%s" WHERE dirhash=$1 AND name=$2 AND directory=$3`, tableName) +} + +func (gen *SqlGenPostgres) GetSqlDelete(tableName string) string { + return fmt.Sprintf(`DELETE FROM "%s" WHERE dirhash=$1 AND name=$2 AND directory=$3`, tableName) +} + +func (gen *SqlGenPostgres) GetSqlDeleteFolderChildren(tableName string) string { + return fmt.Sprintf(`DELETE FROM "%s" WHERE dirhash=$1 AND directory=$2`, tableName) +} + +func (gen *SqlGenPostgres) GetSqlListExclusive(tableName string) string { + return fmt.Sprintf(`SELECT NAME, meta FROM "%s" WHERE dirhash=$1 AND name>$2 AND directory=$3 AND name like $4 ORDER BY NAME ASC LIMIT $5`, tableName) +} + +func (gen *SqlGenPostgres) GetSqlListInclusive(tableName string) string { + return fmt.Sprintf(`SELECT NAME, meta FROM "%s" WHERE dirhash=$1 AND name>=$2 AND directory=$3 AND name like $4 ORDER BY NAME ASC LIMIT $5`, tableName) +} + +func (gen *SqlGenPostgres) GetSqlCreateTable(tableName string) string { + return fmt.Sprintf(gen.CreateTableSqlTemplate, tableName) +} + +func (gen *SqlGenPostgres) GetSqlDropTable(tableName string) string { + return fmt.Sprintf(gen.DropTableSqlTemplate, tableName) +} diff --git a/weed/filer/postgres/postgres_store.go b/weed/filer/postgres/postgres_store.go new file mode 100644 index 000000000..a1e16a92a --- /dev/null +++ b/weed/filer/postgres/postgres_store.go @@ -0,0 +1,93 @@ +package postgres + +import ( + "database/sql" + "fmt" + "time" + + "github.com/chrislusf/seaweedfs/weed/filer" + "github.com/chrislusf/seaweedfs/weed/filer/abstract_sql" + "github.com/chrislusf/seaweedfs/weed/util" + _ "github.com/lib/pq" +) + +const ( + CONNECTION_URL_PATTERN = "host=%s port=%d sslmode=%s connect_timeout=30" +) + +func init() { + filer.Stores = append(filer.Stores, &PostgresStore{}) +} + +type PostgresStore struct { + abstract_sql.AbstractSqlStore +} + +func (store *PostgresStore) GetName() string { + return "postgres" +} + +func (store *PostgresStore) Initialize(configuration util.Configuration, prefix string) (err error) { + return store.initialize( + configuration.GetString(prefix+"upsertQuery"), + configuration.GetBool(prefix+"enableUpsert"), + configuration.GetString(prefix+"username"), + configuration.GetString(prefix+"password"), + configuration.GetString(prefix+"hostname"), + configuration.GetInt(prefix+"port"), + configuration.GetString(prefix+"database"), + configuration.GetString(prefix+"schema"), + configuration.GetString(prefix+"sslmode"), + configuration.GetInt(prefix+"connection_max_idle"), + configuration.GetInt(prefix+"connection_max_open"), + configuration.GetInt(prefix+"connection_max_lifetime_seconds"), + ) +} + +func (store *PostgresStore) initialize(upsertQuery string, enableUpsert bool, user, password, hostname string, port int, database, schema, sslmode string, maxIdle, maxOpen, maxLifetimeSeconds int) (err error) { + + store.SupportBucketTable = false + if !enableUpsert { + upsertQuery = "" + } + store.SqlGenerator = &SqlGenPostgres{ + CreateTableSqlTemplate: "", + DropTableSqlTemplate: `drop table "%s"`, + UpsertQueryTemplate: upsertQuery, + } + + sqlUrl := 
fmt.Sprintf(CONNECTION_URL_PATTERN, hostname, port, sslmode) + if user != "" { + sqlUrl += " user=" + user + } + adaptedSqlUrl := sqlUrl + if password != "" { + sqlUrl += " password=" + password + adaptedSqlUrl += " password=ADAPTED" + } + if database != "" { + sqlUrl += " dbname=" + database + adaptedSqlUrl += " dbname=" + database + } + if schema != "" { + sqlUrl += " search_path=" + schema + adaptedSqlUrl += " search_path=" + schema + } + var dbErr error + store.DB, dbErr = sql.Open("postgres", sqlUrl) + if dbErr != nil { + // sql.Open returns a nil *sql.DB on error, so there is nothing to close here + store.DB = nil + return fmt.Errorf("cannot connect to %s: %v", adaptedSqlUrl, dbErr) + } + + store.DB.SetMaxIdleConns(maxIdle) + store.DB.SetMaxOpenConns(maxOpen) + store.DB.SetConnMaxLifetime(time.Duration(maxLifetimeSeconds) * time.Second) + + if err = store.DB.Ping(); err != nil { + return fmt.Errorf("ping %s: %v", adaptedSqlUrl, err) + } + + return nil +} diff --git a/weed/filer/postgres2/postgres2_store.go b/weed/filer/postgres2/postgres2_store.go new file mode 100644 index 000000000..0f573d8d0 --- /dev/null +++ b/weed/filer/postgres2/postgres2_store.go @@ -0,0 +1,100 @@ +package postgres2 + +import ( + "context" + "database/sql" + "fmt" + "time" + + "github.com/chrislusf/seaweedfs/weed/filer" + "github.com/chrislusf/seaweedfs/weed/filer/abstract_sql" + "github.com/chrislusf/seaweedfs/weed/filer/postgres" + "github.com/chrislusf/seaweedfs/weed/util" + _ "github.com/lib/pq" +) + +const ( + CONNECTION_URL_PATTERN = "host=%s port=%d sslmode=%s connect_timeout=30" +) + +func init() { + filer.Stores = append(filer.Stores, &PostgresStore2{}) +} + +type PostgresStore2 struct { + abstract_sql.AbstractSqlStore +} + +func (store *PostgresStore2) GetName() string { + return "postgres2" +} + +func (store *PostgresStore2) Initialize(configuration util.Configuration, prefix string) (err error) { + return store.initialize( + configuration.GetString(prefix+"createTable"), + configuration.GetString(prefix+"upsertQuery"), + configuration.GetBool(prefix+"enableUpsert"), + configuration.GetString(prefix+"username"), + configuration.GetString(prefix+"password"), + configuration.GetString(prefix+"hostname"), + configuration.GetInt(prefix+"port"), + configuration.GetString(prefix+"database"), + configuration.GetString(prefix+"schema"), + configuration.GetString(prefix+"sslmode"), + configuration.GetInt(prefix+"connection_max_idle"), + configuration.GetInt(prefix+"connection_max_open"), + configuration.GetInt(prefix+"connection_max_lifetime_seconds"), + ) +} + +func (store *PostgresStore2) initialize(createTable, upsertQuery string, enableUpsert bool, user, password, hostname string, port int, database, schema, sslmode string, maxIdle, maxOpen, maxLifetimeSeconds int) (err error) { + + store.SupportBucketTable = true + if !enableUpsert { + upsertQuery = "" + } + store.SqlGenerator = &postgres.SqlGenPostgres{ + CreateTableSqlTemplate: createTable, + DropTableSqlTemplate: `drop table "%s"`, + UpsertQueryTemplate: upsertQuery, + } + + sqlUrl := fmt.Sprintf(CONNECTION_URL_PATTERN, hostname, port, sslmode) + if user != "" { + sqlUrl += " user=" + user + } + adaptedSqlUrl := sqlUrl + if password != "" { + sqlUrl += " password=" + password + adaptedSqlUrl += " password=ADAPTED" + } + if database != "" { + sqlUrl += " dbname=" + database + adaptedSqlUrl += " dbname=" + database + } + if schema != "" { + sqlUrl += " search_path=" + schema + adaptedSqlUrl += " search_path=" + schema + } + var dbErr error + store.DB, dbErr = sql.Open("postgres", sqlUrl) + if dbErr != 
nil { + // sql.Open returns a nil *sql.DB on error, so there is nothing to close here + store.DB = nil + return fmt.Errorf("cannot connect to %s: %v", adaptedSqlUrl, dbErr) + } + + store.DB.SetMaxIdleConns(maxIdle) + store.DB.SetMaxOpenConns(maxOpen) + store.DB.SetConnMaxLifetime(time.Duration(maxLifetimeSeconds) * time.Second) + + if err = store.DB.Ping(); err != nil { + return fmt.Errorf("ping %s: %v", adaptedSqlUrl, err) + } + + if err = store.CreateTable(context.Background(), abstract_sql.DEFAULT_TABLE); err != nil { + return fmt.Errorf("init table %s: %v", abstract_sql.DEFAULT_TABLE, err) + } + + return nil +} diff --git a/weed/filer/read_write.go b/weed/filer/read_write.go new file mode 100644 index 000000000..d92d526d5 --- /dev/null +++ b/weed/filer/read_write.go @@ -0,0 +1,116 @@ +package filer + +import ( + "bytes" + "fmt" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/util" + "github.com/chrislusf/seaweedfs/weed/wdclient" + "io/ioutil" + "math" + "net/http" + "time" +) + +func ReadEntry(masterClient *wdclient.MasterClient, filerClient filer_pb.SeaweedFilerClient, dir, name string, byteBuffer *bytes.Buffer) error { + + request := &filer_pb.LookupDirectoryEntryRequest{ + Directory: dir, + Name: name, + } + respLookupEntry, err := filer_pb.LookupEntry(filerClient, request) + if err != nil { + return err + } + if len(respLookupEntry.Entry.Content) > 0 { + _, err = byteBuffer.Write(respLookupEntry.Entry.Content) + return err + } + + return StreamContent(masterClient, byteBuffer, respLookupEntry.Entry.Chunks, 0, math.MaxInt64, false) + +} + +func ReadContent(filerAddress string, dir, name string) ([]byte, error) { + + target := fmt.Sprintf("http://%s%s/%s", filerAddress, dir, name) + + data, _, err := util.Get(target) + + return data, err +} + +func SaveAs(host string, port int, dir, name string, contentType string, byteBuffer *bytes.Buffer) error { + var target string + if port == 0 { + target = fmt.Sprintf("http://%s%s/%s", host, dir, name) + } else { + target = fmt.Sprintf("http://%s:%d%s/%s", host, port, dir, name) + } + + // build the PUT request with the buffered content as its body + req, err := http.NewRequest(http.MethodPut, target, byteBuffer) + if err != nil { + return err + } + + // set the Content-Type request header when one is provided + if contentType != "" { + req.Header.Set("Content-Type", contentType) + } + resp, err := http.DefaultClient.Do(req) + if err != nil { + return err + } + defer util.CloseResponse(resp) + + b, err := ioutil.ReadAll(resp.Body) + if err != nil { + return err + } + + if resp.StatusCode >= 400 { + return fmt.Errorf("%s: %s %v", target, resp.Status, string(b)) + } + + return nil + +} + +func SaveInsideFiler(client filer_pb.SeaweedFilerClient, dir, name string, content []byte) error { + + resp, err := filer_pb.LookupEntry(client, &filer_pb.LookupDirectoryEntryRequest{ + Directory: dir, + Name: name, + }) + + if err == filer_pb.ErrNotFound { + err = filer_pb.CreateEntry(client, &filer_pb.CreateEntryRequest{ + Directory: dir, + Entry: &filer_pb.Entry{ + Name: name, + IsDirectory: false, + Attributes: &filer_pb.FuseAttributes{ + Mtime: time.Now().Unix(), + Crtime: time.Now().Unix(), + FileMode: uint32(0644), + Collection: "", + Replication: "", + FileSize: uint64(len(content)), + }, + Content: content, + }, + }) + } else if err == nil { + entry := resp.Entry + entry.Content = content + entry.Attributes.Mtime = time.Now().Unix() + entry.Attributes.FileSize = uint64(len(content)) + err = filer_pb.UpdateEntry(client, &filer_pb.UpdateEntryRequest{ + Directory: dir, + Entry: entry, + }) + } 
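// Usage sketch (hypothetical caller; the address, path, and payload names are
// placeholders): since the lookup-create-update above behaves as an upsert,
// small configuration blobs can be written without an existence check first:
//
//   err := pb.WithFilerClient(filerAddress, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
//       return filer.SaveInsideFiler(client, "/etc/config", "settings.json", payload)
//   })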
+ + return err +} diff --git a/weed/filer/reader_at.go b/weed/filer/reader_at.go new file mode 100644 index 000000000..a1e989684 --- /dev/null +++ b/weed/filer/reader_at.go @@ -0,0 +1,229 @@ +package filer + +import ( + "context" + "fmt" + "io" + "math/rand" + "sync" + + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/util" + "github.com/chrislusf/seaweedfs/weed/util/chunk_cache" + "github.com/chrislusf/seaweedfs/weed/wdclient" + "github.com/golang/groupcache/singleflight" +) + +type ChunkReadAt struct { + masterClient *wdclient.MasterClient + chunkViews []*ChunkView + lookupFileId wdclient.LookupFileIdFunctionType + readerLock sync.Mutex + fileSize int64 + + fetchGroup singleflight.Group + chunkCache chunk_cache.ChunkCache + lastChunkFileId string + lastChunkData []byte +} + +var _ = io.ReaderAt(&ChunkReadAt{}) +var _ = io.Closer(&ChunkReadAt{}) + +func LookupFn(filerClient filer_pb.FilerClient) wdclient.LookupFileIdFunctionType { + + vidCache := make(map[string]*filer_pb.Locations) + var vicCacheLock sync.RWMutex + return func(fileId string) (targetUrls []string, err error) { + vid := VolumeId(fileId) + vicCacheLock.RLock() + locations, found := vidCache[vid] + vicCacheLock.RUnlock() + + if !found { + util.Retry("lookup volume "+vid, func() error { + err = filerClient.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { + resp, err := client.LookupVolume(context.Background(), &filer_pb.LookupVolumeRequest{ + VolumeIds: []string{vid}, + }) + if err != nil { + return err + } + + locations = resp.LocationsMap[vid] + if locations == nil || len(locations.Locations) == 0 { + glog.V(0).Infof("failed to locate %s", fileId) + return fmt.Errorf("failed to locate %s", fileId) + } + vicCacheLock.Lock() + vidCache[vid] = locations + vicCacheLock.Unlock() + + return nil + }) + return err + }) + } + + if err != nil { + return nil, err + } + + for _, loc := range locations.Locations { + volumeServerAddress := filerClient.AdjustedUrl(loc) + targetUrl := fmt.Sprintf("http://%s/%s", volumeServerAddress, fileId) + targetUrls = append(targetUrls, targetUrl) + } + + for i := len(targetUrls) - 1; i > 0; i-- { + j := rand.Intn(i + 1) + targetUrls[i], targetUrls[j] = targetUrls[j], targetUrls[i] + } + + return + } +} + +func NewChunkReaderAtFromClient(lookupFn wdclient.LookupFileIdFunctionType, chunkViews []*ChunkView, chunkCache chunk_cache.ChunkCache, fileSize int64) *ChunkReadAt { + + return &ChunkReadAt{ + chunkViews: chunkViews, + lookupFileId: lookupFn, + chunkCache: chunkCache, + fileSize: fileSize, + } +} + +func (c *ChunkReadAt) Close() error { + c.lastChunkData = nil + c.lastChunkFileId = "" + return nil +} + +func (c *ChunkReadAt) ReadAt(p []byte, offset int64) (n int, err error) { + + c.readerLock.Lock() + defer c.readerLock.Unlock() + + glog.V(4).Infof("ReadAt [%d,%d) of total file size %d bytes %d chunk views", offset, offset+int64(len(p)), c.fileSize, len(c.chunkViews)) + return c.doReadAt(p, offset) +} + +func (c *ChunkReadAt) doReadAt(p []byte, offset int64) (n int, err error) { + + startOffset, remaining := offset, int64(len(p)) + var nextChunk *ChunkView + for i, chunk := range c.chunkViews { + if remaining <= 0 { + break + } + if i+1 < len(c.chunkViews) { + nextChunk = c.chunkViews[i+1] + } else { + nextChunk = nil + } + if startOffset < chunk.LogicOffset { + gap := int(chunk.LogicOffset - startOffset) + glog.V(4).Infof("zero [%d,%d)", startOffset, startOffset+int64(gap)) + n += 
int(min(int64(gap), remaining)) + startOffset, remaining = chunk.LogicOffset, remaining-int64(gap) + if remaining <= 0 { + break + } + } + // fmt.Printf(">>> doReadAt [%d,%d), chunk[%d,%d)\n", offset, offset+int64(len(p)), chunk.LogicOffset, chunk.LogicOffset+int64(chunk.Size)) + chunkStart, chunkStop := max(chunk.LogicOffset, startOffset), min(chunk.LogicOffset+int64(chunk.Size), startOffset+remaining) + if chunkStart >= chunkStop { + continue + } + glog.V(4).Infof("read [%d,%d), %d/%d chunk %s [%d,%d)", chunkStart, chunkStop, i, len(c.chunkViews), chunk.FileId, chunk.LogicOffset-chunk.Offset, chunk.LogicOffset-chunk.Offset+int64(chunk.Size)) + var buffer []byte + buffer, err = c.readFromWholeChunkData(chunk, nextChunk) + if err != nil { + glog.Errorf("fetching chunk %+v: %v\n", chunk, err) + return + } + bufferOffset := chunkStart - chunk.LogicOffset + chunk.Offset + copied := copy(p[startOffset-offset:chunkStop-chunkStart+startOffset-offset], buffer[bufferOffset:bufferOffset+chunkStop-chunkStart]) + n += copied + startOffset, remaining = startOffset+int64(copied), remaining-int64(copied) + } + + glog.V(4).Infof("doReadAt [%d,%d), n:%v, err:%v", offset, offset+int64(len(p)), n, err) + + if err == nil && remaining > 0 && c.fileSize > startOffset { + delta := int(min(remaining, c.fileSize-startOffset)) + glog.V(4).Infof("zero2 [%d,%d) of file size %d bytes", startOffset, startOffset+int64(delta), c.fileSize) + n += delta + } + + if err == nil && offset+int64(len(p)) >= c.fileSize { + err = io.EOF + } + // fmt.Printf("~~~ filled %d, err: %v\n\n", n, err) + + return + +} + +func (c *ChunkReadAt) readFromWholeChunkData(chunkView *ChunkView, nextChunkViews ...*ChunkView) (chunkData []byte, err error) { + + if c.lastChunkFileId == chunkView.FileId { + return c.lastChunkData, nil + } + + v, doErr := c.readOneWholeChunk(chunkView) + + if doErr != nil { + return nil, doErr + } + + chunkData = v.([]byte) + + c.lastChunkData = chunkData + c.lastChunkFileId = chunkView.FileId + + for _, nextChunkView := range nextChunkViews { + if c.chunkCache != nil && nextChunkView != nil { + go c.readOneWholeChunk(nextChunkView) + } + } + + return +} + +func (c *ChunkReadAt) readOneWholeChunk(chunkView *ChunkView) (interface{}, error) { + + var err error + + return c.fetchGroup.Do(chunkView.FileId, func() (interface{}, error) { + + glog.V(4).Infof("readFromWholeChunkData %s offset %d [%d,%d) size at least %d", chunkView.FileId, chunkView.Offset, chunkView.LogicOffset, chunkView.LogicOffset+int64(chunkView.Size), chunkView.ChunkSize) + + data := c.chunkCache.GetChunk(chunkView.FileId, chunkView.ChunkSize) + if data != nil { + glog.V(4).Infof("cache hit %s [%d,%d)", chunkView.FileId, chunkView.LogicOffset-chunkView.Offset, chunkView.LogicOffset-chunkView.Offset+int64(len(data))) + } else { + var err error + data, err = c.doFetchFullChunkData(chunkView) + if err != nil { + return data, err + } + c.chunkCache.SetChunk(chunkView.FileId, data) + } + return data, err + }) +} + +func (c *ChunkReadAt) doFetchFullChunkData(chunkView *ChunkView) ([]byte, error) { + + glog.V(4).Infof("+ doFetchFullChunkData %s", chunkView.FileId) + + data, err := fetchChunk(c.lookupFileId, chunkView.FileId, chunkView.CipherKey, chunkView.IsGzipped) + + glog.V(4).Infof("- doFetchFullChunkData %s", chunkView.FileId) + + return data, err + +} diff --git a/weed/filer/reader_at_test.go b/weed/filer/reader_at_test.go new file mode 100644 index 000000000..37a34f4ea --- /dev/null +++ b/weed/filer/reader_at_test.go @@ -0,0 +1,156 @@ +package filer + 
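The tests that follow pin down ReadAt's sparse-read contract: byte ranges not covered by any chunk read back as zeros, a read that reaches the end of the file reports io.EOF together with the bytes it did fill, and a read starting at or past fileSize returns (0, io.EOF). A reduced example in the spirit of the cases below, assuming a reader whose only chunk covers [2,5) of a 10-byte file:

    buf := make([]byte, 4)
    n, err := readerAt.ReadAt(buf, 8) // [8,12) overlaps no chunk
    // n == 2: bytes [8,10) are zero-filled; err == io.EOF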
+import ( + "fmt" + "io" + "math" + "strconv" + "sync" + "testing" +) + +type mockChunkCache struct { +} + +func (m *mockChunkCache) GetChunk(fileId string, minSize uint64) (data []byte) { + x, _ := strconv.Atoi(fileId) + data = make([]byte, minSize) + for i := 0; i < int(minSize); i++ { + data[i] = byte(x) + } + return data +} +func (m *mockChunkCache) SetChunk(fileId string, data []byte) { +} + +func TestReaderAt(t *testing.T) { + + visibles := []VisibleInterval{ + { + start: 1, + stop: 2, + fileId: "1", + chunkSize: 9, + }, + { + start: 3, + stop: 4, + fileId: "3", + chunkSize: 1, + }, + { + start: 5, + stop: 6, + fileId: "5", + chunkSize: 2, + }, + { + start: 7, + stop: 9, + fileId: "7", + chunkSize: 2, + }, + { + start: 9, + stop: 10, + fileId: "9", + chunkSize: 2, + }, + } + + readerAt := &ChunkReadAt{ + chunkViews: ViewFromVisibleIntervals(visibles, 0, math.MaxInt64), + lookupFileId: nil, + readerLock: sync.Mutex{}, + fileSize: 10, + chunkCache: &mockChunkCache{}, + } + + testReadAt(t, readerAt, 0, 10, 10, io.EOF) + testReadAt(t, readerAt, 0, 12, 10, io.EOF) + testReadAt(t, readerAt, 2, 8, 8, io.EOF) + testReadAt(t, readerAt, 3, 6, 6, nil) + +} + +func testReadAt(t *testing.T, readerAt *ChunkReadAt, offset int64, size int, expected int, expectedErr error) { + data := make([]byte, size) + n, err := readerAt.ReadAt(data, offset) + + for _, d := range data { + fmt.Printf("%x", d) + } + fmt.Println() + + if expected != n { + t.Errorf("unexpected read size: %d, expect: %d", n, expected) + } + if err != expectedErr { + t.Errorf("unexpected read error: %v, expect: %v", err, expectedErr) + } + +} + +func TestReaderAt0(t *testing.T) { + + visibles := []VisibleInterval{ + { + start: 2, + stop: 5, + fileId: "1", + chunkSize: 9, + }, + { + start: 7, + stop: 9, + fileId: "2", + chunkSize: 9, + }, + } + + readerAt := &ChunkReadAt{ + chunkViews: ViewFromVisibleIntervals(visibles, 0, math.MaxInt64), + lookupFileId: nil, + readerLock: sync.Mutex{}, + fileSize: 10, + chunkCache: &mockChunkCache{}, + } + + testReadAt(t, readerAt, 0, 10, 10, io.EOF) + testReadAt(t, readerAt, 3, 16, 7, io.EOF) + testReadAt(t, readerAt, 3, 5, 5, nil) + + testReadAt(t, readerAt, 11, 5, 0, io.EOF) + testReadAt(t, readerAt, 10, 5, 0, io.EOF) + +} + +func TestReaderAt1(t *testing.T) { + + visibles := []VisibleInterval{ + { + start: 2, + stop: 5, + fileId: "1", + chunkSize: 9, + }, + } + + readerAt := &ChunkReadAt{ + chunkViews: ViewFromVisibleIntervals(visibles, 0, math.MaxInt64), + lookupFileId: nil, + readerLock: sync.Mutex{}, + fileSize: 20, + chunkCache: &mockChunkCache{}, + } + + testReadAt(t, readerAt, 0, 20, 20, io.EOF) + testReadAt(t, readerAt, 1, 7, 7, nil) + testReadAt(t, readerAt, 0, 1, 1, nil) + testReadAt(t, readerAt, 18, 4, 2, io.EOF) + testReadAt(t, readerAt, 12, 4, 4, nil) + testReadAt(t, readerAt, 4, 20, 16, io.EOF) + testReadAt(t, readerAt, 4, 10, 10, nil) + testReadAt(t, readerAt, 1, 10, 10, nil) + +} diff --git a/weed/filer2/redis/redis_cluster_store.go b/weed/filer/redis/redis_cluster_store.go similarity index 56% rename from weed/filer2/redis/redis_cluster_store.go rename to weed/filer/redis/redis_cluster_store.go index f1ad4b35c..9572058a8 100644 --- a/weed/filer2/redis/redis_cluster_store.go +++ b/weed/filer/redis/redis_cluster_store.go @@ -1,13 +1,13 @@ package redis import ( - "github.com/chrislusf/seaweedfs/weed/filer2" + "github.com/chrislusf/seaweedfs/weed/filer" "github.com/chrislusf/seaweedfs/weed/util" - "github.com/go-redis/redis" + "github.com/go-redis/redis/v8" ) func init() { - 
filer2.Stores = append(filer2.Stores, &RedisClusterStore{}) + filer.Stores = append(filer.Stores, &RedisClusterStore{}) } type RedisClusterStore struct { @@ -18,16 +18,16 @@ func (store *RedisClusterStore) GetName() string { return "redis_cluster" } -func (store *RedisClusterStore) Initialize(configuration util.Configuration) (err error) { +func (store *RedisClusterStore) Initialize(configuration util.Configuration, prefix string) (err error) { - configuration.SetDefault("useReadOnly", true) - configuration.SetDefault("routeByLatency", true) + configuration.SetDefault(prefix+"useReadOnly", false) + configuration.SetDefault(prefix+"routeByLatency", false) return store.initialize( - configuration.GetStringSlice("addresses"), - configuration.GetString("password"), - configuration.GetBool("useReadOnly"), - configuration.GetBool("routeByLatency"), + configuration.GetStringSlice(prefix+"addresses"), + configuration.GetString(prefix+"password"), + configuration.GetBool(prefix+"useReadOnly"), + configuration.GetBool(prefix+"routeByLatency"), ) } diff --git a/weed/filer2/redis/redis_store.go b/weed/filer/redis/redis_store.go similarity index 63% rename from weed/filer2/redis/redis_store.go rename to weed/filer/redis/redis_store.go index c56fa014c..665352a63 100644 --- a/weed/filer2/redis/redis_store.go +++ b/weed/filer/redis/redis_store.go @@ -1,13 +1,13 @@ package redis import ( - "github.com/chrislusf/seaweedfs/weed/filer2" + "github.com/chrislusf/seaweedfs/weed/filer" "github.com/chrislusf/seaweedfs/weed/util" - "github.com/go-redis/redis" + "github.com/go-redis/redis/v8" ) func init() { - filer2.Stores = append(filer2.Stores, &RedisStore{}) + filer.Stores = append(filer.Stores, &RedisStore{}) } type RedisStore struct { @@ -18,11 +18,11 @@ func (store *RedisStore) GetName() string { return "redis" } -func (store *RedisStore) Initialize(configuration util.Configuration) (err error) { +func (store *RedisStore) Initialize(configuration util.Configuration, prefix string) (err error) { return store.initialize( - configuration.GetString("address"), - configuration.GetString("password"), - configuration.GetInt("database"), + configuration.GetString(prefix+"address"), + configuration.GetString(prefix+"password"), + configuration.GetInt(prefix+"database"), ) } diff --git a/weed/filer2/redis/universal_redis_store.go b/weed/filer/redis/universal_redis_store.go similarity index 53% rename from weed/filer2/redis/universal_redis_store.go rename to weed/filer/redis/universal_redis_store.go index 62257e91e..30d11a7f4 100644 --- a/weed/filer2/redis/universal_redis_store.go +++ b/weed/filer/redis/universal_redis_store.go @@ -3,12 +3,16 @@ package redis import ( "context" "fmt" - "github.com/chrislusf/seaweedfs/weed/filer2" - "github.com/chrislusf/seaweedfs/weed/glog" - "github.com/go-redis/redis" "sort" "strings" "time" + + "github.com/go-redis/redis/v8" + + "github.com/chrislusf/seaweedfs/weed/filer" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/util" ) const ( @@ -29,14 +33,18 @@ func (store *UniversalRedisStore) RollbackTransaction(ctx context.Context) error return nil } -func (store *UniversalRedisStore) InsertEntry(ctx context.Context, entry *filer2.Entry) (err error) { +func (store *UniversalRedisStore) InsertEntry(ctx context.Context, entry *filer.Entry) (err error) { value, err := entry.EncodeAttributesAndChunks() if err != nil { return fmt.Errorf("encoding %s %+v: %v", entry.FullPath, entry.Attr, err) } - _, err = 
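
The Initialize signature now takes a prefix, so one store implementation can read its keys from whichever configuration section the filer hands it. A toy illustration of the lookup pattern; the map-backed conf type and the "redis." prefix are stand-ins, not the real util.Configuration:

```go
package main

import "fmt"

// conf mimics the small subset of util.Configuration these stores use.
type conf map[string]string

func (c conf) GetString(key string) string { return c[key] }

// initialize mirrors the reworked Initialize methods: every key is
// namespaced by the caller-supplied prefix.
func initialize(c conf, prefix string) (address, password string) {
	return c.GetString(prefix + "address"), c.GetString(prefix + "password")
}

func main() {
	c := conf{"redis.address": "localhost:6379"}
	addr, _ := initialize(c, "redis.")
	fmt.Println(addr) // localhost:6379
}
```
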
store.Client.Set(string(entry.FullPath), value, time.Duration(entry.TtlSec)*time.Second).Result() + if len(entry.Chunks) > 50 { + value = util.MaybeGzipData(value) + } + + _, err = store.Client.Set(ctx, string(entry.FullPath), value, time.Duration(entry.TtlSec)*time.Second).Result() if err != nil { return fmt.Errorf("persisting %s : %v", entry.FullPath, err) @@ -44,7 +52,7 @@ func (store *UniversalRedisStore) InsertEntry(ctx context.Context, entry *filer2 dir, name := entry.FullPath.DirAndName() if name != "" { - _, err = store.Client.SAdd(genDirectoryListKey(dir), name).Result() + _, err = store.Client.SAdd(ctx, genDirectoryListKey(dir), name).Result() if err != nil { return fmt.Errorf("persisting %s in parent dir: %v", entry.FullPath, err) } @@ -53,26 +61,26 @@ func (store *UniversalRedisStore) InsertEntry(ctx context.Context, entry *filer2 return nil } -func (store *UniversalRedisStore) UpdateEntry(ctx context.Context, entry *filer2.Entry) (err error) { +func (store *UniversalRedisStore) UpdateEntry(ctx context.Context, entry *filer.Entry) (err error) { return store.InsertEntry(ctx, entry) } -func (store *UniversalRedisStore) FindEntry(ctx context.Context, fullpath filer2.FullPath) (entry *filer2.Entry, err error) { +func (store *UniversalRedisStore) FindEntry(ctx context.Context, fullpath util.FullPath) (entry *filer.Entry, err error) { - data, err := store.Client.Get(string(fullpath)).Result() + data, err := store.Client.Get(ctx, string(fullpath)).Result() if err == redis.Nil { - return nil, filer2.ErrNotFound + return nil, filer_pb.ErrNotFound } if err != nil { return nil, fmt.Errorf("get %s : %v", fullpath, err) } - entry = &filer2.Entry{ + entry = &filer.Entry{ FullPath: fullpath, } - err = entry.DecodeAttributesAndChunks([]byte(data)) + err = entry.DecodeAttributesAndChunks(util.MaybeDecompressData([]byte(data))) if err != nil { return entry, fmt.Errorf("decode %s : %v", entry.FullPath, err) } @@ -80,9 +88,9 @@ func (store *UniversalRedisStore) FindEntry(ctx context.Context, fullpath filer2 return entry, nil } -func (store *UniversalRedisStore) DeleteEntry(ctx context.Context, fullpath filer2.FullPath) (err error) { +func (store *UniversalRedisStore) DeleteEntry(ctx context.Context, fullpath util.FullPath) (err error) { - _, err = store.Client.Del(string(fullpath)).Result() + _, err = store.Client.Del(ctx, string(fullpath)).Result() if err != nil { return fmt.Errorf("delete %s : %v", fullpath, err) @@ -90,7 +98,7 @@ func (store *UniversalRedisStore) DeleteEntry(ctx context.Context, fullpath file dir, name := fullpath.DirAndName() if name != "" { - _, err = store.Client.SRem(genDirectoryListKey(dir), name).Result() + _, err = store.Client.SRem(ctx, genDirectoryListKey(dir), name).Result() if err != nil { return fmt.Errorf("delete %s in parent dir: %v", fullpath, err) } @@ -99,16 +107,16 @@ func (store *UniversalRedisStore) DeleteEntry(ctx context.Context, fullpath file return nil } -func (store *UniversalRedisStore) DeleteFolderChildren(ctx context.Context, fullpath filer2.FullPath) (err error) { +func (store *UniversalRedisStore) DeleteFolderChildren(ctx context.Context, fullpath util.FullPath) (err error) { - members, err := store.Client.SMembers(genDirectoryListKey(string(fullpath))).Result() + members, err := store.Client.SMembers(ctx, genDirectoryListKey(string(fullpath))).Result() if err != nil { return fmt.Errorf("delete folder %s : %v", fullpath, err) } for _, fileName := range members { - path := filer2.NewFullPath(string(fullpath), fileName) - _, err = 
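
The switch from go-redis to go-redis/v8 is why every call in this hunk gains a leading context argument. A minimal sketch of the v8 calling convention, assuming a Redis instance on localhost:6379:

```go
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/go-redis/redis/v8"
)

func main() {
	ctx := context.Background()
	client := redis.NewClient(&redis.Options{Addr: "localhost:6379"}) // assumed local instance

	// v8 threads a context through every command, which is what the
	// migrated store methods now pass along from their callers.
	if err := client.Set(ctx, "entry-key", "entry-bytes", 60*time.Second).Err(); err != nil {
		panic(err)
	}
	val, err := client.Get(ctx, "entry-key").Result()
	if err == redis.Nil {
		fmt.Println("missing or expired") // the store maps this to ErrNotFound
	} else if err != nil {
		panic(err)
	} else {
		fmt.Println(val)
	}
}
```
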
store.Client.Del(string(path)).Result() + path := util.NewFullPath(string(fullpath), fileName) + _, err = store.Client.Del(ctx, string(path)).Result() if err != nil { return fmt.Errorf("delete %s in parent dir: %v", fullpath, err) } @@ -117,12 +125,16 @@ func (store *UniversalRedisStore) DeleteFolderChildren(ctx context.Context, full return nil } -func (store *UniversalRedisStore) ListDirectoryEntries(ctx context.Context, fullpath filer2.FullPath, startFileName string, inclusive bool, - limit int) (entries []*filer2.Entry, err error) { +func (store *UniversalRedisStore) ListDirectoryPrefixedEntries(ctx context.Context, dirPath util.FullPath, startFileName string, includeStartFile bool, limit int64, prefix string, eachEntryFunc filer.ListEachEntryFunc) (lastFileName string, err error) { + return lastFileName, filer.ErrUnsupportedListDirectoryPrefixed +} + +func (store *UniversalRedisStore) ListDirectoryEntries(ctx context.Context, dirPath util.FullPath, startFileName string, includeStartFile bool, limit int64, eachEntryFunc filer.ListEachEntryFunc) (lastFileName string, err error) { - members, err := store.Client.SMembers(genDirectoryListKey(string(fullpath))).Result() + dirListKey := genDirectoryListKey(string(dirPath)) + members, err := store.Client.SMembers(ctx, dirListKey).Result() if err != nil { - return nil, fmt.Errorf("list %s : %v", fullpath, err) + return lastFileName, fmt.Errorf("list %s : %v", dirPath, err) } // skip @@ -131,7 +143,7 @@ func (store *UniversalRedisStore) ListDirectoryEntries(ctx context.Context, full for _, m := range members { if strings.Compare(m, startFileName) >= 0 { if m == startFileName { - if inclusive { + if includeStartFile { t = append(t, m) } } else { @@ -148,24 +160,41 @@ func (store *UniversalRedisStore) ListDirectoryEntries(ctx context.Context, full }) // limit - if limit < len(members) { + if limit < int64(len(members)) { members = members[:limit] } // fetch entry meta for _, fileName := range members { - path := filer2.NewFullPath(string(fullpath), fileName) + path := util.NewFullPath(string(dirPath), fileName) entry, err := store.FindEntry(ctx, path) + lastFileName = fileName if err != nil { glog.V(0).Infof("list %s : %v", path, err) + if err == filer_pb.ErrNotFound { + continue + } } else { - entries = append(entries, entry) + if entry.TtlSec > 0 { + if entry.Attr.Crtime.Add(time.Duration(entry.TtlSec) * time.Second).Before(time.Now()) { + store.Client.Del(ctx, string(path)).Result() + store.Client.SRem(ctx, dirListKey, fileName).Result() + continue + } + } + if !eachEntryFunc(entry) { + break + } } } - return entries, err + return lastFileName, err } func genDirectoryListKey(dir string) (dirList string) { return dir + DIR_LIST_MARKER } + +func (store *UniversalRedisStore) Shutdown() { + store.Client.Close() +} diff --git a/weed/filer/redis/universal_redis_store_kv.go b/weed/filer/redis/universal_redis_store_kv.go new file mode 100644 index 000000000..ad6e389ed --- /dev/null +++ b/weed/filer/redis/universal_redis_store_kv.go @@ -0,0 +1,42 @@ +package redis + +import ( + "context" + "fmt" + + "github.com/chrislusf/seaweedfs/weed/filer" + "github.com/go-redis/redis/v8" +) + +func (store *UniversalRedisStore) KvPut(ctx context.Context, key []byte, value []byte) (err error) { + + _, err = store.Client.Set(ctx, string(key), value, 0).Result() + + if err != nil { + return fmt.Errorf("kv put: %v", err) + } + + return nil +} + +func (store *UniversalRedisStore) KvGet(ctx context.Context, key []byte) (value []byte, err error) { + + data, err := 
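
ListDirectoryEntries now prunes expired entries lazily while scanning: when an entry's creation time plus its TTL has passed, the store deletes both the entry key and its directory-set member and skips it. The check itself, isolated into a runnable sketch:

```go
package main

import (
	"fmt"
	"time"
)

// expired mirrors the condition the listing loop evaluates before
// returning an entry to the caller.
func expired(crtime time.Time, ttlSec int32) bool {
	return ttlSec > 0 && crtime.Add(time.Duration(ttlSec)*time.Second).Before(time.Now())
}

func main() {
	fmt.Println(expired(time.Now().Add(-2*time.Hour), 3600)) // true: created 2h ago, 1h TTL
	fmt.Println(expired(time.Now(), 0))                      // false: TTL disabled
}
```
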
store.Client.Get(ctx, string(key)).Result() + + if err == redis.Nil { + return nil, filer.ErrKvNotFound + } + + return []byte(data), err +} + +func (store *UniversalRedisStore) KvDelete(ctx context.Context, key []byte) (err error) { + + _, err = store.Client.Del(ctx, string(key)).Result() + + if err != nil { + return fmt.Errorf("kv delete: %v", err) + } + + return nil +} diff --git a/weed/filer/redis2/redis_cluster_store.go b/weed/filer/redis2/redis_cluster_store.go new file mode 100644 index 000000000..22d09da25 --- /dev/null +++ b/weed/filer/redis2/redis_cluster_store.go @@ -0,0 +1,44 @@ +package redis2 + +import ( + "github.com/chrislusf/seaweedfs/weed/filer" + "github.com/chrislusf/seaweedfs/weed/util" + "github.com/go-redis/redis/v8" +) + +func init() { + filer.Stores = append(filer.Stores, &RedisCluster2Store{}) +} + +type RedisCluster2Store struct { + UniversalRedis2Store +} + +func (store *RedisCluster2Store) GetName() string { + return "redis_cluster2" +} + +func (store *RedisCluster2Store) Initialize(configuration util.Configuration, prefix string) (err error) { + + configuration.SetDefault(prefix+"useReadOnly", false) + configuration.SetDefault(prefix+"routeByLatency", false) + + return store.initialize( + configuration.GetStringSlice(prefix+"addresses"), + configuration.GetString(prefix+"password"), + configuration.GetBool(prefix+"useReadOnly"), + configuration.GetBool(prefix+"routeByLatency"), + configuration.GetStringSlice(prefix+"superLargeDirectories"), + ) +} + +func (store *RedisCluster2Store) initialize(addresses []string, password string, readOnly, routeByLatency bool, superLargeDirectories []string) (err error) { + store.Client = redis.NewClusterClient(&redis.ClusterOptions{ + Addrs: addresses, + Password: password, + ReadOnly: readOnly, + RouteByLatency: routeByLatency, + }) + store.loadSuperLargeDirectories(superLargeDirectories) + return +} diff --git a/weed/filer/redis2/redis_store.go b/weed/filer/redis2/redis_store.go new file mode 100644 index 000000000..8eb97e374 --- /dev/null +++ b/weed/filer/redis2/redis_store.go @@ -0,0 +1,38 @@ +package redis2 + +import ( + "github.com/chrislusf/seaweedfs/weed/filer" + "github.com/chrislusf/seaweedfs/weed/util" + "github.com/go-redis/redis/v8" +) + +func init() { + filer.Stores = append(filer.Stores, &Redis2Store{}) +} + +type Redis2Store struct { + UniversalRedis2Store +} + +func (store *Redis2Store) GetName() string { + return "redis2" +} + +func (store *Redis2Store) Initialize(configuration util.Configuration, prefix string) (err error) { + return store.initialize( + configuration.GetString(prefix+"address"), + configuration.GetString(prefix+"password"), + configuration.GetInt(prefix+"database"), + configuration.GetStringSlice(prefix+"superLargeDirectories"), + ) +} + +func (store *Redis2Store) initialize(hostPort string, password string, database int, superLargeDirectories []string) (err error) { + store.Client = redis.NewClient(&redis.Options{ + Addr: hostPort, + Password: password, + DB: database, + }) + store.loadSuperLargeDirectories(superLargeDirectories) + return +} diff --git a/weed/filer/redis2/universal_redis_store.go b/weed/filer/redis2/universal_redis_store.go new file mode 100644 index 000000000..aab3d1f4a --- /dev/null +++ b/weed/filer/redis2/universal_redis_store.go @@ -0,0 +1,204 @@ +package redis2 + +import ( + "context" + "fmt" + "time" + + "github.com/go-redis/redis/v8" + + "github.com/chrislusf/seaweedfs/weed/filer" + "github.com/chrislusf/seaweedfs/weed/glog" + 
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/util" +) + +const ( + DIR_LIST_MARKER = "\x00" +) + +type UniversalRedis2Store struct { + Client redis.UniversalClient + superLargeDirectoryHash map[string]bool +} + +func (store *UniversalRedis2Store) isSuperLargeDirectory(dir string) (isSuperLargeDirectory bool) { + _, isSuperLargeDirectory = store.superLargeDirectoryHash[dir] + return +} + +func (store *UniversalRedis2Store) loadSuperLargeDirectories(superLargeDirectories []string) { + // set directory hash + store.superLargeDirectoryHash = make(map[string]bool) + for _, dir := range superLargeDirectories { + store.superLargeDirectoryHash[dir] = true + } +} + +func (store *UniversalRedis2Store) BeginTransaction(ctx context.Context) (context.Context, error) { + return ctx, nil +} +func (store *UniversalRedis2Store) CommitTransaction(ctx context.Context) error { + return nil +} +func (store *UniversalRedis2Store) RollbackTransaction(ctx context.Context) error { + return nil +} + +func (store *UniversalRedis2Store) InsertEntry(ctx context.Context, entry *filer.Entry) (err error) { + + value, err := entry.EncodeAttributesAndChunks() + if err != nil { + return fmt.Errorf("encoding %s %+v: %v", entry.FullPath, entry.Attr, err) + } + + if len(entry.Chunks) > 50 { + value = util.MaybeGzipData(value) + } + + if err = store.Client.Set(ctx, string(entry.FullPath), value, time.Duration(entry.TtlSec)*time.Second).Err(); err != nil { + return fmt.Errorf("persisting %s : %v", entry.FullPath, err) + } + + dir, name := entry.FullPath.DirAndName() + if store.isSuperLargeDirectory(dir) { + return nil + } + + if name != "" { + if err = store.Client.ZAddNX(ctx, genDirectoryListKey(dir), &redis.Z{Score: 0, Member: name}).Err(); err != nil { + return fmt.Errorf("persisting %s in parent dir: %v", entry.FullPath, err) + } + } + + return nil +} + +func (store *UniversalRedis2Store) UpdateEntry(ctx context.Context, entry *filer.Entry) (err error) { + + return store.InsertEntry(ctx, entry) +} + +func (store *UniversalRedis2Store) FindEntry(ctx context.Context, fullpath util.FullPath) (entry *filer.Entry, err error) { + + data, err := store.Client.Get(ctx, string(fullpath)).Result() + if err == redis.Nil { + return nil, filer_pb.ErrNotFound + } + + if err != nil { + return nil, fmt.Errorf("get %s : %v", fullpath, err) + } + + entry = &filer.Entry{ + FullPath: fullpath, + } + err = entry.DecodeAttributesAndChunks(util.MaybeDecompressData([]byte(data))) + if err != nil { + return entry, fmt.Errorf("decode %s : %v", entry.FullPath, err) + } + + return entry, nil +} + +func (store *UniversalRedis2Store) DeleteEntry(ctx context.Context, fullpath util.FullPath) (err error) { + + _, err = store.Client.Del(ctx, genDirectoryListKey(string(fullpath))).Result() + if err != nil { + return fmt.Errorf("delete dir list %s : %v", fullpath, err) + } + + _, err = store.Client.Del(ctx, string(fullpath)).Result() + if err != nil { + return fmt.Errorf("delete %s : %v", fullpath, err) + } + + dir, name := fullpath.DirAndName() + if store.isSuperLargeDirectory(dir) { + return nil + } + if name != "" { + _, err = store.Client.ZRem(ctx, genDirectoryListKey(dir), name).Result() + if err != nil { + return fmt.Errorf("DeleteEntry %s in parent dir: %v", fullpath, err) + } + } + + return nil +} + +func (store *UniversalRedis2Store) DeleteFolderChildren(ctx context.Context, fullpath util.FullPath) (err error) { + + if store.isSuperLargeDirectory(string(fullpath)) { + return nil + } + + members, err := 
store.Client.ZRange(ctx, genDirectoryListKey(string(fullpath)), 0, -1).Result() + if err != nil { + return fmt.Errorf("DeleteFolderChildren %s : %v", fullpath, err) + } + + for _, fileName := range members { + path := util.NewFullPath(string(fullpath), fileName) + _, err = store.Client.Del(ctx, string(path)).Result() + if err != nil { + return fmt.Errorf("DeleteFolderChildren %s in parent dir: %v", fullpath, err) + } + } + + return nil +} + +func (store *UniversalRedis2Store) ListDirectoryPrefixedEntries(ctx context.Context, dirPath util.FullPath, startFileName string, includeStartFile bool, limit int64, prefix string, eachEntryFunc filer.ListEachEntryFunc) (lastFileName string, err error) { + return lastFileName, filer.ErrUnsupportedListDirectoryPrefixed +} + +func (store *UniversalRedis2Store) ListDirectoryEntries(ctx context.Context, dirPath util.FullPath, startFileName string, includeStartFile bool, limit int64, eachEntryFunc filer.ListEachEntryFunc) (lastFileName string, err error) { + + dirListKey := genDirectoryListKey(string(dirPath)) + start := int64(0) + if startFileName != "" { + start, _ = store.Client.ZRank(ctx, dirListKey, startFileName).Result() + if !includeStartFile { + start++ + } + } + members, err := store.Client.ZRange(ctx, dirListKey, start, start+int64(limit)-1).Result() + if err != nil { + return lastFileName, fmt.Errorf("list %s : %v", dirPath, err) + } + + // fetch entry meta + for _, fileName := range members { + path := util.NewFullPath(string(dirPath), fileName) + entry, err := store.FindEntry(ctx, path) + lastFileName = fileName + if err != nil { + glog.V(0).Infof("list %s : %v", path, err) + if err == filer_pb.ErrNotFound { + continue + } + } else { + if entry.TtlSec > 0 { + if entry.Attr.Crtime.Add(time.Duration(entry.TtlSec) * time.Second).Before(time.Now()) { + store.Client.Del(ctx, string(path)).Result() + store.Client.ZRem(ctx, dirListKey, fileName).Result() + continue + } + } + if !eachEntryFunc(entry) { + break + } + } + } + + return lastFileName, err +} + +func genDirectoryListKey(dir string) (dirList string) { + return dir + DIR_LIST_MARKER +} + +func (store *UniversalRedis2Store) Shutdown() { + store.Client.Close() +} diff --git a/weed/filer/redis2/universal_redis_store_kv.go b/weed/filer/redis2/universal_redis_store_kv.go new file mode 100644 index 000000000..bde994dc9 --- /dev/null +++ b/weed/filer/redis2/universal_redis_store_kv.go @@ -0,0 +1,42 @@ +package redis2 + +import ( + "context" + "fmt" + + "github.com/chrislusf/seaweedfs/weed/filer" + "github.com/go-redis/redis/v8" +) + +func (store *UniversalRedis2Store) KvPut(ctx context.Context, key []byte, value []byte) (err error) { + + _, err = store.Client.Set(ctx, string(key), value, 0).Result() + + if err != nil { + return fmt.Errorf("kv put: %v", err) + } + + return nil +} + +func (store *UniversalRedis2Store) KvGet(ctx context.Context, key []byte) (value []byte, err error) { + + data, err := store.Client.Get(ctx, string(key)).Result() + + if err == redis.Nil { + return nil, filer.ErrKvNotFound + } + + return []byte(data), err +} + +func (store *UniversalRedis2Store) KvDelete(ctx context.Context, key []byte) (err error) { + + _, err = store.Client.Del(ctx, string(key)).Result() + + if err != nil { + return fmt.Errorf("kv delete: %v", err) + } + + return nil +} diff --git a/weed/filer/rocksdb/README.md b/weed/filer/rocksdb/README.md new file mode 100644 index 000000000..6bae6d34e --- /dev/null +++ b/weed/filer/rocksdb/README.md @@ -0,0 +1,41 @@ +# Prepare the compilation environment on linux +- 
sudo add-apt-repository -y ppa:ubuntu-toolchain-r/test +- sudo apt-get update -qq +- sudo apt-get install gcc-6 g++-6 libsnappy-dev zlib1g-dev libbz2-dev -qq +- export CXX="g++-6" CC="gcc-6" + +- wget https://launchpad.net/ubuntu/+archive/primary/+files/libgflags2_2.0-1.1ubuntu1_amd64.deb +- sudo dpkg -i libgflags2_2.0-1.1ubuntu1_amd64.deb +- wget https://launchpad.net/ubuntu/+archive/primary/+files/libgflags-dev_2.0-1.1ubuntu1_amd64.deb +- sudo dpkg -i libgflags-dev_2.0-1.1ubuntu1_amd64.deb + +# Prepare the compilation environment on mac os +``` +brew install snappy +``` + +# install rocksdb: +``` + export ROCKSDB_HOME=/Users/chris/dev/rocksdb + + git clone https://github.com/facebook/rocksdb.git $ROCKSDB_HOME + pushd $ROCKSDB_HOME + make clean + make install-static + popd +``` + +# install gorocksdb + +``` +export CGO_CFLAGS="-I$ROCKSDB_HOME/include" +export CGO_LDFLAGS="-L$ROCKSDB_HOME -lrocksdb -lstdc++ -lm -lz -lbz2 -lsnappy -llz4 -lzstd" + +go get github.com/tecbot/gorocksdb +``` +# compile with rocksdb + +``` +cd ~/go/src/github.com/chrislusf/seaweedfs/weed +go install -tags rocksdb +``` diff --git a/weed/filer/rocksdb/rocksdb_store.go b/weed/filer/rocksdb/rocksdb_store.go new file mode 100644 index 000000000..379a18c62 --- /dev/null +++ b/weed/filer/rocksdb/rocksdb_store.go @@ -0,0 +1,304 @@ +// +build rocksdb + +package rocksdb + +import ( + "bytes" + "context" + "crypto/md5" + "fmt" + "io" + "os" + + "github.com/tecbot/gorocksdb" + + "github.com/chrislusf/seaweedfs/weed/filer" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + weed_util "github.com/chrislusf/seaweedfs/weed/util" +) + +func init() { + filer.Stores = append(filer.Stores, &RocksDBStore{}) +} + +type options struct { + opt *gorocksdb.Options + ro *gorocksdb.ReadOptions + wo *gorocksdb.WriteOptions +} + +func (opt *options) init() { + opt.opt = gorocksdb.NewDefaultOptions() + opt.ro = gorocksdb.NewDefaultReadOptions() + opt.wo = gorocksdb.NewDefaultWriteOptions() +} + +func (opt *options) close() { + opt.opt.Destroy() + opt.ro.Destroy() + opt.wo.Destroy() +} + +type RocksDBStore struct { + path string + db *gorocksdb.DB + options +} + +func (store *RocksDBStore) GetName() string { + return "rocksdb" +} + +func (store *RocksDBStore) Initialize(configuration weed_util.Configuration, prefix string) (err error) { + dir := configuration.GetString(prefix + "dir") + return store.initialize(dir) +} + +func (store *RocksDBStore) initialize(dir string) (err error) { + glog.Infof("filer store rocksdb dir: %s", dir) + os.MkdirAll(dir, 0755) + if err := weed_util.TestFolderWritable(dir); err != nil { + return fmt.Errorf("Check Level Folder %s Writable: %s", dir, err) + } + store.options.init() + store.opt.SetCreateIfMissing(true) + // reduce write amplification + // also avoid expired data stored in highest level never get compacted + store.opt.SetLevelCompactionDynamicLevelBytes(true) + store.opt.SetCompactionFilter(NewTTLFilter()) + // store.opt.SetMaxBackgroundCompactions(2) + + store.db, err = gorocksdb.OpenDb(store.opt, dir) + + return +} + +func (store *RocksDBStore) BeginTransaction(ctx context.Context) (context.Context, error) { + return ctx, nil +} +func (store *RocksDBStore) CommitTransaction(ctx context.Context) error { + return nil +} +func (store *RocksDBStore) RollbackTransaction(ctx context.Context) error { + return nil +} + +func (store *RocksDBStore) InsertEntry(ctx context.Context, entry *filer.Entry) (err error) { + dir, name := entry.DirAndName() + key := 
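
The RocksDB store builds its keys as md5(directory) followed by the raw file name (see genKey and hashToBytes further down in this file), so all children of one directory share a fixed 16-byte prefix and a directory listing becomes a single prefix scan. A self-contained illustration of that key layout:

```go
package main

import (
	"crypto/md5"
	"fmt"
)

// genKey mirrors the key layout used by this store: the md5 of the
// directory, then the file name appended verbatim.
func genKey(dir, name string) []byte {
	h := md5.Sum([]byte(dir))
	return append(h[:], name...)
}

func main() {
	a := genKey("/home/chris", "file1.jpg")
	b := genKey("/home/chris", "file2.jpg")
	// Identical 16-byte prefixes: one directory's children are contiguous,
	// so listing them is a plain RocksDB prefix scan.
	fmt.Printf("%x\n%x\n", a[:md5.Size], b[:md5.Size])
}
```
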
genKey(dir, name) + + value, err := entry.EncodeAttributesAndChunks() + if err != nil { + return fmt.Errorf("encoding %s %+v: %v", entry.FullPath, entry.Attr, err) + } + + err = store.db.Put(store.wo, key, value) + + if err != nil { + return fmt.Errorf("persisting %s : %v", entry.FullPath, err) + } + + // println("saved", entry.FullPath, "chunks", len(entry.Chunks)) + + return nil +} + +func (store *RocksDBStore) UpdateEntry(ctx context.Context, entry *filer.Entry) (err error) { + + return store.InsertEntry(ctx, entry) +} + +func (store *RocksDBStore) FindEntry(ctx context.Context, fullpath weed_util.FullPath) (entry *filer.Entry, err error) { + dir, name := fullpath.DirAndName() + key := genKey(dir, name) + data, err := store.db.Get(store.ro, key) + + if data == nil { + return nil, filer_pb.ErrNotFound + } + defer data.Free() + + if err != nil { + return nil, fmt.Errorf("get %s : %v", fullpath, err) + } + + entry = &filer.Entry{ + FullPath: fullpath, + } + err = entry.DecodeAttributesAndChunks(data.Data()) + if err != nil { + return entry, fmt.Errorf("decode %s : %v", entry.FullPath, err) + } + + // println("read", entry.FullPath, "chunks", len(entry.Chunks), "data", len(data), string(data)) + + return entry, nil +} + +func (store *RocksDBStore) DeleteEntry(ctx context.Context, fullpath weed_util.FullPath) (err error) { + dir, name := fullpath.DirAndName() + key := genKey(dir, name) + + err = store.db.Delete(store.wo, key) + if err != nil { + return fmt.Errorf("delete %s : %v", fullpath, err) + } + + return nil +} + +func (store *RocksDBStore) DeleteFolderChildren(ctx context.Context, fullpath weed_util.FullPath) (err error) { + directoryPrefix := genDirectoryKeyPrefix(fullpath, "") + + batch := gorocksdb.NewWriteBatch() + defer batch.Destroy() + + ro := gorocksdb.NewDefaultReadOptions() + defer ro.Destroy() + ro.SetFillCache(false) + + iter := store.db.NewIterator(ro) + defer iter.Close() + err = enumerate(iter, directoryPrefix, nil, false, -1, func(key, value []byte) bool { + batch.Delete(key) + return true + }) + if err != nil { + return fmt.Errorf("delete list %s : %v", fullpath, err) + } + + err = store.db.Write(store.wo, batch) + + if err != nil { + return fmt.Errorf("delete %s : %v", fullpath, err) + } + + return nil +} + +func enumerate(iter *gorocksdb.Iterator, prefix, lastKey []byte, includeLastKey bool, limit int64, fn func(key, value []byte) bool) (err error) { + + if len(lastKey) == 0 { + iter.Seek(prefix) + } else { + iter.Seek(lastKey) + if !includeLastKey { + if iter.Valid() { + if bytes.Equal(iter.Key().Data(), lastKey) { + iter.Next() + } + } + } + } + + i := int64(0) + for ; iter.Valid(); iter.Next() { + + if limit > 0 { + i++ + if i > limit { + break + } + } + + key := iter.Key().Data() + + if !bytes.HasPrefix(key, prefix) { + break + } + + ret := fn(key, iter.Value().Data()) + + if !ret { + break + } + + } + + if err := iter.Err(); err != nil { + return fmt.Errorf("prefix scan iterator: %v", err) + } + return nil +} + +func (store *RocksDBStore) ListDirectoryEntries(ctx context.Context, dirPath weed_util.FullPath, startFileName string, includeStartFile bool, limit int64, eachEntryFunc filer.ListEachEntryFunc) (lastFileName string, err error) { + return store.ListDirectoryPrefixedEntries(ctx, dirPath, startFileName, includeStartFile, limit, "", eachEntryFunc) +} + +func (store *RocksDBStore) ListDirectoryPrefixedEntries(ctx context.Context, dirPath weed_util.FullPath, startFileName string, includeStartFile bool, limit int64, prefix string, eachEntryFunc 
filer.ListEachEntryFunc) (lastFileName string, err error) { + + directoryPrefix := genDirectoryKeyPrefix(dirPath, prefix) + lastFileStart := directoryPrefix + if startFileName != "" { + lastFileStart = genDirectoryKeyPrefix(dirPath, startFileName) + } + + ro := gorocksdb.NewDefaultReadOptions() + defer ro.Destroy() + ro.SetFillCache(false) + + iter := store.db.NewIterator(ro) + defer iter.Close() + err = enumerate(iter, directoryPrefix, lastFileStart, includeStartFile, limit, func(key, value []byte) bool { + fileName := getNameFromKey(key) + if fileName == "" { + return true + } + entry := &filer.Entry{ + FullPath: weed_util.NewFullPath(string(dirPath), fileName), + } + lastFileName = fileName + + // println("list", entry.FullPath, "chunks", len(entry.Chunks)) + if decodeErr := entry.DecodeAttributesAndChunks(value); decodeErr != nil { + err = decodeErr + glog.V(0).Infof("list %s : %v", entry.FullPath, err) + return false + } + if !eachEntryFunc(entry) { + return false + } + return true + }) + if err != nil { + return lastFileName, fmt.Errorf("prefix list %s : %v", dirPath, err) + } + + return lastFileName, err +} + +func genKey(dirPath, fileName string) (key []byte) { + key = hashToBytes(dirPath) + key = append(key, []byte(fileName)...) + return key +} + +func genDirectoryKeyPrefix(fullpath weed_util.FullPath, startFileName string) (keyPrefix []byte) { + keyPrefix = hashToBytes(string(fullpath)) + if len(startFileName) > 0 { + keyPrefix = append(keyPrefix, []byte(startFileName)...) + } + return keyPrefix +} + +func getNameFromKey(key []byte) string { + + return string(key[md5.Size:]) + +} + +// hash directory, and use last byte for partitioning +func hashToBytes(dir string) []byte { + h := md5.New() + io.WriteString(h, dir) + + b := h.Sum(nil) + + return b +} + +func (store *RocksDBStore) Shutdown() { + store.db.Close() + store.options.close() +} diff --git a/weed/filer/rocksdb/rocksdb_store_kv.go b/weed/filer/rocksdb/rocksdb_store_kv.go new file mode 100644 index 000000000..cf1214d5b --- /dev/null +++ b/weed/filer/rocksdb/rocksdb_store_kv.go @@ -0,0 +1,47 @@ +// +build rocksdb + +package rocksdb + +import ( + "context" + "fmt" + + "github.com/chrislusf/seaweedfs/weed/filer" +) + +func (store *RocksDBStore) KvPut(ctx context.Context, key []byte, value []byte) (err error) { + + err = store.db.Put(store.wo, key, value) + + if err != nil { + return fmt.Errorf("kv put: %v", err) + } + + return nil +} + +func (store *RocksDBStore) KvGet(ctx context.Context, key []byte) (value []byte, err error) { + + value, err = store.db.GetBytes(store.ro, key) + + if value == nil { + return nil, filer.ErrKvNotFound + } + + if err != nil { + return nil, fmt.Errorf("kv get: %v", err) + } + + return +} + +func (store *RocksDBStore) KvDelete(ctx context.Context, key []byte) (err error) { + + err = store.db.Delete(store.wo, key) + + if err != nil { + return fmt.Errorf("kv delete: %v", err) + } + + return nil +} diff --git a/weed/filer/rocksdb/rocksdb_store_test.go b/weed/filer/rocksdb/rocksdb_store_test.go new file mode 100644 index 000000000..f6e755b4b --- /dev/null +++ b/weed/filer/rocksdb/rocksdb_store_test.go @@ -0,0 +1,117 @@ +// +build rocksdb + +package rocksdb + +import ( + "context" + "fmt" + "io/ioutil" + "os" + "testing" + "time" + + "github.com/chrislusf/seaweedfs/weed/filer" + "github.com/chrislusf/seaweedfs/weed/util" +) + +func TestCreateAndFind(t *testing.T) { + testFiler := filer.NewFiler(nil, nil, "", 0, "", "", "", nil) + dir, _ := ioutil.TempDir("", "seaweedfs_filer_test") + defer 
os.RemoveAll(dir) + store := &RocksDBStore{} + store.initialize(dir) + testFiler.SetStore(store) + + fullpath := util.FullPath("/home/chris/this/is/one/file1.jpg") + + ctx := context.Background() + + entry1 := &filer.Entry{ + FullPath: fullpath, + Attr: filer.Attr{ + Mode: 0440, + Uid: 1234, + Gid: 5678, + }, + } + + if err := testFiler.CreateEntry(ctx, entry1, false, false, nil); err != nil { + t.Errorf("create entry %v: %v", entry1.FullPath, err) + return + } + + entry, err := testFiler.FindEntry(ctx, fullpath) + + if err != nil { + t.Errorf("find entry: %v", err) + return + } + + if entry.FullPath != entry1.FullPath { + t.Errorf("find wrong entry: %v", entry.FullPath) + return + } + + // checking one upper directory + entries, _, _ := testFiler.ListDirectoryEntries(ctx, util.FullPath("/home/chris/this/is/one"), "", false, 100, "", "", "") + if len(entries) != 1 { + t.Errorf("list entries count: %v", len(entries)) + return + } + + // checking one upper directory + entries, _, _ = testFiler.ListDirectoryEntries(ctx, util.FullPath("/"), "", false, 100, "", "", "") + if len(entries) != 1 { + t.Errorf("list entries count: %v", len(entries)) + return + } + +} + +func TestEmptyRoot(t *testing.T) { + testFiler := filer.NewFiler(nil, nil, "", 0, "", "", "", nil) + dir, _ := ioutil.TempDir("", "seaweedfs_filer_test2") + defer os.RemoveAll(dir) + store := &RocksDBStore{} + store.initialize(dir) + testFiler.SetStore(store) + + ctx := context.Background() + + // checking one upper directory + entries, _, err := testFiler.ListDirectoryEntries(ctx, util.FullPath("/"), "", false, 100, "", "", "") + if err != nil { + t.Errorf("list entries: %v", err) + return + } + if len(entries) != 0 { + t.Errorf("list entries count: %v", len(entries)) + return + } + +} + +func BenchmarkInsertEntry(b *testing.B) { + testFiler := filer.NewFiler(nil, nil, "", 0, "", "", "", nil) + dir, _ := ioutil.TempDir("", "seaweedfs_filer_bench") + defer os.RemoveAll(dir) + store := &RocksDBStore{} + store.initialize(dir) + testFiler.SetStore(store) + + ctx := context.Background() + + b.ReportAllocs() + + for i := 0; i < b.N; i++ { + entry := &filer.Entry{ + FullPath: util.FullPath(fmt.Sprintf("/file%d.txt", i)), + Attr: filer.Attr{ + Crtime: time.Now(), + Mtime: time.Now(), + Mode: os.FileMode(0644), + }, + } + store.InsertEntry(ctx, entry) + } +} diff --git a/weed/filer/rocksdb/rocksdb_ttl.go b/weed/filer/rocksdb/rocksdb_ttl.go new file mode 100644 index 000000000..faed22310 --- /dev/null +++ b/weed/filer/rocksdb/rocksdb_ttl.go @@ -0,0 +1,40 @@ +//+build rocksdb + +package rocksdb + +import ( + "time" + + "github.com/tecbot/gorocksdb" + + "github.com/chrislusf/seaweedfs/weed/filer" +) + +type TTLFilter struct { + skipLevel0 bool +} + +func NewTTLFilter() gorocksdb.CompactionFilter { + return &TTLFilter{ + skipLevel0: true, + } +} + +func (t *TTLFilter) Filter(level int, key, val []byte) (remove bool, newVal []byte) { + // decode could be slow, causing write stall + // level >0 sst can run compaction in parallel + if !t.skipLevel0 || level > 0 { + entry := filer.Entry{} + if err := entry.DecodeAttributesAndChunks(val); err == nil { + if entry.TtlSec > 0 && + entry.Crtime.Add(time.Duration(entry.TtlSec)*time.Second).Before(time.Now()) { + return true, nil + } + } + } + return false, val +} + +func (t *TTLFilter) Name() string { + return "TTLFilter" +} diff --git a/weed/filer/s3iam_conf.go b/weed/filer/s3iam_conf.go new file mode 100644 index 000000000..92387fb09 --- /dev/null +++ b/weed/filer/s3iam_conf.go @@ -0,0 +1,25 @@ +package 
filer + +import ( + "bytes" + "github.com/chrislusf/seaweedfs/weed/pb/iam_pb" + "github.com/golang/protobuf/jsonpb" + "io" +) + +func ParseS3ConfigurationFromBytes(content []byte, config *iam_pb.S3ApiConfiguration) error { + if err := jsonpb.Unmarshal(bytes.NewBuffer(content), config); err != nil { + return err + } + return nil +} + +func S3ConfigurationToText(writer io.Writer, config *iam_pb.S3ApiConfiguration) error { + + m := jsonpb.Marshaler{ + EmitDefaults: false, + Indent: " ", + } + + return m.Marshal(writer, config) +} diff --git a/weed/filer/s3iam_conf_test.go b/weed/filer/s3iam_conf_test.go new file mode 100644 index 000000000..65cc49840 --- /dev/null +++ b/weed/filer/s3iam_conf_test.go @@ -0,0 +1,57 @@ +package filer + +import ( + "bytes" + . "github.com/chrislusf/seaweedfs/weed/s3api/s3_constants" + "testing" + + "github.com/chrislusf/seaweedfs/weed/pb/iam_pb" + + "github.com/stretchr/testify/assert" +) + +func TestS3Conf(t *testing.T) { + s3Conf := &iam_pb.S3ApiConfiguration{ + Identities: []*iam_pb.Identity{ + { + Name: "some_name", + Credentials: []*iam_pb.Credential{ + { + AccessKey: "some_access_key1", + SecretKey: "some_secret_key1", + }, + }, + Actions: []string{ + ACTION_ADMIN, + ACTION_READ, + ACTION_WRITE, + }, + }, + { + Name: "some_read_only_user", + Credentials: []*iam_pb.Credential{ + { + AccessKey: "some_access_key2", + SecretKey: "some_secret_key2", + }, + }, + Actions: []string{ + ACTION_READ, + ACTION_TAGGING, + ACTION_LIST, + }, + }, + }, + } + var buf bytes.Buffer + err := S3ConfigurationToText(&buf, s3Conf) + assert.Equal(t, err, nil) + s3ConfSaved := &iam_pb.S3ApiConfiguration{} + err = ParseS3ConfigurationFromBytes(buf.Bytes(), s3ConfSaved) + assert.Equal(t, err, nil) + + assert.Equal(t, "some_name", s3ConfSaved.Identities[0].Name) + assert.Equal(t, "some_read_only_user", s3ConfSaved.Identities[1].Name) + assert.Equal(t, "some_access_key1", s3ConfSaved.Identities[0].Credentials[0].AccessKey) + assert.Equal(t, "some_secret_key2", s3ConfSaved.Identities[1].Credentials[0].SecretKey) +} diff --git a/weed/filer/stream.go b/weed/filer/stream.go new file mode 100644 index 000000000..661a210ea --- /dev/null +++ b/weed/filer/stream.go @@ -0,0 +1,245 @@ +package filer + +import ( + "bytes" + "fmt" + "golang.org/x/sync/errgroup" + "io" + "math" + "strings" + + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/util" + "github.com/chrislusf/seaweedfs/weed/wdclient" +) + +func StreamContent(masterClient wdclient.HasLookupFileIdFunction, w io.Writer, chunks []*filer_pb.FileChunk, offset int64, size int64, isCheck bool) error { + + glog.V(9).Infof("start to stream content for chunks: %+v\n", chunks) + chunkViews := ViewFromChunks(masterClient.GetLookupFileIdFunction(), chunks, offset, size) + + fileId2Url := make(map[string][]string) + + for _, chunkView := range chunkViews { + + urlStrings, err := masterClient.GetLookupFileIdFunction()(chunkView.FileId) + if err != nil { + glog.V(1).Infof("operation LookupFileId %s failed, err: %v", chunkView.FileId, err) + return err + } else if len(urlStrings) == 0 { + glog.Errorf("operation LookupFileId %s failed, err: urls not found", chunkView.FileId) + return fmt.Errorf("operation LookupFileId %s failed, err: urls not found", chunkView.FileId) + } + fileId2Url[chunkView.FileId] = urlStrings + } + + if isCheck { + // Pre-check all chunkViews urls + gErr := new(errgroup.Group) + CheckAllChunkViews(chunkViews, &fileId2Url, gErr) + if err := 
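
The isCheck path fans the per-chunk probes out on an errgroup: CheckAllChunkViews starts one goroutine per chunkView and Wait surfaces the first failure. A minimal sketch of the same pattern (the URLs and probe body are placeholders); note the explicit loop-variable shadow, which Go versions before 1.22 require so each goroutine captures its own value:

```go
package main

import (
	"fmt"

	"golang.org/x/sync/errgroup"
)

func probe(url string) error {
	if url == "" {
		return fmt.Errorf("no replica url")
	}
	return nil // a real probe fetches the chunk, as retriedFetchChunkData does
}

func main() {
	urls := []string{"http://vol1/3,ab", "http://vol2/3,ab", "http://vol3/7,cd"}

	g := new(errgroup.Group)
	for _, u := range urls {
		u := u // shadow: on Go <1.22 each goroutine must get its own copy
		g.Go(func() error { return probe(u) })
	}
	// Wait blocks for all probes and returns the first non-nil error.
	if err := g.Wait(); err != nil {
		fmt.Println("pre-check failed:", err)
		return
	}
	fmt.Println("all chunks reachable")
}
```
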
gErr.Wait(); err != nil { + glog.Errorf("check all chunks: %v", err) + return fmt.Errorf("check all chunks: %v", err) + } + return nil + } + + for _, chunkView := range chunkViews { + + urlStrings := fileId2Url[chunkView.FileId] + data, err := retriedFetchChunkData(urlStrings, chunkView.CipherKey, chunkView.IsGzipped, chunkView.IsFullChunk(), chunkView.Offset, int(chunkView.Size)) + if err != nil { + glog.Errorf("read chunk: %v", err) + return fmt.Errorf("read chunk: %v", err) + } + + _, err = w.Write(data) + if err != nil { + glog.Errorf("write chunk: %v", err) + return fmt.Errorf("write chunk: %v", err) + } + } + + return nil + +} + +func CheckAllChunkViews(chunkViews []*ChunkView, fileId2Url *map[string][]string, gErr *errgroup.Group) { + for _, chunkView := range chunkViews { + urlStrings := (*fileId2Url)[chunkView.FileId] + glog.V(9).Infof("Check chunk: %+v\n url: %v", chunkView, urlStrings) + gErr.Go(func() error { + _, err := retriedFetchChunkData(urlStrings, chunkView.CipherKey, chunkView.IsGzipped, chunkView.IsFullChunk(), chunkView.Offset, int(chunkView.Size)) + return err + }) + } +} + +// ---------------- ReadAllReader ---------------------------------- + +func ReadAll(masterClient *wdclient.MasterClient, chunks []*filer_pb.FileChunk) ([]byte, error) { + + buffer := bytes.Buffer{} + + lookupFileIdFn := func(fileId string) (targetUrls []string, err error) { + return masterClient.LookupFileId(fileId) + } + + chunkViews := ViewFromChunks(lookupFileIdFn, chunks, 0, math.MaxInt64) + + for _, chunkView := range chunkViews { + urlStrings, err := lookupFileIdFn(chunkView.FileId) + if err != nil { + glog.V(1).Infof("operation LookupFileId %s failed, err: %v", chunkView.FileId, err) + return nil, err + } + + data, err := retriedFetchChunkData(urlStrings, chunkView.CipherKey, chunkView.IsGzipped, chunkView.IsFullChunk(), chunkView.Offset, int(chunkView.Size)) + if err != nil { + return nil, err + } + buffer.Write(data) + } + return buffer.Bytes(), nil +} + +// ---------------- ChunkStreamReader ---------------------------------- +type ChunkStreamReader struct { + chunkViews []*ChunkView + logicOffset int64 + buffer []byte + bufferOffset int64 + bufferPos int + chunkIndex int + lookupFileId wdclient.LookupFileIdFunctionType +} + +var _ = io.ReadSeeker(&ChunkStreamReader{}) + +func NewChunkStreamReaderFromFiler(masterClient *wdclient.MasterClient, chunks []*filer_pb.FileChunk) *ChunkStreamReader { + + lookupFileIdFn := func(fileId string) (targetUrl []string, err error) { + return masterClient.LookupFileId(fileId) + } + + chunkViews := ViewFromChunks(lookupFileIdFn, chunks, 0, math.MaxInt64) + + return &ChunkStreamReader{ + chunkViews: chunkViews, + lookupFileId: lookupFileIdFn, + } +} + +func NewChunkStreamReader(filerClient filer_pb.FilerClient, chunks []*filer_pb.FileChunk) *ChunkStreamReader { + + lookupFileIdFn := LookupFn(filerClient) + + chunkViews := ViewFromChunks(lookupFileIdFn, chunks, 0, math.MaxInt64) + + return &ChunkStreamReader{ + chunkViews: chunkViews, + lookupFileId: lookupFileIdFn, + } +} + +func (c *ChunkStreamReader) Read(p []byte) (n int, err error) { + for n < len(p) { + if c.isBufferEmpty() { + if c.chunkIndex >= len(c.chunkViews) { + return n, io.EOF + } + chunkView := c.chunkViews[c.chunkIndex] + c.fetchChunkToBuffer(chunkView) + c.chunkIndex++ + } + t := copy(p[n:], c.buffer[c.bufferPos:]) + c.bufferPos += t + n += t + } + return +} + +func (c *ChunkStreamReader) isBufferEmpty() bool { + return len(c.buffer) <= c.bufferPos +} + +func (c *ChunkStreamReader) 
Seek(offset int64, whence int) (int64, error) { + + var totalSize int64 + for _, chunk := range c.chunkViews { + totalSize += int64(chunk.Size) + } + + var err error + switch whence { + case io.SeekStart: + case io.SeekCurrent: + offset += c.bufferOffset + int64(c.bufferPos) + case io.SeekEnd: + offset = totalSize + offset + } + if offset > totalSize { + err = io.ErrUnexpectedEOF + } + + for i, chunk := range c.chunkViews { + if chunk.LogicOffset <= offset && offset < chunk.LogicOffset+int64(chunk.Size) { + if c.isBufferEmpty() || c.bufferOffset != chunk.LogicOffset { + c.fetchChunkToBuffer(chunk) + c.chunkIndex = i + 1 + break + } + } + } + c.bufferPos = int(offset - c.bufferOffset) + + return offset, err + +} + +func (c *ChunkStreamReader) fetchChunkToBuffer(chunkView *ChunkView) error { + urlStrings, err := c.lookupFileId(chunkView.FileId) + if err != nil { + glog.V(1).Infof("operation LookupFileId %s failed, err: %v", chunkView.FileId, err) + return err + } + var buffer bytes.Buffer + var shouldRetry bool + for _, urlString := range urlStrings { + shouldRetry, err = util.ReadUrlAsStream(urlString, chunkView.CipherKey, chunkView.IsGzipped, chunkView.IsFullChunk(), chunkView.Offset, int(chunkView.Size), func(data []byte) { + buffer.Write(data) + }) + if !shouldRetry { + break + } + if err != nil { + glog.V(1).Infof("read %s failed, err: %v", chunkView.FileId, err) + buffer.Reset() + } else { + break + } + } + if err != nil { + return err + } + c.buffer = buffer.Bytes() + c.bufferPos = 0 + c.bufferOffset = chunkView.LogicOffset + + // glog.V(0).Infof("read %s [%d,%d)", chunkView.FileId, chunkView.LogicOffset, chunkView.LogicOffset+int64(chunkView.Size)) + + return nil +} + +func (c *ChunkStreamReader) Close() { + // TODO try to release and reuse buffer +} + +func VolumeId(fileId string) string { + lastCommaIndex := strings.LastIndex(fileId, ",") + if lastCommaIndex > 0 { + return fileId[:lastCommaIndex] + } + return fileId +} diff --git a/weed/filer/topics.go b/weed/filer/topics.go new file mode 100644 index 000000000..3a2fde8c4 --- /dev/null +++ b/weed/filer/topics.go @@ -0,0 +1,6 @@ +package filer + +const ( + TopicsDir = "/topics" + SystemLogDir = TopicsDir + "/.system/log" +) diff --git a/weed/filer2/abstract_sql/abstract_sql_store.go b/weed/filer2/abstract_sql/abstract_sql_store.go deleted file mode 100644 index d512467c7..000000000 --- a/weed/filer2/abstract_sql/abstract_sql_store.go +++ /dev/null @@ -1,184 +0,0 @@ -package abstract_sql - -import ( - "context" - "database/sql" - "fmt" - - "github.com/chrislusf/seaweedfs/weed/filer2" - "github.com/chrislusf/seaweedfs/weed/glog" -) - -type AbstractSqlStore struct { - DB *sql.DB - SqlInsert string - SqlUpdate string - SqlFind string - SqlDelete string - SqlDeleteFolderChildren string - SqlListExclusive string - SqlListInclusive string -} - -type TxOrDB interface { - ExecContext(ctx context.Context, query string, args ...interface{}) (sql.Result, error) - QueryRowContext(ctx context.Context, query string, args ...interface{}) *sql.Row - QueryContext(ctx context.Context, query string, args ...interface{}) (*sql.Rows, error) -} - -func (store *AbstractSqlStore) BeginTransaction(ctx context.Context) (context.Context, error) { - tx, err := store.DB.BeginTx(ctx, &sql.TxOptions{ - Isolation: sql.LevelReadCommitted, - ReadOnly: false, - }) - if err != nil { - return ctx, err - } - - return context.WithValue(ctx, "tx", tx), nil -} -func (store *AbstractSqlStore) CommitTransaction(ctx context.Context) error { - if tx, ok := 
ctx.Value("tx").(*sql.Tx); ok { - return tx.Commit() - } - return nil -} -func (store *AbstractSqlStore) RollbackTransaction(ctx context.Context) error { - if tx, ok := ctx.Value("tx").(*sql.Tx); ok { - return tx.Rollback() - } - return nil -} - -func (store *AbstractSqlStore) getTxOrDB(ctx context.Context) TxOrDB { - if tx, ok := ctx.Value("tx").(*sql.Tx); ok { - return tx - } - return store.DB -} - -func (store *AbstractSqlStore) InsertEntry(ctx context.Context, entry *filer2.Entry) (err error) { - - dir, name := entry.FullPath.DirAndName() - meta, err := entry.EncodeAttributesAndChunks() - if err != nil { - return fmt.Errorf("encode %s: %s", entry.FullPath, err) - } - - res, err := store.getTxOrDB(ctx).ExecContext(ctx, store.SqlInsert, hashToLong(dir), name, dir, meta) - if err != nil { - return fmt.Errorf("insert %s: %s", entry.FullPath, err) - } - - _, err = res.RowsAffected() - if err != nil { - return fmt.Errorf("insert %s but no rows affected: %s", entry.FullPath, err) - } - return nil -} - -func (store *AbstractSqlStore) UpdateEntry(ctx context.Context, entry *filer2.Entry) (err error) { - - dir, name := entry.FullPath.DirAndName() - meta, err := entry.EncodeAttributesAndChunks() - if err != nil { - return fmt.Errorf("encode %s: %s", entry.FullPath, err) - } - - res, err := store.getTxOrDB(ctx).ExecContext(ctx, store.SqlUpdate, meta, hashToLong(dir), name, dir) - if err != nil { - return fmt.Errorf("update %s: %s", entry.FullPath, err) - } - - _, err = res.RowsAffected() - if err != nil { - return fmt.Errorf("update %s but no rows affected: %s", entry.FullPath, err) - } - return nil -} - -func (store *AbstractSqlStore) FindEntry(ctx context.Context, fullpath filer2.FullPath) (*filer2.Entry, error) { - - dir, name := fullpath.DirAndName() - row := store.getTxOrDB(ctx).QueryRowContext(ctx, store.SqlFind, hashToLong(dir), name, dir) - var data []byte - if err := row.Scan(&data); err != nil { - return nil, filer2.ErrNotFound - } - - entry := &filer2.Entry{ - FullPath: fullpath, - } - if err := entry.DecodeAttributesAndChunks(data); err != nil { - return entry, fmt.Errorf("decode %s : %v", entry.FullPath, err) - } - - return entry, nil -} - -func (store *AbstractSqlStore) DeleteEntry(ctx context.Context, fullpath filer2.FullPath) error { - - dir, name := fullpath.DirAndName() - - res, err := store.getTxOrDB(ctx).ExecContext(ctx, store.SqlDelete, hashToLong(dir), name, dir) - if err != nil { - return fmt.Errorf("delete %s: %s", fullpath, err) - } - - _, err = res.RowsAffected() - if err != nil { - return fmt.Errorf("delete %s but no rows affected: %s", fullpath, err) - } - - return nil -} - -func (store *AbstractSqlStore) DeleteFolderChildren(ctx context.Context, fullpath filer2.FullPath) error { - - res, err := store.getTxOrDB(ctx).ExecContext(ctx, store.SqlDeleteFolderChildren, hashToLong(string(fullpath)), fullpath) - if err != nil { - return fmt.Errorf("deleteFolderChildren %s: %s", fullpath, err) - } - - _, err = res.RowsAffected() - if err != nil { - return fmt.Errorf("deleteFolderChildren %s but no rows affected: %s", fullpath, err) - } - - return nil -} - -func (store *AbstractSqlStore) ListDirectoryEntries(ctx context.Context, fullpath filer2.FullPath, startFileName string, inclusive bool, limit int) (entries []*filer2.Entry, err error) { - - sqlText := store.SqlListExclusive - if inclusive { - sqlText = store.SqlListInclusive - } - - rows, err := store.getTxOrDB(ctx).QueryContext(ctx, sqlText, hashToLong(string(fullpath)), startFileName, string(fullpath), limit) - if err != 
nil { - return nil, fmt.Errorf("list %s : %v", fullpath, err) - } - defer rows.Close() - - for rows.Next() { - var name string - var data []byte - if err = rows.Scan(&name, &data); err != nil { - glog.V(0).Infof("scan %s : %v", fullpath, err) - return nil, fmt.Errorf("scan %s: %v", fullpath, err) - } - - entry := &filer2.Entry{ - FullPath: filer2.NewFullPath(string(fullpath), name), - } - if err = entry.DecodeAttributesAndChunks(data); err != nil { - glog.V(0).Infof("scan decode %s : %v", entry.FullPath, err) - return nil, fmt.Errorf("scan decode %s : %v", entry.FullPath, err) - } - - entries = append(entries, entry) - } - - return entries, nil -} diff --git a/weed/filer2/abstract_sql/hashing.go b/weed/filer2/abstract_sql/hashing.go deleted file mode 100644 index 5c982c537..000000000 --- a/weed/filer2/abstract_sql/hashing.go +++ /dev/null @@ -1,32 +0,0 @@ -package abstract_sql - -import ( - "crypto/md5" - "io" -) - -// returns a 64 bit big int -func hashToLong(dir string) (v int64) { - h := md5.New() - io.WriteString(h, dir) - - b := h.Sum(nil) - - v += int64(b[0]) - v <<= 8 - v += int64(b[1]) - v <<= 8 - v += int64(b[2]) - v <<= 8 - v += int64(b[3]) - v <<= 8 - v += int64(b[4]) - v <<= 8 - v += int64(b[5]) - v <<= 8 - v += int64(b[6]) - v <<= 8 - v += int64(b[7]) - - return -} diff --git a/weed/filer2/cassandra/cassandra_store.go b/weed/filer2/cassandra/cassandra_store.go deleted file mode 100644 index dcaab8bc4..000000000 --- a/weed/filer2/cassandra/cassandra_store.go +++ /dev/null @@ -1,153 +0,0 @@ -package cassandra - -import ( - "context" - "fmt" - "github.com/chrislusf/seaweedfs/weed/filer2" - "github.com/chrislusf/seaweedfs/weed/glog" - "github.com/chrislusf/seaweedfs/weed/util" - "github.com/gocql/gocql" -) - -func init() { - filer2.Stores = append(filer2.Stores, &CassandraStore{}) -} - -type CassandraStore struct { - cluster *gocql.ClusterConfig - session *gocql.Session -} - -func (store *CassandraStore) GetName() string { - return "cassandra" -} - -func (store *CassandraStore) Initialize(configuration util.Configuration) (err error) { - return store.initialize( - configuration.GetString("keyspace"), - configuration.GetStringSlice("hosts"), - ) -} - -func (store *CassandraStore) initialize(keyspace string, hosts []string) (err error) { - store.cluster = gocql.NewCluster(hosts...) - store.cluster.Keyspace = keyspace - store.cluster.Consistency = gocql.LocalQuorum - store.session, err = store.cluster.CreateSession() - if err != nil { - glog.V(0).Infof("Failed to open cassandra store, hosts %v, keyspace %s", hosts, keyspace) - } - return -} - -func (store *CassandraStore) BeginTransaction(ctx context.Context) (context.Context, error) { - return ctx, nil -} -func (store *CassandraStore) CommitTransaction(ctx context.Context) error { - return nil -} -func (store *CassandraStore) RollbackTransaction(ctx context.Context) error { - return nil -} - -func (store *CassandraStore) InsertEntry(ctx context.Context, entry *filer2.Entry) (err error) { - - dir, name := entry.FullPath.DirAndName() - meta, err := entry.EncodeAttributesAndChunks() - if err != nil { - return fmt.Errorf("encode %s: %s", entry.FullPath, err) - } - - if err := store.session.Query( - "INSERT INTO filemeta (directory,name,meta) VALUES(?,?,?) USING TTL ? 
", - dir, name, meta, entry.TtlSec).Exec(); err != nil { - return fmt.Errorf("insert %s: %s", entry.FullPath, err) - } - - return nil -} - -func (store *CassandraStore) UpdateEntry(ctx context.Context, entry *filer2.Entry) (err error) { - - return store.InsertEntry(ctx, entry) -} - -func (store *CassandraStore) FindEntry(ctx context.Context, fullpath filer2.FullPath) (entry *filer2.Entry, err error) { - - dir, name := fullpath.DirAndName() - var data []byte - if err := store.session.Query( - "SELECT meta FROM filemeta WHERE directory=? AND name=?", - dir, name).Consistency(gocql.One).Scan(&data); err != nil { - if err != gocql.ErrNotFound { - return nil, filer2.ErrNotFound - } - } - - if len(data) == 0 { - return nil, filer2.ErrNotFound - } - - entry = &filer2.Entry{ - FullPath: fullpath, - } - err = entry.DecodeAttributesAndChunks(data) - if err != nil { - return entry, fmt.Errorf("decode %s : %v", entry.FullPath, err) - } - - return entry, nil -} - -func (store *CassandraStore) DeleteEntry(ctx context.Context, fullpath filer2.FullPath) error { - - dir, name := fullpath.DirAndName() - - if err := store.session.Query( - "DELETE FROM filemeta WHERE directory=? AND name=?", - dir, name).Exec(); err != nil { - return fmt.Errorf("delete %s : %v", fullpath, err) - } - - return nil -} - -func (store *CassandraStore) DeleteFolderChildren(ctx context.Context, fullpath filer2.FullPath) error { - - if err := store.session.Query( - "DELETE FROM filemeta WHERE directory=?", - fullpath).Exec(); err != nil { - return fmt.Errorf("delete %s : %v", fullpath, err) - } - - return nil -} - -func (store *CassandraStore) ListDirectoryEntries(ctx context.Context, fullpath filer2.FullPath, startFileName string, inclusive bool, - limit int) (entries []*filer2.Entry, err error) { - - cqlStr := "SELECT NAME, meta FROM filemeta WHERE directory=? AND name>? ORDER BY NAME ASC LIMIT ?" - if inclusive { - cqlStr = "SELECT NAME, meta FROM filemeta WHERE directory=? AND name>=? ORDER BY NAME ASC LIMIT ?" 
- } - - var data []byte - var name string - iter := store.session.Query(cqlStr, string(fullpath), startFileName, limit).Iter() - for iter.Scan(&name, &data) { - entry := &filer2.Entry{ - FullPath: filer2.NewFullPath(string(fullpath), name), - } - if decodeErr := entry.DecodeAttributesAndChunks(data); decodeErr != nil { - err = decodeErr - glog.V(0).Infof("list %s : %v", entry.FullPath, err) - break - } - entries = append(entries, entry) - } - if err := iter.Close(); err != nil { - glog.V(0).Infof("list iterator close: %v", err) - } - - return entries, err -} diff --git a/weed/filer2/configuration.go b/weed/filer2/configuration.go deleted file mode 100644 index 7b05b53dc..000000000 --- a/weed/filer2/configuration.go +++ /dev/null @@ -1,51 +0,0 @@ -package filer2 - -import ( - "os" - - "github.com/chrislusf/seaweedfs/weed/glog" - "github.com/spf13/viper" -) - -var ( - Stores []FilerStore -) - -func (f *Filer) LoadConfiguration(config *viper.Viper) { - - validateOneEnabledStore(config) - - for _, store := range Stores { - if config.GetBool(store.GetName() + ".enabled") { - viperSub := config.Sub(store.GetName()) - if err := store.Initialize(viperSub); err != nil { - glog.Fatalf("Failed to initialize store for %s: %+v", - store.GetName(), err) - } - f.SetStore(store) - glog.V(0).Infof("Configure filer for %s", store.GetName()) - return - } - } - - println() - println("Supported filer stores are:") - for _, store := range Stores { - println(" " + store.GetName()) - } - - os.Exit(-1) -} - -func validateOneEnabledStore(config *viper.Viper) { - enabledStore := "" - for _, store := range Stores { - if config.GetBool(store.GetName() + ".enabled") { - if enabledStore == "" { - enabledStore = store.GetName() - } else { - glog.Fatalf("Filer store is enabled for both %s and %s", enabledStore, store.GetName()) - } - } - } -} diff --git a/weed/filer2/entry.go b/weed/filer2/entry.go deleted file mode 100644 index c901927bb..000000000 --- a/weed/filer2/entry.go +++ /dev/null @@ -1,73 +0,0 @@ -package filer2 - -import ( - "os" - "time" - - "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" -) - -type Attr struct { - Mtime time.Time // time of last modification - Crtime time.Time // time of creation (OS X only) - Mode os.FileMode // file mode - Uid uint32 // owner uid - Gid uint32 // group gid - Mime string // mime type - Replication string // replication - Collection string // collection name - TtlSec int32 // ttl in seconds - UserName string - GroupNames []string - SymlinkTarget string -} - -func (attr Attr) IsDirectory() bool { - return attr.Mode&os.ModeDir > 0 -} - -type Entry struct { - FullPath - - Attr - Extended map[string][]byte - - // the following is for files - Chunks []*filer_pb.FileChunk `json:"chunks,omitempty"` -} - -func (entry *Entry) Size() uint64 { - return TotalSize(entry.Chunks) -} - -func (entry *Entry) Timestamp() time.Time { - if entry.IsDirectory() { - return entry.Crtime - } else { - return entry.Mtime - } -} - -func (entry *Entry) ToProtoEntry() *filer_pb.Entry { - if entry == nil { - return nil - } - return &filer_pb.Entry{ - Name: entry.FullPath.Name(), - IsDirectory: entry.IsDirectory(), - Attributes: EntryAttributeToPb(entry), - Chunks: entry.Chunks, - Extended: entry.Extended, - } -} - -func (entry *Entry) ToProtoFullEntry() *filer_pb.FullEntry { - if entry == nil { - return nil - } - dir, _ := entry.FullPath.DirAndName() - return &filer_pb.FullEntry{ - Dir: dir, - Entry: entry.ToProtoEntry(), - } -} diff --git a/weed/filer2/filechunks.go b/weed/filer2/filechunks.go deleted 
file mode 100644 index b5876df82..000000000 --- a/weed/filer2/filechunks.go +++ /dev/null @@ -1,228 +0,0 @@ -package filer2 - -import ( - "fmt" - "hash/fnv" - "sort" - "sync" - - "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" -) - -func TotalSize(chunks []*filer_pb.FileChunk) (size uint64) { - for _, c := range chunks { - t := uint64(c.Offset + int64(c.Size)) - if size < t { - size = t - } - } - return -} - -func ETag(chunks []*filer_pb.FileChunk) (etag string) { - if len(chunks) == 1 { - return chunks[0].ETag - } - - h := fnv.New32a() - for _, c := range chunks { - h.Write([]byte(c.ETag)) - } - return fmt.Sprintf("%x", h.Sum32()) -} - -func CompactFileChunks(chunks []*filer_pb.FileChunk) (compacted, garbage []*filer_pb.FileChunk) { - - visibles := NonOverlappingVisibleIntervals(chunks) - - fileIds := make(map[string]bool) - for _, interval := range visibles { - fileIds[interval.fileId] = true - } - for _, chunk := range chunks { - if _, found := fileIds[chunk.GetFileIdString()]; found { - compacted = append(compacted, chunk) - } else { - garbage = append(garbage, chunk) - } - } - - return -} - -func MinusChunks(as, bs []*filer_pb.FileChunk) (delta []*filer_pb.FileChunk) { - - fileIds := make(map[string]bool) - for _, interval := range bs { - fileIds[interval.GetFileIdString()] = true - } - for _, chunk := range as { - if _, found := fileIds[chunk.GetFileIdString()]; !found { - delta = append(delta, chunk) - } - } - - return -} - -type ChunkView struct { - FileId string - Offset int64 - Size uint64 - LogicOffset int64 - IsFullChunk bool -} - -func ViewFromChunks(chunks []*filer_pb.FileChunk, offset int64, size int) (views []*ChunkView) { - - visibles := NonOverlappingVisibleIntervals(chunks) - - return ViewFromVisibleIntervals(visibles, offset, size) - -} - -func ViewFromVisibleIntervals(visibles []VisibleInterval, offset int64, size int) (views []*ChunkView) { - - stop := offset + int64(size) - - for _, chunk := range visibles { - if chunk.start <= offset && offset < chunk.stop && offset < stop { - isFullChunk := chunk.isFullChunk && chunk.start == offset && chunk.stop <= stop - views = append(views, &ChunkView{ - FileId: chunk.fileId, - Offset: offset - chunk.start, // offset is the data starting location in this file id - Size: uint64(min(chunk.stop, stop) - offset), - LogicOffset: offset, - IsFullChunk: isFullChunk, - }) - offset = min(chunk.stop, stop) - } - } - - return views - -} - -func logPrintf(name string, visibles []VisibleInterval) { - /* - log.Printf("%s len %d", name, len(visibles)) - for _, v := range visibles { - log.Printf("%s: => %+v", name, v) - } - */ -} - -var bufPool = sync.Pool{ - New: func() interface{} { - return new(VisibleInterval) - }, -} - -func MergeIntoVisibles(visibles, newVisibles []VisibleInterval, chunk *filer_pb.FileChunk) []VisibleInterval { - - newV := newVisibleInterval( - chunk.Offset, - chunk.Offset+int64(chunk.Size), - chunk.GetFileIdString(), - chunk.Mtime, - true, - ) - - length := len(visibles) - if length == 0 { - return append(visibles, newV) - } - last := visibles[length-1] - if last.stop <= chunk.Offset { - return append(visibles, newV) - } - - logPrintf(" before", visibles) - for _, v := range visibles { - if v.start < chunk.Offset && chunk.Offset < v.stop { - newVisibles = append(newVisibles, newVisibleInterval( - v.start, - chunk.Offset, - v.fileId, - v.modifiedTime, - false, - )) - } - chunkStop := chunk.Offset + int64(chunk.Size) - if v.start < chunkStop && chunkStop < v.stop { - newVisibles = append(newVisibles, newVisibleInterval( 
- chunkStop, - v.stop, - v.fileId, - v.modifiedTime, - false, - )) - } - if chunkStop <= v.start || v.stop <= chunk.Offset { - newVisibles = append(newVisibles, v) - } - } - newVisibles = append(newVisibles, newV) - - logPrintf(" append", newVisibles) - - for i := len(newVisibles) - 1; i >= 0; i-- { - if i > 0 && newV.start < newVisibles[i-1].start { - newVisibles[i] = newVisibles[i-1] - } else { - newVisibles[i] = newV - break - } - } - logPrintf(" sorted", newVisibles) - - return newVisibles -} - -func NonOverlappingVisibleIntervals(chunks []*filer_pb.FileChunk) (visibles []VisibleInterval) { - - sort.Slice(chunks, func(i, j int) bool { - return chunks[i].Mtime < chunks[j].Mtime - }) - - var newVisibles []VisibleInterval - for _, chunk := range chunks { - newVisibles = MergeIntoVisibles(visibles, newVisibles, chunk) - t := visibles[:0] - visibles = newVisibles - newVisibles = t - - logPrintf("add", visibles) - - } - - return -} - -// find non-overlapping visible intervals -// visible interval map to one file chunk - -type VisibleInterval struct { - start int64 - stop int64 - modifiedTime int64 - fileId string - isFullChunk bool -} - -func newVisibleInterval(start, stop int64, fileId string, modifiedTime int64, isFullChunk bool) VisibleInterval { - return VisibleInterval{ - start: start, - stop: stop, - fileId: fileId, - modifiedTime: modifiedTime, - isFullChunk: isFullChunk, - } -} - -func min(x, y int64) int64 { - if x <= y { - return x - } - return y -} diff --git a/weed/filer2/filer.go b/weed/filer2/filer.go deleted file mode 100644 index b724e20fd..000000000 --- a/weed/filer2/filer.go +++ /dev/null @@ -1,253 +0,0 @@ -package filer2 - -import ( - "context" - "fmt" - "os" - "path/filepath" - "strings" - "time" - - "google.golang.org/grpc" - - "github.com/karlseguin/ccache" - - "github.com/chrislusf/seaweedfs/weed/glog" - "github.com/chrislusf/seaweedfs/weed/wdclient" -) - -const PaginationSize = 1024 * 256 - -var ( - OS_UID = uint32(os.Getuid()) - OS_GID = uint32(os.Getgid()) -) - -type Filer struct { - store *FilerStoreWrapper - directoryCache *ccache.Cache - MasterClient *wdclient.MasterClient - fileIdDeletionChan chan string - GrpcDialOption grpc.DialOption -} - -func NewFiler(masters []string, grpcDialOption grpc.DialOption) *Filer { - f := &Filer{ - directoryCache: ccache.New(ccache.Configure().MaxSize(1000).ItemsToPrune(100)), - MasterClient: wdclient.NewMasterClient(context.Background(), grpcDialOption, "filer", masters), - fileIdDeletionChan: make(chan string, PaginationSize), - GrpcDialOption: grpcDialOption, - } - - go f.loopProcessingDeletion() - - return f -} - -func (f *Filer) SetStore(store FilerStore) { - f.store = NewFilerStoreWrapper(store) -} - -func (f *Filer) DisableDirectoryCache() { - f.directoryCache = nil -} - -func (fs *Filer) GetMaster() string { - return fs.MasterClient.GetMaster() -} - -func (fs *Filer) KeepConnectedToMaster() { - fs.MasterClient.KeepConnectedToMaster() -} - -func (f *Filer) BeginTransaction(ctx context.Context) (context.Context, error) { - return f.store.BeginTransaction(ctx) -} - -func (f *Filer) CommitTransaction(ctx context.Context) error { - return f.store.CommitTransaction(ctx) -} - -func (f *Filer) RollbackTransaction(ctx context.Context) error { - return f.store.RollbackTransaction(ctx) -} - -func (f *Filer) CreateEntry(ctx context.Context, entry *Entry) error { - - if string(entry.FullPath) == "/" { - return nil - } - - dirParts := strings.Split(string(entry.FullPath), "/") - - // fmt.Printf("directory parts: %+v\n", dirParts) - - 
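// The loop that follows walks every ancestor of the new entry, creating any
// missing directory on the way down. A minimal sketch of how the dirParts
// slice maps to the ancestor paths that get probed; ancestorPaths is
// illustrative only and not a helper in this codebase:

import (
	"path/filepath"
	"strings"
)

// ancestorPaths("/a/b/c.txt") returns []string{"/", "/a", "/a/b"}.
func ancestorPaths(fullPath string) (dirs []string) {
	parts := strings.Split(fullPath, "/") // absolute path, so parts[0] == ""
	for i := 1; i < len(parts); i++ {
		dirs = append(dirs, "/"+filepath.ToSlash(filepath.Join(parts[:i]...)))
	}
	return dirs
}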
var lastDirectoryEntry *Entry - - for i := 1; i < len(dirParts); i++ { - dirPath := "/" + filepath.ToSlash(filepath.Join(dirParts[:i]...)) - // fmt.Printf("%d directory: %+v\n", i, dirPath) - - // first check local cache - dirEntry := f.cacheGetDirectory(dirPath) - - // not found, check the store directly - if dirEntry == nil { - glog.V(4).Infof("find uncached directory: %s", dirPath) - dirEntry, _ = f.FindEntry(ctx, FullPath(dirPath)) - } else { - glog.V(4).Infof("found cached directory: %s", dirPath) - } - - // no such existing directory - if dirEntry == nil { - - // create the directory - now := time.Now() - - dirEntry = &Entry{ - FullPath: FullPath(dirPath), - Attr: Attr{ - Mtime: now, - Crtime: now, - Mode: os.ModeDir | 0770, - Uid: entry.Uid, - Gid: entry.Gid, - }, - } - - glog.V(2).Infof("create directory: %s %v", dirPath, dirEntry.Mode) - mkdirErr := f.store.InsertEntry(ctx, dirEntry) - if mkdirErr != nil { - if _, err := f.FindEntry(ctx, FullPath(dirPath)); err == ErrNotFound { - return fmt.Errorf("mkdir %s: %v", dirPath, mkdirErr) - } - } else { - f.NotifyUpdateEvent(nil, dirEntry, false) - } - - } else if !dirEntry.IsDirectory() { - return fmt.Errorf("%s is a file", dirPath) - } - - // cache the directory entry - f.cacheSetDirectory(dirPath, dirEntry, i) - - // remember the direct parent directory entry - if i == len(dirParts)-1 { - lastDirectoryEntry = dirEntry - } - - } - - if lastDirectoryEntry == nil { - return fmt.Errorf("parent folder not found: %v", entry.FullPath) - } - - /* - if !hasWritePermission(lastDirectoryEntry, entry) { - glog.V(0).Infof("directory %s: %v, entry: uid=%d gid=%d", - lastDirectoryEntry.FullPath, lastDirectoryEntry.Attr, entry.Uid, entry.Gid) - return fmt.Errorf("no write permission in folder %v", lastDirectoryEntry.FullPath) - } - */ - - oldEntry, _ := f.FindEntry(ctx, entry.FullPath) - - if oldEntry == nil { - if err := f.store.InsertEntry(ctx, entry); err != nil { - glog.Errorf("insert entry %s: %v", entry.FullPath, err) - return fmt.Errorf("insert entry %s: %v", entry.FullPath, err) - } - } else { - if err := f.UpdateEntry(ctx, oldEntry, entry); err != nil { - glog.Errorf("update entry %s: %v", entry.FullPath, err) - return fmt.Errorf("update entry %s: %v", entry.FullPath, err) - } - } - - f.NotifyUpdateEvent(oldEntry, entry, true) - - f.deleteChunksIfNotNew(oldEntry, entry) - - return nil -} - -func (f *Filer) UpdateEntry(ctx context.Context, oldEntry, entry *Entry) (err error) { - if oldEntry != nil { - if oldEntry.IsDirectory() && !entry.IsDirectory() { - glog.Errorf("existing %s is a directory", entry.FullPath) - return fmt.Errorf("existing %s is a directory", entry.FullPath) - } - if !oldEntry.IsDirectory() && entry.IsDirectory() { - glog.Errorf("existing %s is a file", entry.FullPath) - return fmt.Errorf("existing %s is a file", entry.FullPath) - } - } - return f.store.UpdateEntry(ctx, entry) -} - -func (f *Filer) FindEntry(ctx context.Context, p FullPath) (entry *Entry, err error) { - - now := time.Now() - - if string(p) == "/" { - return &Entry{ - FullPath: p, - Attr: Attr{ - Mtime: now, - Crtime: now, - Mode: os.ModeDir | 0755, - Uid: OS_UID, - Gid: OS_GID, - }, - }, nil - } - return f.store.FindEntry(ctx, p) -} - -func (f *Filer) ListDirectoryEntries(ctx context.Context, p FullPath, startFileName string, inclusive bool, limit int) ([]*Entry, error) { - if strings.HasSuffix(string(p), "/") && len(p) > 1 { - p = p[0 : len(p)-1] - } - return f.store.ListDirectoryEntries(ctx, p, startFileName, inclusive, limit) -} - -func (f *Filer) 
cacheDelDirectory(dirpath string) { - - if dirpath == "/" { - return - } - - if f.directoryCache == nil { - return - } - f.directoryCache.Delete(dirpath) - return -} - -func (f *Filer) cacheGetDirectory(dirpath string) *Entry { - - if f.directoryCache == nil { - return nil - } - item := f.directoryCache.Get(dirpath) - if item == nil { - return nil - } - return item.Value().(*Entry) -} - -func (f *Filer) cacheSetDirectory(dirpath string, dirEntry *Entry, level int) { - - if f.directoryCache == nil { - return - } - - minutes := 60 - if level < 10 { - minutes -= level * 6 - } - - f.directoryCache.Set(dirpath, dirEntry, time.Duration(minutes)*time.Minute) -} diff --git a/weed/filer2/filer_client_util.go b/weed/filer2/filer_client_util.go deleted file mode 100644 index 1a10f7c20..000000000 --- a/weed/filer2/filer_client_util.go +++ /dev/null @@ -1,172 +0,0 @@ -package filer2 - -import ( - "context" - "fmt" - "io" - "math" - "strings" - "sync" - - "github.com/chrislusf/seaweedfs/weed/glog" - "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" - "github.com/chrislusf/seaweedfs/weed/util" -) - -func VolumeId(fileId string) string { - lastCommaIndex := strings.LastIndex(fileId, ",") - if lastCommaIndex > 0 { - return fileId[:lastCommaIndex] - } - return fileId -} - -type FilerClient interface { - WithFilerClient(ctx context.Context, fn func(filer_pb.SeaweedFilerClient) error) error -} - -func ReadIntoBuffer(ctx context.Context, filerClient FilerClient, fullFilePath string, buff []byte, chunkViews []*ChunkView, baseOffset int64) (totalRead int64, err error) { - var vids []string - for _, chunkView := range chunkViews { - vids = append(vids, VolumeId(chunkView.FileId)) - } - - vid2Locations := make(map[string]*filer_pb.Locations) - - err = filerClient.WithFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { - - glog.V(4).Infof("read fh lookup volume id locations: %v", vids) - resp, err := client.LookupVolume(ctx, &filer_pb.LookupVolumeRequest{ - VolumeIds: vids, - }) - if err != nil { - return err - } - - vid2Locations = resp.LocationsMap - - return nil - }) - - if err != nil { - return 0, fmt.Errorf("failed to lookup volume ids %v: %v", vids, err) - } - - var wg sync.WaitGroup - for _, chunkView := range chunkViews { - wg.Add(1) - go func(chunkView *ChunkView) { - defer wg.Done() - - glog.V(4).Infof("read fh reading chunk: %+v", chunkView) - - locations := vid2Locations[VolumeId(chunkView.FileId)] - if locations == nil || len(locations.Locations) == 0 { - glog.V(0).Infof("failed to locate %s", chunkView.FileId) - err = fmt.Errorf("failed to locate %s", chunkView.FileId) - return - } - - var n int64 - n, err = util.ReadUrl( - fmt.Sprintf("http://%s/%s", locations.Locations[0].Url, chunkView.FileId), - chunkView.Offset, - int(chunkView.Size), - buff[chunkView.LogicOffset-baseOffset:chunkView.LogicOffset-baseOffset+int64(chunkView.Size)], - !chunkView.IsFullChunk) - - if err != nil { - - glog.V(0).Infof("%v read http://%s/%v %v bytes: %v", fullFilePath, locations.Locations[0].Url, chunkView.FileId, n, err) - - err = fmt.Errorf("failed to read http://%s/%s: %v", - locations.Locations[0].Url, chunkView.FileId, err) - return - } - - glog.V(4).Infof("read fh read %d bytes: %+v", n, chunkView) - totalRead += n - - }(chunkView) - } - wg.Wait() - return -} - -func GetEntry(ctx context.Context, filerClient FilerClient, fullFilePath string) (entry *filer_pb.Entry, err error) { - - dir, name := FullPath(fullFilePath).DirAndName() - - err = filerClient.WithFilerClient(ctx, func(client 
filer_pb.SeaweedFilerClient) error { - - request := &filer_pb.LookupDirectoryEntryRequest{ - Directory: dir, - Name: name, - } - - glog.V(3).Infof("read %s request: %v", fullFilePath, request) - resp, err := client.LookupDirectoryEntry(ctx, request) - if err != nil { - if err == ErrNotFound || strings.Contains(err.Error(), ErrNotFound.Error()) { - return nil - } - glog.V(3).Infof("read %s attr %v: %v", fullFilePath, request, err) - return err - } - - if resp.Entry == nil { - glog.V(3).Infof("read %s entry: %v", fullFilePath, entry) - return nil - } - - entry = resp.Entry - return nil - }) - - return -} - -func ReadDirAllEntries(ctx context.Context, filerClient FilerClient, fullDirPath, prefix string, fn func(entry *filer_pb.Entry, isLast bool)) (err error) { - - err = filerClient.WithFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { - - lastEntryName := "" - - request := &filer_pb.ListEntriesRequest{ - Directory: fullDirPath, - Prefix: prefix, - StartFromFileName: lastEntryName, - Limit: math.MaxUint32, - } - - glog.V(3).Infof("read directory: %v", request) - stream, err := client.ListEntries(ctx, request) - if err != nil { - return fmt.Errorf("list %s: %v", fullDirPath, err) - } - - var prevEntry *filer_pb.Entry - for { - resp, recvErr := stream.Recv() - if recvErr != nil { - if recvErr == io.EOF { - if prevEntry != nil { - fn(prevEntry, true) - } - break - } else { - return recvErr - } - } - if prevEntry != nil { - fn(prevEntry, false) - } - prevEntry = resp.Entry - } - - return nil - - }) - - return -} diff --git a/weed/filer2/filer_delete_entry.go b/weed/filer2/filer_delete_entry.go deleted file mode 100644 index 75a09e7ef..000000000 --- a/weed/filer2/filer_delete_entry.go +++ /dev/null @@ -1,102 +0,0 @@ -package filer2 - -import ( - "context" - "fmt" - - "github.com/chrislusf/seaweedfs/weed/glog" - "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" -) - -func (f *Filer) DeleteEntryMetaAndData(ctx context.Context, p FullPath, isRecursive bool, ignoreRecursiveError, shouldDeleteChunks bool) (err error) { - if p == "/" { - return nil - } - - entry, findErr := f.FindEntry(ctx, p) - if findErr != nil { - return findErr - } - - var chunks []*filer_pb.FileChunk - chunks = append(chunks, entry.Chunks...) - if entry.IsDirectory() { - // delete the folder children, not including the folder itself - var dirChunks []*filer_pb.FileChunk - dirChunks, err = f.doBatchDeleteFolderMetaAndData(ctx, entry, isRecursive, ignoreRecursiveError, shouldDeleteChunks) - if err != nil { - return fmt.Errorf("delete directory %s: %v", p, err) - } - chunks = append(chunks, dirChunks...) 
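// ReadDirAllEntries above reports isLast to its callback by buffering one
// look-ahead element: each received entry is held back until the next Recv
// proves it was not the final one. A generic sketch of the same pattern over
// a slice; forEachWithLast is illustrative and not part of this change:

func forEachWithLast(items []string, fn func(item string, isLast bool)) {
	havePrev := false
	var prev string
	for _, item := range items {
		if havePrev {
			fn(prev, false) // a successor exists, so prev was not the last
		}
		prev, havePrev = item, true
	}
	if havePrev {
		fn(prev, true) // the buffered element is the final one
	}
}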
- f.cacheDelDirectory(string(p)) - } - // delete the file or folder - err = f.doDeleteEntryMetaAndData(ctx, entry, shouldDeleteChunks) - if err != nil { - return fmt.Errorf("delete file %s: %v", p, err) - } - - if shouldDeleteChunks { - go f.DeleteChunks(chunks) - } - - return nil -} - -func (f *Filer) doBatchDeleteFolderMetaAndData(ctx context.Context, entry *Entry, isRecursive bool, ignoreRecursiveError, shouldDeleteChunks bool) (chunks []*filer_pb.FileChunk, err error) { - - lastFileName := "" - includeLastFile := false - for { - entries, err := f.ListDirectoryEntries(ctx, entry.FullPath, lastFileName, includeLastFile, PaginationSize) - if err != nil { - glog.Errorf("list folder %s: %v", entry.FullPath, err) - return nil, fmt.Errorf("list folder %s: %v", entry.FullPath, err) - } - if lastFileName == "" && !isRecursive && len(entries) > 0 { - // only for first iteration in the loop - return nil, fmt.Errorf("fail to delete non-empty folder: %s", entry.FullPath) - } - - for _, sub := range entries { - lastFileName = sub.Name() - var dirChunks []*filer_pb.FileChunk - if sub.IsDirectory() { - dirChunks, err = f.doBatchDeleteFolderMetaAndData(ctx, sub, isRecursive, ignoreRecursiveError, shouldDeleteChunks) - } - if err != nil && !ignoreRecursiveError { - return nil, err - } - if shouldDeleteChunks { - chunks = append(chunks, dirChunks...) - } - } - - if len(entries) < PaginationSize { - break - } - } - - f.cacheDelDirectory(string(entry.FullPath)) - - glog.V(3).Infof("deleting directory %v", entry.FullPath) - - if storeDeletionErr := f.store.DeleteFolderChildren(ctx, entry.FullPath); storeDeletionErr != nil { - return nil, fmt.Errorf("filer store delete: %v", storeDeletionErr) - } - f.NotifyUpdateEvent(entry, nil, shouldDeleteChunks) - - return chunks, nil -} - -func (f *Filer) doDeleteEntryMetaAndData(ctx context.Context, entry *Entry, shouldDeleteChunks bool) (err error) { - - glog.V(3).Infof("deleting entry %v", entry.FullPath) - - if storeDeletionErr := f.store.DeleteEntry(ctx, entry.FullPath); storeDeletionErr != nil { - return fmt.Errorf("filer store delete: %v", storeDeletionErr) - } - f.NotifyUpdateEvent(entry, nil, shouldDeleteChunks) - - return nil -} diff --git a/weed/filer2/filer_deletion.go b/weed/filer2/filer_deletion.go deleted file mode 100644 index 9937685f7..000000000 --- a/weed/filer2/filer_deletion.go +++ /dev/null @@ -1,87 +0,0 @@ -package filer2 - -import ( - "time" - - "github.com/chrislusf/seaweedfs/weed/glog" - "github.com/chrislusf/seaweedfs/weed/operation" - "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" -) - -func (f *Filer) loopProcessingDeletion() { - - ticker := time.NewTicker(5 * time.Second) - - lookupFunc := func(vids []string) (map[string]operation.LookupResult, error) { - m := make(map[string]operation.LookupResult) - for _, vid := range vids { - locs, _ := f.MasterClient.GetVidLocations(vid) - var locations []operation.Location - for _, loc := range locs { - locations = append(locations, operation.Location{ - Url: loc.Url, - PublicUrl: loc.PublicUrl, - }) - } - m[vid] = operation.LookupResult{ - VolumeId: vid, - Locations: locations, - } - } - return m, nil - } - - var fileIds []string - for { - select { - case fid := <-f.fileIdDeletionChan: - fileIds = append(fileIds, fid) - if len(fileIds) >= 4096 { - glog.V(1).Infof("deleting fileIds len=%d", len(fileIds)) - operation.DeleteFilesWithLookupVolumeId(f.GrpcDialOption, fileIds, lookupFunc) - fileIds = fileIds[:0] - } - case <-ticker.C: - if len(fileIds) > 0 { - glog.V(1).Infof("timed deletion fileIds 
len=%d", len(fileIds)) - operation.DeleteFilesWithLookupVolumeId(f.GrpcDialOption, fileIds, lookupFunc) - fileIds = fileIds[:0] - } - } - } -} - -func (f *Filer) DeleteChunks(chunks []*filer_pb.FileChunk) { - for _, chunk := range chunks { - f.fileIdDeletionChan <- chunk.GetFileIdString() - } -} - -// DeleteFileByFileId direct delete by file id. -// Only used when the fileId is not being managed by snapshots. -func (f *Filer) DeleteFileByFileId(fileId string) { - f.fileIdDeletionChan <- fileId -} - -func (f *Filer) deleteChunksIfNotNew(oldEntry, newEntry *Entry) { - - if oldEntry == nil { - return - } - if newEntry == nil { - f.DeleteChunks(oldEntry.Chunks) - } - - var toDelete []*filer_pb.FileChunk - newChunkIds := make(map[string]bool) - for _, newChunk := range newEntry.Chunks { - newChunkIds[newChunk.GetFileIdString()] = true - } - - for _, oldChunk := range oldEntry.Chunks { - if _, found := newChunkIds[oldChunk.GetFileIdString()]; !found { - toDelete = append(toDelete, oldChunk) - } - } - f.DeleteChunks(toDelete) -} diff --git a/weed/filer2/filer_notify.go b/weed/filer2/filer_notify.go deleted file mode 100644 index c37381116..000000000 --- a/weed/filer2/filer_notify.go +++ /dev/null @@ -1,39 +0,0 @@ -package filer2 - -import ( - "github.com/chrislusf/seaweedfs/weed/glog" - "github.com/chrislusf/seaweedfs/weed/notification" - "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" -) - -func (f *Filer) NotifyUpdateEvent(oldEntry, newEntry *Entry, deleteChunks bool) { - var key string - if oldEntry != nil { - key = string(oldEntry.FullPath) - } else if newEntry != nil { - key = string(newEntry.FullPath) - } else { - return - } - - if notification.Queue != nil { - - glog.V(3).Infof("notifying entry update %v", key) - - newParentPath := "" - if newEntry != nil { - newParentPath, _ = newEntry.FullPath.DirAndName() - } - - notification.Queue.SendMessage( - key, - &filer_pb.EventNotification{ - OldEntry: oldEntry.ToProtoEntry(), - NewEntry: newEntry.ToProtoEntry(), - DeleteChunks: deleteChunks, - NewParentPath: newParentPath, - }, - ) - - } -} diff --git a/weed/filer2/filerstore.go b/weed/filer2/filerstore.go deleted file mode 100644 index 0bb0bd611..000000000 --- a/weed/filer2/filerstore.go +++ /dev/null @@ -1,138 +0,0 @@ -package filer2 - -import ( - "context" - "errors" - "time" - - "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" - "github.com/chrislusf/seaweedfs/weed/stats" - "github.com/chrislusf/seaweedfs/weed/util" -) - -type FilerStore interface { - // GetName gets the name to locate the configuration in filer.toml file - GetName() string - // Initialize initializes the file store - Initialize(configuration util.Configuration) error - InsertEntry(context.Context, *Entry) error - UpdateEntry(context.Context, *Entry) (err error) - // err == filer2.ErrNotFound if not found - FindEntry(context.Context, FullPath) (entry *Entry, err error) - DeleteEntry(context.Context, FullPath) (err error) - DeleteFolderChildren(context.Context, FullPath) (err error) - ListDirectoryEntries(ctx context.Context, dirPath FullPath, startFileName string, includeStartFile bool, limit int) ([]*Entry, error) - - BeginTransaction(ctx context.Context) (context.Context, error) - CommitTransaction(ctx context.Context) error - RollbackTransaction(ctx context.Context) error -} - -var ErrNotFound = errors.New("filer: no entry is found in filer store") - -type FilerStoreWrapper struct { - actualStore FilerStore -} - -func NewFilerStoreWrapper(store FilerStore) *FilerStoreWrapper { - if innerStore, ok := 
store.(*FilerStoreWrapper); ok { - return innerStore - } - return &FilerStoreWrapper{ - actualStore: store, - } -} - -func (fsw *FilerStoreWrapper) GetName() string { - return fsw.actualStore.GetName() -} - -func (fsw *FilerStoreWrapper) Initialize(configuration util.Configuration) error { - return fsw.actualStore.Initialize(configuration) -} - -func (fsw *FilerStoreWrapper) InsertEntry(ctx context.Context, entry *Entry) error { - stats.FilerStoreCounter.WithLabelValues(fsw.actualStore.GetName(), "insert").Inc() - start := time.Now() - defer func() { - stats.FilerStoreHistogram.WithLabelValues(fsw.actualStore.GetName(), "insert").Observe(time.Since(start).Seconds()) - }() - - filer_pb.BeforeEntrySerialization(entry.Chunks) - return fsw.actualStore.InsertEntry(ctx, entry) -} - -func (fsw *FilerStoreWrapper) UpdateEntry(ctx context.Context, entry *Entry) error { - stats.FilerStoreCounter.WithLabelValues(fsw.actualStore.GetName(), "update").Inc() - start := time.Now() - defer func() { - stats.FilerStoreHistogram.WithLabelValues(fsw.actualStore.GetName(), "update").Observe(time.Since(start).Seconds()) - }() - - filer_pb.BeforeEntrySerialization(entry.Chunks) - return fsw.actualStore.UpdateEntry(ctx, entry) -} - -func (fsw *FilerStoreWrapper) FindEntry(ctx context.Context, fp FullPath) (entry *Entry, err error) { - stats.FilerStoreCounter.WithLabelValues(fsw.actualStore.GetName(), "find").Inc() - start := time.Now() - defer func() { - stats.FilerStoreHistogram.WithLabelValues(fsw.actualStore.GetName(), "find").Observe(time.Since(start).Seconds()) - }() - - entry, err = fsw.actualStore.FindEntry(ctx, fp) - if err != nil { - return nil, err - } - filer_pb.AfterEntryDeserialization(entry.Chunks) - return -} - -func (fsw *FilerStoreWrapper) DeleteEntry(ctx context.Context, fp FullPath) (err error) { - stats.FilerStoreCounter.WithLabelValues(fsw.actualStore.GetName(), "delete").Inc() - start := time.Now() - defer func() { - stats.FilerStoreHistogram.WithLabelValues(fsw.actualStore.GetName(), "delete").Observe(time.Since(start).Seconds()) - }() - - return fsw.actualStore.DeleteEntry(ctx, fp) -} - -func (fsw *FilerStoreWrapper) DeleteFolderChildren(ctx context.Context, fp FullPath) (err error) { - stats.FilerStoreCounter.WithLabelValues(fsw.actualStore.GetName(), "deleteFolderChildren").Inc() - start := time.Now() - defer func() { - stats.FilerStoreHistogram.WithLabelValues(fsw.actualStore.GetName(), "deleteFolderChildren").Observe(time.Since(start).Seconds()) - }() - - return fsw.actualStore.DeleteFolderChildren(ctx, fp) -} - -func (fsw *FilerStoreWrapper) ListDirectoryEntries(ctx context.Context, dirPath FullPath, startFileName string, includeStartFile bool, limit int) ([]*Entry, error) { - stats.FilerStoreCounter.WithLabelValues(fsw.actualStore.GetName(), "list").Inc() - start := time.Now() - defer func() { - stats.FilerStoreHistogram.WithLabelValues(fsw.actualStore.GetName(), "list").Observe(time.Since(start).Seconds()) - }() - - entries, err := fsw.actualStore.ListDirectoryEntries(ctx, dirPath, startFileName, includeStartFile, limit) - if err != nil { - return nil, err - } - for _, entry := range entries { - filer_pb.AfterEntryDeserialization(entry.Chunks) - } - return entries, err -} - -func (fsw *FilerStoreWrapper) BeginTransaction(ctx context.Context) (context.Context, error) { - return fsw.actualStore.BeginTransaction(ctx) -} - -func (fsw *FilerStoreWrapper) CommitTransaction(ctx context.Context) error { - return fsw.actualStore.CommitTransaction(ctx) -} - -func (fsw *FilerStoreWrapper) 
RollbackTransaction(ctx context.Context) error { - return fsw.actualStore.RollbackTransaction(ctx) -} diff --git a/weed/filer2/mysql/mysql_store.go b/weed/filer2/mysql/mysql_store.go deleted file mode 100644 index d1b06ece5..000000000 --- a/weed/filer2/mysql/mysql_store.go +++ /dev/null @@ -1,74 +0,0 @@ -package mysql - -import ( - "database/sql" - "fmt" - - "github.com/chrislusf/seaweedfs/weed/filer2" - "github.com/chrislusf/seaweedfs/weed/filer2/abstract_sql" - "github.com/chrislusf/seaweedfs/weed/util" - _ "github.com/go-sql-driver/mysql" -) - -const ( - CONNECTION_URL_PATTERN = "%s:%s@tcp(%s:%d)/%s?charset=utf8" -) - -func init() { - filer2.Stores = append(filer2.Stores, &MysqlStore{}) -} - -type MysqlStore struct { - abstract_sql.AbstractSqlStore -} - -func (store *MysqlStore) GetName() string { - return "mysql" -} - -func (store *MysqlStore) Initialize(configuration util.Configuration) (err error) { - return store.initialize( - configuration.GetString("username"), - configuration.GetString("password"), - configuration.GetString("hostname"), - configuration.GetInt("port"), - configuration.GetString("database"), - configuration.GetInt("connection_max_idle"), - configuration.GetInt("connection_max_open"), - configuration.GetBool("interpolateParams"), - ) -} - -func (store *MysqlStore) initialize(user, password, hostname string, port int, database string, maxIdle, maxOpen int, - interpolateParams bool) (err error) { - - store.SqlInsert = "INSERT INTO filemeta (dirhash,name,directory,meta) VALUES(?,?,?,?)" - store.SqlUpdate = "UPDATE filemeta SET meta=? WHERE dirhash=? AND name=? AND directory=?" - store.SqlFind = "SELECT meta FROM filemeta WHERE dirhash=? AND name=? AND directory=?" - store.SqlDelete = "DELETE FROM filemeta WHERE dirhash=? AND name=? AND directory=?" - store.SqlDeleteFolderChildren = "DELETE FROM filemeta WHERE dirhash=? AND directory=?" - store.SqlListExclusive = "SELECT NAME, meta FROM filemeta WHERE dirhash=? AND name>? AND directory=? ORDER BY NAME ASC LIMIT ?" - store.SqlListInclusive = "SELECT NAME, meta FROM filemeta WHERE dirhash=? AND name>=? AND directory=? ORDER BY NAME ASC LIMIT ?" 
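// The SqlListExclusive/SqlListInclusive pair above implements cursor paging:
// "name>?" resumes strictly after the last name seen, while "name>=?" is used
// when the start name itself should be included (the first page). A minimal
// sketch of the paging loop a caller could build on top of such a store;
// listPage is a hypothetical stand-in for ListDirectoryEntries, not the
// actual API:

func listAll(listPage func(start string, inclusive bool, limit int) []string) (all []string) {
	const pageSize = 1024
	start, inclusive := "", true // the first page starts at the beginning, inclusively
	for {
		page := listPage(start, inclusive, pageSize)
		all = append(all, page...)
		if len(page) < pageSize {
			return all // a short page means the listing is exhausted
		}
		start, inclusive = page[len(page)-1], false // resume strictly after the last name
	}
}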
- - sqlUrl := fmt.Sprintf(CONNECTION_URL_PATTERN, user, password, hostname, port, database) - if interpolateParams { - sqlUrl += "&interpolateParams=true" - } - - var dbErr error - store.DB, dbErr = sql.Open("mysql", sqlUrl) - if dbErr != nil { - store.DB.Close() - store.DB = nil - return fmt.Errorf("can not connect to %s error:%v", sqlUrl, err) - } - - store.DB.SetMaxIdleConns(maxIdle) - store.DB.SetMaxOpenConns(maxOpen) - - if err = store.DB.Ping(); err != nil { - return fmt.Errorf("connect to %s error:%v", sqlUrl, err) - } - - return nil -} diff --git a/weed/filer2/postgres/postgres_store.go b/weed/filer2/postgres/postgres_store.go deleted file mode 100644 index 3ec000fe0..000000000 --- a/weed/filer2/postgres/postgres_store.go +++ /dev/null @@ -1,69 +0,0 @@ -package postgres - -import ( - "database/sql" - "fmt" - - "github.com/chrislusf/seaweedfs/weed/filer2" - "github.com/chrislusf/seaweedfs/weed/filer2/abstract_sql" - "github.com/chrislusf/seaweedfs/weed/util" - _ "github.com/lib/pq" -) - -const ( - CONNECTION_URL_PATTERN = "host=%s port=%d user=%s password=%s dbname=%s sslmode=%s connect_timeout=30" -) - -func init() { - filer2.Stores = append(filer2.Stores, &PostgresStore{}) -} - -type PostgresStore struct { - abstract_sql.AbstractSqlStore -} - -func (store *PostgresStore) GetName() string { - return "postgres" -} - -func (store *PostgresStore) Initialize(configuration util.Configuration) (err error) { - return store.initialize( - configuration.GetString("username"), - configuration.GetString("password"), - configuration.GetString("hostname"), - configuration.GetInt("port"), - configuration.GetString("database"), - configuration.GetString("sslmode"), - configuration.GetInt("connection_max_idle"), - configuration.GetInt("connection_max_open"), - ) -} - -func (store *PostgresStore) initialize(user, password, hostname string, port int, database, sslmode string, maxIdle, maxOpen int) (err error) { - - store.SqlInsert = "INSERT INTO filemeta (dirhash,name,directory,meta) VALUES($1,$2,$3,$4)" - store.SqlUpdate = "UPDATE filemeta SET meta=$1 WHERE dirhash=$2 AND name=$3 AND directory=$4" - store.SqlFind = "SELECT meta FROM filemeta WHERE dirhash=$1 AND name=$2 AND directory=$3" - store.SqlDelete = "DELETE FROM filemeta WHERE dirhash=$1 AND name=$2 AND directory=$3" - store.SqlDeleteFolderChildren = "DELETE FROM filemeta WHERE dirhash=$1 AND directory=$2" - store.SqlListExclusive = "SELECT NAME, meta FROM filemeta WHERE dirhash=$1 AND name>$2 AND directory=$3 ORDER BY NAME ASC LIMIT $4" - store.SqlListInclusive = "SELECT NAME, meta FROM filemeta WHERE dirhash=$1 AND name>=$2 AND directory=$3 ORDER BY NAME ASC LIMIT $4" - - sqlUrl := fmt.Sprintf(CONNECTION_URL_PATTERN, hostname, port, user, password, database, sslmode) - var dbErr error - store.DB, dbErr = sql.Open("postgres", sqlUrl) - if dbErr != nil { - store.DB.Close() - store.DB = nil - return fmt.Errorf("can not connect to %s error:%v", sqlUrl, err) - } - - store.DB.SetMaxIdleConns(maxIdle) - store.DB.SetMaxOpenConns(maxOpen) - - if err = store.DB.Ping(); err != nil { - return fmt.Errorf("connect to %s error:%v", sqlUrl, err) - } - - return nil -} diff --git a/weed/filer2/stream.go b/weed/filer2/stream.go deleted file mode 100644 index 01b87cad1..000000000 --- a/weed/filer2/stream.go +++ /dev/null @@ -1,41 +0,0 @@ -package filer2 - -import ( - "io" - - "github.com/chrislusf/seaweedfs/weed/glog" - "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" - "github.com/chrislusf/seaweedfs/weed/util" - 
"github.com/chrislusf/seaweedfs/weed/wdclient" -) - -func StreamContent(masterClient *wdclient.MasterClient, w io.Writer, chunks []*filer_pb.FileChunk, offset int64, size int) error { - - chunkViews := ViewFromChunks(chunks, offset, size) - - fileId2Url := make(map[string]string) - - for _, chunkView := range chunkViews { - - urlString, err := masterClient.LookupFileId(chunkView.FileId) - if err != nil { - glog.V(1).Infof("operation LookupFileId %s failed, err: %v", chunkView.FileId, err) - return err - } - fileId2Url[chunkView.FileId] = urlString - } - - for _, chunkView := range chunkViews { - urlString := fileId2Url[chunkView.FileId] - _, err := util.ReadUrlAsStream(urlString, chunkView.Offset, int(chunkView.Size), func(data []byte) { - w.Write(data) - }) - if err != nil { - glog.V(1).Infof("read %s failed, err: %v", chunkView.FileId, err) - return err - } - } - - return nil - -} diff --git a/weed/filer2/tikv/tikv_store.go b/weed/filer2/tikv/tikv_store.go deleted file mode 100644 index 4eb8cb90d..000000000 --- a/weed/filer2/tikv/tikv_store.go +++ /dev/null @@ -1,251 +0,0 @@ -// +build !386 -// +build !arm - -package tikv - -import ( - "bytes" - "context" - "crypto/md5" - "fmt" - "io" - - "github.com/chrislusf/seaweedfs/weed/filer2" - "github.com/chrislusf/seaweedfs/weed/glog" - weed_util "github.com/chrislusf/seaweedfs/weed/util" - - "github.com/pingcap/tidb/kv" - "github.com/pingcap/tidb/store/tikv" -) - -func init() { - filer2.Stores = append(filer2.Stores, &TikvStore{}) -} - -type TikvStore struct { - store kv.Storage -} - -func (store *TikvStore) GetName() string { - return "tikv" -} - -func (store *TikvStore) Initialize(configuration weed_util.Configuration) (err error) { - pdAddr := configuration.GetString("pdAddress") - return store.initialize(pdAddr) -} - -func (store *TikvStore) initialize(pdAddr string) (err error) { - glog.Infof("filer store tikv pd address: %s", pdAddr) - - driver := tikv.Driver{} - - store.store, err = driver.Open(fmt.Sprintf("tikv://%s", pdAddr)) - - if err != nil { - return fmt.Errorf("open tikv %s : %v", pdAddr, err) - } - - return -} - -func (store *TikvStore) BeginTransaction(ctx context.Context) (context.Context, error) { - tx, err := store.store.Begin() - if err != nil { - return ctx, err - } - return context.WithValue(ctx, "tx", tx), nil -} -func (store *TikvStore) CommitTransaction(ctx context.Context) error { - tx, ok := ctx.Value("tx").(kv.Transaction) - if ok { - return tx.Commit(ctx) - } - return nil -} -func (store *TikvStore) RollbackTransaction(ctx context.Context) error { - tx, ok := ctx.Value("tx").(kv.Transaction) - if ok { - return tx.Rollback() - } - return nil -} - -func (store *TikvStore) getTx(ctx context.Context) kv.Transaction { - if tx, ok := ctx.Value("tx").(kv.Transaction); ok { - return tx - } - return nil -} - -func (store *TikvStore) InsertEntry(ctx context.Context, entry *filer2.Entry) (err error) { - dir, name := entry.DirAndName() - key := genKey(dir, name) - - value, err := entry.EncodeAttributesAndChunks() - if err != nil { - return fmt.Errorf("encoding %s %+v: %v", entry.FullPath, entry.Attr, err) - } - - err = store.getTx(ctx).Set(key, value) - - if err != nil { - return fmt.Errorf("persisting %s : %v", entry.FullPath, err) - } - - // println("saved", entry.FullPath, "chunks", len(entry.Chunks)) - - return nil -} - -func (store *TikvStore) UpdateEntry(ctx context.Context, entry *filer2.Entry) (err error) { - - return store.InsertEntry(ctx, entry) -} - -func (store *TikvStore) FindEntry(ctx context.Context, fullpath 
filer2.FullPath) (entry *filer2.Entry, err error) { - dir, name := fullpath.DirAndName() - key := genKey(dir, name) - - data, err := store.getTx(ctx).Get(ctx, key) - - if err == kv.ErrNotExist { - return nil, filer2.ErrNotFound - } - if err != nil { - return nil, fmt.Errorf("get %s : %v", entry.FullPath, err) - } - - entry = &filer2.Entry{ - FullPath: fullpath, - } - err = entry.DecodeAttributesAndChunks(data) - if err != nil { - return entry, fmt.Errorf("decode %s : %v", entry.FullPath, err) - } - - // println("read", entry.FullPath, "chunks", len(entry.Chunks), "data", len(data), string(data)) - - return entry, nil -} - -func (store *TikvStore) DeleteEntry(ctx context.Context, fullpath filer2.FullPath) (err error) { - dir, name := fullpath.DirAndName() - key := genKey(dir, name) - - err = store.getTx(ctx).Delete(key) - if err != nil { - return fmt.Errorf("delete %s : %v", fullpath, err) - } - - return nil -} - -func (store *TikvStore) DeleteFolderChildren(ctx context.Context, fullpath filer2.FullPath) (err error) { - - directoryPrefix := genDirectoryKeyPrefix(fullpath, "") - - tx := store.getTx(ctx) - - iter, err := tx.Iter(directoryPrefix, nil) - if err != nil { - return fmt.Errorf("deleteFolderChildren %s: %v", fullpath, err) - } - defer iter.Close() - for iter.Valid() { - key := iter.Key() - if !bytes.HasPrefix(key, directoryPrefix) { - break - } - fileName := getNameFromKey(key) - if fileName == "" { - iter.Next() - continue - } - - if err = tx.Delete(genKey(string(fullpath), fileName)); err != nil { - return fmt.Errorf("delete %s : %v", fullpath, err) - } - - iter.Next() - } - - return nil -} - -func (store *TikvStore) ListDirectoryEntries(ctx context.Context, fullpath filer2.FullPath, startFileName string, inclusive bool, - limit int) (entries []*filer2.Entry, err error) { - - directoryPrefix := genDirectoryKeyPrefix(fullpath, "") - lastFileStart := genDirectoryKeyPrefix(fullpath, startFileName) - - iter, err := store.getTx(ctx).Iter(lastFileStart, nil) - if err != nil { - return nil, fmt.Errorf("list %s: %v", fullpath, err) - } - defer iter.Close() - for iter.Valid() { - key := iter.Key() - if !bytes.HasPrefix(key, directoryPrefix) { - break - } - fileName := getNameFromKey(key) - if fileName == "" { - iter.Next() - continue - } - if fileName == startFileName && !inclusive { - iter.Next() - continue - } - limit-- - if limit < 0 { - break - } - entry := &filer2.Entry{ - FullPath: filer2.NewFullPath(string(fullpath), fileName), - } - - // println("list", entry.FullPath, "chunks", len(entry.Chunks)) - - if decodeErr := entry.DecodeAttributesAndChunks(iter.Value()); decodeErr != nil { - err = decodeErr - glog.V(0).Infof("list %s : %v", entry.FullPath, err) - break - } - entries = append(entries, entry) - iter.Next() - } - - return entries, err -} - -func genKey(dirPath, fileName string) (key []byte) { - key = hashToBytes(dirPath) - key = append(key, []byte(fileName)...) - return key -} - -func genDirectoryKeyPrefix(fullpath filer2.FullPath, startFileName string) (keyPrefix []byte) { - keyPrefix = hashToBytes(string(fullpath)) - if len(startFileName) > 0 { - keyPrefix = append(keyPrefix, []byte(startFileName)...) 
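// Keys in this store are laid out as md5(directory) || fileName: the fixed
// 16-byte hash gives each directory its own contiguous key range, so listing
// becomes a prefix scan and getNameFromKey recovers the file name by slicing
// off md5.Size bytes. A small round-trip check of that layout, calling the
// genKey/getNameFromKey helpers defined in this file (illustrative only):

func keyRoundTrip() bool {
	key := genKey("/some/dir", "report.txt") // 16-byte hash followed by the name
	return getNameFromKey(key) == "report.txt" // true: the name survives the trip
}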
- } - return keyPrefix -} - -func getNameFromKey(key []byte) string { - - return string(key[md5.Size:]) - -} - -// hash directory -func hashToBytes(dir string) []byte { - h := md5.New() - io.WriteString(h, dir) - - b := h.Sum(nil) - - return b -} diff --git a/weed/filer2/tikv/tikv_store_unsupported.go b/weed/filer2/tikv/tikv_store_unsupported.go deleted file mode 100644 index 36de2d974..000000000 --- a/weed/filer2/tikv/tikv_store_unsupported.go +++ /dev/null @@ -1,65 +0,0 @@ -// +build 386 arm - -package tikv - -import ( - "context" - "fmt" - - "github.com/chrislusf/seaweedfs/weed/filer2" - weed_util "github.com/chrislusf/seaweedfs/weed/util" -) - -func init() { - filer2.Stores = append(filer2.Stores, &TikvStore{}) -} - -type TikvStore struct { -} - -func (store *TikvStore) GetName() string { - return "tikv" -} - -func (store *TikvStore) Initialize(configuration weed_util.Configuration) (err error) { - return fmt.Errorf("not implemented for 32 bit computers") -} - -func (store *TikvStore) initialize(pdAddr string) (err error) { - return fmt.Errorf("not implemented for 32 bit computers") -} - -func (store *TikvStore) BeginTransaction(ctx context.Context) (context.Context, error) { - return nil, fmt.Errorf("not implemented for 32 bit computers") -} -func (store *TikvStore) CommitTransaction(ctx context.Context) error { - return fmt.Errorf("not implemented for 32 bit computers") -} -func (store *TikvStore) RollbackTransaction(ctx context.Context) error { - return fmt.Errorf("not implemented for 32 bit computers") -} - -func (store *TikvStore) InsertEntry(ctx context.Context, entry *filer2.Entry) (err error) { - return fmt.Errorf("not implemented for 32 bit computers") -} - -func (store *TikvStore) UpdateEntry(ctx context.Context, entry *filer2.Entry) (err error) { - return fmt.Errorf("not implemented for 32 bit computers") -} - -func (store *TikvStore) FindEntry(ctx context.Context, fullpath filer2.FullPath) (entry *filer2.Entry, err error) { - return nil, fmt.Errorf("not implemented for 32 bit computers") -} - -func (store *TikvStore) DeleteEntry(ctx context.Context, fullpath filer2.FullPath) (err error) { - return fmt.Errorf("not implemented for 32 bit computers") -} - -func (store *TikvStore) DeleteFolderChildren(ctx context.Context, fullpath filer2.FullPath) (err error) { - return fmt.Errorf("not implemented for 32 bit computers") -} - -func (store *TikvStore) ListDirectoryEntries(ctx context.Context, fullpath filer2.FullPath, startFileName string, inclusive bool, - limit int) (entries []*filer2.Entry, err error) { - return nil, fmt.Errorf("not implemented for 32 bit computers") -} diff --git a/weed/filesys/dir.go b/weed/filesys/dir.go index 7b24a1ec5..6ee20974b 100644 --- a/weed/filesys/dir.go +++ b/weed/filesys/dir.go @@ -1,27 +1,39 @@ package filesys import ( + "bytes" "context" + "math" "os" - "path" + "strings" + "syscall" "time" - "github.com/chrislusf/seaweedfs/weed/filer2" - "github.com/chrislusf/seaweedfs/weed/glog" - "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" "github.com/seaweedfs/fuse" "github.com/seaweedfs/fuse/fs" + + "github.com/chrislusf/seaweedfs/weed/filer" + "github.com/chrislusf/seaweedfs/weed/filesys/meta_cache" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/util" ) type Dir struct { - Path string - wfs *WFS - entry *filer_pb.Entry + name string + wfs *WFS + entry *filer_pb.Entry + parent *Dir + id uint64 } var _ = fs.Node(&Dir{}) + +//var _ = fs.NodeIdentifier(&Dir{}) var _ 
= fs.NodeCreater(&Dir{}) +var _ = fs.NodeMknoder(&Dir{}) var _ = fs.NodeMkdirer(&Dir{}) +var _ = fs.NodeFsyncer(&Dir{}) var _ = fs.NodeRequestLookuper(&Dir{}) var _ = fs.HandleReadDirAller(&Dir{}) var _ = fs.NodeRemover(&Dir{}) @@ -31,44 +43,57 @@ var _ = fs.NodeGetxattrer(&Dir{}) var _ = fs.NodeSetxattrer(&Dir{}) var _ = fs.NodeRemovexattrer(&Dir{}) var _ = fs.NodeListxattrer(&Dir{}) +var _ = fs.NodeForgetter(&Dir{}) -func (dir *Dir) Attr(ctx context.Context, attr *fuse.Attr) error { +func (dir *Dir) xId() uint64 { + return dir.id +} - glog.V(3).Infof("dir Attr %s", dir.Path) +func (dir *Dir) Attr(ctx context.Context, attr *fuse.Attr) error { // https://github.com/bazil/fuse/issues/196 attr.Valid = time.Second - if dir.Path == dir.wfs.option.FilerMountRootPath { + if dir.FullPath() == dir.wfs.option.FilerMountRootPath { dir.setRootDirAttributes(attr) + glog.V(3).Infof("root dir Attr %s, attr: %+v", dir.FullPath(), attr) return nil } - if err := dir.maybeLoadEntry(ctx); err != nil { + entry, err := dir.maybeLoadEntry() + if err != nil { + glog.V(3).Infof("dir Attr %s,err: %+v", dir.FullPath(), err) return err } - attr.Mode = os.FileMode(dir.entry.Attributes.FileMode) | os.ModeDir - attr.Mtime = time.Unix(dir.entry.Attributes.Mtime, 0) - attr.Ctime = time.Unix(dir.entry.Attributes.Crtime, 0) - attr.Gid = dir.entry.Attributes.Gid - attr.Uid = dir.entry.Attributes.Uid + // attr.Inode = dir.Id() + attr.Mode = os.FileMode(entry.Attributes.FileMode) | os.ModeDir + attr.Mtime = time.Unix(entry.Attributes.Mtime, 0) + attr.Crtime = time.Unix(entry.Attributes.Crtime, 0) + attr.Gid = entry.Attributes.Gid + attr.Uid = entry.Attributes.Uid + + glog.V(4).Infof("dir Attr %s, attr: %+v", dir.FullPath(), attr) return nil } func (dir *Dir) Getxattr(ctx context.Context, req *fuse.GetxattrRequest, resp *fuse.GetxattrResponse) error { - glog.V(4).Infof("dir Getxattr %s", dir.Path) + glog.V(4).Infof("dir Getxattr %s", dir.FullPath()) - if err := dir.maybeLoadEntry(ctx); err != nil { + entry, err := dir.maybeLoadEntry() + if err != nil { return err } - return getxattr(dir.entry, req, resp) + return getxattr(entry, req, resp) } func (dir *Dir) setRootDirAttributes(attr *fuse.Attr) { + // attr.Inode = 1 // filer2.FullPath(dir.Path).AsInode() + attr.Valid = time.Second + attr.Inode = 1 // dir.Id() attr.Uid = dir.wfs.option.MountUid attr.Gid = dir.wfs.option.MountGid attr.Mode = dir.wfs.option.MountMode @@ -76,84 +101,178 @@ func (dir *Dir) setRootDirAttributes(attr *fuse.Attr) { attr.Ctime = dir.wfs.option.MountCtime attr.Mtime = dir.wfs.option.MountMtime attr.Atime = dir.wfs.option.MountMtime + attr.BlockSize = blockSize +} + +func (dir *Dir) Fsync(ctx context.Context, req *fuse.FsyncRequest) error { + // fsync works at OS level + // write the file chunks to the filerGrpcAddress + glog.V(3).Infof("dir %s fsync %+v", dir.FullPath(), req) + + return nil } -func (dir *Dir) newFile(name string, entry *filer_pb.Entry) *File { +func (dir *Dir) newFile(name string) fs.Node { + + fileFullPath := util.NewFullPath(dir.FullPath(), name) + fileId := fileFullPath.AsInode() + dir.wfs.handlesLock.Lock() + existingHandle, found := dir.wfs.handles[fileId] + dir.wfs.handlesLock.Unlock() + + if found { + glog.V(4).Infof("newFile found opened file handle: %+v", fileFullPath) + return existingHandle.f + } return &File{ - Name: name, - dir: dir, - wfs: dir.wfs, - entry: entry, - entryViewCache: nil, + Name: name, + dir: dir, + wfs: dir.wfs, + id: fileId, } } +func (dir *Dir) newDirectory(fullpath util.FullPath) fs.Node { + + return 
&Dir{name: fullpath.Name(), wfs: dir.wfs, parent: dir, id: fullpath.AsInode()} + +} + func (dir *Dir) Create(ctx context.Context, req *fuse.CreateRequest, resp *fuse.CreateResponse) (fs.Node, fs.Handle, error) { + exclusive := req.Flags&fuse.OpenExclusive != 0 + isDirectory := req.Mode&os.ModeDir > 0 + + if exclusive || isDirectory { + _, err := dir.doCreateEntry(req.Name, req.Mode, req.Uid, req.Gid, exclusive) + if err != nil { + return nil, nil, err + } + } + var node fs.Node + if isDirectory { + node = dir.newDirectory(util.NewFullPath(dir.FullPath(), req.Name)) + return node, nil, nil + } + + node = dir.newFile(req.Name) + file := node.(*File) + file.entry = &filer_pb.Entry{ + Name: req.Name, + IsDirectory: req.Mode&os.ModeDir > 0, + Attributes: &filer_pb.FuseAttributes{ + Mtime: time.Now().Unix(), + Crtime: time.Now().Unix(), + FileMode: uint32(req.Mode &^ dir.wfs.option.Umask), + Uid: req.Uid, + Gid: req.Gid, + Collection: dir.wfs.option.Collection, + Replication: dir.wfs.option.Replication, + TtlSec: dir.wfs.option.TtlSec, + }, + } + file.dirtyMetadata = true + fh := dir.wfs.AcquireHandle(file, req.Uid, req.Gid) + return file, fh, nil + +} + +func (dir *Dir) Mknod(ctx context.Context, req *fuse.MknodRequest) (fs.Node, error) { + + _, err := dir.doCreateEntry(req.Name, req.Mode, req.Uid, req.Gid, false) + + if err != nil { + return nil, err + } + var node fs.Node + node = dir.newFile(req.Name) + return node, nil +} + +func (dir *Dir) doCreateEntry(name string, mode os.FileMode, uid, gid uint32, exclusive bool) (*filer_pb.CreateEntryRequest, error) { + dirFullPath := dir.FullPath() request := &filer_pb.CreateEntryRequest{ - Directory: dir.Path, + Directory: dirFullPath, Entry: &filer_pb.Entry{ - Name: req.Name, - IsDirectory: req.Mode&os.ModeDir > 0, + Name: name, + IsDirectory: mode&os.ModeDir > 0, Attributes: &filer_pb.FuseAttributes{ Mtime: time.Now().Unix(), Crtime: time.Now().Unix(), - FileMode: uint32(req.Mode &^ dir.wfs.option.Umask), - Uid: req.Uid, - Gid: req.Gid, + FileMode: uint32(mode &^ dir.wfs.option.Umask), + Uid: uid, + Gid: gid, Collection: dir.wfs.option.Collection, Replication: dir.wfs.option.Replication, TtlSec: dir.wfs.option.TtlSec, }, }, + OExcl: exclusive, + Signatures: []int32{dir.wfs.signature}, } - glog.V(1).Infof("create: %v", request) + glog.V(1).Infof("create %s/%s", dirFullPath, name) - if request.Entry.IsDirectory { - if err := dir.wfs.WithFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { - if _, err := client.CreateEntry(ctx, request); err != nil { - glog.V(0).Infof("create %s/%s: %v", dir.Path, req.Name, err) - return fuse.EIO + err := dir.wfs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { + + dir.wfs.mapPbIdFromLocalToFiler(request.Entry) + defer dir.wfs.mapPbIdFromFilerToLocal(request.Entry) + + if err := filer_pb.CreateEntry(client, request); err != nil { + if strings.Contains(err.Error(), "EEXIST") { + return fuse.EEXIST } - return nil - }); err != nil { - return nil, nil, err + glog.V(0).Infof("create %s/%s: %v", dirFullPath, name, err) + return fuse.EIO } - } - file := dir.newFile(req.Name, request.Entry) - if !request.Entry.IsDirectory { - file.isOpen = true - } - fh := dir.wfs.AcquireHandle(file, req.Uid, req.Gid) - fh.dirtyMetadata = true - return file, fh, nil + if err := dir.wfs.metaCache.InsertEntry(context.Background(), filer.FromPbEntry(request.Directory, request.Entry)); err != nil { + glog.Errorf("local InsertEntry dir %s/%s: %v", dirFullPath, name, err) + return fuse.EIO + } + return nil + }) + return 
request, err } func (dir *Dir) Mkdir(ctx context.Context, req *fuse.MkdirRequest) (fs.Node, error) { - err := dir.wfs.WithFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { + glog.V(4).Infof("mkdir %s: %s", dir.FullPath(), req.Name) + + newEntry := &filer_pb.Entry{ + Name: req.Name, + IsDirectory: true, + Attributes: &filer_pb.FuseAttributes{ + Mtime: time.Now().Unix(), + Crtime: time.Now().Unix(), + FileMode: uint32(req.Mode &^ dir.wfs.option.Umask), + Uid: req.Uid, + Gid: req.Gid, + }, + } + + dirFullPath := dir.FullPath() + + err := dir.wfs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { + + dir.wfs.mapPbIdFromLocalToFiler(newEntry) + defer dir.wfs.mapPbIdFromFilerToLocal(newEntry) request := &filer_pb.CreateEntryRequest{ - Directory: dir.Path, - Entry: &filer_pb.Entry{ - Name: req.Name, - IsDirectory: true, - Attributes: &filer_pb.FuseAttributes{ - Mtime: time.Now().Unix(), - Crtime: time.Now().Unix(), - FileMode: uint32(req.Mode &^ dir.wfs.option.Umask), - Uid: req.Uid, - Gid: req.Gid, - }, - }, + Directory: dirFullPath, + Entry: newEntry, + Signatures: []int32{dir.wfs.signature}, } glog.V(1).Infof("mkdir: %v", request) - if _, err := client.CreateEntry(ctx, request); err != nil { - glog.V(0).Infof("mkdir %s/%s: %v", dir.Path, req.Name, err) + if err := filer_pb.CreateEntry(client, request); err != nil { + glog.V(0).Infof("mkdir %s/%s: %v", dirFullPath, req.Name, err) + return err + } + + if err := dir.wfs.metaCache.InsertEntry(context.Background(), filer.FromPbEntry(request.Directory, request.Entry)); err != nil { + glog.Errorf("local mkdir dir %s/%s: %v", dirFullPath, req.Name, err) return fuse.EIO } @@ -161,221 +280,258 @@ func (dir *Dir) Mkdir(ctx context.Context, req *fuse.MkdirRequest) (fs.Node, err }) if err == nil { - node := &Dir{Path: path.Join(dir.Path, req.Name), wfs: dir.wfs} + node := dir.newDirectory(util.NewFullPath(dirFullPath, req.Name)) + return node, nil } - return nil, err + glog.V(0).Infof("mkdir %s/%s: %v", dirFullPath, req.Name, err) + + return nil, fuse.EIO } func (dir *Dir) Lookup(ctx context.Context, req *fuse.LookupRequest, resp *fuse.LookupResponse) (node fs.Node, err error) { - glog.V(4).Infof("dir Lookup %s: %s", dir.Path, req.Name) + dirPath := util.FullPath(dir.FullPath()) + glog.V(4).Infof("dir Lookup %s: %s by %s", dirPath, req.Name, req.Header.String()) - var entry *filer_pb.Entry - fullFilePath := path.Join(dir.Path, req.Name) - - item := dir.wfs.listDirectoryEntriesCache.Get(fullFilePath) - if item != nil && !item.Expired() { - entry = item.Value().(*filer_pb.Entry) + fullFilePath := dirPath.Child(req.Name) + visitErr := meta_cache.EnsureVisited(dir.wfs.metaCache, dir.wfs, dirPath) + if visitErr != nil { + glog.Errorf("dir Lookup %s: %v", dirPath, visitErr) + return nil, fuse.EIO + } + localEntry, cacheErr := dir.wfs.metaCache.FindEntry(context.Background(), fullFilePath) + if cacheErr == filer_pb.ErrNotFound { + return nil, fuse.ENOENT } - if entry == nil { - glog.V(3).Infof("dir Lookup cache miss %s", fullFilePath) - entry, err = filer2.GetEntry(ctx, dir.wfs, fullFilePath) + if localEntry == nil { + // glog.V(3).Infof("dir Lookup cache miss %s", fullFilePath) + entry, err := filer_pb.GetEntry(dir.wfs, fullFilePath) if err != nil { - return nil, err - } - if entry != nil { - dir.wfs.listDirectoryEntriesCache.Set(fullFilePath, entry, 5*time.Minute) + glog.V(1).Infof("dir GetEntry %s: %v", fullFilePath, err) + return nil, fuse.ENOENT } + localEntry = filer.FromPbEntry(string(dirPath), entry) } else { 
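// Lookup above is a cache-aside read: EnsureVisited pulls the directory into
// the local meta cache, a cache FindEntry is tried first, and only a miss
// falls through to a remote GetEntry against the filer. A generic sketch of
// that shape; findLocal and fetchRemote are hypothetical stand-ins, not the
// actual API:

func lookupCacheAside(path string, findLocal, fetchRemote func(string) (string, bool)) (string, bool) {
	if v, ok := findLocal(path); ok {
		return v, true // hit: served from the local meta cache
	}
	return fetchRemote(path) // miss: fall back to the filer
}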
glog.V(4).Infof("dir Lookup cache hit %s", fullFilePath) } - if entry != nil { - if entry.IsDirectory { - node = &Dir{Path: path.Join(dir.Path, req.Name), wfs: dir.wfs, entry: entry} + if localEntry != nil { + if localEntry.IsDirectory() { + node = dir.newDirectory(fullFilePath) } else { - node = dir.newFile(req.Name, entry) + node = dir.newFile(req.Name) } - resp.EntryValid = time.Duration(0) - resp.Attr.Mtime = time.Unix(entry.Attributes.Mtime, 0) - resp.Attr.Ctime = time.Unix(entry.Attributes.Crtime, 0) - resp.Attr.Mode = os.FileMode(entry.Attributes.FileMode) - resp.Attr.Gid = entry.Attributes.Gid - resp.Attr.Uid = entry.Attributes.Uid + // resp.EntryValid = time.Second + resp.Attr.Inode = fullFilePath.AsInode() + resp.Attr.Valid = time.Second + resp.Attr.Mtime = localEntry.Attr.Mtime + resp.Attr.Crtime = localEntry.Attr.Crtime + resp.Attr.Mode = localEntry.Attr.Mode + resp.Attr.Gid = localEntry.Attr.Gid + resp.Attr.Uid = localEntry.Attr.Uid + if localEntry.HardLinkCounter > 0 { + resp.Attr.Nlink = uint32(localEntry.HardLinkCounter) + } return node, nil } + glog.V(4).Infof("not found dir GetEntry %s: %v", fullFilePath, err) return nil, fuse.ENOENT } func (dir *Dir) ReadDirAll(ctx context.Context) (ret []fuse.Dirent, err error) { - glog.V(3).Infof("dir ReadDirAll %s", dir.Path) + dirPath := util.FullPath(dir.FullPath()) + glog.V(4).Infof("dir ReadDirAll %s", dirPath) - cacheTtl := 5 * time.Minute - - readErr := filer2.ReadDirAllEntries(ctx, dir.wfs, dir.Path, "", func(entry *filer_pb.Entry, isLast bool) { - if entry.IsDirectory { - dirent := fuse.Dirent{Name: entry.Name, Type: fuse.DT_Dir} + processEachEntryFn := func(entry *filer.Entry, isLast bool) { + if entry.IsDirectory() { + dirent := fuse.Dirent{Name: entry.Name(), Type: fuse.DT_Dir, Inode: dirPath.Child(entry.Name()).AsInode()} ret = append(ret, dirent) } else { - dirent := fuse.Dirent{Name: entry.Name, Type: fuse.DT_File} + dirent := fuse.Dirent{Name: entry.Name(), Type: findFileType(uint16(entry.Attr.Mode)), Inode: dirPath.Child(entry.Name()).AsInode()} ret = append(ret, dirent) } - dir.wfs.listDirectoryEntriesCache.Set(path.Join(dir.Path, entry.Name), entry, cacheTtl) + } + + if err = meta_cache.EnsureVisited(dir.wfs.metaCache, dir.wfs, dirPath); err != nil { + glog.Errorf("dir ReadDirAll %s: %v", dirPath, err) + return nil, fuse.EIO + } + listErr := dir.wfs.metaCache.ListDirectoryEntries(context.Background(), dirPath, "", false, int64(math.MaxInt32), func(entry *filer.Entry) bool { + processEachEntryFn(entry, false) + return true }) - if readErr != nil { - glog.V(0).Infof("list %s: %v", dir.Path, err) - return ret, fuse.EIO + if listErr != nil { + glog.Errorf("list meta cache: %v", listErr) + return nil, fuse.EIO } + return +} - return ret, err +func findFileType(mode uint16) fuse.DirentType { + switch mode & (syscall.S_IFMT & 0xffff) { + case syscall.S_IFSOCK: + return fuse.DT_Socket + case syscall.S_IFLNK: + return fuse.DT_Link + case syscall.S_IFREG: + return fuse.DT_File + case syscall.S_IFBLK: + return fuse.DT_Block + case syscall.S_IFDIR: + return fuse.DT_Dir + case syscall.S_IFCHR: + return fuse.DT_Char + case syscall.S_IFIFO: + return fuse.DT_FIFO + } + return fuse.DT_File } func (dir *Dir) Remove(ctx context.Context, req *fuse.RemoveRequest) error { if !req.Dir { - return dir.removeOneFile(ctx, req) + return dir.removeOneFile(req) } - return dir.removeFolder(ctx, req) + return dir.removeFolder(req) } -func (dir *Dir) removeOneFile(ctx context.Context, req *fuse.RemoveRequest) error { +func (dir *Dir) 
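Two details of ReadDirAll above are worth calling out: `findFileType` masks the mode with `syscall.S_IFMT` to translate POSIX type bits into FUSE dirent types, and every dirent carries an inode derived deterministically from the full path, so the kernel sees stable inode numbers across listings. A small illustrative snippet with a hypothetical entry name:

```go
// "example.txt" is a made-up name; findFileType and AsInode are the helpers
// introduced in the diff above
dirent := fuse.Dirent{
	Name:  "example.txt",
	Type:  findFileType(uint16(syscall.S_IFREG)), // regular file => fuse.DT_File
	Inode: util.FullPath("/some/dir").Child("example.txt").AsInode(),
}
_ = dirent
```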
removeOneFile(req *fuse.RemoveRequest) error { - entry, err := filer2.GetEntry(ctx, dir.wfs, path.Join(dir.Path, req.Name)) + dirFullPath := dir.FullPath() + filePath := util.NewFullPath(dirFullPath, req.Name) + entry, err := filer_pb.GetEntry(dir.wfs, filePath) if err != nil { return err } - dir.wfs.deleteFileChunks(ctx, entry.Chunks) - - dir.wfs.listDirectoryEntriesCache.Delete(path.Join(dir.Path, req.Name)) - - return dir.wfs.WithFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { + // first, ensure the filer store can correctly delete + glog.V(3).Infof("remove file: %v", req) + isDeleteData := entry != nil && entry.HardLinkCounter <= 1 + err = filer_pb.Remove(dir.wfs, dirFullPath, req.Name, isDeleteData, false, false, false, []int32{dir.wfs.signature}) + if err != nil { + glog.V(3).Infof("not found remove file %s: %v", filePath, err) + return fuse.ENOENT + } - request := &filer_pb.DeleteEntryRequest{ - Directory: dir.Path, - Name: req.Name, - IsDeleteData: false, - } + // then, delete meta cache and fsNode cache + if err = dir.wfs.metaCache.DeleteEntry(context.Background(), filePath); err != nil { + glog.V(3).Infof("local DeleteEntry %s: %v", filePath, err) + return fuse.ESTALE + } - glog.V(3).Infof("remove file: %v", request) - _, err := client.DeleteEntry(ctx, request) - if err != nil { - glog.V(3).Infof("remove file %s/%s: %v", dir.Path, req.Name, err) - return fuse.ENOENT - } + // remove current file handle if any + dir.wfs.handlesLock.Lock() + defer dir.wfs.handlesLock.Unlock() + inodeId := filePath.AsInode() + delete(dir.wfs.handles, inodeId) - return nil - }) + return nil } -func (dir *Dir) removeFolder(ctx context.Context, req *fuse.RemoveRequest) error { - - dir.wfs.listDirectoryEntriesCache.Delete(path.Join(dir.Path, req.Name)) +func (dir *Dir) removeFolder(req *fuse.RemoveRequest) error { - return dir.wfs.WithFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { - - request := &filer_pb.DeleteEntryRequest{ - Directory: dir.Path, - Name: req.Name, - IsDeleteData: true, + dirFullPath := dir.FullPath() + glog.V(3).Infof("remove directory entry: %v", req) + ignoreRecursiveErr := true // ignore recursion error since the OS should manage it + err := filer_pb.Remove(dir.wfs, dirFullPath, req.Name, true, true, ignoreRecursiveErr, false, []int32{dir.wfs.signature}) + if err != nil { + glog.V(0).Infof("remove %s/%s: %v", dirFullPath, req.Name, err) + if strings.Contains(err.Error(), "non-empty") { + return fuse.EEXIST } + return fuse.ENOENT + } - glog.V(3).Infof("remove directory entry: %v", request) - _, err := client.DeleteEntry(ctx, request) - if err != nil { - glog.V(3).Infof("remove %s/%s: %v", dir.Path, req.Name, err) - return fuse.ENOENT - } + t := util.NewFullPath(dirFullPath, req.Name) + dir.wfs.metaCache.DeleteEntry(context.Background(), t) - return nil - }) + return nil } func (dir *Dir) Setattr(ctx context.Context, req *fuse.SetattrRequest, resp *fuse.SetattrResponse) error { - if err := dir.maybeLoadEntry(ctx); err != nil { + glog.V(4).Infof("%v dir setattr %+v", dir.FullPath(), req) + + entry, err := dir.maybeLoadEntry() + if err != nil { return err } - glog.V(3).Infof("%v dir setattr %+v, fh=%d", dir.Path, req, req.Handle) if req.Valid.Mode() { - dir.entry.Attributes.FileMode = uint32(req.Mode) + entry.Attributes.FileMode = uint32(req.Mode) } if req.Valid.Uid() { - dir.entry.Attributes.Uid = req.Uid + entry.Attributes.Uid = req.Uid } if req.Valid.Gid() { - dir.entry.Attributes.Gid = req.Gid + entry.Attributes.Gid = req.Gid } if 
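The remove path above is hard-link aware: chunk data is only eligible for deletion when the entry is not shared with other names. A one-function sketch of that decision, using the same `HardLinkCounter` semantics as the diff; the helper name is hypothetical:

```go
func shouldDeleteChunkData(entry *filer_pb.Entry) bool {
	// a hard-linked entry shares its chunks with other names; the data may
	// only be reclaimed when this is the last remaining link
	return entry != nil && entry.HardLinkCounter <= 1
}
```

The result is passed as `isDeleteData` to `filer_pb.Remove`, so unlinking one name of a multi-link file removes only the directory entry.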
req.Valid.Mtime() { - dir.entry.Attributes.Mtime = req.Mtime.Unix() + entry.Attributes.Mtime = req.Mtime.Unix() } - dir.wfs.listDirectoryEntriesCache.Delete(dir.Path) - - return dir.saveEntry(ctx) + return dir.saveEntry(entry) } func (dir *Dir) Setxattr(ctx context.Context, req *fuse.SetxattrRequest) error { - glog.V(4).Infof("dir Setxattr %s: %s", dir.Path, req.Name) + glog.V(4).Infof("dir Setxattr %s: %s", dir.FullPath(), req.Name) - if err := dir.maybeLoadEntry(ctx); err != nil { + entry, err := dir.maybeLoadEntry() + if err != nil { return err } - if err := setxattr(dir.entry, req); err != nil { + if err := setxattr(entry, req); err != nil { return err } - dir.wfs.listDirectoryEntriesCache.Delete(dir.Path) - - return dir.saveEntry(ctx) + return dir.saveEntry(entry) } func (dir *Dir) Removexattr(ctx context.Context, req *fuse.RemovexattrRequest) error { - glog.V(4).Infof("dir Removexattr %s: %s", dir.Path, req.Name) + glog.V(4).Infof("dir Removexattr %s: %s", dir.FullPath(), req.Name) - if err := dir.maybeLoadEntry(ctx); err != nil { + entry, err := dir.maybeLoadEntry() + if err != nil { return err } - if err := removexattr(dir.entry, req); err != nil { + if err := removexattr(entry, req); err != nil { return err } - dir.wfs.listDirectoryEntriesCache.Delete(dir.Path) - - return dir.saveEntry(ctx) + return dir.saveEntry(entry) } func (dir *Dir) Listxattr(ctx context.Context, req *fuse.ListxattrRequest, resp *fuse.ListxattrResponse) error { - glog.V(4).Infof("dir Listxattr %s", dir.Path) + glog.V(4).Infof("dir Listxattr %s", dir.FullPath()) - if err := dir.maybeLoadEntry(ctx); err != nil { + entry, err := dir.maybeLoadEntry() + if err != nil { return err } - if err := listxattr(dir.entry, req, resp); err != nil { + if err := listxattr(entry, req, resp); err != nil { return err } @@ -383,39 +539,66 @@ func (dir *Dir) Listxattr(ctx context.Context, req *fuse.ListxattrRequest, resp } -func (dir *Dir) maybeLoadEntry(ctx context.Context) error { - if dir.entry == nil { - parentDirPath, name := filer2.FullPath(dir.Path).DirAndName() - entry, err := dir.wfs.maybeLoadEntry(ctx, parentDirPath, name) - if err != nil { - return err - } - if entry == nil { - return fuse.ENOENT - } - dir.entry = entry - } - return nil +func (dir *Dir) Forget() { + glog.V(4).Infof("Forget dir %s", dir.FullPath()) } -func (dir *Dir) saveEntry(ctx context.Context) error { +func (dir *Dir) maybeLoadEntry() (*filer_pb.Entry, error) { + parentDirPath, name := util.FullPath(dir.FullPath()).DirAndName() + return dir.wfs.maybeLoadEntry(parentDirPath, name) +} + +func (dir *Dir) saveEntry(entry *filer_pb.Entry) error { - parentDir, name := filer2.FullPath(dir.Path).DirAndName() + parentDir, name := util.FullPath(dir.FullPath()).DirAndName() - return dir.wfs.WithFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { + return dir.wfs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { + + dir.wfs.mapPbIdFromLocalToFiler(entry) + defer dir.wfs.mapPbIdFromFilerToLocal(entry) request := &filer_pb.UpdateEntryRequest{ - Directory: parentDir, - Entry: dir.entry, + Directory: parentDir, + Entry: entry, + Signatures: []int32{dir.wfs.signature}, } glog.V(1).Infof("save dir entry: %v", request) - _, err := client.UpdateEntry(ctx, request) + _, err := client.UpdateEntry(context.Background(), request) if err != nil { - glog.V(0).Infof("UpdateEntry dir %s/%s: %v", parentDir, name, err) + glog.Errorf("UpdateEntry dir %s/%s: %v", parentDir, name, err) return fuse.EIO } + if err := 
dir.wfs.metaCache.UpdateEntry(context.Background(), filer.FromPbEntry(request.Directory, request.Entry)); err != nil { + glog.Errorf("UpdateEntry dir %s/%s: %v", parentDir, name, err) + return fuse.ESTALE + } + return nil }) } + +func (dir *Dir) FullPath() string { + var parts []string + for p := dir; p != nil; p = p.parent { + if strings.HasPrefix(p.name, "/") { + if len(p.name) > 1 { + parts = append(parts, p.name[1:]) + } + } else { + parts = append(parts, p.name) + } + } + + if len(parts) == 0 { + return "/" + } + + var buf bytes.Buffer + for i := len(parts) - 1; i >= 0; i-- { + buf.WriteString("/") + buf.WriteString(parts[i]) + } + return buf.String() +} diff --git a/weed/filesys/dir_link.go b/weed/filesys/dir_link.go index 8e60872d3..acdcd2de4 100644 --- a/weed/filesys/dir_link.go +++ b/weed/filesys/dir_link.go @@ -2,25 +2,110 @@ package filesys import ( "context" + "github.com/chrislusf/seaweedfs/weed/util" "os" "syscall" "time" + "github.com/chrislusf/seaweedfs/weed/filer" "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" "github.com/seaweedfs/fuse" "github.com/seaweedfs/fuse/fs" ) +var _ = fs.NodeLinker(&Dir{}) var _ = fs.NodeSymlinker(&Dir{}) var _ = fs.NodeReadlinker(&File{}) +const ( + HARD_LINK_MARKER = '\x01' +) + +func (dir *Dir) Link(ctx context.Context, req *fuse.LinkRequest, old fs.Node) (fs.Node, error) { + + oldFile, ok := old.(*File) + if !ok { + glog.Errorf("old node is not a file: %+v", old) + } + + glog.V(4).Infof("Link: %v/%v -> %v/%v", oldFile.dir.FullPath(), oldFile.Name, dir.FullPath(), req.NewName) + + oldEntry, err := oldFile.maybeLoadEntry(ctx) + if err != nil { + return nil, err + } + + if oldEntry == nil { + return nil, fuse.EIO + } + + // update old file to hardlink mode + if len(oldEntry.HardLinkId) == 0 { + oldEntry.HardLinkId = append(util.RandomBytes(16), HARD_LINK_MARKER) + oldEntry.HardLinkCounter = 1 + } + oldEntry.HardLinkCounter++ + updateOldEntryRequest := &filer_pb.UpdateEntryRequest{ + Directory: oldFile.dir.FullPath(), + Entry: oldEntry, + Signatures: []int32{dir.wfs.signature}, + } + + // CreateLink 1.2 : update new file to hardlink mode + request := &filer_pb.CreateEntryRequest{ + Directory: dir.FullPath(), + Entry: &filer_pb.Entry{ + Name: req.NewName, + IsDirectory: false, + Attributes: oldEntry.Attributes, + Chunks: oldEntry.Chunks, + Extended: oldEntry.Extended, + HardLinkId: oldEntry.HardLinkId, + HardLinkCounter: oldEntry.HardLinkCounter, + }, + Signatures: []int32{dir.wfs.signature}, + } + + // apply changes to the filer, and also apply to local metaCache + err = dir.wfs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { + + dir.wfs.mapPbIdFromLocalToFiler(request.Entry) + defer dir.wfs.mapPbIdFromFilerToLocal(request.Entry) + + if err := filer_pb.UpdateEntry(client, updateOldEntryRequest); err != nil { + glog.V(0).Infof("Link %v/%v -> %s/%s: %v", oldFile.dir.FullPath(), oldFile.Name, dir.FullPath(), req.NewName, err) + return fuse.EIO + } + dir.wfs.metaCache.UpdateEntry(context.Background(), filer.FromPbEntry(updateOldEntryRequest.Directory, updateOldEntryRequest.Entry)) + + if err := filer_pb.CreateEntry(client, request); err != nil { + glog.V(0).Infof("Link %v/%v -> %s/%s: %v", oldFile.dir.FullPath(), oldFile.Name, dir.FullPath(), req.NewName, err) + return fuse.EIO + } + dir.wfs.metaCache.InsertEntry(context.Background(), filer.FromPbEntry(request.Directory, request.Entry)) + + return nil + }) + + if err != nil { + return nil, fuse.EIO + } + + // create new file node + 
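`Dir.FullPath` above rebuilds the absolute path by following parent pointers leaf-to-root and then emitting the collected parts in reverse. A standalone sketch of the same idea; `joinUpward` is a hypothetical equivalent of that loop:

```go
func joinUpward(leafToRoot []string) string {
	if len(leafToRoot) == 0 {
		return "/" // only the root remains
	}
	var buf bytes.Buffer
	for i := len(leafToRoot) - 1; i >= 0; i-- {
		buf.WriteString("/")
		buf.WriteString(leafToRoot[i])
	}
	// parts collected leaf-to-root, e.g. {"c","b","a"}, render as "/a/b/c"
	return buf.String()
}
```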
newNode := dir.newFile(req.NewName) + newFile := newNode.(*File) + + return newFile, err + +} + func (dir *Dir) Symlink(ctx context.Context, req *fuse.SymlinkRequest) (fs.Node, error) { - glog.V(3).Infof("Symlink: %v/%v to %v", dir.Path, req.NewName, req.Target) + glog.V(4).Infof("Symlink: %v/%v to %v", dir.FullPath(), req.NewName, req.Target) request := &filer_pb.CreateEntryRequest{ - Directory: dir.Path, + Directory: dir.FullPath(), Entry: &filer_pb.Entry{ Name: req.NewName, IsDirectory: false, @@ -33,17 +118,25 @@ func (dir *Dir) Symlink(ctx context.Context, req *fuse.SymlinkRequest) (fs.Node, SymlinkTarget: req.Target, }, }, + Signatures: []int32{dir.wfs.signature}, } - err := dir.wfs.WithFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { - if _, err := client.CreateEntry(ctx, request); err != nil { - glog.V(0).Infof("symlink %s/%s: %v", dir.Path, req.NewName, err) + err := dir.wfs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { + + dir.wfs.mapPbIdFromLocalToFiler(request.Entry) + defer dir.wfs.mapPbIdFromFilerToLocal(request.Entry) + + if err := filer_pb.CreateEntry(client, request); err != nil { + glog.V(0).Infof("symlink %s/%s: %v", dir.FullPath(), req.NewName, err) return fuse.EIO } + + dir.wfs.metaCache.InsertEntry(context.Background(), filer.FromPbEntry(request.Directory, request.Entry)) + return nil }) - symlink := dir.newFile(req.NewName, request.Entry) + symlink := dir.newFile(req.NewName) return symlink, err @@ -51,16 +144,17 @@ func (dir *Dir) Symlink(ctx context.Context, req *fuse.SymlinkRequest) (fs.Node, func (file *File) Readlink(ctx context.Context, req *fuse.ReadlinkRequest) (string, error) { - if err := file.maybeLoadEntry(ctx); err != nil { + entry, err := file.maybeLoadEntry(ctx) + if err != nil { return "", err } - if os.FileMode(file.entry.Attributes.FileMode)&os.ModeSymlink == 0 { + if os.FileMode(entry.Attributes.FileMode)&os.ModeSymlink == 0 { return "", fuse.Errno(syscall.EINVAL) } - glog.V(3).Infof("Readlink: %v/%v => %v", file.dir.Path, file.Name, file.entry.Attributes.SymlinkTarget) + glog.V(4).Infof("Readlink: %v/%v => %v", file.dir.FullPath(), file.Name, entry.Attributes.SymlinkTarget) - return file.entry.Attributes.SymlinkTarget, nil + return entry.Attributes.SymlinkTarget, nil } diff --git a/weed/filesys/dir_rename.go b/weed/filesys/dir_rename.go index e72a15758..b07710d17 100644 --- a/weed/filesys/dir_rename.go +++ b/weed/filesys/dir_rename.go @@ -2,32 +2,90 @@ package filesys import ( "context" - "fmt" - "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/seaweedfs/fuse" "github.com/seaweedfs/fuse/fs" + + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/util" ) func (dir *Dir) Rename(ctx context.Context, req *fuse.RenameRequest, newDirectory fs.Node) error { newDir := newDirectory.(*Dir) - return dir.wfs.WithFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { + newPath := util.NewFullPath(newDir.FullPath(), req.NewName) + oldPath := util.NewFullPath(dir.FullPath(), req.OldName) + + glog.V(4).Infof("dir Rename %s => %s", oldPath, newPath) + + // find local old entry + oldEntry, err := dir.wfs.metaCache.FindEntry(context.Background(), oldPath) + if err != nil { + glog.Errorf("dir Rename can not find source %s : %v", oldPath, err) + return fuse.ENOENT + } + + // update remote filer + err = dir.wfs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { + ctx, cancel := 
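The Link implementation above converts a plain file into "hardlink mode" on its first link: it mints a random 16-byte `HardLinkId` with a trailing marker byte, and bumps `HardLinkCounter` for every name that shares the entry. A sketch of that transition using the same fields as the diff:

```go
func ensureHardLinkMode(entry *filer_pb.Entry) {
	if len(entry.HardLinkId) == 0 {
		// first link: mint the shared id that all names of this file carry
		entry.HardLinkId = append(util.RandomBytes(16), HARD_LINK_MARKER)
		entry.HardLinkCounter = 1
	}
	// after the first Link call the counter is 2: the original name plus
	// the newly created one
	entry.HardLinkCounter++
}
```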
context.WithCancel(context.Background()) + defer cancel() request := &filer_pb.AtomicRenameEntryRequest{ - OldDirectory: dir.Path, + OldDirectory: dir.FullPath(), OldName: req.OldName, - NewDirectory: newDir.Path, + NewDirectory: newDir.FullPath(), NewName: req.NewName, } _, err := client.AtomicRenameEntry(ctx, request) if err != nil { - return fmt.Errorf("renaming %s/%s => %s/%s: %v", dir.Path, req.OldName, newDir.Path, req.NewName, err) + glog.Errorf("dir AtomicRenameEntry %s => %s : %v", oldPath, newPath, err) + return fuse.EXDEV } return nil }) + if err != nil { + glog.V(0).Infof("dir Rename %s => %s : %v", oldPath, newPath, err) + return fuse.EIO + } + + // TODO: replicate renaming logic on filer + if err := dir.wfs.metaCache.DeleteEntry(context.Background(), oldPath); err != nil { + glog.V(0).Infof("dir Rename delete local %s => %s : %v", oldPath, newPath, err) + return fuse.EIO + } + oldEntry.FullPath = newPath + if err := dir.wfs.metaCache.InsertEntry(context.Background(), oldEntry); err != nil { + glog.V(0).Infof("dir Rename insert local %s => %s : %v", oldPath, newPath, err) + return fuse.EIO + } + + oldFsNode := NodeWithId(oldPath.AsInode()) + newFsNode := NodeWithId(newPath.AsInode()) + dir.wfs.Server.InvalidateInternalNode(oldFsNode, newFsNode, func(internalNode fs.Node) { + if file, ok := internalNode.(*File); ok { + glog.V(4).Infof("internal node %s", file.Name) + file.Name = req.NewName + file.id = uint64(newFsNode) + } + }) + + // change file handle + dir.wfs.handlesLock.Lock() + defer dir.wfs.handlesLock.Unlock() + inodeId := oldPath.AsInode() + existingHandle, found := dir.wfs.handles[inodeId] + glog.V(4).Infof("has open filehandle %s: %v", oldPath, found) + if !found || existingHandle == nil { + return nil + } + glog.V(4).Infof("opened filehandle %s => %s", oldPath, newPath) + delete(dir.wfs.handles, inodeId) + dir.wfs.handles[newPath.AsInode()] = existingHandle + return nil } diff --git a/weed/filesys/dirty_page.go b/weed/filesys/dirty_page.go index 35d8f249a..8888cff96 100644 --- a/weed/filesys/dirty_page.go +++ b/weed/filesys/dirty_page.go @@ -2,214 +2,117 @@ package filesys import ( "bytes" - "context" - "fmt" + "io" "sync" - "sync/atomic" "time" "github.com/chrislusf/seaweedfs/weed/glog" - "github.com/chrislusf/seaweedfs/weed/operation" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" - "github.com/chrislusf/seaweedfs/weed/security" ) type ContinuousDirtyPages struct { - hasData bool - Offset int64 - Size int64 - Data []byte - f *File - lock sync.Mutex + intervals *ContinuousIntervals + f *File + writeWaitGroup sync.WaitGroup + chunkAddLock sync.Mutex + lastErr error + collection string + replication string } func newDirtyPages(file *File) *ContinuousDirtyPages { - return &ContinuousDirtyPages{ - Data: nil, - f: file, + dirtyPages := &ContinuousDirtyPages{ + intervals: &ContinuousIntervals{}, + f: file, } + return dirtyPages } -func (pages *ContinuousDirtyPages) releaseResource() { - if pages.Data != nil { - pages.f.wfs.bufPool.Put(pages.Data) - pages.Data = nil - atomic.AddInt32(&counter, -1) - glog.V(3).Infof("%s/%s releasing resource %d", pages.f.dir.Path, pages.f.Name, counter) - } -} - -var counter = int32(0) +func (pages *ContinuousDirtyPages) AddPage(offset int64, data []byte) { -func (pages *ContinuousDirtyPages) AddPage(ctx context.Context, offset int64, data []byte) (chunks []*filer_pb.FileChunk, err error) { - - pages.lock.Lock() - defer pages.lock.Unlock() - - var chunk *filer_pb.FileChunk + glog.V(4).Infof("%s AddPage [%d,%d)", pages.f.fullpath(), 
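After the filer-side `AtomicRenameEntry` succeeds, the Rename code above repairs three local views: the meta cache (delete the old path, insert under the new one), the kernel's node cache (`InvalidateInternalNode` with the old and new inode), and any open file handle keyed by inode. A condensed sketch of the handle move, assuming the same `handles` map guarded by `handlesLock`:

```go
func moveOpenHandle(wfs *WFS, oldPath, newPath util.FullPath) {
	wfs.handlesLock.Lock()
	defer wfs.handlesLock.Unlock()
	if h, found := wfs.handles[oldPath.AsInode()]; found && h != nil {
		delete(wfs.handles, oldPath.AsInode())
		// keep in-flight writes flowing to the renamed file
		wfs.handles[newPath.AsInode()] = h
	}
}
```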
offset, offset+int64(len(data))) if len(data) > int(pages.f.wfs.option.ChunkSizeLimit) { // this is more than what buffer can hold. - return pages.flushAndSave(ctx, offset, data) + pages.flushAndSave(offset, data) } - if pages.Data == nil { - pages.Data = pages.f.wfs.bufPool.Get().([]byte) - atomic.AddInt32(&counter, 1) - glog.V(3).Infof("%s/%s acquire resource %d", pages.f.dir.Path, pages.f.Name, counter) - } + pages.intervals.AddInterval(data, offset) - if offset < pages.Offset || offset >= pages.Offset+int64(len(pages.Data)) || - pages.Offset+int64(len(pages.Data)) < offset+int64(len(data)) { - // if the data is out of range, - // or buffer is full if adding new data, - // flush current buffer and add new data - - // println("offset", offset, "size", len(data), "existing offset", pages.Offset, "size", pages.Size) - - if chunk, err = pages.saveExistingPagesToStorage(ctx); err == nil { - if chunk != nil { - glog.V(4).Infof("%s/%s add save [%d,%d)", pages.f.dir.Path, pages.f.Name, chunk.Offset, chunk.Offset+int64(chunk.Size)) - chunks = append(chunks, chunk) - } - } else { - glog.V(0).Infof("%s/%s add save [%d,%d): %v", pages.f.dir.Path, pages.f.Name, chunk.Offset, chunk.Offset+int64(chunk.Size), err) - return - } - pages.Offset = offset - copy(pages.Data, data) - pages.Size = int64(len(data)) - return + if pages.intervals.TotalSize() >= pages.f.wfs.option.ChunkSizeLimit { + pages.saveExistingLargestPageToStorage() } - if offset != pages.Offset+pages.Size { - // when this happens, debug shows the data overlapping with existing data is empty - // the data is not just append - if offset == pages.Offset && int(pages.Size) < len(data) { - // glog.V(2).Infof("pages[%d,%d) pages.Data len=%v, data len=%d, pages.Size=%d", pages.Offset, pages.Offset+pages.Size, len(pages.Data), len(data), pages.Size) - copy(pages.Data[pages.Size:], data[pages.Size:]) - } else { - if pages.Size != 0 { - glog.V(1).Infof("%s/%s add page: pages [%d, %d) write [%d, %d)", pages.f.dir.Path, pages.f.Name, pages.Offset, pages.Offset+pages.Size, offset, offset+int64(len(data))) - } - return pages.flushAndSave(ctx, offset, data) - } - } else { - copy(pages.Data[offset-pages.Offset:], data) - } - - pages.Size = max(pages.Size, offset+int64(len(data))-pages.Offset) - return } -func (pages *ContinuousDirtyPages) flushAndSave(ctx context.Context, offset int64, data []byte) (chunks []*filer_pb.FileChunk, err error) { - - var chunk *filer_pb.FileChunk +func (pages *ContinuousDirtyPages) flushAndSave(offset int64, data []byte) { // flush existing - if chunk, err = pages.saveExistingPagesToStorage(ctx); err == nil { - if chunk != nil { - glog.V(4).Infof("%s/%s flush existing [%d,%d) to %s", pages.f.dir.Path, pages.f.Name, chunk.Offset, chunk.Offset+int64(chunk.Size), chunk.FileId) - chunks = append(chunks, chunk) - } - } else { - glog.V(0).Infof("%s/%s failed to flush1 [%d,%d): %v", pages.f.dir.Path, pages.f.Name, chunk.Offset, chunk.Offset+int64(chunk.Size), err) - return - } - pages.Size = 0 - pages.Offset = 0 + pages.saveExistingPagesToStorage() // flush the new page - if chunk, err = pages.saveToStorage(ctx, data, offset); err == nil { - if chunk != nil { - glog.V(4).Infof("%s/%s flush big request [%d,%d) to %s", pages.f.dir.Path, pages.f.Name, chunk.Offset, chunk.Offset+int64(chunk.Size), chunk.FileId) - chunks = append(chunks, chunk) - } - } else { - glog.V(0).Infof("%s/%s failed to flush2 [%d,%d): %v", pages.f.dir.Path, pages.f.Name, chunk.Offset, chunk.Offset+int64(chunk.Size), err) - return - } + 
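The new `AddPage` has two regimes: a write larger than the chunk size limit bypasses buffering and is flushed directly, while smaller writes accumulate as intervals until their total reaches the limit, at which point the largest contiguous run is flushed. A condensed restatement of that policy; the early return after `flushAndSave` reflects the intent that an oversized write is flushed once rather than also buffered:

```go
func (pages *ContinuousDirtyPages) addPagePolicy(offset int64, data []byte) {
	limit := int64(pages.f.wfs.option.ChunkSizeLimit)
	if int64(len(data)) > limit {
		pages.flushAndSave(offset, data) // too big for the buffer: flush directly
		return
	}
	pages.intervals.AddInterval(data, offset)
	if pages.intervals.TotalSize() >= limit {
		// enough buffered: push the largest contiguous run to storage
		pages.saveExistingLargestPageToStorage()
	}
}
```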
pages.saveToStorage(bytes.NewReader(data), offset, int64(len(data))) return } -func (pages *ContinuousDirtyPages) FlushToStorage(ctx context.Context) (chunk *filer_pb.FileChunk, err error) { +func (pages *ContinuousDirtyPages) saveExistingPagesToStorage() { + for pages.saveExistingLargestPageToStorage() { + } +} - pages.lock.Lock() - defer pages.lock.Unlock() +func (pages *ContinuousDirtyPages) saveExistingLargestPageToStorage() (hasSavedData bool) { - if pages.Size == 0 { - return nil, nil + maxList := pages.intervals.RemoveLargestIntervalLinkedList() + if maxList == nil { + return false } - if chunk, err = pages.saveExistingPagesToStorage(ctx); err == nil { - pages.Size = 0 - pages.Offset = 0 - if chunk != nil { - glog.V(4).Infof("%s/%s flush [%d,%d)", pages.f.dir.Path, pages.f.Name, chunk.Offset, chunk.Offset+int64(chunk.Size)) - } + entry := pages.f.getEntry() + if entry == nil { + return false } - return -} -func (pages *ContinuousDirtyPages) saveExistingPagesToStorage(ctx context.Context) (*filer_pb.FileChunk, error) { + fileSize := int64(entry.Attributes.FileSize) - if pages.Size == 0 { - return nil, nil + chunkSize := min(maxList.Size(), fileSize-maxList.Offset()) + if chunkSize == 0 { + return false } - return pages.saveToStorage(ctx, pages.Data[:pages.Size], pages.Offset) -} - -func (pages *ContinuousDirtyPages) saveToStorage(ctx context.Context, buf []byte, offset int64) (*filer_pb.FileChunk, error) { + pages.saveToStorage(maxList.ToReader(), maxList.Offset(), chunkSize) - var fileId, host string - var auth security.EncodedJwt + return true +} - if err := pages.f.wfs.WithFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { +func (pages *ContinuousDirtyPages) saveToStorage(reader io.Reader, offset int64, size int64) { - request := &filer_pb.AssignVolumeRequest{ - Count: 1, - Replication: pages.f.wfs.option.Replication, - Collection: pages.f.wfs.option.Collection, - TtlSec: pages.f.wfs.option.TtlSec, - DataCenter: pages.f.wfs.option.DataCenter, - } + mtime := time.Now().UnixNano() + pages.writeWaitGroup.Add(1) + writer := func() { + defer pages.writeWaitGroup.Done() - resp, err := client.AssignVolume(ctx, request) + reader = io.LimitReader(reader, size) + chunk, collection, replication, err := pages.f.wfs.saveDataAsChunk(pages.f.fullpath())(reader, pages.f.Name, offset) if err != nil { - glog.V(0).Infof("assign volume failure %v: %v", request, err) - return err + glog.V(0).Infof("%s saveToStorage [%d,%d): %v", pages.f.fullpath(), offset, offset+size, err) + pages.lastErr = err + return } - - fileId, host, auth = resp.FileId, resp.Url, security.EncodedJwt(resp.Auth) - - return nil - }); err != nil { - return nil, fmt.Errorf("filerGrpcAddress assign volume: %v", err) + chunk.Mtime = mtime + pages.collection, pages.replication = collection, replication + pages.chunkAddLock.Lock() + defer pages.chunkAddLock.Unlock() + pages.f.addChunks([]*filer_pb.FileChunk{chunk}) + glog.V(3).Infof("%s saveToStorage [%d,%d)", pages.f.fullpath(), offset, offset+size) } - fileUrl := fmt.Sprintf("http://%s/%s", host, fileId) - bufReader := bytes.NewReader(buf) - uploadResult, err := operation.Upload(fileUrl, pages.f.Name, bufReader, false, "", nil, auth) - if err != nil { - glog.V(0).Infof("upload data %v to %s: %v", pages.f.Name, fileUrl, err) - return nil, fmt.Errorf("upload data: %v", err) - } - if uploadResult.Error != "" { - glog.V(0).Infof("upload failure %v to %s: %v", pages.f.Name, fileUrl, err) - return nil, fmt.Errorf("upload result: %v", uploadResult.Error) + if 
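`saveToStorage` above wraps the upload in a closure tracked by `writeWaitGroup`, so a later flush can wait for every in-flight upload; when a bounded writer pool is configured the closure runs there, otherwise on a fresh goroutine. A skeleton of that pattern; the concrete type behind `wfs.concurrentWriters` is not shown in this diff, so the `Execute(func())` signature here is an assumption:

```go
func runTracked(wg *sync.WaitGroup, execute func(func()), work func()) {
	wg.Add(1)
	tracked := func() {
		defer wg.Done()
		work()
	}
	if execute != nil {
		execute(tracked) // bounded concurrency via the configured pool
	} else {
		go tracked() // unbounded fallback
	}
}
```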
pages.f.wfs.concurrentWriters != nil { + pages.f.wfs.concurrentWriters.Execute(writer) + } else { + go writer() } - - return &filer_pb.FileChunk{ - FileId: fileId, - Offset: offset, - Size: uint64(len(buf)), - Mtime: time.Now().UnixNano(), - ETag: uploadResult.ETag, - }, nil - } func max(x, y int64) int64 { @@ -218,3 +121,13 @@ func max(x, y int64) int64 { } return y } +func min(x, y int64) int64 { + if x < y { + return x + } + return y +} + +func (pages *ContinuousDirtyPages) ReadDirtyDataAt(data []byte, startOffset int64) (maxStop int64) { + return pages.intervals.ReadDataAt(data, startOffset) +} diff --git a/weed/filesys/dirty_page_interval.go b/weed/filesys/dirty_page_interval.go new file mode 100644 index 000000000..1404bf78c --- /dev/null +++ b/weed/filesys/dirty_page_interval.go @@ -0,0 +1,223 @@ +package filesys + +import ( + "bytes" + "io" + + "github.com/chrislusf/seaweedfs/weed/util" +) + +type IntervalNode struct { + Data []byte + Offset int64 + Size int64 + Next *IntervalNode +} + +type IntervalLinkedList struct { + Head *IntervalNode + Tail *IntervalNode +} + +type ContinuousIntervals struct { + lists []*IntervalLinkedList +} + +func (list *IntervalLinkedList) Offset() int64 { + return list.Head.Offset +} +func (list *IntervalLinkedList) Size() int64 { + return list.Tail.Offset + list.Tail.Size - list.Head.Offset +} +func (list *IntervalLinkedList) addNodeToTail(node *IntervalNode) { + // glog.V(4).Infof("add to tail [%d,%d) + [%d,%d) => [%d,%d)", list.Head.Offset, list.Tail.Offset+list.Tail.Size, node.Offset, node.Offset+node.Size, list.Head.Offset, node.Offset+node.Size) + list.Tail.Next = node + list.Tail = node +} +func (list *IntervalLinkedList) addNodeToHead(node *IntervalNode) { + // glog.V(4).Infof("add to head [%d,%d) + [%d,%d) => [%d,%d)", node.Offset, node.Offset+node.Size, list.Head.Offset, list.Tail.Offset+list.Tail.Size, node.Offset, list.Tail.Offset+list.Tail.Size) + node.Next = list.Head + list.Head = node +} + +func (list *IntervalLinkedList) ReadData(buf []byte, start, stop int64) { + t := list.Head + for { + + nodeStart, nodeStop := max(start, t.Offset), min(stop, t.Offset+t.Size) + if nodeStart < nodeStop { + // glog.V(0).Infof("copying start=%d stop=%d t=[%d,%d) t.data=%d => bufSize=%d nodeStart=%d, nodeStop=%d", start, stop, t.Offset, t.Offset+t.Size, len(t.Data), len(buf), nodeStart, nodeStop) + copy(buf[nodeStart-start:], t.Data[nodeStart-t.Offset:nodeStop-t.Offset]) + } + + if t.Next == nil { + break + } + t = t.Next + } +} + +func (c *ContinuousIntervals) TotalSize() (total int64) { + for _, list := range c.lists { + total += list.Size() + } + return +} + +func subList(list *IntervalLinkedList, start, stop int64) *IntervalLinkedList { + var nodes []*IntervalNode + for t := list.Head; t != nil; t = t.Next { + nodeStart, nodeStop := max(start, t.Offset), min(stop, t.Offset+t.Size) + if nodeStart >= nodeStop { + // skip non overlapping IntervalNode + continue + } + nodes = append(nodes, &IntervalNode{ + Data: t.Data[nodeStart-t.Offset : nodeStop-t.Offset], + Offset: nodeStart, + Size: nodeStop - nodeStart, + Next: nil, + }) + } + for i := 1; i < len(nodes); i++ { + nodes[i-1].Next = nodes[i] + } + return &IntervalLinkedList{ + Head: nodes[0], + Tail: nodes[len(nodes)-1], + } +} + +func (c *ContinuousIntervals) AddInterval(data []byte, offset int64) { + + interval := &IntervalNode{Data: data, Offset: offset, Size: int64(len(data))} + + // append to the tail and return + if len(c.lists) == 1 { + lastSpan := c.lists[0] + if 
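`ReadData` and `subList` above both rely on the same clamp: the overlap of a request `[start, stop)` with a node `[Offset, Offset+Size)` is `[max(start, Offset), min(stop, Offset+Size))`, and the node contributes only when that range is non-empty. As a standalone helper, reusing the package's int64 `max`/`min` shown in this diff:

```go
func overlap(start, stop, nodeOffset, nodeSize int64) (lo, hi int64, ok bool) {
	lo = max(start, nodeOffset)
	hi = min(stop, nodeOffset+nodeSize)
	// e.g. request [0,10) against node [5,15) overlaps on [5,10)
	return lo, hi, lo < hi
}
```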
lastSpan.Tail.Offset+lastSpan.Tail.Size == offset { + lastSpan.addNodeToTail(interval) + return + } + } + + var newLists []*IntervalLinkedList + for _, list := range c.lists { + // if list is to the left of new interval, add to the new list + if list.Tail.Offset+list.Tail.Size <= interval.Offset { + newLists = append(newLists, list) + } + // if list is to the right of new interval, add to the new list + if interval.Offset+interval.Size <= list.Head.Offset { + newLists = append(newLists, list) + } + // if new interval overwrite the right part of the list + if list.Head.Offset < interval.Offset && interval.Offset < list.Tail.Offset+list.Tail.Size { + // create a new list of the left part of existing list + newLists = append(newLists, subList(list, list.Offset(), interval.Offset)) + } + // if new interval overwrite the left part of the list + if list.Head.Offset < interval.Offset+interval.Size && interval.Offset+interval.Size < list.Tail.Offset+list.Tail.Size { + // create a new list of the right part of existing list + newLists = append(newLists, subList(list, interval.Offset+interval.Size, list.Tail.Offset+list.Tail.Size)) + } + // skip anything that is fully overwritten by the new interval + } + + c.lists = newLists + // add the new interval to the lists, connecting neighbor lists + var prevList, nextList *IntervalLinkedList + + for _, list := range c.lists { + if list.Head.Offset == interval.Offset+interval.Size { + nextList = list + break + } + } + + for _, list := range c.lists { + if list.Head.Offset+list.Size() == offset { + list.addNodeToTail(interval) + prevList = list + break + } + } + + if prevList != nil && nextList != nil { + // glog.V(4).Infof("connecting [%d,%d) + [%d,%d) => [%d,%d)", prevList.Head.Offset, prevList.Tail.Offset+prevList.Tail.Size, nextList.Head.Offset, nextList.Tail.Offset+nextList.Tail.Size, prevList.Head.Offset, nextList.Tail.Offset+nextList.Tail.Size) + prevList.Tail.Next = nextList.Head + prevList.Tail = nextList.Tail + c.removeList(nextList) + } else if nextList != nil { + // add to head was not done when checking + nextList.addNodeToHead(interval) + } + if prevList == nil && nextList == nil { + c.lists = append(c.lists, &IntervalLinkedList{ + Head: interval, + Tail: interval, + }) + } + + return +} + +func (c *ContinuousIntervals) RemoveLargestIntervalLinkedList() *IntervalLinkedList { + var maxSize int64 + maxIndex := -1 + for k, list := range c.lists { + if maxSize <= list.Size() { + maxSize = list.Size() + maxIndex = k + } + } + if maxSize <= 0 { + return nil + } + + t := c.lists[maxIndex] + c.lists = append(c.lists[0:maxIndex], c.lists[maxIndex+1:]...) + return t + +} + +func (c *ContinuousIntervals) removeList(target *IntervalLinkedList) { + index := -1 + for k, list := range c.lists { + if list.Offset() == target.Offset() { + index = k + } + } + if index < 0 { + return + } + + c.lists = append(c.lists[0:index], c.lists[index+1:]...) 
+ +} + +func (c *ContinuousIntervals) ReadDataAt(data []byte, startOffset int64) (maxStop int64) { + for _, list := range c.lists { + start := max(startOffset, list.Offset()) + stop := min(startOffset+int64(len(data)), list.Offset()+list.Size()) + if start < stop { + list.ReadData(data[start-startOffset:], start, stop) + maxStop = max(maxStop, stop) + } + } + return +} + +func (l *IntervalLinkedList) ToReader() io.Reader { + var readers []io.Reader + t := l.Head + readers = append(readers, util.NewBytesReader(t.Data)) + for t.Next != nil { + t = t.Next + readers = append(readers, bytes.NewReader(t.Data)) + } + if len(readers) == 1 { + return readers[0] + } + return io.MultiReader(readers...) +} diff --git a/weed/filesys/dirty_page_interval_test.go b/weed/filesys/dirty_page_interval_test.go new file mode 100644 index 000000000..d02ad27fd --- /dev/null +++ b/weed/filesys/dirty_page_interval_test.go @@ -0,0 +1,113 @@ +package filesys + +import ( + "bytes" + "math/rand" + "testing" +) + +func TestContinuousIntervals_AddIntervalAppend(t *testing.T) { + + c := &ContinuousIntervals{} + + // 25, 25, 25 + c.AddInterval(getBytes(25, 3), 0) + // _, _, 23, 23, 23, 23 + c.AddInterval(getBytes(23, 4), 2) + + expectedData(t, c, 0, 25, 25, 23, 23, 23, 23) + +} + +func TestContinuousIntervals_AddIntervalInnerOverwrite(t *testing.T) { + + c := &ContinuousIntervals{} + + // 25, 25, 25, 25, 25 + c.AddInterval(getBytes(25, 5), 0) + // _, _, 23, 23 + c.AddInterval(getBytes(23, 2), 2) + + expectedData(t, c, 0, 25, 25, 23, 23, 25) + +} + +func TestContinuousIntervals_AddIntervalFullOverwrite(t *testing.T) { + + c := &ContinuousIntervals{} + + // 1, + c.AddInterval(getBytes(1, 1), 0) + // _, 2, + c.AddInterval(getBytes(2, 1), 1) + // _, _, 3, 3, 3 + c.AddInterval(getBytes(3, 3), 2) + // _, _, _, 4, 4, 4 + c.AddInterval(getBytes(4, 3), 3) + + expectedData(t, c, 0, 1, 2, 3, 4, 4, 4) + +} + +func TestContinuousIntervals_RealCase1(t *testing.T) { + + c := &ContinuousIntervals{} + + // 25, + c.AddInterval(getBytes(25, 1), 0) + // _, _, _, _, 23, 23 + c.AddInterval(getBytes(23, 2), 4) + // _, _, _, 24, 24, 24, 24 + c.AddInterval(getBytes(24, 4), 3) + + // _, 22, 22 + c.AddInterval(getBytes(22, 2), 1) + + expectedData(t, c, 0, 25, 22, 22, 24, 24, 24, 24) + +} + +func TestRandomWrites(t *testing.T) { + + c := &ContinuousIntervals{} + + data := make([]byte, 1024) + + for i := 0; i < 1024; i++ { + + start, stop := rand.Intn(len(data)), rand.Intn(len(data)) + if start > stop { + start, stop = stop, start + } + + rand.Read(data[start : stop+1]) + + c.AddInterval(data[start:stop+1], int64(start)) + + expectedData(t, c, 0, data...) 
+ + } + +} + +func expectedData(t *testing.T, c *ContinuousIntervals, offset int, data ...byte) { + start, stop := int64(offset), int64(offset+len(data)) + for _, list := range c.lists { + nodeStart, nodeStop := max(start, list.Head.Offset), min(stop, list.Head.Offset+list.Size()) + if nodeStart < nodeStop { + buf := make([]byte, nodeStop-nodeStart) + list.ReadData(buf, nodeStart, nodeStop) + if bytes.Compare(buf, data[nodeStart-start:nodeStop-start]) != 0 { + t.Errorf("expected %v actual %v", data[nodeStart-start:nodeStop-start], buf) + } + } + } +} + +func getBytes(content byte, length int) []byte { + data := make([]byte, length) + for i := 0; i < length; i++ { + data[i] = content + } + return data +} diff --git a/weed/filesys/file.go b/weed/filesys/file.go index afe78ee0f..bb57988cd 100644 --- a/weed/filesys/file.go +++ b/weed/filesys/file.go @@ -3,20 +3,22 @@ package filesys import ( "context" "os" - "path/filepath" "sort" "time" - "github.com/chrislusf/seaweedfs/weed/filer2" - "github.com/chrislusf/seaweedfs/weed/glog" - "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" "github.com/seaweedfs/fuse" "github.com/seaweedfs/fuse/fs" + + "github.com/chrislusf/seaweedfs/weed/filer" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/util" ) const blockSize = 512 var _ = fs.Node(&File{}) +var _ = fs.NodeIdentifier(&File{}) var _ = fs.NodeOpener(&File{}) var _ = fs.NodeFsyncer(&File{}) var _ = fs.NodeSetattrer(&File{}) @@ -24,35 +26,56 @@ var _ = fs.NodeGetxattrer(&File{}) var _ = fs.NodeSetxattrer(&File{}) var _ = fs.NodeRemovexattrer(&File{}) var _ = fs.NodeListxattrer(&File{}) +var _ = fs.NodeForgetter(&File{}) type File struct { - Name string - dir *Dir - wfs *WFS - entry *filer_pb.Entry - entryViewCache []filer2.VisibleInterval - isOpen bool + Name string + dir *Dir + wfs *WFS + entry *filer_pb.Entry + isOpen int + dirtyMetadata bool + id uint64 +} + +func (file *File) fullpath() util.FullPath { + return util.NewFullPath(file.dir.FullPath(), file.Name) } -func (file *File) fullpath() string { - return filepath.Join(file.dir.Path, file.Name) +func (file *File) Id() uint64 { + return file.id } -func (file *File) Attr(ctx context.Context, attr *fuse.Attr) error { +func (file *File) Attr(ctx context.Context, attr *fuse.Attr) (err error) { - glog.V(4).Infof("file Attr %s", file.fullpath()) + glog.V(4).Infof("file Attr %s, open:%v existing:%v", file.fullpath(), file.isOpen, attr) - if err := file.maybeLoadEntry(ctx); err != nil { + entry, err := file.maybeLoadEntry(ctx) + if err != nil { return err } - attr.Mode = os.FileMode(file.entry.Attributes.FileMode) - attr.Size = filer2.TotalSize(file.entry.Chunks) - attr.Mtime = time.Unix(file.entry.Attributes.Mtime, 0) - attr.Gid = file.entry.Attributes.Gid - attr.Uid = file.entry.Attributes.Uid + if entry == nil { + return fuse.ENOENT + } + + attr.Inode = file.Id() + attr.Valid = time.Second + attr.Mode = os.FileMode(entry.Attributes.FileMode) + attr.Size = filer.FileSize(entry) + if file.isOpen > 0 { + attr.Size = entry.Attributes.FileSize + glog.V(4).Infof("file Attr %s, open:%v, size: %d", file.fullpath(), file.isOpen, attr.Size) + } + attr.Crtime = time.Unix(entry.Attributes.Crtime, 0) + attr.Mtime = time.Unix(entry.Attributes.Mtime, 0) + attr.Gid = entry.Attributes.Gid + attr.Uid = entry.Attributes.Uid attr.Blocks = attr.Size/blockSize + 1 attr.BlockSize = uint32(file.wfs.option.ChunkSizeLimit) + if entry.HardLinkCounter > 0 { + attr.Nlink = 
uint32(entry.HardLinkCounter) + } return nil @@ -62,24 +85,23 @@ func (file *File) Getxattr(ctx context.Context, req *fuse.GetxattrRequest, resp glog.V(4).Infof("file Getxattr %s", file.fullpath()) - if err := file.maybeLoadEntry(ctx); err != nil { + entry, err := file.maybeLoadEntry(ctx) + if err != nil { return err } - return getxattr(file.entry, req, resp) + return getxattr(entry, req, resp) } func (file *File) Open(ctx context.Context, req *fuse.OpenRequest, resp *fuse.OpenResponse) (fs.Handle, error) { glog.V(4).Infof("file %v open %+v", file.fullpath(), req) - file.isOpen = true - handle := file.wfs.AcquireHandle(file, req.Uid, req.Gid) resp.Handle = fuse.HandleID(handle.handle) - glog.V(3).Infof("%v file open handle id = %d", file.fullpath(), handle.handle) + glog.V(4).Infof("%v file open handle id = %d", file.fullpath(), handle.handle) return handle, nil @@ -87,48 +109,89 @@ func (file *File) Open(ctx context.Context, req *fuse.OpenRequest, resp *fuse.Op func (file *File) Setattr(ctx context.Context, req *fuse.SetattrRequest, resp *fuse.SetattrResponse) error { - if err := file.maybeLoadEntry(ctx); err != nil { + glog.V(4).Infof("%v file setattr %+v", file.fullpath(), req) + + entry, err := file.maybeLoadEntry(ctx) + if err != nil { return err } + if file.isOpen > 0 { + file.wfs.handlesLock.Lock() + fileHandle := file.wfs.handles[file.Id()] + file.wfs.handlesLock.Unlock() + + if fileHandle != nil { + fileHandle.Lock() + defer fileHandle.Unlock() + } + } - glog.V(3).Infof("%v file setattr %+v, old:%+v", file.fullpath(), req, file.entry.Attributes) if req.Valid.Size() { - glog.V(3).Infof("%v file setattr set size=%v", file.fullpath(), req.Size) - if req.Size == 0 { + glog.V(4).Infof("%v file setattr set size=%v chunks=%d", file.fullpath(), req.Size, len(entry.Chunks)) + if req.Size < filer.FileSize(entry) { // fmt.Printf("truncate %v \n", fullPath) - file.entry.Chunks = nil - file.entryViewCache = nil + var chunks []*filer_pb.FileChunk + var truncatedChunks []*filer_pb.FileChunk + for _, chunk := range entry.Chunks { + int64Size := int64(chunk.Size) + if chunk.Offset+int64Size > int64(req.Size) { + // this chunk is truncated + int64Size = int64(req.Size) - chunk.Offset + if int64Size > 0 { + chunks = append(chunks, chunk) + glog.V(4).Infof("truncated chunk %+v from %d to %d\n", chunk.GetFileIdString(), chunk.Size, int64Size) + chunk.Size = uint64(int64Size) + } else { + glog.V(4).Infof("truncated whole chunk %+v\n", chunk.GetFileIdString()) + truncatedChunks = append(truncatedChunks, chunk) + } + } + } + entry.Chunks = chunks } - file.entry.Attributes.FileSize = req.Size + entry.Attributes.FileSize = req.Size + file.dirtyMetadata = true } + if req.Valid.Mode() { - file.entry.Attributes.FileMode = uint32(req.Mode) + entry.Attributes.FileMode = uint32(req.Mode) + file.dirtyMetadata = true } if req.Valid.Uid() { - file.entry.Attributes.Uid = req.Uid + entry.Attributes.Uid = req.Uid + file.dirtyMetadata = true } if req.Valid.Gid() { - file.entry.Attributes.Gid = req.Gid + entry.Attributes.Gid = req.Gid + file.dirtyMetadata = true } if req.Valid.Crtime() { - file.entry.Attributes.Crtime = req.Crtime.Unix() + entry.Attributes.Crtime = req.Crtime.Unix() + file.dirtyMetadata = true } if req.Valid.Mtime() { - file.entry.Attributes.Mtime = req.Mtime.Unix() + entry.Attributes.Mtime = req.Mtime.Unix() + file.dirtyMetadata = true + } + + if req.Valid.Handle() { + // fmt.Printf("file handle => %d\n", req.Handle) } - if file.isOpen { + if file.isOpen > 0 { return nil } - 
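`Attr` above deliberately reports two different sizes: a closed file derives its size from the committed chunk list (`filer.FileSize`), while an open file trusts `entry.Attributes.FileSize`, which the write path keeps current before chunks are flushed. Condensed:

```go
func visibleSize(entry *filer_pb.Entry, isOpen bool) uint64 {
	if isOpen {
		// includes bytes written but not yet flushed to volume servers
		return entry.Attributes.FileSize
	}
	return filer.FileSize(entry) // derived from the committed chunks
}
```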
file.wfs.listDirectoryEntriesCache.Delete(file.fullpath()) + if !file.dirtyMetadata { + return nil + } - return file.saveEntry(ctx) + return file.saveEntry(entry) } @@ -136,17 +199,16 @@ func (file *File) Setxattr(ctx context.Context, req *fuse.SetxattrRequest) error glog.V(4).Infof("file Setxattr %s: %s", file.fullpath(), req.Name) - if err := file.maybeLoadEntry(ctx); err != nil { + entry, err := file.maybeLoadEntry(ctx) + if err != nil { return err } - if err := setxattr(file.entry, req); err != nil { + if err := setxattr(entry, req); err != nil { return err } - file.wfs.listDirectoryEntriesCache.Delete(file.fullpath()) - - return file.saveEntry(ctx) + return file.saveEntry(entry) } @@ -154,17 +216,16 @@ func (file *File) Removexattr(ctx context.Context, req *fuse.RemovexattrRequest) glog.V(4).Infof("file Removexattr %s: %s", file.fullpath(), req.Name) - if err := file.maybeLoadEntry(ctx); err != nil { + entry, err := file.maybeLoadEntry(ctx) + if err != nil { return err } - if err := removexattr(file.entry, req); err != nil { + if err := removexattr(entry, req); err != nil { return err } - file.wfs.listDirectoryEntriesCache.Delete(file.fullpath()) - - return file.saveEntry(ctx) + return file.saveEntry(entry) } @@ -172,11 +233,12 @@ func (file *File) Listxattr(ctx context.Context, req *fuse.ListxattrRequest, res glog.V(4).Infof("file Listxattr %s", file.fullpath()) - if err := file.maybeLoadEntry(ctx); err != nil { + entry, err := file.maybeLoadEntry(ctx) + if err != nil { return err } - if err := listxattr(file.entry, req, resp); err != nil { + if err := listxattr(entry, req, resp); err != nil { return err } @@ -187,69 +249,112 @@ func (file *File) Listxattr(ctx context.Context, req *fuse.ListxattrRequest, res func (file *File) Fsync(ctx context.Context, req *fuse.FsyncRequest) error { // fsync works at OS level // write the file chunks to the filerGrpcAddress - glog.V(3).Infof("%s/%s fsync file %+v", file.dir.Path, file.Name, req) + glog.V(4).Infof("%s/%s fsync file %+v", file.dir.FullPath(), file.Name, req) return nil } -func (file *File) maybeLoadEntry(ctx context.Context) error { - if file.entry == nil || !file.isOpen { - entry, err := file.wfs.maybeLoadEntry(ctx, file.dir.Path, file.Name) - if err != nil { - return err - } - if entry != nil { - file.setEntry(entry) +func (file *File) Forget() { + t := util.NewFullPath(file.dir.FullPath(), file.Name) + glog.V(4).Infof("Forget file %s", t) + file.wfs.ReleaseHandle(t, fuse.HandleID(t.AsInode())) +} + +func (file *File) maybeLoadEntry(ctx context.Context) (entry *filer_pb.Entry, err error) { + + file.wfs.handlesLock.Lock() + handle, found := file.wfs.handles[file.Id()] + file.wfs.handlesLock.Unlock() + entry = file.entry + if found { + glog.V(4).Infof("maybeLoadEntry found opened file %s/%s: %v %v", file.dir.FullPath(), file.Name, handle.f.entry, entry) + entry = handle.f.entry + } + + if entry != nil { + if len(entry.HardLinkId) == 0 { + // only always reload hard link + return entry, nil } } - return nil + entry, err = file.wfs.maybeLoadEntry(file.dir.FullPath(), file.Name) + if err != nil { + glog.V(3).Infof("maybeLoadEntry file %s/%s: %v", file.dir.FullPath(), file.Name, err) + return entry, err + } + if entry != nil { + // file.entry = entry + } else { + glog.Warningf("maybeLoadEntry not found entry %s/%s: %v", file.dir.FullPath(), file.Name, err) + } + return entry, nil } -func (file *File) addChunk(chunk *filer_pb.FileChunk) { - if chunk != nil { - file.addChunks([]*filer_pb.FileChunk{chunk}) +func lessThan(a, b 
*filer_pb.FileChunk) bool { + if a.Mtime == b.Mtime { + return a.Fid.FileKey < b.Fid.FileKey } + return a.Mtime < b.Mtime } func (file *File) addChunks(chunks []*filer_pb.FileChunk) { - sort.Slice(chunks, func(i, j int) bool { - return chunks[i].Mtime < chunks[j].Mtime - }) + // find the earliest incoming chunk + newChunks := chunks + earliestChunk := newChunks[0] + for i := 1; i < len(newChunks); i++ { + if lessThan(earliestChunk, newChunks[i]) { + earliestChunk = newChunks[i] + } + } - var newVisibles []filer2.VisibleInterval - for _, chunk := range chunks { - newVisibles = filer2.MergeIntoVisibles(file.entryViewCache, newVisibles, chunk) - t := file.entryViewCache[:0] - file.entryViewCache = newVisibles - newVisibles = t + entry := file.getEntry() + if entry == nil { + return } - glog.V(3).Infof("%s existing %d chunks adds %d more", file.fullpath(), len(file.entry.Chunks), len(chunks)) + // pick out-of-order chunks from existing chunks + for _, chunk := range entry.Chunks { + if lessThan(earliestChunk, chunk) { + chunks = append(chunks, chunk) + } + } - file.entry.Chunks = append(file.entry.Chunks, chunks...) -} + // sort incoming chunks + sort.Slice(chunks, func(i, j int) bool { + return lessThan(chunks[i], chunks[j]) + }) -func (file *File) setEntry(entry *filer_pb.Entry) { - file.entry = entry - file.entryViewCache = filer2.NonOverlappingVisibleIntervals(file.entry.Chunks) + glog.V(4).Infof("%s existing %d chunks adds %d more", file.fullpath(), len(entry.Chunks), len(chunks)) + + entry.Chunks = append(entry.Chunks, newChunks...) } -func (file *File) saveEntry(ctx context.Context) error { - return file.wfs.WithFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { +func (file *File) saveEntry(entry *filer_pb.Entry) error { + return file.wfs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { + + file.wfs.mapPbIdFromLocalToFiler(entry) + defer file.wfs.mapPbIdFromFilerToLocal(entry) request := &filer_pb.UpdateEntryRequest{ - Directory: file.dir.Path, - Entry: file.entry, + Directory: file.dir.FullPath(), + Entry: entry, + Signatures: []int32{file.wfs.signature}, } - glog.V(1).Infof("save file entry: %v", request) - _, err := client.UpdateEntry(ctx, request) + glog.V(4).Infof("save file entry: %v", request) + _, err := client.UpdateEntry(context.Background(), request) if err != nil { - glog.V(0).Infof("UpdateEntry file %s/%s: %v", file.dir.Path, file.Name, err) + glog.Errorf("UpdateEntry file %s/%s: %v", file.dir.FullPath(), file.Name, err) return fuse.EIO } + file.wfs.metaCache.UpdateEntry(context.Background(), filer.FromPbEntry(request.Directory, request.Entry)) + return nil }) } + +func (file *File) getEntry() *filer_pb.Entry { + return file.entry +} diff --git a/weed/filesys/filehandle.go b/weed/filesys/filehandle.go index 101f5c056..27ffab6e1 100644 --- a/weed/filesys/filehandle.go +++ b/weed/filesys/filehandle.go @@ -3,39 +3,51 @@ package filesys import ( "context" "fmt" - "mime" - "path" + "io" + "math" + "net/http" + "os" + "sync" "time" - "github.com/chrislusf/seaweedfs/weed/filer2" - "github.com/chrislusf/seaweedfs/weed/glog" - "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" - "github.com/gabriel-vasile/mimetype" "github.com/seaweedfs/fuse" "github.com/seaweedfs/fuse/fs" + + "github.com/chrislusf/seaweedfs/weed/filer" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" ) type FileHandle struct { // cache file has been written to - dirtyPages *ContinuousDirtyPages - contentType string - dirtyMetadata bool - 
handle uint64 + dirtyPages *ContinuousDirtyPages + entryViewCache []filer.VisibleInterval + reader io.ReaderAt + contentType string + handle uint64 + sync.Mutex f *File RequestId fuse.RequestID // unique ID for request NodeId fuse.NodeID // file or directory the request is about Uid uint32 // user ID of process making request Gid uint32 // group ID of process making request + } func newFileHandle(file *File, uid, gid uint32) *FileHandle { - return &FileHandle{ + fh := &FileHandle{ f: file, dirtyPages: newDirtyPages(file), Uid: uid, Gid: gid, } + entry := fh.f.getEntry() + if entry != nil { + entry.Attributes.FileSize = filer.FileSize(entry) + } + + return fh } var _ = fs.Handle(&FileHandle{}) @@ -48,134 +60,263 @@ var _ = fs.HandleReleaser(&FileHandle{}) func (fh *FileHandle) Read(ctx context.Context, req *fuse.ReadRequest, resp *fuse.ReadResponse) error { - glog.V(4).Infof("%s read fh %d: [%d,%d)", fh.f.fullpath(), fh.handle, req.Offset, req.Offset+int64(req.Size)) + glog.V(4).Infof("%s read fh %d: [%d,%d) size %d resp.Data cap=%d", fh.f.fullpath(), fh.handle, req.Offset, req.Offset+int64(req.Size), req.Size, cap(resp.Data)) + fh.Lock() + defer fh.Unlock() - // this value should come from the filer instead of the old f - if len(fh.f.entry.Chunks) == 0 { - glog.V(1).Infof("empty fh %v/%v", fh.f.dir.Path, fh.f.Name) + if req.Size <= 0 { return nil } - buff := make([]byte, req.Size) - - if fh.f.entryViewCache == nil { - fh.f.entryViewCache = filer2.NonOverlappingVisibleIntervals(fh.f.entry.Chunks) + buff := resp.Data[:cap(resp.Data)] + if req.Size > cap(resp.Data) { + // should not happen + buff = make([]byte, req.Size) } - chunkViews := filer2.ViewFromVisibleIntervals(fh.f.entryViewCache, req.Offset, req.Size) - - totalRead, err := filer2.ReadIntoBuffer(ctx, fh.f.wfs, fh.f.fullpath(), buff, chunkViews, req.Offset) + totalRead, err := fh.readFromChunks(buff, req.Offset) + if err == nil || err == io.EOF { + maxStop := fh.readFromDirtyPages(buff, req.Offset) + totalRead = max(maxStop-req.Offset, totalRead) + } - resp.Data = buff[:totalRead] + if err == io.EOF { + err = nil + } if err != nil { - glog.Errorf("file handle read %s: %v", fh.f.fullpath(), err) + glog.Warningf("file handle read %s %d: %v", fh.f.fullpath(), totalRead, err) + return fuse.EIO + } + + if totalRead > int64(len(buff)) { + glog.Warningf("%s FileHandle Read %d: [%d,%d) size %d totalRead %d", fh.f.fullpath(), fh.handle, req.Offset, req.Offset+int64(req.Size), req.Size, totalRead) + totalRead = min(int64(len(buff)), totalRead) + } + if err == nil { + resp.Data = buff[:totalRead] } return err } -// Write to the file handle -func (fh *FileHandle) Write(ctx context.Context, req *fuse.WriteRequest, resp *fuse.WriteResponse) error { +func (fh *FileHandle) readFromDirtyPages(buff []byte, startOffset int64) (maxStop int64) { + maxStop = fh.dirtyPages.ReadDirtyDataAt(buff, startOffset) + return +} - // write the request to volume servers +func (fh *FileHandle) readFromChunks(buff []byte, offset int64) (int64, error) { - glog.V(4).Infof("%+v/%v write fh %d: [%d,%d)", fh.f.dir.Path, fh.f.Name, fh.handle, req.Offset, req.Offset+int64(len(req.Data))) + entry := fh.f.getEntry() + if entry == nil { + return 0, io.EOF + } - chunks, err := fh.dirtyPages.AddPage(ctx, req.Offset, req.Data) - if err != nil { - glog.Errorf("%+v/%v write fh %d: [%d,%d): %v", fh.f.dir.Path, fh.f.Name, fh.handle, req.Offset, req.Offset+int64(len(req.Data)), err) - return fmt.Errorf("write %s/%s at [%d,%d): %v", fh.f.dir.Path, fh.f.Name, req.Offset, 
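The reworked `Read` composes two sources: committed chunks are read first, then the in-memory dirty pages are overlaid on top of the buffer, and the effective read length is the larger of the two extents. A condensed sketch, reusing the package's int64 `max` helper; the function name is hypothetical:

```go
func readThroughDirtyOverlay(fh *FileHandle, buff []byte, offset int64) (int64, error) {
	totalRead, err := fh.readFromChunks(buff, offset)
	if err == nil || err == io.EOF {
		// newer, not-yet-flushed bytes win wherever they overlap
		maxStop := fh.dirtyPages.ReadDirtyDataAt(buff, offset)
		totalRead = max(maxStop-offset, totalRead)
	}
	if err == io.EOF {
		err = nil // EOF from the chunk layer is not an error for the caller
	}
	return totalRead, err
}
```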
req.Offset+int64(len(req.Data)), err) + fileSize := int64(filer.FileSize(entry)) + fileFullPath := fh.f.fullpath() + + if fileSize == 0 { + glog.V(1).Infof("empty fh %v", fileFullPath) + return 0, io.EOF } - resp.Size = len(req.Data) + if offset+int64(len(buff)) <= int64(len(entry.Content)) { + totalRead := copy(buff, entry.Content[offset:]) + glog.V(4).Infof("file handle read cached %s [%d,%d] %d", fileFullPath, offset, offset+int64(totalRead), totalRead) + return int64(totalRead), nil + } - if req.Offset == 0 { - // detect mime type - detectedMIME := mimetype.Detect(req.Data) - fh.contentType = detectedMIME.String() - if ext := path.Ext(fh.f.Name); ext != detectedMIME.Extension() { - fh.contentType = mime.TypeByExtension(ext) + var chunkResolveErr error + if fh.entryViewCache == nil { + fh.entryViewCache, chunkResolveErr = filer.NonOverlappingVisibleIntervals(fh.f.wfs.LookupFn(), entry.Chunks) + if chunkResolveErr != nil { + return 0, fmt.Errorf("fail to resolve chunk manifest: %v", chunkResolveErr) } + fh.reader = nil + } + + reader := fh.reader + if reader == nil { + chunkViews := filer.ViewFromVisibleIntervals(fh.entryViewCache, 0, math.MaxInt64) + reader = filer.NewChunkReaderAtFromClient(fh.f.wfs.LookupFn(), chunkViews, fh.f.wfs.chunkCache, fileSize) + } + fh.reader = reader - fh.dirtyMetadata = true + totalRead, err := reader.ReadAt(buff, offset) + + if err != nil && err != io.EOF { + glog.Errorf("file handle read %s: %v", fileFullPath, err) } - if len(chunks) > 0 { + glog.V(4).Infof("file handle read %s [%d,%d] %d : %v", fileFullPath, offset, offset+int64(totalRead), totalRead, err) + + return int64(totalRead), err +} - fh.f.addChunks(chunks) +// Write to the file handle +func (fh *FileHandle) Write(ctx context.Context, req *fuse.WriteRequest, resp *fuse.WriteResponse) error { + + fh.Lock() + defer fh.Unlock() + + // write the request to volume servers + data := req.Data + if len(data) <= 512 { + // fuse message cacheable size + data = make([]byte, len(req.Data)) + copy(data, req.Data) + } - fh.dirtyMetadata = true + entry := fh.f.getEntry() + if entry == nil { + return fuse.EIO } + entry.Content = nil + entry.Attributes.FileSize = uint64(max(req.Offset+int64(len(data)), int64(entry.Attributes.FileSize))) + glog.V(4).Infof("%v write [%d,%d) %d", fh.f.fullpath(), req.Offset, req.Offset+int64(len(req.Data)), len(req.Data)) + + fh.dirtyPages.AddPage(req.Offset, data) + + resp.Size = len(data) + + if req.Offset == 0 { + // detect mime type + fh.contentType = http.DetectContentType(data) + fh.f.dirtyMetadata = true + } + + fh.f.dirtyMetadata = true + return nil } func (fh *FileHandle) Release(ctx context.Context, req *fuse.ReleaseRequest) error { - glog.V(4).Infof("%v release fh %d", fh.f.fullpath(), fh.handle) + glog.V(4).Infof("Release %v fh %d open=%d", fh.f.fullpath(), fh.handle, fh.f.isOpen) + + fh.Lock() + defer fh.Unlock() - fh.dirtyPages.releaseResource() + fh.f.isOpen-- - fh.f.wfs.ReleaseHandle(fh.f.fullpath(), fuse.HandleID(fh.handle)) + if fh.f.isOpen <= 0 { + fh.f.entry = nil + fh.entryViewCache = nil + fh.reader = nil - fh.f.isOpen = false + fh.f.wfs.ReleaseHandle(fh.f.fullpath(), fuse.HandleID(fh.handle)) + } + + if fh.f.isOpen < 0 { + glog.V(0).Infof("Release reset %s open count %d => %d", fh.f.Name, fh.f.isOpen, 0) + fh.f.isOpen = 0 + return nil + } return nil } func (fh *FileHandle) Flush(ctx context.Context, req *fuse.FlushRequest) error { - // fflush works at fh level - // send the data to the OS - glog.V(4).Infof("%s fh %d flush %v", fh.f.fullpath(), fh.handle, req) 
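`readFromChunks` above builds its reader lazily and caches it on the handle: the chunk manifest is resolved once into `entryViewCache`, a `ChunkReaderAt` is constructed from that view, and both are invalidated (`fh.reader = nil`) whenever the chunk list changes. A sketch of the lazy construction using the same filer helpers as the diff:

```go
func (fh *FileHandle) chunkReader(entry *filer_pb.Entry, fileSize int64) (io.ReaderAt, error) {
	if fh.entryViewCache == nil {
		views, err := filer.NonOverlappingVisibleIntervals(fh.f.wfs.LookupFn(), entry.Chunks)
		if err != nil {
			return nil, err // a chunk manifest could not be resolved
		}
		fh.entryViewCache = views
		fh.reader = nil // any cached reader was built from the old view
	}
	if fh.reader == nil {
		chunkViews := filer.ViewFromVisibleIntervals(fh.entryViewCache, 0, math.MaxInt64)
		fh.reader = filer.NewChunkReaderAtFromClient(fh.f.wfs.LookupFn(), chunkViews,
			fh.f.wfs.chunkCache, fileSize)
	}
	return fh.reader, nil
}
```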
- chunk, err := fh.dirtyPages.FlushToStorage(ctx) - if err != nil { - glog.Errorf("flush %s/%s: %v", fh.f.dir.Path, fh.f.Name, err) - return fmt.Errorf("flush %s/%s: %v", fh.f.dir.Path, fh.f.Name, err) + glog.V(4).Infof("Flush %v fh %d", fh.f.fullpath(), fh.handle) + + fh.Lock() + defer fh.Unlock() + + if err := fh.doFlush(ctx, req.Header); err != nil { + glog.Errorf("Flush doFlush %s: %v", fh.f.Name, err) + return err } - fh.f.addChunk(chunk) + glog.V(4).Infof("Flush %v fh %d success", fh.f.fullpath(), fh.handle) + return nil +} + +func (fh *FileHandle) doFlush(ctx context.Context, header fuse.Header) error { + // flush works at fh level + // send the data to the OS + glog.V(4).Infof("doFlush %s fh %d", fh.f.fullpath(), fh.handle) + + fh.dirtyPages.saveExistingPagesToStorage() + + fh.dirtyPages.writeWaitGroup.Wait() - if !fh.dirtyMetadata { + if fh.dirtyPages.lastErr != nil { + glog.Errorf("%v doFlush last err: %v", fh.f.fullpath(), fh.dirtyPages.lastErr) + return fuse.EIO + } + + if !fh.f.dirtyMetadata { return nil } - return fh.f.wfs.WithFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { + err := fh.f.wfs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { + + entry := fh.f.getEntry() + if entry == nil { + return nil + } - if fh.f.entry.Attributes != nil { - fh.f.entry.Attributes.Mime = fh.contentType - fh.f.entry.Attributes.Uid = req.Uid - fh.f.entry.Attributes.Gid = req.Gid - fh.f.entry.Attributes.Mtime = time.Now().Unix() - fh.f.entry.Attributes.Crtime = time.Now().Unix() - fh.f.entry.Attributes.FileMode = uint32(0777 &^ fh.f.wfs.option.Umask) + if entry.Attributes != nil { + entry.Attributes.Mime = fh.contentType + if entry.Attributes.Uid == 0 { + entry.Attributes.Uid = header.Uid + } + if entry.Attributes.Gid == 0 { + entry.Attributes.Gid = header.Gid + } + if entry.Attributes.Crtime == 0 { + entry.Attributes.Crtime = time.Now().Unix() + } + entry.Attributes.Mtime = time.Now().Unix() + entry.Attributes.FileMode = uint32(os.FileMode(entry.Attributes.FileMode) &^ fh.f.wfs.option.Umask) + entry.Attributes.Collection = fh.dirtyPages.collection + entry.Attributes.Replication = fh.dirtyPages.replication } request := &filer_pb.CreateEntryRequest{ - Directory: fh.f.dir.Path, - Entry: fh.f.entry, + Directory: fh.f.dir.FullPath(), + Entry: entry, + Signatures: []int32{fh.f.wfs.signature}, } - glog.V(3).Infof("%s/%s set chunks: %v", fh.f.dir.Path, fh.f.Name, len(fh.f.entry.Chunks)) - for i, chunk := range fh.f.entry.Chunks { - glog.V(3).Infof("%s/%s chunks %d: %v [%d,%d)", fh.f.dir.Path, fh.f.Name, i, chunk.FileId, chunk.Offset, chunk.Offset+int64(chunk.Size)) + glog.V(4).Infof("%s set chunks: %v", fh.f.fullpath(), len(entry.Chunks)) + for i, chunk := range entry.Chunks { + glog.V(4).Infof("%s chunks %d: %v [%d,%d)", fh.f.fullpath(), i, chunk.GetFileIdString(), chunk.Offset, chunk.Offset+int64(chunk.Size)) } - chunks, garbages := filer2.CompactFileChunks(fh.f.entry.Chunks) - fh.f.entry.Chunks = chunks - // fh.f.entryViewCache = nil + manifestChunks, nonManifestChunks := filer.SeparateManifestChunks(entry.Chunks) - if _, err := client.CreateEntry(ctx, request); err != nil { - glog.Errorf("update fh: %v", err) - return fmt.Errorf("update fh: %v", err) + chunks, _ := filer.CompactFileChunks(fh.f.wfs.LookupFn(), nonManifestChunks) + chunks, manifestErr := filer.MaybeManifestize(fh.f.wfs.saveDataAsChunk(fh.f.fullpath()), chunks) + if manifestErr != nil { + // not good, but should be ok + glog.V(0).Infof("MaybeManifestize: %v", manifestErr) } + entry.Chunks = 
append(chunks, manifestChunks...) - fh.f.wfs.deleteFileChunks(ctx, garbages) - for i, chunk := range garbages { - glog.V(3).Infof("garbage %s/%s chunks %d: %v [%d,%d)", fh.f.dir.Path, fh.f.Name, i, chunk.FileId, chunk.Offset, chunk.Offset+int64(chunk.Size)) + fh.f.wfs.mapPbIdFromLocalToFiler(request.Entry) + defer fh.f.wfs.mapPbIdFromFilerToLocal(request.Entry) + + if err := filer_pb.CreateEntry(client, request); err != nil { + glog.Errorf("fh flush create %s: %v", fh.f.fullpath(), err) + return fmt.Errorf("fh flush create %s: %v", fh.f.fullpath(), err) } + fh.f.wfs.metaCache.InsertEntry(context.Background(), filer.FromPbEntry(request.Directory, request.Entry)) + return nil }) + + if err == nil { + fh.f.dirtyMetadata = false + } + + if err != nil { + glog.Errorf("%v fh %d flush: %v", fh.f.fullpath(), fh.handle, err) + return fuse.EIO + } + + return nil } diff --git a/weed/filesys/fscache.go b/weed/filesys/fscache.go new file mode 100644 index 000000000..6b1012090 --- /dev/null +++ b/weed/filesys/fscache.go @@ -0,0 +1,213 @@ +package filesys + +import ( + "sync" + + "github.com/seaweedfs/fuse/fs" + + "github.com/chrislusf/seaweedfs/weed/util" +) + +type FsCache struct { + root *FsNode + sync.RWMutex +} +type FsNode struct { + parent *FsNode + node fs.Node + name string + childrenLock sync.RWMutex + children map[string]*FsNode +} + +func newFsCache(root fs.Node) *FsCache { + return &FsCache{ + root: &FsNode{ + node: root, + }, + } +} + +func (c *FsCache) GetFsNode(path util.FullPath) fs.Node { + + c.RLock() + defer c.RUnlock() + + return c.doGetFsNode(path) +} + +func (c *FsCache) doGetFsNode(path util.FullPath) fs.Node { + t := c.root + for _, p := range path.Split() { + t = t.findChild(p) + if t == nil { + return nil + } + } + return t.node +} + +func (c *FsCache) SetFsNode(path util.FullPath, node fs.Node) { + + c.Lock() + defer c.Unlock() + + c.doSetFsNode(path, node) +} + +func (c *FsCache) doSetFsNode(path util.FullPath, node fs.Node) { + t := c.root + for _, p := range path.Split() { + t = t.ensureChild(p) + } + t.node = node +} + +func (c *FsCache) EnsureFsNode(path util.FullPath, genNodeFn func() fs.Node) fs.Node { + + c.Lock() + defer c.Unlock() + + t := c.doGetFsNode(path) + if t != nil { + return t + } + t = genNodeFn() + c.doSetFsNode(path, t) + return t +} + +func (c *FsCache) DeleteFsNode(path util.FullPath) { + + c.Lock() + defer c.Unlock() + + t := c.root + for _, p := range path.Split() { + t = t.findChild(p) + if t == nil { + return + } + } + if t.parent != nil { + t.parent.disconnectChild(t) + } + t.deleteSelf() +} + +// oldPath and newPath are full path including the new name +func (c *FsCache) Move(oldPath util.FullPath, newPath util.FullPath) *FsNode { + + c.Lock() + defer c.Unlock() + + // find old node + src := c.root + for _, p := range oldPath.Split() { + src = src.findChild(p) + if src == nil { + return src + } + } + if src.parent != nil { + src.parent.disconnectChild(src) + } + + // find new node + target := c.root + for _, p := range newPath.Split() { + target = target.ensureChild(p) + } + parent := target.parent + if dir, ok := src.node.(*Dir); ok { + dir.name = target.name // target is not Dir, but a shortcut + } + if f, ok := src.node.(*File); ok { + f.Name = target.name + entry := f.getEntry() + if entry != nil { + entry.Name = f.Name + } + } + parent.disconnectChild(target) + + target.deleteSelf() + + src.name = target.name + src.connectToParent(parent) + + return src +} + +func (n *FsNode) connectToParent(parent *FsNode) { + n.parent = parent + oldNode := 
parent.findChild(n.name) + if oldNode != nil { + oldNode.deleteSelf() + } + if dir, ok := n.node.(*Dir); ok { + if parent.node != nil { + dir.parent = parent.node.(*Dir) + } + } + if f, ok := n.node.(*File); ok { + if parent.node != nil { + f.dir = parent.node.(*Dir) + } + } + n.childrenLock.Lock() + parent.children[n.name] = n + n.childrenLock.Unlock() +} + +func (n *FsNode) findChild(name string) *FsNode { + n.childrenLock.RLock() + defer n.childrenLock.RUnlock() + + child, found := n.children[name] + if found { + return child + } + return nil +} + +func (n *FsNode) ensureChild(name string) *FsNode { + n.childrenLock.Lock() + defer n.childrenLock.Unlock() + + if n.children == nil { + n.children = make(map[string]*FsNode) + } + child, found := n.children[name] + if found { + return child + } + t := &FsNode{ + parent: n, + node: nil, + name: name, + children: nil, + } + n.children[name] = t + return t +} + +func (n *FsNode) disconnectChild(child *FsNode) { + n.childrenLock.Lock() + delete(n.children, child.name) + n.childrenLock.Unlock() + child.parent = nil +} + +func (n *FsNode) deleteSelf() { + n.childrenLock.Lock() + for _, child := range n.children { + child.deleteSelf() + } + n.children = nil + n.childrenLock.Unlock() + + n.node = nil + n.parent = nil + +} diff --git a/weed/filesys/fscache_test.go b/weed/filesys/fscache_test.go new file mode 100644 index 000000000..1152eb32e --- /dev/null +++ b/weed/filesys/fscache_test.go @@ -0,0 +1,115 @@ +package filesys + +import ( + "testing" + + "github.com/chrislusf/seaweedfs/weed/util" +) + +func TestPathSplit(t *testing.T) { + parts := util.FullPath("/").Split() + if len(parts) != 0 { + t.Errorf("expecting an empty list, but getting %d", len(parts)) + } + + parts = util.FullPath("/readme.md").Split() + if len(parts) != 1 { + t.Errorf("expecting an empty list, but getting %d", len(parts)) + } + +} + +func TestFsCache(t *testing.T) { + + cache := newFsCache(nil) + + x := cache.GetFsNode(util.FullPath("/y/x")) + if x != nil { + t.Errorf("wrong node!") + } + + p := util.FullPath("/a/b/c") + cache.SetFsNode(p, &File{Name: "cc"}) + tNode := cache.GetFsNode(p) + tFile := tNode.(*File) + if tFile.Name != "cc" { + t.Errorf("expecting a FsNode") + } + + cache.SetFsNode(util.FullPath("/a/b/d"), &File{Name: "dd"}) + cache.SetFsNode(util.FullPath("/a/b/e"), &File{Name: "ee"}) + cache.SetFsNode(util.FullPath("/a/b/f"), &File{Name: "ff"}) + cache.SetFsNode(util.FullPath("/z"), &File{Name: "zz"}) + cache.SetFsNode(util.FullPath("/a"), &File{Name: "aa"}) + + b := cache.GetFsNode(util.FullPath("/a/b")) + if b != nil { + t.Errorf("unexpected node!") + } + + a := cache.GetFsNode(util.FullPath("/a")) + if a == nil { + t.Errorf("missing node!") + } + + cache.DeleteFsNode(util.FullPath("/a")) + if b != nil { + t.Errorf("unexpected node!") + } + + a = cache.GetFsNode(util.FullPath("/a")) + if a != nil { + t.Errorf("wrong DeleteFsNode!") + } + + z := cache.GetFsNode(util.FullPath("/z")) + if z == nil { + t.Errorf("missing node!") + } + + y := cache.GetFsNode(util.FullPath("/x/y")) + if y != nil { + t.Errorf("wrong node!") + } + +} + +func TestFsCacheMove(t *testing.T) { + + cache := newFsCache(nil) + + cache.SetFsNode(util.FullPath("/a/b/d"), &File{Name: "dd"}) + cache.SetFsNode(util.FullPath("/a/b/e"), &File{Name: "ee"}) + cache.SetFsNode(util.FullPath("/z"), &File{Name: "zz"}) + cache.SetFsNode(util.FullPath("/a"), &File{Name: "aa"}) + + cache.Move(util.FullPath("/a/b"), util.FullPath("/z/x")) + + d := cache.GetFsNode(util.FullPath("/z/x/d")) + if d == nil { + 
t.Errorf("unexpected nil node!") + } + if d.(*File).Name != "dd" { + t.Errorf("unexpected non dd node!") + } + +} + +func TestFsCacheMove2(t *testing.T) { + + cache := newFsCache(nil) + + cache.SetFsNode(util.FullPath("/a/b/d"), &File{Name: "dd"}) + cache.SetFsNode(util.FullPath("/a/b/e"), &File{Name: "ee"}) + + cache.Move(util.FullPath("/a/b/d"), util.FullPath("/a/b/e")) + + d := cache.GetFsNode(util.FullPath("/a/b/e")) + if d == nil { + t.Errorf("unexpected nil node!") + } + if d.(*File).Name != "e" { + t.Errorf("unexpected node!") + } + +} diff --git a/weed/filesys/meta_cache/cache_config.go b/weed/filesys/meta_cache/cache_config.go new file mode 100644 index 000000000..e6593ebde --- /dev/null +++ b/weed/filesys/meta_cache/cache_config.go @@ -0,0 +1,32 @@ +package meta_cache + +import "github.com/chrislusf/seaweedfs/weed/util" + +var ( + _ = util.Configuration(&cacheConfig{}) +) + +// implementing util.Configuraion +type cacheConfig struct { + dir string +} + +func (c cacheConfig) GetString(key string) string { + return c.dir +} + +func (c cacheConfig) GetBool(key string) bool { + panic("implement me") +} + +func (c cacheConfig) GetInt(key string) int { + panic("implement me") +} + +func (c cacheConfig) GetStringSlice(key string) []string { + panic("implement me") +} + +func (c cacheConfig) SetDefault(key string, value interface{}) { + panic("implement me") +} diff --git a/weed/filesys/meta_cache/id_mapper.go b/weed/filesys/meta_cache/id_mapper.go new file mode 100644 index 000000000..4a2179f31 --- /dev/null +++ b/weed/filesys/meta_cache/id_mapper.go @@ -0,0 +1,101 @@ +package meta_cache + +import ( + "fmt" + "strconv" + "strings" +) + +type UidGidMapper struct { + uidMapper *IdMapper + gidMapper *IdMapper +} + +type IdMapper struct { + localToFiler map[uint32]uint32 + filerToLocal map[uint32]uint32 +} + +// UidGidMapper translates local uid/gid to filer uid/gid +// The local storage always persists the same as the filer. +// The local->filer translation happens when updating the filer first and later saving to meta_cache. +// And filer->local happens when reading from the meta_cache. 
+func NewUidGidMapper(uidPairsStr, gidPairStr string) (*UidGidMapper, error) { + uidMapper, err := newIdMapper(uidPairsStr) + if err != nil { + return nil, err + } + gidMapper, err := newIdMapper(gidPairStr) + if err != nil { + return nil, err + } + + return &UidGidMapper{ + uidMapper: uidMapper, + gidMapper: gidMapper, + }, nil +} + +func (m *UidGidMapper) LocalToFiler(uid, gid uint32) (uint32, uint32) { + return m.uidMapper.LocalToFiler(uid), m.gidMapper.LocalToFiler(gid) +} +func (m *UidGidMapper) FilerToLocal(uid, gid uint32) (uint32, uint32) { + return m.uidMapper.FilerToLocal(uid), m.gidMapper.FilerToLocal(gid) +} + +func (m *IdMapper) LocalToFiler(id uint32) uint32 { + value, found := m.localToFiler[id] + if found { + return value + } + return id +} +func (m *IdMapper) FilerToLocal(id uint32) uint32 { + value, found := m.filerToLocal[id] + if found { + return value + } + return id +} + +func newIdMapper(pairsStr string) (*IdMapper, error) { + + localToFiler, filerToLocal, err := parseUint32Pairs(pairsStr) + if err != nil { + return nil, err + } + + return &IdMapper{ + localToFiler: localToFiler, + filerToLocal: filerToLocal, + }, nil + +} + +func parseUint32Pairs(pairsStr string) (localToFiler, filerToLocal map[uint32]uint32, err error) { + + if pairsStr == "" { + return + } + + localToFiler = make(map[uint32]uint32) + filerToLocal = make(map[uint32]uint32) + for _, pairStr := range strings.Split(pairsStr, ",") { + pair := strings.Split(pairStr, ":") + localUidStr, filerUidStr := pair[0], pair[1] + localUid, localUidErr := strconv.Atoi(localUidStr) + if localUidErr != nil { + err = fmt.Errorf("failed to parse local %s: %v", localUidStr, localUidErr) + return + } + filerUid, filerUidErr := strconv.Atoi(filerUidStr) + if filerUidErr != nil { + err = fmt.Errorf("failed to parse remote %s: %v", filerUidStr, filerUidErr) + return + } + localToFiler[uint32(localUid)] = uint32(filerUid) + filerToLocal[uint32(filerUid)] = uint32(localUid) + } + + return +} diff --git a/weed/filesys/meta_cache/meta_cache.go b/weed/filesys/meta_cache/meta_cache.go new file mode 100644 index 000000000..b9d4724c9 --- /dev/null +++ b/weed/filesys/meta_cache/meta_cache.go @@ -0,0 +1,152 @@ +package meta_cache + +import ( + "context" + "fmt" + "os" + "strings" + "sync" + + "github.com/chrislusf/seaweedfs/weed/filer" + "github.com/chrislusf/seaweedfs/weed/filer/leveldb" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/util" + "github.com/chrislusf/seaweedfs/weed/util/bounded_tree" +) + +// need to have logic similar to FilerStoreWrapper +// e.g. 
fill fileId field for chunks + +type MetaCache struct { + localStore filer.VirtualFilerStore + sync.RWMutex + visitedBoundary *bounded_tree.BoundedTree + uidGidMapper *UidGidMapper + invalidateFunc func(util.FullPath) +} + +func NewMetaCache(dbFolder string, baseDir util.FullPath, uidGidMapper *UidGidMapper, invalidateFunc func(util.FullPath)) *MetaCache { + return &MetaCache{ + localStore: openMetaStore(dbFolder), + visitedBoundary: bounded_tree.NewBoundedTree(baseDir), + uidGidMapper: uidGidMapper, + invalidateFunc: func(fullpath util.FullPath) { + if baseDir != "/" && strings.HasPrefix(string(fullpath), string(baseDir)) { + fullpath = fullpath[len(baseDir):] + } + invalidateFunc(fullpath) + }, + } +} + +func openMetaStore(dbFolder string) filer.VirtualFilerStore { + + os.RemoveAll(dbFolder) + os.MkdirAll(dbFolder, 0755) + + store := &leveldb.LevelDBStore{} + config := &cacheConfig{ + dir: dbFolder, + } + + if err := store.Initialize(config, ""); err != nil { + glog.Fatalf("Failed to initialize metadata cache store for %s: %+v", store.GetName(), err) + } + + return filer.NewFilerStoreWrapper(store) + +} + +func (mc *MetaCache) InsertEntry(ctx context.Context, entry *filer.Entry) error { + mc.Lock() + defer mc.Unlock() + return mc.doInsertEntry(ctx, entry) +} + +func (mc *MetaCache) doInsertEntry(ctx context.Context, entry *filer.Entry) error { + return mc.localStore.InsertEntry(ctx, entry) +} + +func (mc *MetaCache) AtomicUpdateEntryFromFiler(ctx context.Context, oldPath util.FullPath, newEntry *filer.Entry) error { + mc.Lock() + defer mc.Unlock() + + oldDir, _ := oldPath.DirAndName() + if mc.visitedBoundary.HasVisited(util.FullPath(oldDir)) { + if oldPath != "" { + if newEntry != nil && oldPath == newEntry.FullPath { + // skip the unnecessary deletion + // leave the update to the following InsertEntry operation + } else { + glog.V(3).Infof("DeleteEntry %s/%s", oldPath, oldPath.Name()) + if err := mc.localStore.DeleteEntry(ctx, oldPath); err != nil { + return err + } + } + } + } else { + // println("unknown old directory:", oldDir) + } + + if newEntry != nil { + newDir, _ := newEntry.DirAndName() + if mc.visitedBoundary.HasVisited(util.FullPath(newDir)) { + glog.V(3).Infof("InsertEntry %s/%s", newDir, newEntry.Name()) + if err := mc.localStore.InsertEntry(ctx, newEntry); err != nil { + return err + } + } + } + return nil +} + +func (mc *MetaCache) UpdateEntry(ctx context.Context, entry *filer.Entry) error { + mc.Lock() + defer mc.Unlock() + return mc.localStore.UpdateEntry(ctx, entry) +} + +func (mc *MetaCache) FindEntry(ctx context.Context, fp util.FullPath) (entry *filer.Entry, err error) { + mc.RLock() + defer mc.RUnlock() + entry, err = mc.localStore.FindEntry(ctx, fp) + if err != nil { + return nil, err + } + mc.mapIdFromFilerToLocal(entry) + return +} + +func (mc *MetaCache) DeleteEntry(ctx context.Context, fp util.FullPath) (err error) { + mc.Lock() + defer mc.Unlock() + return mc.localStore.DeleteEntry(ctx, fp) +} + +func (mc *MetaCache) ListDirectoryEntries(ctx context.Context, dirPath util.FullPath, startFileName string, includeStartFile bool, limit int64, eachEntryFunc filer.ListEachEntryFunc) error { + mc.RLock() + defer mc.RUnlock() + + if !mc.visitedBoundary.HasVisited(dirPath) { + return fmt.Errorf("unsynchronized dir: %v", dirPath) + } + + _, err := mc.localStore.ListDirectoryEntries(ctx, dirPath, startFileName, includeStartFile, limit, func(entry *filer.Entry) bool { + mc.mapIdFromFilerToLocal(entry) + return eachEntryFunc(entry) + }) + if err != nil { + return err + } + 
return err +} + +func (mc *MetaCache) Shutdown() { + mc.Lock() + defer mc.Unlock() + mc.localStore.Shutdown() +} + +func (mc *MetaCache) mapIdFromFilerToLocal(entry *filer.Entry) { + entry.Attr.Uid, entry.Attr.Gid = mc.uidGidMapper.FilerToLocal(entry.Attr.Uid, entry.Attr.Gid) +} diff --git a/weed/filesys/meta_cache/meta_cache_init.go b/weed/filesys/meta_cache/meta_cache_init.go new file mode 100644 index 000000000..1ca3b16d5 --- /dev/null +++ b/weed/filesys/meta_cache/meta_cache_init.go @@ -0,0 +1,47 @@ +package meta_cache + +import ( + "context" + "fmt" + + "github.com/chrislusf/seaweedfs/weed/filer" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/util" +) + +func EnsureVisited(mc *MetaCache, client filer_pb.FilerClient, dirPath util.FullPath) error { + + return mc.visitedBoundary.EnsureVisited(dirPath, func(path util.FullPath) (childDirectories []string, err error) { + + glog.V(4).Infof("ReadDirAllEntries %s ...", path) + + util.Retry("ReadDirAllEntries", func() error { + err = filer_pb.ReadDirAllEntries(client, dirPath, "", func(pbEntry *filer_pb.Entry, isLast bool) error { + entry := filer.FromPbEntry(string(dirPath), pbEntry) + if IsHiddenSystemEntry(string(dirPath), entry.Name()) { + return nil + } + if err := mc.doInsertEntry(context.Background(), entry); err != nil { + glog.V(0).Infof("read %s: %v", entry.FullPath, err) + return err + } + if entry.IsDirectory() { + childDirectories = append(childDirectories, entry.Name()) + } + return nil + }) + return err + }) + + if err != nil { + err = fmt.Errorf("list %s: %v", dirPath, err) + } + + return + }) +} + +func IsHiddenSystemEntry(dir, name string) bool { + return dir == "/" && name == "topics" +} diff --git a/weed/filesys/meta_cache/meta_cache_subscribe.go b/weed/filesys/meta_cache/meta_cache_subscribe.go new file mode 100644 index 000000000..f9973f436 --- /dev/null +++ b/weed/filesys/meta_cache/meta_cache_subscribe.go @@ -0,0 +1,86 @@ +package meta_cache + +import ( + "context" + "fmt" + "io" + "time" + + "github.com/chrislusf/seaweedfs/weed/filer" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/util" +) + +func SubscribeMetaEvents(mc *MetaCache, selfSignature int32, client filer_pb.FilerClient, dir string, lastTsNs int64) error { + + processEventFn := func(resp *filer_pb.SubscribeMetadataResponse) error { + message := resp.EventNotification + + for _, sig := range message.Signatures { + if sig == selfSignature && selfSignature != 0 { + return nil + } + } + + dir := resp.Directory + var oldPath util.FullPath + var newEntry *filer.Entry + if message.OldEntry != nil { + oldPath = util.NewFullPath(dir, message.OldEntry.Name) + glog.V(4).Infof("deleting %v", oldPath) + } + + if message.NewEntry != nil { + if message.NewParentPath != "" { + dir = message.NewParentPath + } + key := util.NewFullPath(dir, message.NewEntry.Name) + glog.V(4).Infof("creating %v", key) + newEntry = filer.FromPbEntry(dir, message.NewEntry) + } + err := mc.AtomicUpdateEntryFromFiler(context.Background(), oldPath, newEntry) + if err == nil && message.OldEntry != nil && message.NewEntry != nil { + key := util.NewFullPath(dir, message.NewEntry.Name) + mc.invalidateFunc(key) + } + + return err + + } + + for { + err := client.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + stream, err := 
client.SubscribeMetadata(ctx, &filer_pb.SubscribeMetadataRequest{ + ClientName: "mount", + PathPrefix: dir, + SinceNs: lastTsNs, + Signature: selfSignature, + }) + if err != nil { + return fmt.Errorf("subscribe: %v", err) + } + + for { + resp, listenErr := stream.Recv() + if listenErr == io.EOF { + return nil + } + if listenErr != nil { + return listenErr + } + + if err := processEventFn(resp); err != nil { + glog.Fatalf("process %v: %v", resp, err) + } + lastTsNs = resp.TsNs + } + }) + if err != nil { + glog.Errorf("subscribing filer meta change: %v", err) + } + time.Sleep(time.Second) + } +} diff --git a/weed/filesys/unimplemented.go b/weed/filesys/unimplemented.go new file mode 100644 index 000000000..5c2dcf0e1 --- /dev/null +++ b/weed/filesys/unimplemented.go @@ -0,0 +1,22 @@ +package filesys + +import ( + "context" + + "github.com/seaweedfs/fuse" + "github.com/seaweedfs/fuse/fs" +) + +// https://github.com/bazil/fuse/issues/130 + +var _ = fs.NodeAccesser(&Dir{}) + +func (dir *Dir) Access(ctx context.Context, req *fuse.AccessRequest) error { + return fuse.ENOSYS +} + +var _ = fs.NodeAccesser(&File{}) + +func (file *File) Access(ctx context.Context, req *fuse.AccessRequest) error { + return fuse.ENOSYS +} diff --git a/weed/filesys/wfs.go b/weed/filesys/wfs.go index e924783ec..42816d23d 100644 --- a/weed/filesys/wfs.go +++ b/weed/filesys/wfs.go @@ -3,32 +3,44 @@ package filesys import ( "context" "fmt" + "github.com/chrislusf/seaweedfs/weed/filer" + "github.com/chrislusf/seaweedfs/weed/storage/types" + "github.com/chrislusf/seaweedfs/weed/wdclient" "math" "os" + "path" "sync" "time" - "github.com/karlseguin/ccache" "google.golang.org/grpc" + "github.com/chrislusf/seaweedfs/weed/util/grace" + + "github.com/seaweedfs/fuse" + "github.com/seaweedfs/fuse/fs" + + "github.com/chrislusf/seaweedfs/weed/filesys/meta_cache" "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" "github.com/chrislusf/seaweedfs/weed/util" - "github.com/seaweedfs/fuse" - "github.com/seaweedfs/fuse/fs" + "github.com/chrislusf/seaweedfs/weed/util/chunk_cache" ) type Option struct { + MountDirectory string + FilerAddress string FilerGrpcAddress string GrpcDialOption grpc.DialOption FilerMountRootPath string Collection string Replication string TtlSec int32 + DiskType types.DiskType ChunkSizeLimit int64 + ConcurrentWriters int + CacheDir string + CacheSizeMB int64 DataCenter string - DirListCacheLimit int64 - EntryCacheTtl time.Duration Umask os.FileMode MountUid uint32 @@ -36,22 +48,36 @@ type Option struct { MountMode os.FileMode MountCtime time.Time MountMtime time.Time + + VolumeServerAccess string // how to access volume servers + Cipher bool // whether encrypt data on volume server + UidGidMapper *meta_cache.UidGidMapper } var _ = fs.FS(&WFS{}) var _ = fs.FSStatfser(&WFS{}) type WFS struct { - option *Option - listDirectoryEntriesCache *ccache.Cache + option *Option + + // contains all open handles, protected by handlesLock + handlesLock sync.Mutex + handles map[uint64]*FileHandle - // contains all open handles - handles []*FileHandle - pathToHandleIndex map[string]int - pathToHandleLock sync.Mutex - bufPool sync.Pool + bufPool sync.Pool stats statsCache + + root fs.Node + fsNodeCache *FsCache + + chunkCache *chunk_cache.TieredChunkCache + metaCache *meta_cache.MetaCache + signature int32 + + // throttle writers + concurrentWriters *util.LimitedConcurrentExecutor + Server *fs.Server } type statsCache struct { filer_pb.StatisticsResponse @@ -60,72 +86,92 @@ type statsCache struct 
{ func NewSeaweedFileSystem(option *Option) *WFS { wfs := &WFS{ - option: option, - listDirectoryEntriesCache: ccache.New(ccache.Configure().MaxSize(option.DirListCacheLimit * 3).ItemsToPrune(100)), - pathToHandleIndex: make(map[string]int), + option: option, + handles: make(map[uint64]*FileHandle), bufPool: sync.Pool{ New: func() interface{} { return make([]byte, option.ChunkSizeLimit) }, }, + signature: util.RandomInt32(), + } + cacheUniqueId := util.Md5String([]byte(option.MountDirectory + option.FilerGrpcAddress + option.FilerMountRootPath + util.Version()))[0:8] + cacheDir := path.Join(option.CacheDir, cacheUniqueId) + if option.CacheSizeMB > 0 { + os.MkdirAll(cacheDir, os.FileMode(0777)&^option.Umask) + wfs.chunkCache = chunk_cache.NewTieredChunkCache(256, cacheDir, option.CacheSizeMB, 1024*1024) } - return wfs -} + wfs.metaCache = meta_cache.NewMetaCache(path.Join(cacheDir, "meta"), util.FullPath(option.FilerMountRootPath), option.UidGidMapper, func(filePath util.FullPath) { -func (wfs *WFS) Root() (fs.Node, error) { - return &Dir{Path: wfs.option.FilerMountRootPath, wfs: wfs}, nil -} + fsNode := NodeWithId(filePath.AsInode()) + if err := wfs.Server.InvalidateNodeData(fsNode); err != nil { + glog.V(4).Infof("InvalidateNodeData %s : %v", filePath, err) + } -func (wfs *WFS) WithFilerClient(ctx context.Context, fn func(filer_pb.SeaweedFilerClient) error) error { + dir, name := filePath.DirAndName() + parent := NodeWithId(util.FullPath(dir).AsInode()) + if err := wfs.Server.InvalidateEntry(parent, name); err != nil { + glog.V(4).Infof("InvalidateEntry %s : %v", filePath, err) + } + }) + startTime := time.Now() + go meta_cache.SubscribeMetaEvents(wfs.metaCache, wfs.signature, wfs, wfs.option.FilerMountRootPath, startTime.UnixNano()) + grace.OnInterrupt(func() { + wfs.metaCache.Shutdown() + }) + + wfs.root = &Dir{name: wfs.option.FilerMountRootPath, wfs: wfs} + wfs.fsNodeCache = newFsCache(wfs.root) + + if wfs.option.ConcurrentWriters > 0 { + wfs.concurrentWriters = util.NewLimitedConcurrentExecutor(wfs.option.ConcurrentWriters) + } - return util.WithCachedGrpcClient(ctx, func(grpcConnection *grpc.ClientConn) error { - client := filer_pb.NewSeaweedFilerClient(grpcConnection) - return fn(client) - }, wfs.option.FilerGrpcAddress, wfs.option.GrpcDialOption) + return wfs +} +func (wfs *WFS) Root() (fs.Node, error) { + return wfs.root, nil } func (wfs *WFS) AcquireHandle(file *File, uid, gid uint32) (fileHandle *FileHandle) { - wfs.pathToHandleLock.Lock() - defer wfs.pathToHandleLock.Unlock() fullpath := file.fullpath() + glog.V(4).Infof("AcquireHandle %s uid=%d gid=%d", fullpath, uid, gid) + + inodeId := file.Id() - index, found := wfs.pathToHandleIndex[fullpath] - if found && wfs.handles[index] != nil { - glog.V(2).Infoln(fullpath, "found fileHandle id", index) - return wfs.handles[index] + wfs.handlesLock.Lock() + existingHandle, found := wfs.handles[inodeId] + wfs.handlesLock.Unlock() + if found && existingHandle != nil { + existingHandle.f.isOpen++ + glog.V(4).Infof("Acquired Handle %s open %d", fullpath, existingHandle.f.isOpen) + return existingHandle } + entry, _ := file.maybeLoadEntry(context.Background()) + file.entry = entry fileHandle = newFileHandle(file, uid, gid) - for i, h := range wfs.handles { - if h == nil { - wfs.handles[i] = fileHandle - fileHandle.handle = uint64(i) - wfs.pathToHandleIndex[fullpath] = i - glog.V(4).Infoln(fullpath, "reuse fileHandle id", fileHandle.handle) - return - } - } + file.isOpen++ - wfs.handles = append(wfs.handles, fileHandle) - fileHandle.handle 
= uint64(len(wfs.handles) - 1) - glog.V(2).Infoln(fullpath, "new fileHandle id", fileHandle.handle) - wfs.pathToHandleIndex[fullpath] = int(fileHandle.handle) + wfs.handlesLock.Lock() + wfs.handles[inodeId] = fileHandle + wfs.handlesLock.Unlock() + fileHandle.handle = inodeId + glog.V(4).Infof("Acquired new Handle %s open %d", fullpath, file.isOpen) return } -func (wfs *WFS) ReleaseHandle(fullpath string, handleId fuse.HandleID) { - wfs.pathToHandleLock.Lock() - defer wfs.pathToHandleLock.Unlock() +func (wfs *WFS) ReleaseHandle(fullpath util.FullPath, handleId fuse.HandleID) { + wfs.handlesLock.Lock() + defer wfs.handlesLock.Unlock() - glog.V(4).Infof("%s releasing handle id %d current handles length %d", fullpath, handleId, len(wfs.handles)) - delete(wfs.pathToHandleIndex, fullpath) - if int(handleId) < len(wfs.handles) { - wfs.handles[int(handleId)] = nil - } + glog.V(4).Infof("ReleaseHandle %s id %d current handles length %d", fullpath, handleId, len(wfs.handles)) + + delete(wfs.handles, uint64(handleId)) return } @@ -137,16 +183,17 @@ func (wfs *WFS) Statfs(ctx context.Context, req *fuse.StatfsRequest, resp *fuse. if wfs.stats.lastChecked < time.Now().Unix()-20 { - err := wfs.WithFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { + err := wfs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { request := &filer_pb.StatisticsRequest{ Collection: wfs.option.Collection, Replication: wfs.option.Replication, Ttl: fmt.Sprintf("%ds", wfs.option.TtlSec), + DiskType: string(wfs.option.DiskType), } glog.V(4).Infof("reading filer stats: %+v", request) - resp, err := client.Statistics(ctx, request) + resp, err := client.Statistics(context.Background(), request) if err != nil { glog.V(0).Infof("reading filer stats %v: %v", request, err) return err @@ -191,3 +238,34 @@ func (wfs *WFS) Statfs(ctx context.Context, req *fuse.StatfsRequest, resp *fuse. 
return nil } + +func (wfs *WFS) mapPbIdFromFilerToLocal(entry *filer_pb.Entry) { + if entry.Attributes == nil { + return + } + entry.Attributes.Uid, entry.Attributes.Gid = wfs.option.UidGidMapper.FilerToLocal(entry.Attributes.Uid, entry.Attributes.Gid) +} +func (wfs *WFS) mapPbIdFromLocalToFiler(entry *filer_pb.Entry) { + if entry.Attributes == nil { + return + } + entry.Attributes.Uid, entry.Attributes.Gid = wfs.option.UidGidMapper.LocalToFiler(entry.Attributes.Uid, entry.Attributes.Gid) +} + +func (wfs *WFS) LookupFn() wdclient.LookupFileIdFunctionType { + if wfs.option.VolumeServerAccess == "filerProxy" { + return func(fileId string) (targetUrls []string, err error) { + return []string{"http://" + wfs.option.FilerAddress + "/?proxyChunkId=" + fileId}, nil + } + } + return filer.LookupFn(wfs) + +} + +type NodeWithId uint64 +func (n NodeWithId) Id() uint64 { + return uint64(n) +} +func (n NodeWithId) Attr(ctx context.Context, attr *fuse.Attr) error { + return nil +} diff --git a/weed/filesys/wfs_deletion.go b/weed/filesys/wfs_deletion.go deleted file mode 100644 index 6e586b7df..000000000 --- a/weed/filesys/wfs_deletion.go +++ /dev/null @@ -1,69 +0,0 @@ -package filesys - -import ( - "context" - - "github.com/chrislusf/seaweedfs/weed/filer2" - "github.com/chrislusf/seaweedfs/weed/glog" - "github.com/chrislusf/seaweedfs/weed/operation" - "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" - "google.golang.org/grpc" -) - -func (wfs *WFS) deleteFileChunks(ctx context.Context, chunks []*filer_pb.FileChunk) { - if len(chunks) == 0 { - return - } - - var fileIds []string - for _, chunk := range chunks { - fileIds = append(fileIds, chunk.GetFileIdString()) - } - - wfs.WithFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { - deleteFileIds(ctx, wfs.option.GrpcDialOption, client, fileIds) - return nil - }) -} - -func deleteFileIds(ctx context.Context, grpcDialOption grpc.DialOption, client filer_pb.SeaweedFilerClient, fileIds []string) error { - - var vids []string - for _, fileId := range fileIds { - vids = append(vids, filer2.VolumeId(fileId)) - } - - lookupFunc := func(vids []string) (map[string]operation.LookupResult, error) { - - m := make(map[string]operation.LookupResult) - - glog.V(4).Infof("remove file lookup volume id locations: %v", vids) - resp, err := client.LookupVolume(ctx, &filer_pb.LookupVolumeRequest{ - VolumeIds: vids, - }) - if err != nil { - return m, err - } - - for _, vid := range vids { - lr := operation.LookupResult{ - VolumeId: vid, - Locations: nil, - } - locations := resp.LocationsMap[vid] - for _, loc := range locations.Locations { - lr.Locations = append(lr.Locations, operation.Location{ - Url: loc.Url, - PublicUrl: loc.PublicUrl, - }) - } - m[vid] = lr - } - - return m, err - } - - _, err := operation.DeleteFilesWithLookupVolumeId(grpcDialOption, fileIds, lookupFunc) - - return err -} diff --git a/weed/filesys/wfs_filer_client.go b/weed/filesys/wfs_filer_client.go new file mode 100644 index 000000000..671d20ba2 --- /dev/null +++ b/weed/filesys/wfs_filer_client.go @@ -0,0 +1,34 @@ +package filesys + +import ( + "github.com/chrislusf/seaweedfs/weed/util" + "google.golang.org/grpc" + + "github.com/chrislusf/seaweedfs/weed/pb" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" +) + +var _ = filer_pb.FilerClient(&WFS{}) + +func (wfs *WFS) WithFilerClient(fn func(filer_pb.SeaweedFilerClient) error) error { + + err := util.Retry("filer grpc "+wfs.option.FilerGrpcAddress, func() error { + return pb.WithCachedGrpcClient(func(grpcConnection *grpc.ClientConn) error 
{ + client := filer_pb.NewSeaweedFilerClient(grpcConnection) + return fn(client) + }, wfs.option.FilerGrpcAddress, wfs.option.GrpcDialOption) + }) + + if err == nil { + return nil + } + return err + +} + +func (wfs *WFS) AdjustedUrl(location *filer_pb.Location) string { + if wfs.option.VolumeServerAccess == "publicUrl" { + return location.PublicUrl + } + return location.Url +} diff --git a/weed/filesys/wfs_write.go b/weed/filesys/wfs_write.go new file mode 100644 index 000000000..dbec3bebc --- /dev/null +++ b/weed/filesys/wfs_write.go @@ -0,0 +1,75 @@ +package filesys + +import ( + "context" + "fmt" + "io" + + "github.com/chrislusf/seaweedfs/weed/filer" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/operation" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/security" + "github.com/chrislusf/seaweedfs/weed/util" +) + +func (wfs *WFS) saveDataAsChunk(fullPath util.FullPath) filer.SaveDataAsChunkFunctionType { + + return func(reader io.Reader, filename string, offset int64) (chunk *filer_pb.FileChunk, collection, replication string, err error) { + var fileId, host string + var auth security.EncodedJwt + + if err := wfs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { + + request := &filer_pb.AssignVolumeRequest{ + Count: 1, + Replication: wfs.option.Replication, + Collection: wfs.option.Collection, + TtlSec: wfs.option.TtlSec, + DiskType: string(wfs.option.DiskType), + DataCenter: wfs.option.DataCenter, + Path: string(fullPath), + } + + resp, err := client.AssignVolume(context.Background(), request) + if err != nil { + glog.V(0).Infof("assign volume failure %v: %v", request, err) + return err + } + if resp.Error != "" { + return fmt.Errorf("assign volume failure %v: %v", request, resp.Error) + } + + fileId, auth = resp.FileId, security.EncodedJwt(resp.Auth) + loc := &filer_pb.Location{ + Url: resp.Url, + PublicUrl: resp.PublicUrl, + } + host = wfs.AdjustedUrl(loc) + collection, replication = resp.Collection, resp.Replication + + return nil + }); err != nil { + return nil, "", "", fmt.Errorf("filerGrpcAddress assign volume: %v", err) + } + + fileUrl := fmt.Sprintf("http://%s/%s", host, fileId) + if wfs.option.VolumeServerAccess == "filerProxy" { + fileUrl = fmt.Sprintf("http://%s/?proxyChunkId=%s", wfs.option.FilerAddress, fileId) + } + uploadResult, err, data := operation.Upload(fileUrl, filename, wfs.option.Cipher, reader, false, "", nil, auth) + if err != nil { + glog.V(0).Infof("upload data %v to %s: %v", filename, fileUrl, err) + return nil, "", "", fmt.Errorf("upload data: %v", err) + } + if uploadResult.Error != "" { + glog.V(0).Infof("upload failure %v to %s: %v", filename, fileUrl, err) + return nil, "", "", fmt.Errorf("upload result: %v", uploadResult.Error) + } + + wfs.chunkCache.SetChunk(fileId, data) + + chunk = uploadResult.ToPbFileChunk(fileId, offset) + return chunk, collection, replication, nil + } +} diff --git a/weed/filesys/xattr.go b/weed/filesys/xattr.go index 3c0ba164a..92e43b675 100644 --- a/weed/filesys/xattr.go +++ b/weed/filesys/xattr.go @@ -2,11 +2,12 @@ package filesys import ( "context" - "path/filepath" - "github.com/chrislusf/seaweedfs/weed/glog" - "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" "github.com/seaweedfs/fuse" + + "github.com/chrislusf/seaweedfs/weed/filesys/meta_cache" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/util" ) func getxattr(entry *filer_pb.Entry, req *fuse.GetxattrRequest, resp 
*fuse.GetxattrResponse) error { @@ -107,36 +108,16 @@ func listxattr(entry *filer_pb.Entry, req *fuse.ListxattrRequest, resp *fuse.Lis } -func (wfs *WFS) maybeLoadEntry(ctx context.Context, dir, name string) (entry *filer_pb.Entry, err error) { +func (wfs *WFS) maybeLoadEntry(dir, name string) (entry *filer_pb.Entry, err error) { - fullpath := filepath.Join(dir, name) - item := wfs.listDirectoryEntriesCache.Get(fullpath) - if item != nil && !item.Expired() { - entry = item.Value().(*filer_pb.Entry) - return - } - glog.V(3).Infof("read entry cache miss %s", fullpath) - - err = wfs.WithFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { - - request := &filer_pb.LookupDirectoryEntryRequest{ - Name: name, - Directory: dir, - } + fullpath := util.NewFullPath(dir, name) + // glog.V(3).Infof("read entry cache miss %s", fullpath) - resp, err := client.LookupDirectoryEntry(ctx, request) - if err != nil { - glog.V(3).Infof("file attr read file %v: %v", request, err) - return fuse.ENOENT - } - - entry = resp.Entry - if entry != nil { - wfs.listDirectoryEntriesCache.Set(fullpath, entry, wfs.option.EntryCacheTtl) - } - - return nil - }) - - return + // read from async meta cache + meta_cache.EnsureVisited(wfs.metaCache, wfs, util.FullPath(dir)) + cachedEntry, cacheErr := wfs.metaCache.FindEntry(context.Background(), fullpath) + if cacheErr == filer_pb.ErrNotFound { + return nil, fuse.ENOENT + } + return cachedEntry.ToProtoEntry(), cacheErr } diff --git a/weed/ftpd/ftp_server.go b/weed/ftpd/ftp_server.go new file mode 100644 index 000000000..4a0dca2c3 --- /dev/null +++ b/weed/ftpd/ftp_server.go @@ -0,0 +1,81 @@ +package ftpd + +import ( + "crypto/tls" + "errors" + "fmt" + "net" + + ftpserver "github.com/fclairamb/ftpserverlib" + "google.golang.org/grpc" +) + +type FtpServerOption struct { + Filer string + IP string + IpBind string + Port int + FilerGrpcAddress string + FtpRoot string + GrpcDialOption grpc.DialOption + PassivePortStart int + PassivePortStop int +} + +type SftpServer struct { + option *FtpServerOption + ftpListener net.Listener +} + +var _ = ftpserver.MainDriver(&SftpServer{}) + +// NewServer returns a new FTP server driver +func NewFtpServer(ftpListener net.Listener, option *FtpServerOption) (*SftpServer, error) { + var err error + server := &SftpServer{ + option: option, + ftpListener: ftpListener, + } + return server, err +} + +// GetSettings returns some general settings around the server setup +func (s *SftpServer) GetSettings() (*ftpserver.Settings, error) { + var portRange *ftpserver.PortRange + if s.option.PassivePortStart > 0 && s.option.PassivePortStop > s.option.PassivePortStart { + portRange = &ftpserver.PortRange{ + Start: s.option.PassivePortStart, + End: s.option.PassivePortStop, + } + } + + return &ftpserver.Settings{ + Listener: s.ftpListener, + ListenAddr: fmt.Sprintf("%s:%d", s.option.IpBind, s.option.Port), + PublicHost: s.option.IP, + PassiveTransferPortRange: portRange, + ActiveTransferPortNon20: true, + IdleTimeout: -1, + ConnectionTimeout: 20, + }, nil +} + +// ClientConnected is called to send the very first welcome message +func (s *SftpServer) ClientConnected(cc ftpserver.ClientContext) (string, error) { + return "Welcome to SeaweedFS FTP Server", nil +} + +// ClientDisconnected is called when the user disconnects, even if he never authenticated +func (s *SftpServer) ClientDisconnected(cc ftpserver.ClientContext) { +} + +// AuthUser authenticates the user and selects an handling driver +func (s *SftpServer) AuthUser(cc ftpserver.ClientContext, 
username, password string) (ftpserver.ClientDriver, error) { + return nil, nil +} + +// GetTLSConfig returns a TLS Certificate to use +// The certificate could frequently change if we use something like "let's encrypt" +func (s *SftpServer) GetTLSConfig() (*tls.Config, error) { + return nil, errors.New("no TLS certificate configured") +} diff --git a/weed/glog/glog.go b/weed/glog/glog.go index f46632f1c..352a7e185 100644 --- a/weed/glog/glog.go +++ b/weed/glog/glog.go @@ -74,8 +74,8 @@ import ( "bufio" "bytes" "errors" - "flag" "fmt" + flag "github.com/chrislusf/seaweedfs/weed/util/fla9" "io" stdLog "log" "os" @@ -398,7 +398,7 @@ type flushSyncWriter interface { func init() { flag.BoolVar(&logging.toStderr, "logtostderr", false, "log to standard error instead of files") flag.BoolVar(&logging.alsoToStderr, "alsologtostderr", true, "log to standard error as well as files") - flag.Var(&logging.verbosity, "v", "log level for V logs") + flag.Var(&logging.verbosity, "v", "log levels [0|1|2|3|4], default to 0") flag.Var(&logging.stderrThreshold, "stderrthreshold", "logs at or above this threshold go to stderr") flag.Var(&logging.vmodule, "vmodule", "comma-separated list of pattern=N settings for file-filtered logging") flag.Var(&logging.traceLocation, "log_backtrace_at", "when logging hits line file:N, emit a stack trace") diff --git a/weed/glog/glog_file.go b/weed/glog/glog_file.go index bb8e6902f..3f700d8fc 100644 --- a/weed/glog/glog_file.go +++ b/weed/glog/glog_file.go @@ -20,8 +20,8 @@ package glog import ( "errors" - "flag" "fmt" + flag "github.com/chrislusf/seaweedfs/weed/util/fla9" "os" "os/user" "path/filepath" diff --git a/weed/iamapi/iamapi_handlers.go b/weed/iamapi/iamapi_handlers.go new file mode 100644 index 000000000..2e5f709f3 --- /dev/null +++ b/weed/iamapi/iamapi_handlers.go @@ -0,0 +1,105 @@ +package iamapi + +import ( + "bytes" + "encoding/xml" + "fmt" + "strconv" + + "net/http" + "net/url" + "time" + + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/s3api/s3err" + + "github.com/aws/aws-sdk-go/service/iam" +) + +type mimeType string + +const ( + mimeNone mimeType = "" + mimeXML mimeType = "application/xml" +) + +func setCommonHeaders(w http.ResponseWriter) { + w.Header().Set("x-amz-request-id", fmt.Sprintf("%d", time.Now().UnixNano())) + w.Header().Set("Accept-Ranges", "bytes") +} + +// Encodes the response headers into XML format. 
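+// (More precisely, it serializes the response struct into an XML body prefixed
+// with xml.Header; the HTTP headers themselves are set in setCommonHeaders and
+// writeResponse.)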
+func encodeResponse(response interface{}) []byte { + var bytesBuffer bytes.Buffer + bytesBuffer.WriteString(xml.Header) + e := xml.NewEncoder(&bytesBuffer) + e.Encode(response) + return bytesBuffer.Bytes() +} + +// If none of the http routes match respond with MethodNotAllowed +func notFoundHandler(w http.ResponseWriter, r *http.Request) { + glog.V(0).Infof("unsupported %s %s", r.Method, r.RequestURI) + writeErrorResponse(w, s3err.ErrMethodNotAllowed, r.URL) +} + +func writeErrorResponse(w http.ResponseWriter, errorCode s3err.ErrorCode, reqURL *url.URL) { + apiError := s3err.GetAPIError(errorCode) + errorResponse := getRESTErrorResponse(apiError, reqURL.Path) + encodedErrorResponse := encodeResponse(errorResponse) + writeResponse(w, apiError.HTTPStatusCode, encodedErrorResponse, mimeXML) +} + +func writeIamErrorResponse(w http.ResponseWriter, err error, object string, value string, msg error) { + errCode := err.Error() + errorResp := ErrorResponse{} + errorResp.Error.Type = "Sender" + errorResp.Error.Code = &errCode + if msg != nil { + errMsg := msg.Error() + errorResp.Error.Message = &errMsg + } + glog.Errorf("Response %+v", err) + switch errCode { + case iam.ErrCodeNoSuchEntityException: + msg := fmt.Sprintf("The %s with name %s cannot be found.", object, value) + errorResp.Error.Message = &msg + writeResponse(w, http.StatusNotFound, encodeResponse(errorResp), mimeXML) + case iam.ErrCodeServiceFailureException: + writeResponse(w, http.StatusInternalServerError, encodeResponse(errorResp), mimeXML) + default: + writeResponse(w, http.StatusInternalServerError, encodeResponse(errorResp), mimeXML) + } +} + +func getRESTErrorResponse(err s3err.APIError, resource string) s3err.RESTErrorResponse { + return s3err.RESTErrorResponse{ + Code: err.Code, + Message: err.Description, + Resource: resource, + RequestID: fmt.Sprintf("%d", time.Now().UnixNano()), + } +} + +func writeResponse(w http.ResponseWriter, statusCode int, response []byte, mType mimeType) { + setCommonHeaders(w) + if response != nil { + w.Header().Set("Content-Length", strconv.Itoa(len(response))) + } + if mType != mimeNone { + w.Header().Set("Content-Type", string(mType)) + } + w.WriteHeader(statusCode) + if response != nil { + glog.V(4).Infof("status %d %s: %s", statusCode, mType, string(response)) + _, err := w.Write(response) + if err != nil { + glog.V(0).Infof("write err: %v", err) + } + w.(http.Flusher).Flush() + } +} + +func writeSuccessResponseXML(w http.ResponseWriter, response []byte) { + writeResponse(w, http.StatusOK, response, mimeXML) +} diff --git a/weed/iamapi/iamapi_management_handlers.go b/weed/iamapi/iamapi_management_handlers.go new file mode 100644 index 000000000..b00ada234 --- /dev/null +++ b/weed/iamapi/iamapi_management_handlers.go @@ -0,0 +1,449 @@ +package iamapi + +import ( + "crypto/sha1" + "encoding/json" + "fmt" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/iam_pb" + "github.com/chrislusf/seaweedfs/weed/s3api/s3_constants" + "github.com/chrislusf/seaweedfs/weed/s3api/s3err" + "math/rand" + "net/http" + "net/url" + "reflect" + "strings" + "sync" + "time" + + "github.com/aws/aws-sdk-go/service/iam" +) + +const ( + charsetUpper = "ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789" + charset = charsetUpper + "abcdefghijklmnopqrstuvwxyz/" + policyDocumentVersion = "2012-10-17" + StatementActionAdmin = "*" + StatementActionWrite = "Put*" + StatementActionRead = "Get*" + StatementActionList = "List*" + StatementActionTagging = "Tagging*" +) + +var ( + seededRand *rand.Rand = 
rand.New( + rand.NewSource(time.Now().UnixNano())) + policyDocuments = map[string]*PolicyDocument{} + policyLock = sync.RWMutex{} +) + +func MapToStatementAction(action string) string { + switch action { + case StatementActionAdmin: + return s3_constants.ACTION_ADMIN + case StatementActionWrite: + return s3_constants.ACTION_WRITE + case StatementActionRead: + return s3_constants.ACTION_READ + case StatementActionList: + return s3_constants.ACTION_LIST + case StatementActionTagging: + return s3_constants.ACTION_TAGGING + default: + return "" + } +} + +func MapToIdentitiesAction(action string) string { + switch action { + case s3_constants.ACTION_ADMIN: + return StatementActionAdmin + case s3_constants.ACTION_WRITE: + return StatementActionWrite + case s3_constants.ACTION_READ: + return StatementActionRead + case s3_constants.ACTION_LIST: + return StatementActionList + case s3_constants.ACTION_TAGGING: + return StatementActionTagging + default: + return "" + } +} + +type Statement struct { + Effect string `json:"Effect"` + Action []string `json:"Action"` + Resource []string `json:"Resource"` +} + +type Policies struct { + Policies map[string]PolicyDocument `json:"policies"` +} + +type PolicyDocument struct { + Version string `json:"Version"` + Statement []*Statement `json:"Statement"` +} + +func (p PolicyDocument) String() string { + b, _ := json.Marshal(p) + return string(b) +} + +func Hash(s *string) string { + h := sha1.New() + h.Write([]byte(*s)) + return fmt.Sprintf("%x", h.Sum(nil)) +} + +func StringWithCharset(length int, charset string) string { + b := make([]byte, length) + for i := range b { + b[i] = charset[seededRand.Intn(len(charset))] + } + return string(b) +} + +func (iama *IamApiServer) ListUsers(s3cfg *iam_pb.S3ApiConfiguration, values url.Values) (resp ListUsersResponse) { + for _, ident := range s3cfg.Identities { + resp.ListUsersResult.Users = append(resp.ListUsersResult.Users, &iam.User{UserName: &ident.Name}) + } + return resp +} + +func (iama *IamApiServer) ListAccessKeys(s3cfg *iam_pb.S3ApiConfiguration, values url.Values) (resp ListAccessKeysResponse) { + status := iam.StatusTypeActive + for _, ident := range s3cfg.Identities { + for _, cred := range ident.Credentials { + resp.ListAccessKeysResult.AccessKeyMetadata = append(resp.ListAccessKeysResult.AccessKeyMetadata, + &iam.AccessKeyMetadata{UserName: &ident.Name, AccessKeyId: &cred.AccessKey, Status: &status}, + ) + } + } + return resp +} + +func (iama *IamApiServer) CreateUser(s3cfg *iam_pb.S3ApiConfiguration, values url.Values) (resp CreateUserResponse) { + userName := values.Get("UserName") + resp.CreateUserResult.User.UserName = &userName + s3cfg.Identities = append(s3cfg.Identities, &iam_pb.Identity{Name: userName}) + return resp +} + +func (iama *IamApiServer) DeleteUser(s3cfg *iam_pb.S3ApiConfiguration, userName string) (resp DeleteUserResponse, err error) { + for i, ident := range s3cfg.Identities { + if userName == ident.Name { + s3cfg.Identities = append(s3cfg.Identities[:i], s3cfg.Identities[i+1:]...) 
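+			// the append above splices identity i out of the slice in place;
+			// returning right away keeps the range loop from reading shifted indices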
+ return resp, nil + } + } + return resp, fmt.Errorf(iam.ErrCodeNoSuchEntityException) +} + +func (iama *IamApiServer) GetUser(s3cfg *iam_pb.S3ApiConfiguration, userName string) (resp GetUserResponse, err error) { + for _, ident := range s3cfg.Identities { + if userName == ident.Name { + resp.GetUserResult.User = iam.User{UserName: &ident.Name} + return resp, nil + } + } + return resp, fmt.Errorf(iam.ErrCodeNoSuchEntityException) +} + +func GetPolicyDocument(policy *string) (policyDocument PolicyDocument, err error) { + if err = json.Unmarshal([]byte(*policy), &policyDocument); err != nil { + return PolicyDocument{}, err + } + return policyDocument, err +} + +func (iama *IamApiServer) CreatePolicy(s3cfg *iam_pb.S3ApiConfiguration, values url.Values) (resp CreatePolicyResponse, err error) { + policyName := values.Get("PolicyName") + policyDocumentString := values.Get("PolicyDocument") + policyDocument, err := GetPolicyDocument(&policyDocumentString) + if err != nil { + return CreatePolicyResponse{}, err + } + policyId := Hash(&policyDocumentString) + arn := fmt.Sprintf("arn:aws:iam:::policy/%s", policyName) + resp.CreatePolicyResult.Policy.PolicyName = &policyName + resp.CreatePolicyResult.Policy.Arn = &arn + resp.CreatePolicyResult.Policy.PolicyId = &policyId + policies := Policies{} + policyLock.Lock() + defer policyLock.Unlock() + if err = iama.s3ApiConfig.GetPolicies(&policies); err != nil { + return resp, err + } + policies.Policies[policyName] = policyDocument + if err = iama.s3ApiConfig.PutPolicies(&policies); err != nil { + return resp, err + } + return resp, nil +} + +func (iama *IamApiServer) PutUserPolicy(s3cfg *iam_pb.S3ApiConfiguration, values url.Values) (resp PutUserPolicyResponse, err error) { + userName := values.Get("UserName") + policyName := values.Get("PolicyName") + policyDocumentString := values.Get("PolicyDocument") + policyDocument, err := GetPolicyDocument(&policyDocumentString) + if err != nil { + return PutUserPolicyResponse{}, err + } + policyDocuments[policyName] = &policyDocument + actions := GetActions(&policyDocument) + for _, ident := range s3cfg.Identities { + if userName == ident.Name { + for _, action := range actions { + ident.Actions = append(ident.Actions, action) + } + break + } + } + return resp, nil +} + +func (iama *IamApiServer) GetUserPolicy(s3cfg *iam_pb.S3ApiConfiguration, values url.Values) (resp GetUserPolicyResponse, err error) { + userName := values.Get("UserName") + policyName := values.Get("PolicyName") + for _, ident := range s3cfg.Identities { + if userName != ident.Name { + continue + } + + resp.GetUserPolicyResult.UserName = userName + resp.GetUserPolicyResult.PolicyName = policyName + if len(ident.Actions) == 0 { + return resp, fmt.Errorf(iam.ErrCodeNoSuchEntityException) + } + + policyDocument := PolicyDocument{Version: policyDocumentVersion} + statements := make(map[string][]string) + for _, action := range ident.Actions { + // parse "Read:EXAMPLE-BUCKET" + act := strings.Split(action, ":") + + resource := "*" + if len(act) == 2 { + resource = fmt.Sprintf("arn:aws:s3:::%s/*", act[1]) + } + statements[resource] = append(statements[resource], + fmt.Sprintf("s3:%s", MapToIdentitiesAction(act[0])), + ) + } + for resource, actions := range statements { + isEqAction := false + for i, statement := range policyDocument.Statement { + if reflect.DeepEqual(statement.Action, actions) { + policyDocument.Statement[i].Resource = append( + policyDocument.Statement[i].Resource, resource) + isEqAction = true + break + } + } + if isEqAction { + 
continue + } + policyDocumentStatement := Statement{ + Effect: "Allow", + Action: actions, + } + policyDocumentStatement.Resource = append(policyDocumentStatement.Resource, resource) + policyDocument.Statement = append(policyDocument.Statement, &policyDocumentStatement) + } + resp.GetUserPolicyResult.PolicyDocument = policyDocument.String() + return resp, nil + } + return resp, fmt.Errorf(iam.ErrCodeNoSuchEntityException) +} + +func (iama *IamApiServer) DeleteUserPolicy(s3cfg *iam_pb.S3ApiConfiguration, values url.Values) (resp PutUserPolicyResponse, err error) { + userName := values.Get("UserName") + for i, ident := range s3cfg.Identities { + if ident.Name == userName { + s3cfg.Identities = append(s3cfg.Identities[:i], s3cfg.Identities[i+1:]...) + return resp, nil + } + } + return resp, fmt.Errorf(iam.ErrCodeNoSuchEntityException) +} + +func GetActions(policy *PolicyDocument) (actions []string) { + for _, statement := range policy.Statement { + if statement.Effect != "Allow" { + continue + } + for _, resource := range statement.Resource { + // Parse "arn:aws:s3:::my-bucket/shared/*" + res := strings.Split(resource, ":") + if len(res) != 6 || res[0] != "arn" || res[1] != "aws" || res[2] != "s3" { + glog.Infof("not match resource: %s", res) + continue + } + for _, action := range statement.Action { + // Parse "s3:Get*" + act := strings.Split(action, ":") + if len(act) != 2 || act[0] != "s3" { + glog.Infof("not match action: %s", act) + continue + } + statementAction := MapToStatementAction(act[1]) + if res[5] == "*" { + actions = append(actions, statementAction) + continue + } + // Parse my-bucket/shared/* + path := strings.Split(res[5], "/") + if len(path) != 2 || path[1] != "*" { + glog.Infof("not match bucket: %s", path) + continue + } + actions = append(actions, fmt.Sprintf("%s:%s", statementAction, path[0])) + } + } + } + return actions +} + +func (iama *IamApiServer) CreateAccessKey(s3cfg *iam_pb.S3ApiConfiguration, values url.Values) (resp CreateAccessKeyResponse) { + userName := values.Get("UserName") + status := iam.StatusTypeActive + accessKeyId := StringWithCharset(21, charsetUpper) + secretAccessKey := StringWithCharset(42, charset) + resp.CreateAccessKeyResult.AccessKey.AccessKeyId = &accessKeyId + resp.CreateAccessKeyResult.AccessKey.SecretAccessKey = &secretAccessKey + resp.CreateAccessKeyResult.AccessKey.UserName = &userName + resp.CreateAccessKeyResult.AccessKey.Status = &status + changed := false + for _, ident := range s3cfg.Identities { + if userName == ident.Name { + ident.Credentials = append(ident.Credentials, + &iam_pb.Credential{AccessKey: accessKeyId, SecretKey: secretAccessKey}) + changed = true + break + } + } + if !changed { + s3cfg.Identities = append(s3cfg.Identities, + &iam_pb.Identity{Name: userName, + Credentials: []*iam_pb.Credential{ + { + AccessKey: accessKeyId, + SecretKey: secretAccessKey, + }, + }, + }, + ) + } + return resp +} + +func (iama *IamApiServer) DeleteAccessKey(s3cfg *iam_pb.S3ApiConfiguration, values url.Values) (resp DeleteAccessKeyResponse) { + userName := values.Get("UserName") + accessKeyId := values.Get("AccessKeyId") + for _, ident := range s3cfg.Identities { + if userName == ident.Name { + for i, cred := range ident.Credentials { + if cred.AccessKey == accessKeyId { + ident.Credentials = append(ident.Credentials[:i], ident.Credentials[i+1:]...) 
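+				// only the first matching access key is removed; the break below
+				// avoids iterating further over the just-mutated slice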
+					break
+				}
+			}
+			break
+		}
+	}
+	return resp
+}
+
+// s3cfgLock guards concurrent readers and writers of the shared IAM configuration;
+// it must live at package scope so that all requests synchronize on the same lock
+var s3cfgLock sync.RWMutex
+
+func (iama *IamApiServer) DoActions(w http.ResponseWriter, r *http.Request) {
+	if err := r.ParseForm(); err != nil {
+		writeErrorResponse(w, s3err.ErrInvalidRequest, r.URL)
+		return
+	}
+	values := r.PostForm
+	s3cfg := &iam_pb.S3ApiConfiguration{}
+	s3cfgLock.RLock()
+	err := iama.s3ApiConfig.GetS3ApiConfiguration(s3cfg)
+	s3cfgLock.RUnlock()
+	if err != nil {
+		writeErrorResponse(w, s3err.ErrInternalError, r.URL)
+		return
+	}
+
+	glog.V(4).Infof("DoActions: %+v", values)
+	var response interface{}
+	changed := true
+	switch r.Form.Get("Action") {
+	case "ListUsers":
+		response = iama.ListUsers(s3cfg, values)
+		changed = false
+	case "ListAccessKeys":
+		response = iama.ListAccessKeys(s3cfg, values)
+		changed = false
+	case "CreateUser":
+		response = iama.CreateUser(s3cfg, values)
+	case "GetUser":
+		userName := values.Get("UserName")
+		response, err = iama.GetUser(s3cfg, userName)
+		if err != nil {
+			writeIamErrorResponse(w, err, "user", userName, nil)
+			return
+		}
+		changed = false
+	case "DeleteUser":
+		userName := values.Get("UserName")
+		response, err = iama.DeleteUser(s3cfg, userName)
+		if err != nil {
+			writeIamErrorResponse(w, err, "user", userName, nil)
+			return
+		}
+	case "CreateAccessKey":
+		response = iama.CreateAccessKey(s3cfg, values)
+	case "DeleteAccessKey":
+		response = iama.DeleteAccessKey(s3cfg, values)
+	case "CreatePolicy":
+		response, err = iama.CreatePolicy(s3cfg, values)
+		if err != nil {
+			glog.Errorf("CreatePolicy: %+v", err)
+			writeErrorResponse(w, s3err.ErrInvalidRequest, r.URL)
+			return
+		}
+	case "PutUserPolicy":
+		response, err = iama.PutUserPolicy(s3cfg, values)
+		if err != nil {
+			glog.Errorf("PutUserPolicy: %+v", err)
+			writeErrorResponse(w, s3err.ErrInvalidRequest, r.URL)
+			return
+		}
+	case "GetUserPolicy":
+		response, err = iama.GetUserPolicy(s3cfg, values)
+		if err != nil {
+			writeIamErrorResponse(w, err, "user", values.Get("UserName"), nil)
+			return
+		}
+		changed = false
+	case "DeleteUserPolicy":
+		if response, err = iama.DeleteUserPolicy(s3cfg, values); err != nil {
+			writeIamErrorResponse(w, err, "user", values.Get("UserName"), nil)
+			return
+		}
+	default:
+		errNotImplemented := s3err.GetAPIError(s3err.ErrNotImplemented)
+		errorResponse := ErrorResponse{}
+		errorResponse.Error.Code = &errNotImplemented.Code
+		errorResponse.Error.Message = &errNotImplemented.Description
+		writeResponse(w, errNotImplemented.HTTPStatusCode, encodeResponse(errorResponse), mimeXML)
+		return
+	}
+	if changed {
+		s3cfgLock.Lock()
+		err := iama.s3ApiConfig.PutS3ApiConfiguration(s3cfg)
+		s3cfgLock.Unlock()
+		if err != nil {
+			writeIamErrorResponse(w, fmt.Errorf(iam.ErrCodeServiceFailureException), "", "", err)
+			return
+		}
+	}
+	writeSuccessResponseXML(w, encodeResponse(response))
+}
diff --git a/weed/iamapi/iamapi_response.go b/weed/iamapi/iamapi_response.go
new file mode 100644
index 000000000..77328b608
--- /dev/null
+++ b/weed/iamapi/iamapi_response.go
@@ -0,0 +1,103 @@
+package iamapi
+
+import (
+	"encoding/xml"
+	"fmt"
+	"time"
+
+	"github.com/aws/aws-sdk-go/service/iam"
+)
+
+type CommonResponse struct {
+	ResponseMetadata struct {
+		RequestId string `xml:"RequestId"`
+	} `xml:"ResponseMetadata"`
+}
+
+type ListUsersResponse struct {
+	CommonResponse
+	XMLName xml.Name `xml:"https://iam.amazonaws.com/doc/2010-05-08/ ListUsersResponse"`
+	ListUsersResult struct {
+		Users       []*iam.User `xml:"Users>member"`
+		IsTruncated bool        `xml:"IsTruncated"`
+	} `xml:"ListUsersResult"`
+}
+
+type 
ListAccessKeysResponse struct { + CommonResponse + XMLName xml.Name `xml:"https://iam.amazonaws.com/doc/2010-05-08/ ListAccessKeysResponse"` + ListAccessKeysResult struct { + AccessKeyMetadata []*iam.AccessKeyMetadata `xml:"AccessKeyMetadata>member"` + IsTruncated bool `xml:"IsTruncated"` + } `xml:"ListAccessKeysResult"` +} + +type DeleteAccessKeyResponse struct { + CommonResponse + XMLName xml.Name `xml:"https://iam.amazonaws.com/doc/2010-05-08/ DeleteAccessKeyResponse"` +} + +type CreatePolicyResponse struct { + CommonResponse + XMLName xml.Name `xml:"https://iam.amazonaws.com/doc/2010-05-08/ CreatePolicyResponse"` + CreatePolicyResult struct { + Policy iam.Policy `xml:"Policy"` + } `xml:"CreatePolicyResult"` +} + +type CreateUserResponse struct { + CommonResponse + XMLName xml.Name `xml:"https://iam.amazonaws.com/doc/2010-05-08/ CreateUserResponse"` + CreateUserResult struct { + User iam.User `xml:"User"` + } `xml:"CreateUserResult"` +} + +type DeleteUserResponse struct { + CommonResponse + XMLName xml.Name `xml:"https://iam.amazonaws.com/doc/2010-05-08/ DeleteUserResponse"` +} + +type GetUserResponse struct { + CommonResponse + XMLName xml.Name `xml:"https://iam.amazonaws.com/doc/2010-05-08/ GetUserResponse"` + GetUserResult struct { + User iam.User `xml:"User"` + } `xml:"GetUserResult"` +} + +type CreateAccessKeyResponse struct { + CommonResponse + XMLName xml.Name `xml:"https://iam.amazonaws.com/doc/2010-05-08/ CreateAccessKeyResponse"` + CreateAccessKeyResult struct { + AccessKey iam.AccessKey `xml:"AccessKey"` + } `xml:"CreateAccessKeyResult"` +} + +type PutUserPolicyResponse struct { + CommonResponse + XMLName xml.Name `xml:"https://iam.amazonaws.com/doc/2010-05-08/ PutUserPolicyResponse"` +} + +type GetUserPolicyResponse struct { + CommonResponse + XMLName xml.Name `xml:"https://iam.amazonaws.com/doc/2010-05-08/ GetUserPolicyResponse"` + GetUserPolicyResult struct { + UserName string `xml:"UserName"` + PolicyName string `xml:"PolicyName"` + PolicyDocument string `xml:"PolicyDocument"` + } `xml:"GetUserPolicyResult"` +} + +type ErrorResponse struct { + CommonResponse + XMLName xml.Name `xml:"https://iam.amazonaws.com/doc/2010-05-08/ ErrorResponse"` + Error struct { + iam.ErrorDetails + Type string `xml:"Type"` + } `xml:"Error"` +} + +func (r *CommonResponse) SetRequestId() { + r.ResponseMetadata.RequestId = fmt.Sprintf("%d", time.Now().UnixNano()) +} diff --git a/weed/iamapi/iamapi_server.go b/weed/iamapi/iamapi_server.go new file mode 100644 index 000000000..18af1a919 --- /dev/null +++ b/weed/iamapi/iamapi_server.go @@ -0,0 +1,149 @@ +package iamapi + +// https://docs.aws.amazon.com/cli/latest/reference/iam/list-roles.html + +import ( + "bytes" + "encoding/json" + "fmt" + "github.com/chrislusf/seaweedfs/weed/filer" + "github.com/chrislusf/seaweedfs/weed/pb" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/pb/iam_pb" + "github.com/chrislusf/seaweedfs/weed/s3api" + . 
"github.com/chrislusf/seaweedfs/weed/s3api/s3_constants" + "github.com/chrislusf/seaweedfs/weed/wdclient" + "github.com/gorilla/mux" + "google.golang.org/grpc" + "net/http" + "strings" +) + +type IamS3ApiConfig interface { + GetS3ApiConfiguration(s3cfg *iam_pb.S3ApiConfiguration) (err error) + PutS3ApiConfiguration(s3cfg *iam_pb.S3ApiConfiguration) (err error) + GetPolicies(policies *Policies) (err error) + PutPolicies(policies *Policies) (err error) +} + +type IamS3ApiConfigure struct { + option *IamServerOption + masterClient *wdclient.MasterClient +} + +type IamServerOption struct { + Masters string + Filer string + Port int + FilerGrpcAddress string + GrpcDialOption grpc.DialOption +} + +type IamApiServer struct { + s3ApiConfig IamS3ApiConfig + iam *s3api.IdentityAccessManagement +} + +var s3ApiConfigure IamS3ApiConfig + +func NewIamApiServer(router *mux.Router, option *IamServerOption) (iamApiServer *IamApiServer, err error) { + s3ApiConfigure = IamS3ApiConfigure{ + option: option, + masterClient: wdclient.NewMasterClient(option.GrpcDialOption, pb.AdminShellClient, "", 0, "", strings.Split(option.Masters, ",")), + } + s3Option := s3api.S3ApiServerOption{Filer: option.Filer} + iamApiServer = &IamApiServer{ + s3ApiConfig: s3ApiConfigure, + iam: s3api.NewIdentityAccessManagement(&s3Option), + } + + iamApiServer.registerRouter(router) + + return iamApiServer, nil +} + +func (iama *IamApiServer) registerRouter(router *mux.Router) { + // API Router + apiRouter := router.PathPrefix("/").Subrouter() + // ListBuckets + + // apiRouter.Methods("GET").Path("/").HandlerFunc(track(s3a.iam.Auth(s3a.ListBucketsHandler, ACTION_ADMIN), "LIST")) + apiRouter.Methods("POST").Path("/").HandlerFunc(iama.iam.Auth(iama.DoActions, ACTION_ADMIN)) + // + // NotFound + apiRouter.NotFoundHandler = http.HandlerFunc(notFoundHandler) +} + +func (iam IamS3ApiConfigure) GetS3ApiConfiguration(s3cfg *iam_pb.S3ApiConfiguration) (err error) { + var buf bytes.Buffer + err = pb.WithGrpcFilerClient(iam.option.FilerGrpcAddress, iam.option.GrpcDialOption, func(client filer_pb.SeaweedFilerClient) error { + if err = filer.ReadEntry(iam.masterClient, client, filer.IamConfigDirecotry, filer.IamIdentityFile, &buf); err != nil { + return err + } + return nil + }) + if err != nil { + return err + } + if buf.Len() > 0 { + if err = filer.ParseS3ConfigurationFromBytes(buf.Bytes(), s3cfg); err != nil { + return err + } + } + return nil +} + +func (iam IamS3ApiConfigure) PutS3ApiConfiguration(s3cfg *iam_pb.S3ApiConfiguration) (err error) { + buf := bytes.Buffer{} + if err := filer.S3ConfigurationToText(&buf, s3cfg); err != nil { + return fmt.Errorf("S3ConfigurationToText: %s", err) + } + return pb.WithGrpcFilerClient( + iam.option.FilerGrpcAddress, + iam.option.GrpcDialOption, + func(client filer_pb.SeaweedFilerClient) error { + if err := filer.SaveInsideFiler(client, filer.IamConfigDirecotry, filer.IamIdentityFile, buf.Bytes()); err != nil { + return err + } + return nil + }, + ) +} + +func (iam IamS3ApiConfigure) GetPolicies(policies *Policies) (err error) { + var buf bytes.Buffer + err = pb.WithGrpcFilerClient(iam.option.FilerGrpcAddress, iam.option.GrpcDialOption, func(client filer_pb.SeaweedFilerClient) error { + if err = filer.ReadEntry(iam.masterClient, client, filer.IamConfigDirecotry, filer.IamPoliciesFile, &buf); err != nil { + return err + } + return nil + }) + if err != nil { + return err + } + if buf.Len() == 0 { + policies.Policies = make(map[string]PolicyDocument) + return nil + } + if err := json.Unmarshal(buf.Bytes(), 
policies); err != nil { + return err + } + return nil +} + +func (iam IamS3ApiConfigure) PutPolicies(policies *Policies) (err error) { + var b []byte + if b, err = json.Marshal(policies); err != nil { + return err + } + return pb.WithGrpcFilerClient( + iam.option.FilerGrpcAddress, + iam.option.GrpcDialOption, + func(client filer_pb.SeaweedFilerClient) error { + if err := filer.SaveInsideFiler(client, filer.IamConfigDirecotry, filer.IamPoliciesFile, b); err != nil { + return err + } + return nil + }, + ) +} diff --git a/weed/iamapi/iamapi_test.go b/weed/iamapi/iamapi_test.go new file mode 100644 index 000000000..09aaf0ac8 --- /dev/null +++ b/weed/iamapi/iamapi_test.go @@ -0,0 +1,181 @@ +package iamapi + +import ( + "encoding/xml" + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/iam" + "github.com/chrislusf/seaweedfs/weed/pb/iam_pb" + "github.com/gorilla/mux" + "github.com/jinzhu/copier" + "github.com/stretchr/testify/assert" + "net/http" + "net/http/httptest" + "testing" +) + +var GetS3ApiConfiguration func(s3cfg *iam_pb.S3ApiConfiguration) (err error) +var PutS3ApiConfiguration func(s3cfg *iam_pb.S3ApiConfiguration) (err error) +var GetPolicies func(policies *Policies) (err error) +var PutPolicies func(policies *Policies) (err error) + +var s3config = iam_pb.S3ApiConfiguration{} +var policiesFile = Policies{Policies: make(map[string]PolicyDocument)} +var ias = IamApiServer{s3ApiConfig: iamS3ApiConfigureMock{}} + +type iamS3ApiConfigureMock struct{} + +func (iam iamS3ApiConfigureMock) GetS3ApiConfiguration(s3cfg *iam_pb.S3ApiConfiguration) (err error) { + _ = copier.Copy(&s3cfg.Identities, &s3config.Identities) + return nil +} + +func (iam iamS3ApiConfigureMock) PutS3ApiConfiguration(s3cfg *iam_pb.S3ApiConfiguration) (err error) { + _ = copier.Copy(&s3config.Identities, &s3cfg.Identities) + return nil +} + +func (iam iamS3ApiConfigureMock) GetPolicies(policies *Policies) (err error) { + _ = copier.Copy(&policies, &policiesFile) + return nil +} + +func (iam iamS3ApiConfigureMock) PutPolicies(policies *Policies) (err error) { + _ = copier.Copy(&policiesFile, &policies) + return nil +} + +func TestCreateUser(t *testing.T) { + userName := aws.String("Test") + params := &iam.CreateUserInput{UserName: userName} + req, _ := iam.New(session.New()).CreateUserRequest(params) + _ = req.Build() + out := CreateUserResponse{} + response, err := executeRequest(req.HTTPRequest, out) + assert.Equal(t, nil, err) + assert.Equal(t, http.StatusOK, response.Code) + //assert.Equal(t, out.XMLName, "lol") +} + +func TestListUsers(t *testing.T) { + params := &iam.ListUsersInput{} + req, _ := iam.New(session.New()).ListUsersRequest(params) + _ = req.Build() + out := ListUsersResponse{} + response, err := executeRequest(req.HTTPRequest, out) + assert.Equal(t, nil, err) + assert.Equal(t, http.StatusOK, response.Code) +} + +func TestListAccessKeys(t *testing.T) { + svc := iam.New(session.New()) + params := &iam.ListAccessKeysInput{} + req, _ := svc.ListAccessKeysRequest(params) + _ = req.Build() + out := ListAccessKeysResponse{} + response, err := executeRequest(req.HTTPRequest, out) + assert.Equal(t, nil, err) + assert.Equal(t, http.StatusOK, response.Code) +} + +func TestGetUser(t *testing.T) { + userName := aws.String("Test") + params := &iam.GetUserInput{UserName: userName} + req, _ := iam.New(session.New()).GetUserRequest(params) + _ = req.Build() + out := GetUserResponse{} + response, err := executeRequest(req.HTTPRequest, out) + assert.Equal(t, nil, 
err) + assert.Equal(t, http.StatusOK, response.Code) +} + +// Todo flat statement +func TestCreatePolicy(t *testing.T) { + params := &iam.CreatePolicyInput{ + PolicyName: aws.String("S3-read-only-example-bucket"), + PolicyDocument: aws.String(` + { + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": [ + "s3:Get*", + "s3:List*" + ], + "Resource": [ + "arn:aws:s3:::EXAMPLE-BUCKET", + "arn:aws:s3:::EXAMPLE-BUCKET/*" + ] + } + ] + }`), + } + req, _ := iam.New(session.New()).CreatePolicyRequest(params) + _ = req.Build() + out := CreatePolicyResponse{} + response, err := executeRequest(req.HTTPRequest, out) + assert.Equal(t, nil, err) + assert.Equal(t, http.StatusOK, response.Code) +} + +func TestPutUserPolicy(t *testing.T) { + userName := aws.String("Test") + params := &iam.PutUserPolicyInput{ + UserName: userName, + PolicyName: aws.String("S3-read-only-example-bucket"), + PolicyDocument: aws.String( + `{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": [ + "s3:Get*", + "s3:List*" + ], + "Resource": [ + "arn:aws:s3:::EXAMPLE-BUCKET", + "arn:aws:s3:::EXAMPLE-BUCKET/*" + ] + } + ] + }`), + } + req, _ := iam.New(session.New()).PutUserPolicyRequest(params) + _ = req.Build() + out := PutUserPolicyResponse{} + response, err := executeRequest(req.HTTPRequest, out) + assert.Equal(t, nil, err) + assert.Equal(t, http.StatusOK, response.Code) +} + +func TestGetUserPolicy(t *testing.T) { + userName := aws.String("Test") + params := &iam.GetUserPolicyInput{UserName: userName, PolicyName: aws.String("S3-read-only-example-bucket")} + req, _ := iam.New(session.New()).GetUserPolicyRequest(params) + _ = req.Build() + out := GetUserPolicyResponse{} + response, err := executeRequest(req.HTTPRequest, out) + assert.Equal(t, nil, err) + assert.Equal(t, http.StatusOK, response.Code) +} + +func TestDeleteUser(t *testing.T) { + userName := aws.String("Test") + params := &iam.DeleteUserInput{UserName: userName} + req, _ := iam.New(session.New()).DeleteUserRequest(params) + _ = req.Build() + out := DeleteUserResponse{} + response, err := executeRequest(req.HTTPRequest, out) + assert.Equal(t, nil, err) + assert.Equal(t, http.StatusOK, response.Code) +} + +func executeRequest(req *http.Request, v interface{}) (*httptest.ResponseRecorder, error) { + rr := httptest.NewRecorder() + apiRouter := mux.NewRouter().SkipClean(true) + apiRouter.Path("/").Methods("POST").HandlerFunc(ias.DoActions) + apiRouter.ServeHTTP(rr, req) + return rr, xml.Unmarshal(rr.Body.Bytes(), &v) +} diff --git a/weed/images/orientation.go b/weed/images/orientation.go index 4bff89311..a592a7d8b 100644 --- a/weed/images/orientation.go +++ b/weed/images/orientation.go @@ -7,7 +7,7 @@ import ( "image/jpeg" "log" - "github.com/rwcarlsen/goexif/exif" + "github.com/seaweedfs/goexif/exif" ) //many code is copied from http://camlistore.org/pkg/images/images.go diff --git a/weed/images/resizing.go b/weed/images/resizing.go index ff0eff5e1..b048daa1c 100644 --- a/weed/images/resizing.go +++ b/weed/images/resizing.go @@ -6,10 +6,11 @@ import ( "image/gif" "image/jpeg" "image/png" + "io" - "github.com/chrislusf/seaweedfs/weed/glog" "github.com/disintegration/imaging" - "io" + + "github.com/chrislusf/seaweedfs/weed/glog" ) func Resized(ext string, read io.ReadSeeker, width, height int, mode string) (resized io.ReadSeeker, w int, h int) { @@ -35,6 +36,7 @@ func Resized(ext string, read io.ReadSeeker, width, height int, mode string) (re } } } else { + read.Seek(0, 0) return read, bounds.Dx(), bounds.Dy() } var 
buf bytes.Buffer diff --git a/weed/messaging/broker/broker_append.go b/weed/messaging/broker/broker_append.go new file mode 100644 index 000000000..8e5b56fd0 --- /dev/null +++ b/weed/messaging/broker/broker_append.go @@ -0,0 +1,113 @@ +package broker + +import ( + "context" + "fmt" + "io" + + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/operation" + "github.com/chrislusf/seaweedfs/weed/pb" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/pb/messaging_pb" + "github.com/chrislusf/seaweedfs/weed/security" + "github.com/chrislusf/seaweedfs/weed/util" +) + +func (broker *MessageBroker) appendToFile(targetFile string, topicConfig *messaging_pb.TopicConfiguration, data []byte) error { + + assignResult, uploadResult, err2 := broker.assignAndUpload(topicConfig, data) + if err2 != nil { + return err2 + } + + dir, name := util.FullPath(targetFile).DirAndName() + + // append the chunk + if err := broker.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { + + request := &filer_pb.AppendToEntryRequest{ + Directory: dir, + EntryName: name, + Chunks: []*filer_pb.FileChunk{uploadResult.ToPbFileChunk(assignResult.Fid, 0)}, + } + + _, err := client.AppendToEntry(context.Background(), request) + if err != nil { + glog.V(0).Infof("append to file %v: %v", request, err) + return err + } + + return nil + }); err != nil { + return fmt.Errorf("append to file %v: %v", targetFile, err) + } + + return nil +} + +func (broker *MessageBroker) assignAndUpload(topicConfig *messaging_pb.TopicConfiguration, data []byte) (*operation.AssignResult, *operation.UploadResult, error) { + + var assignResult = &operation.AssignResult{} + + // assign a volume location + if err := broker.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { + + request := &filer_pb.AssignVolumeRequest{ + Count: 1, + Replication: topicConfig.Replication, + Collection: topicConfig.Collection, + } + + resp, err := client.AssignVolume(context.Background(), request) + if err != nil { + glog.V(0).Infof("assign volume failure %v: %v", request, err) + return err + } + if resp.Error != "" { + return fmt.Errorf("assign volume failure %v: %v", request, resp.Error) + } + + assignResult.Auth = security.EncodedJwt(resp.Auth) + assignResult.Fid = resp.FileId + assignResult.Url = resp.Url + assignResult.PublicUrl = resp.PublicUrl + assignResult.Count = uint64(resp.Count) + + return nil + }); err != nil { + return nil, nil, err + } + + // upload data + targetUrl := fmt.Sprintf("http://%s/%s", assignResult.Url, assignResult.Fid) + uploadResult, err := operation.UploadData(targetUrl, "", broker.option.Cipher, data, false, "", nil, assignResult.Auth) + if err != nil { + return nil, nil, fmt.Errorf("upload data %s: %v", targetUrl, err) + } + // println("uploaded to", targetUrl) + return assignResult, uploadResult, nil +} + +var _ = filer_pb.FilerClient(&MessageBroker{}) + +func (broker *MessageBroker) WithFilerClient(fn func(filer_pb.SeaweedFilerClient) error) (err error) { + + for _, filer := range broker.option.Filers { + if err = pb.WithFilerClient(filer, broker.grpcDialOption, fn); err != nil { + if err == io.EOF { + return + } + glog.V(0).Infof("fail to connect to %s: %v", filer, err) + } else { + break + } + } + + return + +} + +func (broker *MessageBroker) AdjustedUrl(location *filer_pb.Location) string { + return location.Url +} diff --git a/weed/messaging/broker/broker_grpc_server.go b/weed/messaging/broker/broker_grpc_server.go new file mode 100644 index 
000000000..ba141fdd0
--- /dev/null
+++ b/weed/messaging/broker/broker_grpc_server.go
@@ -0,0 +1,37 @@
+package broker
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/chrislusf/seaweedfs/weed/filer"
+	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+	"github.com/chrislusf/seaweedfs/weed/pb/messaging_pb"
+)
+
+func (broker *MessageBroker) ConfigureTopic(c context.Context, request *messaging_pb.ConfigureTopicRequest) (*messaging_pb.ConfigureTopicResponse, error) {
+	panic("implement me")
+}
+
+func (broker *MessageBroker) DeleteTopic(c context.Context, request *messaging_pb.DeleteTopicRequest) (*messaging_pb.DeleteTopicResponse, error) {
+	resp := &messaging_pb.DeleteTopicResponse{}
+	dir, entry := genTopicDirEntry(request.Namespace, request.Topic)
+	if exists, err := filer_pb.Exists(broker, dir, entry, true); err != nil {
+		return nil, err
+	} else if exists {
+		// surface a failed removal to the caller instead of silently discarding it
+		if err = filer_pb.Remove(broker, dir, entry, true, true, true, false, nil); err != nil {
+			return nil, err
+		}
+	}
+	return resp, nil
+}
+
+func (broker *MessageBroker) GetTopicConfiguration(c context.Context, request *messaging_pb.GetTopicConfigurationRequest) (*messaging_pb.GetTopicConfigurationResponse, error) {
+	panic("implement me")
+}
+
+func genTopicDir(namespace, topic string) string {
+	return fmt.Sprintf("%s/%s/%s", filer.TopicsDir, namespace, topic)
+}
+
+func genTopicDirEntry(namespace, topic string) (dir, entry string) {
+	return fmt.Sprintf("%s/%s", filer.TopicsDir, namespace), topic
+}
diff --git a/weed/messaging/broker/broker_grpc_server_discovery.go b/weed/messaging/broker/broker_grpc_server_discovery.go
new file mode 100644
index 000000000..3c14f3220
--- /dev/null
+++ b/weed/messaging/broker/broker_grpc_server_discovery.go
@@ -0,0 +1,116 @@
+package broker
+
+import (
+	"context"
+	"fmt"
+	"time"
+
+	"github.com/chrislusf/seaweedfs/weed/glog"
+	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+	"github.com/chrislusf/seaweedfs/weed/pb/master_pb"
+	"github.com/chrislusf/seaweedfs/weed/pb/messaging_pb"
+)
+
+/*
+Topic discovery:
+
+When a pub or sub connects, it asks for the whole broker list and runs consistent hashing to find its broker.
+
+The broker checks its peers to see whether the topic is already hosted by some other broker; if that broker is alive and acknowledged as alive, redirect to it.
+Otherwise, just host the topic.
+
+So, if the pub and sub connect around the same time, they will connect to the same broker. Everyone is happy.
+If one of them connects very late, and the system topology has changed quite a bit with new servers added or old servers gone, checking the peers will help.
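+
+As an illustrative sketch (the broker addresses are made up), every client can run
+the same lookup locally and independently arrive at the same broker:
+
+	brokers := []string{"broker1:17777", "broker2:17777", "broker3:17777"}
+	key := fmt.Sprintf(TopicPartitionFmt, "chan", "events", 0) // "chan/events_00"
+	chosen := PickMember(brokers, []byte(key))
+	// any pub or sub hashing the same topic partition over the same list picks the same broker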
+ +*/ + +func (broker *MessageBroker) FindBroker(c context.Context, request *messaging_pb.FindBrokerRequest) (*messaging_pb.FindBrokerResponse, error) { + + t := &messaging_pb.FindBrokerResponse{} + var peers []string + + targetTopicPartition := fmt.Sprintf(TopicPartitionFmt, request.Namespace, request.Topic, request.Parition) + + for _, filer := range broker.option.Filers { + err := broker.withFilerClient(filer, func(client filer_pb.SeaweedFilerClient) error { + resp, err := client.LocateBroker(context.Background(), &filer_pb.LocateBrokerRequest{ + Resource: targetTopicPartition, + }) + if err != nil { + return err + } + if resp.Found && len(resp.Resources) > 0 { + t.Broker = resp.Resources[0].GrpcAddresses + return nil + } + for _, b := range resp.Resources { + peers = append(peers, b.GrpcAddresses) + } + return nil + }) + if err != nil { + return nil, err + } + } + + t.Broker = PickMember(peers, []byte(targetTopicPartition)) + + return t, nil + +} + +func (broker *MessageBroker) checkFilers() { + + // contact a filer about masters + var masters []string + found := false + for !found { + for _, filer := range broker.option.Filers { + err := broker.withFilerClient(filer, func(client filer_pb.SeaweedFilerClient) error { + resp, err := client.GetFilerConfiguration(context.Background(), &filer_pb.GetFilerConfigurationRequest{}) + if err != nil { + return err + } + masters = append(masters, resp.Masters...) + return nil + }) + if err == nil { + found = true + break + } + glog.V(0).Infof("failed to read masters from %+v: %v", broker.option.Filers, err) + time.Sleep(time.Second) + } + } + glog.V(0).Infof("received master list: %s", masters) + + // contact each masters for filers + var filers []string + found = false + for !found { + for _, master := range masters { + err := broker.withMasterClient(master, func(client master_pb.SeaweedClient) error { + resp, err := client.ListMasterClients(context.Background(), &master_pb.ListMasterClientsRequest{ + ClientType: "filer", + }) + if err != nil { + return err + } + + filers = append(filers, resp.GrpcAddresses...) 
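+				// accumulate the gRPC addresses of every filer this master knows
+				// about; they become the broker's filer list below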
+
+				return nil
+			})
+			if err == nil {
+				found = true
+				break
+			}
+			glog.V(0).Infof("failed to list filers: %v", err)
+			time.Sleep(time.Second)
+		}
+	}
+	glog.V(0).Infof("received filer list: %s", filers)
+
+	broker.option.Filers = filers
+
+}
diff --git a/weed/messaging/broker/broker_grpc_server_publish.go b/weed/messaging/broker/broker_grpc_server_publish.go
new file mode 100644
index 000000000..6e6b723d1
--- /dev/null
+++ b/weed/messaging/broker/broker_grpc_server_publish.go
@@ -0,0 +1,112 @@
+package broker
+
+import (
+	"crypto/md5"
+	"fmt"
+	"io"
+
+	"github.com/golang/protobuf/proto"
+
+	"github.com/chrislusf/seaweedfs/weed/filer"
+	"github.com/chrislusf/seaweedfs/weed/glog"
+	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+	"github.com/chrislusf/seaweedfs/weed/pb/messaging_pb"
+)
+
+func (broker *MessageBroker) Publish(stream messaging_pb.SeaweedMessaging_PublishServer) error {
+
+	// process initial request
+	in, err := stream.Recv()
+	if err == io.EOF {
+		return nil
+	}
+	if err != nil {
+		return err
+	}
+
+	// TODO look it up
+	topicConfig := &messaging_pb.TopicConfiguration{
+		// IsTransient: true,
+	}
+
+	// send init response
+	initResponse := &messaging_pb.PublishResponse{
+		Config:   nil,
+		Redirect: nil,
+	}
+	err = stream.Send(initResponse)
+	if err != nil {
+		return err
+	}
+	if initResponse.Redirect != nil {
+		return nil
+	}
+
+	// get lock
+	tp := TopicPartition{
+		Namespace: in.Init.Namespace,
+		Topic:     in.Init.Topic,
+		Partition: in.Init.Partition,
+	}
+
+	tpDir := fmt.Sprintf("%s/%s/%s", filer.TopicsDir, tp.Namespace, tp.Topic)
+	md5File := fmt.Sprintf("p%02d.md5", tp.Partition)
+	// println("chan data stored under", tpDir, "as", md5File)
+
+	if exists, err := filer_pb.Exists(broker, tpDir, md5File, false); err == nil && exists {
+		return fmt.Errorf("channel is already closed")
+	}
+
+	tl := broker.topicManager.RequestLock(tp, topicConfig, true)
+	defer broker.topicManager.ReleaseLock(tp, true)
+
+	md5hash := md5.New()
+	// process each message
+	for {
+		// println("recv")
+		in, err := stream.Recv()
+		// glog.V(0).Infof("received %v err: %v", in, err)
+		if err == io.EOF {
+			return nil
+		}
+		if err != nil {
+			return err
+		}
+
+		if in.Data == nil {
+			continue
+		}
+
+		// fmt.Printf("received: %d : %s\n", len(in.Data.Value), string(in.Data.Value))
+
+		data, err := proto.Marshal(in.Data)
+		if err != nil {
+			glog.Errorf("marshal error: %v\n", err)
+			continue
+		}
+
+		tl.logBuffer.AddToBuffer(in.Data.Key, data, in.Data.EventTimeNs)
+
+		if in.Data.IsClose {
+			// println("server received closing")
+			break
+		}
+
+		md5hash.Write(in.Data.Value)
+
+	}
+
+	if err := broker.appendToFile(tpDir+"/"+md5File, topicConfig, md5hash.Sum(nil)); err != nil {
+		glog.V(0).Infof("err writing %s: %v", md5File, err)
+	}
+
+	// fmt.Printf("received md5 %X\n", md5hash.Sum(nil))
+
+	// send the close ack
+	// println("server send ack closing")
+	if err := stream.Send(&messaging_pb.PublishResponse{IsClosed: true}); err != nil {
+		glog.V(0).Infof("err sending close response: %v", err)
+	}
+	return nil
+
+}
diff --git a/weed/messaging/broker/broker_grpc_server_subscribe.go b/weed/messaging/broker/broker_grpc_server_subscribe.go
new file mode 100644
index 000000000..3021473e5
--- /dev/null
+++ b/weed/messaging/broker/broker_grpc_server_subscribe.go
@@ -0,0 +1,177 @@
+package broker
+
+import (
+	"fmt"
+	"github.com/chrislusf/seaweedfs/weed/util/log_buffer"
+	"io"
+	"strings"
+	"time"
+
+	"github.com/golang/protobuf/proto"
+
+	"github.com/chrislusf/seaweedfs/weed/filer"
+	
"github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/pb/messaging_pb" +) + +func (broker *MessageBroker) Subscribe(stream messaging_pb.SeaweedMessaging_SubscribeServer) error { + + // process initial request + in, err := stream.Recv() + if err == io.EOF { + return nil + } + if err != nil { + return err + } + + var processedTsNs int64 + var messageCount int64 + subscriberId := in.Init.SubscriberId + + // TODO look it up + topicConfig := &messaging_pb.TopicConfiguration{ + // IsTransient: true, + } + + // get lock + tp := TopicPartition{ + Namespace: in.Init.Namespace, + Topic: in.Init.Topic, + Partition: in.Init.Partition, + } + fmt.Printf("+ subscriber %s for %s\n", subscriberId, tp.String()) + defer func() { + fmt.Printf("- subscriber %s for %s %d messages last %v\n", subscriberId, tp.String(), messageCount, time.Unix(0, processedTsNs)) + }() + + lock := broker.topicManager.RequestLock(tp, topicConfig, false) + defer broker.topicManager.ReleaseLock(tp, false) + + isConnected := true + go func() { + for isConnected { + if _, err := stream.Recv(); err != nil { + // println("disconnecting connection to", subscriberId, tp.String()) + isConnected = false + lock.cond.Signal() + } + } + }() + + lastReadTime := time.Now() + switch in.Init.StartPosition { + case messaging_pb.SubscriberMessage_InitMessage_TIMESTAMP: + lastReadTime = time.Unix(0, in.Init.TimestampNs) + case messaging_pb.SubscriberMessage_InitMessage_LATEST: + case messaging_pb.SubscriberMessage_InitMessage_EARLIEST: + lastReadTime = time.Unix(0, 0) + } + + // how to process each message + // an error returned will end the subscription + eachMessageFn := func(m *messaging_pb.Message) error { + err := stream.Send(&messaging_pb.BrokerMessage{ + Data: m, + }) + if err != nil { + glog.V(0).Infof("=> subscriber %v: %+v", subscriberId, err) + } + return err + } + + eachLogEntryFn := func(logEntry *filer_pb.LogEntry) error { + m := &messaging_pb.Message{} + if err = proto.Unmarshal(logEntry.Data, m); err != nil { + glog.Errorf("unexpected unmarshal messaging_pb.Message: %v", err) + return err + } + // fmt.Printf("sending : %d bytes ts %d\n", len(m.Value), logEntry.TsNs) + if err = eachMessageFn(m); err != nil { + glog.Errorf("sending %d bytes to %s: %s", len(m.Value), subscriberId, err) + return err + } + if m.IsClose { + // println("processed EOF") + return io.EOF + } + processedTsNs = logEntry.TsNs + messageCount++ + return nil + } + + // fmt.Printf("subscriber %s read %d on disk log %v\n", subscriberId, messageCount, lastReadTime) + + for { + + if err = broker.readPersistedLogBuffer(&tp, lastReadTime, eachLogEntryFn); err != nil { + if err != io.EOF { + // println("stopping from persisted logs", err.Error()) + return err + } + } + + if processedTsNs != 0 { + lastReadTime = time.Unix(0, processedTsNs) + } + + lastReadTime, err = lock.logBuffer.LoopProcessLogData(lastReadTime, func() bool { + lock.Mutex.Lock() + lock.cond.Wait() + lock.Mutex.Unlock() + return isConnected + }, eachLogEntryFn) + if err != nil { + if err == log_buffer.ResumeFromDiskError { + continue + } + glog.Errorf("processed to %v: %v", lastReadTime, err) + time.Sleep(3127 * time.Millisecond) + if err != log_buffer.ResumeError { + break + } + } + } + + return err + +} + +func (broker *MessageBroker) readPersistedLogBuffer(tp *TopicPartition, startTime time.Time, eachLogEntryFn func(logEntry *filer_pb.LogEntry) error) (err error) { + startTime = startTime.UTC() + startDate := 
fmt.Sprintf("%04d-%02d-%02d", startTime.Year(), startTime.Month(), startTime.Day()) + startHourMinute := fmt.Sprintf("%02d-%02d.segment", startTime.Hour(), startTime.Minute()) + + sizeBuf := make([]byte, 4) + startTsNs := startTime.UnixNano() + + topicDir := genTopicDir(tp.Namespace, tp.Topic) + partitionSuffix := fmt.Sprintf(".part%02d", tp.Partition) + + return filer_pb.List(broker, topicDir, "", func(dayEntry *filer_pb.Entry, isLast bool) error { + dayDir := fmt.Sprintf("%s/%s", topicDir, dayEntry.Name) + return filer_pb.List(broker, dayDir, "", func(hourMinuteEntry *filer_pb.Entry, isLast bool) error { + if dayEntry.Name == startDate { + if strings.Compare(hourMinuteEntry.Name, startHourMinute) < 0 { + return nil + } + } + if !strings.HasSuffix(hourMinuteEntry.Name, partitionSuffix) { + return nil + } + // println("partition", tp.Partition, "processing", dayDir, "/", hourMinuteEntry.Name) + chunkedFileReader := filer.NewChunkStreamReader(broker, hourMinuteEntry.Chunks) + defer chunkedFileReader.Close() + if _, err := filer.ReadEachLogEntry(chunkedFileReader, sizeBuf, startTsNs, eachLogEntryFn); err != nil { + chunkedFileReader.Close() + if err == io.EOF { + return err + } + return fmt.Errorf("reading %s/%s: %v", dayDir, hourMinuteEntry.Name, err) + } + return nil + }, "", false, 24*60) + }, startDate, true, 366) + +} diff --git a/weed/messaging/broker/broker_server.go b/weed/messaging/broker/broker_server.go new file mode 100644 index 000000000..06162471c --- /dev/null +++ b/weed/messaging/broker/broker_server.go @@ -0,0 +1,114 @@ +package broker + +import ( + "context" + "time" + + "google.golang.org/grpc" + + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/pb/master_pb" +) + +type MessageBrokerOption struct { + Filers []string + DefaultReplication string + MaxMB int + Ip string + Port int + Cipher bool +} + +type MessageBroker struct { + option *MessageBrokerOption + grpcDialOption grpc.DialOption + topicManager *TopicManager +} + +func NewMessageBroker(option *MessageBrokerOption, grpcDialOption grpc.DialOption) (messageBroker *MessageBroker, err error) { + + messageBroker = &MessageBroker{ + option: option, + grpcDialOption: grpcDialOption, + } + + messageBroker.topicManager = NewTopicManager(messageBroker) + + messageBroker.checkFilers() + + go messageBroker.keepConnectedToOneFiler() + + return messageBroker, nil +} + +func (broker *MessageBroker) keepConnectedToOneFiler() { + + for { + for _, filer := range broker.option.Filers { + broker.withFilerClient(filer, func(client filer_pb.SeaweedFilerClient) error { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + stream, err := client.KeepConnected(ctx) + if err != nil { + glog.V(0).Infof("%s:%d failed to keep connected to %s: %v", broker.option.Ip, broker.option.Port, filer, err) + return err + } + + initRequest := &filer_pb.KeepConnectedRequest{ + Name: broker.option.Ip, + GrpcPort: uint32(broker.option.Port), + } + for _, tp := range broker.topicManager.ListTopicPartitions() { + initRequest.Resources = append(initRequest.Resources, tp.String()) + } + if err := stream.Send(&filer_pb.KeepConnectedRequest{ + Name: broker.option.Ip, + GrpcPort: uint32(broker.option.Port), + }); err != nil { + glog.V(0).Infof("broker %s:%d failed to init at %s: %v", broker.option.Ip, broker.option.Port, filer, err) + return err + } + + // TODO send events of adding/removing topics + + 
glog.V(0).Infof("conntected with filer: %v", filer) + for { + if err := stream.Send(&filer_pb.KeepConnectedRequest{ + Name: broker.option.Ip, + GrpcPort: uint32(broker.option.Port), + }); err != nil { + glog.V(0).Infof("%s:%d failed to sendto %s: %v", broker.option.Ip, broker.option.Port, filer, err) + return err + } + // println("send heartbeat") + if _, err := stream.Recv(); err != nil { + glog.V(0).Infof("%s:%d failed to receive from %s: %v", broker.option.Ip, broker.option.Port, filer, err) + return err + } + // println("received reply") + time.Sleep(11 * time.Second) + // println("woke up") + } + return nil + }) + time.Sleep(3 * time.Second) + } + } + +} + +func (broker *MessageBroker) withFilerClient(filer string, fn func(filer_pb.SeaweedFilerClient) error) error { + + return pb.WithFilerClient(filer, broker.grpcDialOption, fn) + +} + +func (broker *MessageBroker) withMasterClient(master string, fn func(client master_pb.SeaweedClient) error) error { + + return pb.WithMasterClient(master, broker.grpcDialOption, func(client master_pb.SeaweedClient) error { + return fn(client) + }) + +} diff --git a/weed/messaging/broker/consistent_distribution.go b/weed/messaging/broker/consistent_distribution.go new file mode 100644 index 000000000..465a2a8f2 --- /dev/null +++ b/weed/messaging/broker/consistent_distribution.go @@ -0,0 +1,38 @@ +package broker + +import ( + "github.com/buraksezer/consistent" + "github.com/cespare/xxhash" +) + +type Member string + +func (m Member) String() string { + return string(m) +} + +type hasher struct{} + +func (h hasher) Sum64(data []byte) uint64 { + return xxhash.Sum64(data) +} + +func PickMember(members []string, key []byte) string { + cfg := consistent.Config{ + PartitionCount: 9791, + ReplicationFactor: 2, + Load: 1.25, + Hasher: hasher{}, + } + + cmembers := []consistent.Member{} + for _, m := range members { + cmembers = append(cmembers, Member(m)) + } + + c := consistent.New(cmembers, cfg) + + m := c.LocateKey(key) + + return m.String() +} diff --git a/weed/messaging/broker/consistent_distribution_test.go b/weed/messaging/broker/consistent_distribution_test.go new file mode 100644 index 000000000..f58fe4e0e --- /dev/null +++ b/weed/messaging/broker/consistent_distribution_test.go @@ -0,0 +1,32 @@ +package broker + +import ( + "fmt" + "testing" +) + +func TestPickMember(t *testing.T) { + + servers := []string{ + "s1:port", + "s2:port", + "s3:port", + "s5:port", + "s4:port", + } + + total := 1000 + + distribution := make(map[string]int) + for i := 0; i < total; i++ { + tp := fmt.Sprintf("tp:%2d", i) + m := PickMember(servers, []byte(tp)) + // println(tp, "=>", m) + distribution[m]++ + } + + for member, count := range distribution { + fmt.Printf("member: %s, key count: %d load=%.2f\n", member, count, float64(count*100)/float64(total/len(servers))) + } + +} diff --git a/weed/messaging/broker/topic_manager.go b/weed/messaging/broker/topic_manager.go new file mode 100644 index 000000000..edddca813 --- /dev/null +++ b/weed/messaging/broker/topic_manager.go @@ -0,0 +1,124 @@ +package broker + +import ( + "fmt" + "sync" + "time" + + "github.com/chrislusf/seaweedfs/weed/filer" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/messaging_pb" + "github.com/chrislusf/seaweedfs/weed/util/log_buffer" +) + +type TopicPartition struct { + Namespace string + Topic string + Partition int32 +} + +const ( + TopicPartitionFmt = "%s/%s_%02d" +) + +func (tp *TopicPartition) String() string { + return fmt.Sprintf(TopicPartitionFmt, 
tp.Namespace, tp.Topic, tp.Partition) +} + +type TopicControl struct { + sync.Mutex + cond *sync.Cond + subscriberCount int + publisherCount int + logBuffer *log_buffer.LogBuffer +} + +type TopicManager struct { + sync.Mutex + topicControls map[TopicPartition]*TopicControl + broker *MessageBroker +} + +func NewTopicManager(messageBroker *MessageBroker) *TopicManager { + return &TopicManager{ + topicControls: make(map[TopicPartition]*TopicControl), + broker: messageBroker, + } +} + +func (tm *TopicManager) buildLogBuffer(tl *TopicControl, tp TopicPartition, topicConfig *messaging_pb.TopicConfiguration) *log_buffer.LogBuffer { + + flushFn := func(startTime, stopTime time.Time, buf []byte) { + + if topicConfig.IsTransient { + // return + } + + // fmt.Printf("flushing with topic config %+v\n", topicConfig) + + startTime, stopTime = startTime.UTC(), stopTime.UTC() + targetFile := fmt.Sprintf( + "%s/%s/%s/%04d-%02d-%02d/%02d-%02d.part%02d", + filer.TopicsDir, tp.Namespace, tp.Topic, + startTime.Year(), startTime.Month(), startTime.Day(), startTime.Hour(), startTime.Minute(), + tp.Partition, + ) + + if err := tm.broker.appendToFile(targetFile, topicConfig, buf); err != nil { + glog.V(0).Infof("log write failed %s: %v", targetFile, err) + } + } + logBuffer := log_buffer.NewLogBuffer(time.Minute, flushFn, func() { + tl.cond.Broadcast() + }) + + return logBuffer +} + +func (tm *TopicManager) RequestLock(partition TopicPartition, topicConfig *messaging_pb.TopicConfiguration, isPublisher bool) *TopicControl { + tm.Lock() + defer tm.Unlock() + + tc, found := tm.topicControls[partition] + if !found { + tc = &TopicControl{} + tc.cond = sync.NewCond(&tc.Mutex) + tm.topicControls[partition] = tc + tc.logBuffer = tm.buildLogBuffer(tc, partition, topicConfig) + } + if isPublisher { + tc.publisherCount++ + } else { + tc.subscriberCount++ + } + return tc +} + +func (tm *TopicManager) ReleaseLock(partition TopicPartition, isPublisher bool) { + tm.Lock() + defer tm.Unlock() + + lock, found := tm.topicControls[partition] + if !found { + return + } + if isPublisher { + lock.publisherCount-- + } else { + lock.subscriberCount-- + } + if lock.subscriberCount <= 0 && lock.publisherCount <= 0 { + delete(tm.topicControls, partition) + lock.logBuffer.Shutdown() + } +} + +func (tm *TopicManager) ListTopicPartitions() (tps []TopicPartition) { + tm.Lock() + defer tm.Unlock() + + for k := range tm.topicControls { + tps = append(tps, k) + } + return +} diff --git a/weed/messaging/msgclient/chan_config.go b/weed/messaging/msgclient/chan_config.go new file mode 100644 index 000000000..a75678815 --- /dev/null +++ b/weed/messaging/msgclient/chan_config.go @@ -0,0 +1,5 @@ +package msgclient + +func (mc *MessagingClient) DeleteChannel(chanName string) error { + return mc.DeleteTopic("chan", chanName) +} diff --git a/weed/messaging/msgclient/chan_pub.go b/weed/messaging/msgclient/chan_pub.go new file mode 100644 index 000000000..9bc88f7c0 --- /dev/null +++ b/weed/messaging/msgclient/chan_pub.go @@ -0,0 +1,76 @@ +package msgclient + +import ( + "crypto/md5" + "hash" + "io" + "log" + + "google.golang.org/grpc" + + "github.com/chrislusf/seaweedfs/weed/messaging/broker" + "github.com/chrislusf/seaweedfs/weed/pb/messaging_pb" +) + +type PubChannel struct { + client messaging_pb.SeaweedMessaging_PublishClient + grpcConnection *grpc.ClientConn + md5hash hash.Hash +} + +func (mc *MessagingClient) NewPubChannel(chanName string) (*PubChannel, error) { + tp := broker.TopicPartition{ + Namespace: "chan", + Topic: chanName, + Partition: 0, + } + 
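+	// a named channel is modeled as partition 0 of a topic in the reserved "chan" namespace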
grpcConnection, err := mc.findBroker(tp) + if err != nil { + return nil, err + } + pc, err := setupPublisherClient(grpcConnection, tp) + if err != nil { + return nil, err + } + return &PubChannel{ + client: pc, + grpcConnection: grpcConnection, + md5hash: md5.New(), + }, nil +} + +func (pc *PubChannel) Publish(m []byte) error { + err := pc.client.Send(&messaging_pb.PublishRequest{ + Data: &messaging_pb.Message{ + Value: m, + }, + }) + if err == nil { + pc.md5hash.Write(m) + } + return err +} +func (pc *PubChannel) Close() error { + + // println("send closing") + if err := pc.client.Send(&messaging_pb.PublishRequest{ + Data: &messaging_pb.Message{ + IsClose: true, + }, + }); err != nil { + log.Printf("err send close: %v", err) + } + // println("receive closing") + if _, err := pc.client.Recv(); err != nil && err != io.EOF { + log.Printf("err receive close: %v", err) + } + // println("close connection") + if err := pc.grpcConnection.Close(); err != nil { + log.Printf("err connection close: %v", err) + } + return nil +} + +func (pc *PubChannel) Md5() []byte { + return pc.md5hash.Sum(nil) +} diff --git a/weed/messaging/msgclient/chan_sub.go b/weed/messaging/msgclient/chan_sub.go new file mode 100644 index 000000000..213ff4666 --- /dev/null +++ b/weed/messaging/msgclient/chan_sub.go @@ -0,0 +1,85 @@ +package msgclient + +import ( + "context" + "crypto/md5" + "hash" + "io" + "log" + "time" + + "github.com/chrislusf/seaweedfs/weed/messaging/broker" + "github.com/chrislusf/seaweedfs/weed/pb/messaging_pb" +) + +type SubChannel struct { + ch chan []byte + stream messaging_pb.SeaweedMessaging_SubscribeClient + md5hash hash.Hash + cancel context.CancelFunc +} + +func (mc *MessagingClient) NewSubChannel(subscriberId, chanName string) (*SubChannel, error) { + tp := broker.TopicPartition{ + Namespace: "chan", + Topic: chanName, + Partition: 0, + } + grpcConnection, err := mc.findBroker(tp) + if err != nil { + return nil, err + } + ctx, cancel := context.WithCancel(context.Background()) + sc, err := setupSubscriberClient(ctx, grpcConnection, tp, subscriberId, time.Unix(0, 0)) + if err != nil { + return nil, err + } + + t := &SubChannel{ + ch: make(chan []byte), + stream: sc, + md5hash: md5.New(), + cancel: cancel, + } + + go func() { + for { + resp, subErr := t.stream.Recv() + if subErr == io.EOF { + return + } + if subErr != nil { + log.Printf("fail to receive from netchan %s: %v", chanName, subErr) + return + } + if resp.Data == nil { + // this could be heartbeat from broker + continue + } + if resp.Data.IsClose { + t.stream.Send(&messaging_pb.SubscriberMessage{ + IsClose: true, + }) + close(t.ch) + cancel() + return + } + t.ch <- resp.Data.Value + t.md5hash.Write(resp.Data.Value) + } + }() + + return t, nil +} + +func (sc *SubChannel) Channel() chan []byte { + return sc.ch +} + +func (sc *SubChannel) Md5() []byte { + return sc.md5hash.Sum(nil) +} + +func (sc *SubChannel) Cancel() { + sc.cancel() +} diff --git a/weed/messaging/msgclient/client.go b/weed/messaging/msgclient/client.go new file mode 100644 index 000000000..4d7ef2b8e --- /dev/null +++ b/weed/messaging/msgclient/client.go @@ -0,0 +1,55 @@ +package msgclient + +import ( + "context" + "fmt" + "log" + + "google.golang.org/grpc" + + "github.com/chrislusf/seaweedfs/weed/messaging/broker" + "github.com/chrislusf/seaweedfs/weed/pb" + "github.com/chrislusf/seaweedfs/weed/pb/messaging_pb" + "github.com/chrislusf/seaweedfs/weed/security" + "github.com/chrislusf/seaweedfs/weed/util" +) + +type MessagingClient struct { + bootstrapBrokers []string + 
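+	// connection cache keyed by the topic partition a broker serves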
grpcConnections map[broker.TopicPartition]*grpc.ClientConn + grpcDialOption grpc.DialOption +} + +func NewMessagingClient(bootstrapBrokers ...string) *MessagingClient { + return &MessagingClient{ + bootstrapBrokers: bootstrapBrokers, + grpcConnections: make(map[broker.TopicPartition]*grpc.ClientConn), + grpcDialOption: security.LoadClientTLS(util.GetViper(), "grpc.msg_client"), + } +} + +func (mc *MessagingClient) findBroker(tp broker.TopicPartition) (*grpc.ClientConn, error) { + + for _, broker := range mc.bootstrapBrokers { + grpcConnection, err := pb.GrpcDial(context.Background(), broker, mc.grpcDialOption) + if err != nil { + log.Printf("dial broker %s: %v", broker, err) + continue + } + defer grpcConnection.Close() + + resp, err := messaging_pb.NewSeaweedMessagingClient(grpcConnection).FindBroker(context.Background(), + &messaging_pb.FindBrokerRequest{ + Namespace: tp.Namespace, + Topic: tp.Topic, + Parition: tp.Partition, + }) + if err != nil { + return nil, err + } + + targetBroker := resp.Broker + return pb.GrpcDial(context.Background(), targetBroker, mc.grpcDialOption) + } + return nil, fmt.Errorf("no broker found for %+v", tp) +} diff --git a/weed/messaging/msgclient/config.go b/weed/messaging/msgclient/config.go new file mode 100644 index 000000000..2b9eba1a8 --- /dev/null +++ b/weed/messaging/msgclient/config.go @@ -0,0 +1,63 @@ +package msgclient + +import ( + "context" + "log" + + "github.com/chrislusf/seaweedfs/weed/messaging/broker" + "github.com/chrislusf/seaweedfs/weed/pb" + "github.com/chrislusf/seaweedfs/weed/pb/messaging_pb" +) + +func (mc *MessagingClient) configureTopic(tp broker.TopicPartition) error { + + return mc.withAnyBroker(func(client messaging_pb.SeaweedMessagingClient) error { + _, err := client.ConfigureTopic(context.Background(), + &messaging_pb.ConfigureTopicRequest{ + Namespace: tp.Namespace, + Topic: tp.Topic, + Configuration: &messaging_pb.TopicConfiguration{ + PartitionCount: 0, + Collection: "", + Replication: "", + IsTransient: false, + Partitoning: 0, + }, + }) + return err + }) + +} + +func (mc *MessagingClient) DeleteTopic(namespace, topic string) error { + + return mc.withAnyBroker(func(client messaging_pb.SeaweedMessagingClient) error { + _, err := client.DeleteTopic(context.Background(), + &messaging_pb.DeleteTopicRequest{ + Namespace: namespace, + Topic: topic, + }) + return err + }) +} + +func (mc *MessagingClient) withAnyBroker(fn func(client messaging_pb.SeaweedMessagingClient) error) error { + + var lastErr error + for _, broker := range mc.bootstrapBrokers { + grpcConnection, err := pb.GrpcDial(context.Background(), broker, mc.grpcDialOption) + if err != nil { + log.Printf("dial broker %s: %v", broker, err) + continue + } + defer grpcConnection.Close() + + err = fn(messaging_pb.NewSeaweedMessagingClient(grpcConnection)) + if err == nil { + return nil + } + lastErr = err + } + + return lastErr +} diff --git a/weed/messaging/msgclient/publisher.go b/weed/messaging/msgclient/publisher.go new file mode 100644 index 000000000..1aa483ff8 --- /dev/null +++ b/weed/messaging/msgclient/publisher.go @@ -0,0 +1,118 @@ +package msgclient + +import ( + "context" + + "github.com/OneOfOne/xxhash" + "google.golang.org/grpc" + + "github.com/chrislusf/seaweedfs/weed/messaging/broker" + "github.com/chrislusf/seaweedfs/weed/pb/messaging_pb" +) + +type Publisher struct { + publishClients []messaging_pb.SeaweedMessaging_PublishClient + topicConfiguration *messaging_pb.TopicConfiguration + messageCount uint64 + publisherId string +} + +func (mc 
*MessagingClient) NewPublisher(publisherId, namespace, topic string) (*Publisher, error) { + // read topic configuration + topicConfiguration := &messaging_pb.TopicConfiguration{ + PartitionCount: 4, + } + publishClients := make([]messaging_pb.SeaweedMessaging_PublishClient, topicConfiguration.PartitionCount) + for i := 0; i < int(topicConfiguration.PartitionCount); i++ { + tp := broker.TopicPartition{ + Namespace: namespace, + Topic: topic, + Partition: int32(i), + } + grpcClientConn, err := mc.findBroker(tp) + if err != nil { + return nil, err + } + client, err := setupPublisherClient(grpcClientConn, tp) + if err != nil { + return nil, err + } + publishClients[i] = client + } + return &Publisher{ + publishClients: publishClients, + topicConfiguration: topicConfiguration, + }, nil +} + +func setupPublisherClient(grpcConnection *grpc.ClientConn, tp broker.TopicPartition) (messaging_pb.SeaweedMessaging_PublishClient, error) { + + stream, err := messaging_pb.NewSeaweedMessagingClient(grpcConnection).Publish(context.Background()) + if err != nil { + return nil, err + } + + // send init message + err = stream.Send(&messaging_pb.PublishRequest{ + Init: &messaging_pb.PublishRequest_InitMessage{ + Namespace: tp.Namespace, + Topic: tp.Topic, + Partition: tp.Partition, + }, + }) + if err != nil { + return nil, err + } + + // process init response + initResponse, err := stream.Recv() + if err != nil { + return nil, err + } + if initResponse.Redirect != nil { + // TODO follow redirection + } + if initResponse.Config != nil { + } + + // setup looks for control messages + doneChan := make(chan error, 1) + go func() { + for { + in, err := stream.Recv() + if err != nil { + doneChan <- err + return + } + if in.Redirect != nil { + } + if in.Config != nil { + } + } + }() + + return stream, nil + +} + +func (p *Publisher) Publish(m *messaging_pb.Message) error { + hashValue := p.messageCount + p.messageCount++ + if p.topicConfiguration.Partitoning == messaging_pb.TopicConfiguration_NonNullKeyHash { + if m.Key != nil { + hashValue = xxhash.Checksum64(m.Key) + } + } else if p.topicConfiguration.Partitoning == messaging_pb.TopicConfiguration_KeyHash { + hashValue = xxhash.Checksum64(m.Key) + } else { + // round robin + } + + idx := int(hashValue) % len(p.publishClients) + if idx < 0 { + idx += len(p.publishClients) + } + return p.publishClients[idx].Send(&messaging_pb.PublishRequest{ + Data: m, + }) +} diff --git a/weed/messaging/msgclient/subscriber.go b/weed/messaging/msgclient/subscriber.go new file mode 100644 index 000000000..6c7dc1ab7 --- /dev/null +++ b/weed/messaging/msgclient/subscriber.go @@ -0,0 +1,120 @@ +package msgclient + +import ( + "context" + "io" + "sync" + "time" + + "github.com/chrislusf/seaweedfs/weed/messaging/broker" + "github.com/chrislusf/seaweedfs/weed/pb/messaging_pb" + "google.golang.org/grpc" +) + +type Subscriber struct { + subscriberClients []messaging_pb.SeaweedMessaging_SubscribeClient + subscriberCancels []context.CancelFunc + subscriberId string +} + +func (mc *MessagingClient) NewSubscriber(subscriberId, namespace, topic string, partitionId int, startTime time.Time) (*Subscriber, error) { + // read topic configuration + topicConfiguration := &messaging_pb.TopicConfiguration{ + PartitionCount: 4, + } + subscriberClients := make([]messaging_pb.SeaweedMessaging_SubscribeClient, topicConfiguration.PartitionCount) + subscriberCancels := make([]context.CancelFunc, topicConfiguration.PartitionCount) + + for i := 0; i < int(topicConfiguration.PartitionCount); i++ { + if partitionId 
>= 0 && i != partitionId { + continue + } + tp := broker.TopicPartition{ + Namespace: namespace, + Topic: topic, + Partition: int32(i), + } + grpcClientConn, err := mc.findBroker(tp) + if err != nil { + return nil, err + } + ctx, cancel := context.WithCancel(context.Background()) + client, err := setupSubscriberClient(ctx, grpcClientConn, tp, subscriberId, startTime) + if err != nil { + return nil, err + } + subscriberClients[i] = client + subscriberCancels[i] = cancel + } + + return &Subscriber{ + subscriberClients: subscriberClients, + subscriberCancels: subscriberCancels, + subscriberId: subscriberId, + }, nil +} + +func setupSubscriberClient(ctx context.Context, grpcConnection *grpc.ClientConn, tp broker.TopicPartition, subscriberId string, startTime time.Time) (stream messaging_pb.SeaweedMessaging_SubscribeClient, err error) { + stream, err = messaging_pb.NewSeaweedMessagingClient(grpcConnection).Subscribe(ctx) + if err != nil { + return + } + + // send init message + err = stream.Send(&messaging_pb.SubscriberMessage{ + Init: &messaging_pb.SubscriberMessage_InitMessage{ + Namespace: tp.Namespace, + Topic: tp.Topic, + Partition: tp.Partition, + StartPosition: messaging_pb.SubscriberMessage_InitMessage_TIMESTAMP, + TimestampNs: startTime.UnixNano(), + SubscriberId: subscriberId, + }, + }) + if err != nil { + return + } + + return stream, nil +} + +func doSubscribe(subscriberClient messaging_pb.SeaweedMessaging_SubscribeClient, processFn func(m *messaging_pb.Message)) error { + for { + resp, listenErr := subscriberClient.Recv() + if listenErr == io.EOF { + return nil + } + if listenErr != nil { + println(listenErr.Error()) + return listenErr + } + if resp.Data == nil { + // this could be heartbeat from broker + continue + } + processFn(resp.Data) + } +} + +// Subscribe starts goroutines to process the messages +func (s *Subscriber) Subscribe(processFn func(m *messaging_pb.Message)) { + var wg sync.WaitGroup + for i := 0; i < len(s.subscriberClients); i++ { + if s.subscriberClients[i] != nil { + wg.Add(1) + go func(subscriberClient messaging_pb.SeaweedMessaging_SubscribeClient) { + defer wg.Done() + doSubscribe(subscriberClient, processFn) + }(s.subscriberClients[i]) + } + } + wg.Wait() +} + +func (s *Subscriber) Shutdown() { + for i := 0; i < len(s.subscriberClients); i++ { + if s.subscriberCancels[i] != nil { + s.subscriberCancels[i]() + } + } +} diff --git a/weed/notification/aws_sqs/aws_sqs_pub.go b/weed/notification/aws_sqs/aws_sqs_pub.go index 4c1302abb..d881049dd 100644 --- a/weed/notification/aws_sqs/aws_sqs_pub.go +++ b/weed/notification/aws_sqs/aws_sqs_pub.go @@ -27,14 +27,14 @@ func (k *AwsSqsPub) GetName() string { return "aws_sqs" } -func (k *AwsSqsPub) Initialize(configuration util.Configuration) (err error) { - glog.V(0).Infof("filer.notification.aws_sqs.region: %v", configuration.GetString("region")) - glog.V(0).Infof("filer.notification.aws_sqs.sqs_queue_name: %v", configuration.GetString("sqs_queue_name")) +func (k *AwsSqsPub) Initialize(configuration util.Configuration, prefix string) (err error) { + glog.V(0).Infof("filer.notification.aws_sqs.region: %v", configuration.GetString(prefix+"region")) + glog.V(0).Infof("filer.notification.aws_sqs.sqs_queue_name: %v", configuration.GetString(prefix+"sqs_queue_name")) return k.initialize( - configuration.GetString("aws_access_key_id"), - configuration.GetString("aws_secret_access_key"), - configuration.GetString("region"), - configuration.GetString("sqs_queue_name"), + configuration.GetString(prefix+"aws_access_key_id"), + 
configuration.GetString(prefix+"aws_secret_access_key"), + configuration.GetString(prefix+"region"), + configuration.GetString(prefix+"sqs_queue_name"), ) } diff --git a/weed/notification/configuration.go b/weed/notification/configuration.go index 7f8765cc3..541a453e9 100644 --- a/weed/notification/configuration.go +++ b/weed/notification/configuration.go @@ -4,14 +4,13 @@ import ( "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/util" "github.com/golang/protobuf/proto" - "github.com/spf13/viper" ) type MessageQueue interface { // GetName gets the name to locate the configuration in filer.toml file GetName() string // Initialize initializes the file store - Initialize(configuration util.Configuration) error + Initialize(configuration util.Configuration, prefix string) error SendMessage(key string, message proto.Message) error } @@ -21,7 +20,7 @@ var ( Queue MessageQueue ) -func LoadConfiguration(config *viper.Viper) { +func LoadConfiguration(config *util.ViperProxy, prefix string) { if config == nil { return @@ -30,9 +29,8 @@ func LoadConfiguration(config *viper.Viper) { validateOneEnabledQueue(config) for _, queue := range MessageQueues { - if config.GetBool(queue.GetName() + ".enabled") { - viperSub := config.Sub(queue.GetName()) - if err := queue.Initialize(viperSub); err != nil { + if config.GetBool(prefix + queue.GetName() + ".enabled") { + if err := queue.Initialize(config, prefix+queue.GetName()+"."); err != nil { glog.Fatalf("Failed to initialize notification for %s: %+v", queue.GetName(), err) } @@ -44,7 +42,7 @@ func LoadConfiguration(config *viper.Viper) { } -func validateOneEnabledQueue(config *viper.Viper) { +func validateOneEnabledQueue(config *util.ViperProxy) { enabledQueue := "" for _, queue := range MessageQueues { if config.GetBool(queue.GetName() + ".enabled") { diff --git a/weed/notification/gocdk_pub_sub/gocdk_pub_sub.go b/weed/notification/gocdk_pub_sub/gocdk_pub_sub.go index ebf44ea6f..01c4d901f 100644 --- a/weed/notification/gocdk_pub_sub/gocdk_pub_sub.go +++ b/weed/notification/gocdk_pub_sub/gocdk_pub_sub.go @@ -17,23 +17,34 @@ package gocdk_pub_sub import ( "context" "fmt" + "github.com/golang/protobuf/proto" + "github.com/streadway/amqp" + "gocloud.dev/pubsub" + _ "gocloud.dev/pubsub/awssnssqs" + "gocloud.dev/pubsub/rabbitpubsub" + "net/url" + "path" + "time" "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/notification" "github.com/chrislusf/seaweedfs/weed/util" - "github.com/golang/protobuf/proto" - "gocloud.dev/pubsub" - _ "gocloud.dev/pubsub/awssnssqs" // _ "gocloud.dev/pubsub/azuresb" _ "gocloud.dev/pubsub/gcppubsub" _ "gocloud.dev/pubsub/natspubsub" _ "gocloud.dev/pubsub/rabbitpubsub" + "os" ) func init() { notification.MessageQueues = append(notification.MessageQueues, &GoCDKPubSub{}) } +func getPath(rawUrl string) string { + parsedUrl, _ := url.Parse(rawUrl) + return path.Join(parsedUrl.Host, parsedUrl.Path) +} + type GoCDKPubSub struct { topicURL string topic *pubsub.Topic @@ -43,14 +54,37 @@ func (k *GoCDKPubSub) GetName() string { return "gocdk_pub_sub" } -func (k *GoCDKPubSub) Initialize(config util.Configuration) error { - k.topicURL = config.GetString("topic_url") +func (k *GoCDKPubSub) doReconnect() { + var conn *amqp.Connection + if k.topic.As(&conn) { + go func() { + <-conn.NotifyClose(make(chan *amqp.Error)) + conn.Close() + k.topic.Shutdown(context.Background()) + for { + glog.Info("Try reconnect") + conn, err := amqp.Dial(os.Getenv("RABBIT_SERVER_URL")) + if err == nil { + k.topic 
= rabbitpubsub.OpenTopic(conn, getPath(k.topicURL), nil) + k.doReconnect() + break + } + glog.Error(err) + time.Sleep(time.Second) + } + }() + } +} + +func (k *GoCDKPubSub) Initialize(configuration util.Configuration, prefix string) error { + k.topicURL = configuration.GetString(prefix + "topic_url") glog.V(0).Infof("notification.gocdk_pub_sub.topic_url: %v", k.topicURL) topic, err := pubsub.OpenTopic(context.Background(), k.topicURL) if err != nil { glog.Fatalf("Failed to open topic: %v", err) } k.topic = topic + k.doReconnect() return nil } @@ -59,8 +93,7 @@ func (k *GoCDKPubSub) SendMessage(key string, message proto.Message) error { if err != nil { return err } - ctx := context.Background() - err = k.topic.Send(ctx, &pubsub.Message{ + err = k.topic.Send(context.Background(), &pubsub.Message{ Body: bytes, Metadata: map[string]string{"key": key}, }) diff --git a/weed/notification/google_pub_sub/google_pub_sub.go b/weed/notification/google_pub_sub/google_pub_sub.go index 7b26bfe38..363a86eb6 100644 --- a/weed/notification/google_pub_sub/google_pub_sub.go +++ b/weed/notification/google_pub_sub/google_pub_sub.go @@ -25,13 +25,13 @@ func (k *GooglePubSub) GetName() string { return "google_pub_sub" } -func (k *GooglePubSub) Initialize(configuration util.Configuration) (err error) { - glog.V(0).Infof("notification.google_pub_sub.project_id: %v", configuration.GetString("project_id")) - glog.V(0).Infof("notification.google_pub_sub.topic: %v", configuration.GetString("topic")) +func (k *GooglePubSub) Initialize(configuration util.Configuration, prefix string) (err error) { + glog.V(0).Infof("notification.google_pub_sub.project_id: %v", configuration.GetString(prefix+"project_id")) + glog.V(0).Infof("notification.google_pub_sub.topic: %v", configuration.GetString(prefix+"topic")) return k.initialize( - configuration.GetString("google_application_credentials"), - configuration.GetString("project_id"), - configuration.GetString("topic"), + configuration.GetString(prefix+"google_application_credentials"), + configuration.GetString(prefix+"project_id"), + configuration.GetString(prefix+"topic"), ) } diff --git a/weed/notification/kafka/kafka_queue.go b/weed/notification/kafka/kafka_queue.go index fd545722b..8d83b5892 100644 --- a/weed/notification/kafka/kafka_queue.go +++ b/weed/notification/kafka/kafka_queue.go @@ -21,12 +21,12 @@ func (k *KafkaQueue) GetName() string { return "kafka" } -func (k *KafkaQueue) Initialize(configuration util.Configuration) (err error) { - glog.V(0).Infof("filer.notification.kafka.hosts: %v\n", configuration.GetStringSlice("hosts")) - glog.V(0).Infof("filer.notification.kafka.topic: %v\n", configuration.GetString("topic")) +func (k *KafkaQueue) Initialize(configuration util.Configuration, prefix string) (err error) { + glog.V(0).Infof("filer.notification.kafka.hosts: %v\n", configuration.GetStringSlice(prefix+"hosts")) + glog.V(0).Infof("filer.notification.kafka.topic: %v\n", configuration.GetString(prefix+"topic")) return k.initialize( - configuration.GetStringSlice("hosts"), - configuration.GetString("topic"), + configuration.GetStringSlice(prefix+"hosts"), + configuration.GetString(prefix+"topic"), ) } diff --git a/weed/notification/log/log_queue.go b/weed/notification/log/log_queue.go index dcc038dfc..1ca4786a1 100644 --- a/weed/notification/log/log_queue.go +++ b/weed/notification/log/log_queue.go @@ -18,7 +18,7 @@ func (k *LogQueue) GetName() string { return "log" } -func (k *LogQueue) Initialize(configuration util.Configuration) (err error) { +func (k *LogQueue) 
Initialize(configuration util.Configuration, prefix string) (err error) { return nil } diff --git a/weed/operation/assign_file_id.go b/weed/operation/assign_file_id.go index 2dfa44483..ffd3e4938 100644 --- a/weed/operation/assign_file_id.go +++ b/weed/operation/assign_file_id.go @@ -3,11 +3,14 @@ package operation import ( "context" "fmt" + "strings" + + "github.com/chrislusf/seaweedfs/weed/storage/needle" + "google.golang.org/grpc" + "github.com/chrislusf/seaweedfs/weed/pb/master_pb" "github.com/chrislusf/seaweedfs/weed/security" "github.com/chrislusf/seaweedfs/weed/util" - "google.golang.org/grpc" - "strings" ) type VolumeAssignRequest struct { @@ -15,6 +18,7 @@ type VolumeAssignRequest struct { Replication string Collection string Ttl string + DiskType string DataCenter string Rack string DataNode string @@ -30,7 +34,7 @@ type AssignResult struct { Auth security.EncodedJwt `json:"auth,omitempty"` } -func Assign(server string, grpcDialOption grpc.DialOption, primaryRequest *VolumeAssignRequest, alternativeRequests ...*VolumeAssignRequest) (*AssignResult, error) { +func Assign(masterFn GetMasterFn, grpcDialOption grpc.DialOption, primaryRequest *VolumeAssignRequest, alternativeRequests ...*VolumeAssignRequest) (*AssignResult, error) { var requests []*VolumeAssignRequest requests = append(requests, primaryRequest) @@ -44,17 +48,18 @@ func Assign(server string, grpcDialOption grpc.DialOption, primaryRequest *Volum continue } - lastError = WithMasterServerClient(server, grpcDialOption, func(masterClient master_pb.SeaweedClient) error { + lastError = WithMasterServerClient(masterFn(), grpcDialOption, func(masterClient master_pb.SeaweedClient) error { req := &master_pb.AssignRequest{ - Count: primaryRequest.Count, - Replication: primaryRequest.Replication, - Collection: primaryRequest.Collection, - Ttl: primaryRequest.Ttl, - DataCenter: primaryRequest.DataCenter, - Rack: primaryRequest.Rack, - DataNode: primaryRequest.DataNode, - WritableVolumeCount: primaryRequest.WritableVolumeCount, + Count: request.Count, + Replication: request.Replication, + Collection: request.Collection, + Ttl: request.Ttl, + DiskType: request.DiskType, + DataCenter: request.DataCenter, + Rack: request.Rack, + DataNode: request.DataNode, + WritableVolumeCount: request.WritableVolumeCount, } resp, grpcErr := masterClient.Assign(context.Background(), req) if grpcErr != nil { @@ -81,6 +86,7 @@ func Assign(server string, grpcDialOption grpc.DialOption, primaryRequest *Volum continue } + break } return ret, lastError @@ -99,3 +105,44 @@ func LookupJwt(master string, fileId string) security.EncodedJwt { return security.EncodedJwt(tokenStr) } + +type StorageOption struct { + Replication string + DiskType string + Collection string + DataCenter string + Rack string + TtlSeconds int32 + Fsync bool + VolumeGrowthCount uint32 +} + +func (so *StorageOption) TtlString() string { + return needle.SecondsToTTL(so.TtlSeconds) +} + +func (so *StorageOption) ToAssignRequests(count int) (ar *VolumeAssignRequest, altRequest *VolumeAssignRequest) { + ar = &VolumeAssignRequest{ + Count: uint64(count), + Replication: so.Replication, + Collection: so.Collection, + Ttl: so.TtlString(), + DiskType: so.DiskType, + DataCenter: so.DataCenter, + Rack: so.Rack, + WritableVolumeCount: so.VolumeGrowthCount, + } + if so.DataCenter != "" || so.Rack != "" { + altRequest = &VolumeAssignRequest{ + Count: uint64(count), + Replication: so.Replication, + Collection: so.Collection, + Ttl: so.TtlString(), + DiskType: so.DiskType, + DataCenter: "", + Rack: "", + 
WritableVolumeCount: so.VolumeGrowthCount, + } + } + return +} diff --git a/weed/operation/buffer_pool.go b/weed/operation/buffer_pool.go new file mode 100644 index 000000000..9cbe4787f --- /dev/null +++ b/weed/operation/buffer_pool.go @@ -0,0 +1,24 @@ +package operation + +import ( + "github.com/valyala/bytebufferpool" + "sync/atomic" +) + +var bufferCounter int64 + +func GetBuffer() *bytebufferpool.ByteBuffer { + defer func() { + atomic.AddInt64(&bufferCounter, 1) + // println("+", bufferCounter) + }() + return bytebufferpool.Get() +} + +func PutBuffer(buf *bytebufferpool.ByteBuffer) { + defer func() { + atomic.AddInt64(&bufferCounter, -1) + // println("-", bufferCounter) + }() + bytebufferpool.Put(buf) +} diff --git a/weed/operation/chunked_file.go b/weed/operation/chunked_file.go index 295204dd8..8506e0518 100644 --- a/weed/operation/chunked_file.go +++ b/weed/operation/chunked_file.go @@ -8,11 +8,10 @@ import ( "io/ioutil" "net/http" "sort" + "sync" "google.golang.org/grpc" - "sync" - "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/util" ) @@ -41,23 +40,24 @@ type ChunkManifest struct { // seekable chunked file reader type ChunkedFileReader struct { - Manifest *ChunkManifest - Master string - pos int64 - pr *io.PipeReader - pw *io.PipeWriter - mutex sync.Mutex + totalSize int64 + chunkList []*ChunkInfo + master string + pos int64 + pr *io.PipeReader + pw *io.PipeWriter + mutex sync.Mutex } func (s ChunkList) Len() int { return len(s) } func (s ChunkList) Less(i, j int) bool { return s[i].Offset < s[j].Offset } func (s ChunkList) Swap(i, j int) { s[i], s[j] = s[j], s[i] } -func LoadChunkManifest(buffer []byte, isGzipped bool) (*ChunkManifest, error) { - if isGzipped { +func LoadChunkManifest(buffer []byte, isCompressed bool) (*ChunkManifest, error) { + if isCompressed { var err error - if buffer, err = util.UnGzipData(buffer); err != nil { - return nil, err + if buffer, err = util.DecompressData(buffer); err != nil { + glog.V(0).Infof("fail to decompress chunk manifest: %v", err) } } cm := ChunkManifest{} @@ -72,12 +72,12 @@ func (cm *ChunkManifest) Marshal() ([]byte, error) { return json.Marshal(cm) } -func (cm *ChunkManifest) DeleteChunks(master string, grpcDialOption grpc.DialOption) error { +func (cm *ChunkManifest) DeleteChunks(masterFn GetMasterFn, usePublicUrl bool, grpcDialOption grpc.DialOption) error { var fileIds []string for _, ci := range cm.Chunks { fileIds = append(fileIds, ci.Fid) } - results, err := DeleteFiles(master, grpcDialOption, fileIds) + results, err := DeleteFiles(masterFn, usePublicUrl, grpcDialOption, fileIds) if err != nil { glog.V(0).Infof("delete %+v: %v", fileIds, err) return fmt.Errorf("chunk delete: %v", err) @@ -126,16 +126,29 @@ func readChunkNeedle(fileUrl string, w io.Writer, offset int64) (written int64, return io.Copy(w, resp.Body) } +func NewChunkedFileReader(chunkList []*ChunkInfo, master string) *ChunkedFileReader { + var totalSize int64 + for _, chunk := range chunkList { + totalSize += chunk.Size + } + sort.Sort(ChunkList(chunkList)) + return &ChunkedFileReader{ + totalSize: totalSize, + chunkList: chunkList, + master: master, + } +} + func (cf *ChunkedFileReader) Seek(offset int64, whence int) (int64, error) { var err error switch whence { - case 0: - case 1: + case io.SeekStart: + case io.SeekCurrent: offset += cf.pos - case 2: - offset = cf.Manifest.Size - offset + case io.SeekEnd: + offset = cf.totalSize + offset } - if offset > cf.Manifest.Size { + if offset > cf.totalSize { err = ErrInvalidRange } if 
cf.pos != offset { @@ -146,10 +159,9 @@ func (cf *ChunkedFileReader) Seek(offset int64, whence int) (int64, error) { } func (cf *ChunkedFileReader) WriteTo(w io.Writer) (n int64, err error) { - cm := cf.Manifest chunkIndex := -1 chunkStartOffset := int64(0) - for i, ci := range cm.Chunks { + for i, ci := range cf.chunkList { if cf.pos >= ci.Offset && cf.pos < ci.Offset+ci.Size { chunkIndex = i chunkStartOffset = cf.pos - ci.Offset @@ -159,10 +171,12 @@ func (cf *ChunkedFileReader) WriteTo(w io.Writer) (n int64, err error) { if chunkIndex < 0 { return n, ErrInvalidRange } - for ; chunkIndex < cm.Chunks.Len(); chunkIndex++ { - ci := cm.Chunks[chunkIndex] + for ; chunkIndex < len(cf.chunkList); chunkIndex++ { + ci := cf.chunkList[chunkIndex] // TODO: check whether we should read data from the local volume server first - fileUrl, lookupError := LookupFileId(cf.Master, ci.Fid) + fileUrl, lookupError := LookupFileId(func() string { + return cf.master + }, ci.Fid) if lookupError != nil { return n, lookupError } diff --git a/weed/operation/delete_content.go b/weed/operation/delete_content.go index 358399324..8f87882b1 100644 --- a/weed/operation/delete_content.go +++ b/weed/operation/delete_content.go @@ -4,12 +4,12 @@ import ( "context" "errors" "fmt" - "github.com/chrislusf/seaweedfs/weed/glog" - "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb" "google.golang.org/grpc" "net/http" "strings" "sync" + + "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb" ) type DeleteResult struct { @@ -28,10 +28,18 @@ func ParseFileId(fid string) (vid string, key_cookie string, err error) { } // DeleteFiles batch deletes a list of fileIds -func DeleteFiles(master string, grpcDialOption grpc.DialOption, fileIds []string) ([]*volume_server_pb.DeleteResult, error) { - - lookupFunc := func(vids []string) (map[string]LookupResult, error) { - return LookupVolumeIds(master, grpcDialOption, vids) +func DeleteFiles(masterFn GetMasterFn, usePublicUrl bool, grpcDialOption grpc.DialOption, fileIds []string) ([]*volume_server_pb.DeleteResult, error) { + + lookupFunc := func(vids []string) (results map[string]LookupResult, err error) { + results, err = LookupVolumeIds(masterFn, grpcDialOption, vids) + if err == nil && usePublicUrl { + for _, result := range results { + for _, loc := range result.Locations { + loc.Url = loc.PublicUrl + } + } + } + return } return DeleteFilesWithLookupVolumeId(grpcDialOption, fileIds, lookupFunc) @@ -92,9 +100,9 @@ func DeleteFilesWithLookupVolumeId(grpcDialOption grpc.DialOption, fileIds []str go func(server string, fidList []string) { defer wg.Done() - if deleteResults, deleteErr := DeleteFilesAtOneVolumeServer(server, grpcDialOption, fidList); deleteErr != nil { + if deleteResults, deleteErr := DeleteFilesAtOneVolumeServer(server, grpcDialOption, fidList, true); deleteErr != nil { err = deleteErr - } else { + } else if deleteResults != nil { resultChan <- deleteResults } @@ -107,18 +115,17 @@ func DeleteFilesWithLookupVolumeId(grpcDialOption grpc.DialOption, fileIds []str ret = append(ret, result...) 
} - glog.V(1).Infof("deleted %d items", len(ret)) - return ret, err } // DeleteFilesAtOneVolumeServer deletes a list of files that is on one volume server via gRpc -func DeleteFilesAtOneVolumeServer(volumeServer string, grpcDialOption grpc.DialOption, fileIds []string) (ret []*volume_server_pb.DeleteResult, err error) { +func DeleteFilesAtOneVolumeServer(volumeServer string, grpcDialOption grpc.DialOption, fileIds []string, includeCookie bool) (ret []*volume_server_pb.DeleteResult, err error) { err = WithVolumeServerClient(volumeServer, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error { req := &volume_server_pb.BatchDeleteRequest{ - FileIds: fileIds, + FileIds: fileIds, + SkipCookieCheck: !includeCookie, } resp, err := volumeServerClient.BatchDelete(context.Background(), req) diff --git a/weed/operation/grpc_client.go b/weed/operation/grpc_client.go index f6b2b69e9..025a65b38 100644 --- a/weed/operation/grpc_client.go +++ b/weed/operation/grpc_client.go @@ -1,27 +1,27 @@ package operation import ( - "context" "fmt" + "strconv" + "strings" + + "google.golang.org/grpc" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" "github.com/chrislusf/seaweedfs/weed/pb/master_pb" "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb" - "github.com/chrislusf/seaweedfs/weed/util" - "google.golang.org/grpc" - "strconv" - "strings" ) func WithVolumeServerClient(volumeServer string, grpcDialOption grpc.DialOption, fn func(volume_server_pb.VolumeServerClient) error) error { - ctx := context.Background() - grpcAddress, err := toVolumeServerGrpcAddress(volumeServer) if err != nil { - return err + return fmt.Errorf("failed to parse volume server %v: %v", volumeServer, err) } - return util.WithCachedGrpcClient(ctx, func(grpcConnection *grpc.ClientConn) error { + return pb.WithCachedGrpcClient(func(grpcConnection *grpc.ClientConn) error { client := volume_server_pb.NewVolumeServerClient(grpcConnection) return fn(client) }, grpcAddress, grpcDialOption) @@ -40,16 +40,28 @@ func toVolumeServerGrpcAddress(volumeServer string) (grpcAddress string, err err func WithMasterServerClient(masterServer string, grpcDialOption grpc.DialOption, fn func(masterClient master_pb.SeaweedClient) error) error { - ctx := context.Background() - - masterGrpcAddress, parseErr := util.ParseServerToGrpcAddress(masterServer) + masterGrpcAddress, parseErr := pb.ParseServerToGrpcAddress(masterServer) if parseErr != nil { - return fmt.Errorf("failed to parse master grpc %v: %v", masterServer, parseErr) + return fmt.Errorf("failed to parse master %v: %v", masterServer, parseErr) } - return util.WithCachedGrpcClient(ctx, func(grpcConnection *grpc.ClientConn) error { + return pb.WithCachedGrpcClient(func(grpcConnection *grpc.ClientConn) error { client := master_pb.NewSeaweedClient(grpcConnection) return fn(client) }, masterGrpcAddress, grpcDialOption) } + +func WithFilerServerClient(filerServer string, grpcDialOption grpc.DialOption, fn func(filerClient filer_pb.SeaweedFilerClient) error) error { + + filerGrpcAddress, parseErr := pb.ParseServerToGrpcAddress(filerServer) + if parseErr != nil { + return fmt.Errorf("failed to parse filer %v: %v", filerServer, parseErr) + } + + return pb.WithCachedGrpcClient(func(grpcConnection *grpc.ClientConn) error { + client := filer_pb.NewSeaweedFilerClient(grpcConnection) + return fn(client) + }, filerGrpcAddress, grpcDialOption) + +} diff --git a/weed/operation/lookup.go 
b/weed/operation/lookup.go index d0773e7fd..0372e47b0 100644 --- a/weed/operation/lookup.go +++ b/weed/operation/lookup.go @@ -33,10 +33,10 @@ var ( vc VidCache // caching of volume locations, re-check after 10 minutes ) -func Lookup(server string, vid string) (ret *LookupResult, err error) { +func Lookup(masterFn GetMasterFn, vid string) (ret *LookupResult, err error) { locations, cache_err := vc.Get(vid) if cache_err != nil { - if ret, err = do_lookup(server, vid); err == nil { + if ret, err = do_lookup(masterFn, vid); err == nil { vc.Set(vid, ret.Locations, 10*time.Minute) } } else { @@ -45,9 +45,10 @@ func Lookup(server string, vid string) (ret *LookupResult, err error) { return } -func do_lookup(server string, vid string) (*LookupResult, error) { +func do_lookup(masterFn GetMasterFn, vid string) (*LookupResult, error) { values := make(url.Values) values.Add("volumeId", vid) + server := masterFn() jsonBlob, err := util.Post("http://"+server+"/dir/lookup", values) if err != nil { return nil, err @@ -63,12 +64,12 @@ func do_lookup(server string, vid string) (*LookupResult, error) { return &ret, nil } -func LookupFileId(server string, fileId string) (fullUrl string, err error) { +func LookupFileId(masterFn GetMasterFn, fileId string) (fullUrl string, err error) { parts := strings.Split(fileId, ",") if len(parts) != 2 { return "", errors.New("Invalid fileId " + fileId) } - lookup, lookupError := Lookup(server, parts[0]) + lookup, lookupError := Lookup(masterFn, parts[0]) if lookupError != nil { return "", lookupError } @@ -79,7 +80,7 @@ } // LookupVolumeIds find volume locations by cache and actual lookup -func LookupVolumeIds(server string, grpcDialOption grpc.DialOption, vids []string) (map[string]LookupResult, error) { +func LookupVolumeIds(masterFn GetMasterFn, grpcDialOption grpc.DialOption, vids []string) (map[string]LookupResult, error) { ret := make(map[string]LookupResult) var unknown_vids []string @@ -99,7 +100,7 @@ //only query unknown_vids - err := WithMasterServerClient(server, grpcDialOption, func(masterClient master_pb.SeaweedClient) error { + err := WithMasterServerClient(masterFn(), grpcDialOption, func(masterClient master_pb.SeaweedClient) error { req := &master_pb.LookupVolumeRequest{ VolumeIds: unknown_vids, diff --git a/weed/operation/needle_parse_test.go b/weed/operation/needle_parse_test.go new file mode 100644 index 000000000..202374e1b --- /dev/null +++ b/weed/operation/needle_parse_test.go @@ -0,0 +1,131 @@ +package operation + +import ( + "bytes" + "fmt" + "io" + "net/http" + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/chrislusf/seaweedfs/weed/storage/needle" + "github.com/chrislusf/seaweedfs/weed/util" +) + +type MockClient struct { + needleHandling func(n *needle.Needle, originalSize int, e error) +} + +func (m *MockClient) Do(req *http.Request) (*http.Response, error) { + n, originalSize, _, err := needle.CreateNeedleFromRequest(req, false, 1024*1024) + if m.needleHandling != nil { + m.needleHandling(n, originalSize, err) + } + return &http.Response{ + StatusCode: http.StatusNoContent, + }, io.EOF +} + +/* + +The mime type is always the value passed in. + +Whether to compress depends on content detection, the file name extension, and the compression ratio. + +If the content is already compressed, the original content size needs to be known.
+ +*/ + +func TestCreateNeedleFromRequest(t *testing.T) { + mc := &MockClient{} + tmp := HttpClient + HttpClient = mc + defer func() { + HttpClient = tmp + }() + + { + mc.needleHandling = func(n *needle.Needle, originalSize int, err error) { + assert.Equal(t, nil, err, "upload: %v", err) + assert.Equal(t, "", string(n.Mime), "mime detection failed: %v", string(n.Mime)) + assert.Equal(t, true, n.IsCompressed(), "this should be compressed") + assert.Equal(t, true, util.IsGzippedContent(n.Data), "this should be gzip") + fmt.Printf("needle: %v, originalSize: %d\n", n, originalSize) + } + uploadResult, err, data := Upload("http://localhost:8080/389,0f084d17353afda0", "t.txt", false, bytes.NewReader([]byte(textContent)), false, "", nil, "") + if len(data) != len(textContent) { + t.Errorf("data actual %d expected %d", len(data), len(textContent)) + } + if err != nil { + fmt.Printf("err: %v\n", err) + } + fmt.Printf("uploadResult: %+v\n", uploadResult) + } + + { + mc.needleHandling = func(n *needle.Needle, originalSize int, err error) { + assert.Equal(t, nil, err, "upload: %v", err) + assert.Equal(t, "text/plain", string(n.Mime), "mime detection failed: %v", string(n.Mime)) + assert.Equal(t, true, n.IsCompressed(), "this should be compressed") + assert.Equal(t, true, util.IsGzippedContent(n.Data), "this should be gzip") + fmt.Printf("needle: %v, dataSize:%d originalSize:%d\n", n, len(n.Data), originalSize) + } + gzippedData, _ := util.GzipData([]byte(textContent)) + Upload("http://localhost:8080/389,0f084d17353afda0", "t.txt", false, bytes.NewReader(gzippedData), true, "text/plain", nil, "") + } + + /* + { + mc.needleHandling = func(n *needle.Needle, originalSize int, err error) { + assert.Equal(t, nil, err, "upload: %v", err) + assert.Equal(t, "text/plain", string(n.Mime), "mime detection failed: %v", string(n.Mime)) + assert.Equal(t, true, n.IsCompressed(), "this should be compressed") + assert.Equal(t, true, util.IsZstdContent(n.Data), "this should be zstd") + fmt.Printf("needle: %v, dataSize:%d originalSize:%d\n", n, len(n.Data), originalSize) + } + zstdData, _ := util.ZstdData([]byte(textContent)) + Upload("http://localhost:8080/389,0f084d17353afda0", "t.txt", false, bytes.NewReader(zstdData), true, "text/plain", nil, "") + } + + { + mc.needleHandling = func(n *needle.Needle, originalSize int, err error) { + assert.Equal(t, nil, err, "upload: %v", err) + assert.Equal(t, "application/zstd", string(n.Mime), "mime detection failed: %v", string(n.Mime)) + assert.Equal(t, false, n.IsCompressed(), "this should not be compressed") + assert.Equal(t, true, util.IsZstdContent(n.Data), "this should still be zstd") + fmt.Printf("needle: %v, dataSize:%d originalSize:%d\n", n, len(n.Data), originalSize) + } + zstdData, _ := util.ZstdData([]byte(textContent)) + Upload("http://localhost:8080/389,0f084d17353afda0", "t.txt", false, bytes.NewReader(zstdData), false, "application/zstd", nil, "") + } + */ + +} + +var textContent = `Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. 
nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +` diff --git a/weed/operation/stats.go b/weed/operation/stats.go deleted file mode 100644 index b69a33750..000000000 --- a/weed/operation/stats.go +++ /dev/null @@ -1,26 +0,0 @@ -package operation - -import ( - "context" - "google.golang.org/grpc" - - "github.com/chrislusf/seaweedfs/weed/pb/master_pb" -) - -func Statistics(server string, grpcDialOption grpc.DialOption, req *master_pb.StatisticsRequest) (resp *master_pb.StatisticsResponse, err error) { - - err = WithMasterServerClient(server, grpcDialOption, func(masterClient master_pb.SeaweedClient) error { - - grpcResponse, grpcErr := masterClient.Statistics(context.Background(), req) - if grpcErr != nil { - return grpcErr - } - - resp = grpcResponse - - return nil - - }) - - return -} diff --git a/weed/operation/submit.go b/weed/operation/submit.go index 62f067430..87c5e4279 100644 --- a/weed/operation/submit.go +++ b/weed/operation/submit.go @@ -1,8 +1,6 @@ package operation import ( - "bytes" - "google.golang.org/grpc" "io" "mime" "net/url" @@ -11,6 +9,8 @@ import ( "strconv" "strings" + "google.golang.org/grpc" + "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/security" ) @@ -25,20 +25,23 @@ type FilePart struct { Collection string DataCenter string Ttl string + DiskType string Server string //this comes from assign result Fid string //this comes from assign result, but customizable + Fsync bool } type SubmitResult struct { FileName string `json:"fileName,omitempty"` - FileUrl string `json:"fileUrl,omitempty"` + FileUrl string `json:"url,omitempty"` Fid string `json:"fid,omitempty"` Size uint32 `json:"size,omitempty"` Error string `json:"error,omitempty"` } -func SubmitFiles(master string, grpcDialOption grpc.DialOption, files []FilePart, - replication string, collection string, dataCenter string, ttl string, maxMB int) ([]SubmitResult, error) { +type GetMasterFn func() string + +func SubmitFiles(masterFn GetMasterFn, grpcDialOption grpc.DialOption, files []FilePart, replication string, collection string, dataCenter string, ttl string, diskType string, maxMB int, usePublicUrl bool) ([]SubmitResult, error) { results := make([]SubmitResult, len(files)) for index, file := range files { results[index].FileName = file.FileName @@ -49,10 +52,11 @@ func SubmitFiles(master string, grpcDialOption grpc.DialOption, files []FilePart Collection: collection, DataCenter: dataCenter, Ttl: ttl, + DiskType: diskType, } - ret, err := Assign(master, grpcDialOption, ar) + ret, err := Assign(masterFn, grpcDialOption, ar) if err != nil { - for index, _ := range files { + for index := range files { results[index].Error = 
err.Error() } return results, err @@ -63,10 +67,15 @@ func SubmitFiles(master string, grpcDialOption grpc.DialOption, files []FilePart file.Fid = file.Fid + "_" + strconv.Itoa(index) } file.Server = ret.Url + if usePublicUrl { + file.Server = ret.PublicUrl + } file.Replication = replication file.Collection = collection file.DataCenter = dataCenter - results[index].Size, err = file.Upload(maxMB, master, ret.Auth, grpcDialOption) + file.Ttl = ttl + file.DiskType = diskType + results[index].Size, err = file.Upload(maxMB, masterFn, usePublicUrl, ret.Auth, grpcDialOption) if err != nil { results[index].Error = err.Error() } @@ -109,11 +118,14 @@ func newFilePart(fullPathFilename string) (ret FilePart, err error) { return ret, nil } -func (fi FilePart) Upload(maxMB int, master string, jwt security.EncodedJwt, grpcDialOption grpc.DialOption) (retSize uint32, err error) { +func (fi FilePart) Upload(maxMB int, masterFn GetMasterFn, usePublicUrl bool, jwt security.EncodedJwt, grpcDialOption grpc.DialOption) (retSize uint32, err error) { fileUrl := "http://" + fi.Server + "/" + fi.Fid if fi.ModTime != 0 { fileUrl += "?ts=" + strconv.Itoa(int(fi.ModTime)) } + if fi.Fsync { + if strings.Contains(fileUrl, "?") { + fileUrl += "&fsync=true" + } else { + fileUrl += "?fsync=true" + } + } if closer, ok := fi.Reader.(io.Closer); ok { defer closer.Close() } @@ -136,8 +148,9 @@ func (fi FilePart) Upload(maxMB int, master string, jwt security.EncodedJwt, grp Replication: fi.Replication, Collection: fi.Collection, Ttl: fi.Ttl, + DiskType: fi.DiskType, } - ret, err = Assign(master, grpcDialOption, ar) + ret, err = Assign(masterFn, grpcDialOption, ar) if err != nil { return } @@ -149,11 +162,12 @@ func (fi FilePart) Upload(maxMB int, master string, jwt security.EncodedJwt, grp Replication: fi.Replication, Collection: fi.Collection, Ttl: fi.Ttl, + DiskType: fi.DiskType, } - ret, err = Assign(master, grpcDialOption, ar) + ret, err = Assign(masterFn, grpcDialOption, ar) if err != nil { // delete all uploaded chunks - cm.DeleteChunks(master, grpcDialOption) + cm.DeleteChunks(masterFn, usePublicUrl, grpcDialOption) return } id = ret.Fid @@ -164,14 +178,17 @@ func (fi FilePart) Upload(maxMB int, master string, jwt security.EncodedJwt, grp } } fileUrl := "http://" + ret.Url + "/" + id + if usePublicUrl { + fileUrl = "http://" + ret.PublicUrl + "/" + id + } count, e := upload_one_chunk( baseName+"-"+strconv.FormatInt(i+1, 10), io.LimitReader(fi.Reader, chunkSize), - master, fileUrl, + masterFn, fileUrl, ret.Auth) if e != nil { // delete all uploaded chunks - cm.DeleteChunks(master, grpcDialOption) + cm.DeleteChunks(masterFn, usePublicUrl, grpcDialOption) return 0, e } cm.Chunks = append(cm.Chunks, @@ -186,10 +203,10 @@ func (fi FilePart) Upload(maxMB int, master string, jwt security.EncodedJwt, grp err = upload_chunked_file_manifest(fileUrl, &cm, jwt) if err != nil { // delete all uploaded chunks - cm.DeleteChunks(master, grpcDialOption) + cm.DeleteChunks(masterFn, usePublicUrl, grpcDialOption) } } else { - ret, e := Upload(fileUrl, baseName, fi.Reader, false, fi.MimeType, nil, jwt) + ret, e, _ := Upload(fileUrl, baseName, false, fi.Reader, false, fi.MimeType, nil, jwt) if e != nil { return 0, e } @@ -198,12 +215,11 @@ func (fi FilePart) Upload(maxMB int, master string, jwt security.EncodedJwt, grp return } -func upload_one_chunk(filename string, reader io.Reader, master, +func upload_one_chunk(filename string, reader io.Reader, masterFn GetMasterFn, fileUrl string, jwt security.EncodedJwt, ) (size uint32, e error) { glog.V(4).Info("Uploading part ", filename, " to ", fileUrl, "...") - uploadResult, 
uploadError := Upload(fileUrl, filename, reader, false, - "", nil, jwt) + uploadResult, uploadError, _ := Upload(fileUrl, filename, false, reader, false, "", nil, jwt) if uploadError != nil { return 0, uploadError } @@ -215,12 +231,11 @@ func upload_chunked_file_manifest(fileUrl string, manifest *ChunkManifest, jwt s if e != nil { return e } - bufReader := bytes.NewReader(buf) glog.V(4).Info("Uploading chunks manifest ", manifest.Name, " to ", fileUrl, "...") u, _ := url.Parse(fileUrl) q := u.Query() q.Set("cm", "true") u.RawQuery = q.Encode() - _, e = Upload(u.String(), manifest.Name, bufReader, false, "application/json", nil, jwt) + _, e = UploadData(u.String(), manifest.Name, false, buf, false, "application/json", nil, jwt) return e } diff --git a/weed/operation/tail_volume.go b/weed/operation/tail_volume.go index b53f18ce1..045948274 100644 --- a/weed/operation/tail_volume.go +++ b/weed/operation/tail_volume.go @@ -5,14 +5,15 @@ import ( "fmt" "io" + "google.golang.org/grpc" + "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb" "github.com/chrislusf/seaweedfs/weed/storage/needle" - "google.golang.org/grpc" ) -func TailVolume(master string, grpcDialOption grpc.DialOption, vid needle.VolumeId, sinceNs uint64, timeoutSeconds int, fn func(n *needle.Needle) error) error { +func TailVolume(masterFn GetMasterFn, grpcDialOption grpc.DialOption, vid needle.VolumeId, sinceNs uint64, timeoutSeconds int, fn func(n *needle.Needle) error) error { // find volume location, replication, ttl info - lookup, err := Lookup(master, vid.String()) + lookup, err := Lookup(masterFn, vid.String()) if err != nil { return fmt.Errorf("look up volume %d: %v", vid, err) } @@ -27,8 +28,10 @@ func TailVolume(master string, grpcDialOption grpc.DialOption, vid needle.Volume func TailVolumeFromSource(volumeServer string, grpcDialOption grpc.DialOption, vid needle.VolumeId, sinceNs uint64, idleTimeoutSeconds int, fn func(n *needle.Needle) error) error { return WithVolumeServerClient(volumeServer, grpcDialOption, func(client volume_server_pb.VolumeServerClient) error { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - stream, err := client.VolumeTailSender(context.Background(), &volume_server_pb.VolumeTailSenderRequest{ + stream, err := client.VolumeTailSender(ctx, &volume_server_pb.VolumeTailSenderRequest{ VolumeId: uint32(vid), SinceNs: sinceNs, IdleTimeoutSeconds: uint32(idleTimeoutSeconds), diff --git a/weed/operation/upload_content.go b/weed/operation/upload_content.go index c387d0230..944186eeb 100644 --- a/weed/operation/upload_content.go +++ b/weed/operation/upload_content.go @@ -2,10 +2,7 @@ package operation import ( "bytes" - "compress/flate" - "compress/gzip" "encoding/json" - "errors" "fmt" "io" "io/ioutil" @@ -15,73 +12,188 @@ import ( "net/textproto" "path/filepath" "strings" + "time" "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" "github.com/chrislusf/seaweedfs/weed/security" "github.com/chrislusf/seaweedfs/weed/util" ) type UploadResult struct { - Name string `json:"name,omitempty"` - Size uint32 `json:"size,omitempty"` - Error string `json:"error,omitempty"` - ETag string `json:"eTag,omitempty"` + Name string `json:"name,omitempty"` + Size uint32 `json:"size,omitempty"` + Error string `json:"error,omitempty"` + ETag string `json:"eTag,omitempty"` + CipherKey []byte `json:"cipherKey,omitempty"` + Mime string `json:"mime,omitempty"` + Gzip uint32 `json:"gzip,omitempty"` + ContentMd5 string `json:"contentMd5,omitempty"` + 
RetryCount int `json:"-"` +} + +func (uploadResult *UploadResult) ToPbFileChunk(fileId string, offset int64) *filer_pb.FileChunk { + fid, _ := filer_pb.ToFileIdObject(fileId) + return &filer_pb.FileChunk{ + FileId: fileId, + Offset: offset, + Size: uint64(uploadResult.Size), + Mtime: time.Now().UnixNano(), + ETag: uploadResult.ETag, + CipherKey: uploadResult.CipherKey, + IsCompressed: uploadResult.Gzip > 0, + Fid: fid, + } +} + +// HTTPClient interface for testing +type HTTPClient interface { + Do(req *http.Request) (*http.Response, error) +} + var ( - client *http.Client + HttpClient HTTPClient ) func init() { - client = &http.Client{Transport: &http.Transport{ + HttpClient = &http.Client{Transport: &http.Transport{ + MaxIdleConns: 1024, MaxIdleConnsPerHost: 1024, }} } -var fileNameEscaper = strings.NewReplacer("\\", "\\\\", "\"", "\\\"") +var fileNameEscaper = strings.NewReplacer(`\`, `\\`, `"`, `\"`) // UploadData sends a POST request to a volume server to upload the given bytes, with retries -func UploadWithLocalCompressionLevel(uploadUrl string, filename string, reader io.Reader, isGzipped bool, mtype string, pairMap map[string]string, jwt security.EncodedJwt, compressionLevel int) (*UploadResult, error) { - if compressionLevel < 1 { - compressionLevel = 1 - } - if compressionLevel > 9 { - compressionLevel = 9 - } - return doUpload(uploadUrl, filename, reader, isGzipped, mtype, pairMap, compressionLevel, jwt) +func UploadData(uploadUrl string, filename string, cipher bool, data []byte, isInputCompressed bool, mtype string, pairMap map[string]string, jwt security.EncodedJwt) (uploadResult *UploadResult, err error) { + uploadResult, err = retriedUploadData(uploadUrl, filename, cipher, data, isInputCompressed, mtype, pairMap, jwt) + return } // Upload sends a POST request to a volume server to upload the content read from the reader, with retries -func Upload(uploadUrl string, filename string, reader io.Reader, isGzipped bool, mtype string, pairMap map[string]string, jwt security.EncodedJwt) (*UploadResult, error) { - return doUpload(uploadUrl, filename, reader, isGzipped, mtype, pairMap, flate.BestSpeed, jwt) +func Upload(uploadUrl string, filename string, cipher bool, reader io.Reader, isInputCompressed bool, mtype string, pairMap map[string]string, jwt security.EncodedJwt) (uploadResult *UploadResult, err error, data []byte) { + uploadResult, err, data = doUpload(uploadUrl, filename, cipher, reader, isInputCompressed, mtype, pairMap, jwt) + return +} + +func doUpload(uploadUrl string, filename string, cipher bool, reader io.Reader, isInputCompressed bool, mtype string, pairMap map[string]string, jwt security.EncodedJwt) (uploadResult *UploadResult, err error, data []byte) { + bytesReader, ok := reader.(*util.BytesReader) + if ok { + data = bytesReader.Bytes + } else { + data, err = ioutil.ReadAll(reader) + if err != nil { + err = fmt.Errorf("read input: %v", err) + return + } + } + uploadResult, uploadErr := retriedUploadData(uploadUrl, filename, cipher, data, isInputCompressed, mtype, pairMap, jwt) + return uploadResult, uploadErr, data } -func doUpload(uploadUrl string, filename string, reader io.Reader, isGzipped bool, mtype string, pairMap map[string]string, compression int, jwt security.EncodedJwt) (*UploadResult, error) { - contentIsGzipped := isGzipped +func retriedUploadData(uploadUrl string, filename string, cipher bool, data []byte, isInputCompressed bool, mtype string, pairMap map[string]string, jwt security.EncodedJwt) (uploadResult *UploadResult, err error) { + for i := 0; i
< 3; i++ { + uploadResult, err = doUploadData(uploadUrl, filename, cipher, data, isInputCompressed, mtype, pairMap, jwt) + if err == nil { + uploadResult.RetryCount = i + return + } else { + glog.Warningf("uploading to %s: %v", uploadUrl, err) + } + time.Sleep(time.Millisecond * time.Duration(237*(i+1))) + } + return +} + +func doUploadData(uploadUrl string, filename string, cipher bool, data []byte, isInputCompressed bool, mtype string, pairMap map[string]string, jwt security.EncodedJwt) (uploadResult *UploadResult, err error) { + contentIsGzipped := isInputCompressed shouldGzipNow := false - if !isGzipped { - if shouldBeZipped, iAmSure := util.IsGzippableFileType(filepath.Base(filename), mtype); iAmSure && shouldBeZipped { + if !isInputCompressed { + if mtype == "" { + mtype = http.DetectContentType(data) + // println("detect1 mimetype to", mtype) + if mtype == "application/octet-stream" { + mtype = "" + } + } + if shouldBeCompressed, iAmSure := util.IsCompressableFileType(filepath.Base(filename), mtype); iAmSure && shouldBeCompressed { shouldGzipNow = true + } else if !iAmSure && mtype == "" && len(data) > 16*1024 { + var compressed []byte + compressed, err = util.GzipData(data[0:128]) + shouldGzipNow = len(compressed)*10 < 128*9 // compress only if the 128-byte sample shrinks below 90% + } + } + + var clearDataLen int + + // gzip if possible + // this could be double copying + clearDataLen = len(data) + clearData := data + if shouldGzipNow && !cipher { + compressed, compressErr := util.GzipData(data) + // fmt.Printf("data is compressed from %d ==> %d\n", len(data), len(compressed)) + if compressErr == nil { + data = compressed contentIsGzipped = true } + } else if isInputCompressed { + // just to get the clear data length + clearData, err = util.DecompressData(data) + if err == nil { + clearDataLen = len(clearData) + } } - return upload_content(uploadUrl, func(w io.Writer) (err error) { - if shouldGzipNow { - gzWriter, _ := gzip.NewWriterLevel(w, compression) - _, err = io.Copy(gzWriter, reader) - gzWriter.Close() - } else { - _, err = io.Copy(w, reader) + + if cipher { + // encrypt the clear data; gzip is skipped when cipher is enabled + + // encrypt + cipherKey := util.GenCipherKey() + encryptedData, encryptionErr := util.Encrypt(clearData, cipherKey) + if encryptionErr != nil { + err = fmt.Errorf("encrypt input: %v", encryptionErr) + return } - return - }, filename, contentIsGzipped, mtype, pairMap, jwt) + + // upload data + uploadResult, err = upload_content(uploadUrl, func(w io.Writer) (err error) { + _, err = w.Write(encryptedData) + return + }, "", false, len(encryptedData), "", nil, jwt) + if uploadResult == nil { + return + } + uploadResult.Name = filename + uploadResult.Mime = mtype + uploadResult.CipherKey = cipherKey + uploadResult.Size = uint32(clearDataLen) + } else { + // upload data + uploadResult, err = upload_content(uploadUrl, func(w io.Writer) (err error) { + _, err = w.Write(data) + return + }, filename, contentIsGzipped, len(data), mtype, pairMap, jwt) + if uploadResult == nil { + return + } + uploadResult.Size = uint32(clearDataLen) + if contentIsGzipped { + uploadResult.Gzip = 1 + } + } + + return uploadResult, err } -func upload_content(uploadUrl string, fillBufferFunction func(w io.Writer) error, filename string, isGzipped bool, mtype string, pairMap map[string]string, jwt security.EncodedJwt) (*UploadResult, error) { - body_buf := bytes.NewBufferString("") - body_writer := multipart.NewWriter(body_buf) +func upload_content(uploadUrl string, fillBufferFunction func(w io.Writer) error, filename string, isGzipped bool, 
originalDataSize int, mtype string, pairMap map[string]string, jwt security.EncodedJwt) (*UploadResult, error) { + buf := GetBuffer() + defer PutBuffer(buf) + body_writer := multipart.NewWriter(buf) h := make(textproto.MIMEHeader) h.Set("Content-Disposition", fmt.Sprintf(`form-data; name="file"; filename="%s"`, fileNameEscaper.Replace(filename))) + h.Set("Idempotency-Key", uploadUrl) if mtype == "" { mtype = mime.TypeByExtension(strings.ToLower(filepath.Ext(filename))) } @@ -107,10 +219,10 @@ func upload_content(uploadUrl string, fillBufferFunction func(w io.Writer) error return nil, err } - req, postErr := http.NewRequest("POST", uploadUrl, body_buf) + req, postErr := http.NewRequest("POST", uploadUrl, bytes.NewReader(buf.Bytes())) if postErr != nil { - glog.V(0).Infoln("failing to upload to", uploadUrl, postErr.Error()) - return nil, postErr + glog.V(1).Infof("create upload request %s: %v", uploadUrl, postErr) + return nil, fmt.Errorf("create upload request %s: %v", uploadUrl, postErr) } req.Header.Set("Content-Type", content_type) for k, v := range pairMap { @@ -119,27 +231,42 @@ func upload_content(uploadUrl string, fillBufferFunction func(w io.Writer) error if jwt != "" { req.Header.Set("Authorization", "BEARER "+string(jwt)) } - resp, post_err := client.Do(req) + // print("+") + resp, post_err := HttpClient.Do(req) if post_err != nil { - glog.V(0).Infoln("failing to upload to", uploadUrl, post_err.Error()) - return nil, post_err + if strings.Contains(post_err.Error(), "connection reset by peer") || + strings.Contains(post_err.Error(), "use of closed network connection") { + resp, post_err = HttpClient.Do(req) + } } - defer resp.Body.Close() + if post_err != nil { + return nil, fmt.Errorf("upload %s %d bytes to %v: %v", filename, originalDataSize, uploadUrl, post_err) + } + // print("-") + defer util.CloseResponse(resp) + + var ret UploadResult etag := getEtag(resp) + if resp.StatusCode == http.StatusNoContent { + ret.ETag = etag + return &ret, nil + } + resp_body, ra_err := ioutil.ReadAll(resp.Body) if ra_err != nil { - return nil, ra_err + return nil, fmt.Errorf("read response body %v: %v", uploadUrl, ra_err) } - var ret UploadResult + unmarshal_err := json.Unmarshal(resp_body, &ret) if unmarshal_err != nil { - glog.V(0).Infoln("failing to read upload response", uploadUrl, string(resp_body)) - return nil, unmarshal_err + glog.Errorf("unmarshal %s: %v", uploadUrl, string(resp_body)) + return nil, fmt.Errorf("unmarshal %v: %v", uploadUrl, unmarshal_err) } if ret.Error != "" { - return nil, errors.New(ret.Error) + return nil, fmt.Errorf("unmarshalled error %v: %v", uploadUrl, ret.Error) } ret.ETag = etag + ret.ContentMd5 = resp.Header.Get("Content-MD5") return &ret, nil } diff --git a/weed/pb/Makefile b/weed/pb/Makefile index c50410574..d2618937b 100644 --- a/weed/pb/Makefile +++ b/weed/pb/Makefile @@ -3,8 +3,10 @@ all: gen .PHONY : gen gen: - protoc master.proto --go_out=plugins=grpc:./master_pb - protoc volume_server.proto --go_out=plugins=grpc:./volume_server_pb - protoc filer.proto --go_out=plugins=grpc:./filer_pb + protoc master.proto --go_out=plugins=grpc:./master_pb --go_opt=paths=source_relative + protoc volume_server.proto --go_out=plugins=grpc:./volume_server_pb --go_opt=paths=source_relative + protoc filer.proto --go_out=plugins=grpc:./filer_pb --go_opt=paths=source_relative + protoc iam.proto --go_out=plugins=grpc:./iam_pb --go_opt=paths=source_relative + protoc messaging.proto --go_out=plugins=grpc:./messaging_pb --go_opt=paths=source_relative # protoc filer.proto 
--java_out=../../other/java/client/src/main/java cp filer.proto ../../other/java/client/src/main/proto diff --git a/weed/pb/filer.proto b/weed/pb/filer.proto index ef847cbe7..ac4c9a0e7 100644 --- a/weed/pb/filer.proto +++ b/weed/pb/filer.proto @@ -2,6 +2,7 @@ syntax = "proto3"; package filer_pb; +option go_package = "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"; option java_package = "seaweedfs.client"; option java_outer_classname = "FilerProto"; @@ -21,6 +22,9 @@ service SeaweedFiler { rpc UpdateEntry (UpdateEntryRequest) returns (UpdateEntryResponse) { } + rpc AppendToEntry (AppendToEntryRequest) returns (AppendToEntryResponse) { + } + rpc DeleteEntry (DeleteEntryRequest) returns (DeleteEntryResponse) { } @@ -33,6 +37,9 @@ service SeaweedFiler { rpc LookupVolume (LookupVolumeRequest) returns (LookupVolumeResponse) { } + rpc CollectionList (CollectionListRequest) returns (CollectionListResponse) { + } + rpc DeleteCollection (DeleteCollectionRequest) returns (DeleteCollectionResponse) { } @@ -42,6 +49,24 @@ service SeaweedFiler { rpc GetFilerConfiguration (GetFilerConfigurationRequest) returns (GetFilerConfigurationResponse) { } + rpc SubscribeMetadata (SubscribeMetadataRequest) returns (stream SubscribeMetadataResponse) { + } + + rpc SubscribeLocalMetadata (SubscribeMetadataRequest) returns (stream SubscribeMetadataResponse) { + } + + rpc KeepConnected (stream KeepConnectedRequest) returns (stream KeepConnectedResponse) { + } + + rpc LocateBroker (LocateBrokerRequest) returns (LocateBrokerResponse) { + } + + rpc KvGet (KvGetRequest) returns (KvGetResponse) { + } + + rpc KvPut (KvPutRequest) returns (KvPutResponse) { + } + } ////////////////////////////////////////////////// @@ -73,6 +98,9 @@ message Entry { repeated FileChunk chunks = 3; FuseAttributes attributes = 4; map<string, bytes> extended = 5; + bytes hard_link_id = 7; + int32 hard_link_counter = 8; // only exists in hard link meta data + bytes content = 9; // if not empty, the file content } message FullEntry { @@ -85,6 +113,8 @@ message EventNotification { Entry new_entry = 2; bool delete_chunks = 3; string new_parent_path = 4; + bool is_from_other_cluster = 5; + repeated int32 signatures = 6; } message FileChunk { @@ -96,6 +126,13 @@ message FileChunk { string source_file_id = 6; // to be deprecated FileId fid = 7; FileId source_fid = 8; + bytes cipher_key = 9; + bool is_compressed = 10; + bool is_chunk_manifest = 11; // content is a list of FileChunks +} + +message FileChunkManifest { + repeated FileChunk chunks = 1; } message FileId { @@ -118,23 +155,39 @@ message FuseAttributes { string user_name = 11; // for hdfs repeated string group_name = 12; // for hdfs string symlink_target = 13; + bytes md5 = 14; + string disk_type = 15; } message CreateEntryRequest { string directory = 1; Entry entry = 2; + bool o_excl = 3; + bool is_from_other_cluster = 4; + repeated int32 signatures = 5; } message CreateEntryResponse { + string error = 1; } message UpdateEntryRequest { string directory = 1; Entry entry = 2; + bool is_from_other_cluster = 3; + repeated int32 signatures = 4; } message UpdateEntryResponse { } +message AppendToEntryRequest { + string directory = 1; + string entry_name = 2; + repeated FileChunk chunks = 3; +} +message AppendToEntryResponse { +} + message DeleteEntryRequest { string directory = 1; string name = 2; @@ -142,9 +195,12 @@ bool is_delete_data = 4; bool is_recursive = 5; bool ignore_recursive_error = 6; + bool is_from_other_cluster = 7; + repeated int32 signatures = 8; } message 
DeleteEntryResponse { + string error = 1; } message AtomicRenameEntryRequest { @@ -163,6 +219,9 @@ message AssignVolumeRequest { string replication = 3; int32 ttl_sec = 4; string data_center = 5; + string path = 6; + string rack = 7; + string disk_type = 8; } message AssignVolumeResponse { @@ -171,6 +230,9 @@ message AssignVolumeResponse { string public_url = 3; int32 count = 4; string auth = 5; + string collection = 6; + string replication = 7; + string error = 8; } message LookupVolumeRequest { @@ -189,6 +251,16 @@ message LookupVolumeResponse { map<string, Locations> locations_map = 1; } +message Collection { + string name = 1; +} +message CollectionListRequest { + bool include_normal_volumes = 1; + bool include_ec_volumes = 2; +} +message CollectionListResponse { + repeated Collection collections = 1; +} message DeleteCollectionRequest { string collection = 1; } @@ -200,11 +272,9 @@ message StatisticsRequest { string replication = 1; string collection = 2; string ttl = 3; + string disk_type = 4; } message StatisticsResponse { - string replication = 1; - string collection = 2; - string ttl = 3; uint64 total_size = 4; uint64 used_size = 5; uint64 file_count = 6; @@ -217,4 +287,80 @@ message GetFilerConfigurationResponse { string replication = 2; string collection = 3; uint32 max_mb = 4; + string dir_buckets = 5; + bool cipher = 7; + int32 signature = 8; + string metrics_address = 9; + int32 metrics_interval_sec = 10; +} + +message SubscribeMetadataRequest { + string client_name = 1; + string path_prefix = 2; + int64 since_ns = 3; + int32 signature = 4; +} +message SubscribeMetadataResponse { + string directory = 1; + EventNotification event_notification = 2; + int64 ts_ns = 3; +} + +message LogEntry { + int64 ts_ns = 1; + int32 partition_key_hash = 2; + bytes data = 3; +} + +message KeepConnectedRequest { + string name = 1; + uint32 grpc_port = 2; + repeated string resources = 3; +} +message KeepConnectedResponse { +} + +message LocateBrokerRequest { + string resource = 1; +} +message LocateBrokerResponse { + bool found = 1; + // if found, send the exact address + // if not found, send the full list of existing brokers + message Resource { + string grpc_addresses = 1; + int32 resource_count = 2; + } + repeated Resource resources = 2; +} + +// Key-Value operations +message KvGetRequest { + bytes key = 1; +} +message KvGetResponse { + bytes value = 1; + string error = 2; +} +message KvPutRequest { + bytes key = 1; + bytes value = 2; +} +message KvPutResponse { + string error = 1; +} + +// path-based configurations +message FilerConf { + int32 version = 1; + message PathConf { + string location_prefix = 1; + string collection = 2; + string replication = 3; + string ttl = 4; + string disk_type = 5; + bool fsync = 6; + uint32 volume_growth_count = 7; + } + repeated PathConf locations = 2; } diff --git a/weed/pb/filer_pb/filer.pb.go b/weed/pb/filer_pb/filer.pb.go index c8214aa94..902c39514 100644 --- a/weed/pb/filer_pb/filer.pb.go +++ b/weed/pb/filer_pb/filer.pb.go @@ -1,1064 +1,4535 @@ -// Code generated by protoc-gen-go. +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.25.0 +// protoc v3.12.3 // source: filer.proto -// DO NOT EDIT! - -/* -Package filer_pb is a generated protocol buffer package. 
- -It is generated from these files: - filer.proto - -It has these top-level messages: - LookupDirectoryEntryRequest - LookupDirectoryEntryResponse - ListEntriesRequest - ListEntriesResponse - Entry - FullEntry - EventNotification - FileChunk - FileId - FuseAttributes - CreateEntryRequest - CreateEntryResponse - UpdateEntryRequest - UpdateEntryResponse - DeleteEntryRequest - DeleteEntryResponse - AtomicRenameEntryRequest - AtomicRenameEntryResponse - AssignVolumeRequest - AssignVolumeResponse - LookupVolumeRequest - Locations - Location - LookupVolumeResponse - DeleteCollectionRequest - DeleteCollectionResponse - StatisticsRequest - StatisticsResponse - GetFilerConfigurationRequest - GetFilerConfigurationResponse -*/ -package filer_pb -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" +package filer_pb import ( - context "golang.org/x/net/context" + context "context" + proto "github.com/golang/protobuf/proto" grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" ) -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package +// This is a compile-time assertion that a sufficiently up-to-date version +// of the legacy proto package is being used. 
+const _ = proto.ProtoPackageIsVersion4 type LookupDirectoryEntryRequest struct { - Directory string `protobuf:"bytes,1,opt,name=directory" json:"directory,omitempty"` - Name string `protobuf:"bytes,2,opt,name=name" json:"name,omitempty"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Directory string `protobuf:"bytes,1,opt,name=directory,proto3" json:"directory,omitempty"` + Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` +} + +func (x *LookupDirectoryEntryRequest) Reset() { + *x = LookupDirectoryEntryRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_filer_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *LookupDirectoryEntryRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LookupDirectoryEntryRequest) ProtoMessage() {} + +func (x *LookupDirectoryEntryRequest) ProtoReflect() protoreflect.Message { + mi := &file_filer_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -func (m *LookupDirectoryEntryRequest) Reset() { *m = LookupDirectoryEntryRequest{} } -func (m *LookupDirectoryEntryRequest) String() string { return proto.CompactTextString(m) } -func (*LookupDirectoryEntryRequest) ProtoMessage() {} -func (*LookupDirectoryEntryRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } +// Deprecated: Use LookupDirectoryEntryRequest.ProtoReflect.Descriptor instead. +func (*LookupDirectoryEntryRequest) Descriptor() ([]byte, []int) { + return file_filer_proto_rawDescGZIP(), []int{0} +} -func (m *LookupDirectoryEntryRequest) GetDirectory() string { - if m != nil { - return m.Directory +func (x *LookupDirectoryEntryRequest) GetDirectory() string { + if x != nil { + return x.Directory } return "" } -func (m *LookupDirectoryEntryRequest) GetName() string { - if m != nil { - return m.Name +func (x *LookupDirectoryEntryRequest) GetName() string { + if x != nil { + return x.Name } return "" } type LookupDirectoryEntryResponse struct { - Entry *Entry `protobuf:"bytes,1,opt,name=entry" json:"entry,omitempty"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Entry *Entry `protobuf:"bytes,1,opt,name=entry,proto3" json:"entry,omitempty"` +} + +func (x *LookupDirectoryEntryResponse) Reset() { + *x = LookupDirectoryEntryResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_filer_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *LookupDirectoryEntryResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LookupDirectoryEntryResponse) ProtoMessage() {} + +func (x *LookupDirectoryEntryResponse) ProtoReflect() protoreflect.Message { + mi := &file_filer_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -func (m *LookupDirectoryEntryResponse) Reset() { *m = LookupDirectoryEntryResponse{} } -func (m *LookupDirectoryEntryResponse) String() string { return proto.CompactTextString(m) } -func (*LookupDirectoryEntryResponse) ProtoMessage() {} -func (*LookupDirectoryEntryResponse) Descriptor() ([]byte, []int) { return 
fileDescriptor0, []int{1} } +// Deprecated: Use LookupDirectoryEntryResponse.ProtoReflect.Descriptor instead. +func (*LookupDirectoryEntryResponse) Descriptor() ([]byte, []int) { + return file_filer_proto_rawDescGZIP(), []int{1} +} -func (m *LookupDirectoryEntryResponse) GetEntry() *Entry { - if m != nil { - return m.Entry +func (x *LookupDirectoryEntryResponse) GetEntry() *Entry { + if x != nil { + return x.Entry } return nil } type ListEntriesRequest struct { - Directory string `protobuf:"bytes,1,opt,name=directory" json:"directory,omitempty"` - Prefix string `protobuf:"bytes,2,opt,name=prefix" json:"prefix,omitempty"` - StartFromFileName string `protobuf:"bytes,3,opt,name=startFromFileName" json:"startFromFileName,omitempty"` - InclusiveStartFrom bool `protobuf:"varint,4,opt,name=inclusiveStartFrom" json:"inclusiveStartFrom,omitempty"` - Limit uint32 `protobuf:"varint,5,opt,name=limit" json:"limit,omitempty"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Directory string `protobuf:"bytes,1,opt,name=directory,proto3" json:"directory,omitempty"` + Prefix string `protobuf:"bytes,2,opt,name=prefix,proto3" json:"prefix,omitempty"` + StartFromFileName string `protobuf:"bytes,3,opt,name=startFromFileName,proto3" json:"startFromFileName,omitempty"` + InclusiveStartFrom bool `protobuf:"varint,4,opt,name=inclusiveStartFrom,proto3" json:"inclusiveStartFrom,omitempty"` + Limit uint32 `protobuf:"varint,5,opt,name=limit,proto3" json:"limit,omitempty"` +} + +func (x *ListEntriesRequest) Reset() { + *x = ListEntriesRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_filer_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListEntriesRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListEntriesRequest) ProtoMessage() {} + +func (x *ListEntriesRequest) ProtoReflect() protoreflect.Message { + mi := &file_filer_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -func (m *ListEntriesRequest) Reset() { *m = ListEntriesRequest{} } -func (m *ListEntriesRequest) String() string { return proto.CompactTextString(m) } -func (*ListEntriesRequest) ProtoMessage() {} -func (*ListEntriesRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } +// Deprecated: Use ListEntriesRequest.ProtoReflect.Descriptor instead. 
+func (*ListEntriesRequest) Descriptor() ([]byte, []int) { + return file_filer_proto_rawDescGZIP(), []int{2} +} -func (m *ListEntriesRequest) GetDirectory() string { - if m != nil { - return m.Directory +func (x *ListEntriesRequest) GetDirectory() string { + if x != nil { + return x.Directory } return "" } -func (m *ListEntriesRequest) GetPrefix() string { - if m != nil { - return m.Prefix +func (x *ListEntriesRequest) GetPrefix() string { + if x != nil { + return x.Prefix } return "" } -func (m *ListEntriesRequest) GetStartFromFileName() string { - if m != nil { - return m.StartFromFileName +func (x *ListEntriesRequest) GetStartFromFileName() string { + if x != nil { + return x.StartFromFileName } return "" } -func (m *ListEntriesRequest) GetInclusiveStartFrom() bool { - if m != nil { - return m.InclusiveStartFrom +func (x *ListEntriesRequest) GetInclusiveStartFrom() bool { + if x != nil { + return x.InclusiveStartFrom } return false } -func (m *ListEntriesRequest) GetLimit() uint32 { - if m != nil { - return m.Limit +func (x *ListEntriesRequest) GetLimit() uint32 { + if x != nil { + return x.Limit } return 0 } type ListEntriesResponse struct { - Entry *Entry `protobuf:"bytes,1,opt,name=entry" json:"entry,omitempty"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Entry *Entry `protobuf:"bytes,1,opt,name=entry,proto3" json:"entry,omitempty"` +} + +func (x *ListEntriesResponse) Reset() { + *x = ListEntriesResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_filer_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListEntriesResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListEntriesResponse) ProtoMessage() {} + +func (x *ListEntriesResponse) ProtoReflect() protoreflect.Message { + mi := &file_filer_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -func (m *ListEntriesResponse) Reset() { *m = ListEntriesResponse{} } -func (m *ListEntriesResponse) String() string { return proto.CompactTextString(m) } -func (*ListEntriesResponse) ProtoMessage() {} -func (*ListEntriesResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } +// Deprecated: Use ListEntriesResponse.ProtoReflect.Descriptor instead. 
+func (*ListEntriesResponse) Descriptor() ([]byte, []int) { + return file_filer_proto_rawDescGZIP(), []int{3} +} -func (m *ListEntriesResponse) GetEntry() *Entry { - if m != nil { - return m.Entry +func (x *ListEntriesResponse) GetEntry() *Entry { + if x != nil { + return x.Entry } return nil } type Entry struct { - Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - IsDirectory bool `protobuf:"varint,2,opt,name=is_directory,json=isDirectory" json:"is_directory,omitempty"` - Chunks []*FileChunk `protobuf:"bytes,3,rep,name=chunks" json:"chunks,omitempty"` - Attributes *FuseAttributes `protobuf:"bytes,4,opt,name=attributes" json:"attributes,omitempty"` - Extended map[string][]byte `protobuf:"bytes,5,rep,name=extended" json:"extended,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value,proto3"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + IsDirectory bool `protobuf:"varint,2,opt,name=is_directory,json=isDirectory,proto3" json:"is_directory,omitempty"` + Chunks []*FileChunk `protobuf:"bytes,3,rep,name=chunks,proto3" json:"chunks,omitempty"` + Attributes *FuseAttributes `protobuf:"bytes,4,opt,name=attributes,proto3" json:"attributes,omitempty"` + Extended map[string][]byte `protobuf:"bytes,5,rep,name=extended,proto3" json:"extended,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + HardLinkId []byte `protobuf:"bytes,7,opt,name=hard_link_id,json=hardLinkId,proto3" json:"hard_link_id,omitempty"` + HardLinkCounter int32 `protobuf:"varint,8,opt,name=hard_link_counter,json=hardLinkCounter,proto3" json:"hard_link_counter,omitempty"` // only exists in hard link meta data + Content []byte `protobuf:"bytes,9,opt,name=content,proto3" json:"content,omitempty"` // if not empty, the file content +} + +func (x *Entry) Reset() { + *x = Entry{} + if protoimpl.UnsafeEnabled { + mi := &file_filer_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Entry) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Entry) ProtoMessage() {} + +func (x *Entry) ProtoReflect() protoreflect.Message { + mi := &file_filer_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -func (m *Entry) Reset() { *m = Entry{} } -func (m *Entry) String() string { return proto.CompactTextString(m) } -func (*Entry) ProtoMessage() {} -func (*Entry) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } +// Deprecated: Use Entry.ProtoReflect.Descriptor instead. 
+func (*Entry) Descriptor() ([]byte, []int) { + return file_filer_proto_rawDescGZIP(), []int{4} +} -func (m *Entry) GetName() string { - if m != nil { - return m.Name +func (x *Entry) GetName() string { + if x != nil { + return x.Name } return "" } -func (m *Entry) GetIsDirectory() bool { - if m != nil { - return m.IsDirectory +func (x *Entry) GetIsDirectory() bool { + if x != nil { + return x.IsDirectory } return false } -func (m *Entry) GetChunks() []*FileChunk { - if m != nil { - return m.Chunks +func (x *Entry) GetChunks() []*FileChunk { + if x != nil { + return x.Chunks } return nil } -func (m *Entry) GetAttributes() *FuseAttributes { - if m != nil { - return m.Attributes +func (x *Entry) GetAttributes() *FuseAttributes { + if x != nil { + return x.Attributes } return nil } -func (m *Entry) GetExtended() map[string][]byte { - if m != nil { - return m.Extended +func (x *Entry) GetExtended() map[string][]byte { + if x != nil { + return x.Extended } return nil } -type FullEntry struct { - Dir string `protobuf:"bytes,1,opt,name=dir" json:"dir,omitempty"` - Entry *Entry `protobuf:"bytes,2,opt,name=entry" json:"entry,omitempty"` +func (x *Entry) GetHardLinkId() []byte { + if x != nil { + return x.HardLinkId + } + return nil } -func (m *FullEntry) Reset() { *m = FullEntry{} } -func (m *FullEntry) String() string { return proto.CompactTextString(m) } -func (*FullEntry) ProtoMessage() {} -func (*FullEntry) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} } - -func (m *FullEntry) GetDir() string { - if m != nil { - return m.Dir +func (x *Entry) GetHardLinkCounter() int32 { + if x != nil { + return x.HardLinkCounter } - return "" + return 0 } -func (m *FullEntry) GetEntry() *Entry { - if m != nil { - return m.Entry +func (x *Entry) GetContent() []byte { + if x != nil { + return x.Content } return nil } -type EventNotification struct { - OldEntry *Entry `protobuf:"bytes,1,opt,name=old_entry,json=oldEntry" json:"old_entry,omitempty"` - NewEntry *Entry `protobuf:"bytes,2,opt,name=new_entry,json=newEntry" json:"new_entry,omitempty"` - DeleteChunks bool `protobuf:"varint,3,opt,name=delete_chunks,json=deleteChunks" json:"delete_chunks,omitempty"` - NewParentPath string `protobuf:"bytes,4,opt,name=new_parent_path,json=newParentPath" json:"new_parent_path,omitempty"` -} - -func (m *EventNotification) Reset() { *m = EventNotification{} } -func (m *EventNotification) String() string { return proto.CompactTextString(m) } -func (*EventNotification) ProtoMessage() {} -func (*EventNotification) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} } +type FullEntry struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func (m *EventNotification) GetOldEntry() *Entry { - if m != nil { - return m.OldEntry - } - return nil + Dir string `protobuf:"bytes,1,opt,name=dir,proto3" json:"dir,omitempty"` + Entry *Entry `protobuf:"bytes,2,opt,name=entry,proto3" json:"entry,omitempty"` } -func (m *EventNotification) GetNewEntry() *Entry { - if m != nil { - return m.NewEntry +func (x *FullEntry) Reset() { + *x = FullEntry{} + if protoimpl.UnsafeEnabled { + mi := &file_filer_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return nil } -func (m *EventNotification) GetDeleteChunks() bool { - if m != nil { - return m.DeleteChunks - } - return false +func (x *FullEntry) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *EventNotification) GetNewParentPath() string { - if 
m != nil { - return m.NewParentPath +func (*FullEntry) ProtoMessage() {} + +func (x *FullEntry) ProtoReflect() protoreflect.Message { + mi := &file_filer_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return "" + return mi.MessageOf(x) } -type FileChunk struct { - FileId string `protobuf:"bytes,1,opt,name=file_id,json=fileId" json:"file_id,omitempty"` - Offset int64 `protobuf:"varint,2,opt,name=offset" json:"offset,omitempty"` - Size uint64 `protobuf:"varint,3,opt,name=size" json:"size,omitempty"` - Mtime int64 `protobuf:"varint,4,opt,name=mtime" json:"mtime,omitempty"` - ETag string `protobuf:"bytes,5,opt,name=e_tag,json=eTag" json:"e_tag,omitempty"` - SourceFileId string `protobuf:"bytes,6,opt,name=source_file_id,json=sourceFileId" json:"source_file_id,omitempty"` - Fid *FileId `protobuf:"bytes,7,opt,name=fid" json:"fid,omitempty"` - SourceFid *FileId `protobuf:"bytes,8,opt,name=source_fid,json=sourceFid" json:"source_fid,omitempty"` +// Deprecated: Use FullEntry.ProtoReflect.Descriptor instead. +func (*FullEntry) Descriptor() ([]byte, []int) { + return file_filer_proto_rawDescGZIP(), []int{5} } -func (m *FileChunk) Reset() { *m = FileChunk{} } -func (m *FileChunk) String() string { return proto.CompactTextString(m) } -func (*FileChunk) ProtoMessage() {} -func (*FileChunk) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} } - -func (m *FileChunk) GetFileId() string { - if m != nil { - return m.FileId +func (x *FullEntry) GetDir() string { + if x != nil { + return x.Dir } return "" } -func (m *FileChunk) GetOffset() int64 { - if m != nil { - return m.Offset +func (x *FullEntry) GetEntry() *Entry { + if x != nil { + return x.Entry } - return 0 + return nil } -func (m *FileChunk) GetSize() uint64 { - if m != nil { - return m.Size - } - return 0 +type EventNotification struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + OldEntry *Entry `protobuf:"bytes,1,opt,name=old_entry,json=oldEntry,proto3" json:"old_entry,omitempty"` + NewEntry *Entry `protobuf:"bytes,2,opt,name=new_entry,json=newEntry,proto3" json:"new_entry,omitempty"` + DeleteChunks bool `protobuf:"varint,3,opt,name=delete_chunks,json=deleteChunks,proto3" json:"delete_chunks,omitempty"` + NewParentPath string `protobuf:"bytes,4,opt,name=new_parent_path,json=newParentPath,proto3" json:"new_parent_path,omitempty"` + IsFromOtherCluster bool `protobuf:"varint,5,opt,name=is_from_other_cluster,json=isFromOtherCluster,proto3" json:"is_from_other_cluster,omitempty"` + Signatures []int32 `protobuf:"varint,6,rep,packed,name=signatures,proto3" json:"signatures,omitempty"` } -func (m *FileChunk) GetMtime() int64 { - if m != nil { - return m.Mtime +func (x *EventNotification) Reset() { + *x = EventNotification{} + if protoimpl.UnsafeEnabled { + mi := &file_filer_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return 0 } -func (m *FileChunk) GetETag() string { - if m != nil { - return m.ETag - } - return "" +func (x *EventNotification) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *FileChunk) GetSourceFileId() string { - if m != nil { - return m.SourceFileId +func (*EventNotification) ProtoMessage() {} + +func (x *EventNotification) ProtoReflect() protoreflect.Message { + mi := &file_filer_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != 
nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return "" + return mi.MessageOf(x) +} + +// Deprecated: Use EventNotification.ProtoReflect.Descriptor instead. +func (*EventNotification) Descriptor() ([]byte, []int) { + return file_filer_proto_rawDescGZIP(), []int{6} } -func (m *FileChunk) GetFid() *FileId { - if m != nil { - return m.Fid +func (x *EventNotification) GetOldEntry() *Entry { + if x != nil { + return x.OldEntry } return nil } -func (m *FileChunk) GetSourceFid() *FileId { - if m != nil { - return m.SourceFid +func (x *EventNotification) GetNewEntry() *Entry { + if x != nil { + return x.NewEntry } return nil } -type FileId struct { - VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` - FileKey uint64 `protobuf:"varint,2,opt,name=file_key,json=fileKey" json:"file_key,omitempty"` - Cookie uint32 `protobuf:"fixed32,3,opt,name=cookie" json:"cookie,omitempty"` +func (x *EventNotification) GetDeleteChunks() bool { + if x != nil { + return x.DeleteChunks + } + return false } -func (m *FileId) Reset() { *m = FileId{} } -func (m *FileId) String() string { return proto.CompactTextString(m) } -func (*FileId) ProtoMessage() {} -func (*FileId) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} } - -func (m *FileId) GetVolumeId() uint32 { - if m != nil { - return m.VolumeId +func (x *EventNotification) GetNewParentPath() string { + if x != nil { + return x.NewParentPath } - return 0 + return "" } -func (m *FileId) GetFileKey() uint64 { - if m != nil { - return m.FileKey +func (x *EventNotification) GetIsFromOtherCluster() bool { + if x != nil { + return x.IsFromOtherCluster } - return 0 + return false } -func (m *FileId) GetCookie() uint32 { - if m != nil { - return m.Cookie +func (x *EventNotification) GetSignatures() []int32 { + if x != nil { + return x.Signatures } - return 0 + return nil } -type FuseAttributes struct { - FileSize uint64 `protobuf:"varint,1,opt,name=file_size,json=fileSize" json:"file_size,omitempty"` - Mtime int64 `protobuf:"varint,2,opt,name=mtime" json:"mtime,omitempty"` - FileMode uint32 `protobuf:"varint,3,opt,name=file_mode,json=fileMode" json:"file_mode,omitempty"` - Uid uint32 `protobuf:"varint,4,opt,name=uid" json:"uid,omitempty"` - Gid uint32 `protobuf:"varint,5,opt,name=gid" json:"gid,omitempty"` - Crtime int64 `protobuf:"varint,6,opt,name=crtime" json:"crtime,omitempty"` - Mime string `protobuf:"bytes,7,opt,name=mime" json:"mime,omitempty"` - Replication string `protobuf:"bytes,8,opt,name=replication" json:"replication,omitempty"` - Collection string `protobuf:"bytes,9,opt,name=collection" json:"collection,omitempty"` - TtlSec int32 `protobuf:"varint,10,opt,name=ttl_sec,json=ttlSec" json:"ttl_sec,omitempty"` - UserName string `protobuf:"bytes,11,opt,name=user_name,json=userName" json:"user_name,omitempty"` - GroupName []string `protobuf:"bytes,12,rep,name=group_name,json=groupName" json:"group_name,omitempty"` - SymlinkTarget string `protobuf:"bytes,13,opt,name=symlink_target,json=symlinkTarget" json:"symlink_target,omitempty"` -} - -func (m *FuseAttributes) Reset() { *m = FuseAttributes{} } -func (m *FuseAttributes) String() string { return proto.CompactTextString(m) } -func (*FuseAttributes) ProtoMessage() {} -func (*FuseAttributes) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} } - -func (m *FuseAttributes) GetFileSize() uint64 { - if m != nil { - return m.FileSize - } - return 0 +type 
FileChunk struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + FileId string `protobuf:"bytes,1,opt,name=file_id,json=fileId,proto3" json:"file_id,omitempty"` // to be deprecated + Offset int64 `protobuf:"varint,2,opt,name=offset,proto3" json:"offset,omitempty"` + Size uint64 `protobuf:"varint,3,opt,name=size,proto3" json:"size,omitempty"` + Mtime int64 `protobuf:"varint,4,opt,name=mtime,proto3" json:"mtime,omitempty"` + ETag string `protobuf:"bytes,5,opt,name=e_tag,json=eTag,proto3" json:"e_tag,omitempty"` + SourceFileId string `protobuf:"bytes,6,opt,name=source_file_id,json=sourceFileId,proto3" json:"source_file_id,omitempty"` // to be deprecated + Fid *FileId `protobuf:"bytes,7,opt,name=fid,proto3" json:"fid,omitempty"` + SourceFid *FileId `protobuf:"bytes,8,opt,name=source_fid,json=sourceFid,proto3" json:"source_fid,omitempty"` + CipherKey []byte `protobuf:"bytes,9,opt,name=cipher_key,json=cipherKey,proto3" json:"cipher_key,omitempty"` + IsCompressed bool `protobuf:"varint,10,opt,name=is_compressed,json=isCompressed,proto3" json:"is_compressed,omitempty"` + IsChunkManifest bool `protobuf:"varint,11,opt,name=is_chunk_manifest,json=isChunkManifest,proto3" json:"is_chunk_manifest,omitempty"` // content is a list of FileChunks } -func (m *FuseAttributes) GetMtime() int64 { - if m != nil { - return m.Mtime +func (x *FileChunk) Reset() { + *x = FileChunk{} + if protoimpl.UnsafeEnabled { + mi := &file_filer_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return 0 } -func (m *FuseAttributes) GetFileMode() uint32 { - if m != nil { - return m.FileMode +func (x *FileChunk) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FileChunk) ProtoMessage() {} + +func (x *FileChunk) ProtoReflect() protoreflect.Message { + mi := &file_filer_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return 0 + return mi.MessageOf(x) } -func (m *FuseAttributes) GetUid() uint32 { - if m != nil { - return m.Uid +// Deprecated: Use FileChunk.ProtoReflect.Descriptor instead. 
+func (*FileChunk) Descriptor() ([]byte, []int) { + return file_filer_proto_rawDescGZIP(), []int{7} +} + +func (x *FileChunk) GetFileId() string { + if x != nil { + return x.FileId } - return 0 + return "" } -func (m *FuseAttributes) GetGid() uint32 { - if m != nil { - return m.Gid +func (x *FileChunk) GetOffset() int64 { + if x != nil { + return x.Offset } return 0 } -func (m *FuseAttributes) GetCrtime() int64 { - if m != nil { - return m.Crtime +func (x *FileChunk) GetSize() uint64 { + if x != nil { + return x.Size } return 0 } -func (m *FuseAttributes) GetMime() string { - if m != nil { - return m.Mime +func (x *FileChunk) GetMtime() int64 { + if x != nil { + return x.Mtime } - return "" + return 0 } -func (m *FuseAttributes) GetReplication() string { - if m != nil { - return m.Replication +func (x *FileChunk) GetETag() string { + if x != nil { + return x.ETag } return "" } -func (m *FuseAttributes) GetCollection() string { - if m != nil { - return m.Collection +func (x *FileChunk) GetSourceFileId() string { + if x != nil { + return x.SourceFileId } return "" } -func (m *FuseAttributes) GetTtlSec() int32 { - if m != nil { - return m.TtlSec +func (x *FileChunk) GetFid() *FileId { + if x != nil { + return x.Fid } - return 0 + return nil } -func (m *FuseAttributes) GetUserName() string { - if m != nil { - return m.UserName +func (x *FileChunk) GetSourceFid() *FileId { + if x != nil { + return x.SourceFid } - return "" + return nil } -func (m *FuseAttributes) GetGroupName() []string { - if m != nil { - return m.GroupName +func (x *FileChunk) GetCipherKey() []byte { + if x != nil { + return x.CipherKey } return nil } -func (m *FuseAttributes) GetSymlinkTarget() string { - if m != nil { - return m.SymlinkTarget +func (x *FileChunk) GetIsCompressed() bool { + if x != nil { + return x.IsCompressed } - return "" + return false } -type CreateEntryRequest struct { - Directory string `protobuf:"bytes,1,opt,name=directory" json:"directory,omitempty"` - Entry *Entry `protobuf:"bytes,2,opt,name=entry" json:"entry,omitempty"` +func (x *FileChunk) GetIsChunkManifest() bool { + if x != nil { + return x.IsChunkManifest + } + return false } -func (m *CreateEntryRequest) Reset() { *m = CreateEntryRequest{} } -func (m *CreateEntryRequest) String() string { return proto.CompactTextString(m) } -func (*CreateEntryRequest) ProtoMessage() {} -func (*CreateEntryRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10} } +type FileChunkManifest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func (m *CreateEntryRequest) GetDirectory() string { - if m != nil { - return m.Directory - } - return "" + Chunks []*FileChunk `protobuf:"bytes,1,rep,name=chunks,proto3" json:"chunks,omitempty"` } -func (m *CreateEntryRequest) GetEntry() *Entry { - if m != nil { - return m.Entry +func (x *FileChunkManifest) Reset() { + *x = FileChunkManifest{} + if protoimpl.UnsafeEnabled { + mi := &file_filer_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return nil } -type CreateEntryResponse struct { +func (x *FileChunkManifest) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *CreateEntryResponse) Reset() { *m = CreateEntryResponse{} } -func (m *CreateEntryResponse) String() string { return proto.CompactTextString(m) } -func (*CreateEntryResponse) ProtoMessage() {} -func (*CreateEntryResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11} } +func 
(*FileChunkManifest) ProtoMessage() {} -type UpdateEntryRequest struct { - Directory string `protobuf:"bytes,1,opt,name=directory" json:"directory,omitempty"` - Entry *Entry `protobuf:"bytes,2,opt,name=entry" json:"entry,omitempty"` +func (x *FileChunkManifest) ProtoReflect() protoreflect.Message { + mi := &file_filer_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -func (m *UpdateEntryRequest) Reset() { *m = UpdateEntryRequest{} } -func (m *UpdateEntryRequest) String() string { return proto.CompactTextString(m) } -func (*UpdateEntryRequest) ProtoMessage() {} -func (*UpdateEntryRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{12} } - -func (m *UpdateEntryRequest) GetDirectory() string { - if m != nil { - return m.Directory - } - return "" +// Deprecated: Use FileChunkManifest.ProtoReflect.Descriptor instead. +func (*FileChunkManifest) Descriptor() ([]byte, []int) { + return file_filer_proto_rawDescGZIP(), []int{8} } -func (m *UpdateEntryRequest) GetEntry() *Entry { - if m != nil { - return m.Entry +func (x *FileChunkManifest) GetChunks() []*FileChunk { + if x != nil { + return x.Chunks } return nil } -type UpdateEntryResponse struct { +type FileId struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` + FileKey uint64 `protobuf:"varint,2,opt,name=file_key,json=fileKey,proto3" json:"file_key,omitempty"` + Cookie uint32 `protobuf:"fixed32,3,opt,name=cookie,proto3" json:"cookie,omitempty"` } -func (m *UpdateEntryResponse) Reset() { *m = UpdateEntryResponse{} } -func (m *UpdateEntryResponse) String() string { return proto.CompactTextString(m) } -func (*UpdateEntryResponse) ProtoMessage() {} -func (*UpdateEntryResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{13} } +func (x *FileId) Reset() { + *x = FileId{} + if protoimpl.UnsafeEnabled { + mi := &file_filer_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} -type DeleteEntryRequest struct { - Directory string `protobuf:"bytes,1,opt,name=directory" json:"directory,omitempty"` - Name string `protobuf:"bytes,2,opt,name=name" json:"name,omitempty"` - // bool is_directory = 3; - IsDeleteData bool `protobuf:"varint,4,opt,name=is_delete_data,json=isDeleteData" json:"is_delete_data,omitempty"` - IsRecursive bool `protobuf:"varint,5,opt,name=is_recursive,json=isRecursive" json:"is_recursive,omitempty"` - IgnoreRecursiveError bool `protobuf:"varint,6,opt,name=ignore_recursive_error,json=ignoreRecursiveError" json:"ignore_recursive_error,omitempty"` +func (x *FileId) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *DeleteEntryRequest) Reset() { *m = DeleteEntryRequest{} } -func (m *DeleteEntryRequest) String() string { return proto.CompactTextString(m) } -func (*DeleteEntryRequest) ProtoMessage() {} -func (*DeleteEntryRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{14} } +func (*FileId) ProtoMessage() {} -func (m *DeleteEntryRequest) GetDirectory() string { - if m != nil { - return m.Directory +func (x *FileId) ProtoReflect() protoreflect.Message { + mi := &file_filer_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return "" + return mi.MessageOf(x) } -func (m *DeleteEntryRequest) GetName() string { - if m != nil { - return m.Name - } - return "" +// Deprecated: Use FileId.ProtoReflect.Descriptor instead. +func (*FileId) Descriptor() ([]byte, []int) { + return file_filer_proto_rawDescGZIP(), []int{9} } -func (m *DeleteEntryRequest) GetIsDeleteData() bool { - if m != nil { - return m.IsDeleteData +func (x *FileId) GetVolumeId() uint32 { + if x != nil { + return x.VolumeId } - return false + return 0 } -func (m *DeleteEntryRequest) GetIsRecursive() bool { - if m != nil { - return m.IsRecursive +func (x *FileId) GetFileKey() uint64 { + if x != nil { + return x.FileKey } - return false + return 0 } -func (m *DeleteEntryRequest) GetIgnoreRecursiveError() bool { - if m != nil { - return m.IgnoreRecursiveError +func (x *FileId) GetCookie() uint32 { + if x != nil { + return x.Cookie } - return false + return 0 } -type DeleteEntryResponse struct { +type FuseAttributes struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + FileSize uint64 `protobuf:"varint,1,opt,name=file_size,json=fileSize,proto3" json:"file_size,omitempty"` + Mtime int64 `protobuf:"varint,2,opt,name=mtime,proto3" json:"mtime,omitempty"` // unix time in seconds + FileMode uint32 `protobuf:"varint,3,opt,name=file_mode,json=fileMode,proto3" json:"file_mode,omitempty"` + Uid uint32 `protobuf:"varint,4,opt,name=uid,proto3" json:"uid,omitempty"` + Gid uint32 `protobuf:"varint,5,opt,name=gid,proto3" json:"gid,omitempty"` + Crtime int64 `protobuf:"varint,6,opt,name=crtime,proto3" json:"crtime,omitempty"` // unix time in seconds + Mime string `protobuf:"bytes,7,opt,name=mime,proto3" json:"mime,omitempty"` + Replication string `protobuf:"bytes,8,opt,name=replication,proto3" json:"replication,omitempty"` + Collection string `protobuf:"bytes,9,opt,name=collection,proto3" json:"collection,omitempty"` + TtlSec int32 `protobuf:"varint,10,opt,name=ttl_sec,json=ttlSec,proto3" json:"ttl_sec,omitempty"` + UserName string `protobuf:"bytes,11,opt,name=user_name,json=userName,proto3" json:"user_name,omitempty"` // for hdfs + GroupName []string `protobuf:"bytes,12,rep,name=group_name,json=groupName,proto3" json:"group_name,omitempty"` // for hdfs + SymlinkTarget string `protobuf:"bytes,13,opt,name=symlink_target,json=symlinkTarget,proto3" json:"symlink_target,omitempty"` + Md5 []byte `protobuf:"bytes,14,opt,name=md5,proto3" json:"md5,omitempty"` + DiskType string `protobuf:"bytes,15,opt,name=disk_type,json=diskType,proto3" json:"disk_type,omitempty"` } -func (m *DeleteEntryResponse) Reset() { *m = DeleteEntryResponse{} } -func (m *DeleteEntryResponse) String() string { return proto.CompactTextString(m) } -func (*DeleteEntryResponse) ProtoMessage() {} -func (*DeleteEntryResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{15} } +func (x *FuseAttributes) Reset() { + *x = FuseAttributes{} + if protoimpl.UnsafeEnabled { + mi := &file_filer_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} -type AtomicRenameEntryRequest struct { - OldDirectory string `protobuf:"bytes,1,opt,name=old_directory,json=oldDirectory" json:"old_directory,omitempty"` - OldName string `protobuf:"bytes,2,opt,name=old_name,json=oldName" json:"old_name,omitempty"` - NewDirectory string 
`protobuf:"bytes,3,opt,name=new_directory,json=newDirectory" json:"new_directory,omitempty"` - NewName string `protobuf:"bytes,4,opt,name=new_name,json=newName" json:"new_name,omitempty"` +func (x *FuseAttributes) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *AtomicRenameEntryRequest) Reset() { *m = AtomicRenameEntryRequest{} } -func (m *AtomicRenameEntryRequest) String() string { return proto.CompactTextString(m) } -func (*AtomicRenameEntryRequest) ProtoMessage() {} -func (*AtomicRenameEntryRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{16} } +func (*FuseAttributes) ProtoMessage() {} -func (m *AtomicRenameEntryRequest) GetOldDirectory() string { - if m != nil { - return m.OldDirectory +func (x *FuseAttributes) ProtoReflect() protoreflect.Message { + mi := &file_filer_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return "" + return mi.MessageOf(x) } -func (m *AtomicRenameEntryRequest) GetOldName() string { - if m != nil { - return m.OldName - } - return "" +// Deprecated: Use FuseAttributes.ProtoReflect.Descriptor instead. +func (*FuseAttributes) Descriptor() ([]byte, []int) { + return file_filer_proto_rawDescGZIP(), []int{10} } -func (m *AtomicRenameEntryRequest) GetNewDirectory() string { - if m != nil { - return m.NewDirectory +func (x *FuseAttributes) GetFileSize() uint64 { + if x != nil { + return x.FileSize } - return "" + return 0 } -func (m *AtomicRenameEntryRequest) GetNewName() string { - if m != nil { - return m.NewName +func (x *FuseAttributes) GetMtime() int64 { + if x != nil { + return x.Mtime } - return "" + return 0 } -type AtomicRenameEntryResponse struct { +func (x *FuseAttributes) GetFileMode() uint32 { + if x != nil { + return x.FileMode + } + return 0 } -func (m *AtomicRenameEntryResponse) Reset() { *m = AtomicRenameEntryResponse{} } -func (m *AtomicRenameEntryResponse) String() string { return proto.CompactTextString(m) } -func (*AtomicRenameEntryResponse) ProtoMessage() {} -func (*AtomicRenameEntryResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{17} } - -type AssignVolumeRequest struct { - Count int32 `protobuf:"varint,1,opt,name=count" json:"count,omitempty"` - Collection string `protobuf:"bytes,2,opt,name=collection" json:"collection,omitempty"` - Replication string `protobuf:"bytes,3,opt,name=replication" json:"replication,omitempty"` - TtlSec int32 `protobuf:"varint,4,opt,name=ttl_sec,json=ttlSec" json:"ttl_sec,omitempty"` - DataCenter string `protobuf:"bytes,5,opt,name=data_center,json=dataCenter" json:"data_center,omitempty"` +func (x *FuseAttributes) GetUid() uint32 { + if x != nil { + return x.Uid + } + return 0 } -func (m *AssignVolumeRequest) Reset() { *m = AssignVolumeRequest{} } -func (m *AssignVolumeRequest) String() string { return proto.CompactTextString(m) } -func (*AssignVolumeRequest) ProtoMessage() {} -func (*AssignVolumeRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{18} } - -func (m *AssignVolumeRequest) GetCount() int32 { - if m != nil { - return m.Count +func (x *FuseAttributes) GetGid() uint32 { + if x != nil { + return x.Gid } return 0 } -func (m *AssignVolumeRequest) GetCollection() string { - if m != nil { - return m.Collection +func (x *FuseAttributes) GetCrtime() int64 { + if x != nil { + return x.Crtime } - return "" + return 0 } -func (m *AssignVolumeRequest) GetReplication() string { - if m != 
nil { - return m.Replication +func (x *FuseAttributes) GetMime() string { + if x != nil { + return x.Mime } return "" } -func (m *AssignVolumeRequest) GetTtlSec() int32 { - if m != nil { - return m.TtlSec +func (x *FuseAttributes) GetReplication() string { + if x != nil { + return x.Replication } - return 0 + return "" } -func (m *AssignVolumeRequest) GetDataCenter() string { - if m != nil { - return m.DataCenter +func (x *FuseAttributes) GetCollection() string { + if x != nil { + return x.Collection } return "" } -type AssignVolumeResponse struct { - FileId string `protobuf:"bytes,1,opt,name=file_id,json=fileId" json:"file_id,omitempty"` - Url string `protobuf:"bytes,2,opt,name=url" json:"url,omitempty"` - PublicUrl string `protobuf:"bytes,3,opt,name=public_url,json=publicUrl" json:"public_url,omitempty"` - Count int32 `protobuf:"varint,4,opt,name=count" json:"count,omitempty"` - Auth string `protobuf:"bytes,5,opt,name=auth" json:"auth,omitempty"` +func (x *FuseAttributes) GetTtlSec() int32 { + if x != nil { + return x.TtlSec + } + return 0 } -func (m *AssignVolumeResponse) Reset() { *m = AssignVolumeResponse{} } -func (m *AssignVolumeResponse) String() string { return proto.CompactTextString(m) } -func (*AssignVolumeResponse) ProtoMessage() {} -func (*AssignVolumeResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{19} } - -func (m *AssignVolumeResponse) GetFileId() string { - if m != nil { - return m.FileId +func (x *FuseAttributes) GetUserName() string { + if x != nil { + return x.UserName } return "" } -func (m *AssignVolumeResponse) GetUrl() string { - if m != nil { - return m.Url +func (x *FuseAttributes) GetGroupName() []string { + if x != nil { + return x.GroupName } - return "" + return nil } -func (m *AssignVolumeResponse) GetPublicUrl() string { - if m != nil { - return m.PublicUrl +func (x *FuseAttributes) GetSymlinkTarget() string { + if x != nil { + return x.SymlinkTarget } return "" } -func (m *AssignVolumeResponse) GetCount() int32 { - if m != nil { - return m.Count +func (x *FuseAttributes) GetMd5() []byte { + if x != nil { + return x.Md5 } - return 0 + return nil } -func (m *AssignVolumeResponse) GetAuth() string { - if m != nil { - return m.Auth +func (x *FuseAttributes) GetDiskType() string { + if x != nil { + return x.DiskType } return "" } -type LookupVolumeRequest struct { - VolumeIds []string `protobuf:"bytes,1,rep,name=volume_ids,json=volumeIds" json:"volume_ids,omitempty"` -} +type CreateEntryRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func (m *LookupVolumeRequest) Reset() { *m = LookupVolumeRequest{} } -func (m *LookupVolumeRequest) String() string { return proto.CompactTextString(m) } -func (*LookupVolumeRequest) ProtoMessage() {} -func (*LookupVolumeRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{20} } + Directory string `protobuf:"bytes,1,opt,name=directory,proto3" json:"directory,omitempty"` + Entry *Entry `protobuf:"bytes,2,opt,name=entry,proto3" json:"entry,omitempty"` + OExcl bool `protobuf:"varint,3,opt,name=o_excl,json=oExcl,proto3" json:"o_excl,omitempty"` + IsFromOtherCluster bool `protobuf:"varint,4,opt,name=is_from_other_cluster,json=isFromOtherCluster,proto3" json:"is_from_other_cluster,omitempty"` + Signatures []int32 `protobuf:"varint,5,rep,packed,name=signatures,proto3" json:"signatures,omitempty"` +} -func (m *LookupVolumeRequest) GetVolumeIds() []string { - if m != nil { - return m.VolumeIds +func (x *CreateEntryRequest) Reset() 
{ + *x = CreateEntryRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_filer_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return nil } -type Locations struct { - Locations []*Location `protobuf:"bytes,1,rep,name=locations" json:"locations,omitempty"` +func (x *CreateEntryRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *Locations) Reset() { *m = Locations{} } -func (m *Locations) String() string { return proto.CompactTextString(m) } -func (*Locations) ProtoMessage() {} -func (*Locations) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{21} } +func (*CreateEntryRequest) ProtoMessage() {} -func (m *Locations) GetLocations() []*Location { - if m != nil { - return m.Locations +func (x *CreateEntryRequest) ProtoReflect() protoreflect.Message { + mi := &file_filer_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return nil + return mi.MessageOf(x) } -type Location struct { - Url string `protobuf:"bytes,1,opt,name=url" json:"url,omitempty"` - PublicUrl string `protobuf:"bytes,2,opt,name=public_url,json=publicUrl" json:"public_url,omitempty"` +// Deprecated: Use CreateEntryRequest.ProtoReflect.Descriptor instead. +func (*CreateEntryRequest) Descriptor() ([]byte, []int) { + return file_filer_proto_rawDescGZIP(), []int{11} } -func (m *Location) Reset() { *m = Location{} } -func (m *Location) String() string { return proto.CompactTextString(m) } -func (*Location) ProtoMessage() {} -func (*Location) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{22} } - -func (m *Location) GetUrl() string { - if m != nil { - return m.Url +func (x *CreateEntryRequest) GetDirectory() string { + if x != nil { + return x.Directory } return "" } -func (m *Location) GetPublicUrl() string { - if m != nil { - return m.PublicUrl +func (x *CreateEntryRequest) GetEntry() *Entry { + if x != nil { + return x.Entry } - return "" + return nil } -type LookupVolumeResponse struct { - LocationsMap map[string]*Locations `protobuf:"bytes,1,rep,name=locations_map,json=locationsMap" json:"locations_map,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` +func (x *CreateEntryRequest) GetOExcl() bool { + if x != nil { + return x.OExcl + } + return false } -func (m *LookupVolumeResponse) Reset() { *m = LookupVolumeResponse{} } -func (m *LookupVolumeResponse) String() string { return proto.CompactTextString(m) } -func (*LookupVolumeResponse) ProtoMessage() {} -func (*LookupVolumeResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{23} } +func (x *CreateEntryRequest) GetIsFromOtherCluster() bool { + if x != nil { + return x.IsFromOtherCluster + } + return false +} -func (m *LookupVolumeResponse) GetLocationsMap() map[string]*Locations { - if m != nil { - return m.LocationsMap +func (x *CreateEntryRequest) GetSignatures() []int32 { + if x != nil { + return x.Signatures } return nil } -type DeleteCollectionRequest struct { - Collection string `protobuf:"bytes,1,opt,name=collection" json:"collection,omitempty"` -} +type CreateEntryResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func (m *DeleteCollectionRequest) Reset() { *m = DeleteCollectionRequest{} } -func (m *DeleteCollectionRequest) String() string { return proto.CompactTextString(m) } -func 
(*DeleteCollectionRequest) ProtoMessage() {} -func (*DeleteCollectionRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{24} } + Error string `protobuf:"bytes,1,opt,name=error,proto3" json:"error,omitempty"` +} -func (m *DeleteCollectionRequest) GetCollection() string { - if m != nil { - return m.Collection +func (x *CreateEntryResponse) Reset() { + *x = CreateEntryResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_filer_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return "" } -type DeleteCollectionResponse struct { +func (x *CreateEntryResponse) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *DeleteCollectionResponse) Reset() { *m = DeleteCollectionResponse{} } -func (m *DeleteCollectionResponse) String() string { return proto.CompactTextString(m) } -func (*DeleteCollectionResponse) ProtoMessage() {} -func (*DeleteCollectionResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{25} } +func (*CreateEntryResponse) ProtoMessage() {} -type StatisticsRequest struct { - Replication string `protobuf:"bytes,1,opt,name=replication" json:"replication,omitempty"` - Collection string `protobuf:"bytes,2,opt,name=collection" json:"collection,omitempty"` - Ttl string `protobuf:"bytes,3,opt,name=ttl" json:"ttl,omitempty"` +func (x *CreateEntryResponse) ProtoReflect() protoreflect.Message { + mi := &file_filer_proto_msgTypes[12] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -func (m *StatisticsRequest) Reset() { *m = StatisticsRequest{} } -func (m *StatisticsRequest) String() string { return proto.CompactTextString(m) } -func (*StatisticsRequest) ProtoMessage() {} -func (*StatisticsRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{26} } +// Deprecated: Use CreateEntryResponse.ProtoReflect.Descriptor instead. 
+func (*CreateEntryResponse) Descriptor() ([]byte, []int) { + return file_filer_proto_rawDescGZIP(), []int{12} +} -func (m *StatisticsRequest) GetReplication() string { - if m != nil { - return m.Replication +func (x *CreateEntryResponse) GetError() string { + if x != nil { + return x.Error } return "" } -func (m *StatisticsRequest) GetCollection() string { - if m != nil { - return m.Collection - } - return "" +type UpdateEntryRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Directory string `protobuf:"bytes,1,opt,name=directory,proto3" json:"directory,omitempty"` + Entry *Entry `protobuf:"bytes,2,opt,name=entry,proto3" json:"entry,omitempty"` + IsFromOtherCluster bool `protobuf:"varint,3,opt,name=is_from_other_cluster,json=isFromOtherCluster,proto3" json:"is_from_other_cluster,omitempty"` + Signatures []int32 `protobuf:"varint,4,rep,packed,name=signatures,proto3" json:"signatures,omitempty"` } -func (m *StatisticsRequest) GetTtl() string { - if m != nil { - return m.Ttl +func (x *UpdateEntryRequest) Reset() { + *x = UpdateEntryRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_filer_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return "" } -type StatisticsResponse struct { - Replication string `protobuf:"bytes,1,opt,name=replication" json:"replication,omitempty"` - Collection string `protobuf:"bytes,2,opt,name=collection" json:"collection,omitempty"` - Ttl string `protobuf:"bytes,3,opt,name=ttl" json:"ttl,omitempty"` - TotalSize uint64 `protobuf:"varint,4,opt,name=total_size,json=totalSize" json:"total_size,omitempty"` - UsedSize uint64 `protobuf:"varint,5,opt,name=used_size,json=usedSize" json:"used_size,omitempty"` - FileCount uint64 `protobuf:"varint,6,opt,name=file_count,json=fileCount" json:"file_count,omitempty"` +func (x *UpdateEntryRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *StatisticsResponse) Reset() { *m = StatisticsResponse{} } -func (m *StatisticsResponse) String() string { return proto.CompactTextString(m) } -func (*StatisticsResponse) ProtoMessage() {} -func (*StatisticsResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{27} } +func (*UpdateEntryRequest) ProtoMessage() {} -func (m *StatisticsResponse) GetReplication() string { - if m != nil { - return m.Replication +func (x *UpdateEntryRequest) ProtoReflect() protoreflect.Message { + mi := &file_filer_proto_msgTypes[13] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return "" + return mi.MessageOf(x) +} + +// Deprecated: Use UpdateEntryRequest.ProtoReflect.Descriptor instead. 
+func (*UpdateEntryRequest) Descriptor() ([]byte, []int) { + return file_filer_proto_rawDescGZIP(), []int{13} } -func (m *StatisticsResponse) GetCollection() string { - if m != nil { - return m.Collection +func (x *UpdateEntryRequest) GetDirectory() string { + if x != nil { + return x.Directory } return "" } -func (m *StatisticsResponse) GetTtl() string { - if m != nil { - return m.Ttl +func (x *UpdateEntryRequest) GetEntry() *Entry { + if x != nil { + return x.Entry } - return "" + return nil } -func (m *StatisticsResponse) GetTotalSize() uint64 { - if m != nil { - return m.TotalSize +func (x *UpdateEntryRequest) GetIsFromOtherCluster() bool { + if x != nil { + return x.IsFromOtherCluster } - return 0 + return false } -func (m *StatisticsResponse) GetUsedSize() uint64 { - if m != nil { - return m.UsedSize +func (x *UpdateEntryRequest) GetSignatures() []int32 { + if x != nil { + return x.Signatures } - return 0 + return nil +} + +type UpdateEntryResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields } -func (m *StatisticsResponse) GetFileCount() uint64 { - if m != nil { - return m.FileCount +func (x *UpdateEntryResponse) Reset() { + *x = UpdateEntryResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_filer_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return 0 } -type GetFilerConfigurationRequest struct { +func (x *UpdateEntryResponse) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *GetFilerConfigurationRequest) Reset() { *m = GetFilerConfigurationRequest{} } -func (m *GetFilerConfigurationRequest) String() string { return proto.CompactTextString(m) } -func (*GetFilerConfigurationRequest) ProtoMessage() {} -func (*GetFilerConfigurationRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{28} } +func (*UpdateEntryResponse) ProtoMessage() {} -type GetFilerConfigurationResponse struct { - Masters []string `protobuf:"bytes,1,rep,name=masters" json:"masters,omitempty"` - Replication string `protobuf:"bytes,2,opt,name=replication" json:"replication,omitempty"` - Collection string `protobuf:"bytes,3,opt,name=collection" json:"collection,omitempty"` - MaxMb uint32 `protobuf:"varint,4,opt,name=max_mb,json=maxMb" json:"max_mb,omitempty"` +func (x *UpdateEntryResponse) ProtoReflect() protoreflect.Message { + mi := &file_filer_proto_msgTypes[14] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UpdateEntryResponse.ProtoReflect.Descriptor instead. 
+func (*UpdateEntryResponse) Descriptor() ([]byte, []int) { + return file_filer_proto_rawDescGZIP(), []int{14} +} + +type AppendToEntryRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Directory string `protobuf:"bytes,1,opt,name=directory,proto3" json:"directory,omitempty"` + EntryName string `protobuf:"bytes,2,opt,name=entry_name,json=entryName,proto3" json:"entry_name,omitempty"` + Chunks []*FileChunk `protobuf:"bytes,3,rep,name=chunks,proto3" json:"chunks,omitempty"` +} + +func (x *AppendToEntryRequest) Reset() { + *x = AppendToEntryRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_filer_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AppendToEntryRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AppendToEntryRequest) ProtoMessage() {} + +func (x *AppendToEntryRequest) ProtoReflect() protoreflect.Message { + mi := &file_filer_proto_msgTypes[15] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AppendToEntryRequest.ProtoReflect.Descriptor instead. +func (*AppendToEntryRequest) Descriptor() ([]byte, []int) { + return file_filer_proto_rawDescGZIP(), []int{15} +} + +func (x *AppendToEntryRequest) GetDirectory() string { + if x != nil { + return x.Directory + } + return "" } -func (m *GetFilerConfigurationResponse) Reset() { *m = GetFilerConfigurationResponse{} } -func (m *GetFilerConfigurationResponse) String() string { return proto.CompactTextString(m) } -func (*GetFilerConfigurationResponse) ProtoMessage() {} -func (*GetFilerConfigurationResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{29} } +func (x *AppendToEntryRequest) GetEntryName() string { + if x != nil { + return x.EntryName + } + return "" +} -func (m *GetFilerConfigurationResponse) GetMasters() []string { - if m != nil { - return m.Masters +func (x *AppendToEntryRequest) GetChunks() []*FileChunk { + if x != nil { + return x.Chunks } return nil } -func (m *GetFilerConfigurationResponse) GetReplication() string { - if m != nil { - return m.Replication +type AppendToEntryResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *AppendToEntryResponse) Reset() { + *x = AppendToEntryResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_filer_proto_msgTypes[16] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AppendToEntryResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AppendToEntryResponse) ProtoMessage() {} + +func (x *AppendToEntryResponse) ProtoReflect() protoreflect.Message { + mi := &file_filer_proto_msgTypes[16] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AppendToEntryResponse.ProtoReflect.Descriptor instead. 
+func (*AppendToEntryResponse) Descriptor() ([]byte, []int) { + return file_filer_proto_rawDescGZIP(), []int{16} +} + +type DeleteEntryRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Directory string `protobuf:"bytes,1,opt,name=directory,proto3" json:"directory,omitempty"` + Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` + // bool is_directory = 3; + IsDeleteData bool `protobuf:"varint,4,opt,name=is_delete_data,json=isDeleteData,proto3" json:"is_delete_data,omitempty"` + IsRecursive bool `protobuf:"varint,5,opt,name=is_recursive,json=isRecursive,proto3" json:"is_recursive,omitempty"` + IgnoreRecursiveError bool `protobuf:"varint,6,opt,name=ignore_recursive_error,json=ignoreRecursiveError,proto3" json:"ignore_recursive_error,omitempty"` + IsFromOtherCluster bool `protobuf:"varint,7,opt,name=is_from_other_cluster,json=isFromOtherCluster,proto3" json:"is_from_other_cluster,omitempty"` + Signatures []int32 `protobuf:"varint,8,rep,packed,name=signatures,proto3" json:"signatures,omitempty"` +} + +func (x *DeleteEntryRequest) Reset() { + *x = DeleteEntryRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_filer_proto_msgTypes[17] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DeleteEntryRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DeleteEntryRequest) ProtoMessage() {} + +func (x *DeleteEntryRequest) ProtoReflect() protoreflect.Message { + mi := &file_filer_proto_msgTypes[17] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DeleteEntryRequest.ProtoReflect.Descriptor instead. 
+func (*DeleteEntryRequest) Descriptor() ([]byte, []int) { + return file_filer_proto_rawDescGZIP(), []int{17} +} + +func (x *DeleteEntryRequest) GetDirectory() string { + if x != nil { + return x.Directory + } + return "" +} + +func (x *DeleteEntryRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *DeleteEntryRequest) GetIsDeleteData() bool { + if x != nil { + return x.IsDeleteData + } + return false +} + +func (x *DeleteEntryRequest) GetIsRecursive() bool { + if x != nil { + return x.IsRecursive + } + return false +} + +func (x *DeleteEntryRequest) GetIgnoreRecursiveError() bool { + if x != nil { + return x.IgnoreRecursiveError + } + return false +} + +func (x *DeleteEntryRequest) GetIsFromOtherCluster() bool { + if x != nil { + return x.IsFromOtherCluster + } + return false +} + +func (x *DeleteEntryRequest) GetSignatures() []int32 { + if x != nil { + return x.Signatures + } + return nil +} + +type DeleteEntryResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Error string `protobuf:"bytes,1,opt,name=error,proto3" json:"error,omitempty"` +} + +func (x *DeleteEntryResponse) Reset() { + *x = DeleteEntryResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_filer_proto_msgTypes[18] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DeleteEntryResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DeleteEntryResponse) ProtoMessage() {} + +func (x *DeleteEntryResponse) ProtoReflect() protoreflect.Message { + mi := &file_filer_proto_msgTypes[18] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DeleteEntryResponse.ProtoReflect.Descriptor instead. 
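DeleteEntryRequest distinguishes deleting the metadata entry from deleting the underlying chunk data (is_delete_data), and DeleteEntryResponse reports failures through an error string in addition to the gRPC status, so callers should check both. A sketch under the same assumptions as the earlier example (plus fmt):

```go
// deleteEntry removes one entry and, with IsDeleteData, its chunk data too.
func deleteEntry(ctx context.Context, conn *grpc.ClientConn) error {
	client := filer_pb.NewSeaweedFilerClient(conn)
	resp, err := client.DeleteEntry(ctx, &filer_pb.DeleteEntryRequest{
		Directory:    "/buckets/demo", // hypothetical path
		Name:         "old.dat",
		IsDeleteData: true,  // reclaim chunks, not just the metadata
		IsRecursive:  false, // refuse to delete non-empty directories
	})
	if err != nil {
		return err
	}
	if resp.Error != "" {
		return fmt.Errorf("delete failed: %s", resp.Error)
	}
	return nil
}
```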
+func (*DeleteEntryResponse) Descriptor() ([]byte, []int) { + return file_filer_proto_rawDescGZIP(), []int{18} +} + +func (x *DeleteEntryResponse) GetError() string { + if x != nil { + return x.Error + } + return "" +} + +type AtomicRenameEntryRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + OldDirectory string `protobuf:"bytes,1,opt,name=old_directory,json=oldDirectory,proto3" json:"old_directory,omitempty"` + OldName string `protobuf:"bytes,2,opt,name=old_name,json=oldName,proto3" json:"old_name,omitempty"` + NewDirectory string `protobuf:"bytes,3,opt,name=new_directory,json=newDirectory,proto3" json:"new_directory,omitempty"` + NewName string `protobuf:"bytes,4,opt,name=new_name,json=newName,proto3" json:"new_name,omitempty"` +} + +func (x *AtomicRenameEntryRequest) Reset() { + *x = AtomicRenameEntryRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_filer_proto_msgTypes[19] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AtomicRenameEntryRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AtomicRenameEntryRequest) ProtoMessage() {} + +func (x *AtomicRenameEntryRequest) ProtoReflect() protoreflect.Message { + mi := &file_filer_proto_msgTypes[19] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AtomicRenameEntryRequest.ProtoReflect.Descriptor instead. +func (*AtomicRenameEntryRequest) Descriptor() ([]byte, []int) { + return file_filer_proto_rawDescGZIP(), []int{19} +} + +func (x *AtomicRenameEntryRequest) GetOldDirectory() string { + if x != nil { + return x.OldDirectory + } + return "" +} + +func (x *AtomicRenameEntryRequest) GetOldName() string { + if x != nil { + return x.OldName + } + return "" +} + +func (x *AtomicRenameEntryRequest) GetNewDirectory() string { + if x != nil { + return x.NewDirectory + } + return "" +} + +func (x *AtomicRenameEntryRequest) GetNewName() string { + if x != nil { + return x.NewName + } + return "" +} + +type AtomicRenameEntryResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *AtomicRenameEntryResponse) Reset() { + *x = AtomicRenameEntryResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_filer_proto_msgTypes[20] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AtomicRenameEntryResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AtomicRenameEntryResponse) ProtoMessage() {} + +func (x *AtomicRenameEntryResponse) ProtoReflect() protoreflect.Message { + mi := &file_filer_proto_msgTypes[20] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AtomicRenameEntryResponse.ProtoReflect.Descriptor instead. 
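AtomicRenameEntryRequest carries both halves of a move, old and new directory plus name, so the filer can perform the rename as one transaction; the response is empty and failure is signaled through the gRPC status alone. A sketch, same assumptions as above:

```go
// renameEntry moves one entry to a new directory and name atomically.
func renameEntry(ctx context.Context, conn *grpc.ClientConn) error {
	client := filer_pb.NewSeaweedFilerClient(conn)
	_, err := client.AtomicRenameEntry(ctx, &filer_pb.AtomicRenameEntryRequest{
		OldDirectory: "/buckets/demo", // hypothetical paths throughout
		OldName:      "draft.txt",
		NewDirectory: "/buckets/demo/archive",
		NewName:      "final.txt",
	})
	return err
}
```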
+func (*AtomicRenameEntryResponse) Descriptor() ([]byte, []int) { + return file_filer_proto_rawDescGZIP(), []int{20} +} + +type AssignVolumeRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Count int32 `protobuf:"varint,1,opt,name=count,proto3" json:"count,omitempty"` + Collection string `protobuf:"bytes,2,opt,name=collection,proto3" json:"collection,omitempty"` + Replication string `protobuf:"bytes,3,opt,name=replication,proto3" json:"replication,omitempty"` + TtlSec int32 `protobuf:"varint,4,opt,name=ttl_sec,json=ttlSec,proto3" json:"ttl_sec,omitempty"` + DataCenter string `protobuf:"bytes,5,opt,name=data_center,json=dataCenter,proto3" json:"data_center,omitempty"` + Path string `protobuf:"bytes,6,opt,name=path,proto3" json:"path,omitempty"` + Rack string `protobuf:"bytes,7,opt,name=rack,proto3" json:"rack,omitempty"` + DiskType string `protobuf:"bytes,8,opt,name=disk_type,json=diskType,proto3" json:"disk_type,omitempty"` +} + +func (x *AssignVolumeRequest) Reset() { + *x = AssignVolumeRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_filer_proto_msgTypes[21] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AssignVolumeRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AssignVolumeRequest) ProtoMessage() {} + +func (x *AssignVolumeRequest) ProtoReflect() protoreflect.Message { + mi := &file_filer_proto_msgTypes[21] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AssignVolumeRequest.ProtoReflect.Descriptor instead. 
+func (*AssignVolumeRequest) Descriptor() ([]byte, []int) { + return file_filer_proto_rawDescGZIP(), []int{21} +} + +func (x *AssignVolumeRequest) GetCount() int32 { + if x != nil { + return x.Count + } + return 0 +} + +func (x *AssignVolumeRequest) GetCollection() string { + if x != nil { + return x.Collection + } + return "" +} + +func (x *AssignVolumeRequest) GetReplication() string { + if x != nil { + return x.Replication + } + return "" +} + +func (x *AssignVolumeRequest) GetTtlSec() int32 { + if x != nil { + return x.TtlSec + } + return 0 +} + +func (x *AssignVolumeRequest) GetDataCenter() string { + if x != nil { + return x.DataCenter + } + return "" +} + +func (x *AssignVolumeRequest) GetPath() string { + if x != nil { + return x.Path + } + return "" +} + +func (x *AssignVolumeRequest) GetRack() string { + if x != nil { + return x.Rack + } + return "" +} + +func (x *AssignVolumeRequest) GetDiskType() string { + if x != nil { + return x.DiskType + } + return "" +} + +type AssignVolumeResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + FileId string `protobuf:"bytes,1,opt,name=file_id,json=fileId,proto3" json:"file_id,omitempty"` + Url string `protobuf:"bytes,2,opt,name=url,proto3" json:"url,omitempty"` + PublicUrl string `protobuf:"bytes,3,opt,name=public_url,json=publicUrl,proto3" json:"public_url,omitempty"` + Count int32 `protobuf:"varint,4,opt,name=count,proto3" json:"count,omitempty"` + Auth string `protobuf:"bytes,5,opt,name=auth,proto3" json:"auth,omitempty"` + Collection string `protobuf:"bytes,6,opt,name=collection,proto3" json:"collection,omitempty"` + Replication string `protobuf:"bytes,7,opt,name=replication,proto3" json:"replication,omitempty"` + Error string `protobuf:"bytes,8,opt,name=error,proto3" json:"error,omitempty"` +} + +func (x *AssignVolumeResponse) Reset() { + *x = AssignVolumeResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_filer_proto_msgTypes[22] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AssignVolumeResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AssignVolumeResponse) ProtoMessage() {} + +func (x *AssignVolumeResponse) ProtoReflect() protoreflect.Message { + mi := &file_filer_proto_msgTypes[22] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AssignVolumeResponse.ProtoReflect.Descriptor instead. 
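AssignVolumeRequest lets a writer ask the filer for a writable file id, with placement hints such as collection, replication, data center, rack, and the new disk_type field; AssignVolumeResponse returns the file id, the volume server url, and an auth token for the upload, plus its own error string. A hedged sketch (errors import assumed, all values hypothetical):

```go
// assignOne requests a single writable file id for an upload.
func assignOne(ctx context.Context, conn *grpc.ClientConn) (fid, url, auth string, err error) {
	client := filer_pb.NewSeaweedFilerClient(conn)
	resp, err := client.AssignVolume(ctx, &filer_pb.AssignVolumeRequest{
		Count:       1,
		Collection:  "demo", // hypothetical collection
		Replication: "001",  // hypothetical replication setting
		DiskType:    "",     // empty selects the default disk type
	})
	if err != nil {
		return "", "", "", err
	}
	if resp.Error != "" {
		return "", "", "", errors.New(resp.Error)
	}
	return resp.FileId, resp.Url, resp.Auth, nil
}
```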
+func (*AssignVolumeResponse) Descriptor() ([]byte, []int) { + return file_filer_proto_rawDescGZIP(), []int{22} +} + +func (x *AssignVolumeResponse) GetFileId() string { + if x != nil { + return x.FileId + } + return "" +} + +func (x *AssignVolumeResponse) GetUrl() string { + if x != nil { + return x.Url + } + return "" +} + +func (x *AssignVolumeResponse) GetPublicUrl() string { + if x != nil { + return x.PublicUrl + } + return "" +} + +func (x *AssignVolumeResponse) GetCount() int32 { + if x != nil { + return x.Count + } + return 0 +} + +func (x *AssignVolumeResponse) GetAuth() string { + if x != nil { + return x.Auth + } + return "" +} + +func (x *AssignVolumeResponse) GetCollection() string { + if x != nil { + return x.Collection + } + return "" +} + +func (x *AssignVolumeResponse) GetReplication() string { + if x != nil { + return x.Replication + } + return "" +} + +func (x *AssignVolumeResponse) GetError() string { + if x != nil { + return x.Error + } + return "" +} + +type LookupVolumeRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + VolumeIds []string `protobuf:"bytes,1,rep,name=volume_ids,json=volumeIds,proto3" json:"volume_ids,omitempty"` +} + +func (x *LookupVolumeRequest) Reset() { + *x = LookupVolumeRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_filer_proto_msgTypes[23] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *LookupVolumeRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LookupVolumeRequest) ProtoMessage() {} + +func (x *LookupVolumeRequest) ProtoReflect() protoreflect.Message { + mi := &file_filer_proto_msgTypes[23] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use LookupVolumeRequest.ProtoReflect.Descriptor instead. +func (*LookupVolumeRequest) Descriptor() ([]byte, []int) { + return file_filer_proto_rawDescGZIP(), []int{23} +} + +func (x *LookupVolumeRequest) GetVolumeIds() []string { + if x != nil { + return x.VolumeIds + } + return nil +} + +type Locations struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Locations []*Location `protobuf:"bytes,1,rep,name=locations,proto3" json:"locations,omitempty"` +} + +func (x *Locations) Reset() { + *x = Locations{} + if protoimpl.UnsafeEnabled { + mi := &file_filer_proto_msgTypes[24] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Locations) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Locations) ProtoMessage() {} + +func (x *Locations) ProtoReflect() protoreflect.Message { + mi := &file_filer_proto_msgTypes[24] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Locations.ProtoReflect.Descriptor instead. 
+func (*Locations) Descriptor() ([]byte, []int) { + return file_filer_proto_rawDescGZIP(), []int{24} +} + +func (x *Locations) GetLocations() []*Location { + if x != nil { + return x.Locations + } + return nil +} + +type Location struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Url string `protobuf:"bytes,1,opt,name=url,proto3" json:"url,omitempty"` + PublicUrl string `protobuf:"bytes,2,opt,name=public_url,json=publicUrl,proto3" json:"public_url,omitempty"` +} + +func (x *Location) Reset() { + *x = Location{} + if protoimpl.UnsafeEnabled { + mi := &file_filer_proto_msgTypes[25] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Location) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Location) ProtoMessage() {} + +func (x *Location) ProtoReflect() protoreflect.Message { + mi := &file_filer_proto_msgTypes[25] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Location.ProtoReflect.Descriptor instead. +func (*Location) Descriptor() ([]byte, []int) { + return file_filer_proto_rawDescGZIP(), []int{25} +} + +func (x *Location) GetUrl() string { + if x != nil { + return x.Url + } + return "" +} + +func (x *Location) GetPublicUrl() string { + if x != nil { + return x.PublicUrl + } + return "" +} + +type LookupVolumeResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + LocationsMap map[string]*Locations `protobuf:"bytes,1,rep,name=locations_map,json=locationsMap,proto3" json:"locations_map,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *LookupVolumeResponse) Reset() { + *x = LookupVolumeResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_filer_proto_msgTypes[26] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *LookupVolumeResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LookupVolumeResponse) ProtoMessage() {} + +func (x *LookupVolumeResponse) ProtoReflect() protoreflect.Message { + mi := &file_filer_proto_msgTypes[26] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use LookupVolumeResponse.ProtoReflect.Descriptor instead. 
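LookupVolumeResponse maps each requested volume id to a Locations message, which in turn lists the url/public_url of every replica. A sketch that resolves one volume id to its replica urls; the generated getters are nil-safe, so a missing map entry simply yields no locations:

```go
// replicaUrls resolves a volume id to the urls of its replicas.
func replicaUrls(ctx context.Context, conn *grpc.ClientConn, volumeId string) ([]string, error) {
	client := filer_pb.NewSeaweedFilerClient(conn)
	resp, err := client.LookupVolume(ctx, &filer_pb.LookupVolumeRequest{
		VolumeIds: []string{volumeId},
	})
	if err != nil {
		return nil, err
	}
	var urls []string
	for _, loc := range resp.LocationsMap[volumeId].GetLocations() {
		urls = append(urls, loc.Url)
	}
	return urls, nil
}
```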
+func (*LookupVolumeResponse) Descriptor() ([]byte, []int) { + return file_filer_proto_rawDescGZIP(), []int{26} +} + +func (x *LookupVolumeResponse) GetLocationsMap() map[string]*Locations { + if x != nil { + return x.LocationsMap + } + return nil +} + +type Collection struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` +} + +func (x *Collection) Reset() { + *x = Collection{} + if protoimpl.UnsafeEnabled { + mi := &file_filer_proto_msgTypes[27] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Collection) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Collection) ProtoMessage() {} + +func (x *Collection) ProtoReflect() protoreflect.Message { + mi := &file_filer_proto_msgTypes[27] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Collection.ProtoReflect.Descriptor instead. +func (*Collection) Descriptor() ([]byte, []int) { + return file_filer_proto_rawDescGZIP(), []int{27} +} + +func (x *Collection) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +type CollectionListRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + IncludeNormalVolumes bool `protobuf:"varint,1,opt,name=include_normal_volumes,json=includeNormalVolumes,proto3" json:"include_normal_volumes,omitempty"` + IncludeEcVolumes bool `protobuf:"varint,2,opt,name=include_ec_volumes,json=includeEcVolumes,proto3" json:"include_ec_volumes,omitempty"` +} + +func (x *CollectionListRequest) Reset() { + *x = CollectionListRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_filer_proto_msgTypes[28] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CollectionListRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CollectionListRequest) ProtoMessage() {} + +func (x *CollectionListRequest) ProtoReflect() protoreflect.Message { + mi := &file_filer_proto_msgTypes[28] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CollectionListRequest.ProtoReflect.Descriptor instead. 
+func (*CollectionListRequest) Descriptor() ([]byte, []int) { + return file_filer_proto_rawDescGZIP(), []int{28} +} + +func (x *CollectionListRequest) GetIncludeNormalVolumes() bool { + if x != nil { + return x.IncludeNormalVolumes + } + return false +} + +func (x *CollectionListRequest) GetIncludeEcVolumes() bool { + if x != nil { + return x.IncludeEcVolumes + } + return false +} + +type CollectionListResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Collections []*Collection `protobuf:"bytes,1,rep,name=collections,proto3" json:"collections,omitempty"` +} + +func (x *CollectionListResponse) Reset() { + *x = CollectionListResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_filer_proto_msgTypes[29] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CollectionListResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CollectionListResponse) ProtoMessage() {} + +func (x *CollectionListResponse) ProtoReflect() protoreflect.Message { + mi := &file_filer_proto_msgTypes[29] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CollectionListResponse.ProtoReflect.Descriptor instead. +func (*CollectionListResponse) Descriptor() ([]byte, []int) { + return file_filer_proto_rawDescGZIP(), []int{29} +} + +func (x *CollectionListResponse) GetCollections() []*Collection { + if x != nil { + return x.Collections + } + return nil +} + +type DeleteCollectionRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Collection string `protobuf:"bytes,1,opt,name=collection,proto3" json:"collection,omitempty"` +} + +func (x *DeleteCollectionRequest) Reset() { + *x = DeleteCollectionRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_filer_proto_msgTypes[30] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DeleteCollectionRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DeleteCollectionRequest) ProtoMessage() {} + +func (x *DeleteCollectionRequest) ProtoReflect() protoreflect.Message { + mi := &file_filer_proto_msgTypes[30] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DeleteCollectionRequest.ProtoReflect.Descriptor instead. 
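CollectionListRequest selects which volume kinds to scan, normal and/or erasure-coded, and the response is a flat list of collection names. A sketch, same assumptions as the earlier examples:

```go
// collectionNames lists all collections known to the cluster.
func collectionNames(ctx context.Context, conn *grpc.ClientConn) ([]string, error) {
	client := filer_pb.NewSeaweedFilerClient(conn)
	resp, err := client.CollectionList(ctx, &filer_pb.CollectionListRequest{
		IncludeNormalVolumes: true,
		IncludeEcVolumes:     true, // also report erasure-coded volumes
	})
	if err != nil {
		return nil, err
	}
	names := make([]string, 0, len(resp.Collections))
	for _, c := range resp.Collections {
		names = append(names, c.Name)
	}
	return names, nil
}
```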
+func (*DeleteCollectionRequest) Descriptor() ([]byte, []int) { + return file_filer_proto_rawDescGZIP(), []int{30} +} + +func (x *DeleteCollectionRequest) GetCollection() string { + if x != nil { + return x.Collection + } + return "" +} + +type DeleteCollectionResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *DeleteCollectionResponse) Reset() { + *x = DeleteCollectionResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_filer_proto_msgTypes[31] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DeleteCollectionResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DeleteCollectionResponse) ProtoMessage() {} + +func (x *DeleteCollectionResponse) ProtoReflect() protoreflect.Message { + mi := &file_filer_proto_msgTypes[31] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DeleteCollectionResponse.ProtoReflect.Descriptor instead. +func (*DeleteCollectionResponse) Descriptor() ([]byte, []int) { + return file_filer_proto_rawDescGZIP(), []int{31} +} + +type StatisticsRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Replication string `protobuf:"bytes,1,opt,name=replication,proto3" json:"replication,omitempty"` + Collection string `protobuf:"bytes,2,opt,name=collection,proto3" json:"collection,omitempty"` + Ttl string `protobuf:"bytes,3,opt,name=ttl,proto3" json:"ttl,omitempty"` + DiskType string `protobuf:"bytes,4,opt,name=disk_type,json=diskType,proto3" json:"disk_type,omitempty"` +} + +func (x *StatisticsRequest) Reset() { + *x = StatisticsRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_filer_proto_msgTypes[32] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *StatisticsRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StatisticsRequest) ProtoMessage() {} + +func (x *StatisticsRequest) ProtoReflect() protoreflect.Message { + mi := &file_filer_proto_msgTypes[32] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StatisticsRequest.ProtoReflect.Descriptor instead. 
+func (*StatisticsRequest) Descriptor() ([]byte, []int) { + return file_filer_proto_rawDescGZIP(), []int{32} +} + +func (x *StatisticsRequest) GetReplication() string { + if x != nil { + return x.Replication + } + return "" +} + +func (x *StatisticsRequest) GetCollection() string { + if x != nil { + return x.Collection + } + return "" +} + +func (x *StatisticsRequest) GetTtl() string { + if x != nil { + return x.Ttl + } + return "" +} + +func (x *StatisticsRequest) GetDiskType() string { + if x != nil { + return x.DiskType + } + return "" +} + +type StatisticsResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + TotalSize uint64 `protobuf:"varint,4,opt,name=total_size,json=totalSize,proto3" json:"total_size,omitempty"` + UsedSize uint64 `protobuf:"varint,5,opt,name=used_size,json=usedSize,proto3" json:"used_size,omitempty"` + FileCount uint64 `protobuf:"varint,6,opt,name=file_count,json=fileCount,proto3" json:"file_count,omitempty"` +} + +func (x *StatisticsResponse) Reset() { + *x = StatisticsResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_filer_proto_msgTypes[33] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *StatisticsResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StatisticsResponse) ProtoMessage() {} + +func (x *StatisticsResponse) ProtoReflect() protoreflect.Message { + mi := &file_filer_proto_msgTypes[33] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StatisticsResponse.ProtoReflect.Descriptor instead. +func (*StatisticsResponse) Descriptor() ([]byte, []int) { + return file_filer_proto_rawDescGZIP(), []int{33} +} + +func (x *StatisticsResponse) GetTotalSize() uint64 { + if x != nil { + return x.TotalSize + } + return 0 +} + +func (x *StatisticsResponse) GetUsedSize() uint64 { + if x != nil { + return x.UsedSize + } + return 0 +} + +func (x *StatisticsResponse) GetFileCount() uint64 { + if x != nil { + return x.FileCount + } + return 0 +} + +type GetFilerConfigurationRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *GetFilerConfigurationRequest) Reset() { + *x = GetFilerConfigurationRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_filer_proto_msgTypes[34] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetFilerConfigurationRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetFilerConfigurationRequest) ProtoMessage() {} + +func (x *GetFilerConfigurationRequest) ProtoReflect() protoreflect.Message { + mi := &file_filer_proto_msgTypes[34] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetFilerConfigurationRequest.ProtoReflect.Descriptor instead. 
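StatisticsRequest scopes the query by replication, collection, ttl, and now disk_type; the response reports total and used sizes in bytes plus a file count. A sketch:

```go
// printUsage reports capacity for one replication/collection combination.
func printUsage(ctx context.Context, conn *grpc.ClientConn) error {
	client := filer_pb.NewSeaweedFilerClient(conn)
	resp, err := client.Statistics(ctx, &filer_pb.StatisticsRequest{
		Replication: "000",  // hypothetical scope
		Collection:  "demo", // hypothetical collection
	})
	if err != nil {
		return err
	}
	fmt.Printf("used %d of %d bytes across %d files\n",
		resp.UsedSize, resp.TotalSize, resp.FileCount)
	return nil
}
```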
+func (*GetFilerConfigurationRequest) Descriptor() ([]byte, []int) { + return file_filer_proto_rawDescGZIP(), []int{34} +} + +type GetFilerConfigurationResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Masters []string `protobuf:"bytes,1,rep,name=masters,proto3" json:"masters,omitempty"` + Replication string `protobuf:"bytes,2,opt,name=replication,proto3" json:"replication,omitempty"` + Collection string `protobuf:"bytes,3,opt,name=collection,proto3" json:"collection,omitempty"` + MaxMb uint32 `protobuf:"varint,4,opt,name=max_mb,json=maxMb,proto3" json:"max_mb,omitempty"` + DirBuckets string `protobuf:"bytes,5,opt,name=dir_buckets,json=dirBuckets,proto3" json:"dir_buckets,omitempty"` + Cipher bool `protobuf:"varint,7,opt,name=cipher,proto3" json:"cipher,omitempty"` + Signature int32 `protobuf:"varint,8,opt,name=signature,proto3" json:"signature,omitempty"` + MetricsAddress string `protobuf:"bytes,9,opt,name=metrics_address,json=metricsAddress,proto3" json:"metrics_address,omitempty"` + MetricsIntervalSec int32 `protobuf:"varint,10,opt,name=metrics_interval_sec,json=metricsIntervalSec,proto3" json:"metrics_interval_sec,omitempty"` +} + +func (x *GetFilerConfigurationResponse) Reset() { + *x = GetFilerConfigurationResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_filer_proto_msgTypes[35] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetFilerConfigurationResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetFilerConfigurationResponse) ProtoMessage() {} + +func (x *GetFilerConfigurationResponse) ProtoReflect() protoreflect.Message { + mi := &file_filer_proto_msgTypes[35] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetFilerConfigurationResponse.ProtoReflect.Descriptor instead. 
+func (*GetFilerConfigurationResponse) Descriptor() ([]byte, []int) { + return file_filer_proto_rawDescGZIP(), []int{35} +} + +func (x *GetFilerConfigurationResponse) GetMasters() []string { + if x != nil { + return x.Masters + } + return nil +} + +func (x *GetFilerConfigurationResponse) GetReplication() string { + if x != nil { + return x.Replication + } + return "" +} + +func (x *GetFilerConfigurationResponse) GetCollection() string { + if x != nil { + return x.Collection + } + return "" +} + +func (x *GetFilerConfigurationResponse) GetMaxMb() uint32 { + if x != nil { + return x.MaxMb + } + return 0 +} + +func (x *GetFilerConfigurationResponse) GetDirBuckets() string { + if x != nil { + return x.DirBuckets + } + return "" +} + +func (x *GetFilerConfigurationResponse) GetCipher() bool { + if x != nil { + return x.Cipher + } + return false +} + +func (x *GetFilerConfigurationResponse) GetSignature() int32 { + if x != nil { + return x.Signature + } + return 0 +} + +func (x *GetFilerConfigurationResponse) GetMetricsAddress() string { + if x != nil { + return x.MetricsAddress + } + return "" +} + +func (x *GetFilerConfigurationResponse) GetMetricsIntervalSec() int32 { + if x != nil { + return x.MetricsIntervalSec + } + return 0 +} + +type SubscribeMetadataRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ClientName string `protobuf:"bytes,1,opt,name=client_name,json=clientName,proto3" json:"client_name,omitempty"` + PathPrefix string `protobuf:"bytes,2,opt,name=path_prefix,json=pathPrefix,proto3" json:"path_prefix,omitempty"` + SinceNs int64 `protobuf:"varint,3,opt,name=since_ns,json=sinceNs,proto3" json:"since_ns,omitempty"` + Signature int32 `protobuf:"varint,4,opt,name=signature,proto3" json:"signature,omitempty"` +} + +func (x *SubscribeMetadataRequest) Reset() { + *x = SubscribeMetadataRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_filer_proto_msgTypes[36] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SubscribeMetadataRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SubscribeMetadataRequest) ProtoMessage() {} + +func (x *SubscribeMetadataRequest) ProtoReflect() protoreflect.Message { + mi := &file_filer_proto_msgTypes[36] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SubscribeMetadataRequest.ProtoReflect.Descriptor instead. 
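GetFilerConfigurationRequest is empty; the response is how clients discover the filer's master list, default replication and collection, chunking threshold (max_mb), buckets directory, cipher setting, and metrics endpoint. A sketch:

```go
// showFilerConfig dumps the filer's advertised configuration.
func showFilerConfig(ctx context.Context, conn *grpc.ClientConn) error {
	client := filer_pb.NewSeaweedFilerClient(conn)
	resp, err := client.GetFilerConfiguration(ctx, &filer_pb.GetFilerConfigurationRequest{})
	if err != nil {
		return err
	}
	fmt.Printf("masters=%v maxMb=%d bucketsDir=%s cipher=%v metrics=%s/%ds\n",
		resp.Masters, resp.MaxMb, resp.DirBuckets, resp.Cipher,
		resp.MetricsAddress, resp.MetricsIntervalSec)
	return nil
}
```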
+func (*SubscribeMetadataRequest) Descriptor() ([]byte, []int) { + return file_filer_proto_rawDescGZIP(), []int{36} +} + +func (x *SubscribeMetadataRequest) GetClientName() string { + if x != nil { + return x.ClientName + } + return "" +} + +func (x *SubscribeMetadataRequest) GetPathPrefix() string { + if x != nil { + return x.PathPrefix + } + return "" +} + +func (x *SubscribeMetadataRequest) GetSinceNs() int64 { + if x != nil { + return x.SinceNs + } + return 0 +} + +func (x *SubscribeMetadataRequest) GetSignature() int32 { + if x != nil { + return x.Signature + } + return 0 +} + +type SubscribeMetadataResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Directory string `protobuf:"bytes,1,opt,name=directory,proto3" json:"directory,omitempty"` + EventNotification *EventNotification `protobuf:"bytes,2,opt,name=event_notification,json=eventNotification,proto3" json:"event_notification,omitempty"` + TsNs int64 `protobuf:"varint,3,opt,name=ts_ns,json=tsNs,proto3" json:"ts_ns,omitempty"` +} + +func (x *SubscribeMetadataResponse) Reset() { + *x = SubscribeMetadataResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_filer_proto_msgTypes[37] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SubscribeMetadataResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SubscribeMetadataResponse) ProtoMessage() {} + +func (x *SubscribeMetadataResponse) ProtoReflect() protoreflect.Message { + mi := &file_filer_proto_msgTypes[37] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SubscribeMetadataResponse.ProtoReflect.Descriptor instead. +func (*SubscribeMetadataResponse) Descriptor() ([]byte, []int) { + return file_filer_proto_rawDescGZIP(), []int{37} +} + +func (x *SubscribeMetadataResponse) GetDirectory() string { + if x != nil { + return x.Directory + } + return "" +} + +func (x *SubscribeMetadataResponse) GetEventNotification() *EventNotification { + if x != nil { + return x.EventNotification + } + return nil +} + +func (x *SubscribeMetadataResponse) GetTsNs() int64 { + if x != nil { + return x.TsNs + } + return 0 +} + +type LogEntry struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + TsNs int64 `protobuf:"varint,1,opt,name=ts_ns,json=tsNs,proto3" json:"ts_ns,omitempty"` + PartitionKeyHash int32 `protobuf:"varint,2,opt,name=partition_key_hash,json=partitionKeyHash,proto3" json:"partition_key_hash,omitempty"` + Data []byte `protobuf:"bytes,3,opt,name=data,proto3" json:"data,omitempty"` +} + +func (x *LogEntry) Reset() { + *x = LogEntry{} + if protoimpl.UnsafeEnabled { + mi := &file_filer_proto_msgTypes[38] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *LogEntry) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LogEntry) ProtoMessage() {} + +func (x *LogEntry) ProtoReflect() protoreflect.Message { + mi := &file_filer_proto_msgTypes[38] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use LogEntry.ProtoReflect.Descriptor instead. 
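SubscribeMetadataRequest opens a server-streaming subscription: the client names itself, narrows by path_prefix, and resumes from since_ns; each SubscribeMetadataResponse pairs a directory with an EventNotification and a timestamp. A hedged sketch of the receive loop, assuming the streaming client type from the generated service code:

```go
// tailMetadata follows filer metadata changes under a path prefix.
func tailMetadata(ctx context.Context, conn *grpc.ClientConn) error {
	client := filer_pb.NewSeaweedFilerClient(conn)
	stream, err := client.SubscribeMetadata(ctx, &filer_pb.SubscribeMetadataRequest{
		ClientName: "example-tailer", // hypothetical subscriber name
		PathPrefix: "/buckets/",
		SinceNs:    0, // replay from the start of the retained log
	})
	if err != nil {
		return err
	}
	for {
		resp, err := stream.Recv()
		if err != nil {
			return err // io.EOF when the server closes the stream
		}
		fmt.Printf("%d %s %v\n", resp.TsNs, resp.Directory, resp.EventNotification)
	}
}
```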
+func (*LogEntry) Descriptor() ([]byte, []int) { + return file_filer_proto_rawDescGZIP(), []int{38} +} + +func (x *LogEntry) GetTsNs() int64 { + if x != nil { + return x.TsNs + } + return 0 +} + +func (x *LogEntry) GetPartitionKeyHash() int32 { + if x != nil { + return x.PartitionKeyHash + } + return 0 +} + +func (x *LogEntry) GetData() []byte { + if x != nil { + return x.Data + } + return nil +} + +type KeepConnectedRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + GrpcPort uint32 `protobuf:"varint,2,opt,name=grpc_port,json=grpcPort,proto3" json:"grpc_port,omitempty"` + Resources []string `protobuf:"bytes,3,rep,name=resources,proto3" json:"resources,omitempty"` +} + +func (x *KeepConnectedRequest) Reset() { + *x = KeepConnectedRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_filer_proto_msgTypes[39] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *KeepConnectedRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*KeepConnectedRequest) ProtoMessage() {} + +func (x *KeepConnectedRequest) ProtoReflect() protoreflect.Message { + mi := &file_filer_proto_msgTypes[39] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use KeepConnectedRequest.ProtoReflect.Descriptor instead. +func (*KeepConnectedRequest) Descriptor() ([]byte, []int) { + return file_filer_proto_rawDescGZIP(), []int{39} +} + +func (x *KeepConnectedRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *KeepConnectedRequest) GetGrpcPort() uint32 { + if x != nil { + return x.GrpcPort + } + return 0 +} + +func (x *KeepConnectedRequest) GetResources() []string { + if x != nil { + return x.Resources + } + return nil +} + +type KeepConnectedResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *KeepConnectedResponse) Reset() { + *x = KeepConnectedResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_filer_proto_msgTypes[40] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *KeepConnectedResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*KeepConnectedResponse) ProtoMessage() {} + +func (x *KeepConnectedResponse) ProtoReflect() protoreflect.Message { + mi := &file_filer_proto_msgTypes[40] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use KeepConnectedResponse.ProtoReflect.Descriptor instead. 
+func (*KeepConnectedResponse) Descriptor() ([]byte, []int) { + return file_filer_proto_rawDescGZIP(), []int{40} +} + +type LocateBrokerRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Resource string `protobuf:"bytes,1,opt,name=resource,proto3" json:"resource,omitempty"` +} + +func (x *LocateBrokerRequest) Reset() { + *x = LocateBrokerRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_filer_proto_msgTypes[41] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *LocateBrokerRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LocateBrokerRequest) ProtoMessage() {} + +func (x *LocateBrokerRequest) ProtoReflect() protoreflect.Message { + mi := &file_filer_proto_msgTypes[41] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use LocateBrokerRequest.ProtoReflect.Descriptor instead. +func (*LocateBrokerRequest) Descriptor() ([]byte, []int) { + return file_filer_proto_rawDescGZIP(), []int{41} +} + +func (x *LocateBrokerRequest) GetResource() string { + if x != nil { + return x.Resource + } + return "" +} + +type LocateBrokerResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Found bool `protobuf:"varint,1,opt,name=found,proto3" json:"found,omitempty"` + Resources []*LocateBrokerResponse_Resource `protobuf:"bytes,2,rep,name=resources,proto3" json:"resources,omitempty"` +} + +func (x *LocateBrokerResponse) Reset() { + *x = LocateBrokerResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_filer_proto_msgTypes[42] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *LocateBrokerResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LocateBrokerResponse) ProtoMessage() {} + +func (x *LocateBrokerResponse) ProtoReflect() protoreflect.Message { + mi := &file_filer_proto_msgTypes[42] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use LocateBrokerResponse.ProtoReflect.Descriptor instead. 
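LocateBrokerRequest asks which message-queue broker owns a resource; per the comment on the nested Resource type further down, a hit returns the exact address while a miss returns the full list of existing brokers. A sketch:

```go
// findBroker locates the broker responsible for a resource, if any.
func findBroker(ctx context.Context, conn *grpc.ClientConn, resource string) error {
	client := filer_pb.NewSeaweedFilerClient(conn)
	resp, err := client.LocateBroker(ctx, &filer_pb.LocateBrokerRequest{Resource: resource})
	if err != nil {
		return err
	}
	if !resp.Found {
		fmt.Println("no exact owner; response lists all known brokers instead")
	}
	for _, r := range resp.Resources {
		fmt.Println(r.GrpcAddresses, r.ResourceCount)
	}
	return nil
}
```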
+func (*LocateBrokerResponse) Descriptor() ([]byte, []int) { + return file_filer_proto_rawDescGZIP(), []int{42} +} + +func (x *LocateBrokerResponse) GetFound() bool { + if x != nil { + return x.Found + } + return false +} + +func (x *LocateBrokerResponse) GetResources() []*LocateBrokerResponse_Resource { + if x != nil { + return x.Resources + } + return nil +} + +// Key-Value operations +type KvGetRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` +} + +func (x *KvGetRequest) Reset() { + *x = KvGetRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_filer_proto_msgTypes[43] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *KvGetRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*KvGetRequest) ProtoMessage() {} + +func (x *KvGetRequest) ProtoReflect() protoreflect.Message { + mi := &file_filer_proto_msgTypes[43] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use KvGetRequest.ProtoReflect.Descriptor instead. +func (*KvGetRequest) Descriptor() ([]byte, []int) { + return file_filer_proto_rawDescGZIP(), []int{43} +} + +func (x *KvGetRequest) GetKey() []byte { + if x != nil { + return x.Key + } + return nil +} + +type KvGetResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Value []byte `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` + Error string `protobuf:"bytes,2,opt,name=error,proto3" json:"error,omitempty"` +} + +func (x *KvGetResponse) Reset() { + *x = KvGetResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_filer_proto_msgTypes[44] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *KvGetResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*KvGetResponse) ProtoMessage() {} + +func (x *KvGetResponse) ProtoReflect() protoreflect.Message { + mi := &file_filer_proto_msgTypes[44] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use KvGetResponse.ProtoReflect.Descriptor instead. 
+func (*KvGetResponse) Descriptor() ([]byte, []int) { + return file_filer_proto_rawDescGZIP(), []int{44} +} + +func (x *KvGetResponse) GetValue() []byte { + if x != nil { + return x.Value + } + return nil +} + +func (x *KvGetResponse) GetError() string { + if x != nil { + return x.Error + } + return "" +} + +type KvPutRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` +} + +func (x *KvPutRequest) Reset() { + *x = KvPutRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_filer_proto_msgTypes[45] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *KvPutRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*KvPutRequest) ProtoMessage() {} + +func (x *KvPutRequest) ProtoReflect() protoreflect.Message { + mi := &file_filer_proto_msgTypes[45] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use KvPutRequest.ProtoReflect.Descriptor instead. +func (*KvPutRequest) Descriptor() ([]byte, []int) { + return file_filer_proto_rawDescGZIP(), []int{45} +} + +func (x *KvPutRequest) GetKey() []byte { + if x != nil { + return x.Key + } + return nil +} + +func (x *KvPutRequest) GetValue() []byte { + if x != nil { + return x.Value + } + return nil +} + +type KvPutResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Error string `protobuf:"bytes,1,opt,name=error,proto3" json:"error,omitempty"` +} + +func (x *KvPutResponse) Reset() { + *x = KvPutResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_filer_proto_msgTypes[46] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *KvPutResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*KvPutResponse) ProtoMessage() {} + +func (x *KvPutResponse) ProtoReflect() protoreflect.Message { + mi := &file_filer_proto_msgTypes[46] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use KvPutResponse.ProtoReflect.Descriptor instead. 
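KvPutRequest/KvGetRequest expose a small byte-oriented key-value API on the filer store; both responses carry an error string that must be checked alongside the gRPC error. A round-trip sketch (key and value hypothetical):

```go
// kvRoundTrip writes a key and reads it back.
func kvRoundTrip(ctx context.Context, conn *grpc.ClientConn) ([]byte, error) {
	client := filer_pb.NewSeaweedFilerClient(conn)
	put, err := client.KvPut(ctx, &filer_pb.KvPutRequest{
		Key:   []byte("example-key"),
		Value: []byte("example-value"),
	})
	if err != nil {
		return nil, err
	}
	if put.Error != "" {
		return nil, errors.New(put.Error)
	}
	get, err := client.KvGet(ctx, &filer_pb.KvGetRequest{Key: []byte("example-key")})
	if err != nil {
		return nil, err
	}
	if get.Error != "" {
		return nil, errors.New(get.Error)
	}
	return get.Value, nil
}
```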
+func (*KvPutResponse) Descriptor() ([]byte, []int) { + return file_filer_proto_rawDescGZIP(), []int{46} +} + +func (x *KvPutResponse) GetError() string { + if x != nil { + return x.Error + } + return "" +} + +// path-based configurations +type FilerConf struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Version int32 `protobuf:"varint,1,opt,name=version,proto3" json:"version,omitempty"` + Locations []*FilerConf_PathConf `protobuf:"bytes,2,rep,name=locations,proto3" json:"locations,omitempty"` +} + +func (x *FilerConf) Reset() { + *x = FilerConf{} + if protoimpl.UnsafeEnabled { + mi := &file_filer_proto_msgTypes[47] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FilerConf) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FilerConf) ProtoMessage() {} + +func (x *FilerConf) ProtoReflect() protoreflect.Message { + mi := &file_filer_proto_msgTypes[47] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FilerConf.ProtoReflect.Descriptor instead. +func (*FilerConf) Descriptor() ([]byte, []int) { + return file_filer_proto_rawDescGZIP(), []int{47} +} + +func (x *FilerConf) GetVersion() int32 { + if x != nil { + return x.Version + } + return 0 +} + +func (x *FilerConf) GetLocations() []*FilerConf_PathConf { + if x != nil { + return x.Locations + } + return nil +} + +// if found, send the exact address +// if not found, send the full list of existing brokers +type LocateBrokerResponse_Resource struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + GrpcAddresses string `protobuf:"bytes,1,opt,name=grpc_addresses,json=grpcAddresses,proto3" json:"grpc_addresses,omitempty"` + ResourceCount int32 `protobuf:"varint,2,opt,name=resource_count,json=resourceCount,proto3" json:"resource_count,omitempty"` +} + +func (x *LocateBrokerResponse_Resource) Reset() { + *x = LocateBrokerResponse_Resource{} + if protoimpl.UnsafeEnabled { + mi := &file_filer_proto_msgTypes[50] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *LocateBrokerResponse_Resource) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LocateBrokerResponse_Resource) ProtoMessage() {} + +func (x *LocateBrokerResponse_Resource) ProtoReflect() protoreflect.Message { + mi := &file_filer_proto_msgTypes[50] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use LocateBrokerResponse_Resource.ProtoReflect.Descriptor instead. 
+func (*LocateBrokerResponse_Resource) Descriptor() ([]byte, []int) { + return file_filer_proto_rawDescGZIP(), []int{42, 0} +} + +func (x *LocateBrokerResponse_Resource) GetGrpcAddresses() string { + if x != nil { + return x.GrpcAddresses + } + return "" +} + +func (x *LocateBrokerResponse_Resource) GetResourceCount() int32 { + if x != nil { + return x.ResourceCount + } + return 0 +} + +type FilerConf_PathConf struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + LocationPrefix string `protobuf:"bytes,1,opt,name=location_prefix,json=locationPrefix,proto3" json:"location_prefix,omitempty"` + Collection string `protobuf:"bytes,2,opt,name=collection,proto3" json:"collection,omitempty"` + Replication string `protobuf:"bytes,3,opt,name=replication,proto3" json:"replication,omitempty"` + Ttl string `protobuf:"bytes,4,opt,name=ttl,proto3" json:"ttl,omitempty"` + DiskType string `protobuf:"bytes,5,opt,name=disk_type,json=diskType,proto3" json:"disk_type,omitempty"` + Fsync bool `protobuf:"varint,6,opt,name=fsync,proto3" json:"fsync,omitempty"` + VolumeGrowthCount uint32 `protobuf:"varint,7,opt,name=volume_growth_count,json=volumeGrowthCount,proto3" json:"volume_growth_count,omitempty"` +} + +func (x *FilerConf_PathConf) Reset() { + *x = FilerConf_PathConf{} + if protoimpl.UnsafeEnabled { + mi := &file_filer_proto_msgTypes[51] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FilerConf_PathConf) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FilerConf_PathConf) ProtoMessage() {} + +func (x *FilerConf_PathConf) ProtoReflect() protoreflect.Message { + mi := &file_filer_proto_msgTypes[51] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FilerConf_PathConf.ProtoReflect.Descriptor instead. 
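FilerConf and its nested PathConf implement the path-based configuration noted above: each PathConf binds a location prefix to storage defaults (collection, replication, ttl, disk type, fsync, volume growth). A sketch of constructing one, with all values hypothetical:

```go
// exampleFilerConf routes everything under /buckets/archive/ to a dedicated
// collection with its own replication, ttl, and disk placement.
func exampleFilerConf() *filer_pb.FilerConf {
	return &filer_pb.FilerConf{
		Version: 1,
		Locations: []*filer_pb.FilerConf_PathConf{{
			LocationPrefix:    "/buckets/archive/", // hypothetical prefix
			Collection:        "archive",
			Replication:       "010", // hypothetical replication setting
			Ttl:               "30d",
			DiskType:          "hdd",
			VolumeGrowthCount: 2, // grow two volumes at a time when full
		}},
	}
}
```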
+func (*FilerConf_PathConf) Descriptor() ([]byte, []int) { + return file_filer_proto_rawDescGZIP(), []int{47, 0} +} + +func (x *FilerConf_PathConf) GetLocationPrefix() string { + if x != nil { + return x.LocationPrefix + } + return "" +} + +func (x *FilerConf_PathConf) GetCollection() string { + if x != nil { + return x.Collection + } + return "" +} + +func (x *FilerConf_PathConf) GetReplication() string { + if x != nil { + return x.Replication + } + return "" +} + +func (x *FilerConf_PathConf) GetTtl() string { + if x != nil { + return x.Ttl + } + return "" +} + +func (x *FilerConf_PathConf) GetDiskType() string { + if x != nil { + return x.DiskType } return "" } -func (m *GetFilerConfigurationResponse) GetCollection() string { - if m != nil { - return m.Collection +func (x *FilerConf_PathConf) GetFsync() bool { + if x != nil { + return x.Fsync } - return "" + return false } -func (m *GetFilerConfigurationResponse) GetMaxMb() uint32 { - if m != nil { - return m.MaxMb +func (x *FilerConf_PathConf) GetVolumeGrowthCount() uint32 { + if x != nil { + return x.VolumeGrowthCount } return 0 } -func init() { - proto.RegisterType((*LookupDirectoryEntryRequest)(nil), "filer_pb.LookupDirectoryEntryRequest") - proto.RegisterType((*LookupDirectoryEntryResponse)(nil), "filer_pb.LookupDirectoryEntryResponse") - proto.RegisterType((*ListEntriesRequest)(nil), "filer_pb.ListEntriesRequest") - proto.RegisterType((*ListEntriesResponse)(nil), "filer_pb.ListEntriesResponse") - proto.RegisterType((*Entry)(nil), "filer_pb.Entry") - proto.RegisterType((*FullEntry)(nil), "filer_pb.FullEntry") - proto.RegisterType((*EventNotification)(nil), "filer_pb.EventNotification") - proto.RegisterType((*FileChunk)(nil), "filer_pb.FileChunk") - proto.RegisterType((*FileId)(nil), "filer_pb.FileId") - proto.RegisterType((*FuseAttributes)(nil), "filer_pb.FuseAttributes") - proto.RegisterType((*CreateEntryRequest)(nil), "filer_pb.CreateEntryRequest") - proto.RegisterType((*CreateEntryResponse)(nil), "filer_pb.CreateEntryResponse") - proto.RegisterType((*UpdateEntryRequest)(nil), "filer_pb.UpdateEntryRequest") - proto.RegisterType((*UpdateEntryResponse)(nil), "filer_pb.UpdateEntryResponse") - proto.RegisterType((*DeleteEntryRequest)(nil), "filer_pb.DeleteEntryRequest") - proto.RegisterType((*DeleteEntryResponse)(nil), "filer_pb.DeleteEntryResponse") - proto.RegisterType((*AtomicRenameEntryRequest)(nil), "filer_pb.AtomicRenameEntryRequest") - proto.RegisterType((*AtomicRenameEntryResponse)(nil), "filer_pb.AtomicRenameEntryResponse") - proto.RegisterType((*AssignVolumeRequest)(nil), "filer_pb.AssignVolumeRequest") - proto.RegisterType((*AssignVolumeResponse)(nil), "filer_pb.AssignVolumeResponse") - proto.RegisterType((*LookupVolumeRequest)(nil), "filer_pb.LookupVolumeRequest") - proto.RegisterType((*Locations)(nil), "filer_pb.Locations") - proto.RegisterType((*Location)(nil), "filer_pb.Location") - proto.RegisterType((*LookupVolumeResponse)(nil), "filer_pb.LookupVolumeResponse") - proto.RegisterType((*DeleteCollectionRequest)(nil), "filer_pb.DeleteCollectionRequest") - proto.RegisterType((*DeleteCollectionResponse)(nil), "filer_pb.DeleteCollectionResponse") - proto.RegisterType((*StatisticsRequest)(nil), "filer_pb.StatisticsRequest") - proto.RegisterType((*StatisticsResponse)(nil), "filer_pb.StatisticsResponse") - proto.RegisterType((*GetFilerConfigurationRequest)(nil), "filer_pb.GetFilerConfigurationRequest") - proto.RegisterType((*GetFilerConfigurationResponse)(nil), "filer_pb.GetFilerConfigurationResponse") +var File_filer_proto 
protoreflect.FileDescriptor + +var file_filer_proto_rawDesc = []byte{ + 0x0a, 0x0b, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x08, 0x66, + 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x22, 0x4f, 0x0a, 0x1b, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, + 0x70, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, + 0x6f, 0x72, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x64, 0x69, 0x72, 0x65, 0x63, + 0x74, 0x6f, 0x72, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x45, 0x0a, 0x1c, 0x4c, 0x6f, 0x6f, 0x6b, + 0x75, 0x70, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x25, 0x0a, 0x05, 0x65, 0x6e, 0x74, 0x72, + 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, + 0x70, 0x62, 0x2e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x05, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x22, + 0xbe, 0x01, 0x0a, 0x12, 0x4c, 0x69, 0x73, 0x74, 0x45, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, + 0x6f, 0x72, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x64, 0x69, 0x72, 0x65, 0x63, + 0x74, 0x6f, 0x72, 0x79, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x2c, 0x0a, 0x11, + 0x73, 0x74, 0x61, 0x72, 0x74, 0x46, 0x72, 0x6f, 0x6d, 0x46, 0x69, 0x6c, 0x65, 0x4e, 0x61, 0x6d, + 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x73, 0x74, 0x61, 0x72, 0x74, 0x46, 0x72, + 0x6f, 0x6d, 0x46, 0x69, 0x6c, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x2e, 0x0a, 0x12, 0x69, 0x6e, + 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x53, 0x74, 0x61, 0x72, 0x74, 0x46, 0x72, 0x6f, 0x6d, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x12, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, + 0x65, 0x53, 0x74, 0x61, 0x72, 0x74, 0x46, 0x72, 0x6f, 0x6d, 0x12, 0x14, 0x0a, 0x05, 0x6c, 0x69, + 0x6d, 0x69, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, + 0x22, 0x3c, 0x0a, 0x13, 0x4c, 0x69, 0x73, 0x74, 0x45, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x25, 0x0a, 0x05, 0x65, 0x6e, 0x74, 0x72, 0x79, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, + 0x62, 0x2e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x05, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x22, 0x85, + 0x03, 0x0a, 0x05, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x21, 0x0a, 0x0c, + 0x69, 0x73, 0x5f, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x0b, 0x69, 0x73, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x12, + 0x2b, 0x0a, 0x06, 0x63, 0x68, 0x75, 0x6e, 0x6b, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x13, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x43, + 0x68, 0x75, 0x6e, 0x6b, 0x52, 0x06, 0x63, 0x68, 0x75, 0x6e, 0x6b, 0x73, 0x12, 0x38, 0x0a, 0x0a, + 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x18, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 
0x5f, 0x70, 0x62, 0x2e, 0x46, 0x75, 0x73, 0x65, + 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x52, 0x0a, 0x61, 0x74, 0x74, 0x72, + 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x12, 0x39, 0x0a, 0x08, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x64, + 0x65, 0x64, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, + 0x5f, 0x70, 0x62, 0x2e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x64, + 0x65, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x08, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x64, 0x65, + 0x64, 0x12, 0x20, 0x0a, 0x0c, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x6c, 0x69, 0x6e, 0x6b, 0x5f, 0x69, + 0x64, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0a, 0x68, 0x61, 0x72, 0x64, 0x4c, 0x69, 0x6e, + 0x6b, 0x49, 0x64, 0x12, 0x2a, 0x0a, 0x11, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x6c, 0x69, 0x6e, 0x6b, + 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x65, 0x72, 0x18, 0x08, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0f, + 0x68, 0x61, 0x72, 0x64, 0x4c, 0x69, 0x6e, 0x6b, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x65, 0x72, 0x12, + 0x18, 0x0a, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0c, + 0x52, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x1a, 0x3b, 0x0a, 0x0d, 0x45, 0x78, 0x74, + 0x65, 0x6e, 0x64, 0x65, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, + 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x44, 0x0a, 0x09, 0x46, 0x75, 0x6c, 0x6c, 0x45, 0x6e, + 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x64, 0x69, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x03, 0x64, 0x69, 0x72, 0x12, 0x25, 0x0a, 0x05, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x05, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x22, 0x8f, 0x02, 0x0a, + 0x11, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x12, 0x2c, 0x0a, 0x09, 0x6f, 0x6c, 0x64, 0x5f, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, + 0x2e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x08, 0x6f, 0x6c, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x12, 0x2c, 0x0a, 0x09, 0x6e, 0x65, 0x77, 0x5f, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x45, + 0x6e, 0x74, 0x72, 0x79, 0x52, 0x08, 0x6e, 0x65, 0x77, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x23, + 0x0a, 0x0d, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x5f, 0x63, 0x68, 0x75, 0x6e, 0x6b, 0x73, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, 0x68, 0x75, + 0x6e, 0x6b, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x77, 0x5f, 0x70, 0x61, 0x72, 0x65, 0x6e, + 0x74, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65, + 0x77, 0x50, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x50, 0x61, 0x74, 0x68, 0x12, 0x31, 0x0a, 0x15, 0x69, + 0x73, 0x5f, 0x66, 0x72, 0x6f, 0x6d, 0x5f, 0x6f, 0x74, 0x68, 0x65, 0x72, 0x5f, 0x63, 0x6c, 0x75, + 0x73, 0x74, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x12, 0x69, 0x73, 0x46, 0x72, + 0x6f, 0x6d, 0x4f, 0x74, 0x68, 0x65, 0x72, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x1e, + 0x0a, 0x0a, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 
0x18, 0x06, 0x20, 0x03, + 0x28, 0x05, 0x52, 0x0a, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x22, 0xe6, + 0x02, 0x0a, 0x09, 0x46, 0x69, 0x6c, 0x65, 0x43, 0x68, 0x75, 0x6e, 0x6b, 0x12, 0x17, 0x0a, 0x07, + 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x66, + 0x69, 0x6c, 0x65, 0x49, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x12, 0x12, 0x0a, + 0x04, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x04, 0x73, 0x69, 0x7a, + 0x65, 0x12, 0x14, 0x0a, 0x05, 0x6d, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, + 0x52, 0x05, 0x6d, 0x74, 0x69, 0x6d, 0x65, 0x12, 0x13, 0x0a, 0x05, 0x65, 0x5f, 0x74, 0x61, 0x67, + 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x65, 0x54, 0x61, 0x67, 0x12, 0x24, 0x0a, 0x0e, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x06, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x46, 0x69, 0x6c, 0x65, + 0x49, 0x64, 0x12, 0x22, 0x0a, 0x03, 0x66, 0x69, 0x64, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x10, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x49, + 0x64, 0x52, 0x03, 0x66, 0x69, 0x64, 0x12, 0x2f, 0x0a, 0x0a, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x5f, 0x66, 0x69, 0x64, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x66, 0x69, 0x6c, + 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x49, 0x64, 0x52, 0x09, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x46, 0x69, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x63, 0x69, 0x70, 0x68, 0x65, + 0x72, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x63, 0x69, 0x70, + 0x68, 0x65, 0x72, 0x4b, 0x65, 0x79, 0x12, 0x23, 0x0a, 0x0d, 0x69, 0x73, 0x5f, 0x63, 0x6f, 0x6d, + 0x70, 0x72, 0x65, 0x73, 0x73, 0x65, 0x64, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x69, + 0x73, 0x43, 0x6f, 0x6d, 0x70, 0x72, 0x65, 0x73, 0x73, 0x65, 0x64, 0x12, 0x2a, 0x0a, 0x11, 0x69, + 0x73, 0x5f, 0x63, 0x68, 0x75, 0x6e, 0x6b, 0x5f, 0x6d, 0x61, 0x6e, 0x69, 0x66, 0x65, 0x73, 0x74, + 0x18, 0x0b, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0f, 0x69, 0x73, 0x43, 0x68, 0x75, 0x6e, 0x6b, 0x4d, + 0x61, 0x6e, 0x69, 0x66, 0x65, 0x73, 0x74, 0x22, 0x40, 0x0a, 0x11, 0x46, 0x69, 0x6c, 0x65, 0x43, + 0x68, 0x75, 0x6e, 0x6b, 0x4d, 0x61, 0x6e, 0x69, 0x66, 0x65, 0x73, 0x74, 0x12, 0x2b, 0x0a, 0x06, + 0x63, 0x68, 0x75, 0x6e, 0x6b, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x66, + 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x43, 0x68, 0x75, 0x6e, + 0x6b, 0x52, 0x06, 0x63, 0x68, 0x75, 0x6e, 0x6b, 0x73, 0x22, 0x58, 0x0a, 0x06, 0x46, 0x69, 0x6c, + 0x65, 0x49, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, + 0x12, 0x19, 0x0a, 0x08, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x04, 0x52, 0x07, 0x66, 0x69, 0x6c, 0x65, 0x4b, 0x65, 0x79, 0x12, 0x16, 0x0a, 0x06, 0x63, + 0x6f, 0x6f, 0x6b, 0x69, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x07, 0x52, 0x06, 0x63, 0x6f, 0x6f, + 0x6b, 0x69, 0x65, 0x22, 0x9d, 0x03, 0x0a, 0x0e, 0x46, 0x75, 0x73, 0x65, 0x41, 0x74, 0x74, 0x72, + 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x73, + 0x69, 0x7a, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x66, 0x69, 0x6c, 0x65, 0x53, + 
0x69, 0x7a, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x6d, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x03, 0x52, 0x05, 0x6d, 0x74, 0x69, 0x6d, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x66, 0x69, 0x6c, + 0x65, 0x5f, 0x6d, 0x6f, 0x64, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x66, 0x69, + 0x6c, 0x65, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x69, 0x64, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x0d, 0x52, 0x03, 0x75, 0x69, 0x64, 0x12, 0x10, 0x0a, 0x03, 0x67, 0x69, 0x64, 0x18, + 0x05, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x03, 0x67, 0x69, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x63, 0x72, + 0x74, 0x69, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x52, 0x06, 0x63, 0x72, 0x74, 0x69, + 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6d, 0x69, 0x6d, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x04, 0x6d, 0x69, 0x6d, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x72, 0x65, 0x70, + 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6f, + 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x17, 0x0a, 0x07, 0x74, 0x74, 0x6c, 0x5f, + 0x73, 0x65, 0x63, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x74, 0x74, 0x6c, 0x53, 0x65, + 0x63, 0x12, 0x1b, 0x0a, 0x09, 0x75, 0x73, 0x65, 0x72, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x0b, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x75, 0x73, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1d, + 0x0a, 0x0a, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x0c, 0x20, 0x03, + 0x28, 0x09, 0x52, 0x09, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x25, 0x0a, + 0x0e, 0x73, 0x79, 0x6d, 0x6c, 0x69, 0x6e, 0x6b, 0x5f, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x18, + 0x0d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x73, 0x79, 0x6d, 0x6c, 0x69, 0x6e, 0x6b, 0x54, 0x61, + 0x72, 0x67, 0x65, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x6d, 0x64, 0x35, 0x18, 0x0e, 0x20, 0x01, 0x28, + 0x0c, 0x52, 0x03, 0x6d, 0x64, 0x35, 0x12, 0x1b, 0x0a, 0x09, 0x64, 0x69, 0x73, 0x6b, 0x5f, 0x74, + 0x79, 0x70, 0x65, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x64, 0x69, 0x73, 0x6b, 0x54, + 0x79, 0x70, 0x65, 0x22, 0xc3, 0x01, 0x0a, 0x12, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x45, 0x6e, + 0x74, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x64, 0x69, + 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x64, + 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x12, 0x25, 0x0a, 0x05, 0x65, 0x6e, 0x74, 0x72, + 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, + 0x70, 0x62, 0x2e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x05, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x12, + 0x15, 0x0a, 0x06, 0x6f, 0x5f, 0x65, 0x78, 0x63, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x05, 0x6f, 0x45, 0x78, 0x63, 0x6c, 0x12, 0x31, 0x0a, 0x15, 0x69, 0x73, 0x5f, 0x66, 0x72, 0x6f, + 0x6d, 0x5f, 0x6f, 0x74, 0x68, 0x65, 0x72, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x12, 0x69, 0x73, 0x46, 0x72, 0x6f, 0x6d, 0x4f, 0x74, 0x68, + 0x65, 0x72, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x1e, 0x0a, 0x0a, 0x73, 0x69, 0x67, + 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x05, 0x52, 0x0a, 0x73, + 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x22, 0x2b, 0x0a, 0x13, 0x43, 0x72, 0x65, + 0x61, 0x74, 0x65, 0x45, 
0x6e, 0x74, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x22, 0xac, 0x01, 0x0a, 0x12, 0x55, 0x70, 0x64, 0x61, 0x74, + 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1c, 0x0a, + 0x09, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x09, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x12, 0x25, 0x0a, 0x05, 0x65, + 0x6e, 0x74, 0x72, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x66, 0x69, 0x6c, + 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x05, 0x65, 0x6e, 0x74, + 0x72, 0x79, 0x12, 0x31, 0x0a, 0x15, 0x69, 0x73, 0x5f, 0x66, 0x72, 0x6f, 0x6d, 0x5f, 0x6f, 0x74, + 0x68, 0x65, 0x72, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x12, 0x69, 0x73, 0x46, 0x72, 0x6f, 0x6d, 0x4f, 0x74, 0x68, 0x65, 0x72, 0x43, 0x6c, + 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x1e, 0x0a, 0x0a, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, + 0x72, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x05, 0x52, 0x0a, 0x73, 0x69, 0x67, 0x6e, 0x61, + 0x74, 0x75, 0x72, 0x65, 0x73, 0x22, 0x15, 0x0a, 0x13, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x45, + 0x6e, 0x74, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x80, 0x01, 0x0a, + 0x14, 0x41, 0x70, 0x70, 0x65, 0x6e, 0x64, 0x54, 0x6f, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, + 0x72, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, + 0x6f, 0x72, 0x79, 0x12, 0x1d, 0x0a, 0x0a, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x5f, 0x6e, 0x61, 0x6d, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x4e, 0x61, + 0x6d, 0x65, 0x12, 0x2b, 0x0a, 0x06, 0x63, 0x68, 0x75, 0x6e, 0x6b, 0x73, 0x18, 0x03, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x46, 0x69, + 0x6c, 0x65, 0x43, 0x68, 0x75, 0x6e, 0x6b, 0x52, 0x06, 0x63, 0x68, 0x75, 0x6e, 0x6b, 0x73, 0x22, + 0x17, 0x0a, 0x15, 0x41, 0x70, 0x70, 0x65, 0x6e, 0x64, 0x54, 0x6f, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x98, 0x02, 0x0a, 0x12, 0x44, 0x65, 0x6c, + 0x65, 0x74, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x1c, 0x0a, 0x09, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x09, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x12, 0x12, 0x0a, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, + 0x65, 0x12, 0x24, 0x0a, 0x0e, 0x69, 0x73, 0x5f, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x5f, 0x64, + 0x61, 0x74, 0x61, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x69, 0x73, 0x44, 0x65, 0x6c, + 0x65, 0x74, 0x65, 0x44, 0x61, 0x74, 0x61, 0x12, 0x21, 0x0a, 0x0c, 0x69, 0x73, 0x5f, 0x72, 0x65, + 0x63, 0x75, 0x72, 0x73, 0x69, 0x76, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x69, + 0x73, 0x52, 0x65, 0x63, 0x75, 0x72, 0x73, 0x69, 0x76, 0x65, 0x12, 0x34, 0x0a, 0x16, 0x69, 0x67, + 0x6e, 0x6f, 0x72, 0x65, 0x5f, 0x72, 0x65, 0x63, 0x75, 0x72, 0x73, 0x69, 0x76, 0x65, 0x5f, 0x65, + 0x72, 0x72, 0x6f, 0x72, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x14, 0x69, 0x67, 0x6e, 0x6f, + 0x72, 0x65, 0x52, 0x65, 0x63, 0x75, 0x72, 0x73, 
0x69, 0x76, 0x65, 0x45, 0x72, 0x72, 0x6f, 0x72, + 0x12, 0x31, 0x0a, 0x15, 0x69, 0x73, 0x5f, 0x66, 0x72, 0x6f, 0x6d, 0x5f, 0x6f, 0x74, 0x68, 0x65, + 0x72, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x12, 0x69, 0x73, 0x46, 0x72, 0x6f, 0x6d, 0x4f, 0x74, 0x68, 0x65, 0x72, 0x43, 0x6c, 0x75, 0x73, + 0x74, 0x65, 0x72, 0x12, 0x1e, 0x0a, 0x0a, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, + 0x73, 0x18, 0x08, 0x20, 0x03, 0x28, 0x05, 0x52, 0x0a, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, + 0x72, 0x65, 0x73, 0x22, 0x2b, 0x0a, 0x13, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x45, 0x6e, 0x74, + 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, + 0x72, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, + 0x22, 0x9a, 0x01, 0x0a, 0x18, 0x41, 0x74, 0x6f, 0x6d, 0x69, 0x63, 0x52, 0x65, 0x6e, 0x61, 0x6d, + 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x23, 0x0a, + 0x0d, 0x6f, 0x6c, 0x64, 0x5f, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x6f, 0x6c, 0x64, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, + 0x72, 0x79, 0x12, 0x19, 0x0a, 0x08, 0x6f, 0x6c, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6f, 0x6c, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x23, 0x0a, + 0x0d, 0x6e, 0x65, 0x77, 0x5f, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x6e, 0x65, 0x77, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, + 0x72, 0x79, 0x12, 0x19, 0x0a, 0x08, 0x6e, 0x65, 0x77, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6e, 0x65, 0x77, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0x1b, 0x0a, + 0x19, 0x41, 0x74, 0x6f, 0x6d, 0x69, 0x63, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x45, 0x6e, 0x74, + 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xec, 0x01, 0x0a, 0x13, 0x41, + 0x73, 0x73, 0x69, 0x67, 0x6e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x05, 0x52, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6f, + 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x20, 0x0a, 0x0b, 0x72, 0x65, 0x70, 0x6c, + 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x72, + 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x17, 0x0a, 0x07, 0x74, 0x74, + 0x6c, 0x5f, 0x73, 0x65, 0x63, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x74, 0x74, 0x6c, + 0x53, 0x65, 0x63, 0x12, 0x1f, 0x0a, 0x0b, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x63, 0x65, 0x6e, 0x74, + 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x64, 0x61, 0x74, 0x61, 0x43, 0x65, + 0x6e, 0x74, 0x65, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x06, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x12, 0x0a, 0x04, 0x72, 0x61, 0x63, 0x6b, + 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x72, 0x61, 0x63, 0x6b, 0x12, 0x1b, 0x0a, 0x09, + 0x64, 0x69, 0x73, 0x6b, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x08, 0x64, 0x69, 0x73, 0x6b, 0x54, 0x79, 0x70, 0x65, 0x22, 0xe2, 0x01, 0x0a, 0x14, 0x41, 0x73, + 0x73, 0x69, 0x67, 0x6e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 
0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x17, 0x0a, 0x07, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x06, 0x66, 0x69, 0x6c, 0x65, 0x49, 0x64, 0x12, 0x10, 0x0a, 0x03, 0x75, + 0x72, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72, 0x6c, 0x12, 0x1d, 0x0a, + 0x0a, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x09, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x55, 0x72, 0x6c, 0x12, 0x14, 0x0a, 0x05, + 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x63, 0x6f, 0x75, + 0x6e, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x61, 0x75, 0x74, 0x68, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x04, 0x61, 0x75, 0x74, 0x68, 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x20, 0x0a, 0x0b, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x72, 0x65, 0x70, + 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, + 0x72, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x22, 0x34, + 0x0a, 0x13, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, + 0x69, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, + 0x65, 0x49, 0x64, 0x73, 0x22, 0x3d, 0x0a, 0x09, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x12, 0x30, 0x0a, 0x09, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, + 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x22, 0x3b, 0x0a, 0x08, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, + 0x10, 0x0a, 0x03, 0x75, 0x72, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72, + 0x6c, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x5f, 0x75, 0x72, 0x6c, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x55, 0x72, 0x6c, + 0x22, 0xc3, 0x01, 0x0a, 0x14, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x56, 0x6f, 0x6c, 0x75, 0x6d, + 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x55, 0x0a, 0x0d, 0x6c, 0x6f, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x5f, 0x6d, 0x61, 0x70, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x30, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4c, 0x6f, 0x6f, 0x6b, + 0x75, 0x70, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x4d, 0x61, 0x70, 0x45, 0x6e, 0x74, + 0x72, 0x79, 0x52, 0x0c, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x4d, 0x61, 0x70, + 0x1a, 0x54, 0x0a, 0x11, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x4d, 0x61, 0x70, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x29, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, + 0x62, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x05, 0x76, 0x61, 0x6c, + 
0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x20, 0x0a, 0x0a, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x7b, 0x0a, 0x15, 0x43, 0x6f, 0x6c, 0x6c, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x12, 0x34, 0x0a, 0x16, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, 0x6e, 0x6f, 0x72, + 0x6d, 0x61, 0x6c, 0x5f, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x14, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x4e, 0x6f, 0x72, 0x6d, 0x61, 0x6c, + 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x73, 0x12, 0x2c, 0x0a, 0x12, 0x69, 0x6e, 0x63, 0x6c, 0x75, + 0x64, 0x65, 0x5f, 0x65, 0x63, 0x5f, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x73, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x10, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x45, 0x63, 0x56, 0x6f, + 0x6c, 0x75, 0x6d, 0x65, 0x73, 0x22, 0x50, 0x0a, 0x16, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, + 0x36, 0x0a, 0x0b, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, + 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0b, 0x63, 0x6f, 0x6c, 0x6c, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x39, 0x0a, 0x17, 0x44, 0x65, 0x6c, 0x65, 0x74, + 0x65, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x22, 0x1a, 0x0a, 0x18, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, 0x6f, 0x6c, 0x6c, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x84, + 0x01, 0x0a, 0x11, 0x53, 0x74, 0x61, 0x74, 0x69, 0x73, 0x74, 0x69, 0x63, 0x73, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x12, 0x20, 0x0a, 0x0b, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x72, 0x65, 0x70, 0x6c, 0x69, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x74, 0x74, 0x6c, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x03, 0x74, 0x74, 0x6c, 0x12, 0x1b, 0x0a, 0x09, 0x64, 0x69, 0x73, 0x6b, + 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x64, 0x69, 0x73, + 0x6b, 0x54, 0x79, 0x70, 0x65, 0x22, 0x6f, 0x0a, 0x12, 0x53, 0x74, 0x61, 0x74, 0x69, 0x73, 0x74, + 0x69, 0x63, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x74, + 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, + 0x09, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x75, 0x73, + 0x65, 0x64, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x75, + 0x73, 0x65, 0x64, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x66, 0x69, 0x6c, 0x65, 0x5f, + 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x66, 0x69, 0x6c, + 0x65, 0x43, 0x6f, 0x75, 
0x6e, 0x74, 0x22, 0x1e, 0x0a, 0x1c, 0x47, 0x65, 0x74, 0x46, 0x69, 0x6c, + 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0xc4, 0x02, 0x0a, 0x1d, 0x47, 0x65, 0x74, 0x46, 0x69, + 0x6c, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x61, 0x73, 0x74, + 0x65, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x61, 0x73, 0x74, 0x65, + 0x72, 0x73, 0x12, 0x20, 0x0a, 0x0b, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x15, 0x0a, 0x06, 0x6d, 0x61, 0x78, 0x5f, 0x6d, 0x62, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, 0x6d, 0x61, 0x78, 0x4d, 0x62, 0x12, 0x1f, 0x0a, 0x0b, 0x64, + 0x69, 0x72, 0x5f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0a, 0x64, 0x69, 0x72, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x12, 0x16, 0x0a, 0x06, + 0x63, 0x69, 0x70, 0x68, 0x65, 0x72, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x63, 0x69, + 0x70, 0x68, 0x65, 0x72, 0x12, 0x1c, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, + 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x05, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, + 0x72, 0x65, 0x12, 0x27, 0x0a, 0x0f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x5f, 0x61, 0x64, + 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x6d, 0x65, 0x74, + 0x72, 0x69, 0x63, 0x73, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x30, 0x0a, 0x14, 0x6d, + 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x5f, + 0x73, 0x65, 0x63, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x05, 0x52, 0x12, 0x6d, 0x65, 0x74, 0x72, 0x69, + 0x63, 0x73, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x53, 0x65, 0x63, 0x22, 0x95, 0x01, + 0x0a, 0x18, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64, + 0x61, 0x74, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x63, 0x6c, + 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0a, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x70, + 0x61, 0x74, 0x68, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0a, 0x70, 0x61, 0x74, 0x68, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x19, 0x0a, 0x08, + 0x73, 0x69, 0x6e, 0x63, 0x65, 0x5f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, + 0x73, 0x69, 0x6e, 0x63, 0x65, 0x4e, 0x73, 0x12, 0x1c, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, + 0x74, 0x75, 0x72, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, + 0x61, 0x74, 0x75, 0x72, 0x65, 0x22, 0x9a, 0x01, 0x0a, 0x19, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, + 0x69, 0x62, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, + 0x79, 0x12, 0x4a, 0x0a, 0x12, 0x65, 0x76, 0x65, 
0x6e, 0x74, 0x5f, 0x6e, 0x6f, 0x74, 0x69, 0x66, + 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, + 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x4e, 0x6f, + 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x11, 0x65, 0x76, 0x65, 0x6e, + 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x13, 0x0a, + 0x05, 0x74, 0x73, 0x5f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x04, 0x74, 0x73, + 0x4e, 0x73, 0x22, 0x61, 0x0a, 0x08, 0x4c, 0x6f, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x13, + 0x0a, 0x05, 0x74, 0x73, 0x5f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x04, 0x74, + 0x73, 0x4e, 0x73, 0x12, 0x2c, 0x0a, 0x12, 0x70, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, + 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, + 0x10, 0x70, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x48, 0x61, 0x73, + 0x68, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, + 0x04, 0x64, 0x61, 0x74, 0x61, 0x22, 0x65, 0x0a, 0x14, 0x4b, 0x65, 0x65, 0x70, 0x43, 0x6f, 0x6e, + 0x6e, 0x65, 0x63, 0x74, 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, + 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x67, 0x72, 0x70, 0x63, 0x50, 0x6f, 0x72, 0x74, 0x12, 0x1c, + 0x0a, 0x09, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, + 0x09, 0x52, 0x09, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x22, 0x17, 0x0a, 0x15, + 0x4b, 0x65, 0x65, 0x70, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x65, 0x64, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x31, 0x0a, 0x13, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x65, 0x42, + 0x72, 0x6f, 0x6b, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, + 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, + 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x22, 0xcd, 0x01, 0x0a, 0x14, 0x4c, 0x6f, 0x63, + 0x61, 0x74, 0x65, 0x42, 0x72, 0x6f, 0x6b, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x14, 0x0a, 0x05, 0x66, 0x6f, 0x75, 0x6e, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x05, 0x66, 0x6f, 0x75, 0x6e, 0x64, 0x12, 0x45, 0x0a, 0x09, 0x72, 0x65, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x66, 0x69, 0x6c, + 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x65, 0x42, 0x72, 0x6f, 0x6b, + 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x52, 0x09, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x1a, 0x58, + 0x0a, 0x08, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x67, 0x72, + 0x70, 0x63, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0d, 0x67, 0x72, 0x70, 0x63, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, + 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x63, 0x6f, + 0x75, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0d, 0x72, 0x65, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0x20, 0x0a, 0x0c, 
0x4b, 0x76, 0x47, 0x65, + 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x22, 0x3b, 0x0a, 0x0d, 0x4b, 0x76, + 0x47, 0x65, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x22, 0x36, 0x0a, 0x0c, 0x4b, 0x76, 0x50, 0x75, 0x74, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0c, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, + 0x25, 0x0a, 0x0d, 0x4b, 0x76, 0x50, 0x75, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x22, 0xce, 0x02, 0x0a, 0x09, 0x46, 0x69, 0x6c, 0x65, 0x72, + 0x43, 0x6f, 0x6e, 0x66, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x3a, + 0x0a, 0x09, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x1c, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x46, 0x69, 0x6c, + 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x2e, 0x50, 0x61, 0x74, 0x68, 0x43, 0x6f, 0x6e, 0x66, 0x52, + 0x09, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0xea, 0x01, 0x0a, 0x08, 0x50, + 0x61, 0x74, 0x68, 0x43, 0x6f, 0x6e, 0x66, 0x12, 0x27, 0x0a, 0x0f, 0x6c, 0x6f, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0e, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, + 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x12, 0x20, 0x0a, 0x0b, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x74, 0x74, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x03, 0x74, 0x74, 0x6c, 0x12, 0x1b, 0x0a, 0x09, 0x64, 0x69, 0x73, 0x6b, 0x5f, 0x74, 0x79, 0x70, + 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x64, 0x69, 0x73, 0x6b, 0x54, 0x79, 0x70, + 0x65, 0x12, 0x14, 0x0a, 0x05, 0x66, 0x73, 0x79, 0x6e, 0x63, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x05, 0x66, 0x73, 0x79, 0x6e, 0x63, 0x12, 0x2e, 0x0a, 0x13, 0x76, 0x6f, 0x6c, 0x75, 0x6d, + 0x65, 0x5f, 0x67, 0x72, 0x6f, 0x77, 0x74, 0x68, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x07, + 0x20, 0x01, 0x28, 0x0d, 0x52, 0x11, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x47, 0x72, 0x6f, 0x77, + 0x74, 0x68, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x32, 0xdc, 0x0c, 0x0a, 0x0c, 0x53, 0x65, 0x61, 0x77, + 0x65, 0x65, 0x64, 0x46, 0x69, 0x6c, 0x65, 0x72, 0x12, 0x67, 0x0a, 0x14, 0x4c, 0x6f, 0x6f, 0x6b, + 0x75, 0x70, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x12, 0x25, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4c, 0x6f, 0x6f, 0x6b, + 
0x75, 0x70, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, + 0x70, 0x62, 0x2e, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, + 0x72, 0x79, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, + 0x00, 0x12, 0x4e, 0x0a, 0x0b, 0x4c, 0x69, 0x73, 0x74, 0x45, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, + 0x12, 0x1c, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4c, 0x69, 0x73, 0x74, + 0x45, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, + 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x45, 0x6e, + 0x74, 0x72, 0x69, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x30, + 0x01, 0x12, 0x4c, 0x0a, 0x0b, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x12, 0x1c, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x43, 0x72, 0x65, 0x61, + 0x74, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, + 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, + 0x4c, 0x0a, 0x0b, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x1c, + 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x66, + 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x45, 0x6e, + 0x74, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x52, 0x0a, + 0x0d, 0x41, 0x70, 0x70, 0x65, 0x6e, 0x64, 0x54, 0x6f, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x1e, + 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x41, 0x70, 0x70, 0x65, 0x6e, 0x64, + 0x54, 0x6f, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, + 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x41, 0x70, 0x70, 0x65, 0x6e, 0x64, + 0x54, 0x6f, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, + 0x00, 0x12, 0x4c, 0x0a, 0x0b, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x12, 0x1c, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x44, 0x65, 0x6c, 0x65, + 0x74, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, + 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, + 0x5e, 0x0a, 0x11, 0x41, 0x74, 0x6f, 0x6d, 0x69, 0x63, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x45, + 0x6e, 0x74, 0x72, 0x79, 0x12, 0x22, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, + 0x41, 0x74, 0x6f, 0x6d, 0x69, 0x63, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, + 0x5f, 0x70, 0x62, 0x2e, 0x41, 0x74, 0x6f, 0x6d, 0x69, 0x63, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, + 0x4f, 0x0a, 0x0c, 0x41, 0x73, 0x73, 0x69, 0x67, 0x6e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x12, + 0x1d, 0x2e, 0x66, 0x69, 
0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x41, 0x73, 0x73, 0x69, 0x67, + 0x6e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, + 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x41, 0x73, 0x73, 0x69, 0x67, 0x6e, + 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, + 0x12, 0x4f, 0x0a, 0x0c, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, + 0x12, 0x1d, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4c, 0x6f, 0x6f, 0x6b, + 0x75, 0x70, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x1e, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, + 0x70, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, + 0x00, 0x12, 0x55, 0x0a, 0x0e, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4c, + 0x69, 0x73, 0x74, 0x12, 0x1f, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x43, + 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, + 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x5b, 0x0a, 0x10, 0x44, 0x65, 0x6c, 0x65, + 0x74, 0x65, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x21, 0x2e, 0x66, + 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, 0x6f, + 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x22, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, + 0x65, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x49, 0x0a, 0x0a, 0x53, 0x74, 0x61, 0x74, 0x69, 0x73, 0x74, + 0x69, 0x63, 0x73, 0x12, 0x1b, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x53, + 0x74, 0x61, 0x74, 0x69, 0x73, 0x74, 0x69, 0x63, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x1c, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x53, 0x74, 0x61, 0x74, + 0x69, 0x73, 0x74, 0x69, 0x63, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, + 0x12, 0x6a, 0x0a, 0x15, 0x47, 0x65, 0x74, 0x46, 0x69, 0x6c, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x26, 0x2e, 0x66, 0x69, 0x6c, 0x65, + 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x47, 0x65, 0x74, 0x46, 0x69, 0x6c, 0x65, 0x72, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x27, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x47, 0x65, 0x74, + 0x46, 0x69, 0x6c, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x60, 0x0a, 0x11, + 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, + 0x61, 0x12, 0x22, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x53, 0x75, 0x62, + 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, + 0x2e, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 
0x62, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, + 0x74, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x30, 0x01, 0x12, 0x65, + 0x0a, 0x16, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x4c, 0x6f, 0x63, 0x61, 0x6c, + 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x22, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, + 0x5f, 0x70, 0x62, 0x2e, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x4d, 0x65, 0x74, + 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x66, + 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, + 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x22, 0x00, 0x30, 0x01, 0x12, 0x56, 0x0a, 0x0d, 0x4b, 0x65, 0x65, 0x70, 0x43, 0x6f, 0x6e, + 0x6e, 0x65, 0x63, 0x74, 0x65, 0x64, 0x12, 0x1e, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, + 0x62, 0x2e, 0x4b, 0x65, 0x65, 0x70, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x65, 0x64, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, + 0x62, 0x2e, 0x4b, 0x65, 0x65, 0x70, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x65, 0x64, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x28, 0x01, 0x30, 0x01, 0x12, 0x4f, 0x0a, + 0x0c, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x65, 0x42, 0x72, 0x6f, 0x6b, 0x65, 0x72, 0x12, 0x1d, 0x2e, + 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x65, 0x42, + 0x72, 0x6f, 0x6b, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x66, + 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x65, 0x42, 0x72, + 0x6f, 0x6b, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x3a, + 0x0a, 0x05, 0x4b, 0x76, 0x47, 0x65, 0x74, 0x12, 0x16, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, + 0x70, 0x62, 0x2e, 0x4b, 0x76, 0x47, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x17, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4b, 0x76, 0x47, 0x65, 0x74, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x3a, 0x0a, 0x05, 0x4b, 0x76, + 0x50, 0x75, 0x74, 0x12, 0x16, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4b, + 0x76, 0x50, 0x75, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x17, 0x2e, 0x66, 0x69, + 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4b, 0x76, 0x50, 0x75, 0x74, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x42, 0x4f, 0x0a, 0x10, 0x73, 0x65, 0x61, 0x77, 0x65, 0x65, + 0x64, 0x66, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x42, 0x0a, 0x46, 0x69, 0x6c, 0x65, + 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x5a, 0x2f, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, + 0x6f, 0x6d, 0x2f, 0x63, 0x68, 0x72, 0x69, 0x73, 0x6c, 0x75, 0x73, 0x66, 0x2f, 0x73, 0x65, 0x61, + 0x77, 0x65, 0x65, 0x64, 0x66, 0x73, 0x2f, 0x77, 0x65, 0x65, 0x64, 0x2f, 0x70, 0x62, 0x2f, 0x66, + 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_filer_proto_rawDescOnce sync.Once + file_filer_proto_rawDescData = file_filer_proto_rawDesc +) + +func file_filer_proto_rawDescGZIP() []byte { + file_filer_proto_rawDescOnce.Do(func() { + file_filer_proto_rawDescData = protoimpl.X.CompressGZIP(file_filer_proto_rawDescData) + }) + return file_filer_proto_rawDescData +} + +var file_filer_proto_msgTypes = make([]protoimpl.MessageInfo, 52) +var file_filer_proto_goTypes = []interface{}{ + 
(*LookupDirectoryEntryRequest)(nil), // 0: filer_pb.LookupDirectoryEntryRequest
+	(*LookupDirectoryEntryResponse)(nil), // 1: filer_pb.LookupDirectoryEntryResponse
+	(*ListEntriesRequest)(nil),           // 2: filer_pb.ListEntriesRequest
+	(*ListEntriesResponse)(nil),          // 3: filer_pb.ListEntriesResponse
+	(*Entry)(nil),                        // 4: filer_pb.Entry
+	(*FullEntry)(nil),                    // 5: filer_pb.FullEntry
+	(*EventNotification)(nil),            // 6: filer_pb.EventNotification
+	(*FileChunk)(nil),                    // 7: filer_pb.FileChunk
+	(*FileChunkManifest)(nil),            // 8: filer_pb.FileChunkManifest
+	(*FileId)(nil),                       // 9: filer_pb.FileId
+	(*FuseAttributes)(nil),               // 10: filer_pb.FuseAttributes
+	(*CreateEntryRequest)(nil),           // 11: filer_pb.CreateEntryRequest
+	(*CreateEntryResponse)(nil),          // 12: filer_pb.CreateEntryResponse
+	(*UpdateEntryRequest)(nil),           // 13: filer_pb.UpdateEntryRequest
+	(*UpdateEntryResponse)(nil),          // 14: filer_pb.UpdateEntryResponse
+	(*AppendToEntryRequest)(nil),         // 15: filer_pb.AppendToEntryRequest
+	(*AppendToEntryResponse)(nil),        // 16: filer_pb.AppendToEntryResponse
+	(*DeleteEntryRequest)(nil),           // 17: filer_pb.DeleteEntryRequest
+	(*DeleteEntryResponse)(nil),          // 18: filer_pb.DeleteEntryResponse
+	(*AtomicRenameEntryRequest)(nil),     // 19: filer_pb.AtomicRenameEntryRequest
+	(*AtomicRenameEntryResponse)(nil),    // 20: filer_pb.AtomicRenameEntryResponse
+	(*AssignVolumeRequest)(nil),          // 21: filer_pb.AssignVolumeRequest
+	(*AssignVolumeResponse)(nil),         // 22: filer_pb.AssignVolumeResponse
+	(*LookupVolumeRequest)(nil),          // 23: filer_pb.LookupVolumeRequest
+	(*Locations)(nil),                    // 24: filer_pb.Locations
+	(*Location)(nil),                     // 25: filer_pb.Location
+	(*LookupVolumeResponse)(nil),         // 26: filer_pb.LookupVolumeResponse
+	(*Collection)(nil),                   // 27: filer_pb.Collection
+	(*CollectionListRequest)(nil),        // 28: filer_pb.CollectionListRequest
+	(*CollectionListResponse)(nil),       // 29: filer_pb.CollectionListResponse
+	(*DeleteCollectionRequest)(nil),      // 30: filer_pb.DeleteCollectionRequest
+	(*DeleteCollectionResponse)(nil),     // 31: filer_pb.DeleteCollectionResponse
+	(*StatisticsRequest)(nil),            // 32: filer_pb.StatisticsRequest
+	(*StatisticsResponse)(nil),           // 33: filer_pb.StatisticsResponse
+	(*GetFilerConfigurationRequest)(nil),  // 34: filer_pb.GetFilerConfigurationRequest
+	(*GetFilerConfigurationResponse)(nil), // 35: filer_pb.GetFilerConfigurationResponse
+	(*SubscribeMetadataRequest)(nil),     // 36: filer_pb.SubscribeMetadataRequest
+	(*SubscribeMetadataResponse)(nil),    // 37: filer_pb.SubscribeMetadataResponse
+	(*LogEntry)(nil),                     // 38: filer_pb.LogEntry
+	(*KeepConnectedRequest)(nil),         // 39: filer_pb.KeepConnectedRequest
+	(*KeepConnectedResponse)(nil),        // 40: filer_pb.KeepConnectedResponse
+	(*LocateBrokerRequest)(nil),          // 41: filer_pb.LocateBrokerRequest
+	(*LocateBrokerResponse)(nil),         // 42: filer_pb.LocateBrokerResponse
+	(*KvGetRequest)(nil),                 // 43: filer_pb.KvGetRequest
+	(*KvGetResponse)(nil),                // 44: filer_pb.KvGetResponse
+	(*KvPutRequest)(nil),                 // 45: filer_pb.KvPutRequest
+	(*KvPutResponse)(nil),                // 46: filer_pb.KvPutResponse
+	(*FilerConf)(nil),                    // 47: filer_pb.FilerConf
+	nil,                                  // 48: filer_pb.Entry.ExtendedEntry
+	nil,                                  // 49: filer_pb.LookupVolumeResponse.LocationsMapEntry
+	(*LocateBrokerResponse_Resource)(nil), // 50: filer_pb.LocateBrokerResponse.Resource
+	(*FilerConf_PathConf)(nil),           // 51: filer_pb.FilerConf.PathConf
+}
+var file_filer_proto_depIdxs = []int32{
+	4,  // 0: filer_pb.LookupDirectoryEntryResponse.entry:type_name -> filer_pb.Entry
+	4,  // 1: filer_pb.ListEntriesResponse.entry:type_name -> filer_pb.Entry
+	7,  // 2: filer_pb.Entry.chunks:type_name -> filer_pb.FileChunk
+	10, // 3: filer_pb.Entry.attributes:type_name -> filer_pb.FuseAttributes
+	48, // 4: filer_pb.Entry.extended:type_name -> filer_pb.Entry.ExtendedEntry
+	4,  // 5: filer_pb.FullEntry.entry:type_name -> filer_pb.Entry
+	4,  // 6: filer_pb.EventNotification.old_entry:type_name -> filer_pb.Entry
+	4,  // 7: filer_pb.EventNotification.new_entry:type_name -> filer_pb.Entry
+	9,  // 8: filer_pb.FileChunk.fid:type_name -> filer_pb.FileId
+	9,  // 9: filer_pb.FileChunk.source_fid:type_name -> filer_pb.FileId
+	7,  // 10: filer_pb.FileChunkManifest.chunks:type_name -> filer_pb.FileChunk
+	4,  // 11: filer_pb.CreateEntryRequest.entry:type_name -> filer_pb.Entry
+	4,  // 12: filer_pb.UpdateEntryRequest.entry:type_name -> filer_pb.Entry
+	7,  // 13: filer_pb.AppendToEntryRequest.chunks:type_name -> filer_pb.FileChunk
+	25, // 14: filer_pb.Locations.locations:type_name -> filer_pb.Location
+	49, // 15: filer_pb.LookupVolumeResponse.locations_map:type_name -> filer_pb.LookupVolumeResponse.LocationsMapEntry
+	27, // 16: filer_pb.CollectionListResponse.collections:type_name -> filer_pb.Collection
+	6,  // 17: filer_pb.SubscribeMetadataResponse.event_notification:type_name -> filer_pb.EventNotification
+	50, // 18: filer_pb.LocateBrokerResponse.resources:type_name -> filer_pb.LocateBrokerResponse.Resource
+	51, // 19: filer_pb.FilerConf.locations:type_name -> filer_pb.FilerConf.PathConf
+	24, // 20: filer_pb.LookupVolumeResponse.LocationsMapEntry.value:type_name -> filer_pb.Locations
+	0,  // 21: filer_pb.SeaweedFiler.LookupDirectoryEntry:input_type -> filer_pb.LookupDirectoryEntryRequest
+	2,  // 22: filer_pb.SeaweedFiler.ListEntries:input_type -> filer_pb.ListEntriesRequest
+	11, // 23: filer_pb.SeaweedFiler.CreateEntry:input_type -> filer_pb.CreateEntryRequest
+	13, // 24: filer_pb.SeaweedFiler.UpdateEntry:input_type -> filer_pb.UpdateEntryRequest
+	15, // 25: filer_pb.SeaweedFiler.AppendToEntry:input_type -> filer_pb.AppendToEntryRequest
+	17, // 26: filer_pb.SeaweedFiler.DeleteEntry:input_type -> filer_pb.DeleteEntryRequest
+	19, // 27: filer_pb.SeaweedFiler.AtomicRenameEntry:input_type -> filer_pb.AtomicRenameEntryRequest
+	21, // 28: filer_pb.SeaweedFiler.AssignVolume:input_type -> filer_pb.AssignVolumeRequest
+	23, // 29: filer_pb.SeaweedFiler.LookupVolume:input_type -> filer_pb.LookupVolumeRequest
+	28, // 30: filer_pb.SeaweedFiler.CollectionList:input_type -> filer_pb.CollectionListRequest
+	30, // 31: filer_pb.SeaweedFiler.DeleteCollection:input_type -> filer_pb.DeleteCollectionRequest
+	32, // 32: filer_pb.SeaweedFiler.Statistics:input_type -> filer_pb.StatisticsRequest
+	34, // 33: filer_pb.SeaweedFiler.GetFilerConfiguration:input_type -> filer_pb.GetFilerConfigurationRequest
+	36, // 34: filer_pb.SeaweedFiler.SubscribeMetadata:input_type -> filer_pb.SubscribeMetadataRequest
+	36, // 35: filer_pb.SeaweedFiler.SubscribeLocalMetadata:input_type -> filer_pb.SubscribeMetadataRequest
+	39, // 36: filer_pb.SeaweedFiler.KeepConnected:input_type -> filer_pb.KeepConnectedRequest
+	41, // 37: filer_pb.SeaweedFiler.LocateBroker:input_type -> filer_pb.LocateBrokerRequest
+	43, // 38: filer_pb.SeaweedFiler.KvGet:input_type -> filer_pb.KvGetRequest
+	45, // 39: filer_pb.SeaweedFiler.KvPut:input_type -> filer_pb.KvPutRequest
+	1,  // 40: filer_pb.SeaweedFiler.LookupDirectoryEntry:output_type -> filer_pb.LookupDirectoryEntryResponse
+	3,  // 41: filer_pb.SeaweedFiler.ListEntries:output_type -> filer_pb.ListEntriesResponse
+	12, // 42:
filer_pb.SeaweedFiler.CreateEntry:output_type -> filer_pb.CreateEntryResponse + 14, // 43: filer_pb.SeaweedFiler.UpdateEntry:output_type -> filer_pb.UpdateEntryResponse + 16, // 44: filer_pb.SeaweedFiler.AppendToEntry:output_type -> filer_pb.AppendToEntryResponse + 18, // 45: filer_pb.SeaweedFiler.DeleteEntry:output_type -> filer_pb.DeleteEntryResponse + 20, // 46: filer_pb.SeaweedFiler.AtomicRenameEntry:output_type -> filer_pb.AtomicRenameEntryResponse + 22, // 47: filer_pb.SeaweedFiler.AssignVolume:output_type -> filer_pb.AssignVolumeResponse + 26, // 48: filer_pb.SeaweedFiler.LookupVolume:output_type -> filer_pb.LookupVolumeResponse + 29, // 49: filer_pb.SeaweedFiler.CollectionList:output_type -> filer_pb.CollectionListResponse + 31, // 50: filer_pb.SeaweedFiler.DeleteCollection:output_type -> filer_pb.DeleteCollectionResponse + 33, // 51: filer_pb.SeaweedFiler.Statistics:output_type -> filer_pb.StatisticsResponse + 35, // 52: filer_pb.SeaweedFiler.GetFilerConfiguration:output_type -> filer_pb.GetFilerConfigurationResponse + 37, // 53: filer_pb.SeaweedFiler.SubscribeMetadata:output_type -> filer_pb.SubscribeMetadataResponse + 37, // 54: filer_pb.SeaweedFiler.SubscribeLocalMetadata:output_type -> filer_pb.SubscribeMetadataResponse + 40, // 55: filer_pb.SeaweedFiler.KeepConnected:output_type -> filer_pb.KeepConnectedResponse + 42, // 56: filer_pb.SeaweedFiler.LocateBroker:output_type -> filer_pb.LocateBrokerResponse + 44, // 57: filer_pb.SeaweedFiler.KvGet:output_type -> filer_pb.KvGetResponse + 46, // 58: filer_pb.SeaweedFiler.KvPut:output_type -> filer_pb.KvPutResponse + 40, // [40:59] is the sub-list for method output_type + 21, // [21:40] is the sub-list for method input_type + 21, // [21:21] is the sub-list for extension type_name + 21, // [21:21] is the sub-list for extension extendee + 0, // [0:21] is the sub-list for field type_name +} + +func init() { file_filer_proto_init() } +func file_filer_proto_init() { + if File_filer_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_filer_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LookupDirectoryEntryRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filer_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LookupDirectoryEntryResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filer_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListEntriesRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filer_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListEntriesResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filer_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Entry); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filer_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FullEntry); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + 
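
// The Exporter hooks in file_filer_proto_init, together with the
// rawDesc/rawDescGZIP pair earlier, are protobuf APIv2 plumbing: they expose
// each message's internal state, sizeCache, and unknownFields to the runtime
// when the unsafe fast path is unavailable, and the raw descriptor is gzipped
// lazily for the legacy Descriptor() methods. Application code never calls
// any of this directly; importing the package runs init() and registers the
// file, after which the ordinary proto API works. A short sketch, assuming
// this repository's import path:
package main

import (
	"fmt"

	"google.golang.org/protobuf/proto"

	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
)

func main() {
	// Marshal/Unmarshal rely on the descriptor wired up by
	// file_filer_proto_init via this package's init().
	req := &filer_pb.LookupDirectoryEntryRequest{Directory: "/buckets", Name: "cat.jpg"}
	data, err := proto.Marshal(req)
	if err != nil {
		panic(err)
	}
	out := &filer_pb.LookupDirectoryEntryRequest{}
	if err := proto.Unmarshal(data, out); err != nil {
		panic(err)
	}
	fmt.Println(out.GetDirectory(), out.GetName()) // prints: /buckets cat.jpg
}
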
file_filer_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*EventNotification); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filer_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FileChunk); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filer_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FileChunkManifest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filer_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FileId); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filer_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FuseAttributes); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filer_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CreateEntryRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filer_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CreateEntryResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filer_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UpdateEntryRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filer_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UpdateEntryResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filer_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AppendToEntryRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filer_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AppendToEntryResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filer_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DeleteEntryRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filer_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DeleteEntryResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filer_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AtomicRenameEntryRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + 
file_filer_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AtomicRenameEntryResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filer_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AssignVolumeRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filer_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AssignVolumeResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filer_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LookupVolumeRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filer_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Locations); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filer_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Location); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filer_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LookupVolumeResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filer_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Collection); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filer_proto_msgTypes[28].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CollectionListRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filer_proto_msgTypes[29].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CollectionListResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filer_proto_msgTypes[30].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DeleteCollectionRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filer_proto_msgTypes[31].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DeleteCollectionResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filer_proto_msgTypes[32].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*StatisticsRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filer_proto_msgTypes[33].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*StatisticsResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + 
return nil + } + } + file_filer_proto_msgTypes[34].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetFilerConfigurationRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filer_proto_msgTypes[35].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetFilerConfigurationResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filer_proto_msgTypes[36].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SubscribeMetadataRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filer_proto_msgTypes[37].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SubscribeMetadataResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filer_proto_msgTypes[38].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LogEntry); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filer_proto_msgTypes[39].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*KeepConnectedRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filer_proto_msgTypes[40].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*KeepConnectedResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filer_proto_msgTypes[41].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LocateBrokerRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filer_proto_msgTypes[42].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LocateBrokerResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filer_proto_msgTypes[43].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*KvGetRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filer_proto_msgTypes[44].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*KvGetResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filer_proto_msgTypes[45].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*KvPutRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filer_proto_msgTypes[46].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*KvPutResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filer_proto_msgTypes[47].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FilerConf); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return 
&v.unknownFields + default: + return nil + } + } + file_filer_proto_msgTypes[50].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LocateBrokerResponse_Resource); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filer_proto_msgTypes[51].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FilerConf_PathConf); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_filer_proto_rawDesc, + NumEnums: 0, + NumMessages: 52, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_filer_proto_goTypes, + DependencyIndexes: file_filer_proto_depIdxs, + MessageInfos: file_filer_proto_msgTypes, + }.Build() + File_filer_proto = out.File + file_filer_proto_rawDesc = nil + file_filer_proto_goTypes = nil + file_filer_proto_depIdxs = nil } // Reference imports to suppress errors if they are not otherwise used. var _ context.Context -var _ grpc.ClientConn +var _ grpc.ClientConnInterface // This is a compile-time assertion to ensure that this generated file // is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 - -// Client API for SeaweedFiler service +const _ = grpc.SupportPackageIsVersion6 +// SeaweedFilerClient is the client API for SeaweedFiler service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. type SeaweedFilerClient interface { LookupDirectoryEntry(ctx context.Context, in *LookupDirectoryEntryRequest, opts ...grpc.CallOption) (*LookupDirectoryEntryResponse, error) ListEntries(ctx context.Context, in *ListEntriesRequest, opts ...grpc.CallOption) (SeaweedFiler_ListEntriesClient, error) CreateEntry(ctx context.Context, in *CreateEntryRequest, opts ...grpc.CallOption) (*CreateEntryResponse, error) UpdateEntry(ctx context.Context, in *UpdateEntryRequest, opts ...grpc.CallOption) (*UpdateEntryResponse, error) + AppendToEntry(ctx context.Context, in *AppendToEntryRequest, opts ...grpc.CallOption) (*AppendToEntryResponse, error) DeleteEntry(ctx context.Context, in *DeleteEntryRequest, opts ...grpc.CallOption) (*DeleteEntryResponse, error) AtomicRenameEntry(ctx context.Context, in *AtomicRenameEntryRequest, opts ...grpc.CallOption) (*AtomicRenameEntryResponse, error) AssignVolume(ctx context.Context, in *AssignVolumeRequest, opts ...grpc.CallOption) (*AssignVolumeResponse, error) LookupVolume(ctx context.Context, in *LookupVolumeRequest, opts ...grpc.CallOption) (*LookupVolumeResponse, error) + CollectionList(ctx context.Context, in *CollectionListRequest, opts ...grpc.CallOption) (*CollectionListResponse, error) DeleteCollection(ctx context.Context, in *DeleteCollectionRequest, opts ...grpc.CallOption) (*DeleteCollectionResponse, error) Statistics(ctx context.Context, in *StatisticsRequest, opts ...grpc.CallOption) (*StatisticsResponse, error) GetFilerConfiguration(ctx context.Context, in *GetFilerConfigurationRequest, opts ...grpc.CallOption) (*GetFilerConfigurationResponse, error) + SubscribeMetadata(ctx context.Context, in *SubscribeMetadataRequest, opts ...grpc.CallOption) (SeaweedFiler_SubscribeMetadataClient, error) + SubscribeLocalMetadata(ctx context.Context, in 
*SubscribeMetadataRequest, opts ...grpc.CallOption) (SeaweedFiler_SubscribeLocalMetadataClient, error) + KeepConnected(ctx context.Context, opts ...grpc.CallOption) (SeaweedFiler_KeepConnectedClient, error) + LocateBroker(ctx context.Context, in *LocateBrokerRequest, opts ...grpc.CallOption) (*LocateBrokerResponse, error) + KvGet(ctx context.Context, in *KvGetRequest, opts ...grpc.CallOption) (*KvGetResponse, error) + KvPut(ctx context.Context, in *KvPutRequest, opts ...grpc.CallOption) (*KvPutResponse, error) } type seaweedFilerClient struct { - cc *grpc.ClientConn + cc grpc.ClientConnInterface } -func NewSeaweedFilerClient(cc *grpc.ClientConn) SeaweedFilerClient { +func NewSeaweedFilerClient(cc grpc.ClientConnInterface) SeaweedFilerClient { return &seaweedFilerClient{cc} } func (c *seaweedFilerClient) LookupDirectoryEntry(ctx context.Context, in *LookupDirectoryEntryRequest, opts ...grpc.CallOption) (*LookupDirectoryEntryResponse, error) { out := new(LookupDirectoryEntryResponse) - err := grpc.Invoke(ctx, "/filer_pb.SeaweedFiler/LookupDirectoryEntry", in, out, c.cc, opts...) + err := c.cc.Invoke(ctx, "/filer_pb.SeaweedFiler/LookupDirectoryEntry", in, out, opts...) if err != nil { return nil, err } @@ -1066,7 +4537,7 @@ func (c *seaweedFilerClient) LookupDirectoryEntry(ctx context.Context, in *Looku } func (c *seaweedFilerClient) ListEntries(ctx context.Context, in *ListEntriesRequest, opts ...grpc.CallOption) (SeaweedFiler_ListEntriesClient, error) { - stream, err := grpc.NewClientStream(ctx, &_SeaweedFiler_serviceDesc.Streams[0], c.cc, "/filer_pb.SeaweedFiler/ListEntries", opts...) + stream, err := c.cc.NewStream(ctx, &_SeaweedFiler_serviceDesc.Streams[0], "/filer_pb.SeaweedFiler/ListEntries", opts...) if err != nil { return nil, err } @@ -1099,7 +4570,7 @@ func (x *seaweedFilerListEntriesClient) Recv() (*ListEntriesResponse, error) { func (c *seaweedFilerClient) CreateEntry(ctx context.Context, in *CreateEntryRequest, opts ...grpc.CallOption) (*CreateEntryResponse, error) { out := new(CreateEntryResponse) - err := grpc.Invoke(ctx, "/filer_pb.SeaweedFiler/CreateEntry", in, out, c.cc, opts...) + err := c.cc.Invoke(ctx, "/filer_pb.SeaweedFiler/CreateEntry", in, out, opts...) if err != nil { return nil, err } @@ -1108,7 +4579,16 @@ func (c *seaweedFilerClient) CreateEntry(ctx context.Context, in *CreateEntryReq func (c *seaweedFilerClient) UpdateEntry(ctx context.Context, in *UpdateEntryRequest, opts ...grpc.CallOption) (*UpdateEntryResponse, error) { out := new(UpdateEntryResponse) - err := grpc.Invoke(ctx, "/filer_pb.SeaweedFiler/UpdateEntry", in, out, c.cc, opts...) + err := c.cc.Invoke(ctx, "/filer_pb.SeaweedFiler/UpdateEntry", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *seaweedFilerClient) AppendToEntry(ctx context.Context, in *AppendToEntryRequest, opts ...grpc.CallOption) (*AppendToEntryResponse, error) { + out := new(AppendToEntryResponse) + err := c.cc.Invoke(ctx, "/filer_pb.SeaweedFiler/AppendToEntry", in, out, opts...) if err != nil { return nil, err } @@ -1117,7 +4597,7 @@ func (c *seaweedFilerClient) UpdateEntry(ctx context.Context, in *UpdateEntryReq func (c *seaweedFilerClient) DeleteEntry(ctx context.Context, in *DeleteEntryRequest, opts ...grpc.CallOption) (*DeleteEntryResponse, error) { out := new(DeleteEntryResponse) - err := grpc.Invoke(ctx, "/filer_pb.SeaweedFiler/DeleteEntry", in, out, c.cc, opts...) + err := c.cc.Invoke(ctx, "/filer_pb.SeaweedFiler/DeleteEntry", in, out, opts...) 
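+// Note that each unary method now routes through c.cc.Invoke instead of the
+// removed package-level grpc.Invoke, matching the grpc.ClientConnInterface
+// abstraction above: any value implementing Invoke and NewStream can back the
+// generated client. A minimal sketch, assuming a hypothetical loggingConn
+// wrapper around an established connection conn:
+//
+//	type loggingConn struct{ grpc.ClientConnInterface }
+//
+//	func (l loggingConn) Invoke(ctx context.Context, method string, args, reply interface{}, opts ...grpc.CallOption) error {
+//		log.Println("filer call:", method)
+//		return l.ClientConnInterface.Invoke(ctx, method, args, reply, opts...)
+//	}
+//
+//	client := NewSeaweedFilerClient(loggingConn{conn})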
if err != nil { return nil, err } @@ -1126,7 +4606,7 @@ func (c *seaweedFilerClient) DeleteEntry(ctx context.Context, in *DeleteEntryReq func (c *seaweedFilerClient) AtomicRenameEntry(ctx context.Context, in *AtomicRenameEntryRequest, opts ...grpc.CallOption) (*AtomicRenameEntryResponse, error) { out := new(AtomicRenameEntryResponse) - err := grpc.Invoke(ctx, "/filer_pb.SeaweedFiler/AtomicRenameEntry", in, out, c.cc, opts...) + err := c.cc.Invoke(ctx, "/filer_pb.SeaweedFiler/AtomicRenameEntry", in, out, opts...) if err != nil { return nil, err } @@ -1135,7 +4615,7 @@ func (c *seaweedFilerClient) AtomicRenameEntry(ctx context.Context, in *AtomicRe func (c *seaweedFilerClient) AssignVolume(ctx context.Context, in *AssignVolumeRequest, opts ...grpc.CallOption) (*AssignVolumeResponse, error) { out := new(AssignVolumeResponse) - err := grpc.Invoke(ctx, "/filer_pb.SeaweedFiler/AssignVolume", in, out, c.cc, opts...) + err := c.cc.Invoke(ctx, "/filer_pb.SeaweedFiler/AssignVolume", in, out, opts...) if err != nil { return nil, err } @@ -1144,7 +4624,16 @@ func (c *seaweedFilerClient) AssignVolume(ctx context.Context, in *AssignVolumeR func (c *seaweedFilerClient) LookupVolume(ctx context.Context, in *LookupVolumeRequest, opts ...grpc.CallOption) (*LookupVolumeResponse, error) { out := new(LookupVolumeResponse) - err := grpc.Invoke(ctx, "/filer_pb.SeaweedFiler/LookupVolume", in, out, c.cc, opts...) + err := c.cc.Invoke(ctx, "/filer_pb.SeaweedFiler/LookupVolume", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *seaweedFilerClient) CollectionList(ctx context.Context, in *CollectionListRequest, opts ...grpc.CallOption) (*CollectionListResponse, error) { + out := new(CollectionListResponse) + err := c.cc.Invoke(ctx, "/filer_pb.SeaweedFiler/CollectionList", in, out, opts...) if err != nil { return nil, err } @@ -1153,7 +4642,7 @@ func (c *seaweedFilerClient) LookupVolume(ctx context.Context, in *LookupVolumeR func (c *seaweedFilerClient) DeleteCollection(ctx context.Context, in *DeleteCollectionRequest, opts ...grpc.CallOption) (*DeleteCollectionResponse, error) { out := new(DeleteCollectionResponse) - err := grpc.Invoke(ctx, "/filer_pb.SeaweedFiler/DeleteCollection", in, out, c.cc, opts...) + err := c.cc.Invoke(ctx, "/filer_pb.SeaweedFiler/DeleteCollection", in, out, opts...) if err != nil { return nil, err } @@ -1162,7 +4651,7 @@ func (c *seaweedFilerClient) DeleteCollection(ctx context.Context, in *DeleteCol func (c *seaweedFilerClient) Statistics(ctx context.Context, in *StatisticsRequest, opts ...grpc.CallOption) (*StatisticsResponse, error) { out := new(StatisticsResponse) - err := grpc.Invoke(ctx, "/filer_pb.SeaweedFiler/Statistics", in, out, c.cc, opts...) + err := c.cc.Invoke(ctx, "/filer_pb.SeaweedFiler/Statistics", in, out, opts...) if err != nil { return nil, err } @@ -1171,27 +4660,218 @@ func (c *seaweedFilerClient) Statistics(ctx context.Context, in *StatisticsReque func (c *seaweedFilerClient) GetFilerConfiguration(ctx context.Context, in *GetFilerConfigurationRequest, opts ...grpc.CallOption) (*GetFilerConfigurationResponse, error) { out := new(GetFilerConfigurationResponse) - err := grpc.Invoke(ctx, "/filer_pb.SeaweedFiler/GetFilerConfiguration", in, out, c.cc, opts...) + err := c.cc.Invoke(ctx, "/filer_pb.SeaweedFiler/GetFilerConfiguration", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *seaweedFilerClient) SubscribeMetadata(ctx context.Context, in *SubscribeMetadataRequest, opts ...grpc.CallOption) (SeaweedFiler_SubscribeMetadataClient, error) { + stream, err := c.cc.NewStream(ctx, &_SeaweedFiler_serviceDesc.Streams[1], "/filer_pb.SeaweedFiler/SubscribeMetadata", opts...) + if err != nil { + return nil, err + } + x := &seaweedFilerSubscribeMetadataClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type SeaweedFiler_SubscribeMetadataClient interface { + Recv() (*SubscribeMetadataResponse, error) + grpc.ClientStream +} + +type seaweedFilerSubscribeMetadataClient struct { + grpc.ClientStream +} + +func (x *seaweedFilerSubscribeMetadataClient) Recv() (*SubscribeMetadataResponse, error) { + m := new(SubscribeMetadataResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *seaweedFilerClient) SubscribeLocalMetadata(ctx context.Context, in *SubscribeMetadataRequest, opts ...grpc.CallOption) (SeaweedFiler_SubscribeLocalMetadataClient, error) { + stream, err := c.cc.NewStream(ctx, &_SeaweedFiler_serviceDesc.Streams[2], "/filer_pb.SeaweedFiler/SubscribeLocalMetadata", opts...) + if err != nil { + return nil, err + } + x := &seaweedFilerSubscribeLocalMetadataClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type SeaweedFiler_SubscribeLocalMetadataClient interface { + Recv() (*SubscribeMetadataResponse, error) + grpc.ClientStream +} + +type seaweedFilerSubscribeLocalMetadataClient struct { + grpc.ClientStream +} + +func (x *seaweedFilerSubscribeLocalMetadataClient) Recv() (*SubscribeMetadataResponse, error) { + m := new(SubscribeMetadataResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *seaweedFilerClient) KeepConnected(ctx context.Context, opts ...grpc.CallOption) (SeaweedFiler_KeepConnectedClient, error) { + stream, err := c.cc.NewStream(ctx, &_SeaweedFiler_serviceDesc.Streams[3], "/filer_pb.SeaweedFiler/KeepConnected", opts...) + if err != nil { + return nil, err + } + x := &seaweedFilerKeepConnectedClient{stream} + return x, nil +} + +type SeaweedFiler_KeepConnectedClient interface { + Send(*KeepConnectedRequest) error + Recv() (*KeepConnectedResponse, error) + grpc.ClientStream +} + +type seaweedFilerKeepConnectedClient struct { + grpc.ClientStream +} + +func (x *seaweedFilerKeepConnectedClient) Send(m *KeepConnectedRequest) error { + return x.ClientStream.SendMsg(m) +} + +func (x *seaweedFilerKeepConnectedClient) Recv() (*KeepConnectedResponse, error) { + m := new(KeepConnectedResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *seaweedFilerClient) LocateBroker(ctx context.Context, in *LocateBrokerRequest, opts ...grpc.CallOption) (*LocateBrokerResponse, error) { + out := new(LocateBrokerResponse) + err := c.cc.Invoke(ctx, "/filer_pb.SeaweedFiler/LocateBroker", in, out, opts...) 
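+// The server-streaming clients above follow the usual generated-gRPC shape:
+// the request is sent once when the stream opens, then the caller drains Recv
+// until io.EOF. A minimal sketch, assuming an established client and context:
+//
+//	stream, err := client.SubscribeMetadata(ctx, &SubscribeMetadataRequest{})
+//	if err != nil {
+//		return err
+//	}
+//	for {
+//		resp, err := stream.Recv()
+//		if err == io.EOF {
+//			break
+//		}
+//		if err != nil {
+//			return err
+//		}
+//		fmt.Println("metadata event in", resp.Directory)
+//	}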
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *seaweedFilerClient) KvGet(ctx context.Context, in *KvGetRequest, opts ...grpc.CallOption) (*KvGetResponse, error) { + out := new(KvGetResponse) + err := c.cc.Invoke(ctx, "/filer_pb.SeaweedFiler/KvGet", in, out, opts...) if err != nil { return nil, err } return out, nil } -// Server API for SeaweedFiler service +func (c *seaweedFilerClient) KvPut(ctx context.Context, in *KvPutRequest, opts ...grpc.CallOption) (*KvPutResponse, error) { + out := new(KvPutResponse) + err := c.cc.Invoke(ctx, "/filer_pb.SeaweedFiler/KvPut", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} +// SeaweedFilerServer is the server API for SeaweedFiler service. type SeaweedFilerServer interface { LookupDirectoryEntry(context.Context, *LookupDirectoryEntryRequest) (*LookupDirectoryEntryResponse, error) ListEntries(*ListEntriesRequest, SeaweedFiler_ListEntriesServer) error CreateEntry(context.Context, *CreateEntryRequest) (*CreateEntryResponse, error) UpdateEntry(context.Context, *UpdateEntryRequest) (*UpdateEntryResponse, error) + AppendToEntry(context.Context, *AppendToEntryRequest) (*AppendToEntryResponse, error) DeleteEntry(context.Context, *DeleteEntryRequest) (*DeleteEntryResponse, error) AtomicRenameEntry(context.Context, *AtomicRenameEntryRequest) (*AtomicRenameEntryResponse, error) AssignVolume(context.Context, *AssignVolumeRequest) (*AssignVolumeResponse, error) LookupVolume(context.Context, *LookupVolumeRequest) (*LookupVolumeResponse, error) + CollectionList(context.Context, *CollectionListRequest) (*CollectionListResponse, error) DeleteCollection(context.Context, *DeleteCollectionRequest) (*DeleteCollectionResponse, error) Statistics(context.Context, *StatisticsRequest) (*StatisticsResponse, error) GetFilerConfiguration(context.Context, *GetFilerConfigurationRequest) (*GetFilerConfigurationResponse, error) + SubscribeMetadata(*SubscribeMetadataRequest, SeaweedFiler_SubscribeMetadataServer) error + SubscribeLocalMetadata(*SubscribeMetadataRequest, SeaweedFiler_SubscribeLocalMetadataServer) error + KeepConnected(SeaweedFiler_KeepConnectedServer) error + LocateBroker(context.Context, *LocateBrokerRequest) (*LocateBrokerResponse, error) + KvGet(context.Context, *KvGetRequest) (*KvGetResponse, error) + KvPut(context.Context, *KvPutRequest) (*KvPutResponse, error) +} + +// UnimplementedSeaweedFilerServer can be embedded to have forward compatible implementations. 
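+// Embedding it keeps a server implementation compiling when new RPCs are
+// added to the service; the embedded stubs fail at runtime with
+// codes.Unimplemented instead. A minimal sketch, assuming a hypothetical
+// myFilerServer that overrides only what it supports:
+//
+//	type myFilerServer struct {
+//		UnimplementedSeaweedFilerServer
+//	}
+//
+//	func (s *myFilerServer) KvGet(ctx context.Context, req *KvGetRequest) (*KvGetResponse, error) {
+//		return &KvGetResponse{}, nil
+//	}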
+type UnimplementedSeaweedFilerServer struct { +} + +func (*UnimplementedSeaweedFilerServer) LookupDirectoryEntry(context.Context, *LookupDirectoryEntryRequest) (*LookupDirectoryEntryResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method LookupDirectoryEntry not implemented") +} +func (*UnimplementedSeaweedFilerServer) ListEntries(*ListEntriesRequest, SeaweedFiler_ListEntriesServer) error { + return status.Errorf(codes.Unimplemented, "method ListEntries not implemented") +} +func (*UnimplementedSeaweedFilerServer) CreateEntry(context.Context, *CreateEntryRequest) (*CreateEntryResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method CreateEntry not implemented") +} +func (*UnimplementedSeaweedFilerServer) UpdateEntry(context.Context, *UpdateEntryRequest) (*UpdateEntryResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method UpdateEntry not implemented") +} +func (*UnimplementedSeaweedFilerServer) AppendToEntry(context.Context, *AppendToEntryRequest) (*AppendToEntryResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method AppendToEntry not implemented") +} +func (*UnimplementedSeaweedFilerServer) DeleteEntry(context.Context, *DeleteEntryRequest) (*DeleteEntryResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method DeleteEntry not implemented") +} +func (*UnimplementedSeaweedFilerServer) AtomicRenameEntry(context.Context, *AtomicRenameEntryRequest) (*AtomicRenameEntryResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method AtomicRenameEntry not implemented") +} +func (*UnimplementedSeaweedFilerServer) AssignVolume(context.Context, *AssignVolumeRequest) (*AssignVolumeResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method AssignVolume not implemented") +} +func (*UnimplementedSeaweedFilerServer) LookupVolume(context.Context, *LookupVolumeRequest) (*LookupVolumeResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method LookupVolume not implemented") +} +func (*UnimplementedSeaweedFilerServer) CollectionList(context.Context, *CollectionListRequest) (*CollectionListResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method CollectionList not implemented") +} +func (*UnimplementedSeaweedFilerServer) DeleteCollection(context.Context, *DeleteCollectionRequest) (*DeleteCollectionResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method DeleteCollection not implemented") +} +func (*UnimplementedSeaweedFilerServer) Statistics(context.Context, *StatisticsRequest) (*StatisticsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Statistics not implemented") +} +func (*UnimplementedSeaweedFilerServer) GetFilerConfiguration(context.Context, *GetFilerConfigurationRequest) (*GetFilerConfigurationResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetFilerConfiguration not implemented") +} +func (*UnimplementedSeaweedFilerServer) SubscribeMetadata(*SubscribeMetadataRequest, SeaweedFiler_SubscribeMetadataServer) error { + return status.Errorf(codes.Unimplemented, "method SubscribeMetadata not implemented") +} +func (*UnimplementedSeaweedFilerServer) SubscribeLocalMetadata(*SubscribeMetadataRequest, SeaweedFiler_SubscribeLocalMetadataServer) error { + return status.Errorf(codes.Unimplemented, "method SubscribeLocalMetadata not implemented") +} +func (*UnimplementedSeaweedFilerServer) KeepConnected(SeaweedFiler_KeepConnectedServer) error { + return status.Errorf(codes.Unimplemented, 
"method KeepConnected not implemented") +} +func (*UnimplementedSeaweedFilerServer) LocateBroker(context.Context, *LocateBrokerRequest) (*LocateBrokerResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method LocateBroker not implemented") +} +func (*UnimplementedSeaweedFilerServer) KvGet(context.Context, *KvGetRequest) (*KvGetResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method KvGet not implemented") +} +func (*UnimplementedSeaweedFilerServer) KvPut(context.Context, *KvPutRequest) (*KvPutResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method KvPut not implemented") } func RegisterSeaweedFilerServer(s *grpc.Server, srv SeaweedFilerServer) { @@ -1273,6 +4953,24 @@ func _SeaweedFiler_UpdateEntry_Handler(srv interface{}, ctx context.Context, dec return interceptor(ctx, in, info, handler) } +func _SeaweedFiler_AppendToEntry_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(AppendToEntryRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SeaweedFilerServer).AppendToEntry(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/filer_pb.SeaweedFiler/AppendToEntry", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SeaweedFilerServer).AppendToEntry(ctx, req.(*AppendToEntryRequest)) + } + return interceptor(ctx, in, info, handler) +} + func _SeaweedFiler_DeleteEntry_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(DeleteEntryRequest) if err := dec(in); err != nil { @@ -1345,6 +5043,24 @@ func _SeaweedFiler_LookupVolume_Handler(srv interface{}, ctx context.Context, de return interceptor(ctx, in, info, handler) } +func _SeaweedFiler_CollectionList_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CollectionListRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SeaweedFilerServer).CollectionList(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/filer_pb.SeaweedFiler/CollectionList", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SeaweedFilerServer).CollectionList(ctx, req.(*CollectionListRequest)) + } + return interceptor(ctx, in, info, handler) +} + func _SeaweedFiler_DeleteCollection_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(DeleteCollectionRequest) if err := dec(in); err != nil { @@ -1399,6 +5115,128 @@ func _SeaweedFiler_GetFilerConfiguration_Handler(srv interface{}, ctx context.Co return interceptor(ctx, in, info, handler) } +func _SeaweedFiler_SubscribeMetadata_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(SubscribeMetadataRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(SeaweedFilerServer).SubscribeMetadata(m, &seaweedFilerSubscribeMetadataServer{stream}) +} + +type SeaweedFiler_SubscribeMetadataServer interface { + Send(*SubscribeMetadataResponse) error + grpc.ServerStream +} + +type seaweedFilerSubscribeMetadataServer struct { + grpc.ServerStream +} + +func (x *seaweedFilerSubscribeMetadataServer) Send(m 
*SubscribeMetadataResponse) error { + return x.ServerStream.SendMsg(m) +} + +func _SeaweedFiler_SubscribeLocalMetadata_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(SubscribeMetadataRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(SeaweedFilerServer).SubscribeLocalMetadata(m, &seaweedFilerSubscribeLocalMetadataServer{stream}) +} + +type SeaweedFiler_SubscribeLocalMetadataServer interface { + Send(*SubscribeMetadataResponse) error + grpc.ServerStream +} + +type seaweedFilerSubscribeLocalMetadataServer struct { + grpc.ServerStream +} + +func (x *seaweedFilerSubscribeLocalMetadataServer) Send(m *SubscribeMetadataResponse) error { + return x.ServerStream.SendMsg(m) +} + +func _SeaweedFiler_KeepConnected_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(SeaweedFilerServer).KeepConnected(&seaweedFilerKeepConnectedServer{stream}) +} + +type SeaweedFiler_KeepConnectedServer interface { + Send(*KeepConnectedResponse) error + Recv() (*KeepConnectedRequest, error) + grpc.ServerStream +} + +type seaweedFilerKeepConnectedServer struct { + grpc.ServerStream +} + +func (x *seaweedFilerKeepConnectedServer) Send(m *KeepConnectedResponse) error { + return x.ServerStream.SendMsg(m) +} + +func (x *seaweedFilerKeepConnectedServer) Recv() (*KeepConnectedRequest, error) { + m := new(KeepConnectedRequest) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func _SeaweedFiler_LocateBroker_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(LocateBrokerRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SeaweedFilerServer).LocateBroker(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/filer_pb.SeaweedFiler/LocateBroker", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SeaweedFilerServer).LocateBroker(ctx, req.(*LocateBrokerRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _SeaweedFiler_KvGet_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(KvGetRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SeaweedFilerServer).KvGet(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/filer_pb.SeaweedFiler/KvGet", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SeaweedFilerServer).KvGet(ctx, req.(*KvGetRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _SeaweedFiler_KvPut_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(KvPutRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SeaweedFilerServer).KvPut(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/filer_pb.SeaweedFiler/KvPut", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SeaweedFilerServer).KvPut(ctx, req.(*KvPutRequest)) + } + return interceptor(ctx, in, info, handler) +} + var _SeaweedFiler_serviceDesc = grpc.ServiceDesc{ ServiceName: "filer_pb.SeaweedFiler", HandlerType: (*SeaweedFilerServer)(nil), @@ -1415,6 +5253,10 @@ var 
_SeaweedFiler_serviceDesc = grpc.ServiceDesc{ MethodName: "UpdateEntry", Handler: _SeaweedFiler_UpdateEntry_Handler, }, + { + MethodName: "AppendToEntry", + Handler: _SeaweedFiler_AppendToEntry_Handler, + }, { MethodName: "DeleteEntry", Handler: _SeaweedFiler_DeleteEntry_Handler, @@ -1431,6 +5273,10 @@ var _SeaweedFiler_serviceDesc = grpc.ServiceDesc{ MethodName: "LookupVolume", Handler: _SeaweedFiler_LookupVolume_Handler, }, + { + MethodName: "CollectionList", + Handler: _SeaweedFiler_CollectionList_Handler, + }, { MethodName: "DeleteCollection", Handler: _SeaweedFiler_DeleteCollection_Handler, @@ -1443,6 +5289,18 @@ var _SeaweedFiler_serviceDesc = grpc.ServiceDesc{ MethodName: "GetFilerConfiguration", Handler: _SeaweedFiler_GetFilerConfiguration_Handler, }, + { + MethodName: "LocateBroker", + Handler: _SeaweedFiler_LocateBroker_Handler, + }, + { + MethodName: "KvGet", + Handler: _SeaweedFiler_KvGet_Handler, + }, + { + MethodName: "KvPut", + Handler: _SeaweedFiler_KvPut_Handler, + }, }, Streams: []grpc.StreamDesc{ { @@ -1450,113 +5308,22 @@ var _SeaweedFiler_serviceDesc = grpc.ServiceDesc{ Handler: _SeaweedFiler_ListEntries_Handler, ServerStreams: true, }, + { + StreamName: "SubscribeMetadata", + Handler: _SeaweedFiler_SubscribeMetadata_Handler, + ServerStreams: true, + }, + { + StreamName: "SubscribeLocalMetadata", + Handler: _SeaweedFiler_SubscribeLocalMetadata_Handler, + ServerStreams: true, + }, + { + StreamName: "KeepConnected", + Handler: _SeaweedFiler_KeepConnected_Handler, + ServerStreams: true, + ClientStreams: true, + }, }, Metadata: "filer.proto", } - -func init() { proto.RegisterFile("filer.proto", fileDescriptor0) } - -var fileDescriptor0 = []byte{ - // 1603 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xb4, 0x58, 0xcd, 0x6f, 0xdc, 0x44, - 0x14, 0x8f, 0xf7, 0xdb, 0x6f, 0x77, 0xdb, 0x64, 0x92, 0xb6, 0xdb, 0xcd, 0x07, 0xa9, 0x43, 0x4b, - 0x10, 0x55, 0xa8, 0x42, 0x0f, 0x2d, 0x85, 0x43, 0x9b, 0x0f, 0x14, 0x91, 0x7e, 0xc8, 0x69, 0x11, - 0x08, 0x09, 0xcb, 0xb1, 0x67, 0x37, 0x43, 0x6c, 0xcf, 0x32, 0x1e, 0x27, 0x29, 0x7f, 0x02, 0x47, - 0x8e, 0x48, 0x9c, 0xf9, 0x27, 0x10, 0x17, 0x84, 0xf8, 0x6f, 0x38, 0x72, 0x46, 0x33, 0x63, 0x7b, - 0xc7, 0xeb, 0x4d, 0xd2, 0x0a, 0xf5, 0xe6, 0x79, 0xdf, 0xef, 0xcd, 0x7b, 0xbf, 0x37, 0xbb, 0xd0, - 0x1e, 0x90, 0x00, 0xb3, 0x8d, 0x11, 0xa3, 0x9c, 0xa2, 0x96, 0x3c, 0x38, 0xa3, 0x43, 0xeb, 0x39, - 0x2c, 0xee, 0x53, 0x7a, 0x9c, 0x8c, 0xb6, 0x09, 0xc3, 0x1e, 0xa7, 0xec, 0xf5, 0x4e, 0xc4, 0xd9, - 0x6b, 0x1b, 0xff, 0x90, 0xe0, 0x98, 0xa3, 0x25, 0x30, 0xfd, 0x8c, 0xd1, 0x33, 0x56, 0x8d, 0x75, - 0xd3, 0x1e, 0x13, 0x10, 0x82, 0x5a, 0xe4, 0x86, 0xb8, 0x57, 0x91, 0x0c, 0xf9, 0x6d, 0xed, 0xc0, - 0xd2, 0x74, 0x83, 0xf1, 0x88, 0x46, 0x31, 0x46, 0xb7, 0xa1, 0x8e, 0x05, 0x41, 0x5a, 0x6b, 0x6f, - 0x5e, 0xdd, 0xc8, 0x42, 0xd9, 0x50, 0x72, 0x8a, 0x6b, 0xfd, 0x61, 0x00, 0xda, 0x27, 0x31, 0x17, - 0x44, 0x82, 0xe3, 0x37, 0x8b, 0xe7, 0x3a, 0x34, 0x46, 0x0c, 0x0f, 0xc8, 0x59, 0x1a, 0x51, 0x7a, - 0x42, 0x77, 0x61, 0x2e, 0xe6, 0x2e, 0xe3, 0xbb, 0x8c, 0x86, 0xbb, 0x24, 0xc0, 0xcf, 0x44, 0xd0, - 0x55, 0x29, 0x52, 0x66, 0xa0, 0x0d, 0x40, 0x24, 0xf2, 0x82, 0x24, 0x26, 0x27, 0xf8, 0x20, 0xe3, - 0xf6, 0x6a, 0xab, 0xc6, 0x7a, 0xcb, 0x9e, 0xc2, 0x41, 0x0b, 0x50, 0x0f, 0x48, 0x48, 0x78, 0xaf, - 0xbe, 0x6a, 0xac, 0x77, 0x6d, 0x75, 0xb0, 0x3e, 0x83, 0xf9, 0x42, 0xfc, 0x6f, 0x97, 0xfe, 0xaf, - 0x15, 0xa8, 0x4b, 0x42, 0x5e, 0x63, 0x63, 0x5c, 0x63, 0x74, 0x0b, 0x3a, 0x24, 0x76, 0xc6, 0x85, - 0xa8, 0xc8, 0xd8, 0xda, 0x24, 0xce, 0x6b, 0x8e, 
0x3e, 0x82, 0x86, 0x77, 0x94, 0x44, 0xc7, 0x71, - 0xaf, 0xba, 0x5a, 0x5d, 0x6f, 0x6f, 0xce, 0x8f, 0x1d, 0x89, 0x44, 0xb7, 0x04, 0xcf, 0x4e, 0x45, - 0xd0, 0x03, 0x00, 0x97, 0x73, 0x46, 0x0e, 0x13, 0x8e, 0x63, 0x99, 0x69, 0x7b, 0xb3, 0xa7, 0x29, - 0x24, 0x31, 0x7e, 0x9c, 0xf3, 0x6d, 0x4d, 0x16, 0x3d, 0x84, 0x16, 0x3e, 0xe3, 0x38, 0xf2, 0xb1, - 0xdf, 0xab, 0x4b, 0x47, 0xcb, 0x13, 0x19, 0x6d, 0xec, 0xa4, 0x7c, 0x95, 0x5f, 0x2e, 0xde, 0x7f, - 0x04, 0xdd, 0x02, 0x0b, 0xcd, 0x42, 0xf5, 0x18, 0x67, 0xb7, 0x2a, 0x3e, 0x45, 0x65, 0x4f, 0xdc, - 0x20, 0x51, 0x0d, 0xd6, 0xb1, 0xd5, 0xe1, 0xd3, 0xca, 0x03, 0xc3, 0xda, 0x06, 0x73, 0x37, 0x09, - 0x82, 0x5c, 0xd1, 0x27, 0x2c, 0x53, 0xf4, 0x09, 0x1b, 0x57, 0xb9, 0x72, 0x61, 0x95, 0x7f, 0x37, - 0x60, 0x6e, 0xe7, 0x04, 0x47, 0xfc, 0x19, 0xe5, 0x64, 0x40, 0x3c, 0x97, 0x13, 0x1a, 0xa1, 0xbb, - 0x60, 0xd2, 0xc0, 0x77, 0x2e, 0xbc, 0xa6, 0x16, 0x0d, 0xd2, 0xa8, 0xef, 0x82, 0x19, 0xe1, 0x53, - 0xe7, 0x42, 0x77, 0xad, 0x08, 0x9f, 0x2a, 0xe9, 0x35, 0xe8, 0xfa, 0x38, 0xc0, 0x1c, 0x3b, 0xf9, - 0xed, 0x88, 0xab, 0xeb, 0x28, 0xe2, 0x96, 0xba, 0x8e, 0x3b, 0x70, 0x55, 0x98, 0x1c, 0xb9, 0x0c, - 0x47, 0xdc, 0x19, 0xb9, 0xfc, 0x48, 0xde, 0x89, 0x69, 0x77, 0x23, 0x7c, 0xfa, 0x42, 0x52, 0x5f, - 0xb8, 0xfc, 0xc8, 0xfa, 0xd7, 0x00, 0x33, 0xbf, 0x4c, 0x74, 0x03, 0x9a, 0xc2, 0xad, 0x43, 0xfc, - 0xb4, 0x12, 0x0d, 0x71, 0xdc, 0xf3, 0xc5, 0x54, 0xd0, 0xc1, 0x20, 0xc6, 0x5c, 0x86, 0x57, 0xb5, - 0xd3, 0x93, 0xe8, 0xac, 0x98, 0xfc, 0xa8, 0x06, 0xa1, 0x66, 0xcb, 0x6f, 0x51, 0xf1, 0x90, 0x93, - 0x10, 0x4b, 0x87, 0x55, 0x5b, 0x1d, 0xd0, 0x3c, 0xd4, 0xb1, 0xc3, 0xdd, 0xa1, 0xec, 0x70, 0xd3, - 0xae, 0xe1, 0x97, 0xee, 0x10, 0xbd, 0x0f, 0x57, 0x62, 0x9a, 0x30, 0x0f, 0x3b, 0x99, 0xdb, 0x86, - 0xe4, 0x76, 0x14, 0x75, 0x57, 0x39, 0xb7, 0xa0, 0x3a, 0x20, 0x7e, 0xaf, 0x29, 0x0b, 0x33, 0x5b, - 0x6c, 0xc2, 0x3d, 0xdf, 0x16, 0x4c, 0xf4, 0x31, 0x40, 0x6e, 0xc9, 0xef, 0xb5, 0xce, 0x11, 0x35, - 0x33, 0xbb, 0xbe, 0xf5, 0x35, 0x34, 0x52, 0xf3, 0x8b, 0x60, 0x9e, 0xd0, 0x20, 0x09, 0xf3, 0xb4, - 0xbb, 0x76, 0x4b, 0x11, 0xf6, 0x7c, 0x74, 0x13, 0x24, 0xce, 0x39, 0xa2, 0xab, 0x2a, 0x32, 0x49, - 0x59, 0xa1, 0x2f, 0xb1, 0x44, 0x0a, 0x8f, 0xd2, 0x63, 0xa2, 0xb2, 0x6f, 0xda, 0xe9, 0xc9, 0xfa, - 0xa7, 0x02, 0x57, 0x8a, 0xed, 0x2e, 0x5c, 0x48, 0x2b, 0xb2, 0x56, 0x86, 0x34, 0x23, 0xcd, 0x1e, - 0x14, 0xea, 0x55, 0xd1, 0xeb, 0x95, 0xa9, 0x84, 0xd4, 0x57, 0x0e, 0xba, 0x4a, 0xe5, 0x29, 0xf5, - 0xb1, 0xe8, 0xd6, 0x84, 0xf8, 0xb2, 0xc0, 0x5d, 0x5b, 0x7c, 0x0a, 0xca, 0x90, 0xf8, 0x29, 0x7c, - 0x88, 0x4f, 0x19, 0x1e, 0x93, 0x76, 0x1b, 0xea, 0xca, 0xd4, 0x49, 0x5c, 0x59, 0x28, 0xa8, 0x4d, - 0x75, 0x0f, 0xe2, 0x1b, 0xad, 0x42, 0x9b, 0xe1, 0x51, 0x90, 0x76, 0xaf, 0x2c, 0x9f, 0x69, 0xeb, - 0x24, 0xb4, 0x02, 0xe0, 0xd1, 0x20, 0xc0, 0x9e, 0x14, 0x30, 0xa5, 0x80, 0x46, 0x11, 0x9d, 0xc3, - 0x79, 0xe0, 0xc4, 0xd8, 0xeb, 0xc1, 0xaa, 0xb1, 0x5e, 0xb7, 0x1b, 0x9c, 0x07, 0x07, 0xd8, 0x13, - 0x79, 0x24, 0x31, 0x66, 0x8e, 0x04, 0xa0, 0xb6, 0xd4, 0x6b, 0x09, 0x82, 0x84, 0xc9, 0x65, 0x80, - 0x21, 0xa3, 0xc9, 0x48, 0x71, 0x3b, 0xab, 0x55, 0x81, 0xc5, 0x92, 0x22, 0xd9, 0xb7, 0xe1, 0x4a, - 0xfc, 0x3a, 0x0c, 0x48, 0x74, 0xec, 0x70, 0x97, 0x0d, 0x31, 0xef, 0x75, 0x55, 0x0f, 0xa7, 0xd4, - 0x97, 0x92, 0x68, 0x7d, 0x03, 0x68, 0x8b, 0x61, 0x97, 0xe3, 0xb7, 0x58, 0x3b, 0x6f, 0x38, 0xdd, - 0xd7, 0x60, 0xbe, 0x60, 0x5a, 0x21, 0xb0, 0xf0, 0xf8, 0x6a, 0xe4, 0xbf, 0x2b, 0x8f, 0x05, 0xd3, - 0xa9, 0xc7, 0xbf, 0x0c, 0x40, 0xdb, 0x72, 0xc0, 0xff, 0xdf, 0x6e, 0x15, 0x23, 0x27, 0x70, 0x5f, - 0x01, 0x88, 0xef, 0x72, 0x37, 0xdd, 0x4a, 0x1d, 0x12, 0x2b, 0xfb, 0xdb, 
0x2e, 0x77, 0xd3, 0xed, - 0xc0, 0xb0, 0x97, 0x30, 0xb1, 0xa8, 0x64, 0x5f, 0xc9, 0xed, 0x60, 0x67, 0x24, 0x74, 0x1f, 0xae, - 0x93, 0x61, 0x44, 0x19, 0x1e, 0x8b, 0x39, 0x98, 0x31, 0xca, 0x64, 0xbf, 0xb5, 0xec, 0x05, 0xc5, - 0xcd, 0x15, 0x76, 0x04, 0x4f, 0xa4, 0x57, 0x48, 0x23, 0x4d, 0xef, 0x17, 0x03, 0x7a, 0x8f, 0x39, - 0x0d, 0x89, 0x67, 0x63, 0x11, 0x66, 0x21, 0xc9, 0x35, 0xe8, 0x0a, 0x30, 0x9d, 0x4c, 0xb4, 0x43, - 0x03, 0x7f, 0xbc, 0xac, 0x6e, 0x82, 0xc0, 0x53, 0x47, 0xcb, 0xb7, 0x49, 0x03, 0x5f, 0xb6, 0xd1, - 0x1a, 0x08, 0xd0, 0xd3, 0xf4, 0xd5, 0xda, 0xee, 0x44, 0xf8, 0xb4, 0xa0, 0x2f, 0x84, 0xa4, 0xbe, - 0x42, 0xca, 0x66, 0x84, 0x4f, 0x85, 0xbe, 0xb5, 0x08, 0x37, 0xa7, 0xc4, 0x96, 0x46, 0xfe, 0x9b, - 0x01, 0xf3, 0x8f, 0xe3, 0x98, 0x0c, 0xa3, 0xaf, 0x24, 0x66, 0x64, 0x41, 0x2f, 0x40, 0xdd, 0xa3, - 0x49, 0xc4, 0x65, 0xb0, 0x75, 0x5b, 0x1d, 0x26, 0xc6, 0xa8, 0x52, 0x1a, 0xa3, 0x89, 0x41, 0xac, - 0x96, 0x07, 0x51, 0x1b, 0xb4, 0x5a, 0x61, 0xd0, 0xde, 0x83, 0xb6, 0xb8, 0x4e, 0xc7, 0xc3, 0x11, - 0xc7, 0x2c, 0x85, 0x59, 0x10, 0xa4, 0x2d, 0x49, 0xb1, 0x7e, 0x32, 0x60, 0xa1, 0x18, 0x69, 0xfa, - 0x9e, 0x38, 0x17, 0xf5, 0x05, 0xcc, 0xb0, 0x20, 0x0d, 0x53, 0x7c, 0x8a, 0x81, 0x1d, 0x25, 0x87, - 0x01, 0xf1, 0x1c, 0xc1, 0x50, 0xe1, 0x99, 0x8a, 0xf2, 0x8a, 0x05, 0xe3, 0xa4, 0x6b, 0x7a, 0xd2, - 0x08, 0x6a, 0x6e, 0xc2, 0x8f, 0x32, 0xe4, 0x17, 0xdf, 0xd6, 0x7d, 0x98, 0x57, 0x4f, 0xbc, 0x62, - 0xd5, 0x96, 0x01, 0x72, 0x2c, 0x8e, 0x7b, 0x86, 0x02, 0x84, 0x0c, 0x8c, 0x63, 0xeb, 0x73, 0x30, - 0xf7, 0xa9, 0x2a, 0x44, 0x8c, 0xee, 0x81, 0x19, 0x64, 0x07, 0x29, 0xda, 0xde, 0x44, 0xe3, 0xa1, - 0xca, 0xe4, 0xec, 0xb1, 0x90, 0xf5, 0x08, 0x5a, 0x19, 0x39, 0xcb, 0xcd, 0x38, 0x2f, 0xb7, 0xca, - 0x44, 0x6e, 0xd6, 0x9f, 0x06, 0x2c, 0x14, 0x43, 0x4e, 0xcb, 0xf7, 0x0a, 0xba, 0xb9, 0x0b, 0x27, - 0x74, 0x47, 0x69, 0x2c, 0xf7, 0xf4, 0x58, 0xca, 0x6a, 0x79, 0x80, 0xf1, 0x53, 0x77, 0xa4, 0x5a, - 0xaa, 0x13, 0x68, 0xa4, 0xfe, 0x4b, 0x98, 0x2b, 0x89, 0x4c, 0x79, 0xdf, 0x7c, 0xa8, 0xbf, 0x6f, - 0x0a, 0x6f, 0xb4, 0x5c, 0x5b, 0x7f, 0xf4, 0x3c, 0x84, 0x1b, 0x6a, 0xfe, 0xb6, 0xf2, 0xa6, 0xcb, - 0x6a, 0x5f, 0xec, 0x4d, 0x63, 0xb2, 0x37, 0xad, 0x3e, 0xf4, 0xca, 0xaa, 0xe9, 0x14, 0x0c, 0x61, - 0xee, 0x80, 0xbb, 0x9c, 0xc4, 0x9c, 0x78, 0xf9, 0x43, 0x7b, 0xa2, 0x99, 0x8d, 0xcb, 0xb6, 0x4a, - 0x79, 0x1c, 0x66, 0xa1, 0xca, 0x79, 0xd6, 0x67, 0xe2, 0x53, 0xdc, 0x02, 0xd2, 0x3d, 0xa5, 0x77, - 0xf0, 0x0e, 0x5c, 0x89, 0x7e, 0xe0, 0x94, 0xbb, 0x81, 0xda, 0xda, 0x35, 0xb9, 0xb5, 0x4d, 0x49, - 0x91, 0x6b, 0x5b, 0x2d, 0x36, 0x5f, 0x71, 0xeb, 0x6a, 0xa7, 0x0b, 0x82, 0x64, 0x2e, 0x03, 0xc8, - 0x91, 0x52, 0xd3, 0xd0, 0x50, 0xba, 0x82, 0xb2, 0x25, 0x08, 0xd6, 0x0a, 0x2c, 0x7d, 0x81, 0xb9, - 0x78, 0x7f, 0xb0, 0x2d, 0x1a, 0x0d, 0xc8, 0x30, 0x61, 0xae, 0x76, 0x15, 0xd6, 0xcf, 0x06, 0x2c, - 0x9f, 0x23, 0x90, 0x26, 0xdc, 0x83, 0x66, 0xe8, 0xc6, 0x1c, 0xb3, 0x6c, 0x4a, 0xb2, 0xe3, 0x64, - 0x29, 0x2a, 0x97, 0x95, 0xa2, 0x5a, 0x2a, 0xc5, 0x35, 0x68, 0x84, 0xee, 0x99, 0x13, 0x1e, 0xa6, - 0x0f, 0x8c, 0x7a, 0xe8, 0x9e, 0x3d, 0x3d, 0xdc, 0xfc, 0xbb, 0x09, 0x9d, 0x03, 0xec, 0x9e, 0x62, - 0xec, 0xcb, 0xc0, 0xd0, 0x30, 0x1b, 0x88, 0xe2, 0xcf, 0x34, 0x74, 0x7b, 0xb2, 0xf3, 0xa7, 0xfe, - 0x2e, 0xec, 0xdf, 0xb9, 0x4c, 0x2c, 0xed, 0xad, 0x19, 0xf4, 0x0c, 0xda, 0xda, 0xef, 0x20, 0xb4, - 0xa4, 0x29, 0x96, 0x7e, 0xde, 0xf5, 0x97, 0xcf, 0xe1, 0x66, 0xd6, 0xee, 0x19, 0x68, 0x1f, 0xda, - 0xda, 0x56, 0xd7, 0xed, 0x95, 0xdf, 0x11, 0xba, 0xbd, 0x69, 0x4f, 0x81, 0x19, 0x61, 0x4d, 0xdb, - 0xd8, 0xba, 0xb5, 0xf2, 0x1b, 0x41, 0xb7, 0x36, 0x6d, 0xcd, 0x4b, 0x6b, 0xda, 0x82, 0xd4, 0xad, - 
0x95, 0xd7, 0xbf, 0x6e, 0x6d, 0xda, 0x56, 0x9d, 0x41, 0xdf, 0xc1, 0x5c, 0x69, 0x75, 0x21, 0x6b, - 0xac, 0x75, 0xde, 0xce, 0xed, 0xaf, 0x5d, 0x28, 0x93, 0xdb, 0x7f, 0x0e, 0x1d, 0x7d, 0xa5, 0x20, - 0x2d, 0xa0, 0x29, 0x4b, 0xb1, 0xbf, 0x72, 0x1e, 0x5b, 0x37, 0xa8, 0xa3, 0xa5, 0x6e, 0x70, 0xca, - 0xbe, 0xd0, 0x0d, 0x4e, 0x03, 0x59, 0x6b, 0x06, 0x7d, 0x0b, 0xb3, 0x93, 0xa8, 0x85, 0x6e, 0x4d, - 0x96, 0xad, 0x04, 0x86, 0x7d, 0xeb, 0x22, 0x91, 0xdc, 0xf8, 0x1e, 0xc0, 0x18, 0x8c, 0xd0, 0xe2, - 0x58, 0xa7, 0x04, 0x86, 0xfd, 0xa5, 0xe9, 0xcc, 0xdc, 0xd4, 0xf7, 0x70, 0x6d, 0xea, 0xc4, 0x23, - 0x6d, 0x4c, 0x2e, 0xc2, 0x8c, 0xfe, 0x07, 0x97, 0xca, 0x65, 0xbe, 0x9e, 0xac, 0xc0, 0x6c, 0xac, - 0x06, 0x79, 0x10, 0x6f, 0x78, 0x01, 0xc1, 0x11, 0x7f, 0x02, 0x52, 0xe3, 0x05, 0xa3, 0x9c, 0x1e, - 0x36, 0xe4, 0x3f, 0x3c, 0x9f, 0xfc, 0x17, 0x00, 0x00, 0xff, 0xff, 0x14, 0x43, 0x9d, 0xb9, 0xf0, - 0x11, 0x00, 0x00, -} diff --git a/weed/pb/filer_pb/filer_client.go b/weed/pb/filer_pb/filer_client.go new file mode 100644 index 000000000..65bd85c84 --- /dev/null +++ b/weed/pb/filer_pb/filer_client.go @@ -0,0 +1,299 @@ +package filer_pb + +import ( + "context" + "errors" + "fmt" + "io" + "math" + "os" + "strings" + "time" + + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/util" +) + +var ( + OS_UID = uint32(os.Getuid()) + OS_GID = uint32(os.Getgid()) +) + +type FilerClient interface { + WithFilerClient(fn func(SeaweedFilerClient) error) error + AdjustedUrl(location *Location) string +} + +func GetEntry(filerClient FilerClient, fullFilePath util.FullPath) (entry *Entry, err error) { + + dir, name := fullFilePath.DirAndName() + + err = filerClient.WithFilerClient(func(client SeaweedFilerClient) error { + + request := &LookupDirectoryEntryRequest{ + Directory: dir, + Name: name, + } + + // glog.V(3).Infof("read %s request: %v", fullFilePath, request) + resp, err := LookupEntry(client, request) + if err != nil { + if err == ErrNotFound { + return nil + } + glog.V(3).Infof("read %s %v: %v", fullFilePath, resp, err) + return err + } + + if resp.Entry == nil { + // glog.V(3).Infof("read %s entry: %v", fullFilePath, entry) + return nil + } + + entry = resp.Entry + return nil + }) + + return +} + +type EachEntryFunciton func(entry *Entry, isLast bool) error + +func ReadDirAllEntries(filerClient FilerClient, fullDirPath util.FullPath, prefix string, fn EachEntryFunciton) (err error) { + + var counter uint32 + var startFrom string + var counterFunc = func(entry *Entry, isLast bool) error { + counter++ + startFrom = entry.Name + return fn(entry, isLast) + } + + var paginationLimit uint32 = 10000 + + if err = doList(filerClient, fullDirPath, prefix, counterFunc, "", false, paginationLimit); err != nil { + return err + } + + for counter == paginationLimit { + counter = 0 + if err = doList(filerClient, fullDirPath, prefix, counterFunc, startFrom, false, paginationLimit); err != nil { + return err + } + } + + return nil +} + +func List(filerClient FilerClient, parentDirectoryPath, prefix string, fn EachEntryFunciton, startFrom string, inclusive bool, limit uint32) (err error) { + return filerClient.WithFilerClient(func(client SeaweedFilerClient) error { + return doSeaweedList(client, util.FullPath(parentDirectoryPath), prefix, fn, startFrom, inclusive, limit) + }) +} + +func doList(filerClient FilerClient, fullDirPath util.FullPath, prefix string, fn EachEntryFunciton, startFrom string, inclusive bool, limit uint32) (err error) { + return filerClient.WithFilerClient(func(client SeaweedFilerClient) error 
{ + return doSeaweedList(client, fullDirPath, prefix, fn, startFrom, inclusive, limit) + }) +} + +func SeaweedList(client SeaweedFilerClient, parentDirectoryPath, prefix string, fn EachEntryFunciton, startFrom string, inclusive bool, limit uint32) (err error) { + return doSeaweedList(client, util.FullPath(parentDirectoryPath), prefix, fn, startFrom, inclusive, limit) +} + +func doSeaweedList(client SeaweedFilerClient, fullDirPath util.FullPath, prefix string, fn EachEntryFunciton, startFrom string, inclusive bool, limit uint32) (err error) { + // Request one entry beyond the given limit so the callback can reliably tell whether an entry is the last one. + redLimit := limit + if limit != math.MaxInt32 && limit != 0 { + redLimit = limit + 1 + } + request := &ListEntriesRequest{ + Directory: string(fullDirPath), + Prefix: prefix, + StartFromFileName: startFrom, + Limit: redLimit, + InclusiveStartFrom: inclusive, + } + + glog.V(4).Infof("read directory: %v", request) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + stream, err := client.ListEntries(ctx, request) + if err != nil { + return fmt.Errorf("list %s: %v", fullDirPath, err) + } + + var prevEntry *Entry + count := 0 + for { + resp, recvErr := stream.Recv() + if recvErr != nil { + if recvErr == io.EOF { + if prevEntry != nil { + if err := fn(prevEntry, true); err != nil { + return err + } + } + break + } else { + return recvErr + } + } + if prevEntry != nil { + if err := fn(prevEntry, false); err != nil { + return err + } + } + prevEntry = resp.Entry + count++ + if count > int(limit) && limit != 0 { + prevEntry = nil + } + } + + return nil +} + +func Exists(filerClient FilerClient, parentDirectoryPath string, entryName string, isDirectory bool) (exists bool, err error) { + + err = filerClient.WithFilerClient(func(client SeaweedFilerClient) error { + + request := &LookupDirectoryEntryRequest{ + Directory: parentDirectoryPath, + Name: entryName, + } + + glog.V(4).Infof("exists entry %v/%v: %v", parentDirectoryPath, entryName, request) + resp, err := LookupEntry(client, request) + if err != nil { + if err == ErrNotFound { + exists = false + return nil + } + glog.V(0).Infof("exists entry %v: %v", request, err) + return fmt.Errorf("exists entry %s/%s: %v", parentDirectoryPath, entryName, err) + } + + exists = resp.Entry.IsDirectory == isDirectory + + return nil + }) + + return +} + +func Touch(filerClient FilerClient, parentDirectoryPath string, entryName string, entry *Entry) (err error) { + + return filerClient.WithFilerClient(func(client SeaweedFilerClient) error { + + request := &UpdateEntryRequest{ + Directory: parentDirectoryPath, + Entry: entry, + } + + glog.V(4).Infof("touch entry %v/%v: %v", parentDirectoryPath, entryName, request) + if err := UpdateEntry(client, request); err != nil { + glog.V(0).Infof("touch exists entry %v: %v", request, err) + return fmt.Errorf("touch exists entry %s/%s: %v", parentDirectoryPath, entryName, err) + } + + return nil + }) + +} + +func Mkdir(filerClient FilerClient, parentDirectoryPath string, dirName string, fn func(entry *Entry)) error { + return filerClient.WithFilerClient(func(client SeaweedFilerClient) error { + + entry := &Entry{ + Name: dirName, + IsDirectory: true, + Attributes: &FuseAttributes{ + Mtime: time.Now().Unix(), + Crtime: time.Now().Unix(), + FileMode: uint32(0777 | os.ModeDir), + Uid: OS_UID, + Gid: OS_GID, + }, + } + + if fn != nil { + fn(entry) + } + + request := &CreateEntryRequest{ + Directory: parentDirectoryPath, + Entry: entry, + } + + glog.V(1).Infof("mkdir: %v", request) + if err 
:= CreateEntry(client, request); err != nil { + glog.V(0).Infof("mkdir %v: %v", request, err) + return fmt.Errorf("mkdir %s/%s: %v", parentDirectoryPath, dirName, err) + } + + return nil + }) +} + +func MkFile(filerClient FilerClient, parentDirectoryPath string, fileName string, chunks []*FileChunk) error { + return filerClient.WithFilerClient(func(client SeaweedFilerClient) error { + + entry := &Entry{ + Name: fileName, + IsDirectory: false, + Attributes: &FuseAttributes{ + Mtime: time.Now().Unix(), + Crtime: time.Now().Unix(), + FileMode: uint32(0770), + Uid: OS_UID, + Gid: OS_GID, + }, + Chunks: chunks, + } + + request := &CreateEntryRequest{ + Directory: parentDirectoryPath, + Entry: entry, + } + + glog.V(1).Infof("create file: %s/%s", parentDirectoryPath, fileName) + if err := CreateEntry(client, request); err != nil { + glog.V(0).Infof("create file %v:%v", request, err) + return fmt.Errorf("create file %s/%s: %v", parentDirectoryPath, fileName, err) + } + + return nil + }) +} + +func Remove(filerClient FilerClient, parentDirectoryPath, name string, isDeleteData, isRecursive, ignoreRecursiveErr, isFromOtherCluster bool, signatures []int32) error { + return filerClient.WithFilerClient(func(client SeaweedFilerClient) error { + + deleteEntryRequest := &DeleteEntryRequest{ + Directory: parentDirectoryPath, + Name: name, + IsDeleteData: isDeleteData, + IsRecursive: isRecursive, + IgnoreRecursiveError: ignoreRecursiveErr, + IsFromOtherCluster: isFromOtherCluster, + Signatures: signatures, + } + if resp, err := client.DeleteEntry(context.Background(), deleteEntryRequest); err != nil { + if strings.Contains(err.Error(), ErrNotFound.Error()) { + return nil + } + return err + } else { + if resp.Error != "" { + if strings.Contains(resp.Error, ErrNotFound.Error()) { + return nil + } + return errors.New(resp.Error) + } + } + + return nil + + }) +} diff --git a/weed/pb/filer_pb/filer_client_bfs.go b/weed/pb/filer_pb/filer_client_bfs.go new file mode 100644 index 000000000..4e5b65f12 --- /dev/null +++ b/weed/pb/filer_pb/filer_client_bfs.go @@ -0,0 +1,63 @@ +package filer_pb + +import ( + "fmt" + "sync" + "time" + + "github.com/chrislusf/seaweedfs/weed/util" +) + +func TraverseBfs(filerClient FilerClient, parentPath util.FullPath, fn func(parentPath util.FullPath, entry *Entry)) (err error) { + + K := 5 + + var jobQueueWg sync.WaitGroup + queue := util.NewQueue() + jobQueueWg.Add(1) + queue.Enqueue(parentPath) + var isTerminating bool + + for i := 0; i < K; i++ { + go func() { + for { + if isTerminating { + break + } + t := queue.Dequeue() + if t == nil { + time.Sleep(329 * time.Millisecond) + continue + } + dir := t.(util.FullPath) + processErr := processOneDirectory(filerClient, dir, queue, &jobQueueWg, fn) + if processErr != nil { + err = processErr + } + jobQueueWg.Done() + } + }() + } + jobQueueWg.Wait() + isTerminating = true + return +} + +func processOneDirectory(filerClient FilerClient, parentPath util.FullPath, queue *util.Queue, jobQueueWg *sync.WaitGroup, fn func(parentPath util.FullPath, entry *Entry)) (err error) { + + return ReadDirAllEntries(filerClient, parentPath, "", func(entry *Entry, isLast bool) error { + + fn(parentPath, entry) + + if entry.IsDirectory { + subDir := fmt.Sprintf("%s/%s", parentPath, entry.Name) + if parentPath == "/" { + subDir = "/" + entry.Name + } + jobQueueWg.Add(1) + queue.Enqueue(util.FullPath(subDir)) + } + return nil + }) + +} diff --git a/weed/pb/filer_pb/filer_pb_helper.go b/weed/pb/filer_pb/filer_pb_helper.go index 5c40332e6..b46385c8f 100644 --- 
a/weed/pb/filer_pb/filer_pb_helper.go +++ b/weed/pb/filer_pb/filer_pb_helper.go @@ -1,10 +1,18 @@ package filer_pb import ( + "context" + "errors" + "fmt" + "strings" + + "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/storage/needle" + "github.com/golang/protobuf/proto" + "github.com/viant/ptrie" ) -func toFileIdObject(fileIdStr string) (*FileId, error) { +func ToFileIdObject(fileIdStr string) (*FileId, error) { t, err := needle.ParseFileIdFromString(fileIdStr) if err != nil { return nil, err @@ -37,14 +45,14 @@ func BeforeEntrySerialization(chunks []*FileChunk) { for _, chunk := range chunks { if chunk.FileId != "" { - if fid, err := toFileIdObject(chunk.FileId); err == nil { + if fid, err := ToFileIdObject(chunk.FileId); err == nil { chunk.Fid = fid chunk.FileId = "" } } if chunk.SourceFileId != "" { - if fid, err := toFileIdObject(chunk.SourceFileId); err == nil { + if fid, err := ToFileIdObject(chunk.SourceFileId); err == nil { chunk.SourceFid = fid chunk.SourceFileId = "" } @@ -53,6 +61,15 @@ func BeforeEntrySerialization(chunks []*FileChunk) { } } +func EnsureFid(chunk *FileChunk) { + if chunk.Fid != nil { + return + } + if fid, err := ToFileIdObject(chunk.FileId); err == nil { + chunk.Fid = fid + } +} + func AfterEntryDeserialization(chunks []*FileChunk) { for _, chunk := range chunks { @@ -67,3 +84,66 @@ func AfterEntryDeserialization(chunks []*FileChunk) { } } + +func CreateEntry(client SeaweedFilerClient, request *CreateEntryRequest) error { + resp, err := client.CreateEntry(context.Background(), request) + if err != nil { + glog.V(1).Infof("create entry %s/%s %v: %v", request.Directory, request.Entry.Name, request.OExcl, err) + return fmt.Errorf("CreateEntry: %v", err) + } + if resp.Error != "" { + glog.V(1).Infof("create entry %s/%s %v: %v", request.Directory, request.Entry.Name, request.OExcl, resp.Error) + return fmt.Errorf("CreateEntry : %v", resp.Error) + } + return nil +} + +func UpdateEntry(client SeaweedFilerClient, request *UpdateEntryRequest) error { + _, err := client.UpdateEntry(context.Background(), request) + if err != nil { + glog.V(1).Infof("update entry %s/%s :%v", request.Directory, request.Entry.Name, err) + return fmt.Errorf("UpdateEntry: %v", err) + } + return nil +} + +func LookupEntry(client SeaweedFilerClient, request *LookupDirectoryEntryRequest) (*LookupDirectoryEntryResponse, error) { + resp, err := client.LookupDirectoryEntry(context.Background(), request) + if err != nil { + if err == ErrNotFound || strings.Contains(err.Error(), ErrNotFound.Error()) { + return nil, ErrNotFound + } + glog.V(3).Infof("read %s/%v: %v", request.Directory, request.Name, err) + return nil, fmt.Errorf("LookupEntry1: %v", err) + } + if resp.Entry == nil { + return nil, ErrNotFound + } + return resp, nil +} + +var ErrNotFound = errors.New("filer: no entry is found in filer store") + +func IsCreate(event *SubscribeMetadataResponse) bool { + return event.EventNotification.NewEntry != nil && event.EventNotification.OldEntry == nil +} +func IsUpdate(event *SubscribeMetadataResponse) bool { + return event.EventNotification.NewEntry != nil && + event.EventNotification.OldEntry != nil && + event.Directory == event.EventNotification.NewParentPath +} +func IsDelete(event *SubscribeMetadataResponse) bool { + return event.EventNotification.NewEntry == nil && event.EventNotification.OldEntry != nil +} +func IsRename(event *SubscribeMetadataResponse) bool { + return event.EventNotification.NewEntry != nil && + event.EventNotification.OldEntry != nil && + 
event.Directory != event.EventNotification.NewParentPath +} + +var _ = ptrie.KeyProvider(&FilerConf_PathConf{}) + +func (fp *FilerConf_PathConf) Key() interface{} { + key, _ := proto.Marshal(fp) + return string(key) +} diff --git a/weed/pb/filer_pb/filer_pb_helper_test.go b/weed/pb/filer_pb/filer_pb_helper_test.go index d4468c011..0009afdbe 100644 --- a/weed/pb/filer_pb/filer_pb_helper_test.go +++ b/weed/pb/filer_pb/filer_pb_helper_test.go @@ -9,7 +9,7 @@ import ( func TestFileIdSize(t *testing.T) { fileIdStr := "11745,0293434534cbb9892b" - fid, _ := toFileIdObject(fileIdStr) + fid, _ := ToFileIdObject(fileIdStr) bytes, _ := proto.Marshal(fid) println(len(fileIdStr)) diff --git a/weed/pb/filer_pb/signature.go b/weed/pb/filer_pb/signature.go new file mode 100644 index 000000000..e13afc656 --- /dev/null +++ b/weed/pb/filer_pb/signature.go @@ -0,0 +1,13 @@ +package filer_pb + +func (r *CreateEntryRequest) AddSignature(sig int32) { + r.Signatures = append(r.Signatures, sig) +} +func (r *CreateEntryRequest) HasSigned(sig int32) bool { + for _, s := range r.Signatures { + if s == sig { + return true + } + } + return false +} diff --git a/weed/pb/grpc_client_server.go b/weed/pb/grpc_client_server.go new file mode 100644 index 000000000..9efcd9bdc --- /dev/null +++ b/weed/pb/grpc_client_server.go @@ -0,0 +1,204 @@ +package pb + +import ( + "context" + "fmt" + "github.com/chrislusf/seaweedfs/weed/glog" + "net/http" + "strconv" + "strings" + "sync" + "time" + + "google.golang.org/grpc" + "google.golang.org/grpc/keepalive" + + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/pb/master_pb" + "github.com/chrislusf/seaweedfs/weed/pb/messaging_pb" +) + +const ( + Max_Message_Size = 1 << 30 // 1 GB +) + +var ( + // cache grpc connections + grpcClients = make(map[string]*grpc.ClientConn) + grpcClientsLock sync.Mutex +) + +func init() { + http.DefaultTransport.(*http.Transport).MaxIdleConnsPerHost = 1024 + http.DefaultTransport.(*http.Transport).MaxIdleConns = 1024 +} + +func NewGrpcServer(opts ...grpc.ServerOption) *grpc.Server { + var options []grpc.ServerOption + options = append(options, + grpc.KeepaliveParams(keepalive.ServerParameters{ + Time: 10 * time.Second, // wait time before ping if no activity + Timeout: 20 * time.Second, // ping timeout + MaxConnectionAge: 10 * time.Hour, + }), + grpc.KeepaliveEnforcementPolicy(keepalive.EnforcementPolicy{ + MinTime: 60 * time.Second, // min time a client should wait before sending a ping + PermitWithoutStream: false, + }), + grpc.MaxRecvMsgSize(Max_Message_Size), + grpc.MaxSendMsgSize(Max_Message_Size), + ) + for _, opt := range opts { + if opt != nil { + options = append(options, opt) + } + } + return grpc.NewServer(options...) +} + +func GrpcDial(ctx context.Context, address string, opts ...grpc.DialOption) (*grpc.ClientConn, error) { + // opts = append(opts, grpc.WithBlock()) + // opts = append(opts, grpc.WithTimeout(time.Duration(5*time.Second))) + var options []grpc.DialOption + options = append(options, + // grpc.WithInsecure(), + grpc.WithDefaultCallOptions( + grpc.MaxCallSendMsgSize(Max_Message_Size), + grpc.MaxCallRecvMsgSize(Max_Message_Size), + ), + grpc.WithKeepaliveParams(keepalive.ClientParameters{ + Time: 30 * time.Second, // client ping server if no activity for this long + Timeout: 20 * time.Second, + PermitWithoutStream: false, + })) + for _, opt := range opts { + if opt != nil { + options = append(options, opt) + } + } + return grpc.DialContext(ctx, address, options...) 
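As a usage sketch for the GrpcDial helper above (not part of this change): transport credentials are left to the caller, since grpc.WithInsecure() is commented out in the defaults, so a plaintext test setup has to pass it explicitly. The filer address and file name below are assumptions; the 18888 port assumes the 10000 offset applied by the ParseServerToGrpcAddress helpers further down.

package main

import (
	"context"
	"log"

	"google.golang.org/grpc"

	"github.com/chrislusf/seaweedfs/weed/pb"
	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
)

func main() {
	// Assumed filer gRPC address: HTTP port 8888 plus the 10000 offset convention.
	conn, err := pb.GrpcDial(context.Background(), "localhost:18888", grpc.WithInsecure())
	if err != nil {
		log.Fatalf("dial filer: %v", err)
	}
	defer conn.Close()

	client := filer_pb.NewSeaweedFilerClient(conn)
	resp, err := client.LookupDirectoryEntry(context.Background(), &filer_pb.LookupDirectoryEntryRequest{
		Directory: "/",
		Name:      "example.txt", // illustrative name
	})
	if err != nil {
		log.Fatalf("lookup: %v", err)
	}
	log.Printf("found entry: %v", resp.Entry)
}

In the codebase itself, callers would normally go through WithFilerClient below, which reuses connections from the grpcClients cache instead of dialing on every call.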
+} + +func getOrCreateConnection(address string, opts ...grpc.DialOption) (*grpc.ClientConn, error) { + + grpcClientsLock.Lock() + defer grpcClientsLock.Unlock() + + existingConnection, found := grpcClients[address] + if found { + return existingConnection, nil + } + + grpcConnection, err := GrpcDial(context.Background(), address, opts...) + if err != nil { + return nil, fmt.Errorf("fail to dial %s: %v", address, err) + } + + grpcClients[address] = grpcConnection + + return grpcConnection, nil +} + +func WithCachedGrpcClient(fn func(*grpc.ClientConn) error, address string, opts ...grpc.DialOption) error { + + grpcConnection, err := getOrCreateConnection(address, opts...) + if err != nil { + return fmt.Errorf("getOrCreateConnection %s: %v", address, err) + } + return fn(grpcConnection) +} + +func ParseServerToGrpcAddress(server string) (serverGrpcAddress string, err error) { + return ParseServerAddress(server, 10000) +} + +func ParseServerAddress(server string, deltaPort int) (newServerAddress string, err error) { + + host, port, parseErr := hostAndPort(server) + if parseErr != nil { + return "", fmt.Errorf("server port parse error: %v", parseErr) + } + + newPort := int(port) + deltaPort + + return fmt.Sprintf("%s:%d", host, newPort), nil +} + +func hostAndPort(address string) (host string, port uint64, err error) { + colonIndex := strings.LastIndex(address, ":") + if colonIndex < 0 { + return "", 0, fmt.Errorf("server should have hostname:port format: %v", address) + } + port, err = strconv.ParseUint(address[colonIndex+1:], 10, 64) + if err != nil { + return "", 0, fmt.Errorf("server port parse error: %v", err) + } + + return address[:colonIndex], port, err +} + +func ServerToGrpcAddress(server string) (serverGrpcAddress string) { + + host, port, parseErr := hostAndPort(server) + if parseErr != nil { + glog.Fatalf("server address %s parse error: %v", server, parseErr) + } + + grpcPort := int(port) + 10000 + + return fmt.Sprintf("%s:%d", host, grpcPort) +} + +func GrpcAddressToServerAddress(grpcAddress string) (serverAddress string) { + host, grpcPort, parseErr := hostAndPort(grpcAddress) + if parseErr != nil { + glog.Fatalf("server grpc address %s parse error: %v", grpcAddress, parseErr) + } + + port := int(grpcPort) - 10000 + + return fmt.Sprintf("%s:%d", host, port) +} + +func WithMasterClient(master string, grpcDialOption grpc.DialOption, fn func(client master_pb.SeaweedClient) error) error { + + masterGrpcAddress, parseErr := ParseServerToGrpcAddress(master) + if parseErr != nil { + return fmt.Errorf("failed to parse master grpc %v: %v", master, parseErr) + } + + return WithCachedGrpcClient(func(grpcConnection *grpc.ClientConn) error { + client := master_pb.NewSeaweedClient(grpcConnection) + return fn(client) + }, masterGrpcAddress, grpcDialOption) + +} + +func WithBrokerGrpcClient(brokerGrpcAddress string, grpcDialOption grpc.DialOption, fn func(client messaging_pb.SeaweedMessagingClient) error) error { + + return WithCachedGrpcClient(func(grpcConnection *grpc.ClientConn) error { + client := messaging_pb.NewSeaweedMessagingClient(grpcConnection) + return fn(client) + }, brokerGrpcAddress, grpcDialOption) + +} + +func WithFilerClient(filer string, grpcDialOption grpc.DialOption, fn func(client filer_pb.SeaweedFilerClient) error) error { + + filerGrpcAddress, parseErr := ParseServerToGrpcAddress(filer) + if parseErr != nil { + return fmt.Errorf("failed to parse filer grpc %v: %v", filer, parseErr) + } + + return WithGrpcFilerClient(filerGrpcAddress, grpcDialOption, fn) + +} + +func 
WithGrpcFilerClient(filerGrpcAddress string, grpcDialOption grpc.DialOption, fn func(client filer_pb.SeaweedFilerClient) error) error { + + return WithCachedGrpcClient(func(grpcConnection *grpc.ClientConn) error { + client := filer_pb.NewSeaweedFilerClient(grpcConnection) + return fn(client) + }, filerGrpcAddress, grpcDialOption) + +} diff --git a/weed/pb/iam.proto b/weed/pb/iam.proto new file mode 100644 index 000000000..558bd2b70 --- /dev/null +++ b/weed/pb/iam.proto @@ -0,0 +1,51 @@ +syntax = "proto3"; + +package iam_pb; + +option go_package = "github.com/chrislusf/seaweedfs/weed/pb/iam_pb"; +option java_package = "seaweedfs.client"; +option java_outer_classname = "IamProto"; + +////////////////////////////////////////////////// + +service SeaweedIdentityAccessManagement { + +} + +////////////////////////////////////////////////// + +message S3ApiConfiguration { + repeated Identity identities = 1; +} + +message Identity { + string name = 1; + repeated Credential credentials = 2; + repeated string actions = 3; +} + +message Credential { + string access_key = 1; + string secret_key = 2; + // uint64 expiration = 3; + // bool is_disabled = 4; +} + +/* +message Policy { + repeated Statement statements = 1; +} + +message Statement { + repeated Action action = 1; + repeated Resource resource = 2; +} + +message Action { + string action = 1; +} +message Resource { + string bucket = 1; + // string path = 2; +} +*/ \ No newline at end of file diff --git a/weed/pb/iam_pb/iam.pb.go b/weed/pb/iam_pb/iam.pb.go new file mode 100644 index 000000000..7d0b6281b --- /dev/null +++ b/weed/pb/iam_pb/iam.pb.go @@ -0,0 +1,356 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.25.0 +// protoc v3.12.3 +// source: iam.proto + +package iam_pb + +import ( + context "context" + proto "github.com/golang/protobuf/proto" + grpc "google.golang.org/grpc" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// This is a compile-time assertion that a sufficiently up-to-date version +// of the legacy proto package is being used. +const _ = proto.ProtoPackageIsVersion4 + +type S3ApiConfiguration struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Identities []*Identity `protobuf:"bytes,1,rep,name=identities,proto3" json:"identities,omitempty"` +} + +func (x *S3ApiConfiguration) Reset() { + *x = S3ApiConfiguration{} + if protoimpl.UnsafeEnabled { + mi := &file_iam_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *S3ApiConfiguration) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*S3ApiConfiguration) ProtoMessage() {} + +func (x *S3ApiConfiguration) ProtoReflect() protoreflect.Message { + mi := &file_iam_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use S3ApiConfiguration.ProtoReflect.Descriptor instead. 
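To illustrate how the new iam.proto messages compose, here is a minimal sketch using the generated iam_pb types; the identity name, key values, and action strings are made-up, and whether they match real S3 identity actions is an assumption, not something this diff specifies.

package main

import (
	"log"

	"github.com/golang/protobuf/proto"

	"github.com/chrislusf/seaweedfs/weed/pb/iam_pb"
)

func main() {
	// One identity holding one access/secret key pair and two actions.
	conf := &iam_pb.S3ApiConfiguration{
		Identities: []*iam_pb.Identity{
			{
				Name: "demo_user",
				Credentials: []*iam_pb.Credential{
					{AccessKey: "some_access_key_1", SecretKey: "some_secret_key_1"},
				},
				Actions: []string{"Read", "Write"},
			},
		},
	}

	data, err := proto.Marshal(conf)
	if err != nil {
		log.Fatalf("marshal iam config: %v", err)
	}
	log.Printf("serialized %d identities into %d bytes", len(conf.GetIdentities()), len(data))
}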
+func (*S3ApiConfiguration) Descriptor() ([]byte, []int) { + return file_iam_proto_rawDescGZIP(), []int{0} +} + +func (x *S3ApiConfiguration) GetIdentities() []*Identity { + if x != nil { + return x.Identities + } + return nil +} + +type Identity struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Credentials []*Credential `protobuf:"bytes,2,rep,name=credentials,proto3" json:"credentials,omitempty"` + Actions []string `protobuf:"bytes,3,rep,name=actions,proto3" json:"actions,omitempty"` +} + +func (x *Identity) Reset() { + *x = Identity{} + if protoimpl.UnsafeEnabled { + mi := &file_iam_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Identity) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Identity) ProtoMessage() {} + +func (x *Identity) ProtoReflect() protoreflect.Message { + mi := &file_iam_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Identity.ProtoReflect.Descriptor instead. +func (*Identity) Descriptor() ([]byte, []int) { + return file_iam_proto_rawDescGZIP(), []int{1} +} + +func (x *Identity) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *Identity) GetCredentials() []*Credential { + if x != nil { + return x.Credentials + } + return nil +} + +func (x *Identity) GetActions() []string { + if x != nil { + return x.Actions + } + return nil +} + +type Credential struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + AccessKey string `protobuf:"bytes,1,opt,name=access_key,json=accessKey,proto3" json:"access_key,omitempty"` + SecretKey string `protobuf:"bytes,2,opt,name=secret_key,json=secretKey,proto3" json:"secret_key,omitempty"` +} + +func (x *Credential) Reset() { + *x = Credential{} + if protoimpl.UnsafeEnabled { + mi := &file_iam_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Credential) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Credential) ProtoMessage() {} + +func (x *Credential) ProtoReflect() protoreflect.Message { + mi := &file_iam_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Credential.ProtoReflect.Descriptor instead. 
+func (*Credential) Descriptor() ([]byte, []int) { + return file_iam_proto_rawDescGZIP(), []int{2} +} + +func (x *Credential) GetAccessKey() string { + if x != nil { + return x.AccessKey + } + return "" +} + +func (x *Credential) GetSecretKey() string { + if x != nil { + return x.SecretKey + } + return "" +} + +var File_iam_proto protoreflect.FileDescriptor + +var file_iam_proto_rawDesc = []byte{ + 0x0a, 0x09, 0x69, 0x61, 0x6d, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x06, 0x69, 0x61, 0x6d, + 0x5f, 0x70, 0x62, 0x22, 0x46, 0x0a, 0x12, 0x53, 0x33, 0x41, 0x70, 0x69, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x30, 0x0a, 0x0a, 0x69, 0x64, 0x65, + 0x6e, 0x74, 0x69, 0x74, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x10, 0x2e, + 0x69, 0x61, 0x6d, 0x5f, 0x70, 0x62, 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, + 0x0a, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x69, 0x65, 0x73, 0x22, 0x6e, 0x0a, 0x08, 0x49, + 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x34, 0x0a, 0x0b, 0x63, + 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x12, 0x2e, 0x69, 0x61, 0x6d, 0x5f, 0x70, 0x62, 0x2e, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, + 0x74, 0x69, 0x61, 0x6c, 0x52, 0x0b, 0x63, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, + 0x73, 0x12, 0x18, 0x0a, 0x07, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x03, + 0x28, 0x09, 0x52, 0x07, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x4a, 0x0a, 0x0a, 0x43, + 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x12, 0x1d, 0x0a, 0x0a, 0x61, 0x63, 0x63, + 0x65, 0x73, 0x73, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x61, + 0x63, 0x63, 0x65, 0x73, 0x73, 0x4b, 0x65, 0x79, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x65, 0x63, 0x72, + 0x65, 0x74, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x65, + 0x63, 0x72, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x32, 0x21, 0x0a, 0x1f, 0x53, 0x65, 0x61, 0x77, 0x65, + 0x65, 0x64, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, + 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x42, 0x4b, 0x0a, 0x10, 0x73, 0x65, + 0x61, 0x77, 0x65, 0x65, 0x64, 0x66, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x42, 0x08, + 0x49, 0x61, 0x6d, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x5a, 0x2d, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, + 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x68, 0x72, 0x69, 0x73, 0x6c, 0x75, 0x73, 0x66, 0x2f, 0x73, + 0x65, 0x61, 0x77, 0x65, 0x65, 0x64, 0x66, 0x73, 0x2f, 0x77, 0x65, 0x65, 0x64, 0x2f, 0x70, 0x62, + 0x2f, 0x69, 0x61, 0x6d, 0x5f, 0x70, 0x62, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_iam_proto_rawDescOnce sync.Once + file_iam_proto_rawDescData = file_iam_proto_rawDesc +) + +func file_iam_proto_rawDescGZIP() []byte { + file_iam_proto_rawDescOnce.Do(func() { + file_iam_proto_rawDescData = protoimpl.X.CompressGZIP(file_iam_proto_rawDescData) + }) + return file_iam_proto_rawDescData +} + +var file_iam_proto_msgTypes = make([]protoimpl.MessageInfo, 3) +var file_iam_proto_goTypes = []interface{}{ + (*S3ApiConfiguration)(nil), // 0: iam_pb.S3ApiConfiguration + (*Identity)(nil), // 1: iam_pb.Identity + (*Credential)(nil), // 2: iam_pb.Credential +} +var file_iam_proto_depIdxs = []int32{ + 1, // 0: iam_pb.S3ApiConfiguration.identities:type_name -> iam_pb.Identity + 2, // 
1: iam_pb.Identity.credentials:type_name -> iam_pb.Credential + 2, // [2:2] is the sub-list for method output_type + 2, // [2:2] is the sub-list for method input_type + 2, // [2:2] is the sub-list for extension type_name + 2, // [2:2] is the sub-list for extension extendee + 0, // [0:2] is the sub-list for field type_name +} + +func init() { file_iam_proto_init() } +func file_iam_proto_init() { + if File_iam_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_iam_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*S3ApiConfiguration); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_iam_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Identity); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_iam_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Credential); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_iam_proto_rawDesc, + NumEnums: 0, + NumMessages: 3, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_iam_proto_goTypes, + DependencyIndexes: file_iam_proto_depIdxs, + MessageInfos: file_iam_proto_msgTypes, + }.Build() + File_iam_proto = out.File + file_iam_proto_rawDesc = nil + file_iam_proto_goTypes = nil + file_iam_proto_depIdxs = nil +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConnInterface + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion6 + +// SeaweedIdentityAccessManagementClient is the client API for SeaweedIdentityAccessManagement service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type SeaweedIdentityAccessManagementClient interface { +} + +type seaweedIdentityAccessManagementClient struct { + cc grpc.ClientConnInterface +} + +func NewSeaweedIdentityAccessManagementClient(cc grpc.ClientConnInterface) SeaweedIdentityAccessManagementClient { + return &seaweedIdentityAccessManagementClient{cc} +} + +// SeaweedIdentityAccessManagementServer is the server API for SeaweedIdentityAccessManagement service. +type SeaweedIdentityAccessManagementServer interface { +} + +// UnimplementedSeaweedIdentityAccessManagementServer can be embedded to have forward compatible implementations. 
+type UnimplementedSeaweedIdentityAccessManagementServer struct { +} + +func RegisterSeaweedIdentityAccessManagementServer(s *grpc.Server, srv SeaweedIdentityAccessManagementServer) { + s.RegisterService(&_SeaweedIdentityAccessManagement_serviceDesc, srv) +} + +var _SeaweedIdentityAccessManagement_serviceDesc = grpc.ServiceDesc{ + ServiceName: "iam_pb.SeaweedIdentityAccessManagement", + HandlerType: (*SeaweedIdentityAccessManagementServer)(nil), + Methods: []grpc.MethodDesc{}, + Streams: []grpc.StreamDesc{}, + Metadata: "iam.proto", +} diff --git a/weed/pb/master.proto b/weed/pb/master.proto index 9b1e884c7..cdb49d1e3 100644 --- a/weed/pb/master.proto +++ b/weed/pb/master.proto @@ -2,6 +2,8 @@ syntax = "proto3"; package master_pb; +option go_package = "github.com/chrislusf/seaweedfs/weed/pb/master_pb"; + ////////////////////////////////////////////////// service Seaweed { @@ -23,8 +25,17 @@ service Seaweed { } rpc LookupEcVolume (LookupEcVolumeRequest) returns (LookupEcVolumeResponse) { } + rpc VacuumVolume (VacuumVolumeRequest) returns (VacuumVolumeResponse) { + } rpc GetMasterConfiguration (GetMasterConfigurationRequest) returns (GetMasterConfigurationResponse) { } + rpc ListMasterClients (ListMasterClientsRequest) returns (ListMasterClientsResponse) { + } + rpc LeaseAdminToken (LeaseAdminTokenRequest) returns (LeaseAdminTokenResponse) { + } + rpc ReleaseAdminToken (ReleaseAdminTokenRequest) returns (ReleaseAdminTokenResponse) { + } + } ////////////////////////////////////////////////// @@ -33,7 +44,6 @@ message Heartbeat { string ip = 1; uint32 port = 2; string public_url = 3; - uint32 max_volume_count = 4; uint64 max_file_key = 5; string data_center = 6; string rack = 7; @@ -51,6 +61,8 @@ message Heartbeat { repeated VolumeEcShardInformationMessage deleted_ec_shards = 18; bool has_no_ec_shards = 19; + map max_volume_counts = 4; + } message HeartbeatResponse { @@ -76,6 +88,7 @@ message VolumeInformationMessage { int64 modified_at_second = 12; string remote_storage_name = 13; string remote_storage_key = 14; + string disk_type = 15; } message VolumeShortInformationMessage { @@ -84,12 +97,14 @@ message VolumeShortInformationMessage { uint32 replica_placement = 8; uint32 version = 9; uint32 ttl = 10; + string disk_type = 15; } message VolumeEcShardInformationMessage { uint32 id = 1; string collection = 2; uint32 ec_index_bits = 3; + string disk_type = 4; } message StorageBackend { @@ -112,6 +127,7 @@ message SuperBlockExtra { message KeepConnectedRequest { string name = 1; + uint32 grpc_port = 2; } message VolumeLocation { @@ -120,6 +136,7 @@ message VolumeLocation { repeated uint32 new_vids = 3; repeated uint32 deleted_vids = 4; string leader = 5; // optional when leader is not itself + string data_center = 6; // optional when DataCenter is in use } message LookupVolumeRequest { @@ -150,6 +167,7 @@ message AssignRequest { string data_node = 7; uint32 memory_map_max_size_mb = 8; uint32 Writable_volume_count = 9; + string disk_type = 10; } message AssignResponse { string fid = 1; @@ -164,11 +182,9 @@ message StatisticsRequest { string replication = 1; string collection = 2; string ttl = 3; + string disk_type = 4; } message StatisticsResponse { - string replication = 1; - string collection = 2; - string ttl = 3; uint64 total_size = 4; uint64 used_size = 5; uint64 file_count = 6; @@ -177,11 +193,6 @@ message StatisticsResponse { // // collection related // - -message StorageType { - string replication = 1; - string ttl = 2; -} message Collection { string name = 1; } @@ -202,8 +213,8 @@ message 
CollectionDeleteResponse { // // volume related // -message DataNodeInfo { - string id = 1; +message DiskInfo { + string type = 1; uint64 volume_count = 2; uint64 max_volume_count = 3; uint64 free_volume_count = 4; @@ -212,32 +223,24 @@ message DataNodeInfo { repeated VolumeEcShardInformationMessage ec_shard_infos = 7; uint64 remote_volume_count = 8; } +message DataNodeInfo { + string id = 1; + map diskInfos = 2; +} message RackInfo { string id = 1; - uint64 volume_count = 2; - uint64 max_volume_count = 3; - uint64 free_volume_count = 4; - uint64 active_volume_count = 5; - repeated DataNodeInfo data_node_infos = 6; - uint64 remote_volume_count = 7; + repeated DataNodeInfo data_node_infos = 2; + map diskInfos = 3; } message DataCenterInfo { string id = 1; - uint64 volume_count = 2; - uint64 max_volume_count = 3; - uint64 free_volume_count = 4; - uint64 active_volume_count = 5; - repeated RackInfo rack_infos = 6; - uint64 remote_volume_count = 7; + repeated RackInfo rack_infos = 2; + map diskInfos = 3; } message TopologyInfo { string id = 1; - uint64 volume_count = 2; - uint64 max_volume_count = 3; - uint64 free_volume_count = 4; - uint64 active_volume_count = 5; - repeated DataCenterInfo data_center_infos = 6; - uint64 remote_volume_count = 7; + repeated DataCenterInfo data_center_infos = 2; + map diskInfos = 3; } message VolumeListRequest { } @@ -258,9 +261,44 @@ message LookupEcVolumeResponse { repeated EcShardIdLocation shard_id_locations = 2; } +message VacuumVolumeRequest { + float garbage_threshold = 1; +} +message VacuumVolumeResponse { +} + message GetMasterConfigurationRequest { } message GetMasterConfigurationResponse { string metrics_address = 1; uint32 metrics_interval_seconds = 2; + repeated StorageBackend storage_backends = 3; + string default_replication = 4; + string leader = 5; +} + +message ListMasterClientsRequest { + string client_type = 1; +} +message ListMasterClientsResponse { + repeated string grpc_addresses = 1; +} + +message LeaseAdminTokenRequest { + int64 previous_token = 1; + int64 previous_lock_time = 2; + string lock_name = 3; + string client_name = 4; +} +message LeaseAdminTokenResponse { + int64 token = 1; + int64 lock_ts_ns = 2; +} + +message ReleaseAdminTokenRequest { + int64 previous_token = 1; + int64 previous_lock_time = 2; + string lock_name = 3; +} +message ReleaseAdminTokenResponse { } diff --git a/weed/pb/master_pb/master.pb.go b/weed/pb/master_pb/master.pb.go index ea4362c92..29d8499f8 100644 --- a/weed/pb/master_pb/master.pb.go +++ b/weed/pb/master_pb/master.pb.go @@ -1,1485 +1,4088 @@ -// Code generated by protoc-gen-go. +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.25.0 +// protoc v3.12.3 // source: master.proto -// DO NOT EDIT! - -/* -Package master_pb is a generated protocol buffer package. 
- -It is generated from these files: - master.proto - -It has these top-level messages: - Heartbeat - HeartbeatResponse - VolumeInformationMessage - VolumeShortInformationMessage - VolumeEcShardInformationMessage - StorageBackend - Empty - SuperBlockExtra - KeepConnectedRequest - VolumeLocation - LookupVolumeRequest - LookupVolumeResponse - Location - AssignRequest - AssignResponse - StatisticsRequest - StatisticsResponse - StorageType - Collection - CollectionListRequest - CollectionListResponse - CollectionDeleteRequest - CollectionDeleteResponse - DataNodeInfo - RackInfo - DataCenterInfo - TopologyInfo - VolumeListRequest - VolumeListResponse - LookupEcVolumeRequest - LookupEcVolumeResponse - GetMasterConfigurationRequest - GetMasterConfigurationResponse -*/ -package master_pb -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" +package master_pb import ( - context "golang.org/x/net/context" + context "context" + proto "github.com/golang/protobuf/proto" grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" ) -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package +// This is a compile-time assertion that a sufficiently up-to-date version +// of the legacy proto package is being used. 
+const _ = proto.ProtoPackageIsVersion4 type Heartbeat struct { - Ip string `protobuf:"bytes,1,opt,name=ip" json:"ip,omitempty"` - Port uint32 `protobuf:"varint,2,opt,name=port" json:"port,omitempty"` - PublicUrl string `protobuf:"bytes,3,opt,name=public_url,json=publicUrl" json:"public_url,omitempty"` - MaxVolumeCount uint32 `protobuf:"varint,4,opt,name=max_volume_count,json=maxVolumeCount" json:"max_volume_count,omitempty"` - MaxFileKey uint64 `protobuf:"varint,5,opt,name=max_file_key,json=maxFileKey" json:"max_file_key,omitempty"` - DataCenter string `protobuf:"bytes,6,opt,name=data_center,json=dataCenter" json:"data_center,omitempty"` - Rack string `protobuf:"bytes,7,opt,name=rack" json:"rack,omitempty"` - AdminPort uint32 `protobuf:"varint,8,opt,name=admin_port,json=adminPort" json:"admin_port,omitempty"` - Volumes []*VolumeInformationMessage `protobuf:"bytes,9,rep,name=volumes" json:"volumes,omitempty"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Ip string `protobuf:"bytes,1,opt,name=ip,proto3" json:"ip,omitempty"` + Port uint32 `protobuf:"varint,2,opt,name=port,proto3" json:"port,omitempty"` + PublicUrl string `protobuf:"bytes,3,opt,name=public_url,json=publicUrl,proto3" json:"public_url,omitempty"` + MaxFileKey uint64 `protobuf:"varint,5,opt,name=max_file_key,json=maxFileKey,proto3" json:"max_file_key,omitempty"` + DataCenter string `protobuf:"bytes,6,opt,name=data_center,json=dataCenter,proto3" json:"data_center,omitempty"` + Rack string `protobuf:"bytes,7,opt,name=rack,proto3" json:"rack,omitempty"` + AdminPort uint32 `protobuf:"varint,8,opt,name=admin_port,json=adminPort,proto3" json:"admin_port,omitempty"` + Volumes []*VolumeInformationMessage `protobuf:"bytes,9,rep,name=volumes,proto3" json:"volumes,omitempty"` // delta volumes - NewVolumes []*VolumeShortInformationMessage `protobuf:"bytes,10,rep,name=new_volumes,json=newVolumes" json:"new_volumes,omitempty"` - DeletedVolumes []*VolumeShortInformationMessage `protobuf:"bytes,11,rep,name=deleted_volumes,json=deletedVolumes" json:"deleted_volumes,omitempty"` - HasNoVolumes bool `protobuf:"varint,12,opt,name=has_no_volumes,json=hasNoVolumes" json:"has_no_volumes,omitempty"` + NewVolumes []*VolumeShortInformationMessage `protobuf:"bytes,10,rep,name=new_volumes,json=newVolumes,proto3" json:"new_volumes,omitempty"` + DeletedVolumes []*VolumeShortInformationMessage `protobuf:"bytes,11,rep,name=deleted_volumes,json=deletedVolumes,proto3" json:"deleted_volumes,omitempty"` + HasNoVolumes bool `protobuf:"varint,12,opt,name=has_no_volumes,json=hasNoVolumes,proto3" json:"has_no_volumes,omitempty"` // erasure coding - EcShards []*VolumeEcShardInformationMessage `protobuf:"bytes,16,rep,name=ec_shards,json=ecShards" json:"ec_shards,omitempty"` + EcShards []*VolumeEcShardInformationMessage `protobuf:"bytes,16,rep,name=ec_shards,json=ecShards,proto3" json:"ec_shards,omitempty"` // delta erasure coding shards - NewEcShards []*VolumeEcShardInformationMessage `protobuf:"bytes,17,rep,name=new_ec_shards,json=newEcShards" json:"new_ec_shards,omitempty"` - DeletedEcShards []*VolumeEcShardInformationMessage `protobuf:"bytes,18,rep,name=deleted_ec_shards,json=deletedEcShards" json:"deleted_ec_shards,omitempty"` - HasNoEcShards bool `protobuf:"varint,19,opt,name=has_no_ec_shards,json=hasNoEcShards" json:"has_no_ec_shards,omitempty"` + NewEcShards []*VolumeEcShardInformationMessage `protobuf:"bytes,17,rep,name=new_ec_shards,json=newEcShards,proto3" json:"new_ec_shards,omitempty"` + 
DeletedEcShards []*VolumeEcShardInformationMessage `protobuf:"bytes,18,rep,name=deleted_ec_shards,json=deletedEcShards,proto3" json:"deleted_ec_shards,omitempty"` + HasNoEcShards bool `protobuf:"varint,19,opt,name=has_no_ec_shards,json=hasNoEcShards,proto3" json:"has_no_ec_shards,omitempty"` + MaxVolumeCounts map[string]uint32 `protobuf:"bytes,4,rep,name=max_volume_counts,json=maxVolumeCounts,proto3" json:"max_volume_counts,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"` } -func (m *Heartbeat) Reset() { *m = Heartbeat{} } -func (m *Heartbeat) String() string { return proto.CompactTextString(m) } -func (*Heartbeat) ProtoMessage() {} -func (*Heartbeat) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } - -func (m *Heartbeat) GetIp() string { - if m != nil { - return m.Ip +func (x *Heartbeat) Reset() { + *x = Heartbeat{} + if protoimpl.UnsafeEnabled { + mi := &file_master_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return "" } -func (m *Heartbeat) GetPort() uint32 { - if m != nil { - return m.Port +func (x *Heartbeat) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Heartbeat) ProtoMessage() {} + +func (x *Heartbeat) ProtoReflect() protoreflect.Message { + mi := &file_master_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return 0 + return mi.MessageOf(x) +} + +// Deprecated: Use Heartbeat.ProtoReflect.Descriptor instead. +func (*Heartbeat) Descriptor() ([]byte, []int) { + return file_master_proto_rawDescGZIP(), []int{0} } -func (m *Heartbeat) GetPublicUrl() string { - if m != nil { - return m.PublicUrl +func (x *Heartbeat) GetIp() string { + if x != nil { + return x.Ip } return "" } -func (m *Heartbeat) GetMaxVolumeCount() uint32 { - if m != nil { - return m.MaxVolumeCount +func (x *Heartbeat) GetPort() uint32 { + if x != nil { + return x.Port } return 0 } -func (m *Heartbeat) GetMaxFileKey() uint64 { - if m != nil { - return m.MaxFileKey +func (x *Heartbeat) GetPublicUrl() string { + if x != nil { + return x.PublicUrl + } + return "" +} + +func (x *Heartbeat) GetMaxFileKey() uint64 { + if x != nil { + return x.MaxFileKey } return 0 } -func (m *Heartbeat) GetDataCenter() string { - if m != nil { - return m.DataCenter +func (x *Heartbeat) GetDataCenter() string { + if x != nil { + return x.DataCenter } return "" } -func (m *Heartbeat) GetRack() string { - if m != nil { - return m.Rack +func (x *Heartbeat) GetRack() string { + if x != nil { + return x.Rack } return "" } -func (m *Heartbeat) GetAdminPort() uint32 { - if m != nil { - return m.AdminPort +func (x *Heartbeat) GetAdminPort() uint32 { + if x != nil { + return x.AdminPort } return 0 } -func (m *Heartbeat) GetVolumes() []*VolumeInformationMessage { - if m != nil { - return m.Volumes +func (x *Heartbeat) GetVolumes() []*VolumeInformationMessage { + if x != nil { + return x.Volumes } return nil } -func (m *Heartbeat) GetNewVolumes() []*VolumeShortInformationMessage { - if m != nil { - return m.NewVolumes +func (x *Heartbeat) GetNewVolumes() []*VolumeShortInformationMessage { + if x != nil { + return x.NewVolumes } return nil } -func (m *Heartbeat) GetDeletedVolumes() []*VolumeShortInformationMessage { - if m != nil { - return m.DeletedVolumes +func (x *Heartbeat) GetDeletedVolumes() []*VolumeShortInformationMessage { + if x != nil { + return 
x.DeletedVolumes } return nil } -func (m *Heartbeat) GetHasNoVolumes() bool { - if m != nil { - return m.HasNoVolumes +func (x *Heartbeat) GetHasNoVolumes() bool { + if x != nil { + return x.HasNoVolumes } return false } -func (m *Heartbeat) GetEcShards() []*VolumeEcShardInformationMessage { - if m != nil { - return m.EcShards +func (x *Heartbeat) GetEcShards() []*VolumeEcShardInformationMessage { + if x != nil { + return x.EcShards } return nil } -func (m *Heartbeat) GetNewEcShards() []*VolumeEcShardInformationMessage { - if m != nil { - return m.NewEcShards +func (x *Heartbeat) GetNewEcShards() []*VolumeEcShardInformationMessage { + if x != nil { + return x.NewEcShards } return nil } -func (m *Heartbeat) GetDeletedEcShards() []*VolumeEcShardInformationMessage { - if m != nil { - return m.DeletedEcShards +func (x *Heartbeat) GetDeletedEcShards() []*VolumeEcShardInformationMessage { + if x != nil { + return x.DeletedEcShards } return nil } -func (m *Heartbeat) GetHasNoEcShards() bool { - if m != nil { - return m.HasNoEcShards +func (x *Heartbeat) GetHasNoEcShards() bool { + if x != nil { + return x.HasNoEcShards } return false } +func (x *Heartbeat) GetMaxVolumeCounts() map[string]uint32 { + if x != nil { + return x.MaxVolumeCounts + } + return nil +} + type HeartbeatResponse struct { - VolumeSizeLimit uint64 `protobuf:"varint,1,opt,name=volume_size_limit,json=volumeSizeLimit" json:"volume_size_limit,omitempty"` - Leader string `protobuf:"bytes,2,opt,name=leader" json:"leader,omitempty"` - MetricsAddress string `protobuf:"bytes,3,opt,name=metrics_address,json=metricsAddress" json:"metrics_address,omitempty"` - MetricsIntervalSeconds uint32 `protobuf:"varint,4,opt,name=metrics_interval_seconds,json=metricsIntervalSeconds" json:"metrics_interval_seconds,omitempty"` - StorageBackends []*StorageBackend `protobuf:"bytes,5,rep,name=storage_backends,json=storageBackends" json:"storage_backends,omitempty"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + VolumeSizeLimit uint64 `protobuf:"varint,1,opt,name=volume_size_limit,json=volumeSizeLimit,proto3" json:"volume_size_limit,omitempty"` + Leader string `protobuf:"bytes,2,opt,name=leader,proto3" json:"leader,omitempty"` + MetricsAddress string `protobuf:"bytes,3,opt,name=metrics_address,json=metricsAddress,proto3" json:"metrics_address,omitempty"` + MetricsIntervalSeconds uint32 `protobuf:"varint,4,opt,name=metrics_interval_seconds,json=metricsIntervalSeconds,proto3" json:"metrics_interval_seconds,omitempty"` + StorageBackends []*StorageBackend `protobuf:"bytes,5,rep,name=storage_backends,json=storageBackends,proto3" json:"storage_backends,omitempty"` +} + +func (x *HeartbeatResponse) Reset() { + *x = HeartbeatResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_master_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HeartbeatResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HeartbeatResponse) ProtoMessage() {} + +func (x *HeartbeatResponse) ProtoReflect() protoreflect.Message { + mi := &file_master_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -func (m *HeartbeatResponse) Reset() { *m = HeartbeatResponse{} } -func (m *HeartbeatResponse) String() string { return proto.CompactTextString(m) } -func 
(*HeartbeatResponse) ProtoMessage() {} -func (*HeartbeatResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } +// Deprecated: Use HeartbeatResponse.ProtoReflect.Descriptor instead. +func (*HeartbeatResponse) Descriptor() ([]byte, []int) { + return file_master_proto_rawDescGZIP(), []int{1} +} -func (m *HeartbeatResponse) GetVolumeSizeLimit() uint64 { - if m != nil { - return m.VolumeSizeLimit +func (x *HeartbeatResponse) GetVolumeSizeLimit() uint64 { + if x != nil { + return x.VolumeSizeLimit } return 0 } -func (m *HeartbeatResponse) GetLeader() string { - if m != nil { - return m.Leader +func (x *HeartbeatResponse) GetLeader() string { + if x != nil { + return x.Leader } return "" } -func (m *HeartbeatResponse) GetMetricsAddress() string { - if m != nil { - return m.MetricsAddress +func (x *HeartbeatResponse) GetMetricsAddress() string { + if x != nil { + return x.MetricsAddress } return "" } -func (m *HeartbeatResponse) GetMetricsIntervalSeconds() uint32 { - if m != nil { - return m.MetricsIntervalSeconds +func (x *HeartbeatResponse) GetMetricsIntervalSeconds() uint32 { + if x != nil { + return x.MetricsIntervalSeconds } return 0 } -func (m *HeartbeatResponse) GetStorageBackends() []*StorageBackend { - if m != nil { - return m.StorageBackends +func (x *HeartbeatResponse) GetStorageBackends() []*StorageBackend { + if x != nil { + return x.StorageBackends } return nil } type VolumeInformationMessage struct { - Id uint32 `protobuf:"varint,1,opt,name=id" json:"id,omitempty"` - Size uint64 `protobuf:"varint,2,opt,name=size" json:"size,omitempty"` - Collection string `protobuf:"bytes,3,opt,name=collection" json:"collection,omitempty"` - FileCount uint64 `protobuf:"varint,4,opt,name=file_count,json=fileCount" json:"file_count,omitempty"` - DeleteCount uint64 `protobuf:"varint,5,opt,name=delete_count,json=deleteCount" json:"delete_count,omitempty"` - DeletedByteCount uint64 `protobuf:"varint,6,opt,name=deleted_byte_count,json=deletedByteCount" json:"deleted_byte_count,omitempty"` - ReadOnly bool `protobuf:"varint,7,opt,name=read_only,json=readOnly" json:"read_only,omitempty"` - ReplicaPlacement uint32 `protobuf:"varint,8,opt,name=replica_placement,json=replicaPlacement" json:"replica_placement,omitempty"` - Version uint32 `protobuf:"varint,9,opt,name=version" json:"version,omitempty"` - Ttl uint32 `protobuf:"varint,10,opt,name=ttl" json:"ttl,omitempty"` - CompactRevision uint32 `protobuf:"varint,11,opt,name=compact_revision,json=compactRevision" json:"compact_revision,omitempty"` - ModifiedAtSecond int64 `protobuf:"varint,12,opt,name=modified_at_second,json=modifiedAtSecond" json:"modified_at_second,omitempty"` - RemoteStorageName string `protobuf:"bytes,13,opt,name=remote_storage_name,json=remoteStorageName" json:"remote_storage_name,omitempty"` - RemoteStorageKey string `protobuf:"bytes,14,opt,name=remote_storage_key,json=remoteStorageKey" json:"remote_storage_key,omitempty"` -} - -func (m *VolumeInformationMessage) Reset() { *m = VolumeInformationMessage{} } -func (m *VolumeInformationMessage) String() string { return proto.CompactTextString(m) } -func (*VolumeInformationMessage) ProtoMessage() {} -func (*VolumeInformationMessage) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } - -func (m *VolumeInformationMessage) GetId() uint32 { - if m != nil { - return m.Id + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Id uint32 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"` + Size uint64 
`protobuf:"varint,2,opt,name=size,proto3" json:"size,omitempty"` + Collection string `protobuf:"bytes,3,opt,name=collection,proto3" json:"collection,omitempty"` + FileCount uint64 `protobuf:"varint,4,opt,name=file_count,json=fileCount,proto3" json:"file_count,omitempty"` + DeleteCount uint64 `protobuf:"varint,5,opt,name=delete_count,json=deleteCount,proto3" json:"delete_count,omitempty"` + DeletedByteCount uint64 `protobuf:"varint,6,opt,name=deleted_byte_count,json=deletedByteCount,proto3" json:"deleted_byte_count,omitempty"` + ReadOnly bool `protobuf:"varint,7,opt,name=read_only,json=readOnly,proto3" json:"read_only,omitempty"` + ReplicaPlacement uint32 `protobuf:"varint,8,opt,name=replica_placement,json=replicaPlacement,proto3" json:"replica_placement,omitempty"` + Version uint32 `protobuf:"varint,9,opt,name=version,proto3" json:"version,omitempty"` + Ttl uint32 `protobuf:"varint,10,opt,name=ttl,proto3" json:"ttl,omitempty"` + CompactRevision uint32 `protobuf:"varint,11,opt,name=compact_revision,json=compactRevision,proto3" json:"compact_revision,omitempty"` + ModifiedAtSecond int64 `protobuf:"varint,12,opt,name=modified_at_second,json=modifiedAtSecond,proto3" json:"modified_at_second,omitempty"` + RemoteStorageName string `protobuf:"bytes,13,opt,name=remote_storage_name,json=remoteStorageName,proto3" json:"remote_storage_name,omitempty"` + RemoteStorageKey string `protobuf:"bytes,14,opt,name=remote_storage_key,json=remoteStorageKey,proto3" json:"remote_storage_key,omitempty"` + DiskType string `protobuf:"bytes,15,opt,name=disk_type,json=diskType,proto3" json:"disk_type,omitempty"` +} + +func (x *VolumeInformationMessage) Reset() { + *x = VolumeInformationMessage{} + if protoimpl.UnsafeEnabled { + mi := &file_master_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *VolumeInformationMessage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VolumeInformationMessage) ProtoMessage() {} + +func (x *VolumeInformationMessage) ProtoReflect() protoreflect.Message { + mi := &file_master_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use VolumeInformationMessage.ProtoReflect.Descriptor instead. 
+func (*VolumeInformationMessage) Descriptor() ([]byte, []int) { + return file_master_proto_rawDescGZIP(), []int{2} +} + +func (x *VolumeInformationMessage) GetId() uint32 { + if x != nil { + return x.Id } return 0 } -func (m *VolumeInformationMessage) GetSize() uint64 { - if m != nil { - return m.Size +func (x *VolumeInformationMessage) GetSize() uint64 { + if x != nil { + return x.Size } return 0 } -func (m *VolumeInformationMessage) GetCollection() string { - if m != nil { - return m.Collection +func (x *VolumeInformationMessage) GetCollection() string { + if x != nil { + return x.Collection } return "" } -func (m *VolumeInformationMessage) GetFileCount() uint64 { - if m != nil { - return m.FileCount +func (x *VolumeInformationMessage) GetFileCount() uint64 { + if x != nil { + return x.FileCount } return 0 } -func (m *VolumeInformationMessage) GetDeleteCount() uint64 { - if m != nil { - return m.DeleteCount +func (x *VolumeInformationMessage) GetDeleteCount() uint64 { + if x != nil { + return x.DeleteCount } return 0 } -func (m *VolumeInformationMessage) GetDeletedByteCount() uint64 { - if m != nil { - return m.DeletedByteCount +func (x *VolumeInformationMessage) GetDeletedByteCount() uint64 { + if x != nil { + return x.DeletedByteCount } return 0 } -func (m *VolumeInformationMessage) GetReadOnly() bool { - if m != nil { - return m.ReadOnly +func (x *VolumeInformationMessage) GetReadOnly() bool { + if x != nil { + return x.ReadOnly } return false } -func (m *VolumeInformationMessage) GetReplicaPlacement() uint32 { - if m != nil { - return m.ReplicaPlacement +func (x *VolumeInformationMessage) GetReplicaPlacement() uint32 { + if x != nil { + return x.ReplicaPlacement } return 0 } -func (m *VolumeInformationMessage) GetVersion() uint32 { - if m != nil { - return m.Version +func (x *VolumeInformationMessage) GetVersion() uint32 { + if x != nil { + return x.Version } return 0 } -func (m *VolumeInformationMessage) GetTtl() uint32 { - if m != nil { - return m.Ttl +func (x *VolumeInformationMessage) GetTtl() uint32 { + if x != nil { + return x.Ttl } return 0 } -func (m *VolumeInformationMessage) GetCompactRevision() uint32 { - if m != nil { - return m.CompactRevision +func (x *VolumeInformationMessage) GetCompactRevision() uint32 { + if x != nil { + return x.CompactRevision } return 0 } -func (m *VolumeInformationMessage) GetModifiedAtSecond() int64 { - if m != nil { - return m.ModifiedAtSecond +func (x *VolumeInformationMessage) GetModifiedAtSecond() int64 { + if x != nil { + return x.ModifiedAtSecond } return 0 } -func (m *VolumeInformationMessage) GetRemoteStorageName() string { - if m != nil { - return m.RemoteStorageName +func (x *VolumeInformationMessage) GetRemoteStorageName() string { + if x != nil { + return x.RemoteStorageName + } + return "" +} + +func (x *VolumeInformationMessage) GetRemoteStorageKey() string { + if x != nil { + return x.RemoteStorageKey } return "" } -func (m *VolumeInformationMessage) GetRemoteStorageKey() string { - if m != nil { - return m.RemoteStorageKey +func (x *VolumeInformationMessage) GetDiskType() string { + if x != nil { + return x.DiskType } return "" } type VolumeShortInformationMessage struct { - Id uint32 `protobuf:"varint,1,opt,name=id" json:"id,omitempty"` - Collection string `protobuf:"bytes,3,opt,name=collection" json:"collection,omitempty"` - ReplicaPlacement uint32 `protobuf:"varint,8,opt,name=replica_placement,json=replicaPlacement" json:"replica_placement,omitempty"` - Version uint32 `protobuf:"varint,9,opt,name=version" 
json:"version,omitempty"` - Ttl uint32 `protobuf:"varint,10,opt,name=ttl" json:"ttl,omitempty"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Id uint32 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"` + Collection string `protobuf:"bytes,3,opt,name=collection,proto3" json:"collection,omitempty"` + ReplicaPlacement uint32 `protobuf:"varint,8,opt,name=replica_placement,json=replicaPlacement,proto3" json:"replica_placement,omitempty"` + Version uint32 `protobuf:"varint,9,opt,name=version,proto3" json:"version,omitempty"` + Ttl uint32 `protobuf:"varint,10,opt,name=ttl,proto3" json:"ttl,omitempty"` + DiskType string `protobuf:"bytes,15,opt,name=disk_type,json=diskType,proto3" json:"disk_type,omitempty"` +} + +func (x *VolumeShortInformationMessage) Reset() { + *x = VolumeShortInformationMessage{} + if protoimpl.UnsafeEnabled { + mi := &file_master_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *VolumeShortInformationMessage) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *VolumeShortInformationMessage) Reset() { *m = VolumeShortInformationMessage{} } -func (m *VolumeShortInformationMessage) String() string { return proto.CompactTextString(m) } -func (*VolumeShortInformationMessage) ProtoMessage() {} -func (*VolumeShortInformationMessage) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } +func (*VolumeShortInformationMessage) ProtoMessage() {} -func (m *VolumeShortInformationMessage) GetId() uint32 { - if m != nil { - return m.Id +func (x *VolumeShortInformationMessage) ProtoReflect() protoreflect.Message { + mi := &file_master_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use VolumeShortInformationMessage.ProtoReflect.Descriptor instead. 
+func (*VolumeShortInformationMessage) Descriptor() ([]byte, []int) { + return file_master_proto_rawDescGZIP(), []int{3} +} + +func (x *VolumeShortInformationMessage) GetId() uint32 { + if x != nil { + return x.Id } return 0 } -func (m *VolumeShortInformationMessage) GetCollection() string { - if m != nil { - return m.Collection +func (x *VolumeShortInformationMessage) GetCollection() string { + if x != nil { + return x.Collection } return "" } -func (m *VolumeShortInformationMessage) GetReplicaPlacement() uint32 { - if m != nil { - return m.ReplicaPlacement +func (x *VolumeShortInformationMessage) GetReplicaPlacement() uint32 { + if x != nil { + return x.ReplicaPlacement } return 0 } -func (m *VolumeShortInformationMessage) GetVersion() uint32 { - if m != nil { - return m.Version +func (x *VolumeShortInformationMessage) GetVersion() uint32 { + if x != nil { + return x.Version } return 0 } -func (m *VolumeShortInformationMessage) GetTtl() uint32 { - if m != nil { - return m.Ttl +func (x *VolumeShortInformationMessage) GetTtl() uint32 { + if x != nil { + return x.Ttl } return 0 } +func (x *VolumeShortInformationMessage) GetDiskType() string { + if x != nil { + return x.DiskType + } + return "" +} + type VolumeEcShardInformationMessage struct { - Id uint32 `protobuf:"varint,1,opt,name=id" json:"id,omitempty"` - Collection string `protobuf:"bytes,2,opt,name=collection" json:"collection,omitempty"` - EcIndexBits uint32 `protobuf:"varint,3,opt,name=ec_index_bits,json=ecIndexBits" json:"ec_index_bits,omitempty"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Id uint32 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"` + Collection string `protobuf:"bytes,2,opt,name=collection,proto3" json:"collection,omitempty"` + EcIndexBits uint32 `protobuf:"varint,3,opt,name=ec_index_bits,json=ecIndexBits,proto3" json:"ec_index_bits,omitempty"` + DiskType string `protobuf:"bytes,4,opt,name=disk_type,json=diskType,proto3" json:"disk_type,omitempty"` +} + +func (x *VolumeEcShardInformationMessage) Reset() { + *x = VolumeEcShardInformationMessage{} + if protoimpl.UnsafeEnabled { + mi := &file_master_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *VolumeEcShardInformationMessage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VolumeEcShardInformationMessage) ProtoMessage() {} + +func (x *VolumeEcShardInformationMessage) ProtoReflect() protoreflect.Message { + mi := &file_master_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -func (m *VolumeEcShardInformationMessage) Reset() { *m = VolumeEcShardInformationMessage{} } -func (m *VolumeEcShardInformationMessage) String() string { return proto.CompactTextString(m) } -func (*VolumeEcShardInformationMessage) ProtoMessage() {} -func (*VolumeEcShardInformationMessage) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } +// Deprecated: Use VolumeEcShardInformationMessage.ProtoReflect.Descriptor instead. 
+func (*VolumeEcShardInformationMessage) Descriptor() ([]byte, []int) { + return file_master_proto_rawDescGZIP(), []int{4} +} -func (m *VolumeEcShardInformationMessage) GetId() uint32 { - if m != nil { - return m.Id +func (x *VolumeEcShardInformationMessage) GetId() uint32 { + if x != nil { + return x.Id } return 0 } -func (m *VolumeEcShardInformationMessage) GetCollection() string { - if m != nil { - return m.Collection +func (x *VolumeEcShardInformationMessage) GetCollection() string { + if x != nil { + return x.Collection } return "" } -func (m *VolumeEcShardInformationMessage) GetEcIndexBits() uint32 { - if m != nil { - return m.EcIndexBits +func (x *VolumeEcShardInformationMessage) GetEcIndexBits() uint32 { + if x != nil { + return x.EcIndexBits } return 0 } +func (x *VolumeEcShardInformationMessage) GetDiskType() string { + if x != nil { + return x.DiskType + } + return "" +} + type StorageBackend struct { - Type string `protobuf:"bytes,1,opt,name=type" json:"type,omitempty"` - Id string `protobuf:"bytes,2,opt,name=id" json:"id,omitempty"` - Properties map[string]string `protobuf:"bytes,3,rep,name=properties" json:"properties,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` + Id string `protobuf:"bytes,2,opt,name=id,proto3" json:"id,omitempty"` + Properties map[string]string `protobuf:"bytes,3,rep,name=properties,proto3" json:"properties,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *StorageBackend) Reset() { + *x = StorageBackend{} + if protoimpl.UnsafeEnabled { + mi := &file_master_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *StorageBackend) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StorageBackend) ProtoMessage() {} + +func (x *StorageBackend) ProtoReflect() protoreflect.Message { + mi := &file_master_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -func (m *StorageBackend) Reset() { *m = StorageBackend{} } -func (m *StorageBackend) String() string { return proto.CompactTextString(m) } -func (*StorageBackend) ProtoMessage() {} -func (*StorageBackend) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} } +// Deprecated: Use StorageBackend.ProtoReflect.Descriptor instead. 
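The master.proto hunk above also adds storage_backends to GetMasterConfigurationResponse. Assuming the generated accessors follow the usual protoc-gen-go naming (the generated code for that message is not shown here), a client could inspect the backends as sketched below; the backend type, id, and property keys are illustrative and depend entirely on what is configured.

package main

import (
	"fmt"

	"github.com/chrislusf/seaweedfs/weed/pb/master_pb"
)

// printBackends lists each configured storage backend and its properties.
func printBackends(resp *master_pb.GetMasterConfigurationResponse) {
	for _, backend := range resp.GetStorageBackends() {
		fmt.Printf("backend %s (type %s)\n", backend.GetId(), backend.GetType())
		for k, v := range backend.GetProperties() {
			fmt.Printf("  %s = %s\n", k, v)
		}
	}
}

func main() {
	// Illustrative, hand-built response; in practice this would come from
	// the master's GetMasterConfiguration RPC.
	printBackends(&master_pb.GetMasterConfigurationResponse{
		StorageBackends: []*master_pb.StorageBackend{
			{Type: "s3", Id: "backend1", Properties: map[string]string{"region": "us-east-1"}},
		},
	})
}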
+func (*StorageBackend) Descriptor() ([]byte, []int) { + return file_master_proto_rawDescGZIP(), []int{5} +} -func (m *StorageBackend) GetType() string { - if m != nil { - return m.Type +func (x *StorageBackend) GetType() string { + if x != nil { + return x.Type } return "" } -func (m *StorageBackend) GetId() string { - if m != nil { - return m.Id +func (x *StorageBackend) GetId() string { + if x != nil { + return x.Id } return "" } -func (m *StorageBackend) GetProperties() map[string]string { - if m != nil { - return m.Properties +func (x *StorageBackend) GetProperties() map[string]string { + if x != nil { + return x.Properties } return nil } type Empty struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields } -func (m *Empty) Reset() { *m = Empty{} } -func (m *Empty) String() string { return proto.CompactTextString(m) } -func (*Empty) ProtoMessage() {} -func (*Empty) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} } +func (x *Empty) Reset() { + *x = Empty{} + if protoimpl.UnsafeEnabled { + mi := &file_master_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} -type SuperBlockExtra struct { - ErasureCoding *SuperBlockExtra_ErasureCoding `protobuf:"bytes,1,opt,name=erasure_coding,json=erasureCoding" json:"erasure_coding,omitempty"` +func (x *Empty) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *SuperBlockExtra) Reset() { *m = SuperBlockExtra{} } -func (m *SuperBlockExtra) String() string { return proto.CompactTextString(m) } -func (*SuperBlockExtra) ProtoMessage() {} -func (*SuperBlockExtra) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} } +func (*Empty) ProtoMessage() {} -func (m *SuperBlockExtra) GetErasureCoding() *SuperBlockExtra_ErasureCoding { - if m != nil { - return m.ErasureCoding +func (x *Empty) ProtoReflect() protoreflect.Message { + mi := &file_master_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return nil + return mi.MessageOf(x) } -type SuperBlockExtra_ErasureCoding struct { - Data uint32 `protobuf:"varint,1,opt,name=data" json:"data,omitempty"` - Parity uint32 `protobuf:"varint,2,opt,name=parity" json:"parity,omitempty"` - VolumeIds []uint32 `protobuf:"varint,3,rep,packed,name=volume_ids,json=volumeIds" json:"volume_ids,omitempty"` +// Deprecated: Use Empty.ProtoReflect.Descriptor instead. 
+func (*Empty) Descriptor() ([]byte, []int) { + return file_master_proto_rawDescGZIP(), []int{6} } -func (m *SuperBlockExtra_ErasureCoding) Reset() { *m = SuperBlockExtra_ErasureCoding{} } -func (m *SuperBlockExtra_ErasureCoding) String() string { return proto.CompactTextString(m) } -func (*SuperBlockExtra_ErasureCoding) ProtoMessage() {} -func (*SuperBlockExtra_ErasureCoding) Descriptor() ([]byte, []int) { - return fileDescriptor0, []int{7, 0} +type SuperBlockExtra struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ErasureCoding *SuperBlockExtra_ErasureCoding `protobuf:"bytes,1,opt,name=erasure_coding,json=erasureCoding,proto3" json:"erasure_coding,omitempty"` } -func (m *SuperBlockExtra_ErasureCoding) GetData() uint32 { - if m != nil { - return m.Data +func (x *SuperBlockExtra) Reset() { + *x = SuperBlockExtra{} + if protoimpl.UnsafeEnabled { + mi := &file_master_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return 0 } -func (m *SuperBlockExtra_ErasureCoding) GetParity() uint32 { - if m != nil { - return m.Parity +func (x *SuperBlockExtra) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SuperBlockExtra) ProtoMessage() {} + +func (x *SuperBlockExtra) ProtoReflect() protoreflect.Message { + mi := &file_master_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return 0 + return mi.MessageOf(x) } -func (m *SuperBlockExtra_ErasureCoding) GetVolumeIds() []uint32 { - if m != nil { - return m.VolumeIds +// Deprecated: Use SuperBlockExtra.ProtoReflect.Descriptor instead. 
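Note: throughout this regenerated file the same APIv2 pattern repeats: receivers renamed from m to x, a protoimpl state/sizeCache/unknownFields header added to every struct, Reset/String delegating to the protoimpl runtime, and Descriptor() kept only as a deprecated wrapper over the lazily gunzipped raw descriptor (file_master_proto_rawDescGZIP). A minimal sketch of the non-deprecated metadata path, assuming the repository's weed/pb/master_pb import path:

package sketch

import master_pb "github.com/chrislusf/seaweedfs/weed/pb/master_pb"

// descriptorName reaches message metadata via ProtoReflect().Descriptor(),
// the replacement for the deprecated Descriptor() method generated below.
func descriptorName() string {
	msg := &master_pb.Empty{}
	return string(msg.ProtoReflect().Descriptor().FullName()) // e.g. "master_pb.Empty"
}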
+func (*SuperBlockExtra) Descriptor() ([]byte, []int) { + return file_master_proto_rawDescGZIP(), []int{7} +} + +func (x *SuperBlockExtra) GetErasureCoding() *SuperBlockExtra_ErasureCoding { + if x != nil { + return x.ErasureCoding } return nil } type KeepConnectedRequest struct { - Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + GrpcPort uint32 `protobuf:"varint,2,opt,name=grpc_port,json=grpcPort,proto3" json:"grpc_port,omitempty"` +} + +func (x *KeepConnectedRequest) Reset() { + *x = KeepConnectedRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_master_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *KeepConnectedRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *KeepConnectedRequest) Reset() { *m = KeepConnectedRequest{} } -func (m *KeepConnectedRequest) String() string { return proto.CompactTextString(m) } -func (*KeepConnectedRequest) ProtoMessage() {} -func (*KeepConnectedRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} } +func (*KeepConnectedRequest) ProtoMessage() {} -func (m *KeepConnectedRequest) GetName() string { - if m != nil { - return m.Name +func (x *KeepConnectedRequest) ProtoReflect() protoreflect.Message { + mi := &file_master_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use KeepConnectedRequest.ProtoReflect.Descriptor instead. 
+func (*KeepConnectedRequest) Descriptor() ([]byte, []int) { + return file_master_proto_rawDescGZIP(), []int{8} +} + +func (x *KeepConnectedRequest) GetName() string { + if x != nil { + return x.Name } return "" } +func (x *KeepConnectedRequest) GetGrpcPort() uint32 { + if x != nil { + return x.GrpcPort + } + return 0 +} + type VolumeLocation struct { - Url string `protobuf:"bytes,1,opt,name=url" json:"url,omitempty"` - PublicUrl string `protobuf:"bytes,2,opt,name=public_url,json=publicUrl" json:"public_url,omitempty"` - NewVids []uint32 `protobuf:"varint,3,rep,packed,name=new_vids,json=newVids" json:"new_vids,omitempty"` - DeletedVids []uint32 `protobuf:"varint,4,rep,packed,name=deleted_vids,json=deletedVids" json:"deleted_vids,omitempty"` - Leader string `protobuf:"bytes,5,opt,name=leader" json:"leader,omitempty"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Url string `protobuf:"bytes,1,opt,name=url,proto3" json:"url,omitempty"` + PublicUrl string `protobuf:"bytes,2,opt,name=public_url,json=publicUrl,proto3" json:"public_url,omitempty"` + NewVids []uint32 `protobuf:"varint,3,rep,packed,name=new_vids,json=newVids,proto3" json:"new_vids,omitempty"` + DeletedVids []uint32 `protobuf:"varint,4,rep,packed,name=deleted_vids,json=deletedVids,proto3" json:"deleted_vids,omitempty"` + Leader string `protobuf:"bytes,5,opt,name=leader,proto3" json:"leader,omitempty"` // optional when leader is not itself + DataCenter string `protobuf:"bytes,6,opt,name=data_center,json=dataCenter,proto3" json:"data_center,omitempty"` // optional when DataCenter is in use +} + +func (x *VolumeLocation) Reset() { + *x = VolumeLocation{} + if protoimpl.UnsafeEnabled { + mi := &file_master_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *VolumeLocation) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VolumeLocation) ProtoMessage() {} + +func (x *VolumeLocation) ProtoReflect() protoreflect.Message { + mi := &file_master_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -func (m *VolumeLocation) Reset() { *m = VolumeLocation{} } -func (m *VolumeLocation) String() string { return proto.CompactTextString(m) } -func (*VolumeLocation) ProtoMessage() {} -func (*VolumeLocation) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} } +// Deprecated: Use VolumeLocation.ProtoReflect.Descriptor instead. 
+func (*VolumeLocation) Descriptor() ([]byte, []int) { + return file_master_proto_rawDescGZIP(), []int{9} +} -func (m *VolumeLocation) GetUrl() string { - if m != nil { - return m.Url +func (x *VolumeLocation) GetUrl() string { + if x != nil { + return x.Url } return "" } -func (m *VolumeLocation) GetPublicUrl() string { - if m != nil { - return m.PublicUrl +func (x *VolumeLocation) GetPublicUrl() string { + if x != nil { + return x.PublicUrl } return "" } -func (m *VolumeLocation) GetNewVids() []uint32 { - if m != nil { - return m.NewVids +func (x *VolumeLocation) GetNewVids() []uint32 { + if x != nil { + return x.NewVids } return nil } -func (m *VolumeLocation) GetDeletedVids() []uint32 { - if m != nil { - return m.DeletedVids +func (x *VolumeLocation) GetDeletedVids() []uint32 { + if x != nil { + return x.DeletedVids } return nil } -func (m *VolumeLocation) GetLeader() string { - if m != nil { - return m.Leader +func (x *VolumeLocation) GetLeader() string { + if x != nil { + return x.Leader + } + return "" +} + +func (x *VolumeLocation) GetDataCenter() string { + if x != nil { + return x.DataCenter } return "" } type LookupVolumeRequest struct { - VolumeIds []string `protobuf:"bytes,1,rep,name=volume_ids,json=volumeIds" json:"volume_ids,omitempty"` - Collection string `protobuf:"bytes,2,opt,name=collection" json:"collection,omitempty"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + VolumeIds []string `protobuf:"bytes,1,rep,name=volume_ids,json=volumeIds,proto3" json:"volume_ids,omitempty"` + Collection string `protobuf:"bytes,2,opt,name=collection,proto3" json:"collection,omitempty"` // optional, a bit faster if provided. +} + +func (x *LookupVolumeRequest) Reset() { + *x = LookupVolumeRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_master_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *LookupVolumeRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LookupVolumeRequest) ProtoMessage() {} + +func (x *LookupVolumeRequest) ProtoReflect() protoreflect.Message { + mi := &file_master_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -func (m *LookupVolumeRequest) Reset() { *m = LookupVolumeRequest{} } -func (m *LookupVolumeRequest) String() string { return proto.CompactTextString(m) } -func (*LookupVolumeRequest) ProtoMessage() {} -func (*LookupVolumeRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10} } +// Deprecated: Use LookupVolumeRequest.ProtoReflect.Descriptor instead. 
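The regenerated accessors keep the nil-safe convention visible above: every getter checks the receiver and falls back to the field's zero value, so even a nil message can be read without panicking. A small sketch, using VolumeLocation and its new data_center field (field 6):

package sketch

import master_pb "github.com/chrislusf/seaweedfs/weed/pb/master_pb"

func nilSafeRead() {
	var loc *master_pb.VolumeLocation // deliberately nil
	_ = loc.GetUrl()                  // "" rather than a nil-pointer panic
	_ = loc.GetDataCenter()           // "" — data_center is new in this revision
}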
+func (*LookupVolumeRequest) Descriptor() ([]byte, []int) { + return file_master_proto_rawDescGZIP(), []int{10} +} -func (m *LookupVolumeRequest) GetVolumeIds() []string { - if m != nil { - return m.VolumeIds +func (x *LookupVolumeRequest) GetVolumeIds() []string { + if x != nil { + return x.VolumeIds } return nil } -func (m *LookupVolumeRequest) GetCollection() string { - if m != nil { - return m.Collection +func (x *LookupVolumeRequest) GetCollection() string { + if x != nil { + return x.Collection } return "" } type LookupVolumeResponse struct { - VolumeIdLocations []*LookupVolumeResponse_VolumeIdLocation `protobuf:"bytes,1,rep,name=volume_id_locations,json=volumeIdLocations" json:"volume_id_locations,omitempty"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + VolumeIdLocations []*LookupVolumeResponse_VolumeIdLocation `protobuf:"bytes,1,rep,name=volume_id_locations,json=volumeIdLocations,proto3" json:"volume_id_locations,omitempty"` +} + +func (x *LookupVolumeResponse) Reset() { + *x = LookupVolumeResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_master_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *LookupVolumeResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LookupVolumeResponse) ProtoMessage() {} + +func (x *LookupVolumeResponse) ProtoReflect() protoreflect.Message { + mi := &file_master_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -func (m *LookupVolumeResponse) Reset() { *m = LookupVolumeResponse{} } -func (m *LookupVolumeResponse) String() string { return proto.CompactTextString(m) } -func (*LookupVolumeResponse) ProtoMessage() {} -func (*LookupVolumeResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11} } +// Deprecated: Use LookupVolumeResponse.ProtoReflect.Descriptor instead. 
+func (*LookupVolumeResponse) Descriptor() ([]byte, []int) { + return file_master_proto_rawDescGZIP(), []int{11} +} -func (m *LookupVolumeResponse) GetVolumeIdLocations() []*LookupVolumeResponse_VolumeIdLocation { - if m != nil { - return m.VolumeIdLocations +func (x *LookupVolumeResponse) GetVolumeIdLocations() []*LookupVolumeResponse_VolumeIdLocation { + if x != nil { + return x.VolumeIdLocations } return nil } -type LookupVolumeResponse_VolumeIdLocation struct { - VolumeId string `protobuf:"bytes,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` - Locations []*Location `protobuf:"bytes,2,rep,name=locations" json:"locations,omitempty"` - Error string `protobuf:"bytes,3,opt,name=error" json:"error,omitempty"` +type Location struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Url string `protobuf:"bytes,1,opt,name=url,proto3" json:"url,omitempty"` + PublicUrl string `protobuf:"bytes,2,opt,name=public_url,json=publicUrl,proto3" json:"public_url,omitempty"` } -func (m *LookupVolumeResponse_VolumeIdLocation) Reset() { *m = LookupVolumeResponse_VolumeIdLocation{} } -func (m *LookupVolumeResponse_VolumeIdLocation) String() string { return proto.CompactTextString(m) } -func (*LookupVolumeResponse_VolumeIdLocation) ProtoMessage() {} -func (*LookupVolumeResponse_VolumeIdLocation) Descriptor() ([]byte, []int) { - return fileDescriptor0, []int{11, 0} +func (x *Location) Reset() { + *x = Location{} + if protoimpl.UnsafeEnabled { + mi := &file_master_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Location) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Location) ProtoMessage() {} + +func (x *Location) ProtoReflect() protoreflect.Message { + mi := &file_master_proto_msgTypes[12] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Location.ProtoReflect.Descriptor instead. 
+func (*Location) Descriptor() ([]byte, []int) { + return file_master_proto_rawDescGZIP(), []int{12} } -func (m *LookupVolumeResponse_VolumeIdLocation) GetVolumeId() string { - if m != nil { - return m.VolumeId +func (x *Location) GetUrl() string { + if x != nil { + return x.Url } return "" } -func (m *LookupVolumeResponse_VolumeIdLocation) GetLocations() []*Location { - if m != nil { - return m.Locations +func (x *Location) GetPublicUrl() string { + if x != nil { + return x.PublicUrl } - return nil + return "" +} + +type AssignRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Count uint64 `protobuf:"varint,1,opt,name=count,proto3" json:"count,omitempty"` + Replication string `protobuf:"bytes,2,opt,name=replication,proto3" json:"replication,omitempty"` + Collection string `protobuf:"bytes,3,opt,name=collection,proto3" json:"collection,omitempty"` + Ttl string `protobuf:"bytes,4,opt,name=ttl,proto3" json:"ttl,omitempty"` + DataCenter string `protobuf:"bytes,5,opt,name=data_center,json=dataCenter,proto3" json:"data_center,omitempty"` + Rack string `protobuf:"bytes,6,opt,name=rack,proto3" json:"rack,omitempty"` + DataNode string `protobuf:"bytes,7,opt,name=data_node,json=dataNode,proto3" json:"data_node,omitempty"` + MemoryMapMaxSizeMb uint32 `protobuf:"varint,8,opt,name=memory_map_max_size_mb,json=memoryMapMaxSizeMb,proto3" json:"memory_map_max_size_mb,omitempty"` + WritableVolumeCount uint32 `protobuf:"varint,9,opt,name=Writable_volume_count,json=WritableVolumeCount,proto3" json:"Writable_volume_count,omitempty"` + DiskType string `protobuf:"bytes,10,opt,name=disk_type,json=diskType,proto3" json:"disk_type,omitempty"` +} + +func (x *AssignRequest) Reset() { + *x = AssignRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_master_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AssignRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AssignRequest) ProtoMessage() {} + +func (x *AssignRequest) ProtoReflect() protoreflect.Message { + mi := &file_master_proto_msgTypes[13] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AssignRequest.ProtoReflect.Descriptor instead. 
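AssignRequest (below) gains a disk_type field at tag 10, matching the disk-type threading added to the heartbeat and EC-shard messages earlier in this file. An illustrative sketch; the collection, TTL, and disk-type values are made up:

package sketch

import master_pb "github.com/chrislusf/seaweedfs/weed/pb/master_pb"

// buildAssign sketches an assignment request that pins the new disk_type.
func buildAssign() *master_pb.AssignRequest {
	return &master_pb.AssignRequest{
		Count:      1,
		Collection: "pictures", // hypothetical collection name
		Ttl:        "3d",       // hypothetical TTL
		DiskType:   "ssd",      // new field; "" presumably selects the default disk
	}
}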
+func (*AssignRequest) Descriptor() ([]byte, []int) { + return file_master_proto_rawDescGZIP(), []int{13} +} + +func (x *AssignRequest) GetCount() uint64 { + if x != nil { + return x.Count + } + return 0 } -func (m *LookupVolumeResponse_VolumeIdLocation) GetError() string { - if m != nil { - return m.Error +func (x *AssignRequest) GetReplication() string { + if x != nil { + return x.Replication } return "" } -type Location struct { - Url string `protobuf:"bytes,1,opt,name=url" json:"url,omitempty"` - PublicUrl string `protobuf:"bytes,2,opt,name=public_url,json=publicUrl" json:"public_url,omitempty"` +func (x *AssignRequest) GetCollection() string { + if x != nil { + return x.Collection + } + return "" } -func (m *Location) Reset() { *m = Location{} } -func (m *Location) String() string { return proto.CompactTextString(m) } -func (*Location) ProtoMessage() {} -func (*Location) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{12} } +func (x *AssignRequest) GetTtl() string { + if x != nil { + return x.Ttl + } + return "" +} -func (m *Location) GetUrl() string { - if m != nil { - return m.Url +func (x *AssignRequest) GetDataCenter() string { + if x != nil { + return x.DataCenter } return "" } -func (m *Location) GetPublicUrl() string { - if m != nil { - return m.PublicUrl +func (x *AssignRequest) GetRack() string { + if x != nil { + return x.Rack } return "" } -type AssignRequest struct { - Count uint64 `protobuf:"varint,1,opt,name=count" json:"count,omitempty"` - Replication string `protobuf:"bytes,2,opt,name=replication" json:"replication,omitempty"` - Collection string `protobuf:"bytes,3,opt,name=collection" json:"collection,omitempty"` - Ttl string `protobuf:"bytes,4,opt,name=ttl" json:"ttl,omitempty"` - DataCenter string `protobuf:"bytes,5,opt,name=data_center,json=dataCenter" json:"data_center,omitempty"` - Rack string `protobuf:"bytes,6,opt,name=rack" json:"rack,omitempty"` - DataNode string `protobuf:"bytes,7,opt,name=data_node,json=dataNode" json:"data_node,omitempty"` - MemoryMapMaxSizeMb uint32 `protobuf:"varint,8,opt,name=memory_map_max_size_mb,json=memoryMapMaxSizeMb" json:"memory_map_max_size_mb,omitempty"` - WritableVolumeCount uint32 `protobuf:"varint,9,opt,name=Writable_volume_count,json=WritableVolumeCount" json:"Writable_volume_count,omitempty"` -} - -func (m *AssignRequest) Reset() { *m = AssignRequest{} } -func (m *AssignRequest) String() string { return proto.CompactTextString(m) } -func (*AssignRequest) ProtoMessage() {} -func (*AssignRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{13} } - -func (m *AssignRequest) GetCount() uint64 { - if m != nil { - return m.Count +func (x *AssignRequest) GetDataNode() string { + if x != nil { + return x.DataNode + } + return "" +} + +func (x *AssignRequest) GetMemoryMapMaxSizeMb() uint32 { + if x != nil { + return x.MemoryMapMaxSizeMb + } + return 0 +} + +func (x *AssignRequest) GetWritableVolumeCount() uint32 { + if x != nil { + return x.WritableVolumeCount } return 0 } -func (m *AssignRequest) GetReplication() string { - if m != nil { - return m.Replication +func (x *AssignRequest) GetDiskType() string { + if x != nil { + return x.DiskType + } + return "" +} + +type AssignResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Fid string `protobuf:"bytes,1,opt,name=fid,proto3" json:"fid,omitempty"` + Url string `protobuf:"bytes,2,opt,name=url,proto3" json:"url,omitempty"` + PublicUrl string 
`protobuf:"bytes,3,opt,name=public_url,json=publicUrl,proto3" json:"public_url,omitempty"` + Count uint64 `protobuf:"varint,4,opt,name=count,proto3" json:"count,omitempty"` + Error string `protobuf:"bytes,5,opt,name=error,proto3" json:"error,omitempty"` + Auth string `protobuf:"bytes,6,opt,name=auth,proto3" json:"auth,omitempty"` +} + +func (x *AssignResponse) Reset() { + *x = AssignResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_master_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AssignResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AssignResponse) ProtoMessage() {} + +func (x *AssignResponse) ProtoReflect() protoreflect.Message { + mi := &file_master_proto_msgTypes[14] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AssignResponse.ProtoReflect.Descriptor instead. +func (*AssignResponse) Descriptor() ([]byte, []int) { + return file_master_proto_rawDescGZIP(), []int{14} +} + +func (x *AssignResponse) GetFid() string { + if x != nil { + return x.Fid + } + return "" +} + +func (x *AssignResponse) GetUrl() string { + if x != nil { + return x.Url + } + return "" +} + +func (x *AssignResponse) GetPublicUrl() string { + if x != nil { + return x.PublicUrl } return "" } -func (m *AssignRequest) GetCollection() string { - if m != nil { - return m.Collection +func (x *AssignResponse) GetCount() uint64 { + if x != nil { + return x.Count + } + return 0 +} + +func (x *AssignResponse) GetError() string { + if x != nil { + return x.Error } return "" } -func (m *AssignRequest) GetTtl() string { - if m != nil { - return m.Ttl +func (x *AssignResponse) GetAuth() string { + if x != nil { + return x.Auth } return "" } -func (m *AssignRequest) GetDataCenter() string { - if m != nil { - return m.DataCenter +type StatisticsRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Replication string `protobuf:"bytes,1,opt,name=replication,proto3" json:"replication,omitempty"` + Collection string `protobuf:"bytes,2,opt,name=collection,proto3" json:"collection,omitempty"` + Ttl string `protobuf:"bytes,3,opt,name=ttl,proto3" json:"ttl,omitempty"` + DiskType string `protobuf:"bytes,4,opt,name=disk_type,json=diskType,proto3" json:"disk_type,omitempty"` +} + +func (x *StatisticsRequest) Reset() { + *x = StatisticsRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_master_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *StatisticsRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StatisticsRequest) ProtoMessage() {} + +func (x *StatisticsRequest) ProtoReflect() protoreflect.Message { + mi := &file_master_proto_msgTypes[15] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StatisticsRequest.ProtoReflect.Descriptor instead. 
+func (*StatisticsRequest) Descriptor() ([]byte, []int) { + return file_master_proto_rawDescGZIP(), []int{15} +} + +func (x *StatisticsRequest) GetReplication() string { + if x != nil { + return x.Replication + } + return "" +} + +func (x *StatisticsRequest) GetCollection() string { + if x != nil { + return x.Collection + } + return "" +} + +func (x *StatisticsRequest) GetTtl() string { + if x != nil { + return x.Ttl + } + return "" +} + +func (x *StatisticsRequest) GetDiskType() string { + if x != nil { + return x.DiskType + } + return "" +} + +type StatisticsResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + TotalSize uint64 `protobuf:"varint,4,opt,name=total_size,json=totalSize,proto3" json:"total_size,omitempty"` + UsedSize uint64 `protobuf:"varint,5,opt,name=used_size,json=usedSize,proto3" json:"used_size,omitempty"` + FileCount uint64 `protobuf:"varint,6,opt,name=file_count,json=fileCount,proto3" json:"file_count,omitempty"` +} + +func (x *StatisticsResponse) Reset() { + *x = StatisticsResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_master_proto_msgTypes[16] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *StatisticsResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StatisticsResponse) ProtoMessage() {} + +func (x *StatisticsResponse) ProtoReflect() protoreflect.Message { + mi := &file_master_proto_msgTypes[16] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StatisticsResponse.ProtoReflect.Descriptor instead. +func (*StatisticsResponse) Descriptor() ([]byte, []int) { + return file_master_proto_rawDescGZIP(), []int{16} +} + +func (x *StatisticsResponse) GetTotalSize() uint64 { + if x != nil { + return x.TotalSize + } + return 0 +} + +func (x *StatisticsResponse) GetUsedSize() uint64 { + if x != nil { + return x.UsedSize + } + return 0 +} + +func (x *StatisticsResponse) GetFileCount() uint64 { + if x != nil { + return x.FileCount + } + return 0 +} + +// +// collection related +// +type Collection struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` +} + +func (x *Collection) Reset() { + *x = Collection{} + if protoimpl.UnsafeEnabled { + mi := &file_master_proto_msgTypes[17] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Collection) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Collection) ProtoMessage() {} + +func (x *Collection) ProtoReflect() protoreflect.Message { + mi := &file_master_proto_msgTypes[17] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Collection.ProtoReflect.Descriptor instead. 
+func (*Collection) Descriptor() ([]byte, []int) { + return file_master_proto_rawDescGZIP(), []int{17} +} + +func (x *Collection) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +type CollectionListRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + IncludeNormalVolumes bool `protobuf:"varint,1,opt,name=include_normal_volumes,json=includeNormalVolumes,proto3" json:"include_normal_volumes,omitempty"` + IncludeEcVolumes bool `protobuf:"varint,2,opt,name=include_ec_volumes,json=includeEcVolumes,proto3" json:"include_ec_volumes,omitempty"` +} + +func (x *CollectionListRequest) Reset() { + *x = CollectionListRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_master_proto_msgTypes[18] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CollectionListRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CollectionListRequest) ProtoMessage() {} + +func (x *CollectionListRequest) ProtoReflect() protoreflect.Message { + mi := &file_master_proto_msgTypes[18] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CollectionListRequest.ProtoReflect.Descriptor instead. +func (*CollectionListRequest) Descriptor() ([]byte, []int) { + return file_master_proto_rawDescGZIP(), []int{18} +} + +func (x *CollectionListRequest) GetIncludeNormalVolumes() bool { + if x != nil { + return x.IncludeNormalVolumes + } + return false +} + +func (x *CollectionListRequest) GetIncludeEcVolumes() bool { + if x != nil { + return x.IncludeEcVolumes + } + return false +} + +type CollectionListResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Collections []*Collection `protobuf:"bytes,1,rep,name=collections,proto3" json:"collections,omitempty"` +} + +func (x *CollectionListResponse) Reset() { + *x = CollectionListResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_master_proto_msgTypes[19] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CollectionListResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CollectionListResponse) ProtoMessage() {} + +func (x *CollectionListResponse) ProtoReflect() protoreflect.Message { + mi := &file_master_proto_msgTypes[19] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CollectionListResponse.ProtoReflect.Descriptor instead. 
+func (*CollectionListResponse) Descriptor() ([]byte, []int) { + return file_master_proto_rawDescGZIP(), []int{19} +} + +func (x *CollectionListResponse) GetCollections() []*Collection { + if x != nil { + return x.Collections + } + return nil +} + +type CollectionDeleteRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` +} + +func (x *CollectionDeleteRequest) Reset() { + *x = CollectionDeleteRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_master_proto_msgTypes[20] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CollectionDeleteRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CollectionDeleteRequest) ProtoMessage() {} + +func (x *CollectionDeleteRequest) ProtoReflect() protoreflect.Message { + mi := &file_master_proto_msgTypes[20] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CollectionDeleteRequest.ProtoReflect.Descriptor instead. +func (*CollectionDeleteRequest) Descriptor() ([]byte, []int) { + return file_master_proto_rawDescGZIP(), []int{20} +} + +func (x *CollectionDeleteRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +type CollectionDeleteResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *CollectionDeleteResponse) Reset() { + *x = CollectionDeleteResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_master_proto_msgTypes[21] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CollectionDeleteResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CollectionDeleteResponse) ProtoMessage() {} + +func (x *CollectionDeleteResponse) ProtoReflect() protoreflect.Message { + mi := &file_master_proto_msgTypes[21] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CollectionDeleteResponse.ProtoReflect.Descriptor instead. 
+func (*CollectionDeleteResponse) Descriptor() ([]byte, []int) { + return file_master_proto_rawDescGZIP(), []int{21} +} + +// +// volume related +// +type DiskInfo struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` + VolumeCount uint64 `protobuf:"varint,2,opt,name=volume_count,json=volumeCount,proto3" json:"volume_count,omitempty"` + MaxVolumeCount uint64 `protobuf:"varint,3,opt,name=max_volume_count,json=maxVolumeCount,proto3" json:"max_volume_count,omitempty"` + FreeVolumeCount uint64 `protobuf:"varint,4,opt,name=free_volume_count,json=freeVolumeCount,proto3" json:"free_volume_count,omitempty"` + ActiveVolumeCount uint64 `protobuf:"varint,5,opt,name=active_volume_count,json=activeVolumeCount,proto3" json:"active_volume_count,omitempty"` + VolumeInfos []*VolumeInformationMessage `protobuf:"bytes,6,rep,name=volume_infos,json=volumeInfos,proto3" json:"volume_infos,omitempty"` + EcShardInfos []*VolumeEcShardInformationMessage `protobuf:"bytes,7,rep,name=ec_shard_infos,json=ecShardInfos,proto3" json:"ec_shard_infos,omitempty"` + RemoteVolumeCount uint64 `protobuf:"varint,8,opt,name=remote_volume_count,json=remoteVolumeCount,proto3" json:"remote_volume_count,omitempty"` +} + +func (x *DiskInfo) Reset() { + *x = DiskInfo{} + if protoimpl.UnsafeEnabled { + mi := &file_master_proto_msgTypes[22] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DiskInfo) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DiskInfo) ProtoMessage() {} + +func (x *DiskInfo) ProtoReflect() protoreflect.Message { + mi := &file_master_proto_msgTypes[22] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DiskInfo.ProtoReflect.Descriptor instead. 
+func (*DiskInfo) Descriptor() ([]byte, []int) { + return file_master_proto_rawDescGZIP(), []int{22} +} + +func (x *DiskInfo) GetType() string { + if x != nil { + return x.Type + } + return "" +} + +func (x *DiskInfo) GetVolumeCount() uint64 { + if x != nil { + return x.VolumeCount + } + return 0 +} + +func (x *DiskInfo) GetMaxVolumeCount() uint64 { + if x != nil { + return x.MaxVolumeCount + } + return 0 +} + +func (x *DiskInfo) GetFreeVolumeCount() uint64 { + if x != nil { + return x.FreeVolumeCount + } + return 0 +} + +func (x *DiskInfo) GetActiveVolumeCount() uint64 { + if x != nil { + return x.ActiveVolumeCount + } + return 0 +} + +func (x *DiskInfo) GetVolumeInfos() []*VolumeInformationMessage { + if x != nil { + return x.VolumeInfos + } + return nil +} + +func (x *DiskInfo) GetEcShardInfos() []*VolumeEcShardInformationMessage { + if x != nil { + return x.EcShardInfos + } + return nil +} + +func (x *DiskInfo) GetRemoteVolumeCount() uint64 { + if x != nil { + return x.RemoteVolumeCount + } + return 0 +} + +type DataNodeInfo struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + DiskInfos map[string]*DiskInfo `protobuf:"bytes,2,rep,name=diskInfos,proto3" json:"diskInfos,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *DataNodeInfo) Reset() { + *x = DataNodeInfo{} + if protoimpl.UnsafeEnabled { + mi := &file_master_proto_msgTypes[23] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DataNodeInfo) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DataNodeInfo) ProtoMessage() {} + +func (x *DataNodeInfo) ProtoReflect() protoreflect.Message { + mi := &file_master_proto_msgTypes[23] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DataNodeInfo.ProtoReflect.Descriptor instead. 
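DiskInfo (above) absorbs the per-node counters that previously sat directly on DataNodeInfo, and DataNodeInfo now keys them by disk type in its DiskInfos map. A sketch of totalling capacity across one node's disks:

package sketch

import master_pb "github.com/chrislusf/seaweedfs/weed/pb/master_pb"

// sumNodeVolumes adds up volume counts across a node's per-disk-type entries.
func sumNodeVolumes(node *master_pb.DataNodeInfo) (volumes, maxVolumes uint64) {
	for diskType, di := range node.GetDiskInfos() {
		_ = diskType // map key mirrors DiskInfo.Type, e.g. "" (default) or "ssd"
		volumes += di.GetVolumeCount()
		maxVolumes += di.GetMaxVolumeCount()
	}
	return volumes, maxVolumes
}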
+func (*DataNodeInfo) Descriptor() ([]byte, []int) { + return file_master_proto_rawDescGZIP(), []int{23} +} + +func (x *DataNodeInfo) GetId() string { + if x != nil { + return x.Id + } + return "" +} + +func (x *DataNodeInfo) GetDiskInfos() map[string]*DiskInfo { + if x != nil { + return x.DiskInfos + } + return nil +} + +type RackInfo struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + DataNodeInfos []*DataNodeInfo `protobuf:"bytes,2,rep,name=data_node_infos,json=dataNodeInfos,proto3" json:"data_node_infos,omitempty"` + DiskInfos map[string]*DiskInfo `protobuf:"bytes,3,rep,name=diskInfos,proto3" json:"diskInfos,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *RackInfo) Reset() { + *x = RackInfo{} + if protoimpl.UnsafeEnabled { + mi := &file_master_proto_msgTypes[24] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RackInfo) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RackInfo) ProtoMessage() {} + +func (x *RackInfo) ProtoReflect() protoreflect.Message { + mi := &file_master_proto_msgTypes[24] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RackInfo.ProtoReflect.Descriptor instead. +func (*RackInfo) Descriptor() ([]byte, []int) { + return file_master_proto_rawDescGZIP(), []int{24} +} + +func (x *RackInfo) GetId() string { + if x != nil { + return x.Id + } + return "" +} + +func (x *RackInfo) GetDataNodeInfos() []*DataNodeInfo { + if x != nil { + return x.DataNodeInfos + } + return nil +} + +func (x *RackInfo) GetDiskInfos() map[string]*DiskInfo { + if x != nil { + return x.DiskInfos + } + return nil +} + +type DataCenterInfo struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + RackInfos []*RackInfo `protobuf:"bytes,2,rep,name=rack_infos,json=rackInfos,proto3" json:"rack_infos,omitempty"` + DiskInfos map[string]*DiskInfo `protobuf:"bytes,3,rep,name=diskInfos,proto3" json:"diskInfos,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *DataCenterInfo) Reset() { + *x = DataCenterInfo{} + if protoimpl.UnsafeEnabled { + mi := &file_master_proto_msgTypes[25] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DataCenterInfo) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DataCenterInfo) ProtoMessage() {} + +func (x *DataCenterInfo) ProtoReflect() protoreflect.Message { + mi := &file_master_proto_msgTypes[25] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DataCenterInfo.ProtoReflect.Descriptor instead. 
+func (*DataCenterInfo) Descriptor() ([]byte, []int) { + return file_master_proto_rawDescGZIP(), []int{25} +} + +func (x *DataCenterInfo) GetId() string { + if x != nil { + return x.Id + } + return "" +} + +func (x *DataCenterInfo) GetRackInfos() []*RackInfo { + if x != nil { + return x.RackInfos + } + return nil +} + +func (x *DataCenterInfo) GetDiskInfos() map[string]*DiskInfo { + if x != nil { + return x.DiskInfos + } + return nil +} + +type TopologyInfo struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + DataCenterInfos []*DataCenterInfo `protobuf:"bytes,2,rep,name=data_center_infos,json=dataCenterInfos,proto3" json:"data_center_infos,omitempty"` + DiskInfos map[string]*DiskInfo `protobuf:"bytes,3,rep,name=diskInfos,proto3" json:"diskInfos,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *TopologyInfo) Reset() { + *x = TopologyInfo{} + if protoimpl.UnsafeEnabled { + mi := &file_master_proto_msgTypes[26] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TopologyInfo) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TopologyInfo) ProtoMessage() {} + +func (x *TopologyInfo) ProtoReflect() protoreflect.Message { + mi := &file_master_proto_msgTypes[26] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TopologyInfo.ProtoReflect.Descriptor instead. +func (*TopologyInfo) Descriptor() ([]byte, []int) { + return file_master_proto_rawDescGZIP(), []int{26} +} + +func (x *TopologyInfo) GetId() string { + if x != nil { + return x.Id + } + return "" +} + +func (x *TopologyInfo) GetDataCenterInfos() []*DataCenterInfo { + if x != nil { + return x.DataCenterInfos + } + return nil +} + +func (x *TopologyInfo) GetDiskInfos() map[string]*DiskInfo { + if x != nil { + return x.DiskInfos + } + return nil +} + +type VolumeListRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *VolumeListRequest) Reset() { + *x = VolumeListRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_master_proto_msgTypes[27] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *VolumeListRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VolumeListRequest) ProtoMessage() {} + +func (x *VolumeListRequest) ProtoReflect() protoreflect.Message { + mi := &file_master_proto_msgTypes[27] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use VolumeListRequest.ProtoReflect.Descriptor instead. 
+func (*VolumeListRequest) Descriptor() ([]byte, []int) { + return file_master_proto_rawDescGZIP(), []int{27} +} + +type VolumeListResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + TopologyInfo *TopologyInfo `protobuf:"bytes,1,opt,name=topology_info,json=topologyInfo,proto3" json:"topology_info,omitempty"` + VolumeSizeLimitMb uint64 `protobuf:"varint,2,opt,name=volume_size_limit_mb,json=volumeSizeLimitMb,proto3" json:"volume_size_limit_mb,omitempty"` +} + +func (x *VolumeListResponse) Reset() { + *x = VolumeListResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_master_proto_msgTypes[28] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *VolumeListResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VolumeListResponse) ProtoMessage() {} + +func (x *VolumeListResponse) ProtoReflect() protoreflect.Message { + mi := &file_master_proto_msgTypes[28] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use VolumeListResponse.ProtoReflect.Descriptor instead. +func (*VolumeListResponse) Descriptor() ([]byte, []int) { + return file_master_proto_rawDescGZIP(), []int{28} +} + +func (x *VolumeListResponse) GetTopologyInfo() *TopologyInfo { + if x != nil { + return x.TopologyInfo + } + return nil +} + +func (x *VolumeListResponse) GetVolumeSizeLimitMb() uint64 { + if x != nil { + return x.VolumeSizeLimitMb + } + return 0 +} + +type LookupEcVolumeRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` +} + +func (x *LookupEcVolumeRequest) Reset() { + *x = LookupEcVolumeRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_master_proto_msgTypes[29] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *LookupEcVolumeRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LookupEcVolumeRequest) ProtoMessage() {} + +func (x *LookupEcVolumeRequest) ProtoReflect() protoreflect.Message { + mi := &file_master_proto_msgTypes[29] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use LookupEcVolumeRequest.ProtoReflect.Descriptor instead. 
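RackInfo, DataCenterInfo, and TopologyInfo all repeat the same shape — an id, child infos, and a DiskInfos map — so a VolumeListResponse can be walked top-down. A sketch, with the RPC plumbing that produces resp omitted:

package sketch

import master_pb "github.com/chrislusf/seaweedfs/weed/pb/master_pb"

// listNodeIds walks topology -> data center -> rack -> data node; every
// getter in the chain is nil-safe, so a partial response is harmless.
func listNodeIds(resp *master_pb.VolumeListResponse) (ids []string) {
	for _, dc := range resp.GetTopologyInfo().GetDataCenterInfos() {
		for _, rack := range dc.GetRackInfos() {
			for _, node := range rack.GetDataNodeInfos() {
				ids = append(ids, node.GetId())
			}
		}
	}
	return ids
}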
+func (*LookupEcVolumeRequest) Descriptor() ([]byte, []int) { + return file_master_proto_rawDescGZIP(), []int{29} +} + +func (x *LookupEcVolumeRequest) GetVolumeId() uint32 { + if x != nil { + return x.VolumeId + } + return 0 +} + +type LookupEcVolumeResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` + ShardIdLocations []*LookupEcVolumeResponse_EcShardIdLocation `protobuf:"bytes,2,rep,name=shard_id_locations,json=shardIdLocations,proto3" json:"shard_id_locations,omitempty"` +} + +func (x *LookupEcVolumeResponse) Reset() { + *x = LookupEcVolumeResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_master_proto_msgTypes[30] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *LookupEcVolumeResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LookupEcVolumeResponse) ProtoMessage() {} + +func (x *LookupEcVolumeResponse) ProtoReflect() protoreflect.Message { + mi := &file_master_proto_msgTypes[30] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use LookupEcVolumeResponse.ProtoReflect.Descriptor instead. +func (*LookupEcVolumeResponse) Descriptor() ([]byte, []int) { + return file_master_proto_rawDescGZIP(), []int{30} +} + +func (x *LookupEcVolumeResponse) GetVolumeId() uint32 { + if x != nil { + return x.VolumeId + } + return 0 +} + +func (x *LookupEcVolumeResponse) GetShardIdLocations() []*LookupEcVolumeResponse_EcShardIdLocation { + if x != nil { + return x.ShardIdLocations + } + return nil +} + +type VacuumVolumeRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + GarbageThreshold float32 `protobuf:"fixed32,1,opt,name=garbage_threshold,json=garbageThreshold,proto3" json:"garbage_threshold,omitempty"` +} + +func (x *VacuumVolumeRequest) Reset() { + *x = VacuumVolumeRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_master_proto_msgTypes[31] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return "" } -func (m *AssignRequest) GetRack() string { - if m != nil { - return m.Rack - } - return "" +func (x *VacuumVolumeRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *AssignRequest) GetDataNode() string { - if m != nil { - return m.DataNode +func (*VacuumVolumeRequest) ProtoMessage() {} + +func (x *VacuumVolumeRequest) ProtoReflect() protoreflect.Message { + mi := &file_master_proto_msgTypes[31] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return "" + return mi.MessageOf(x) } -func (m *AssignRequest) GetMemoryMapMaxSizeMb() uint32 { - if m != nil { - return m.MemoryMapMaxSizeMb - } - return 0 +// Deprecated: Use VacuumVolumeRequest.ProtoReflect.Descriptor instead. 
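LookupEcVolumeResponse pairs the volume id with per-shard locations through the nested EcShardIdLocation type, whose definition falls outside this hunk; the accessor names below are assumed to follow the usual generated naming, not confirmed by the diff shown here:

package sketch

import master_pb "github.com/chrislusf/seaweedfs/weed/pb/master_pb"

// ecShardUrls collects the first known URL per EC shard. GetShardId and
// GetLocations on EcShardIdLocation are assumed accessors (see note above).
func ecShardUrls(resp *master_pb.LookupEcVolumeResponse) map[uint32]string {
	urls := make(map[uint32]string)
	for _, sl := range resp.GetShardIdLocations() {
		if locs := sl.GetLocations(); len(locs) > 0 {
			urls[sl.GetShardId()] = locs[0].GetUrl()
		}
	}
	return urls
}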
+func (*VacuumVolumeRequest) Descriptor() ([]byte, []int) { + return file_master_proto_rawDescGZIP(), []int{31} } -func (m *AssignRequest) GetWritableVolumeCount() uint32 { - if m != nil { - return m.WritableVolumeCount +func (x *VacuumVolumeRequest) GetGarbageThreshold() float32 { + if x != nil { + return x.GarbageThreshold } return 0 } -type AssignResponse struct { - Fid string `protobuf:"bytes,1,opt,name=fid" json:"fid,omitempty"` - Url string `protobuf:"bytes,2,opt,name=url" json:"url,omitempty"` - PublicUrl string `protobuf:"bytes,3,opt,name=public_url,json=publicUrl" json:"public_url,omitempty"` - Count uint64 `protobuf:"varint,4,opt,name=count" json:"count,omitempty"` - Error string `protobuf:"bytes,5,opt,name=error" json:"error,omitempty"` - Auth string `protobuf:"bytes,6,opt,name=auth" json:"auth,omitempty"` +type VacuumVolumeResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields } -func (m *AssignResponse) Reset() { *m = AssignResponse{} } -func (m *AssignResponse) String() string { return proto.CompactTextString(m) } -func (*AssignResponse) ProtoMessage() {} -func (*AssignResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{14} } - -func (m *AssignResponse) GetFid() string { - if m != nil { - return m.Fid +func (x *VacuumVolumeResponse) Reset() { + *x = VacuumVolumeResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_master_proto_msgTypes[32] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return "" } -func (m *AssignResponse) GetUrl() string { - if m != nil { - return m.Url - } - return "" +func (x *VacuumVolumeResponse) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *AssignResponse) GetPublicUrl() string { - if m != nil { - return m.PublicUrl +func (*VacuumVolumeResponse) ProtoMessage() {} + +func (x *VacuumVolumeResponse) ProtoReflect() protoreflect.Message { + mi := &file_master_proto_msgTypes[32] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return "" + return mi.MessageOf(x) } -func (m *AssignResponse) GetCount() uint64 { - if m != nil { - return m.Count - } - return 0 +// Deprecated: Use VacuumVolumeResponse.ProtoReflect.Descriptor instead. 
+func (*VacuumVolumeResponse) Descriptor() ([]byte, []int) { + return file_master_proto_rawDescGZIP(), []int{32} } -func (m *AssignResponse) GetError() string { - if m != nil { - return m.Error - } - return "" +type GetMasterConfigurationRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields } -func (m *AssignResponse) GetAuth() string { - if m != nil { - return m.Auth +func (x *GetMasterConfigurationRequest) Reset() { + *x = GetMasterConfigurationRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_master_proto_msgTypes[33] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return "" } -type StatisticsRequest struct { - Replication string `protobuf:"bytes,1,opt,name=replication" json:"replication,omitempty"` - Collection string `protobuf:"bytes,2,opt,name=collection" json:"collection,omitempty"` - Ttl string `protobuf:"bytes,3,opt,name=ttl" json:"ttl,omitempty"` +func (x *GetMasterConfigurationRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *StatisticsRequest) Reset() { *m = StatisticsRequest{} } -func (m *StatisticsRequest) String() string { return proto.CompactTextString(m) } -func (*StatisticsRequest) ProtoMessage() {} -func (*StatisticsRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{15} } +func (*GetMasterConfigurationRequest) ProtoMessage() {} -func (m *StatisticsRequest) GetReplication() string { - if m != nil { - return m.Replication +func (x *GetMasterConfigurationRequest) ProtoReflect() protoreflect.Message { + mi := &file_master_proto_msgTypes[33] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return "" + return mi.MessageOf(x) } -func (m *StatisticsRequest) GetCollection() string { - if m != nil { - return m.Collection - } - return "" +// Deprecated: Use GetMasterConfigurationRequest.ProtoReflect.Descriptor instead. 
+func (*GetMasterConfigurationRequest) Descriptor() ([]byte, []int) { + return file_master_proto_rawDescGZIP(), []int{33} } -func (m *StatisticsRequest) GetTtl() string { - if m != nil { - return m.Ttl +type GetMasterConfigurationResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + MetricsAddress string `protobuf:"bytes,1,opt,name=metrics_address,json=metricsAddress,proto3" json:"metrics_address,omitempty"` + MetricsIntervalSeconds uint32 `protobuf:"varint,2,opt,name=metrics_interval_seconds,json=metricsIntervalSeconds,proto3" json:"metrics_interval_seconds,omitempty"` + StorageBackends []*StorageBackend `protobuf:"bytes,3,rep,name=storage_backends,json=storageBackends,proto3" json:"storage_backends,omitempty"` + DefaultReplication string `protobuf:"bytes,4,opt,name=default_replication,json=defaultReplication,proto3" json:"default_replication,omitempty"` + Leader string `protobuf:"bytes,5,opt,name=leader,proto3" json:"leader,omitempty"` +} + +func (x *GetMasterConfigurationResponse) Reset() { + *x = GetMasterConfigurationResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_master_proto_msgTypes[34] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return "" } -type StatisticsResponse struct { - Replication string `protobuf:"bytes,1,opt,name=replication" json:"replication,omitempty"` - Collection string `protobuf:"bytes,2,opt,name=collection" json:"collection,omitempty"` - Ttl string `protobuf:"bytes,3,opt,name=ttl" json:"ttl,omitempty"` - TotalSize uint64 `protobuf:"varint,4,opt,name=total_size,json=totalSize" json:"total_size,omitempty"` - UsedSize uint64 `protobuf:"varint,5,opt,name=used_size,json=usedSize" json:"used_size,omitempty"` - FileCount uint64 `protobuf:"varint,6,opt,name=file_count,json=fileCount" json:"file_count,omitempty"` +func (x *GetMasterConfigurationResponse) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *StatisticsResponse) Reset() { *m = StatisticsResponse{} } -func (m *StatisticsResponse) String() string { return proto.CompactTextString(m) } -func (*StatisticsResponse) ProtoMessage() {} -func (*StatisticsResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{16} } +func (*GetMasterConfigurationResponse) ProtoMessage() {} -func (m *StatisticsResponse) GetReplication() string { - if m != nil { - return m.Replication +func (x *GetMasterConfigurationResponse) ProtoReflect() protoreflect.Message { + mi := &file_master_proto_msgTypes[34] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return "" + return mi.MessageOf(x) } -func (m *StatisticsResponse) GetCollection() string { - if m != nil { - return m.Collection - } - return "" +// Deprecated: Use GetMasterConfigurationResponse.ProtoReflect.Descriptor instead. 
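GetMasterConfigurationResponse (below) carries default_replication and leader alongside the metrics settings and storage backends. Reading it is plain getter access; how resp is obtained from the master's gRPC service is omitted here:

package sketch

import master_pb "github.com/chrislusf/seaweedfs/weed/pb/master_pb"

// summarizeConfig pulls the cluster-level settings out of the response.
func summarizeConfig(resp *master_pb.GetMasterConfigurationResponse) (string, string) {
	for _, b := range resp.GetStorageBackends() {
		_ = b.GetType() // backend type string; per-backend settings in GetProperties()
	}
	return resp.GetDefaultReplication(), resp.GetLeader()
}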
+func (*GetMasterConfigurationResponse) Descriptor() ([]byte, []int) { + return file_master_proto_rawDescGZIP(), []int{34} } -func (m *StatisticsResponse) GetTtl() string { - if m != nil { - return m.Ttl +func (x *GetMasterConfigurationResponse) GetMetricsAddress() string { + if x != nil { + return x.MetricsAddress } return "" } -func (m *StatisticsResponse) GetTotalSize() uint64 { - if m != nil { - return m.TotalSize +func (x *GetMasterConfigurationResponse) GetMetricsIntervalSeconds() uint32 { + if x != nil { + return x.MetricsIntervalSeconds } return 0 } -func (m *StatisticsResponse) GetUsedSize() uint64 { - if m != nil { - return m.UsedSize +func (x *GetMasterConfigurationResponse) GetStorageBackends() []*StorageBackend { + if x != nil { + return x.StorageBackends } - return 0 + return nil } -func (m *StatisticsResponse) GetFileCount() uint64 { - if m != nil { - return m.FileCount +func (x *GetMasterConfigurationResponse) GetDefaultReplication() string { + if x != nil { + return x.DefaultReplication } - return 0 + return "" } -type StorageType struct { - Replication string `protobuf:"bytes,1,opt,name=replication" json:"replication,omitempty"` - Ttl string `protobuf:"bytes,2,opt,name=ttl" json:"ttl,omitempty"` +func (x *GetMasterConfigurationResponse) GetLeader() string { + if x != nil { + return x.Leader + } + return "" } -func (m *StorageType) Reset() { *m = StorageType{} } -func (m *StorageType) String() string { return proto.CompactTextString(m) } -func (*StorageType) ProtoMessage() {} -func (*StorageType) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{17} } +type ListMasterClientsRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func (m *StorageType) GetReplication() string { - if m != nil { - return m.Replication - } - return "" + ClientType string `protobuf:"bytes,1,opt,name=client_type,json=clientType,proto3" json:"client_type,omitempty"` } -func (m *StorageType) GetTtl() string { - if m != nil { - return m.Ttl +func (x *ListMasterClientsRequest) Reset() { + *x = ListMasterClientsRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_master_proto_msgTypes[35] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return "" } -type Collection struct { - Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` +func (x *ListMasterClientsRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *Collection) Reset() { *m = Collection{} } -func (m *Collection) String() string { return proto.CompactTextString(m) } -func (*Collection) ProtoMessage() {} -func (*Collection) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{18} } +func (*ListMasterClientsRequest) ProtoMessage() {} -func (m *Collection) GetName() string { - if m != nil { - return m.Name +func (x *ListMasterClientsRequest) ProtoReflect() protoreflect.Message { + mi := &file_master_proto_msgTypes[35] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return "" + return mi.MessageOf(x) } -type CollectionListRequest struct { - IncludeNormalVolumes bool `protobuf:"varint,1,opt,name=include_normal_volumes,json=includeNormalVolumes" json:"include_normal_volumes,omitempty"` - IncludeEcVolumes bool `protobuf:"varint,2,opt,name=include_ec_volumes,json=includeEcVolumes" json:"include_ec_volumes,omitempty"` +// Deprecated: Use 
ListMasterClientsRequest.ProtoReflect.Descriptor instead. +func (*ListMasterClientsRequest) Descriptor() ([]byte, []int) { + return file_master_proto_rawDescGZIP(), []int{35} } -func (m *CollectionListRequest) Reset() { *m = CollectionListRequest{} } -func (m *CollectionListRequest) String() string { return proto.CompactTextString(m) } -func (*CollectionListRequest) ProtoMessage() {} -func (*CollectionListRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{19} } - -func (m *CollectionListRequest) GetIncludeNormalVolumes() bool { - if m != nil { - return m.IncludeNormalVolumes +func (x *ListMasterClientsRequest) GetClientType() string { + if x != nil { + return x.ClientType } - return false + return "" } -func (m *CollectionListRequest) GetIncludeEcVolumes() bool { - if m != nil { - return m.IncludeEcVolumes +type ListMasterClientsResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + GrpcAddresses []string `protobuf:"bytes,1,rep,name=grpc_addresses,json=grpcAddresses,proto3" json:"grpc_addresses,omitempty"` +} + +func (x *ListMasterClientsResponse) Reset() { + *x = ListMasterClientsResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_master_proto_msgTypes[36] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return false } -type CollectionListResponse struct { - Collections []*Collection `protobuf:"bytes,1,rep,name=collections" json:"collections,omitempty"` +func (x *ListMasterClientsResponse) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *CollectionListResponse) Reset() { *m = CollectionListResponse{} } -func (m *CollectionListResponse) String() string { return proto.CompactTextString(m) } -func (*CollectionListResponse) ProtoMessage() {} -func (*CollectionListResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{20} } +func (*ListMasterClientsResponse) ProtoMessage() {} -func (m *CollectionListResponse) GetCollections() []*Collection { - if m != nil { - return m.Collections +func (x *ListMasterClientsResponse) ProtoReflect() protoreflect.Message { + mi := &file_master_proto_msgTypes[36] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return nil + return mi.MessageOf(x) } -type CollectionDeleteRequest struct { - Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` +// Deprecated: Use ListMasterClientsResponse.ProtoReflect.Descriptor instead. 
+func (*ListMasterClientsResponse) Descriptor() ([]byte, []int) { + return file_master_proto_rawDescGZIP(), []int{36} } -func (m *CollectionDeleteRequest) Reset() { *m = CollectionDeleteRequest{} } -func (m *CollectionDeleteRequest) String() string { return proto.CompactTextString(m) } -func (*CollectionDeleteRequest) ProtoMessage() {} -func (*CollectionDeleteRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{21} } - -func (m *CollectionDeleteRequest) GetName() string { - if m != nil { - return m.Name +func (x *ListMasterClientsResponse) GetGrpcAddresses() []string { + if x != nil { + return x.GrpcAddresses } - return "" + return nil } -type CollectionDeleteResponse struct { +type LeaseAdminTokenRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + PreviousToken int64 `protobuf:"varint,1,opt,name=previous_token,json=previousToken,proto3" json:"previous_token,omitempty"` + PreviousLockTime int64 `protobuf:"varint,2,opt,name=previous_lock_time,json=previousLockTime,proto3" json:"previous_lock_time,omitempty"` + LockName string `protobuf:"bytes,3,opt,name=lock_name,json=lockName,proto3" json:"lock_name,omitempty"` + ClientName string `protobuf:"bytes,4,opt,name=client_name,json=clientName,proto3" json:"client_name,omitempty"` } -func (m *CollectionDeleteResponse) Reset() { *m = CollectionDeleteResponse{} } -func (m *CollectionDeleteResponse) String() string { return proto.CompactTextString(m) } -func (*CollectionDeleteResponse) ProtoMessage() {} -func (*CollectionDeleteResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{22} } +func (x *LeaseAdminTokenRequest) Reset() { + *x = LeaseAdminTokenRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_master_proto_msgTypes[37] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} -// -// volume related -// -type DataNodeInfo struct { - Id string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"` - VolumeCount uint64 `protobuf:"varint,2,opt,name=volume_count,json=volumeCount" json:"volume_count,omitempty"` - MaxVolumeCount uint64 `protobuf:"varint,3,opt,name=max_volume_count,json=maxVolumeCount" json:"max_volume_count,omitempty"` - FreeVolumeCount uint64 `protobuf:"varint,4,opt,name=free_volume_count,json=freeVolumeCount" json:"free_volume_count,omitempty"` - ActiveVolumeCount uint64 `protobuf:"varint,5,opt,name=active_volume_count,json=activeVolumeCount" json:"active_volume_count,omitempty"` - VolumeInfos []*VolumeInformationMessage `protobuf:"bytes,6,rep,name=volume_infos,json=volumeInfos" json:"volume_infos,omitempty"` - EcShardInfos []*VolumeEcShardInformationMessage `protobuf:"bytes,7,rep,name=ec_shard_infos,json=ecShardInfos" json:"ec_shard_infos,omitempty"` - RemoteVolumeCount uint64 `protobuf:"varint,8,opt,name=remote_volume_count,json=remoteVolumeCount" json:"remote_volume_count,omitempty"` +func (x *LeaseAdminTokenRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *DataNodeInfo) Reset() { *m = DataNodeInfo{} } -func (m *DataNodeInfo) String() string { return proto.CompactTextString(m) } -func (*DataNodeInfo) ProtoMessage() {} -func (*DataNodeInfo) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{23} } +func (*LeaseAdminTokenRequest) ProtoMessage() {} -func (m *DataNodeInfo) GetId() string { - if m != nil { - return m.Id +func (x *LeaseAdminTokenRequest) ProtoReflect() protoreflect.Message { + mi := &file_master_proto_msgTypes[37] + if protoimpl.UnsafeEnabled 
&& x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return "" + return mi.MessageOf(x) } -func (m *DataNodeInfo) GetVolumeCount() uint64 { - if m != nil { - return m.VolumeCount - } - return 0 +// Deprecated: Use LeaseAdminTokenRequest.ProtoReflect.Descriptor instead. +func (*LeaseAdminTokenRequest) Descriptor() ([]byte, []int) { + return file_master_proto_rawDescGZIP(), []int{37} } -func (m *DataNodeInfo) GetMaxVolumeCount() uint64 { - if m != nil { - return m.MaxVolumeCount +func (x *LeaseAdminTokenRequest) GetPreviousToken() int64 { + if x != nil { + return x.PreviousToken } return 0 } -func (m *DataNodeInfo) GetFreeVolumeCount() uint64 { - if m != nil { - return m.FreeVolumeCount +func (x *LeaseAdminTokenRequest) GetPreviousLockTime() int64 { + if x != nil { + return x.PreviousLockTime } return 0 } -func (m *DataNodeInfo) GetActiveVolumeCount() uint64 { - if m != nil { - return m.ActiveVolumeCount +func (x *LeaseAdminTokenRequest) GetLockName() string { + if x != nil { + return x.LockName } - return 0 + return "" } -func (m *DataNodeInfo) GetVolumeInfos() []*VolumeInformationMessage { - if m != nil { - return m.VolumeInfos +func (x *LeaseAdminTokenRequest) GetClientName() string { + if x != nil { + return x.ClientName } - return nil + return "" } -func (m *DataNodeInfo) GetEcShardInfos() []*VolumeEcShardInformationMessage { - if m != nil { - return m.EcShardInfos - } - return nil +type LeaseAdminTokenResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Token int64 `protobuf:"varint,1,opt,name=token,proto3" json:"token,omitempty"` + LockTsNs int64 `protobuf:"varint,2,opt,name=lock_ts_ns,json=lockTsNs,proto3" json:"lock_ts_ns,omitempty"` } -func (m *DataNodeInfo) GetRemoteVolumeCount() uint64 { - if m != nil { - return m.RemoteVolumeCount +func (x *LeaseAdminTokenResponse) Reset() { + *x = LeaseAdminTokenResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_master_proto_msgTypes[38] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return 0 } -type RackInfo struct { - Id string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"` - VolumeCount uint64 `protobuf:"varint,2,opt,name=volume_count,json=volumeCount" json:"volume_count,omitempty"` - MaxVolumeCount uint64 `protobuf:"varint,3,opt,name=max_volume_count,json=maxVolumeCount" json:"max_volume_count,omitempty"` - FreeVolumeCount uint64 `protobuf:"varint,4,opt,name=free_volume_count,json=freeVolumeCount" json:"free_volume_count,omitempty"` - ActiveVolumeCount uint64 `protobuf:"varint,5,opt,name=active_volume_count,json=activeVolumeCount" json:"active_volume_count,omitempty"` - DataNodeInfos []*DataNodeInfo `protobuf:"bytes,6,rep,name=data_node_infos,json=dataNodeInfos" json:"data_node_infos,omitempty"` - RemoteVolumeCount uint64 `protobuf:"varint,7,opt,name=remote_volume_count,json=remoteVolumeCount" json:"remote_volume_count,omitempty"` +func (x *LeaseAdminTokenResponse) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *RackInfo) Reset() { *m = RackInfo{} } -func (m *RackInfo) String() string { return proto.CompactTextString(m) } -func (*RackInfo) ProtoMessage() {} -func (*RackInfo) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{24} } +func (*LeaseAdminTokenResponse) ProtoMessage() {} -func (m *RackInfo) GetId() string { - if m != nil { - return m.Id +func (x *LeaseAdminTokenResponse) 
ProtoReflect() protoreflect.Message { + mi := &file_master_proto_msgTypes[38] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return "" + return mi.MessageOf(x) } -func (m *RackInfo) GetVolumeCount() uint64 { - if m != nil { - return m.VolumeCount - } - return 0 +// Deprecated: Use LeaseAdminTokenResponse.ProtoReflect.Descriptor instead. +func (*LeaseAdminTokenResponse) Descriptor() ([]byte, []int) { + return file_master_proto_rawDescGZIP(), []int{38} } -func (m *RackInfo) GetMaxVolumeCount() uint64 { - if m != nil { - return m.MaxVolumeCount +func (x *LeaseAdminTokenResponse) GetToken() int64 { + if x != nil { + return x.Token } return 0 } -func (m *RackInfo) GetFreeVolumeCount() uint64 { - if m != nil { - return m.FreeVolumeCount +func (x *LeaseAdminTokenResponse) GetLockTsNs() int64 { + if x != nil { + return x.LockTsNs } return 0 } -func (m *RackInfo) GetActiveVolumeCount() uint64 { - if m != nil { - return m.ActiveVolumeCount - } - return 0 -} +type ReleaseAdminTokenRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func (m *RackInfo) GetDataNodeInfos() []*DataNodeInfo { - if m != nil { - return m.DataNodeInfos - } - return nil + PreviousToken int64 `protobuf:"varint,1,opt,name=previous_token,json=previousToken,proto3" json:"previous_token,omitempty"` + PreviousLockTime int64 `protobuf:"varint,2,opt,name=previous_lock_time,json=previousLockTime,proto3" json:"previous_lock_time,omitempty"` + LockName string `protobuf:"bytes,3,opt,name=lock_name,json=lockName,proto3" json:"lock_name,omitempty"` } -func (m *RackInfo) GetRemoteVolumeCount() uint64 { - if m != nil { - return m.RemoteVolumeCount +func (x *ReleaseAdminTokenRequest) Reset() { + *x = ReleaseAdminTokenRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_master_proto_msgTypes[39] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return 0 } -type DataCenterInfo struct { - Id string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"` - VolumeCount uint64 `protobuf:"varint,2,opt,name=volume_count,json=volumeCount" json:"volume_count,omitempty"` - MaxVolumeCount uint64 `protobuf:"varint,3,opt,name=max_volume_count,json=maxVolumeCount" json:"max_volume_count,omitempty"` - FreeVolumeCount uint64 `protobuf:"varint,4,opt,name=free_volume_count,json=freeVolumeCount" json:"free_volume_count,omitempty"` - ActiveVolumeCount uint64 `protobuf:"varint,5,opt,name=active_volume_count,json=activeVolumeCount" json:"active_volume_count,omitempty"` - RackInfos []*RackInfo `protobuf:"bytes,6,rep,name=rack_infos,json=rackInfos" json:"rack_infos,omitempty"` - RemoteVolumeCount uint64 `protobuf:"varint,7,opt,name=remote_volume_count,json=remoteVolumeCount" json:"remote_volume_count,omitempty"` +func (x *ReleaseAdminTokenRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *DataCenterInfo) Reset() { *m = DataCenterInfo{} } -func (m *DataCenterInfo) String() string { return proto.CompactTextString(m) } -func (*DataCenterInfo) ProtoMessage() {} -func (*DataCenterInfo) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{25} } +func (*ReleaseAdminTokenRequest) ProtoMessage() {} -func (m *DataCenterInfo) GetId() string { - if m != nil { - return m.Id +func (x *ReleaseAdminTokenRequest) ProtoReflect() protoreflect.Message { + mi := &file_master_proto_msgTypes[39] + if 
protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return "" + return mi.MessageOf(x) } -func (m *DataCenterInfo) GetVolumeCount() uint64 { - if m != nil { - return m.VolumeCount - } - return 0 +// Deprecated: Use ReleaseAdminTokenRequest.ProtoReflect.Descriptor instead. +func (*ReleaseAdminTokenRequest) Descriptor() ([]byte, []int) { + return file_master_proto_rawDescGZIP(), []int{39} } -func (m *DataCenterInfo) GetMaxVolumeCount() uint64 { - if m != nil { - return m.MaxVolumeCount +func (x *ReleaseAdminTokenRequest) GetPreviousToken() int64 { + if x != nil { + return x.PreviousToken } return 0 } -func (m *DataCenterInfo) GetFreeVolumeCount() uint64 { - if m != nil { - return m.FreeVolumeCount +func (x *ReleaseAdminTokenRequest) GetPreviousLockTime() int64 { + if x != nil { + return x.PreviousLockTime } return 0 } -func (m *DataCenterInfo) GetActiveVolumeCount() uint64 { - if m != nil { - return m.ActiveVolumeCount +func (x *ReleaseAdminTokenRequest) GetLockName() string { + if x != nil { + return x.LockName } - return 0 + return "" } -func (m *DataCenterInfo) GetRackInfos() []*RackInfo { - if m != nil { - return m.RackInfos - } - return nil +type ReleaseAdminTokenResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields } -func (m *DataCenterInfo) GetRemoteVolumeCount() uint64 { - if m != nil { - return m.RemoteVolumeCount +func (x *ReleaseAdminTokenResponse) Reset() { + *x = ReleaseAdminTokenResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_master_proto_msgTypes[40] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return 0 } -type TopologyInfo struct { - Id string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"` - VolumeCount uint64 `protobuf:"varint,2,opt,name=volume_count,json=volumeCount" json:"volume_count,omitempty"` - MaxVolumeCount uint64 `protobuf:"varint,3,opt,name=max_volume_count,json=maxVolumeCount" json:"max_volume_count,omitempty"` - FreeVolumeCount uint64 `protobuf:"varint,4,opt,name=free_volume_count,json=freeVolumeCount" json:"free_volume_count,omitempty"` - ActiveVolumeCount uint64 `protobuf:"varint,5,opt,name=active_volume_count,json=activeVolumeCount" json:"active_volume_count,omitempty"` - DataCenterInfos []*DataCenterInfo `protobuf:"bytes,6,rep,name=data_center_infos,json=dataCenterInfos" json:"data_center_infos,omitempty"` - RemoteVolumeCount uint64 `protobuf:"varint,7,opt,name=remote_volume_count,json=remoteVolumeCount" json:"remote_volume_count,omitempty"` +func (x *ReleaseAdminTokenResponse) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *TopologyInfo) Reset() { *m = TopologyInfo{} } -func (m *TopologyInfo) String() string { return proto.CompactTextString(m) } -func (*TopologyInfo) ProtoMessage() {} -func (*TopologyInfo) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{26} } +func (*ReleaseAdminTokenResponse) ProtoMessage() {} -func (m *TopologyInfo) GetId() string { - if m != nil { - return m.Id +func (x *ReleaseAdminTokenResponse) ProtoReflect() protoreflect.Message { + mi := &file_master_proto_msgTypes[40] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return "" + return mi.MessageOf(x) } -func (m *TopologyInfo) GetVolumeCount() uint64 { - if m != nil { - return 
m.VolumeCount - } - return 0 +// Deprecated: Use ReleaseAdminTokenResponse.ProtoReflect.Descriptor instead. +func (*ReleaseAdminTokenResponse) Descriptor() ([]byte, []int) { + return file_master_proto_rawDescGZIP(), []int{40} } -func (m *TopologyInfo) GetMaxVolumeCount() uint64 { - if m != nil { - return m.MaxVolumeCount - } - return 0 +type SuperBlockExtra_ErasureCoding struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Data uint32 `protobuf:"varint,1,opt,name=data,proto3" json:"data,omitempty"` + Parity uint32 `protobuf:"varint,2,opt,name=parity,proto3" json:"parity,omitempty"` + VolumeIds []uint32 `protobuf:"varint,3,rep,packed,name=volume_ids,json=volumeIds,proto3" json:"volume_ids,omitempty"` } -func (m *TopologyInfo) GetFreeVolumeCount() uint64 { - if m != nil { - return m.FreeVolumeCount +func (x *SuperBlockExtra_ErasureCoding) Reset() { + *x = SuperBlockExtra_ErasureCoding{} + if protoimpl.UnsafeEnabled { + mi := &file_master_proto_msgTypes[43] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return 0 } -func (m *TopologyInfo) GetActiveVolumeCount() uint64 { - if m != nil { - return m.ActiveVolumeCount - } - return 0 +func (x *SuperBlockExtra_ErasureCoding) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *TopologyInfo) GetDataCenterInfos() []*DataCenterInfo { - if m != nil { - return m.DataCenterInfos +func (*SuperBlockExtra_ErasureCoding) ProtoMessage() {} + +func (x *SuperBlockExtra_ErasureCoding) ProtoReflect() protoreflect.Message { + mi := &file_master_proto_msgTypes[43] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return nil + return mi.MessageOf(x) +} + +// Deprecated: Use SuperBlockExtra_ErasureCoding.ProtoReflect.Descriptor instead. 
+func (*SuperBlockExtra_ErasureCoding) Descriptor() ([]byte, []int) { + return file_master_proto_rawDescGZIP(), []int{7, 0} } -func (m *TopologyInfo) GetRemoteVolumeCount() uint64 { - if m != nil { - return m.RemoteVolumeCount +func (x *SuperBlockExtra_ErasureCoding) GetData() uint32 { + if x != nil { + return x.Data } return 0 } -type VolumeListRequest struct { +func (x *SuperBlockExtra_ErasureCoding) GetParity() uint32 { + if x != nil { + return x.Parity + } + return 0 } -func (m *VolumeListRequest) Reset() { *m = VolumeListRequest{} } -func (m *VolumeListRequest) String() string { return proto.CompactTextString(m) } -func (*VolumeListRequest) ProtoMessage() {} -func (*VolumeListRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{27} } - -type VolumeListResponse struct { - TopologyInfo *TopologyInfo `protobuf:"bytes,1,opt,name=topology_info,json=topologyInfo" json:"topology_info,omitempty"` - VolumeSizeLimitMb uint64 `protobuf:"varint,2,opt,name=volume_size_limit_mb,json=volumeSizeLimitMb" json:"volume_size_limit_mb,omitempty"` +func (x *SuperBlockExtra_ErasureCoding) GetVolumeIds() []uint32 { + if x != nil { + return x.VolumeIds + } + return nil } -func (m *VolumeListResponse) Reset() { *m = VolumeListResponse{} } -func (m *VolumeListResponse) String() string { return proto.CompactTextString(m) } -func (*VolumeListResponse) ProtoMessage() {} -func (*VolumeListResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{28} } +type LookupVolumeResponse_VolumeIdLocation struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func (m *VolumeListResponse) GetTopologyInfo() *TopologyInfo { - if m != nil { - return m.TopologyInfo - } - return nil + VolumeId string `protobuf:"bytes,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` + Locations []*Location `protobuf:"bytes,2,rep,name=locations,proto3" json:"locations,omitempty"` + Error string `protobuf:"bytes,3,opt,name=error,proto3" json:"error,omitempty"` } -func (m *VolumeListResponse) GetVolumeSizeLimitMb() uint64 { - if m != nil { - return m.VolumeSizeLimitMb +func (x *LookupVolumeResponse_VolumeIdLocation) Reset() { + *x = LookupVolumeResponse_VolumeIdLocation{} + if protoimpl.UnsafeEnabled { + mi := &file_master_proto_msgTypes[44] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return 0 } -type LookupEcVolumeRequest struct { - VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` +func (x *LookupVolumeResponse_VolumeIdLocation) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *LookupEcVolumeRequest) Reset() { *m = LookupEcVolumeRequest{} } -func (m *LookupEcVolumeRequest) String() string { return proto.CompactTextString(m) } -func (*LookupEcVolumeRequest) ProtoMessage() {} -func (*LookupEcVolumeRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{29} } +func (*LookupVolumeResponse_VolumeIdLocation) ProtoMessage() {} -func (m *LookupEcVolumeRequest) GetVolumeId() uint32 { - if m != nil { - return m.VolumeId +func (x *LookupVolumeResponse_VolumeIdLocation) ProtoReflect() protoreflect.Message { + mi := &file_master_proto_msgTypes[44] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return 0 + return mi.MessageOf(x) } -type LookupEcVolumeResponse struct { - VolumeId uint32 
`protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` - ShardIdLocations []*LookupEcVolumeResponse_EcShardIdLocation `protobuf:"bytes,2,rep,name=shard_id_locations,json=shardIdLocations" json:"shard_id_locations,omitempty"` +// Deprecated: Use LookupVolumeResponse_VolumeIdLocation.ProtoReflect.Descriptor instead. +func (*LookupVolumeResponse_VolumeIdLocation) Descriptor() ([]byte, []int) { + return file_master_proto_rawDescGZIP(), []int{11, 0} } -func (m *LookupEcVolumeResponse) Reset() { *m = LookupEcVolumeResponse{} } -func (m *LookupEcVolumeResponse) String() string { return proto.CompactTextString(m) } -func (*LookupEcVolumeResponse) ProtoMessage() {} -func (*LookupEcVolumeResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{30} } - -func (m *LookupEcVolumeResponse) GetVolumeId() uint32 { - if m != nil { - return m.VolumeId +func (x *LookupVolumeResponse_VolumeIdLocation) GetVolumeId() string { + if x != nil { + return x.VolumeId } - return 0 + return "" } -func (m *LookupEcVolumeResponse) GetShardIdLocations() []*LookupEcVolumeResponse_EcShardIdLocation { - if m != nil { - return m.ShardIdLocations +func (x *LookupVolumeResponse_VolumeIdLocation) GetLocations() []*Location { + if x != nil { + return x.Locations } return nil } -type LookupEcVolumeResponse_EcShardIdLocation struct { - ShardId uint32 `protobuf:"varint,1,opt,name=shard_id,json=shardId" json:"shard_id,omitempty"` - Locations []*Location `protobuf:"bytes,2,rep,name=locations" json:"locations,omitempty"` +func (x *LookupVolumeResponse_VolumeIdLocation) GetError() string { + if x != nil { + return x.Error + } + return "" } -func (m *LookupEcVolumeResponse_EcShardIdLocation) Reset() { - *m = LookupEcVolumeResponse_EcShardIdLocation{} -} -func (m *LookupEcVolumeResponse_EcShardIdLocation) String() string { return proto.CompactTextString(m) } -func (*LookupEcVolumeResponse_EcShardIdLocation) ProtoMessage() {} -func (*LookupEcVolumeResponse_EcShardIdLocation) Descriptor() ([]byte, []int) { - return fileDescriptor0, []int{30, 0} -} +type LookupEcVolumeResponse_EcShardIdLocation struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func (m *LookupEcVolumeResponse_EcShardIdLocation) GetShardId() uint32 { - if m != nil { - return m.ShardId - } - return 0 + ShardId uint32 `protobuf:"varint,1,opt,name=shard_id,json=shardId,proto3" json:"shard_id,omitempty"` + Locations []*Location `protobuf:"bytes,2,rep,name=locations,proto3" json:"locations,omitempty"` } -func (m *LookupEcVolumeResponse_EcShardIdLocation) GetLocations() []*Location { - if m != nil { - return m.Locations +func (x *LookupEcVolumeResponse_EcShardIdLocation) Reset() { + *x = LookupEcVolumeResponse_EcShardIdLocation{} + if protoimpl.UnsafeEnabled { + mi := &file_master_proto_msgTypes[49] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return nil } -type GetMasterConfigurationRequest struct { +func (x *LookupEcVolumeResponse_EcShardIdLocation) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *GetMasterConfigurationRequest) Reset() { *m = GetMasterConfigurationRequest{} } -func (m *GetMasterConfigurationRequest) String() string { return proto.CompactTextString(m) } -func (*GetMasterConfigurationRequest) ProtoMessage() {} -func (*GetMasterConfigurationRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{31} } +func (*LookupEcVolumeResponse_EcShardIdLocation) ProtoMessage() {} -type 
GetMasterConfigurationResponse struct { - MetricsAddress string `protobuf:"bytes,1,opt,name=metrics_address,json=metricsAddress" json:"metrics_address,omitempty"` - MetricsIntervalSeconds uint32 `protobuf:"varint,2,opt,name=metrics_interval_seconds,json=metricsIntervalSeconds" json:"metrics_interval_seconds,omitempty"` +func (x *LookupEcVolumeResponse_EcShardIdLocation) ProtoReflect() protoreflect.Message { + mi := &file_master_proto_msgTypes[49] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -func (m *GetMasterConfigurationResponse) Reset() { *m = GetMasterConfigurationResponse{} } -func (m *GetMasterConfigurationResponse) String() string { return proto.CompactTextString(m) } -func (*GetMasterConfigurationResponse) ProtoMessage() {} -func (*GetMasterConfigurationResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{32} } +// Deprecated: Use LookupEcVolumeResponse_EcShardIdLocation.ProtoReflect.Descriptor instead. +func (*LookupEcVolumeResponse_EcShardIdLocation) Descriptor() ([]byte, []int) { + return file_master_proto_rawDescGZIP(), []int{30, 0} +} -func (m *GetMasterConfigurationResponse) GetMetricsAddress() string { - if m != nil { - return m.MetricsAddress +func (x *LookupEcVolumeResponse_EcShardIdLocation) GetShardId() uint32 { + if x != nil { + return x.ShardId } - return "" + return 0 } -func (m *GetMasterConfigurationResponse) GetMetricsIntervalSeconds() uint32 { - if m != nil { - return m.MetricsIntervalSeconds +func (x *LookupEcVolumeResponse_EcShardIdLocation) GetLocations() []*Location { + if x != nil { + return x.Locations } - return 0 + return nil } -func init() { - proto.RegisterType((*Heartbeat)(nil), "master_pb.Heartbeat") - proto.RegisterType((*HeartbeatResponse)(nil), "master_pb.HeartbeatResponse") - proto.RegisterType((*VolumeInformationMessage)(nil), "master_pb.VolumeInformationMessage") - proto.RegisterType((*VolumeShortInformationMessage)(nil), "master_pb.VolumeShortInformationMessage") - proto.RegisterType((*VolumeEcShardInformationMessage)(nil), "master_pb.VolumeEcShardInformationMessage") - proto.RegisterType((*StorageBackend)(nil), "master_pb.StorageBackend") - proto.RegisterType((*Empty)(nil), "master_pb.Empty") - proto.RegisterType((*SuperBlockExtra)(nil), "master_pb.SuperBlockExtra") - proto.RegisterType((*SuperBlockExtra_ErasureCoding)(nil), "master_pb.SuperBlockExtra.ErasureCoding") - proto.RegisterType((*KeepConnectedRequest)(nil), "master_pb.KeepConnectedRequest") - proto.RegisterType((*VolumeLocation)(nil), "master_pb.VolumeLocation") - proto.RegisterType((*LookupVolumeRequest)(nil), "master_pb.LookupVolumeRequest") - proto.RegisterType((*LookupVolumeResponse)(nil), "master_pb.LookupVolumeResponse") - proto.RegisterType((*LookupVolumeResponse_VolumeIdLocation)(nil), "master_pb.LookupVolumeResponse.VolumeIdLocation") - proto.RegisterType((*Location)(nil), "master_pb.Location") - proto.RegisterType((*AssignRequest)(nil), "master_pb.AssignRequest") - proto.RegisterType((*AssignResponse)(nil), "master_pb.AssignResponse") - proto.RegisterType((*StatisticsRequest)(nil), "master_pb.StatisticsRequest") - proto.RegisterType((*StatisticsResponse)(nil), "master_pb.StatisticsResponse") - proto.RegisterType((*StorageType)(nil), "master_pb.StorageType") - proto.RegisterType((*Collection)(nil), "master_pb.Collection") - proto.RegisterType((*CollectionListRequest)(nil), 
"master_pb.CollectionListRequest") - proto.RegisterType((*CollectionListResponse)(nil), "master_pb.CollectionListResponse") - proto.RegisterType((*CollectionDeleteRequest)(nil), "master_pb.CollectionDeleteRequest") - proto.RegisterType((*CollectionDeleteResponse)(nil), "master_pb.CollectionDeleteResponse") - proto.RegisterType((*DataNodeInfo)(nil), "master_pb.DataNodeInfo") - proto.RegisterType((*RackInfo)(nil), "master_pb.RackInfo") - proto.RegisterType((*DataCenterInfo)(nil), "master_pb.DataCenterInfo") - proto.RegisterType((*TopologyInfo)(nil), "master_pb.TopologyInfo") - proto.RegisterType((*VolumeListRequest)(nil), "master_pb.VolumeListRequest") - proto.RegisterType((*VolumeListResponse)(nil), "master_pb.VolumeListResponse") - proto.RegisterType((*LookupEcVolumeRequest)(nil), "master_pb.LookupEcVolumeRequest") - proto.RegisterType((*LookupEcVolumeResponse)(nil), "master_pb.LookupEcVolumeResponse") - proto.RegisterType((*LookupEcVolumeResponse_EcShardIdLocation)(nil), "master_pb.LookupEcVolumeResponse.EcShardIdLocation") - proto.RegisterType((*GetMasterConfigurationRequest)(nil), "master_pb.GetMasterConfigurationRequest") - proto.RegisterType((*GetMasterConfigurationResponse)(nil), "master_pb.GetMasterConfigurationResponse") +var File_master_proto protoreflect.FileDescriptor + +var file_master_proto_rawDesc = []byte{ + 0x0a, 0x0c, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x09, + 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x22, 0xfc, 0x06, 0x0a, 0x09, 0x48, 0x65, + 0x61, 0x72, 0x74, 0x62, 0x65, 0x61, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x70, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x70, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x6f, 0x72, 0x74, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x04, 0x70, 0x6f, 0x72, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x70, + 0x75, 0x62, 0x6c, 0x69, 0x63, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x09, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x55, 0x72, 0x6c, 0x12, 0x20, 0x0a, 0x0c, 0x6d, 0x61, + 0x78, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x04, + 0x52, 0x0a, 0x6d, 0x61, 0x78, 0x46, 0x69, 0x6c, 0x65, 0x4b, 0x65, 0x79, 0x12, 0x1f, 0x0a, 0x0b, + 0x64, 0x61, 0x74, 0x61, 0x5f, 0x63, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x18, 0x06, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0a, 0x64, 0x61, 0x74, 0x61, 0x43, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x12, 0x12, 0x0a, + 0x04, 0x72, 0x61, 0x63, 0x6b, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x72, 0x61, 0x63, + 0x6b, 0x12, 0x1d, 0x0a, 0x0a, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18, + 0x08, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x50, 0x6f, 0x72, 0x74, + 0x12, 0x3d, 0x0a, 0x07, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x73, 0x18, 0x09, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x23, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, + 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, + 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x07, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x73, 0x12, + 0x49, 0x0a, 0x0b, 0x6e, 0x65, 0x77, 0x5f, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x73, 0x18, 0x0a, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, + 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x53, 0x68, 0x6f, 0x72, 0x74, 0x49, 0x6e, 0x66, 0x6f, + 0x72, 0x6d, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x0a, + 0x6e, 0x65, 0x77, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 
0x73, 0x12, 0x51, 0x0a, 0x0f, 0x64, 0x65, + 0x6c, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x73, 0x18, 0x0b, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, + 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x53, 0x68, 0x6f, 0x72, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x72, + 0x6d, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x0e, 0x64, + 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x73, 0x12, 0x24, 0x0a, + 0x0e, 0x68, 0x61, 0x73, 0x5f, 0x6e, 0x6f, 0x5f, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x73, 0x18, + 0x0c, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x68, 0x61, 0x73, 0x4e, 0x6f, 0x56, 0x6f, 0x6c, 0x75, + 0x6d, 0x65, 0x73, 0x12, 0x47, 0x0a, 0x09, 0x65, 0x63, 0x5f, 0x73, 0x68, 0x61, 0x72, 0x64, 0x73, + 0x18, 0x10, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, + 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, + 0x49, 0x6e, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x73, 0x73, 0x61, + 0x67, 0x65, 0x52, 0x08, 0x65, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x12, 0x4e, 0x0a, 0x0d, + 0x6e, 0x65, 0x77, 0x5f, 0x65, 0x63, 0x5f, 0x73, 0x68, 0x61, 0x72, 0x64, 0x73, 0x18, 0x11, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, + 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x49, 0x6e, 0x66, + 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, + 0x0b, 0x6e, 0x65, 0x77, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x12, 0x56, 0x0a, 0x11, + 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x65, 0x63, 0x5f, 0x73, 0x68, 0x61, 0x72, 0x64, + 0x73, 0x18, 0x12, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, + 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, + 0x64, 0x49, 0x6e, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x73, 0x73, + 0x61, 0x67, 0x65, 0x52, 0x0f, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x45, 0x63, 0x53, 0x68, + 0x61, 0x72, 0x64, 0x73, 0x12, 0x27, 0x0a, 0x10, 0x68, 0x61, 0x73, 0x5f, 0x6e, 0x6f, 0x5f, 0x65, + 0x63, 0x5f, 0x73, 0x68, 0x61, 0x72, 0x64, 0x73, 0x18, 0x13, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, + 0x68, 0x61, 0x73, 0x4e, 0x6f, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x12, 0x55, 0x0a, + 0x11, 0x6d, 0x61, 0x78, 0x5f, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, + 0x74, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, + 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x48, 0x65, 0x61, 0x72, 0x74, 0x62, 0x65, 0x61, 0x74, 0x2e, 0x4d, + 0x61, 0x78, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x73, 0x45, 0x6e, + 0x74, 0x72, 0x79, 0x52, 0x0f, 0x6d, 0x61, 0x78, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6f, + 0x75, 0x6e, 0x74, 0x73, 0x1a, 0x42, 0x0a, 0x14, 0x4d, 0x61, 0x78, 0x56, 0x6f, 0x6c, 0x75, 0x6d, + 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, + 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, + 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x80, 0x02, 0x0a, 0x11, 0x48, 0x65, 0x61, + 0x72, 0x74, 0x62, 0x65, 0x61, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 
0x65, 0x12, 0x2a, + 0x0a, 0x11, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x5f, 0x6c, 0x69, + 0x6d, 0x69, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0f, 0x76, 0x6f, 0x6c, 0x75, 0x6d, + 0x65, 0x53, 0x69, 0x7a, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x6c, 0x65, + 0x61, 0x64, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x6c, 0x65, 0x61, 0x64, + 0x65, 0x72, 0x12, 0x27, 0x0a, 0x0f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x5f, 0x61, 0x64, + 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x6d, 0x65, 0x74, + 0x72, 0x69, 0x63, 0x73, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x38, 0x0a, 0x18, 0x6d, + 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x5f, + 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x16, 0x6d, + 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x53, 0x65, + 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x12, 0x44, 0x0a, 0x10, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, + 0x5f, 0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x19, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x53, 0x74, 0x6f, 0x72, + 0x61, 0x67, 0x65, 0x42, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x52, 0x0f, 0x73, 0x74, 0x6f, 0x72, + 0x61, 0x67, 0x65, 0x42, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x73, 0x22, 0x98, 0x04, 0x0a, 0x18, + 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0d, 0x52, 0x02, 0x69, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x73, 0x69, 0x7a, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x04, 0x73, 0x69, 0x7a, 0x65, 0x12, 0x1e, 0x0a, 0x0a, + 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1d, 0x0a, 0x0a, + 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, + 0x52, 0x09, 0x66, 0x69, 0x6c, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x64, + 0x65, 0x6c, 0x65, 0x74, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x04, 0x52, 0x0b, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x2c, + 0x0a, 0x12, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x5f, 0x63, + 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x04, 0x52, 0x10, 0x64, 0x65, 0x6c, 0x65, + 0x74, 0x65, 0x64, 0x42, 0x79, 0x74, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x1b, 0x0a, 0x09, + 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x08, 0x72, 0x65, 0x61, 0x64, 0x4f, 0x6e, 0x6c, 0x79, 0x12, 0x2b, 0x0a, 0x11, 0x72, 0x65, 0x70, + 0x6c, 0x69, 0x63, 0x61, 0x5f, 0x70, 0x6c, 0x61, 0x63, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x18, 0x08, + 0x20, 0x01, 0x28, 0x0d, 0x52, 0x10, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x50, 0x6c, 0x61, + 0x63, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, + 0x6e, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, + 0x12, 0x10, 0x0a, 0x03, 0x74, 0x74, 0x6c, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x03, 0x74, + 0x74, 0x6c, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x5f, 0x72, 0x65, + 0x76, 
0x69, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0f, 0x63, 0x6f, + 0x6d, 0x70, 0x61, 0x63, 0x74, 0x52, 0x65, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x2c, 0x0a, + 0x12, 0x6d, 0x6f, 0x64, 0x69, 0x66, 0x69, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x5f, 0x73, 0x65, 0x63, + 0x6f, 0x6e, 0x64, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x03, 0x52, 0x10, 0x6d, 0x6f, 0x64, 0x69, 0x66, + 0x69, 0x65, 0x64, 0x41, 0x74, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x12, 0x2e, 0x0a, 0x13, 0x72, + 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x6e, 0x61, + 0x6d, 0x65, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, + 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x2c, 0x0a, 0x12, 0x72, + 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x6b, 0x65, + 0x79, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x53, + 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x4b, 0x65, 0x79, 0x12, 0x1b, 0x0a, 0x09, 0x64, 0x69, 0x73, + 0x6b, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x64, 0x69, + 0x73, 0x6b, 0x54, 0x79, 0x70, 0x65, 0x22, 0xc5, 0x01, 0x0a, 0x1d, 0x56, 0x6f, 0x6c, 0x75, 0x6d, + 0x65, 0x53, 0x68, 0x6f, 0x72, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0d, 0x52, 0x02, 0x69, 0x64, 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6f, + 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2b, 0x0a, 0x11, 0x72, 0x65, 0x70, 0x6c, + 0x69, 0x63, 0x61, 0x5f, 0x70, 0x6c, 0x61, 0x63, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x18, 0x08, 0x20, + 0x01, 0x28, 0x0d, 0x52, 0x10, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x50, 0x6c, 0x61, 0x63, + 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, + 0x18, 0x09, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, + 0x10, 0x0a, 0x03, 0x74, 0x74, 0x6c, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x03, 0x74, 0x74, + 0x6c, 0x12, 0x1b, 0x0a, 0x09, 0x64, 0x69, 0x73, 0x6b, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x0f, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x64, 0x69, 0x73, 0x6b, 0x54, 0x79, 0x70, 0x65, 0x22, 0x92, + 0x01, 0x0a, 0x1f, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, + 0x49, 0x6e, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x73, 0x73, 0x61, + 0x67, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x02, + 0x69, 0x64, 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x12, 0x22, 0x0a, 0x0d, 0x65, 0x63, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x5f, 0x62, + 0x69, 0x74, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0b, 0x65, 0x63, 0x49, 0x6e, 0x64, + 0x65, 0x78, 0x42, 0x69, 0x74, 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x64, 0x69, 0x73, 0x6b, 0x5f, 0x74, + 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x64, 0x69, 0x73, 0x6b, 0x54, + 0x79, 0x70, 0x65, 0x22, 0xbe, 0x01, 0x0a, 0x0e, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x42, + 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 
0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x49, 0x0a, 0x0a, 0x70, 0x72, + 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, + 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x53, 0x74, 0x6f, 0x72, 0x61, + 0x67, 0x65, 0x42, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x2e, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, + 0x74, 0x69, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0a, 0x70, 0x72, 0x6f, 0x70, 0x65, + 0x72, 0x74, 0x69, 0x65, 0x73, 0x1a, 0x3d, 0x0a, 0x0f, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, + 0x69, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x3a, 0x02, 0x38, 0x01, 0x22, 0x07, 0x0a, 0x05, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0xbe, 0x01, + 0x0a, 0x0f, 0x53, 0x75, 0x70, 0x65, 0x72, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x45, 0x78, 0x74, 0x72, + 0x61, 0x12, 0x4f, 0x0a, 0x0e, 0x65, 0x72, 0x61, 0x73, 0x75, 0x72, 0x65, 0x5f, 0x63, 0x6f, 0x64, + 0x69, 0x6e, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x6d, 0x61, 0x73, 0x74, + 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x53, 0x75, 0x70, 0x65, 0x72, 0x42, 0x6c, 0x6f, 0x63, 0x6b, + 0x45, 0x78, 0x74, 0x72, 0x61, 0x2e, 0x45, 0x72, 0x61, 0x73, 0x75, 0x72, 0x65, 0x43, 0x6f, 0x64, + 0x69, 0x6e, 0x67, 0x52, 0x0d, 0x65, 0x72, 0x61, 0x73, 0x75, 0x72, 0x65, 0x43, 0x6f, 0x64, 0x69, + 0x6e, 0x67, 0x1a, 0x5a, 0x0a, 0x0d, 0x45, 0x72, 0x61, 0x73, 0x75, 0x72, 0x65, 0x43, 0x6f, 0x64, + 0x69, 0x6e, 0x67, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0d, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x69, 0x74, + 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x70, 0x61, 0x72, 0x69, 0x74, 0x79, 0x12, + 0x1d, 0x0a, 0x0a, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x03, 0x20, + 0x03, 0x28, 0x0d, 0x52, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x73, 0x22, 0x47, + 0x0a, 0x14, 0x4b, 0x65, 0x65, 0x70, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x65, 0x64, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x67, 0x72, + 0x70, 0x63, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x67, + 0x72, 0x70, 0x63, 0x50, 0x6f, 0x72, 0x74, 0x22, 0xb8, 0x01, 0x0a, 0x0e, 0x56, 0x6f, 0x6c, 0x75, + 0x6d, 0x65, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x72, + 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72, 0x6c, 0x12, 0x1d, 0x0a, 0x0a, + 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x09, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x55, 0x72, 0x6c, 0x12, 0x19, 0x0a, 0x08, 0x6e, + 0x65, 0x77, 0x5f, 0x76, 0x69, 0x64, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0d, 0x52, 0x07, 0x6e, + 0x65, 0x77, 0x56, 0x69, 0x64, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, + 0x64, 0x5f, 0x76, 0x69, 0x64, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0d, 0x52, 0x0b, 0x64, 0x65, + 0x6c, 0x65, 0x74, 0x65, 0x64, 0x56, 0x69, 0x64, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x6c, 0x65, 0x61, + 0x64, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 
0x52, 0x06, 0x6c, 0x65, 0x61, 0x64, 0x65, + 0x72, 0x12, 0x1f, 0x0a, 0x0b, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x63, 0x65, 0x6e, 0x74, 0x65, 0x72, + 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x64, 0x61, 0x74, 0x61, 0x43, 0x65, 0x6e, 0x74, + 0x65, 0x72, 0x22, 0x54, 0x0a, 0x13, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x56, 0x6f, 0x6c, 0x75, + 0x6d, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x76, 0x6f, 0x6c, + 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x09, 0x76, + 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x73, 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6f, + 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xf2, 0x01, 0x0a, 0x14, 0x4c, 0x6f, 0x6f, + 0x6b, 0x75, 0x70, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x60, 0x0a, 0x13, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x5f, 0x6c, + 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x30, + 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, + 0x70, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, + 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x11, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x1a, 0x78, 0x0a, 0x10, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x4c, + 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, + 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, + 0x6d, 0x65, 0x49, 0x64, 0x12, 0x31, 0x0a, 0x09, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, + 0x5f, 0x70, 0x62, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x6c, 0x6f, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x22, 0x3b, 0x0a, + 0x08, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x72, 0x6c, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72, 0x6c, 0x12, 0x1d, 0x0a, 0x0a, 0x70, + 0x75, 0x62, 0x6c, 0x69, 0x63, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x09, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x55, 0x72, 0x6c, 0x22, 0xd0, 0x02, 0x0a, 0x0d, 0x41, + 0x73, 0x73, 0x69, 0x67, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x14, 0x0a, 0x05, + 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x63, 0x6f, 0x75, + 0x6e, 0x74, 0x12, 0x20, 0x0a, 0x0b, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x74, 0x74, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x03, 0x74, 0x74, 0x6c, 0x12, 0x1f, 0x0a, 0x0b, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x63, + 0x65, 0x6e, 0x74, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 
0x64, 0x61, 0x74, + 0x61, 0x43, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x72, 0x61, 0x63, 0x6b, 0x18, + 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x72, 0x61, 0x63, 0x6b, 0x12, 0x1b, 0x0a, 0x09, 0x64, + 0x61, 0x74, 0x61, 0x5f, 0x6e, 0x6f, 0x64, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, + 0x64, 0x61, 0x74, 0x61, 0x4e, 0x6f, 0x64, 0x65, 0x12, 0x32, 0x0a, 0x16, 0x6d, 0x65, 0x6d, 0x6f, + 0x72, 0x79, 0x5f, 0x6d, 0x61, 0x70, 0x5f, 0x6d, 0x61, 0x78, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x5f, + 0x6d, 0x62, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x12, 0x6d, 0x65, 0x6d, 0x6f, 0x72, 0x79, + 0x4d, 0x61, 0x70, 0x4d, 0x61, 0x78, 0x53, 0x69, 0x7a, 0x65, 0x4d, 0x62, 0x12, 0x32, 0x0a, 0x15, + 0x57, 0x72, 0x69, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, + 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x13, 0x57, 0x72, 0x69, + 0x74, 0x61, 0x62, 0x6c, 0x65, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, + 0x12, 0x1b, 0x0a, 0x09, 0x64, 0x69, 0x73, 0x6b, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x0a, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x08, 0x64, 0x69, 0x73, 0x6b, 0x54, 0x79, 0x70, 0x65, 0x22, 0x93, 0x01, + 0x0a, 0x0e, 0x41, 0x73, 0x73, 0x69, 0x67, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x10, 0x0a, 0x03, 0x66, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x66, + 0x69, 0x64, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x72, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x03, 0x75, 0x72, 0x6c, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x5f, 0x75, + 0x72, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, + 0x55, 0x72, 0x6c, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x04, 0x52, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, + 0x6f, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x12, + 0x12, 0x0a, 0x04, 0x61, 0x75, 0x74, 0x68, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x61, + 0x75, 0x74, 0x68, 0x22, 0x84, 0x01, 0x0a, 0x11, 0x53, 0x74, 0x61, 0x74, 0x69, 0x73, 0x74, 0x69, + 0x63, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x20, 0x0a, 0x0b, 0x72, 0x65, 0x70, + 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, + 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1e, 0x0a, 0x0a, 0x63, + 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x74, + 0x74, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x74, 0x74, 0x6c, 0x12, 0x1b, 0x0a, + 0x09, 0x64, 0x69, 0x73, 0x6b, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x08, 0x64, 0x69, 0x73, 0x6b, 0x54, 0x79, 0x70, 0x65, 0x22, 0x6f, 0x0a, 0x12, 0x53, 0x74, + 0x61, 0x74, 0x69, 0x73, 0x74, 0x69, 0x63, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x1d, 0x0a, 0x0a, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x53, 0x69, 0x7a, 0x65, 0x12, + 0x1b, 0x0a, 0x09, 0x75, 0x73, 0x65, 0x64, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x05, 0x20, 0x01, + 0x28, 0x04, 0x52, 0x08, 0x75, 0x73, 0x65, 0x64, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1d, 0x0a, 0x0a, + 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x04, + 0x52, 
0x09, 0x66, 0x69, 0x6c, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0x20, 0x0a, 0x0a, 0x43, + 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x7b, 0x0a, + 0x15, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x69, 0x73, 0x74, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x34, 0x0a, 0x16, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, + 0x65, 0x5f, 0x6e, 0x6f, 0x72, 0x6d, 0x61, 0x6c, 0x5f, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x73, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x14, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x4e, + 0x6f, 0x72, 0x6d, 0x61, 0x6c, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x73, 0x12, 0x2c, 0x0a, 0x12, + 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, 0x65, 0x63, 0x5f, 0x76, 0x6f, 0x6c, 0x75, 0x6d, + 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, + 0x65, 0x45, 0x63, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x73, 0x22, 0x51, 0x0a, 0x16, 0x43, 0x6f, + 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x37, 0x0a, 0x0b, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x6d, 0x61, 0x73, 0x74, + 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x0b, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x2d, 0x0a, + 0x17, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x6c, 0x65, 0x74, + 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x1a, 0x0a, 0x18, + 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x91, 0x03, 0x0a, 0x08, 0x44, 0x69, 0x73, + 0x6b, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x76, 0x6f, 0x6c, + 0x75, 0x6d, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, + 0x0b, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x28, 0x0a, 0x10, + 0x6d, 0x61, 0x78, 0x5f, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0e, 0x6d, 0x61, 0x78, 0x56, 0x6f, 0x6c, 0x75, 0x6d, + 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x2a, 0x0a, 0x11, 0x66, 0x72, 0x65, 0x65, 0x5f, 0x76, + 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x04, 0x52, 0x0f, 0x66, 0x72, 0x65, 0x65, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6f, 0x75, + 0x6e, 0x74, 0x12, 0x2e, 0x0a, 0x13, 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x76, 0x6f, 0x6c, + 0x75, 0x6d, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x04, 0x52, + 0x11, 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6f, 0x75, + 0x6e, 0x74, 0x12, 0x46, 0x0a, 0x0c, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x6e, 0x66, + 0x6f, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, + 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x72, + 0x6d, 0x61, 0x74, 0x69, 0x6f, 
0x6e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x0b, 0x76, + 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x73, 0x12, 0x50, 0x0a, 0x0e, 0x65, 0x63, + 0x5f, 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x73, 0x18, 0x07, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, + 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x49, 0x6e, 0x66, 0x6f, + 0x72, 0x6d, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x0c, + 0x65, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x49, 0x6e, 0x66, 0x6f, 0x73, 0x12, 0x2e, 0x0a, 0x13, + 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x5f, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x63, 0x6f, + 0x75, 0x6e, 0x74, 0x18, 0x08, 0x20, 0x01, 0x28, 0x04, 0x52, 0x11, 0x72, 0x65, 0x6d, 0x6f, 0x74, + 0x65, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0xb7, 0x01, 0x0a, + 0x0c, 0x44, 0x61, 0x74, 0x61, 0x4e, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x0e, 0x0a, + 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x44, 0x0a, + 0x09, 0x64, 0x69, 0x73, 0x6b, 0x49, 0x6e, 0x66, 0x6f, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x26, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x44, 0x61, 0x74, + 0x61, 0x4e, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x44, 0x69, 0x73, 0x6b, 0x49, 0x6e, + 0x66, 0x6f, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x09, 0x64, 0x69, 0x73, 0x6b, 0x49, 0x6e, + 0x66, 0x6f, 0x73, 0x1a, 0x51, 0x0a, 0x0e, 0x44, 0x69, 0x73, 0x6b, 0x49, 0x6e, 0x66, 0x6f, 0x73, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x29, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, + 0x70, 0x62, 0x2e, 0x44, 0x69, 0x73, 0x6b, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xf0, 0x01, 0x0a, 0x08, 0x52, 0x61, 0x63, 0x6b, 0x49, + 0x6e, 0x66, 0x6f, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x02, 0x69, 0x64, 0x12, 0x3f, 0x0a, 0x0f, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x6e, 0x6f, 0x64, 0x65, + 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x6d, + 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x4e, 0x6f, 0x64, + 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x0d, 0x64, 0x61, 0x74, 0x61, 0x4e, 0x6f, 0x64, 0x65, 0x49, + 0x6e, 0x66, 0x6f, 0x73, 0x12, 0x40, 0x0a, 0x09, 0x64, 0x69, 0x73, 0x6b, 0x49, 0x6e, 0x66, 0x6f, + 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, + 0x5f, 0x70, 0x62, 0x2e, 0x52, 0x61, 0x63, 0x6b, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x44, 0x69, 0x73, + 0x6b, 0x49, 0x6e, 0x66, 0x6f, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x09, 0x64, 0x69, 0x73, + 0x6b, 0x49, 0x6e, 0x66, 0x6f, 0x73, 0x1a, 0x51, 0x0a, 0x0e, 0x44, 0x69, 0x73, 0x6b, 0x49, 0x6e, + 0x66, 0x6f, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x29, 0x0a, 0x05, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x6d, 0x61, 0x73, 0x74, + 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x44, 0x69, 0x73, 0x6b, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 
0x22, 0xef, 0x01, 0x0a, 0x0e, 0x44, 0x61, + 0x74, 0x61, 0x43, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x0e, 0x0a, 0x02, + 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x32, 0x0a, 0x0a, + 0x72, 0x61, 0x63, 0x6b, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x13, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x52, 0x61, 0x63, + 0x6b, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x09, 0x72, 0x61, 0x63, 0x6b, 0x49, 0x6e, 0x66, 0x6f, 0x73, + 0x12, 0x46, 0x0a, 0x09, 0x64, 0x69, 0x73, 0x6b, 0x49, 0x6e, 0x66, 0x6f, 0x73, 0x18, 0x03, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, + 0x44, 0x61, 0x74, 0x61, 0x43, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x44, + 0x69, 0x73, 0x6b, 0x49, 0x6e, 0x66, 0x6f, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x09, 0x64, + 0x69, 0x73, 0x6b, 0x49, 0x6e, 0x66, 0x6f, 0x73, 0x1a, 0x51, 0x0a, 0x0e, 0x44, 0x69, 0x73, 0x6b, + 0x49, 0x6e, 0x66, 0x6f, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, + 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x29, 0x0a, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x6d, 0x61, + 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x44, 0x69, 0x73, 0x6b, 0x49, 0x6e, 0x66, 0x6f, + 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xfe, 0x01, 0x0a, 0x0c, + 0x54, 0x6f, 0x70, 0x6f, 0x6c, 0x6f, 0x67, 0x79, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x0e, 0x0a, 0x02, + 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x45, 0x0a, 0x11, + 0x64, 0x61, 0x74, 0x61, 0x5f, 0x63, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x6e, 0x66, 0x6f, + 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, + 0x5f, 0x70, 0x62, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x43, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x49, 0x6e, + 0x66, 0x6f, 0x52, 0x0f, 0x64, 0x61, 0x74, 0x61, 0x43, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x49, 0x6e, + 0x66, 0x6f, 0x73, 0x12, 0x44, 0x0a, 0x09, 0x64, 0x69, 0x73, 0x6b, 0x49, 0x6e, 0x66, 0x6f, 0x73, + 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, + 0x70, 0x62, 0x2e, 0x54, 0x6f, 0x70, 0x6f, 0x6c, 0x6f, 0x67, 0x79, 0x49, 0x6e, 0x66, 0x6f, 0x2e, + 0x44, 0x69, 0x73, 0x6b, 0x49, 0x6e, 0x66, 0x6f, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x09, + 0x64, 0x69, 0x73, 0x6b, 0x49, 0x6e, 0x66, 0x6f, 0x73, 0x1a, 0x51, 0x0a, 0x0e, 0x44, 0x69, 0x73, + 0x6b, 0x49, 0x6e, 0x66, 0x6f, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, + 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x29, 0x0a, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x6d, + 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x44, 0x69, 0x73, 0x6b, 0x49, 0x6e, 0x66, + 0x6f, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x13, 0x0a, 0x11, + 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x22, 0x83, 0x01, 0x0a, 0x12, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x4c, 0x69, 0x73, 0x74, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3c, 0x0a, 0x0d, 0x74, 0x6f, 0x70, 0x6f, + 0x6c, 0x6f, 0x67, 0x79, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x17, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x54, 
0x6f, 0x70, 0x6f, + 0x6c, 0x6f, 0x67, 0x79, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x0c, 0x74, 0x6f, 0x70, 0x6f, 0x6c, 0x6f, + 0x67, 0x79, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x2f, 0x0a, 0x14, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, + 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x5f, 0x6d, 0x62, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x04, 0x52, 0x11, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x53, 0x69, 0x7a, 0x65, + 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x4d, 0x62, 0x22, 0x34, 0x0a, 0x15, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, + 0x70, 0x45, 0x63, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x22, 0xfb, 0x01, + 0x0a, 0x16, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x45, 0x63, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, + 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, + 0x75, 0x6d, 0x65, 0x49, 0x64, 0x12, 0x61, 0x0a, 0x12, 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x69, + 0x64, 0x5f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x33, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4c, 0x6f, + 0x6f, 0x6b, 0x75, 0x70, 0x45, 0x63, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x49, 0x64, 0x4c, 0x6f, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x10, 0x73, 0x68, 0x61, 0x72, 0x64, 0x49, 0x64, 0x4c, + 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x61, 0x0a, 0x11, 0x45, 0x63, 0x53, 0x68, + 0x61, 0x72, 0x64, 0x49, 0x64, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x19, 0x0a, + 0x08, 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, + 0x07, 0x73, 0x68, 0x61, 0x72, 0x64, 0x49, 0x64, 0x12, 0x31, 0x0a, 0x09, 0x6c, 0x6f, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x6d, 0x61, + 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x09, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x42, 0x0a, 0x13, 0x56, + 0x61, 0x63, 0x75, 0x75, 0x6d, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x2b, 0x0a, 0x11, 0x67, 0x61, 0x72, 0x62, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x68, + 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x02, 0x52, 0x10, 0x67, + 0x61, 0x72, 0x62, 0x61, 0x67, 0x65, 0x54, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x22, + 0x16, 0x0a, 0x14, 0x56, 0x61, 0x63, 0x75, 0x75, 0x6d, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x1f, 0x0a, 0x1d, 0x47, 0x65, 0x74, 0x4d, 0x61, + 0x73, 0x74, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x92, 0x02, 0x0a, 0x1e, 0x47, 0x65, 0x74, + 0x4d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x27, 0x0a, 0x0f, 0x6d, + 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x41, 0x64, 0x64, + 0x72, 
0x65, 0x73, 0x73, 0x12, 0x38, 0x0a, 0x18, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x5f, + 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x5f, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x16, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x49, + 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x12, 0x44, + 0x0a, 0x10, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e, + 0x64, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, + 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x42, 0x61, 0x63, 0x6b, + 0x65, 0x6e, 0x64, 0x52, 0x0f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x42, 0x61, 0x63, 0x6b, + 0x65, 0x6e, 0x64, 0x73, 0x12, 0x2f, 0x0a, 0x13, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, + 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x12, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x6c, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, + 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x6c, 0x65, 0x61, 0x64, 0x65, 0x72, 0x22, 0x3b, 0x0a, + 0x18, 0x4c, 0x69, 0x73, 0x74, 0x4d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x43, 0x6c, 0x69, 0x65, 0x6e, + 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x63, 0x6c, 0x69, + 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, + 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x22, 0x42, 0x0a, 0x19, 0x4c, 0x69, + 0x73, 0x74, 0x4d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x73, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x67, 0x72, 0x70, 0x63, 0x5f, + 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, + 0x0d, 0x67, 0x72, 0x70, 0x63, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x22, 0xab, + 0x01, 0x0a, 0x16, 0x4c, 0x65, 0x61, 0x73, 0x65, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x54, 0x6f, 0x6b, + 0x65, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x25, 0x0a, 0x0e, 0x70, 0x72, 0x65, + 0x76, 0x69, 0x6f, 0x75, 0x73, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x03, 0x52, 0x0d, 0x70, 0x72, 0x65, 0x76, 0x69, 0x6f, 0x75, 0x73, 0x54, 0x6f, 0x6b, 0x65, 0x6e, + 0x12, 0x2c, 0x0a, 0x12, 0x70, 0x72, 0x65, 0x76, 0x69, 0x6f, 0x75, 0x73, 0x5f, 0x6c, 0x6f, 0x63, + 0x6b, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x10, 0x70, 0x72, + 0x65, 0x76, 0x69, 0x6f, 0x75, 0x73, 0x4c, 0x6f, 0x63, 0x6b, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x1b, + 0x0a, 0x09, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x6b, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x63, + 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0a, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0x4d, 0x0a, 0x17, + 0x4c, 0x65, 0x61, 0x73, 0x65, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x1c, 0x0a, + 0x0a, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x74, 0x73, 0x5f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x03, 0x52, 0x08, 0x6c, 0x6f, 
0x63, 0x6b, 0x54, 0x73, 0x4e, 0x73, 0x22, 0x8c, 0x01, 0x0a, 0x18, + 0x52, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x54, 0x6f, 0x6b, 0x65, + 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x25, 0x0a, 0x0e, 0x70, 0x72, 0x65, 0x76, + 0x69, 0x6f, 0x75, 0x73, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, + 0x52, 0x0d, 0x70, 0x72, 0x65, 0x76, 0x69, 0x6f, 0x75, 0x73, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, + 0x2c, 0x0a, 0x12, 0x70, 0x72, 0x65, 0x76, 0x69, 0x6f, 0x75, 0x73, 0x5f, 0x6c, 0x6f, 0x63, 0x6b, + 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x10, 0x70, 0x72, 0x65, + 0x76, 0x69, 0x6f, 0x75, 0x73, 0x4c, 0x6f, 0x63, 0x6b, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x1b, 0x0a, + 0x09, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x6b, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0x1b, 0x0a, 0x19, 0x52, 0x65, + 0x6c, 0x65, 0x61, 0x73, 0x65, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x32, 0xca, 0x09, 0x0a, 0x07, 0x53, 0x65, 0x61, 0x77, + 0x65, 0x65, 0x64, 0x12, 0x49, 0x0a, 0x0d, 0x53, 0x65, 0x6e, 0x64, 0x48, 0x65, 0x61, 0x72, 0x74, + 0x62, 0x65, 0x61, 0x74, 0x12, 0x14, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, + 0x2e, 0x48, 0x65, 0x61, 0x72, 0x74, 0x62, 0x65, 0x61, 0x74, 0x1a, 0x1c, 0x2e, 0x6d, 0x61, 0x73, + 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x48, 0x65, 0x61, 0x72, 0x74, 0x62, 0x65, 0x61, 0x74, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x28, 0x01, 0x30, 0x01, 0x12, 0x51, + 0x0a, 0x0d, 0x4b, 0x65, 0x65, 0x70, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x65, 0x64, 0x12, + 0x1f, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4b, 0x65, 0x65, 0x70, + 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x19, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, + 0x75, 0x6d, 0x65, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x00, 0x28, 0x01, 0x30, + 0x01, 0x12, 0x51, 0x0a, 0x0c, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x56, 0x6f, 0x6c, 0x75, 0x6d, + 0x65, 0x12, 0x1e, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4c, 0x6f, + 0x6f, 0x6b, 0x75, 0x70, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x1f, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4c, 0x6f, + 0x6f, 0x6b, 0x75, 0x70, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x22, 0x00, 0x12, 0x3f, 0x0a, 0x06, 0x41, 0x73, 0x73, 0x69, 0x67, 0x6e, 0x12, 0x18, + 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x41, 0x73, 0x73, 0x69, 0x67, + 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, + 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x41, 0x73, 0x73, 0x69, 0x67, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x4b, 0x0a, 0x0a, 0x53, 0x74, 0x61, 0x74, 0x69, 0x73, 0x74, + 0x69, 0x63, 0x73, 0x12, 0x1c, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, + 0x53, 0x74, 0x61, 0x74, 0x69, 0x73, 0x74, 0x69, 0x63, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x1d, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x53, 0x74, + 0x61, 0x74, 0x69, 0x73, 0x74, 0x69, 0x63, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x22, 0x00, 0x12, 0x57, 0x0a, 0x0e, 0x43, 0x6f, 0x6c, 
0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x4c, 0x69, 0x73, 0x74, 0x12, 0x20, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, + 0x2e, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x69, 0x73, 0x74, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, + 0x70, 0x62, 0x2e, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x69, 0x73, + 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x5d, 0x0a, 0x10, 0x43, + 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x12, + 0x22, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x43, 0x6f, 0x6c, 0x6c, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, + 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x4b, 0x0a, 0x0a, 0x56, 0x6f, + 0x6c, 0x75, 0x6d, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x1c, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, + 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, + 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x57, 0x0a, 0x0e, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, + 0x70, 0x45, 0x63, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x12, 0x20, 0x2e, 0x6d, 0x61, 0x73, 0x74, + 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x45, 0x63, 0x56, 0x6f, + 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x6d, 0x61, + 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x45, 0x63, + 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, + 0x12, 0x51, 0x0a, 0x0c, 0x56, 0x61, 0x63, 0x75, 0x75, 0x6d, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, + 0x12, 0x1e, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x61, 0x63, + 0x75, 0x75, 0x6d, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x1f, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x61, 0x63, + 0x75, 0x75, 0x6d, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x22, 0x00, 0x12, 0x6f, 0x0a, 0x16, 0x47, 0x65, 0x74, 0x4d, 0x61, 0x73, 0x74, 0x65, 0x72, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x28, 0x2e, + 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x47, 0x65, 0x74, 0x4d, 0x61, 0x73, + 0x74, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, + 0x5f, 0x70, 0x62, 0x2e, 0x47, 0x65, 0x74, 0x4d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x22, 0x00, 0x12, 0x60, 0x0a, 0x11, 0x4c, 0x69, 0x73, 0x74, 0x4d, 0x61, 0x73, 0x74, + 0x65, 0x72, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x23, 0x2e, 0x6d, 0x61, 0x73, 0x74, + 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4d, 0x61, 0x73, 
0x74, 0x65, 0x72, + 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x24, + 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4d, + 0x61, 0x73, 0x74, 0x65, 0x72, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x5a, 0x0a, 0x0f, 0x4c, 0x65, 0x61, 0x73, 0x65, 0x41, + 0x64, 0x6d, 0x69, 0x6e, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x21, 0x2e, 0x6d, 0x61, 0x73, 0x74, + 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4c, 0x65, 0x61, 0x73, 0x65, 0x41, 0x64, 0x6d, 0x69, 0x6e, + 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x6d, + 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4c, 0x65, 0x61, 0x73, 0x65, 0x41, 0x64, + 0x6d, 0x69, 0x6e, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x22, 0x00, 0x12, 0x60, 0x0a, 0x11, 0x52, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x41, 0x64, 0x6d, + 0x69, 0x6e, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x23, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, + 0x5f, 0x70, 0x62, 0x2e, 0x52, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x41, 0x64, 0x6d, 0x69, 0x6e, + 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x24, 0x2e, 0x6d, + 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x52, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, + 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x22, 0x00, 0x42, 0x32, 0x5a, 0x30, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, + 0x6f, 0x6d, 0x2f, 0x63, 0x68, 0x72, 0x69, 0x73, 0x6c, 0x75, 0x73, 0x66, 0x2f, 0x73, 0x65, 0x61, + 0x77, 0x65, 0x65, 0x64, 0x66, 0x73, 0x2f, 0x77, 0x65, 0x65, 0x64, 0x2f, 0x70, 0x62, 0x2f, 0x6d, + 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_master_proto_rawDescOnce sync.Once + file_master_proto_rawDescData = file_master_proto_rawDesc +) + +func file_master_proto_rawDescGZIP() []byte { + file_master_proto_rawDescOnce.Do(func() { + file_master_proto_rawDescData = protoimpl.X.CompressGZIP(file_master_proto_rawDescData) + }) + return file_master_proto_rawDescData +} + +var file_master_proto_msgTypes = make([]protoimpl.MessageInfo, 50) +var file_master_proto_goTypes = []interface{}{ + (*Heartbeat)(nil), // 0: master_pb.Heartbeat + (*HeartbeatResponse)(nil), // 1: master_pb.HeartbeatResponse + (*VolumeInformationMessage)(nil), // 2: master_pb.VolumeInformationMessage + (*VolumeShortInformationMessage)(nil), // 3: master_pb.VolumeShortInformationMessage + (*VolumeEcShardInformationMessage)(nil), // 4: master_pb.VolumeEcShardInformationMessage + (*StorageBackend)(nil), // 5: master_pb.StorageBackend + (*Empty)(nil), // 6: master_pb.Empty + (*SuperBlockExtra)(nil), // 7: master_pb.SuperBlockExtra + (*KeepConnectedRequest)(nil), // 8: master_pb.KeepConnectedRequest + (*VolumeLocation)(nil), // 9: master_pb.VolumeLocation + (*LookupVolumeRequest)(nil), // 10: master_pb.LookupVolumeRequest + (*LookupVolumeResponse)(nil), // 11: master_pb.LookupVolumeResponse + (*Location)(nil), // 12: master_pb.Location + (*AssignRequest)(nil), // 13: master_pb.AssignRequest + (*AssignResponse)(nil), // 14: master_pb.AssignResponse + (*StatisticsRequest)(nil), // 15: master_pb.StatisticsRequest + (*StatisticsResponse)(nil), // 16: master_pb.StatisticsResponse + (*Collection)(nil), // 17: master_pb.Collection + (*CollectionListRequest)(nil), // 18: master_pb.CollectionListRequest + 
(*CollectionListResponse)(nil), // 19: master_pb.CollectionListResponse + (*CollectionDeleteRequest)(nil), // 20: master_pb.CollectionDeleteRequest + (*CollectionDeleteResponse)(nil), // 21: master_pb.CollectionDeleteResponse + (*DiskInfo)(nil), // 22: master_pb.DiskInfo + (*DataNodeInfo)(nil), // 23: master_pb.DataNodeInfo + (*RackInfo)(nil), // 24: master_pb.RackInfo + (*DataCenterInfo)(nil), // 25: master_pb.DataCenterInfo + (*TopologyInfo)(nil), // 26: master_pb.TopologyInfo + (*VolumeListRequest)(nil), // 27: master_pb.VolumeListRequest + (*VolumeListResponse)(nil), // 28: master_pb.VolumeListResponse + (*LookupEcVolumeRequest)(nil), // 29: master_pb.LookupEcVolumeRequest + (*LookupEcVolumeResponse)(nil), // 30: master_pb.LookupEcVolumeResponse + (*VacuumVolumeRequest)(nil), // 31: master_pb.VacuumVolumeRequest + (*VacuumVolumeResponse)(nil), // 32: master_pb.VacuumVolumeResponse + (*GetMasterConfigurationRequest)(nil), // 33: master_pb.GetMasterConfigurationRequest + (*GetMasterConfigurationResponse)(nil), // 34: master_pb.GetMasterConfigurationResponse + (*ListMasterClientsRequest)(nil), // 35: master_pb.ListMasterClientsRequest + (*ListMasterClientsResponse)(nil), // 36: master_pb.ListMasterClientsResponse + (*LeaseAdminTokenRequest)(nil), // 37: master_pb.LeaseAdminTokenRequest + (*LeaseAdminTokenResponse)(nil), // 38: master_pb.LeaseAdminTokenResponse + (*ReleaseAdminTokenRequest)(nil), // 39: master_pb.ReleaseAdminTokenRequest + (*ReleaseAdminTokenResponse)(nil), // 40: master_pb.ReleaseAdminTokenResponse + nil, // 41: master_pb.Heartbeat.MaxVolumeCountsEntry + nil, // 42: master_pb.StorageBackend.PropertiesEntry + (*SuperBlockExtra_ErasureCoding)(nil), // 43: master_pb.SuperBlockExtra.ErasureCoding + (*LookupVolumeResponse_VolumeIdLocation)(nil), // 44: master_pb.LookupVolumeResponse.VolumeIdLocation + nil, // 45: master_pb.DataNodeInfo.DiskInfosEntry + nil, // 46: master_pb.RackInfo.DiskInfosEntry + nil, // 47: master_pb.DataCenterInfo.DiskInfosEntry + nil, // 48: master_pb.TopologyInfo.DiskInfosEntry + (*LookupEcVolumeResponse_EcShardIdLocation)(nil), // 49: master_pb.LookupEcVolumeResponse.EcShardIdLocation +} +var file_master_proto_depIdxs = []int32{ + 2, // 0: master_pb.Heartbeat.volumes:type_name -> master_pb.VolumeInformationMessage + 3, // 1: master_pb.Heartbeat.new_volumes:type_name -> master_pb.VolumeShortInformationMessage + 3, // 2: master_pb.Heartbeat.deleted_volumes:type_name -> master_pb.VolumeShortInformationMessage + 4, // 3: master_pb.Heartbeat.ec_shards:type_name -> master_pb.VolumeEcShardInformationMessage + 4, // 4: master_pb.Heartbeat.new_ec_shards:type_name -> master_pb.VolumeEcShardInformationMessage + 4, // 5: master_pb.Heartbeat.deleted_ec_shards:type_name -> master_pb.VolumeEcShardInformationMessage + 41, // 6: master_pb.Heartbeat.max_volume_counts:type_name -> master_pb.Heartbeat.MaxVolumeCountsEntry + 5, // 7: master_pb.HeartbeatResponse.storage_backends:type_name -> master_pb.StorageBackend + 42, // 8: master_pb.StorageBackend.properties:type_name -> master_pb.StorageBackend.PropertiesEntry + 43, // 9: master_pb.SuperBlockExtra.erasure_coding:type_name -> master_pb.SuperBlockExtra.ErasureCoding + 44, // 10: master_pb.LookupVolumeResponse.volume_id_locations:type_name -> master_pb.LookupVolumeResponse.VolumeIdLocation + 17, // 11: master_pb.CollectionListResponse.collections:type_name -> master_pb.Collection + 2, // 12: master_pb.DiskInfo.volume_infos:type_name -> master_pb.VolumeInformationMessage + 4, // 13: 
master_pb.DiskInfo.ec_shard_infos:type_name -> master_pb.VolumeEcShardInformationMessage + 45, // 14: master_pb.DataNodeInfo.diskInfos:type_name -> master_pb.DataNodeInfo.DiskInfosEntry + 23, // 15: master_pb.RackInfo.data_node_infos:type_name -> master_pb.DataNodeInfo + 46, // 16: master_pb.RackInfo.diskInfos:type_name -> master_pb.RackInfo.DiskInfosEntry + 24, // 17: master_pb.DataCenterInfo.rack_infos:type_name -> master_pb.RackInfo + 47, // 18: master_pb.DataCenterInfo.diskInfos:type_name -> master_pb.DataCenterInfo.DiskInfosEntry + 25, // 19: master_pb.TopologyInfo.data_center_infos:type_name -> master_pb.DataCenterInfo + 48, // 20: master_pb.TopologyInfo.diskInfos:type_name -> master_pb.TopologyInfo.DiskInfosEntry + 26, // 21: master_pb.VolumeListResponse.topology_info:type_name -> master_pb.TopologyInfo + 49, // 22: master_pb.LookupEcVolumeResponse.shard_id_locations:type_name -> master_pb.LookupEcVolumeResponse.EcShardIdLocation + 5, // 23: master_pb.GetMasterConfigurationResponse.storage_backends:type_name -> master_pb.StorageBackend + 12, // 24: master_pb.LookupVolumeResponse.VolumeIdLocation.locations:type_name -> master_pb.Location + 22, // 25: master_pb.DataNodeInfo.DiskInfosEntry.value:type_name -> master_pb.DiskInfo + 22, // 26: master_pb.RackInfo.DiskInfosEntry.value:type_name -> master_pb.DiskInfo + 22, // 27: master_pb.DataCenterInfo.DiskInfosEntry.value:type_name -> master_pb.DiskInfo + 22, // 28: master_pb.TopologyInfo.DiskInfosEntry.value:type_name -> master_pb.DiskInfo + 12, // 29: master_pb.LookupEcVolumeResponse.EcShardIdLocation.locations:type_name -> master_pb.Location + 0, // 30: master_pb.Seaweed.SendHeartbeat:input_type -> master_pb.Heartbeat + 8, // 31: master_pb.Seaweed.KeepConnected:input_type -> master_pb.KeepConnectedRequest + 10, // 32: master_pb.Seaweed.LookupVolume:input_type -> master_pb.LookupVolumeRequest + 13, // 33: master_pb.Seaweed.Assign:input_type -> master_pb.AssignRequest + 15, // 34: master_pb.Seaweed.Statistics:input_type -> master_pb.StatisticsRequest + 18, // 35: master_pb.Seaweed.CollectionList:input_type -> master_pb.CollectionListRequest + 20, // 36: master_pb.Seaweed.CollectionDelete:input_type -> master_pb.CollectionDeleteRequest + 27, // 37: master_pb.Seaweed.VolumeList:input_type -> master_pb.VolumeListRequest + 29, // 38: master_pb.Seaweed.LookupEcVolume:input_type -> master_pb.LookupEcVolumeRequest + 31, // 39: master_pb.Seaweed.VacuumVolume:input_type -> master_pb.VacuumVolumeRequest + 33, // 40: master_pb.Seaweed.GetMasterConfiguration:input_type -> master_pb.GetMasterConfigurationRequest + 35, // 41: master_pb.Seaweed.ListMasterClients:input_type -> master_pb.ListMasterClientsRequest + 37, // 42: master_pb.Seaweed.LeaseAdminToken:input_type -> master_pb.LeaseAdminTokenRequest + 39, // 43: master_pb.Seaweed.ReleaseAdminToken:input_type -> master_pb.ReleaseAdminTokenRequest + 1, // 44: master_pb.Seaweed.SendHeartbeat:output_type -> master_pb.HeartbeatResponse + 9, // 45: master_pb.Seaweed.KeepConnected:output_type -> master_pb.VolumeLocation + 11, // 46: master_pb.Seaweed.LookupVolume:output_type -> master_pb.LookupVolumeResponse + 14, // 47: master_pb.Seaweed.Assign:output_type -> master_pb.AssignResponse + 16, // 48: master_pb.Seaweed.Statistics:output_type -> master_pb.StatisticsResponse + 19, // 49: master_pb.Seaweed.CollectionList:output_type -> master_pb.CollectionListResponse + 21, // 50: master_pb.Seaweed.CollectionDelete:output_type -> master_pb.CollectionDeleteResponse + 28, // 51: 
master_pb.Seaweed.VolumeList:output_type -> master_pb.VolumeListResponse + 30, // 52: master_pb.Seaweed.LookupEcVolume:output_type -> master_pb.LookupEcVolumeResponse + 32, // 53: master_pb.Seaweed.VacuumVolume:output_type -> master_pb.VacuumVolumeResponse + 34, // 54: master_pb.Seaweed.GetMasterConfiguration:output_type -> master_pb.GetMasterConfigurationResponse + 36, // 55: master_pb.Seaweed.ListMasterClients:output_type -> master_pb.ListMasterClientsResponse + 38, // 56: master_pb.Seaweed.LeaseAdminToken:output_type -> master_pb.LeaseAdminTokenResponse + 40, // 57: master_pb.Seaweed.ReleaseAdminToken:output_type -> master_pb.ReleaseAdminTokenResponse + 44, // [44:58] is the sub-list for method output_type + 30, // [30:44] is the sub-list for method input_type + 30, // [30:30] is the sub-list for extension type_name + 30, // [30:30] is the sub-list for extension extendee + 0, // [0:30] is the sub-list for field type_name +} + +func init() { file_master_proto_init() } +func file_master_proto_init() { + if File_master_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_master_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Heartbeat); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_master_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*HeartbeatResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_master_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeInformationMessage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_master_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeShortInformationMessage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_master_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeEcShardInformationMessage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_master_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*StorageBackend); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_master_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Empty); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_master_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SuperBlockExtra); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_master_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*KeepConnectedRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_master_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeLocation); i { + case 0: + return &v.state + case 1: + 
return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_master_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LookupVolumeRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_master_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LookupVolumeResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_master_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Location); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_master_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AssignRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_master_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AssignResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_master_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*StatisticsRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_master_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*StatisticsResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_master_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Collection); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_master_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CollectionListRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_master_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CollectionListResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_master_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CollectionDeleteRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_master_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CollectionDeleteResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_master_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DiskInfo); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_master_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DataNodeInfo); i { + case 0: + return &v.state + 
case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_master_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RackInfo); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_master_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DataCenterInfo); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_master_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TopologyInfo); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_master_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeListRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_master_proto_msgTypes[28].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeListResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_master_proto_msgTypes[29].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LookupEcVolumeRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_master_proto_msgTypes[30].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LookupEcVolumeResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_master_proto_msgTypes[31].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VacuumVolumeRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_master_proto_msgTypes[32].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VacuumVolumeResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_master_proto_msgTypes[33].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetMasterConfigurationRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_master_proto_msgTypes[34].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetMasterConfigurationResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_master_proto_msgTypes[35].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListMasterClientsRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_master_proto_msgTypes[36].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListMasterClientsResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_master_proto_msgTypes[37].Exporter = func(v interface{}, i int) interface{} { + switch v := 
v.(*LeaseAdminTokenRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_master_proto_msgTypes[38].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LeaseAdminTokenResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_master_proto_msgTypes[39].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ReleaseAdminTokenRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_master_proto_msgTypes[40].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ReleaseAdminTokenResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_master_proto_msgTypes[43].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SuperBlockExtra_ErasureCoding); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_master_proto_msgTypes[44].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LookupVolumeResponse_VolumeIdLocation); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_master_proto_msgTypes[49].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LookupEcVolumeResponse_EcShardIdLocation); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_master_proto_rawDesc, + NumEnums: 0, + NumMessages: 50, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_master_proto_goTypes, + DependencyIndexes: file_master_proto_depIdxs, + MessageInfos: file_master_proto_msgTypes, + }.Build() + File_master_proto = out.File + file_master_proto_rawDesc = nil + file_master_proto_goTypes = nil + file_master_proto_depIdxs = nil } // Reference imports to suppress errors if they are not otherwise used. var _ context.Context -var _ grpc.ClientConn +var _ grpc.ClientConnInterface // This is a compile-time assertion to ensure that this generated file // is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 - -// Client API for Seaweed service +const _ = grpc.SupportPackageIsVersion6 +// SeaweedClient is the client API for Seaweed service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. 
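+//
+// A minimal usage sketch (illustrative only; the dial target, options, and
+// the Count value are assumptions, not part of the generated code):
+//
+//	conn, _ := grpc.Dial("localhost:19333", grpc.WithInsecure())
+//	client := NewSeaweedClient(conn) // *grpc.ClientConn satisfies grpc.ClientConnInterface
+//	resp, err := client.Assign(context.Background(), &AssignRequest{Count: 1})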
type SeaweedClient interface { SendHeartbeat(ctx context.Context, opts ...grpc.CallOption) (Seaweed_SendHeartbeatClient, error) KeepConnected(ctx context.Context, opts ...grpc.CallOption) (Seaweed_KeepConnectedClient, error) @@ -1490,19 +4093,23 @@ type SeaweedClient interface { CollectionDelete(ctx context.Context, in *CollectionDeleteRequest, opts ...grpc.CallOption) (*CollectionDeleteResponse, error) VolumeList(ctx context.Context, in *VolumeListRequest, opts ...grpc.CallOption) (*VolumeListResponse, error) LookupEcVolume(ctx context.Context, in *LookupEcVolumeRequest, opts ...grpc.CallOption) (*LookupEcVolumeResponse, error) + VacuumVolume(ctx context.Context, in *VacuumVolumeRequest, opts ...grpc.CallOption) (*VacuumVolumeResponse, error) GetMasterConfiguration(ctx context.Context, in *GetMasterConfigurationRequest, opts ...grpc.CallOption) (*GetMasterConfigurationResponse, error) + ListMasterClients(ctx context.Context, in *ListMasterClientsRequest, opts ...grpc.CallOption) (*ListMasterClientsResponse, error) + LeaseAdminToken(ctx context.Context, in *LeaseAdminTokenRequest, opts ...grpc.CallOption) (*LeaseAdminTokenResponse, error) + ReleaseAdminToken(ctx context.Context, in *ReleaseAdminTokenRequest, opts ...grpc.CallOption) (*ReleaseAdminTokenResponse, error) } type seaweedClient struct { - cc *grpc.ClientConn + cc grpc.ClientConnInterface } -func NewSeaweedClient(cc *grpc.ClientConn) SeaweedClient { +func NewSeaweedClient(cc grpc.ClientConnInterface) SeaweedClient { return &seaweedClient{cc} } func (c *seaweedClient) SendHeartbeat(ctx context.Context, opts ...grpc.CallOption) (Seaweed_SendHeartbeatClient, error) { - stream, err := grpc.NewClientStream(ctx, &_Seaweed_serviceDesc.Streams[0], c.cc, "/master_pb.Seaweed/SendHeartbeat", opts...) + stream, err := c.cc.NewStream(ctx, &_Seaweed_serviceDesc.Streams[0], "/master_pb.Seaweed/SendHeartbeat", opts...) if err != nil { return nil, err } @@ -1533,7 +4140,7 @@ func (x *seaweedSendHeartbeatClient) Recv() (*HeartbeatResponse, error) { } func (c *seaweedClient) KeepConnected(ctx context.Context, opts ...grpc.CallOption) (Seaweed_KeepConnectedClient, error) { - stream, err := grpc.NewClientStream(ctx, &_Seaweed_serviceDesc.Streams[1], c.cc, "/master_pb.Seaweed/KeepConnected", opts...) + stream, err := c.cc.NewStream(ctx, &_Seaweed_serviceDesc.Streams[1], "/master_pb.Seaweed/KeepConnected", opts...) if err != nil { return nil, err } @@ -1565,7 +4172,7 @@ func (x *seaweedKeepConnectedClient) Recv() (*VolumeLocation, error) { func (c *seaweedClient) LookupVolume(ctx context.Context, in *LookupVolumeRequest, opts ...grpc.CallOption) (*LookupVolumeResponse, error) { out := new(LookupVolumeResponse) - err := grpc.Invoke(ctx, "/master_pb.Seaweed/LookupVolume", in, out, c.cc, opts...) + err := c.cc.Invoke(ctx, "/master_pb.Seaweed/LookupVolume", in, out, opts...) if err != nil { return nil, err } @@ -1574,7 +4181,7 @@ func (c *seaweedClient) LookupVolume(ctx context.Context, in *LookupVolumeReques func (c *seaweedClient) Assign(ctx context.Context, in *AssignRequest, opts ...grpc.CallOption) (*AssignResponse, error) { out := new(AssignResponse) - err := grpc.Invoke(ctx, "/master_pb.Seaweed/Assign", in, out, c.cc, opts...) + err := c.cc.Invoke(ctx, "/master_pb.Seaweed/Assign", in, out, opts...) 
if err != nil { return nil, err } @@ -1583,7 +4190,7 @@ func (c *seaweedClient) Assign(ctx context.Context, in *AssignRequest, opts ...g func (c *seaweedClient) Statistics(ctx context.Context, in *StatisticsRequest, opts ...grpc.CallOption) (*StatisticsResponse, error) { out := new(StatisticsResponse) - err := grpc.Invoke(ctx, "/master_pb.Seaweed/Statistics", in, out, c.cc, opts...) + err := c.cc.Invoke(ctx, "/master_pb.Seaweed/Statistics", in, out, opts...) if err != nil { return nil, err } @@ -1592,7 +4199,7 @@ func (c *seaweedClient) Statistics(ctx context.Context, in *StatisticsRequest, o func (c *seaweedClient) CollectionList(ctx context.Context, in *CollectionListRequest, opts ...grpc.CallOption) (*CollectionListResponse, error) { out := new(CollectionListResponse) - err := grpc.Invoke(ctx, "/master_pb.Seaweed/CollectionList", in, out, c.cc, opts...) + err := c.cc.Invoke(ctx, "/master_pb.Seaweed/CollectionList", in, out, opts...) if err != nil { return nil, err } @@ -1601,7 +4208,7 @@ func (c *seaweedClient) CollectionList(ctx context.Context, in *CollectionListRe func (c *seaweedClient) CollectionDelete(ctx context.Context, in *CollectionDeleteRequest, opts ...grpc.CallOption) (*CollectionDeleteResponse, error) { out := new(CollectionDeleteResponse) - err := grpc.Invoke(ctx, "/master_pb.Seaweed/CollectionDelete", in, out, c.cc, opts...) + err := c.cc.Invoke(ctx, "/master_pb.Seaweed/CollectionDelete", in, out, opts...) if err != nil { return nil, err } @@ -1610,7 +4217,7 @@ func (c *seaweedClient) CollectionDelete(ctx context.Context, in *CollectionDele func (c *seaweedClient) VolumeList(ctx context.Context, in *VolumeListRequest, opts ...grpc.CallOption) (*VolumeListResponse, error) { out := new(VolumeListResponse) - err := grpc.Invoke(ctx, "/master_pb.Seaweed/VolumeList", in, out, c.cc, opts...) + err := c.cc.Invoke(ctx, "/master_pb.Seaweed/VolumeList", in, out, opts...) if err != nil { return nil, err } @@ -1619,7 +4226,16 @@ func (c *seaweedClient) VolumeList(ctx context.Context, in *VolumeListRequest, o func (c *seaweedClient) LookupEcVolume(ctx context.Context, in *LookupEcVolumeRequest, opts ...grpc.CallOption) (*LookupEcVolumeResponse, error) { out := new(LookupEcVolumeResponse) - err := grpc.Invoke(ctx, "/master_pb.Seaweed/LookupEcVolume", in, out, c.cc, opts...) + err := c.cc.Invoke(ctx, "/master_pb.Seaweed/LookupEcVolume", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *seaweedClient) VacuumVolume(ctx context.Context, in *VacuumVolumeRequest, opts ...grpc.CallOption) (*VacuumVolumeResponse, error) { + out := new(VacuumVolumeResponse) + err := c.cc.Invoke(ctx, "/master_pb.Seaweed/VacuumVolume", in, out, opts...) if err != nil { return nil, err } @@ -1628,15 +4244,41 @@ func (c *seaweedClient) LookupEcVolume(ctx context.Context, in *LookupEcVolumeRe func (c *seaweedClient) GetMasterConfiguration(ctx context.Context, in *GetMasterConfigurationRequest, opts ...grpc.CallOption) (*GetMasterConfigurationResponse, error) { out := new(GetMasterConfigurationResponse) - err := grpc.Invoke(ctx, "/master_pb.Seaweed/GetMasterConfiguration", in, out, c.cc, opts...) + err := c.cc.Invoke(ctx, "/master_pb.Seaweed/GetMasterConfiguration", in, out, opts...) 
if err != nil { return nil, err } return out, nil } -// Server API for Seaweed service +func (c *seaweedClient) ListMasterClients(ctx context.Context, in *ListMasterClientsRequest, opts ...grpc.CallOption) (*ListMasterClientsResponse, error) { + out := new(ListMasterClientsResponse) + err := c.cc.Invoke(ctx, "/master_pb.Seaweed/ListMasterClients", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *seaweedClient) LeaseAdminToken(ctx context.Context, in *LeaseAdminTokenRequest, opts ...grpc.CallOption) (*LeaseAdminTokenResponse, error) { + out := new(LeaseAdminTokenResponse) + err := c.cc.Invoke(ctx, "/master_pb.Seaweed/LeaseAdminToken", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *seaweedClient) ReleaseAdminToken(ctx context.Context, in *ReleaseAdminTokenRequest, opts ...grpc.CallOption) (*ReleaseAdminTokenResponse, error) { + out := new(ReleaseAdminTokenResponse) + err := c.cc.Invoke(ctx, "/master_pb.Seaweed/ReleaseAdminToken", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} +// SeaweedServer is the server API for Seaweed service. type SeaweedServer interface { SendHeartbeat(Seaweed_SendHeartbeatServer) error KeepConnected(Seaweed_KeepConnectedServer) error @@ -1647,7 +4289,58 @@ type SeaweedServer interface { CollectionDelete(context.Context, *CollectionDeleteRequest) (*CollectionDeleteResponse, error) VolumeList(context.Context, *VolumeListRequest) (*VolumeListResponse, error) LookupEcVolume(context.Context, *LookupEcVolumeRequest) (*LookupEcVolumeResponse, error) + VacuumVolume(context.Context, *VacuumVolumeRequest) (*VacuumVolumeResponse, error) GetMasterConfiguration(context.Context, *GetMasterConfigurationRequest) (*GetMasterConfigurationResponse, error) + ListMasterClients(context.Context, *ListMasterClientsRequest) (*ListMasterClientsResponse, error) + LeaseAdminToken(context.Context, *LeaseAdminTokenRequest) (*LeaseAdminTokenResponse, error) + ReleaseAdminToken(context.Context, *ReleaseAdminTokenRequest) (*ReleaseAdminTokenResponse, error) +} + +// UnimplementedSeaweedServer can be embedded to have forward compatible implementations. 
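+// A partial implementation embeds it so that RPCs added later (as VacuumVolume,
+// ListMasterClients, LeaseAdminToken, and ReleaseAdminToken are in this change)
+// fail with codes.Unimplemented instead of breaking the build. Hedged sketch,
+// with myMaster as a hypothetical name:
+//
+//	type myMaster struct{ UnimplementedSeaweedServer }
+//	func (m *myMaster) Assign(ctx context.Context, req *AssignRequest) (*AssignResponse, error) {
+//		return &AssignResponse{}, nil // sketch only
+//	}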
+type UnimplementedSeaweedServer struct { +} + +func (*UnimplementedSeaweedServer) SendHeartbeat(Seaweed_SendHeartbeatServer) error { + return status.Errorf(codes.Unimplemented, "method SendHeartbeat not implemented") +} +func (*UnimplementedSeaweedServer) KeepConnected(Seaweed_KeepConnectedServer) error { + return status.Errorf(codes.Unimplemented, "method KeepConnected not implemented") +} +func (*UnimplementedSeaweedServer) LookupVolume(context.Context, *LookupVolumeRequest) (*LookupVolumeResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method LookupVolume not implemented") +} +func (*UnimplementedSeaweedServer) Assign(context.Context, *AssignRequest) (*AssignResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Assign not implemented") +} +func (*UnimplementedSeaweedServer) Statistics(context.Context, *StatisticsRequest) (*StatisticsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Statistics not implemented") +} +func (*UnimplementedSeaweedServer) CollectionList(context.Context, *CollectionListRequest) (*CollectionListResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method CollectionList not implemented") +} +func (*UnimplementedSeaweedServer) CollectionDelete(context.Context, *CollectionDeleteRequest) (*CollectionDeleteResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method CollectionDelete not implemented") +} +func (*UnimplementedSeaweedServer) VolumeList(context.Context, *VolumeListRequest) (*VolumeListResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method VolumeList not implemented") +} +func (*UnimplementedSeaweedServer) LookupEcVolume(context.Context, *LookupEcVolumeRequest) (*LookupEcVolumeResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method LookupEcVolume not implemented") +} +func (*UnimplementedSeaweedServer) VacuumVolume(context.Context, *VacuumVolumeRequest) (*VacuumVolumeResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method VacuumVolume not implemented") +} +func (*UnimplementedSeaweedServer) GetMasterConfiguration(context.Context, *GetMasterConfigurationRequest) (*GetMasterConfigurationResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetMasterConfiguration not implemented") +} +func (*UnimplementedSeaweedServer) ListMasterClients(context.Context, *ListMasterClientsRequest) (*ListMasterClientsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ListMasterClients not implemented") +} +func (*UnimplementedSeaweedServer) LeaseAdminToken(context.Context, *LeaseAdminTokenRequest) (*LeaseAdminTokenResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method LeaseAdminToken not implemented") +} +func (*UnimplementedSeaweedServer) ReleaseAdminToken(context.Context, *ReleaseAdminTokenRequest) (*ReleaseAdminTokenResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ReleaseAdminToken not implemented") } func RegisterSeaweedServer(s *grpc.Server, srv SeaweedServer) { @@ -1832,6 +4525,24 @@ func _Seaweed_LookupEcVolume_Handler(srv interface{}, ctx context.Context, dec f return interceptor(ctx, in, info, handler) } +func _Seaweed_VacuumVolume_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(VacuumVolumeRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return 
srv.(SeaweedServer).VacuumVolume(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/master_pb.Seaweed/VacuumVolume", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SeaweedServer).VacuumVolume(ctx, req.(*VacuumVolumeRequest)) + } + return interceptor(ctx, in, info, handler) +} + func _Seaweed_GetMasterConfiguration_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(GetMasterConfigurationRequest) if err := dec(in); err != nil { @@ -1850,6 +4561,60 @@ func _Seaweed_GetMasterConfiguration_Handler(srv interface{}, ctx context.Contex return interceptor(ctx, in, info, handler) } +func _Seaweed_ListMasterClients_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListMasterClientsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SeaweedServer).ListMasterClients(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/master_pb.Seaweed/ListMasterClients", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SeaweedServer).ListMasterClients(ctx, req.(*ListMasterClientsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Seaweed_LeaseAdminToken_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(LeaseAdminTokenRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SeaweedServer).LeaseAdminToken(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/master_pb.Seaweed/LeaseAdminToken", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SeaweedServer).LeaseAdminToken(ctx, req.(*LeaseAdminTokenRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Seaweed_ReleaseAdminToken_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ReleaseAdminTokenRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SeaweedServer).ReleaseAdminToken(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/master_pb.Seaweed/ReleaseAdminToken", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SeaweedServer).ReleaseAdminToken(ctx, req.(*ReleaseAdminTokenRequest)) + } + return interceptor(ctx, in, info, handler) +} + var _Seaweed_serviceDesc = grpc.ServiceDesc{ ServiceName: "master_pb.Seaweed", HandlerType: (*SeaweedServer)(nil), @@ -1882,10 +4647,26 @@ var _Seaweed_serviceDesc = grpc.ServiceDesc{ MethodName: "LookupEcVolume", Handler: _Seaweed_LookupEcVolume_Handler, }, + { + MethodName: "VacuumVolume", + Handler: _Seaweed_VacuumVolume_Handler, + }, { MethodName: "GetMasterConfiguration", Handler: _Seaweed_GetMasterConfiguration_Handler, }, + { + MethodName: "ListMasterClients", + Handler: _Seaweed_ListMasterClients_Handler, + }, + { + MethodName: "LeaseAdminToken", + Handler: _Seaweed_LeaseAdminToken_Handler, + }, + { + MethodName: "ReleaseAdminToken", + Handler: _Seaweed_ReleaseAdminToken_Handler, + }, }, Streams: []grpc.StreamDesc{ { @@ -1903,141 +4684,3 @@ var _Seaweed_serviceDesc = 
grpc.ServiceDesc{ }, Metadata: "master.proto", } - -func init() { proto.RegisterFile("master.proto", fileDescriptor0) } - -var fileDescriptor0 = []byte{ - // 2102 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xd4, 0x59, 0x4b, 0x6f, 0x1c, 0xc7, - 0x11, 0xd6, 0xec, 0x2e, 0x97, 0xbb, 0xb5, 0xef, 0x26, 0x45, 0xaf, 0xd6, 0x96, 0xb5, 0x1a, 0x07, - 0x30, 0xa5, 0x38, 0x8c, 0x43, 0x1b, 0x88, 0x91, 0xc4, 0x30, 0x24, 0x8a, 0x76, 0x08, 0x89, 0xb4, - 0x34, 0x54, 0x64, 0x20, 0x40, 0x30, 0xe9, 0x9d, 0x69, 0x92, 0x03, 0xce, 0x2b, 0xd3, 0xbd, 0x14, - 0xd7, 0xb9, 0x04, 0xc8, 0x31, 0xa7, 0x20, 0x87, 0xfc, 0x85, 0x5c, 0x72, 0x4a, 0xce, 0xbe, 0xe4, - 0x47, 0xe4, 0x7f, 0xe4, 0xea, 0x4b, 0xd0, 0xaf, 0x99, 0x9e, 0x7d, 0x90, 0xa6, 0x01, 0x1f, 0x74, - 0x9b, 0xae, 0xaa, 0xae, 0xae, 0xfe, 0xaa, 0xbb, 0xea, 0xeb, 0x5d, 0x68, 0x47, 0x98, 0x32, 0x92, - 0xed, 0xa4, 0x59, 0xc2, 0x12, 0xd4, 0x94, 0x23, 0x37, 0x9d, 0xd8, 0x7f, 0xa9, 0x43, 0xf3, 0xd7, - 0x04, 0x67, 0x6c, 0x42, 0x30, 0x43, 0x5d, 0xa8, 0x04, 0xe9, 0xd0, 0x1a, 0x5b, 0xdb, 0x4d, 0xa7, - 0x12, 0xa4, 0x08, 0x41, 0x2d, 0x4d, 0x32, 0x36, 0xac, 0x8c, 0xad, 0xed, 0x8e, 0x23, 0xbe, 0xd1, - 0x5d, 0x80, 0x74, 0x3a, 0x09, 0x03, 0xcf, 0x9d, 0x66, 0xe1, 0xb0, 0x2a, 0x6c, 0x9b, 0x52, 0xf2, - 0x9b, 0x2c, 0x44, 0xdb, 0xd0, 0x8f, 0xf0, 0xa5, 0x7b, 0x91, 0x84, 0xd3, 0x88, 0xb8, 0x5e, 0x32, - 0x8d, 0xd9, 0xb0, 0x26, 0xa6, 0x77, 0x23, 0x7c, 0xf9, 0x4a, 0x88, 0xf7, 0xb8, 0x14, 0x8d, 0x79, - 0x54, 0x97, 0xee, 0x49, 0x10, 0x12, 0xf7, 0x9c, 0xcc, 0x86, 0x6b, 0x63, 0x6b, 0xbb, 0xe6, 0x40, - 0x84, 0x2f, 0x3f, 0x0f, 0x42, 0xf2, 0x94, 0xcc, 0xd0, 0x3d, 0x68, 0xf9, 0x98, 0x61, 0xd7, 0x23, - 0x31, 0x23, 0xd9, 0xb0, 0x2e, 0xd6, 0x02, 0x2e, 0xda, 0x13, 0x12, 0x1e, 0x5f, 0x86, 0xbd, 0xf3, - 0xe1, 0xba, 0xd0, 0x88, 0x6f, 0x1e, 0x1f, 0xf6, 0xa3, 0x20, 0x76, 0x45, 0xe4, 0x0d, 0xb1, 0x74, - 0x53, 0x48, 0x9e, 0xf3, 0xf0, 0x3f, 0x85, 0x75, 0x19, 0x1b, 0x1d, 0x36, 0xc7, 0xd5, 0xed, 0xd6, - 0xee, 0x7b, 0x3b, 0x39, 0x1a, 0x3b, 0x32, 0xbc, 0x83, 0xf8, 0x24, 0xc9, 0x22, 0xcc, 0x82, 0x24, - 0x3e, 0x24, 0x94, 0xe2, 0x53, 0xe2, 0xe8, 0x39, 0xe8, 0x00, 0x5a, 0x31, 0x79, 0xed, 0x6a, 0x17, - 0x20, 0x5c, 0x6c, 0x2f, 0xb8, 0x38, 0x3e, 0x4b, 0x32, 0xb6, 0xc4, 0x0f, 0xc4, 0xe4, 0xf5, 0x2b, - 0xe5, 0xea, 0x05, 0xf4, 0x7c, 0x12, 0x12, 0x46, 0xfc, 0xdc, 0x5d, 0xeb, 0x86, 0xee, 0xba, 0xca, - 0x81, 0x76, 0xf9, 0x23, 0xe8, 0x9e, 0x61, 0xea, 0xc6, 0x49, 0xee, 0xb1, 0x3d, 0xb6, 0xb6, 0x1b, - 0x4e, 0xfb, 0x0c, 0xd3, 0xa3, 0x44, 0x5b, 0x7d, 0x01, 0x4d, 0xe2, 0xb9, 0xf4, 0x0c, 0x67, 0x3e, - 0x1d, 0xf6, 0xc5, 0x92, 0x0f, 0x17, 0x96, 0xdc, 0xf7, 0x8e, 0xb9, 0xc1, 0x92, 0x45, 0x1b, 0x44, - 0xaa, 0x28, 0x3a, 0x82, 0x0e, 0x07, 0xa3, 0x70, 0x36, 0xb8, 0xb1, 0x33, 0x8e, 0xe6, 0xbe, 0xf6, - 0xf7, 0x0a, 0x06, 0x1a, 0x91, 0xc2, 0x27, 0xba, 0xb1, 0x4f, 0x0d, 0x6b, 0xee, 0xf7, 0x7d, 0xe8, - 0x2b, 0x58, 0x0a, 0xb7, 0x1b, 0x02, 0x98, 0x8e, 0x00, 0x46, 0x1b, 0xda, 0x7f, 0xaa, 0xc0, 0x20, - 0xbf, 0x0d, 0x0e, 0xa1, 0x69, 0x12, 0x53, 0x82, 0x1e, 0xc2, 0x40, 0x1d, 0x67, 0x1a, 0x7c, 0x4d, - 0xdc, 0x30, 0x88, 0x02, 0x26, 0x2e, 0x49, 0xcd, 0xe9, 0x49, 0xc5, 0x71, 0xf0, 0x35, 0x79, 0xc6, - 0xc5, 0x68, 0x0b, 0xea, 0x21, 0xc1, 0x3e, 0xc9, 0xc4, 0x9d, 0x69, 0x3a, 0x6a, 0x84, 0xde, 0x87, - 0x5e, 0x44, 0x58, 0x16, 0x78, 0xd4, 0xc5, 0xbe, 0x9f, 0x11, 0x4a, 0xd5, 0xd5, 0xe9, 0x2a, 0xf1, - 0x23, 0x29, 0x45, 0x9f, 0xc0, 0x50, 0x1b, 0x06, 0xfc, 0x8c, 0x5f, 0xe0, 0xd0, 0xa5, 0xc4, 0x4b, - 0x62, 0x9f, 0xaa, 0x7b, 0xb4, 0xa5, 0xf4, 0x07, 0x4a, 0x7d, 0x2c, 0xb5, 0xe8, 0x09, 0xf4, 0x29, - 0x4b, 0x32, 0x7c, 
0x4a, 0xdc, 0x09, 0xf6, 0xce, 0x09, 0x9f, 0xb1, 0x26, 0xc0, 0xbb, 0x63, 0x80, - 0x77, 0x2c, 0x4d, 0x1e, 0x4b, 0x0b, 0xa7, 0x47, 0x4b, 0x63, 0x6a, 0x7f, 0x5b, 0x85, 0xe1, 0xaa, - 0x6b, 0x20, 0xea, 0x83, 0x2f, 0xb6, 0xde, 0x71, 0x2a, 0x81, 0xcf, 0xef, 0x1f, 0x87, 0x44, 0xec, - 0xb5, 0xe6, 0x88, 0x6f, 0xf4, 0x2e, 0x80, 0x97, 0x84, 0x21, 0xf1, 0xf8, 0x44, 0xb5, 0x49, 0x43, - 0xc2, 0xef, 0xa7, 0xb8, 0xf2, 0x45, 0x69, 0xa8, 0x39, 0x4d, 0x2e, 0x91, 0x55, 0xe1, 0x3e, 0xb4, - 0x65, 0xfa, 0x94, 0x81, 0xac, 0x0a, 0x2d, 0x29, 0x93, 0x26, 0x1f, 0x00, 0xd2, 0xc7, 0x64, 0x32, - 0xcb, 0x0d, 0xeb, 0xc2, 0xb0, 0xaf, 0x34, 0x8f, 0x67, 0xda, 0xfa, 0x6d, 0x68, 0x66, 0x04, 0xfb, - 0x6e, 0x12, 0x87, 0x33, 0x51, 0x28, 0x1a, 0x4e, 0x83, 0x0b, 0xbe, 0x8c, 0xc3, 0x19, 0xfa, 0x31, - 0x0c, 0x32, 0x92, 0x86, 0x81, 0x87, 0xdd, 0x34, 0xc4, 0x1e, 0x89, 0x48, 0xac, 0x6b, 0x46, 0x5f, - 0x29, 0x9e, 0x6b, 0x39, 0x1a, 0xc2, 0xfa, 0x05, 0xc9, 0x28, 0xdf, 0x56, 0x53, 0x98, 0xe8, 0x21, - 0xea, 0x43, 0x95, 0xb1, 0x70, 0x08, 0x42, 0xca, 0x3f, 0xd1, 0x03, 0xe8, 0x7b, 0x49, 0x94, 0x62, - 0x8f, 0xb9, 0x19, 0xb9, 0x08, 0xc4, 0xa4, 0x96, 0x50, 0xf7, 0x94, 0xdc, 0x51, 0x62, 0xbe, 0x9d, - 0x28, 0xf1, 0x83, 0x93, 0x80, 0xf8, 0x2e, 0x66, 0x2a, 0xd9, 0xe2, 0xe2, 0x56, 0x9d, 0xbe, 0xd6, - 0x3c, 0x62, 0x32, 0xcd, 0x68, 0x07, 0x36, 0x32, 0x12, 0x25, 0x8c, 0xb8, 0x3a, 0xd9, 0x31, 0x8e, - 0xc8, 0xb0, 0x23, 0x70, 0x1e, 0x48, 0x95, 0xca, 0xf1, 0x11, 0x8e, 0x08, 0xf7, 0x3e, 0x67, 0xcf, - 0x6b, 0x6d, 0x57, 0x98, 0xf7, 0x4b, 0xe6, 0x4f, 0xc9, 0xcc, 0xfe, 0x87, 0x05, 0x77, 0xaf, 0x2c, - 0x39, 0x0b, 0x47, 0xe0, 0xba, 0x74, 0xff, 0x50, 0x08, 0xdb, 0x53, 0xb8, 0x77, 0x4d, 0x21, 0xb8, - 0x26, 0xd6, 0xca, 0x42, 0xac, 0x36, 0x74, 0x88, 0xe7, 0x06, 0xb1, 0x4f, 0x2e, 0xdd, 0x49, 0xc0, - 0xe4, 0x15, 0xed, 0x38, 0x2d, 0xe2, 0x1d, 0x70, 0xd9, 0xe3, 0x80, 0x51, 0xfb, 0x1b, 0x0b, 0xba, - 0xe5, 0x3b, 0xc4, 0x6f, 0x01, 0x9b, 0xa5, 0x44, 0xf5, 0x4d, 0xf1, 0xad, 0x96, 0xae, 0xa8, 0x4e, - 0xea, 0xa3, 0x03, 0x80, 0x34, 0x4b, 0x52, 0x92, 0xb1, 0x80, 0x70, 0xbf, 0xfc, 0x5a, 0x3e, 0x58, - 0x79, 0x2d, 0x77, 0x9e, 0xe7, 0xb6, 0xfb, 0x31, 0xcb, 0x66, 0x8e, 0x31, 0x79, 0xf4, 0x29, 0xf4, - 0xe6, 0xd4, 0x1c, 0x1d, 0x9e, 0x55, 0x19, 0x00, 0xff, 0x44, 0x9b, 0xb0, 0x76, 0x81, 0xc3, 0x29, - 0x51, 0x21, 0xc8, 0xc1, 0x2f, 0x2a, 0x9f, 0x58, 0xf6, 0x3a, 0xac, 0xed, 0x47, 0x29, 0x9b, 0xf1, - 0x9d, 0xf4, 0x8e, 0xa7, 0x29, 0xc9, 0x1e, 0x87, 0x89, 0x77, 0xbe, 0x7f, 0xc9, 0x32, 0x8c, 0xbe, - 0x84, 0x2e, 0xc9, 0x30, 0x9d, 0x66, 0xfc, 0x56, 0xf9, 0x41, 0x7c, 0x2a, 0x7c, 0x96, 0x5b, 0xd2, - 0xdc, 0x9c, 0x9d, 0x7d, 0x39, 0x61, 0x4f, 0xd8, 0x3b, 0x1d, 0x62, 0x0e, 0x47, 0xbf, 0x85, 0x4e, - 0x49, 0xcf, 0xc1, 0xe2, 0x0d, 0x5c, 0x65, 0x45, 0x7c, 0xf3, 0xa2, 0x99, 0xe2, 0x2c, 0x60, 0x33, - 0x45, 0x34, 0xd4, 0x88, 0x97, 0x0a, 0x55, 0x78, 0x03, 0x5f, 0x82, 0xd6, 0x71, 0x9a, 0x52, 0x72, - 0xe0, 0x53, 0xfb, 0x21, 0x6c, 0x3e, 0x25, 0x24, 0xdd, 0x4b, 0xe2, 0x98, 0x78, 0x8c, 0xf8, 0x0e, - 0xf9, 0xc3, 0x94, 0x50, 0xc6, 0x97, 0x10, 0x77, 0x42, 0xe5, 0x83, 0x7f, 0xdb, 0x7f, 0xb7, 0xa0, - 0x2b, 0x8f, 0xcb, 0xb3, 0xc4, 0x13, 0x87, 0x84, 0x83, 0xc6, 0x19, 0x8c, 0x02, 0x6d, 0x9a, 0x85, - 0x73, 0xd4, 0xa6, 0x32, 0x4f, 0x6d, 0xee, 0x40, 0x43, 0xf4, 0xfe, 0x22, 0x98, 0x75, 0xde, 0xce, - 0x03, 0x9f, 0x16, 0x55, 0xcb, 0x97, 0xea, 0x9a, 0x50, 0xb7, 0x74, 0x7b, 0xe6, 0x26, 0x45, 0x67, - 0x58, 0x33, 0x3b, 0x83, 0xfd, 0x12, 0x36, 0x9e, 0x25, 0xc9, 0xf9, 0x34, 0x95, 0xe1, 0xe9, 0x4d, - 0x94, 0xf7, 0x6e, 0x8d, 0xab, 0x3c, 0x96, 0x7c, 0xef, 0xd7, 0x1d, 0x65, 0xfb, 0x7f, 0x16, 0x6c, - 0x96, 0xdd, 0xaa, 0x66, 0xf6, 0x7b, 0xd8, 
0xc8, 0xfd, 0xba, 0xa1, 0xc2, 0x42, 0x2e, 0xd0, 0xda, - 0xfd, 0xd0, 0x48, 0xf3, 0xb2, 0xd9, 0x9a, 0x20, 0xf9, 0x1a, 0x44, 0x67, 0x70, 0x31, 0x27, 0xa1, - 0xa3, 0x4b, 0xe8, 0xcf, 0x9b, 0xf1, 0x22, 0x9c, 0xaf, 0xaa, 0x10, 0x6f, 0xe8, 0x99, 0xe8, 0x67, - 0xd0, 0x2c, 0x02, 0xa9, 0x88, 0x40, 0x36, 0x4a, 0x81, 0xa8, 0xb5, 0x0a, 0x2b, 0x7e, 0xbc, 0x49, - 0x96, 0x25, 0x99, 0x2a, 0x38, 0x72, 0x60, 0xff, 0x12, 0x1a, 0xdf, 0x3b, 0xbb, 0xf6, 0xbf, 0x2a, - 0xd0, 0x79, 0x44, 0x69, 0x70, 0x1a, 0xeb, 0x14, 0x6c, 0xc2, 0x9a, 0x6c, 0x2d, 0xb2, 0xd7, 0xcb, - 0x01, 0x1a, 0x43, 0x4b, 0xd5, 0x2d, 0x03, 0x7a, 0x53, 0x74, 0x6d, 0x49, 0x54, 0xb5, 0xac, 0x26, - 0x43, 0xe3, 0xdd, 0x62, 0x8e, 0xe8, 0xae, 0xad, 0x24, 0xba, 0x75, 0x83, 0xe8, 0xbe, 0x0d, 0x4d, - 0x31, 0x29, 0x4e, 0x7c, 0xa2, 0x18, 0x70, 0x83, 0x0b, 0x8e, 0x12, 0x9f, 0xa0, 0x5d, 0xd8, 0x8a, - 0x48, 0x94, 0x64, 0x33, 0x37, 0xc2, 0xa9, 0xcb, 0x79, 0xb6, 0xe0, 0x2e, 0xd1, 0x44, 0xd5, 0x5e, - 0x24, 0xb5, 0x87, 0x38, 0x3d, 0xc4, 0x97, 0x9c, 0xbe, 0x1c, 0x4e, 0xd0, 0x2e, 0xdc, 0xfe, 0x2a, - 0x0b, 0x18, 0x9e, 0x84, 0xa4, 0xcc, 0xdf, 0x65, 0x2d, 0xde, 0xd0, 0x4a, 0x83, 0xc4, 0xdb, 0x7f, - 0xb3, 0xa0, 0xab, 0x51, 0x53, 0x27, 0xac, 0x0f, 0xd5, 0x93, 0x3c, 0xcb, 0xfc, 0x53, 0xe7, 0xa2, - 0xb2, 0x2a, 0x17, 0x0b, 0x8f, 0x88, 0x1c, 0xf9, 0x9a, 0x89, 0x7c, 0x9e, 0xf4, 0x35, 0x23, 0xe9, - 0x1c, 0x1a, 0x3c, 0x65, 0x67, 0x1a, 0x1a, 0xfe, 0x6d, 0x9f, 0xc2, 0xe0, 0x98, 0x61, 0x16, 0x50, - 0x16, 0x78, 0x54, 0xa7, 0x73, 0x2e, 0x71, 0xd6, 0x75, 0x89, 0xab, 0xac, 0x4a, 0x5c, 0x35, 0x4f, - 0x9c, 0xfd, 0x1f, 0x0b, 0x90, 0xb9, 0x92, 0x82, 0xe0, 0x07, 0x58, 0x8a, 0x43, 0xc6, 0x12, 0xc6, - 0xd9, 0x20, 0x67, 0x5c, 0x8a, 0x37, 0x09, 0x09, 0x4f, 0x1f, 0x3f, 0x0d, 0x53, 0x4a, 0x7c, 0xa9, - 0x95, 0xa4, 0xa9, 0xc1, 0x05, 0x42, 0x59, 0xe6, 0x5c, 0xf5, 0x39, 0xce, 0x65, 0x3f, 0x82, 0x96, - 0xea, 0x3f, 0x2f, 0x79, 0xef, 0xba, 0x3e, 0x7a, 0x15, 0x5d, 0xa5, 0x00, 0x62, 0x0c, 0xb0, 0x57, - 0x44, 0xbf, 0xac, 0x02, 0xff, 0x11, 0x6e, 0x17, 0x16, 0xcf, 0x02, 0xca, 0x74, 0x5e, 0x3e, 0x86, - 0xad, 0x20, 0xf6, 0xc2, 0xa9, 0x4f, 0xdc, 0x98, 0x77, 0xf0, 0x30, 0x7f, 0xbc, 0x58, 0x82, 0xad, - 0x6d, 0x2a, 0xed, 0x91, 0x50, 0xea, 0x47, 0xcc, 0x07, 0x80, 0xf4, 0x2c, 0xe2, 0xe5, 0x33, 0x2a, - 0x62, 0x46, 0x5f, 0x69, 0xf6, 0x3d, 0x65, 0x6d, 0xbf, 0x80, 0xad, 0xf9, 0xc5, 0x55, 0xaa, 0x7e, - 0x0e, 0xad, 0x02, 0x76, 0x5d, 0x07, 0x6f, 0x1b, 0xe5, 0xa7, 0x98, 0xe7, 0x98, 0x96, 0xf6, 0x4f, - 0xe0, 0xad, 0x42, 0xf5, 0x44, 0x14, 0xfa, 0xab, 0x1a, 0xd0, 0x08, 0x86, 0x8b, 0xe6, 0x32, 0x06, - 0xfb, 0xaf, 0x55, 0x68, 0x3f, 0x51, 0x37, 0x97, 0xd3, 0x18, 0x83, 0xb8, 0x48, 0xf6, 0x70, 0x1f, - 0xda, 0xa5, 0x0b, 0x29, 0xf9, 0x76, 0xeb, 0xc2, 0x78, 0x4d, 0x2f, 0x7b, 0x77, 0x57, 0x85, 0xd9, - 0xfc, 0xbb, 0xfb, 0x21, 0x0c, 0x4e, 0x32, 0x42, 0x16, 0x9f, 0xe8, 0x35, 0xa7, 0xc7, 0x15, 0xa6, - 0xed, 0x0e, 0x6c, 0x60, 0x8f, 0x05, 0x17, 0x73, 0xd6, 0xf2, 0x7c, 0x0d, 0xa4, 0xca, 0xb4, 0xff, - 0x3c, 0x0f, 0x34, 0x88, 0x4f, 0x12, 0x3a, 0xac, 0x7f, 0xf7, 0x27, 0xb6, 0xda, 0x0d, 0xd7, 0x50, - 0xf4, 0x1c, 0xba, 0xfa, 0xa9, 0xa6, 0x3c, 0xad, 0xdf, 0xf8, 0x19, 0xd8, 0x26, 0x85, 0x8a, 0x1a, - 0xbc, 0xb9, 0xb4, 0x93, 0x86, 0xdc, 0x89, 0x54, 0x99, 0x85, 0xed, 0xdf, 0x15, 0x68, 0x38, 0xd8, - 0x3b, 0x7f, 0xb3, 0xf3, 0xf1, 0x19, 0xf4, 0xf2, 0x1e, 0x51, 0x4a, 0xc9, 0x5b, 0x06, 0x90, 0xe6, - 0xd1, 0x73, 0x3a, 0xbe, 0x31, 0x5a, 0x09, 0xdb, 0xfa, 0x2a, 0xd8, 0xfe, 0x59, 0x81, 0xee, 0x93, - 0xbc, 0x6f, 0xbd, 0xd9, 0xe0, 0xed, 0x02, 0xf0, 0x46, 0x5b, 0xc2, 0xcd, 0x24, 0x26, 0xfa, 0x78, - 0x38, 0xcd, 0x4c, 0x7d, 0xdd, 0x1c, 0xaf, 0x6f, 0x2a, 0xd0, 0x7e, 
0x99, 0xa4, 0x49, 0x98, 0x9c, - 0xce, 0xde, 0x6c, 0xb4, 0xf6, 0x61, 0x60, 0x70, 0x98, 0x12, 0x68, 0x77, 0xe6, 0x0e, 0x5b, 0x71, - 0x38, 0x9c, 0x9e, 0x5f, 0x1a, 0xdf, 0x1c, 0xc0, 0x0d, 0x18, 0x28, 0x5e, 0x5f, 0xb4, 0x14, 0xfb, - 0xcf, 0x16, 0x20, 0x53, 0xaa, 0x6a, 0xfd, 0xaf, 0xa0, 0xc3, 0x14, 0xd6, 0x22, 0x3e, 0xf5, 0xb8, - 0x31, 0xef, 0x82, 0x99, 0x0b, 0xa7, 0xcd, 0xcc, 0xcc, 0xfc, 0x14, 0x36, 0x17, 0x7e, 0x06, 0xe2, - 0x84, 0x4a, 0x66, 0x64, 0x30, 0xf7, 0x4b, 0xd0, 0xe1, 0xc4, 0xfe, 0x18, 0x6e, 0x4b, 0x12, 0xad, - 0xfb, 0x90, 0xee, 0x0f, 0x0b, 0x6c, 0xb8, 0x53, 0xb0, 0x61, 0xfb, 0x5b, 0x0b, 0xb6, 0xe6, 0xa7, - 0xa9, 0xf8, 0xaf, 0x9a, 0x87, 0x30, 0x20, 0x55, 0x2f, 0x4d, 0x5e, 0x2f, 0xe9, 0xf4, 0x47, 0x0b, - 0xbc, 0x7e, 0xde, 0xf7, 0x8e, 0xae, 0xa3, 0x05, 0xb5, 0xef, 0xd3, 0xb2, 0x80, 0x8e, 0x30, 0x0c, - 0x16, 0xcc, 0xf8, 0xab, 0x48, 0xaf, 0xab, 0x62, 0x5a, 0x57, 0x13, 0xbf, 0x07, 0xb1, 0xb7, 0xef, - 0xc1, 0xdd, 0x2f, 0x08, 0x3b, 0x14, 0x36, 0x7b, 0x49, 0x7c, 0x12, 0x9c, 0x4e, 0x33, 0x69, 0x54, - 0xa4, 0xf6, 0xdd, 0x55, 0x16, 0x0a, 0xa6, 0x25, 0xbf, 0xb5, 0x59, 0x37, 0xfe, 0xad, 0xad, 0x72, - 0xd5, 0x6f, 0x6d, 0xbb, 0xff, 0xad, 0xc3, 0xfa, 0x31, 0xc1, 0xaf, 0x09, 0xe1, 0x4f, 0xfb, 0xce, - 0x31, 0x89, 0xfd, 0xe2, 0x57, 0xf4, 0x4d, 0x63, 0x8f, 0xb9, 0x74, 0xf4, 0xce, 0x32, 0x69, 0x4e, - 0x01, 0x6e, 0x6d, 0x5b, 0x1f, 0x5a, 0xe8, 0x05, 0x74, 0x4a, 0x2f, 0x5a, 0x74, 0xcf, 0x98, 0xb4, - 0xec, 0xad, 0x3b, 0xba, 0xb3, 0xd0, 0x10, 0x35, 0xaa, 0xb9, 0xcb, 0xb6, 0xf9, 0x92, 0x43, 0xef, - 0xae, 0x7c, 0xe2, 0x49, 0x87, 0xf7, 0xae, 0x79, 0x02, 0xda, 0xb7, 0xd0, 0x67, 0x50, 0x97, 0x94, - 0x1f, 0x0d, 0x0d, 0xe3, 0xd2, 0xdb, 0xa9, 0x14, 0x57, 0xf9, 0x7d, 0x60, 0xdf, 0x42, 0x4f, 0x01, - 0x0a, 0xd2, 0x8c, 0xde, 0x29, 0xfd, 0x0c, 0x32, 0xc7, 0xda, 0x47, 0x77, 0x57, 0x68, 0x73, 0x67, - 0x5f, 0x41, 0xb7, 0x4c, 0xed, 0xd0, 0x78, 0x29, 0x7b, 0x33, 0xea, 0xc3, 0xe8, 0xfe, 0x15, 0x16, - 0xb9, 0xe3, 0xdf, 0x41, 0x7f, 0x9e, 0xb1, 0x21, 0x7b, 0xe9, 0xc4, 0x12, 0xfb, 0x1b, 0xbd, 0x77, - 0xa5, 0x8d, 0x09, 0x42, 0x51, 0xa2, 0x4a, 0x20, 0x2c, 0xd4, 0xb3, 0x12, 0x08, 0x8b, 0x75, 0x4d, - 0x82, 0x50, 0xbe, 0xd7, 0x25, 0x10, 0x96, 0x56, 0xa1, 0x12, 0x08, 0xcb, 0x8b, 0x82, 0x7d, 0x0b, - 0x25, 0xb0, 0xb5, 0xfc, 0xb6, 0x21, 0xf3, 0x27, 0xa1, 0x2b, 0xaf, 0xec, 0xe8, 0xc1, 0x77, 0xb0, - 0xd4, 0x0b, 0x4e, 0xea, 0xe2, 0x2f, 0xaa, 0x8f, 0xfe, 0x1f, 0x00, 0x00, 0xff, 0xff, 0x6c, 0x64, - 0x5c, 0xbc, 0xb2, 0x1a, 0x00, 0x00, -} diff --git a/weed/pb/messaging.proto b/weed/pb/messaging.proto new file mode 100644 index 000000000..04446ad16 --- /dev/null +++ b/weed/pb/messaging.proto @@ -0,0 +1,135 @@ +syntax = "proto3"; + +package messaging_pb; + +option go_package = "github.com/chrislusf/seaweedfs/weed/pb/messaging_pb"; +option java_package = "seaweedfs.client"; +option java_outer_classname = "MessagingProto"; + +////////////////////////////////////////////////// + +service SeaweedMessaging { + + rpc Subscribe (stream SubscriberMessage) returns (stream BrokerMessage) { + } + + rpc Publish (stream PublishRequest) returns (stream PublishResponse) { + } + + rpc DeleteTopic (DeleteTopicRequest) returns (DeleteTopicResponse) { + } + + rpc ConfigureTopic (ConfigureTopicRequest) returns (ConfigureTopicResponse) { + } + + rpc GetTopicConfiguration (GetTopicConfigurationRequest) returns (GetTopicConfigurationResponse) { + } + + rpc FindBroker (FindBrokerRequest) returns (FindBrokerResponse) { + } + +} + +////////////////////////////////////////////////// + +message SubscriberMessage { + message InitMessage { + string namespace = 1; + string topic = 
2; + int32 partition = 3; + enum StartPosition { + LATEST = 0; // Start at the newest message + EARLIEST = 1; // Start at the oldest message + TIMESTAMP = 2; // Start after a specified timestamp, exclusive + } + StartPosition startPosition = 4; // Where to begin consuming from + int64 timestampNs = 5; // timestamp in nanoseconds + string subscriber_id = 6; // uniquely identify a subscriber to track consumption + } + InitMessage init = 1; + message AckMessage { + int64 message_id = 1; + } + AckMessage ack = 2; + bool is_close = 3; +} + +message Message { + int64 event_time_ns = 1 [jstype = JS_STRING]; + bytes key = 2; // Message key + bytes value = 3; // Message payload + map<string, bytes> headers = 4; // Message headers + bool is_close = 5; +} + +message BrokerMessage { + Message data = 1; +} + +message PublishRequest { + message InitMessage { + string namespace = 1; // only needed on the initial request + string topic = 2; // only needed on the initial request + int32 partition = 3; + } + InitMessage init = 1; + Message data = 2; +} + +message PublishResponse { + message ConfigMessage { + int32 partition_count = 1; + } + ConfigMessage config = 1; + message RedirectMessage { + string new_broker = 1; + } + RedirectMessage redirect = 2; + bool is_closed = 3; +} + +message DeleteTopicRequest { + string namespace = 1; + string topic = 2; +} +message DeleteTopicResponse { +} + +message ConfigureTopicRequest { + string namespace = 1; + string topic = 2; + TopicConfiguration configuration = 3; +} +message ConfigureTopicResponse { +} + +message GetTopicConfigurationRequest { + string namespace = 1; + string topic = 2; +} +message GetTopicConfigurationResponse { + TopicConfiguration configuration = 1; +} + +message FindBrokerRequest { + string namespace = 1; + string topic = 2; + int32 parition = 3; +} + +message FindBrokerResponse { + string broker = 1; +} + +message TopicConfiguration { + int32 partition_count = 1; + string collection = 2; + string replication = 3; + bool is_transient = 4; + enum Partitioning { + NonNullKeyHash = 0; // If not null, hash by key value. If null, round robin + KeyHash = 1; // hash by key value + RoundRobin = 2; // round robin pick one partition + } + Partitioning partitoning = 5; +} diff --git a/weed/pb/messaging_pb/messaging.pb.go b/weed/pb/messaging_pb/messaging.pb.go new file mode 100644 index 000000000..591406347 --- /dev/null +++ b/weed/pb/messaging_pb/messaging.pb.go @@ -0,0 +1,2053 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.25.0 +// protoc v3.12.3 +// source: messaging.proto + +package messaging_pb + +import ( + context "context" + proto "github.com/golang/protobuf/proto" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// This is a compile-time assertion that a sufficiently up-to-date version +// of the legacy proto package is being used.
+const _ = proto.ProtoPackageIsVersion4 + +type SubscriberMessage_InitMessage_StartPosition int32 + +const ( + SubscriberMessage_InitMessage_LATEST SubscriberMessage_InitMessage_StartPosition = 0 // Start at the newest message + SubscriberMessage_InitMessage_EARLIEST SubscriberMessage_InitMessage_StartPosition = 1 // Start at the oldest message + SubscriberMessage_InitMessage_TIMESTAMP SubscriberMessage_InitMessage_StartPosition = 2 // Start after a specified timestamp, exclusive +) + +// Enum value maps for SubscriberMessage_InitMessage_StartPosition. +var ( + SubscriberMessage_InitMessage_StartPosition_name = map[int32]string{ + 0: "LATEST", + 1: "EARLIEST", + 2: "TIMESTAMP", + } + SubscriberMessage_InitMessage_StartPosition_value = map[string]int32{ + "LATEST": 0, + "EARLIEST": 1, + "TIMESTAMP": 2, + } +) + +func (x SubscriberMessage_InitMessage_StartPosition) Enum() *SubscriberMessage_InitMessage_StartPosition { + p := new(SubscriberMessage_InitMessage_StartPosition) + *p = x + return p +} + +func (x SubscriberMessage_InitMessage_StartPosition) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (SubscriberMessage_InitMessage_StartPosition) Descriptor() protoreflect.EnumDescriptor { + return file_messaging_proto_enumTypes[0].Descriptor() +} + +func (SubscriberMessage_InitMessage_StartPosition) Type() protoreflect.EnumType { + return &file_messaging_proto_enumTypes[0] +} + +func (x SubscriberMessage_InitMessage_StartPosition) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use SubscriberMessage_InitMessage_StartPosition.Descriptor instead. +func (SubscriberMessage_InitMessage_StartPosition) EnumDescriptor() ([]byte, []int) { + return file_messaging_proto_rawDescGZIP(), []int{0, 0, 0} +} + +type TopicConfiguration_Partitioning int32 + +const ( + TopicConfiguration_NonNullKeyHash TopicConfiguration_Partitioning = 0 // If not null, hash by key value. If null, round robin + TopicConfiguration_KeyHash TopicConfiguration_Partitioning = 1 // hash by key value + TopicConfiguration_RoundRobin TopicConfiguration_Partitioning = 2 // round robin pick one partition +) + +// Enum value maps for TopicConfiguration_Partitioning. +var ( + TopicConfiguration_Partitioning_name = map[int32]string{ + 0: "NonNullKeyHash", + 1: "KeyHash", + 2: "RoundRobin", + } + TopicConfiguration_Partitioning_value = map[string]int32{ + "NonNullKeyHash": 0, + "KeyHash": 1, + "RoundRobin": 2, + } +) + +func (x TopicConfiguration_Partitioning) Enum() *TopicConfiguration_Partitioning { + p := new(TopicConfiguration_Partitioning) + *p = x + return p +} + +func (x TopicConfiguration_Partitioning) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (TopicConfiguration_Partitioning) Descriptor() protoreflect.EnumDescriptor { + return file_messaging_proto_enumTypes[1].Descriptor() +} + +func (TopicConfiguration_Partitioning) Type() protoreflect.EnumType { + return &file_messaging_proto_enumTypes[1] +} + +func (x TopicConfiguration_Partitioning) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use TopicConfiguration_Partitioning.Descriptor instead. 
+func (TopicConfiguration_Partitioning) EnumDescriptor() ([]byte, []int) { + return file_messaging_proto_rawDescGZIP(), []int{13, 0} +} + +type SubscriberMessage struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Init *SubscriberMessage_InitMessage `protobuf:"bytes,1,opt,name=init,proto3" json:"init,omitempty"` + Ack *SubscriberMessage_AckMessage `protobuf:"bytes,2,opt,name=ack,proto3" json:"ack,omitempty"` + IsClose bool `protobuf:"varint,3,opt,name=is_close,json=isClose,proto3" json:"is_close,omitempty"` +} + +func (x *SubscriberMessage) Reset() { + *x = SubscriberMessage{} + if protoimpl.UnsafeEnabled { + mi := &file_messaging_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SubscriberMessage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SubscriberMessage) ProtoMessage() {} + +func (x *SubscriberMessage) ProtoReflect() protoreflect.Message { + mi := &file_messaging_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SubscriberMessage.ProtoReflect.Descriptor instead. +func (*SubscriberMessage) Descriptor() ([]byte, []int) { + return file_messaging_proto_rawDescGZIP(), []int{0} +} + +func (x *SubscriberMessage) GetInit() *SubscriberMessage_InitMessage { + if x != nil { + return x.Init + } + return nil +} + +func (x *SubscriberMessage) GetAck() *SubscriberMessage_AckMessage { + if x != nil { + return x.Ack + } + return nil +} + +func (x *SubscriberMessage) GetIsClose() bool { + if x != nil { + return x.IsClose + } + return false +} + +type Message struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + EventTimeNs int64 `protobuf:"varint,1,opt,name=event_time_ns,json=eventTimeNs,proto3" json:"event_time_ns,omitempty"` + Key []byte `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"` // Message key + Value []byte `protobuf:"bytes,3,opt,name=value,proto3" json:"value,omitempty"` // Message payload + Headers map[string][]byte `protobuf:"bytes,4,rep,name=headers,proto3" json:"headers,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` // Message headers + IsClose bool `protobuf:"varint,5,opt,name=is_close,json=isClose,proto3" json:"is_close,omitempty"` +} + +func (x *Message) Reset() { + *x = Message{} + if protoimpl.UnsafeEnabled { + mi := &file_messaging_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Message) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Message) ProtoMessage() {} + +func (x *Message) ProtoReflect() protoreflect.Message { + mi := &file_messaging_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Message.ProtoReflect.Descriptor instead. 
+func (*Message) Descriptor() ([]byte, []int) { + return file_messaging_proto_rawDescGZIP(), []int{1} +} + +func (x *Message) GetEventTimeNs() int64 { + if x != nil { + return x.EventTimeNs + } + return 0 +} + +func (x *Message) GetKey() []byte { + if x != nil { + return x.Key + } + return nil +} + +func (x *Message) GetValue() []byte { + if x != nil { + return x.Value + } + return nil +} + +func (x *Message) GetHeaders() map[string][]byte { + if x != nil { + return x.Headers + } + return nil +} + +func (x *Message) GetIsClose() bool { + if x != nil { + return x.IsClose + } + return false +} + +type BrokerMessage struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Data *Message `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"` +} + +func (x *BrokerMessage) Reset() { + *x = BrokerMessage{} + if protoimpl.UnsafeEnabled { + mi := &file_messaging_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *BrokerMessage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BrokerMessage) ProtoMessage() {} + +func (x *BrokerMessage) ProtoReflect() protoreflect.Message { + mi := &file_messaging_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BrokerMessage.ProtoReflect.Descriptor instead. +func (*BrokerMessage) Descriptor() ([]byte, []int) { + return file_messaging_proto_rawDescGZIP(), []int{2} +} + +func (x *BrokerMessage) GetData() *Message { + if x != nil { + return x.Data + } + return nil +} + +type PublishRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Init *PublishRequest_InitMessage `protobuf:"bytes,1,opt,name=init,proto3" json:"init,omitempty"` + Data *Message `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` +} + +func (x *PublishRequest) Reset() { + *x = PublishRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_messaging_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PublishRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PublishRequest) ProtoMessage() {} + +func (x *PublishRequest) ProtoReflect() protoreflect.Message { + mi := &file_messaging_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PublishRequest.ProtoReflect.Descriptor instead. 
+func (*PublishRequest) Descriptor() ([]byte, []int) { + return file_messaging_proto_rawDescGZIP(), []int{3} +} + +func (x *PublishRequest) GetInit() *PublishRequest_InitMessage { + if x != nil { + return x.Init + } + return nil +} + +func (x *PublishRequest) GetData() *Message { + if x != nil { + return x.Data + } + return nil +} + +type PublishResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Config *PublishResponse_ConfigMessage `protobuf:"bytes,1,opt,name=config,proto3" json:"config,omitempty"` + Redirect *PublishResponse_RedirectMessage `protobuf:"bytes,2,opt,name=redirect,proto3" json:"redirect,omitempty"` + IsClosed bool `protobuf:"varint,3,opt,name=is_closed,json=isClosed,proto3" json:"is_closed,omitempty"` +} + +func (x *PublishResponse) Reset() { + *x = PublishResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_messaging_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PublishResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PublishResponse) ProtoMessage() {} + +func (x *PublishResponse) ProtoReflect() protoreflect.Message { + mi := &file_messaging_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PublishResponse.ProtoReflect.Descriptor instead. +func (*PublishResponse) Descriptor() ([]byte, []int) { + return file_messaging_proto_rawDescGZIP(), []int{4} +} + +func (x *PublishResponse) GetConfig() *PublishResponse_ConfigMessage { + if x != nil { + return x.Config + } + return nil +} + +func (x *PublishResponse) GetRedirect() *PublishResponse_RedirectMessage { + if x != nil { + return x.Redirect + } + return nil +} + +func (x *PublishResponse) GetIsClosed() bool { + if x != nil { + return x.IsClosed + } + return false +} + +type DeleteTopicRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Namespace string `protobuf:"bytes,1,opt,name=namespace,proto3" json:"namespace,omitempty"` + Topic string `protobuf:"bytes,2,opt,name=topic,proto3" json:"topic,omitempty"` +} + +func (x *DeleteTopicRequest) Reset() { + *x = DeleteTopicRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_messaging_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DeleteTopicRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DeleteTopicRequest) ProtoMessage() {} + +func (x *DeleteTopicRequest) ProtoReflect() protoreflect.Message { + mi := &file_messaging_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DeleteTopicRequest.ProtoReflect.Descriptor instead. 
+func (*DeleteTopicRequest) Descriptor() ([]byte, []int) { + return file_messaging_proto_rawDescGZIP(), []int{5} +} + +func (x *DeleteTopicRequest) GetNamespace() string { + if x != nil { + return x.Namespace + } + return "" +} + +func (x *DeleteTopicRequest) GetTopic() string { + if x != nil { + return x.Topic + } + return "" +} + +type DeleteTopicResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *DeleteTopicResponse) Reset() { + *x = DeleteTopicResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_messaging_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DeleteTopicResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DeleteTopicResponse) ProtoMessage() {} + +func (x *DeleteTopicResponse) ProtoReflect() protoreflect.Message { + mi := &file_messaging_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DeleteTopicResponse.ProtoReflect.Descriptor instead. +func (*DeleteTopicResponse) Descriptor() ([]byte, []int) { + return file_messaging_proto_rawDescGZIP(), []int{6} +} + +type ConfigureTopicRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Namespace string `protobuf:"bytes,1,opt,name=namespace,proto3" json:"namespace,omitempty"` + Topic string `protobuf:"bytes,2,opt,name=topic,proto3" json:"topic,omitempty"` + Configuration *TopicConfiguration `protobuf:"bytes,3,opt,name=configuration,proto3" json:"configuration,omitempty"` +} + +func (x *ConfigureTopicRequest) Reset() { + *x = ConfigureTopicRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_messaging_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ConfigureTopicRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ConfigureTopicRequest) ProtoMessage() {} + +func (x *ConfigureTopicRequest) ProtoReflect() protoreflect.Message { + mi := &file_messaging_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ConfigureTopicRequest.ProtoReflect.Descriptor instead. 
+func (*ConfigureTopicRequest) Descriptor() ([]byte, []int) { + return file_messaging_proto_rawDescGZIP(), []int{7} +} + +func (x *ConfigureTopicRequest) GetNamespace() string { + if x != nil { + return x.Namespace + } + return "" +} + +func (x *ConfigureTopicRequest) GetTopic() string { + if x != nil { + return x.Topic + } + return "" +} + +func (x *ConfigureTopicRequest) GetConfiguration() *TopicConfiguration { + if x != nil { + return x.Configuration + } + return nil +} + +type ConfigureTopicResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *ConfigureTopicResponse) Reset() { + *x = ConfigureTopicResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_messaging_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ConfigureTopicResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ConfigureTopicResponse) ProtoMessage() {} + +func (x *ConfigureTopicResponse) ProtoReflect() protoreflect.Message { + mi := &file_messaging_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ConfigureTopicResponse.ProtoReflect.Descriptor instead. +func (*ConfigureTopicResponse) Descriptor() ([]byte, []int) { + return file_messaging_proto_rawDescGZIP(), []int{8} +} + +type GetTopicConfigurationRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Namespace string `protobuf:"bytes,1,opt,name=namespace,proto3" json:"namespace,omitempty"` + Topic string `protobuf:"bytes,2,opt,name=topic,proto3" json:"topic,omitempty"` +} + +func (x *GetTopicConfigurationRequest) Reset() { + *x = GetTopicConfigurationRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_messaging_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetTopicConfigurationRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetTopicConfigurationRequest) ProtoMessage() {} + +func (x *GetTopicConfigurationRequest) ProtoReflect() protoreflect.Message { + mi := &file_messaging_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetTopicConfigurationRequest.ProtoReflect.Descriptor instead. 
+func (*GetTopicConfigurationRequest) Descriptor() ([]byte, []int) { + return file_messaging_proto_rawDescGZIP(), []int{9} +} + +func (x *GetTopicConfigurationRequest) GetNamespace() string { + if x != nil { + return x.Namespace + } + return "" +} + +func (x *GetTopicConfigurationRequest) GetTopic() string { + if x != nil { + return x.Topic + } + return "" +} + +type GetTopicConfigurationResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Configuration *TopicConfiguration `protobuf:"bytes,1,opt,name=configuration,proto3" json:"configuration,omitempty"` +} + +func (x *GetTopicConfigurationResponse) Reset() { + *x = GetTopicConfigurationResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_messaging_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetTopicConfigurationResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetTopicConfigurationResponse) ProtoMessage() {} + +func (x *GetTopicConfigurationResponse) ProtoReflect() protoreflect.Message { + mi := &file_messaging_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetTopicConfigurationResponse.ProtoReflect.Descriptor instead. +func (*GetTopicConfigurationResponse) Descriptor() ([]byte, []int) { + return file_messaging_proto_rawDescGZIP(), []int{10} +} + +func (x *GetTopicConfigurationResponse) GetConfiguration() *TopicConfiguration { + if x != nil { + return x.Configuration + } + return nil +} + +type FindBrokerRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Namespace string `protobuf:"bytes,1,opt,name=namespace,proto3" json:"namespace,omitempty"` + Topic string `protobuf:"bytes,2,opt,name=topic,proto3" json:"topic,omitempty"` + Parition int32 `protobuf:"varint,3,opt,name=parition,proto3" json:"parition,omitempty"` +} + +func (x *FindBrokerRequest) Reset() { + *x = FindBrokerRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_messaging_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FindBrokerRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FindBrokerRequest) ProtoMessage() {} + +func (x *FindBrokerRequest) ProtoReflect() protoreflect.Message { + mi := &file_messaging_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FindBrokerRequest.ProtoReflect.Descriptor instead. 
+func (*FindBrokerRequest) Descriptor() ([]byte, []int) { + return file_messaging_proto_rawDescGZIP(), []int{11} +} + +func (x *FindBrokerRequest) GetNamespace() string { + if x != nil { + return x.Namespace + } + return "" +} + +func (x *FindBrokerRequest) GetTopic() string { + if x != nil { + return x.Topic + } + return "" +} + +func (x *FindBrokerRequest) GetParition() int32 { + if x != nil { + return x.Parition + } + return 0 +} + +type FindBrokerResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Broker string `protobuf:"bytes,1,opt,name=broker,proto3" json:"broker,omitempty"` +} + +func (x *FindBrokerResponse) Reset() { + *x = FindBrokerResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_messaging_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FindBrokerResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FindBrokerResponse) ProtoMessage() {} + +func (x *FindBrokerResponse) ProtoReflect() protoreflect.Message { + mi := &file_messaging_proto_msgTypes[12] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FindBrokerResponse.ProtoReflect.Descriptor instead. +func (*FindBrokerResponse) Descriptor() ([]byte, []int) { + return file_messaging_proto_rawDescGZIP(), []int{12} +} + +func (x *FindBrokerResponse) GetBroker() string { + if x != nil { + return x.Broker + } + return "" +} + +type TopicConfiguration struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + PartitionCount int32 `protobuf:"varint,1,opt,name=partition_count,json=partitionCount,proto3" json:"partition_count,omitempty"` + Collection string `protobuf:"bytes,2,opt,name=collection,proto3" json:"collection,omitempty"` + Replication string `protobuf:"bytes,3,opt,name=replication,proto3" json:"replication,omitempty"` + IsTransient bool `protobuf:"varint,4,opt,name=is_transient,json=isTransient,proto3" json:"is_transient,omitempty"` + Partitoning TopicConfiguration_Partitioning `protobuf:"varint,5,opt,name=partitoning,proto3,enum=messaging_pb.TopicConfiguration_Partitioning" json:"partitoning,omitempty"` +} + +func (x *TopicConfiguration) Reset() { + *x = TopicConfiguration{} + if protoimpl.UnsafeEnabled { + mi := &file_messaging_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TopicConfiguration) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TopicConfiguration) ProtoMessage() {} + +func (x *TopicConfiguration) ProtoReflect() protoreflect.Message { + mi := &file_messaging_proto_msgTypes[13] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TopicConfiguration.ProtoReflect.Descriptor instead. 
+func (*TopicConfiguration) Descriptor() ([]byte, []int) { + return file_messaging_proto_rawDescGZIP(), []int{13} +} + +func (x *TopicConfiguration) GetPartitionCount() int32 { + if x != nil { + return x.PartitionCount + } + return 0 +} + +func (x *TopicConfiguration) GetCollection() string { + if x != nil { + return x.Collection + } + return "" +} + +func (x *TopicConfiguration) GetReplication() string { + if x != nil { + return x.Replication + } + return "" +} + +func (x *TopicConfiguration) GetIsTransient() bool { + if x != nil { + return x.IsTransient + } + return false +} + +func (x *TopicConfiguration) GetPartitoning() TopicConfiguration_Partitioning { + if x != nil { + return x.Partitoning + } + return TopicConfiguration_NonNullKeyHash +} + +type SubscriberMessage_InitMessage struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Namespace string `protobuf:"bytes,1,opt,name=namespace,proto3" json:"namespace,omitempty"` + Topic string `protobuf:"bytes,2,opt,name=topic,proto3" json:"topic,omitempty"` + Partition int32 `protobuf:"varint,3,opt,name=partition,proto3" json:"partition,omitempty"` + StartPosition SubscriberMessage_InitMessage_StartPosition `protobuf:"varint,4,opt,name=startPosition,proto3,enum=messaging_pb.SubscriberMessage_InitMessage_StartPosition" json:"startPosition,omitempty"` // Where to begin consuming from + TimestampNs int64 `protobuf:"varint,5,opt,name=timestampNs,proto3" json:"timestampNs,omitempty"` // timestamp in nanoseconds + SubscriberId string `protobuf:"bytes,6,opt,name=subscriber_id,json=subscriberId,proto3" json:"subscriber_id,omitempty"` // uniquely identify a subscriber to track consumption +} + +func (x *SubscriberMessage_InitMessage) Reset() { + *x = SubscriberMessage_InitMessage{} + if protoimpl.UnsafeEnabled { + mi := &file_messaging_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SubscriberMessage_InitMessage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SubscriberMessage_InitMessage) ProtoMessage() {} + +func (x *SubscriberMessage_InitMessage) ProtoReflect() protoreflect.Message { + mi := &file_messaging_proto_msgTypes[14] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SubscriberMessage_InitMessage.ProtoReflect.Descriptor instead.
+func (*SubscriberMessage_InitMessage) Descriptor() ([]byte, []int) { + return file_messaging_proto_rawDescGZIP(), []int{0, 0} +} + +func (x *SubscriberMessage_InitMessage) GetNamespace() string { + if x != nil { + return x.Namespace + } + return "" +} + +func (x *SubscriberMessage_InitMessage) GetTopic() string { + if x != nil { + return x.Topic + } + return "" +} + +func (x *SubscriberMessage_InitMessage) GetPartition() int32 { + if x != nil { + return x.Partition + } + return 0 +} + +func (x *SubscriberMessage_InitMessage) GetStartPosition() SubscriberMessage_InitMessage_StartPosition { + if x != nil { + return x.StartPosition + } + return SubscriberMessage_InitMessage_LATEST +} + +func (x *SubscriberMessage_InitMessage) GetTimestampNs() int64 { + if x != nil { + return x.TimestampNs + } + return 0 +} + +func (x *SubscriberMessage_InitMessage) GetSubscriberId() string { + if x != nil { + return x.SubscriberId + } + return "" +} + +type SubscriberMessage_AckMessage struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + MessageId int64 `protobuf:"varint,1,opt,name=message_id,json=messageId,proto3" json:"message_id,omitempty"` +} + +func (x *SubscriberMessage_AckMessage) Reset() { + *x = SubscriberMessage_AckMessage{} + if protoimpl.UnsafeEnabled { + mi := &file_messaging_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SubscriberMessage_AckMessage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SubscriberMessage_AckMessage) ProtoMessage() {} + +func (x *SubscriberMessage_AckMessage) ProtoReflect() protoreflect.Message { + mi := &file_messaging_proto_msgTypes[15] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SubscriberMessage_AckMessage.ProtoReflect.Descriptor instead. 
+func (*SubscriberMessage_AckMessage) Descriptor() ([]byte, []int) { + return file_messaging_proto_rawDescGZIP(), []int{0, 1} +} + +func (x *SubscriberMessage_AckMessage) GetMessageId() int64 { + if x != nil { + return x.MessageId + } + return 0 +} + +type PublishRequest_InitMessage struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Namespace string `protobuf:"bytes,1,opt,name=namespace,proto3" json:"namespace,omitempty"` // only needed on the initial request + Topic string `protobuf:"bytes,2,opt,name=topic,proto3" json:"topic,omitempty"` // only needed on the initial request + Partition int32 `protobuf:"varint,3,opt,name=partition,proto3" json:"partition,omitempty"` +} + +func (x *PublishRequest_InitMessage) Reset() { + *x = PublishRequest_InitMessage{} + if protoimpl.UnsafeEnabled { + mi := &file_messaging_proto_msgTypes[17] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PublishRequest_InitMessage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PublishRequest_InitMessage) ProtoMessage() {} + +func (x *PublishRequest_InitMessage) ProtoReflect() protoreflect.Message { + mi := &file_messaging_proto_msgTypes[17] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PublishRequest_InitMessage.ProtoReflect.Descriptor instead. +func (*PublishRequest_InitMessage) Descriptor() ([]byte, []int) { + return file_messaging_proto_rawDescGZIP(), []int{3, 0} +} + +func (x *PublishRequest_InitMessage) GetNamespace() string { + if x != nil { + return x.Namespace + } + return "" +} + +func (x *PublishRequest_InitMessage) GetTopic() string { + if x != nil { + return x.Topic + } + return "" +} + +func (x *PublishRequest_InitMessage) GetPartition() int32 { + if x != nil { + return x.Partition + } + return 0 +} + +type PublishResponse_ConfigMessage struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + PartitionCount int32 `protobuf:"varint,1,opt,name=partition_count,json=partitionCount,proto3" json:"partition_count,omitempty"` +} + +func (x *PublishResponse_ConfigMessage) Reset() { + *x = PublishResponse_ConfigMessage{} + if protoimpl.UnsafeEnabled { + mi := &file_messaging_proto_msgTypes[18] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PublishResponse_ConfigMessage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PublishResponse_ConfigMessage) ProtoMessage() {} + +func (x *PublishResponse_ConfigMessage) ProtoReflect() protoreflect.Message { + mi := &file_messaging_proto_msgTypes[18] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PublishResponse_ConfigMessage.ProtoReflect.Descriptor instead. 
+func (*PublishResponse_ConfigMessage) Descriptor() ([]byte, []int) { + return file_messaging_proto_rawDescGZIP(), []int{4, 0} +} + +func (x *PublishResponse_ConfigMessage) GetPartitionCount() int32 { + if x != nil { + return x.PartitionCount + } + return 0 +} + +type PublishResponse_RedirectMessage struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + NewBroker string `protobuf:"bytes,1,opt,name=new_broker,json=newBroker,proto3" json:"new_broker,omitempty"` +} + +func (x *PublishResponse_RedirectMessage) Reset() { + *x = PublishResponse_RedirectMessage{} + if protoimpl.UnsafeEnabled { + mi := &file_messaging_proto_msgTypes[19] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PublishResponse_RedirectMessage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PublishResponse_RedirectMessage) ProtoMessage() {} + +func (x *PublishResponse_RedirectMessage) ProtoReflect() protoreflect.Message { + mi := &file_messaging_proto_msgTypes[19] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PublishResponse_RedirectMessage.ProtoReflect.Descriptor instead. +func (*PublishResponse_RedirectMessage) Descriptor() ([]byte, []int) { + return file_messaging_proto_rawDescGZIP(), []int{4, 1} +} + +func (x *PublishResponse_RedirectMessage) GetNewBroker() string { + if x != nil { + return x.NewBroker + } + return "" +} + +var File_messaging_proto protoreflect.FileDescriptor + +var file_messaging_proto_rawDesc = []byte{ + 0x0a, 0x0f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x12, 0x0c, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x62, 0x22, + 0x9e, 0x04, 0x0a, 0x11, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x72, 0x4d, 0x65, + 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x3f, 0x0a, 0x04, 0x69, 0x6e, 0x69, 0x74, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x5f, + 0x70, 0x62, 0x2e, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x72, 0x4d, 0x65, 0x73, + 0x73, 0x61, 0x67, 0x65, 0x2e, 0x49, 0x6e, 0x69, 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, + 0x52, 0x04, 0x69, 0x6e, 0x69, 0x74, 0x12, 0x3c, 0x0a, 0x03, 0x61, 0x63, 0x6b, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x5f, + 0x70, 0x62, 0x2e, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x72, 0x4d, 0x65, 0x73, + 0x73, 0x61, 0x67, 0x65, 0x2e, 0x41, 0x63, 0x6b, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, + 0x03, 0x61, 0x63, 0x6b, 0x12, 0x19, 0x0a, 0x08, 0x69, 0x73, 0x5f, 0x63, 0x6c, 0x6f, 0x73, 0x65, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x69, 0x73, 0x43, 0x6c, 0x6f, 0x73, 0x65, 0x1a, + 0xc1, 0x02, 0x0a, 0x0b, 0x49, 0x6e, 0x69, 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, + 0x1c, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, + 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, 0x6f, + 0x70, 0x69, 0x63, 0x12, 0x1c, 0x0a, 0x09, 0x70, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x09, 
0x70, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, + 0x6e, 0x12, 0x5f, 0x0a, 0x0d, 0x73, 0x74, 0x61, 0x72, 0x74, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, + 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x39, 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, + 0x67, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x62, 0x2e, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, + 0x65, 0x72, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x49, 0x6e, 0x69, 0x74, 0x4d, 0x65, + 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x50, 0x6f, 0x73, 0x69, 0x74, + 0x69, 0x6f, 0x6e, 0x52, 0x0d, 0x73, 0x74, 0x61, 0x72, 0x74, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, + 0x6f, 0x6e, 0x12, 0x20, 0x0a, 0x0b, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x4e, + 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0b, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, + 0x6d, 0x70, 0x4e, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, + 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x73, 0x75, 0x62, + 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x72, 0x49, 0x64, 0x22, 0x38, 0x0a, 0x0d, 0x53, 0x74, 0x61, + 0x72, 0x74, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x0a, 0x0a, 0x06, 0x4c, 0x41, + 0x54, 0x45, 0x53, 0x54, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x45, 0x41, 0x52, 0x4c, 0x49, 0x45, + 0x53, 0x54, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x54, 0x49, 0x4d, 0x45, 0x53, 0x54, 0x41, 0x4d, + 0x50, 0x10, 0x02, 0x1a, 0x2b, 0x0a, 0x0a, 0x41, 0x63, 0x6b, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, + 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x69, 0x64, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x49, 0x64, + 0x22, 0xee, 0x01, 0x0a, 0x07, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x26, 0x0a, 0x0d, + 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x6e, 0x73, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x03, 0x42, 0x02, 0x30, 0x01, 0x52, 0x0b, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x69, + 0x6d, 0x65, 0x4e, 0x73, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0c, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x3c, 0x0a, 0x07, + 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22, 0x2e, + 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x62, 0x2e, 0x4d, 0x65, 0x73, + 0x73, 0x61, 0x67, 0x65, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x52, 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x12, 0x19, 0x0a, 0x08, 0x69, 0x73, + 0x5f, 0x63, 0x6c, 0x6f, 0x73, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x69, 0x73, + 0x43, 0x6c, 0x6f, 0x73, 0x65, 0x1a, 0x3a, 0x0a, 0x0c, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, + 0x01, 0x22, 0x3a, 0x0a, 0x0d, 0x42, 0x72, 0x6f, 0x6b, 0x65, 0x72, 0x4d, 0x65, 0x73, 0x73, 0x61, + 0x67, 0x65, 0x12, 0x29, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x15, 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x62, 0x2e, + 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x04, 0x64, 0x61, 0x74, 
0x61, 0x22, 0xda, 0x01, + 0x0a, 0x0e, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x3c, 0x0a, 0x04, 0x69, 0x6e, 0x69, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x28, + 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x62, 0x2e, 0x50, 0x75, + 0x62, 0x6c, 0x69, 0x73, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x49, 0x6e, 0x69, + 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x04, 0x69, 0x6e, 0x69, 0x74, 0x12, 0x29, + 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x6d, + 0x65, 0x73, 0x73, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x62, 0x2e, 0x4d, 0x65, 0x73, 0x73, + 0x61, 0x67, 0x65, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x1a, 0x5f, 0x0a, 0x0b, 0x49, 0x6e, 0x69, + 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, + 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6e, 0x61, 0x6d, + 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x12, 0x1c, 0x0a, 0x09, + 0x70, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, + 0x09, 0x70, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xaa, 0x02, 0x0a, 0x0f, 0x50, + 0x75, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x43, + 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, + 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x62, 0x2e, 0x50, 0x75, + 0x62, 0x6c, 0x69, 0x73, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x06, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x12, 0x49, 0x0a, 0x08, 0x72, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x69, 0x6e, + 0x67, 0x5f, 0x70, 0x62, 0x2e, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x52, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x4d, 0x65, 0x73, + 0x73, 0x61, 0x67, 0x65, 0x52, 0x08, 0x72, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x12, 0x1b, + 0x0a, 0x09, 0x69, 0x73, 0x5f, 0x63, 0x6c, 0x6f, 0x73, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x08, 0x69, 0x73, 0x43, 0x6c, 0x6f, 0x73, 0x65, 0x64, 0x1a, 0x38, 0x0a, 0x0d, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x27, 0x0a, 0x0f, + 0x70, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0e, 0x70, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, + 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x1a, 0x30, 0x0a, 0x0f, 0x52, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, + 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x6e, 0x65, 0x77, 0x5f, + 0x62, 0x72, 0x6f, 0x6b, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6e, 0x65, + 0x77, 0x42, 0x72, 0x6f, 0x6b, 0x65, 0x72, 0x22, 0x48, 0x0a, 0x12, 0x44, 0x65, 0x6c, 0x65, 0x74, + 0x65, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1c, 0x0a, + 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x74, + 
0x6f, 0x70, 0x69, 0x63, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, 0x6f, 0x70, 0x69, + 0x63, 0x22, 0x15, 0x0a, 0x13, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x54, 0x6f, 0x70, 0x69, 0x63, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x93, 0x01, 0x0a, 0x15, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, + 0x12, 0x14, 0x0a, 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x12, 0x46, 0x0a, 0x0d, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, + 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x62, 0x2e, 0x54, 0x6f, 0x70, + 0x69, 0x63, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, + 0x0d, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x18, + 0x0a, 0x16, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x54, 0x6f, 0x70, 0x69, 0x63, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x52, 0x0a, 0x1c, 0x47, 0x65, 0x74, 0x54, + 0x6f, 0x70, 0x69, 0x63, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, + 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6e, 0x61, 0x6d, + 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x22, 0x67, 0x0a, 0x1d, + 0x47, 0x65, 0x74, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x46, 0x0a, + 0x0d, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x69, 0x6e, 0x67, + 0x5f, 0x70, 0x62, 0x2e, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0d, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x63, 0x0a, 0x11, 0x46, 0x69, 0x6e, 0x64, 0x42, 0x72, 0x6f, + 0x6b, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x6e, 0x61, + 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6e, + 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x6f, 0x70, 0x69, + 0x63, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x12, 0x1a, + 0x0a, 0x08, 0x70, 0x61, 0x72, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, + 0x52, 0x08, 0x70, 0x61, 0x72, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x2c, 0x0a, 0x12, 0x46, 0x69, + 0x6e, 0x64, 0x42, 0x72, 0x6f, 0x6b, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x16, 0x0a, 0x06, 0x62, 0x72, 0x6f, 0x6b, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x06, 0x62, 0x72, 0x6f, 0x6b, 0x65, 0x72, 0x22, 0xb4, 0x02, 0x0a, 0x12, 0x54, 0x6f, 0x70, + 0x69, 0x63, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, + 0x27, 0x0a, 0x0f, 0x70, 
0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, 0x75, + 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0e, 0x70, 0x61, 0x72, 0x74, 0x69, 0x74, + 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6f, + 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x20, 0x0a, 0x0b, 0x72, 0x65, 0x70, 0x6c, + 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x72, + 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x21, 0x0a, 0x0c, 0x69, 0x73, + 0x5f, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x69, 0x65, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x0b, 0x69, 0x73, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x69, 0x65, 0x6e, 0x74, 0x12, 0x4f, 0x0a, + 0x0b, 0x70, 0x61, 0x72, 0x74, 0x69, 0x74, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x18, 0x05, 0x20, 0x01, + 0x28, 0x0e, 0x32, 0x2d, 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x5f, 0x70, + 0x62, 0x2e, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x69, 0x6e, + 0x67, 0x52, 0x0b, 0x70, 0x61, 0x72, 0x74, 0x69, 0x74, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x22, 0x3f, + 0x0a, 0x0c, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x12, 0x12, + 0x0a, 0x0e, 0x4e, 0x6f, 0x6e, 0x4e, 0x75, 0x6c, 0x6c, 0x4b, 0x65, 0x79, 0x48, 0x61, 0x73, 0x68, + 0x10, 0x00, 0x12, 0x0b, 0x0a, 0x07, 0x4b, 0x65, 0x79, 0x48, 0x61, 0x73, 0x68, 0x10, 0x01, 0x12, + 0x0e, 0x0a, 0x0a, 0x52, 0x6f, 0x75, 0x6e, 0x64, 0x52, 0x6f, 0x62, 0x69, 0x6e, 0x10, 0x02, 0x32, + 0xad, 0x04, 0x0a, 0x10, 0x53, 0x65, 0x61, 0x77, 0x65, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, + 0x67, 0x69, 0x6e, 0x67, 0x12, 0x4f, 0x0a, 0x09, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, + 0x65, 0x12, 0x1f, 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x62, + 0x2e, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x72, 0x4d, 0x65, 0x73, 0x73, 0x61, + 0x67, 0x65, 0x1a, 0x1b, 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x5f, 0x70, + 0x62, 0x2e, 0x42, 0x72, 0x6f, 0x6b, 0x65, 0x72, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, + 0x00, 0x28, 0x01, 0x30, 0x01, 0x12, 0x4c, 0x0a, 0x07, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x73, 0x68, + 0x12, 0x1c, 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x62, 0x2e, + 0x50, 0x75, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, + 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x62, 0x2e, 0x50, 0x75, + 0x62, 0x6c, 0x69, 0x73, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x28, + 0x01, 0x30, 0x01, 0x12, 0x54, 0x0a, 0x0b, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x54, 0x6f, 0x70, + 0x69, 0x63, 0x12, 0x20, 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x5f, 0x70, + 0x62, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x69, 0x6e, 0x67, + 0x5f, 0x70, 0x62, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x5d, 0x0a, 0x0e, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x12, 0x23, 0x2e, 0x6d, 0x65, + 0x73, 0x73, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x5f, 
0x70, 0x62, 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x75, 0x72, 0x65, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x24, 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x62, 0x2e, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x72, 0x0a, 0x15, 0x47, 0x65, 0x74, 0x54, + 0x6f, 0x70, 0x69, 0x63, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x12, 0x2a, 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x62, + 0x2e, 0x47, 0x65, 0x74, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2b, 0x2e, + 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x62, 0x2e, 0x47, 0x65, 0x74, + 0x54, 0x6f, 0x70, 0x69, 0x63, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x51, 0x0a, 0x0a, + 0x46, 0x69, 0x6e, 0x64, 0x42, 0x72, 0x6f, 0x6b, 0x65, 0x72, 0x12, 0x1f, 0x2e, 0x6d, 0x65, 0x73, + 0x73, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x62, 0x2e, 0x46, 0x69, 0x6e, 0x64, 0x42, 0x72, + 0x6f, 0x6b, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x6d, 0x65, + 0x73, 0x73, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x62, 0x2e, 0x46, 0x69, 0x6e, 0x64, 0x42, + 0x72, 0x6f, 0x6b, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x42, + 0x57, 0x0a, 0x10, 0x73, 0x65, 0x61, 0x77, 0x65, 0x65, 0x64, 0x66, 0x73, 0x2e, 0x63, 0x6c, 0x69, + 0x65, 0x6e, 0x74, 0x42, 0x0e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x50, 0x72, + 0x6f, 0x74, 0x6f, 0x5a, 0x33, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, + 0x63, 0x68, 0x72, 0x69, 0x73, 0x6c, 0x75, 0x73, 0x66, 0x2f, 0x73, 0x65, 0x61, 0x77, 0x65, 0x65, + 0x64, 0x66, 0x73, 0x2f, 0x77, 0x65, 0x65, 0x64, 0x2f, 0x70, 0x62, 0x2f, 0x6d, 0x65, 0x73, 0x73, + 0x61, 0x67, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x62, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_messaging_proto_rawDescOnce sync.Once + file_messaging_proto_rawDescData = file_messaging_proto_rawDesc +) + +func file_messaging_proto_rawDescGZIP() []byte { + file_messaging_proto_rawDescOnce.Do(func() { + file_messaging_proto_rawDescData = protoimpl.X.CompressGZIP(file_messaging_proto_rawDescData) + }) + return file_messaging_proto_rawDescData +} + +var file_messaging_proto_enumTypes = make([]protoimpl.EnumInfo, 2) +var file_messaging_proto_msgTypes = make([]protoimpl.MessageInfo, 20) +var file_messaging_proto_goTypes = []interface{}{ + (SubscriberMessage_InitMessage_StartPosition)(0), // 0: messaging_pb.SubscriberMessage.InitMessage.StartPosition + (TopicConfiguration_Partitioning)(0), // 1: messaging_pb.TopicConfiguration.Partitioning + (*SubscriberMessage)(nil), // 2: messaging_pb.SubscriberMessage + (*Message)(nil), // 3: messaging_pb.Message + (*BrokerMessage)(nil), // 4: messaging_pb.BrokerMessage + (*PublishRequest)(nil), // 5: messaging_pb.PublishRequest + (*PublishResponse)(nil), // 6: messaging_pb.PublishResponse + (*DeleteTopicRequest)(nil), // 7: messaging_pb.DeleteTopicRequest + (*DeleteTopicResponse)(nil), // 8: messaging_pb.DeleteTopicResponse + (*ConfigureTopicRequest)(nil), // 9: messaging_pb.ConfigureTopicRequest + (*ConfigureTopicResponse)(nil), // 10: messaging_pb.ConfigureTopicResponse 
+ (*GetTopicConfigurationRequest)(nil), // 11: messaging_pb.GetTopicConfigurationRequest + (*GetTopicConfigurationResponse)(nil), // 12: messaging_pb.GetTopicConfigurationResponse + (*FindBrokerRequest)(nil), // 13: messaging_pb.FindBrokerRequest + (*FindBrokerResponse)(nil), // 14: messaging_pb.FindBrokerResponse + (*TopicConfiguration)(nil), // 15: messaging_pb.TopicConfiguration + (*SubscriberMessage_InitMessage)(nil), // 16: messaging_pb.SubscriberMessage.InitMessage + (*SubscriberMessage_AckMessage)(nil), // 17: messaging_pb.SubscriberMessage.AckMessage + nil, // 18: messaging_pb.Message.HeadersEntry + (*PublishRequest_InitMessage)(nil), // 19: messaging_pb.PublishRequest.InitMessage + (*PublishResponse_ConfigMessage)(nil), // 20: messaging_pb.PublishResponse.ConfigMessage + (*PublishResponse_RedirectMessage)(nil), // 21: messaging_pb.PublishResponse.RedirectMessage +} +var file_messaging_proto_depIdxs = []int32{ + 16, // 0: messaging_pb.SubscriberMessage.init:type_name -> messaging_pb.SubscriberMessage.InitMessage + 17, // 1: messaging_pb.SubscriberMessage.ack:type_name -> messaging_pb.SubscriberMessage.AckMessage + 18, // 2: messaging_pb.Message.headers:type_name -> messaging_pb.Message.HeadersEntry + 3, // 3: messaging_pb.BrokerMessage.data:type_name -> messaging_pb.Message + 19, // 4: messaging_pb.PublishRequest.init:type_name -> messaging_pb.PublishRequest.InitMessage + 3, // 5: messaging_pb.PublishRequest.data:type_name -> messaging_pb.Message + 20, // 6: messaging_pb.PublishResponse.config:type_name -> messaging_pb.PublishResponse.ConfigMessage + 21, // 7: messaging_pb.PublishResponse.redirect:type_name -> messaging_pb.PublishResponse.RedirectMessage + 15, // 8: messaging_pb.ConfigureTopicRequest.configuration:type_name -> messaging_pb.TopicConfiguration + 15, // 9: messaging_pb.GetTopicConfigurationResponse.configuration:type_name -> messaging_pb.TopicConfiguration + 1, // 10: messaging_pb.TopicConfiguration.partitoning:type_name -> messaging_pb.TopicConfiguration.Partitioning + 0, // 11: messaging_pb.SubscriberMessage.InitMessage.startPosition:type_name -> messaging_pb.SubscriberMessage.InitMessage.StartPosition + 2, // 12: messaging_pb.SeaweedMessaging.Subscribe:input_type -> messaging_pb.SubscriberMessage + 5, // 13: messaging_pb.SeaweedMessaging.Publish:input_type -> messaging_pb.PublishRequest + 7, // 14: messaging_pb.SeaweedMessaging.DeleteTopic:input_type -> messaging_pb.DeleteTopicRequest + 9, // 15: messaging_pb.SeaweedMessaging.ConfigureTopic:input_type -> messaging_pb.ConfigureTopicRequest + 11, // 16: messaging_pb.SeaweedMessaging.GetTopicConfiguration:input_type -> messaging_pb.GetTopicConfigurationRequest + 13, // 17: messaging_pb.SeaweedMessaging.FindBroker:input_type -> messaging_pb.FindBrokerRequest + 4, // 18: messaging_pb.SeaweedMessaging.Subscribe:output_type -> messaging_pb.BrokerMessage + 6, // 19: messaging_pb.SeaweedMessaging.Publish:output_type -> messaging_pb.PublishResponse + 8, // 20: messaging_pb.SeaweedMessaging.DeleteTopic:output_type -> messaging_pb.DeleteTopicResponse + 10, // 21: messaging_pb.SeaweedMessaging.ConfigureTopic:output_type -> messaging_pb.ConfigureTopicResponse + 12, // 22: messaging_pb.SeaweedMessaging.GetTopicConfiguration:output_type -> messaging_pb.GetTopicConfigurationResponse + 14, // 23: messaging_pb.SeaweedMessaging.FindBroker:output_type -> messaging_pb.FindBrokerResponse + 18, // [18:24] is the sub-list for method output_type + 12, // [12:18] is the sub-list for method input_type + 12, // [12:12] is the sub-list for 
extension type_name + 12, // [12:12] is the sub-list for extension extendee + 0, // [0:12] is the sub-list for field type_name +} + +func init() { file_messaging_proto_init() } +func file_messaging_proto_init() { + if File_messaging_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_messaging_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SubscriberMessage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_messaging_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Message); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_messaging_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BrokerMessage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_messaging_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PublishRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_messaging_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PublishResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_messaging_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DeleteTopicRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_messaging_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DeleteTopicResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_messaging_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ConfigureTopicRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_messaging_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ConfigureTopicResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_messaging_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetTopicConfigurationRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_messaging_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetTopicConfigurationResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_messaging_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FindBrokerRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_messaging_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FindBrokerResponse); i { + case 0: + return &v.state 
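+			// case 0 returned the message state above; cases 1 and 2 below expose
+			// the size cache and unknown fields, so the reflection runtime can reach
+			// these unexported fields when the unsafe code path is disabled.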
+ case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_messaging_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TopicConfiguration); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_messaging_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SubscriberMessage_InitMessage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_messaging_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SubscriberMessage_AckMessage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_messaging_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PublishRequest_InitMessage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_messaging_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PublishResponse_ConfigMessage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_messaging_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PublishResponse_RedirectMessage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_messaging_proto_rawDesc, + NumEnums: 2, + NumMessages: 20, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_messaging_proto_goTypes, + DependencyIndexes: file_messaging_proto_depIdxs, + EnumInfos: file_messaging_proto_enumTypes, + MessageInfos: file_messaging_proto_msgTypes, + }.Build() + File_messaging_proto = out.File + file_messaging_proto_rawDesc = nil + file_messaging_proto_goTypes = nil + file_messaging_proto_depIdxs = nil +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConnInterface + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion6 + +// SeaweedMessagingClient is the client API for SeaweedMessaging service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. 
+type SeaweedMessagingClient interface { + Subscribe(ctx context.Context, opts ...grpc.CallOption) (SeaweedMessaging_SubscribeClient, error) + Publish(ctx context.Context, opts ...grpc.CallOption) (SeaweedMessaging_PublishClient, error) + DeleteTopic(ctx context.Context, in *DeleteTopicRequest, opts ...grpc.CallOption) (*DeleteTopicResponse, error) + ConfigureTopic(ctx context.Context, in *ConfigureTopicRequest, opts ...grpc.CallOption) (*ConfigureTopicResponse, error) + GetTopicConfiguration(ctx context.Context, in *GetTopicConfigurationRequest, opts ...grpc.CallOption) (*GetTopicConfigurationResponse, error) + FindBroker(ctx context.Context, in *FindBrokerRequest, opts ...grpc.CallOption) (*FindBrokerResponse, error) +} + +type seaweedMessagingClient struct { + cc grpc.ClientConnInterface +} + +func NewSeaweedMessagingClient(cc grpc.ClientConnInterface) SeaweedMessagingClient { + return &seaweedMessagingClient{cc} +} + +func (c *seaweedMessagingClient) Subscribe(ctx context.Context, opts ...grpc.CallOption) (SeaweedMessaging_SubscribeClient, error) { + stream, err := c.cc.NewStream(ctx, &_SeaweedMessaging_serviceDesc.Streams[0], "/messaging_pb.SeaweedMessaging/Subscribe", opts...) + if err != nil { + return nil, err + } + x := &seaweedMessagingSubscribeClient{stream} + return x, nil +} + +type SeaweedMessaging_SubscribeClient interface { + Send(*SubscriberMessage) error + Recv() (*BrokerMessage, error) + grpc.ClientStream +} + +type seaweedMessagingSubscribeClient struct { + grpc.ClientStream +} + +func (x *seaweedMessagingSubscribeClient) Send(m *SubscriberMessage) error { + return x.ClientStream.SendMsg(m) +} + +func (x *seaweedMessagingSubscribeClient) Recv() (*BrokerMessage, error) { + m := new(BrokerMessage) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *seaweedMessagingClient) Publish(ctx context.Context, opts ...grpc.CallOption) (SeaweedMessaging_PublishClient, error) { + stream, err := c.cc.NewStream(ctx, &_SeaweedMessaging_serviceDesc.Streams[1], "/messaging_pb.SeaweedMessaging/Publish", opts...) + if err != nil { + return nil, err + } + x := &seaweedMessagingPublishClient{stream} + return x, nil +} + +type SeaweedMessaging_PublishClient interface { + Send(*PublishRequest) error + Recv() (*PublishResponse, error) + grpc.ClientStream +} + +type seaweedMessagingPublishClient struct { + grpc.ClientStream +} + +func (x *seaweedMessagingPublishClient) Send(m *PublishRequest) error { + return x.ClientStream.SendMsg(m) +} + +func (x *seaweedMessagingPublishClient) Recv() (*PublishResponse, error) { + m := new(PublishResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *seaweedMessagingClient) DeleteTopic(ctx context.Context, in *DeleteTopicRequest, opts ...grpc.CallOption) (*DeleteTopicResponse, error) { + out := new(DeleteTopicResponse) + err := c.cc.Invoke(ctx, "/messaging_pb.SeaweedMessaging/DeleteTopic", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *seaweedMessagingClient) ConfigureTopic(ctx context.Context, in *ConfigureTopicRequest, opts ...grpc.CallOption) (*ConfigureTopicResponse, error) { + out := new(ConfigureTopicResponse) + err := c.cc.Invoke(ctx, "/messaging_pb.SeaweedMessaging/ConfigureTopic", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *seaweedMessagingClient) GetTopicConfiguration(ctx context.Context, in *GetTopicConfigurationRequest, opts ...grpc.CallOption) (*GetTopicConfigurationResponse, error) { + out := new(GetTopicConfigurationResponse) + err := c.cc.Invoke(ctx, "/messaging_pb.SeaweedMessaging/GetTopicConfiguration", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *seaweedMessagingClient) FindBroker(ctx context.Context, in *FindBrokerRequest, opts ...grpc.CallOption) (*FindBrokerResponse, error) { + out := new(FindBrokerResponse) + err := c.cc.Invoke(ctx, "/messaging_pb.SeaweedMessaging/FindBroker", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// SeaweedMessagingServer is the server API for SeaweedMessaging service. +type SeaweedMessagingServer interface { + Subscribe(SeaweedMessaging_SubscribeServer) error + Publish(SeaweedMessaging_PublishServer) error + DeleteTopic(context.Context, *DeleteTopicRequest) (*DeleteTopicResponse, error) + ConfigureTopic(context.Context, *ConfigureTopicRequest) (*ConfigureTopicResponse, error) + GetTopicConfiguration(context.Context, *GetTopicConfigurationRequest) (*GetTopicConfigurationResponse, error) + FindBroker(context.Context, *FindBrokerRequest) (*FindBrokerResponse, error) +} + +// UnimplementedSeaweedMessagingServer can be embedded to have forward compatible implementations. +type UnimplementedSeaweedMessagingServer struct { +} + +func (*UnimplementedSeaweedMessagingServer) Subscribe(SeaweedMessaging_SubscribeServer) error { + return status.Errorf(codes.Unimplemented, "method Subscribe not implemented") +} +func (*UnimplementedSeaweedMessagingServer) Publish(SeaweedMessaging_PublishServer) error { + return status.Errorf(codes.Unimplemented, "method Publish not implemented") +} +func (*UnimplementedSeaweedMessagingServer) DeleteTopic(context.Context, *DeleteTopicRequest) (*DeleteTopicResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method DeleteTopic not implemented") +} +func (*UnimplementedSeaweedMessagingServer) ConfigureTopic(context.Context, *ConfigureTopicRequest) (*ConfigureTopicResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ConfigureTopic not implemented") +} +func (*UnimplementedSeaweedMessagingServer) GetTopicConfiguration(context.Context, *GetTopicConfigurationRequest) (*GetTopicConfigurationResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetTopicConfiguration not implemented") +} +func (*UnimplementedSeaweedMessagingServer) FindBroker(context.Context, *FindBrokerRequest) (*FindBrokerResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method FindBroker not implemented") +} + +func RegisterSeaweedMessagingServer(s *grpc.Server, srv SeaweedMessagingServer) { + s.RegisterService(&_SeaweedMessaging_serviceDesc, srv) +} + +func _SeaweedMessaging_Subscribe_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(SeaweedMessagingServer).Subscribe(&seaweedMessagingSubscribeServer{stream}) +} + +type SeaweedMessaging_SubscribeServer interface { + Send(*BrokerMessage) error + Recv() (*SubscriberMessage, error) + grpc.ServerStream +} + +type seaweedMessagingSubscribeServer struct { + grpc.ServerStream +} + +func (x *seaweedMessagingSubscribeServer) Send(m *BrokerMessage) error { + return x.ServerStream.SendMsg(m) +} + +func (x *seaweedMessagingSubscribeServer) Recv() (*SubscriberMessage, error) { + m := 
new(SubscriberMessage) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func _SeaweedMessaging_Publish_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(SeaweedMessagingServer).Publish(&seaweedMessagingPublishServer{stream}) +} + +type SeaweedMessaging_PublishServer interface { + Send(*PublishResponse) error + Recv() (*PublishRequest, error) + grpc.ServerStream +} + +type seaweedMessagingPublishServer struct { + grpc.ServerStream +} + +func (x *seaweedMessagingPublishServer) Send(m *PublishResponse) error { + return x.ServerStream.SendMsg(m) +} + +func (x *seaweedMessagingPublishServer) Recv() (*PublishRequest, error) { + m := new(PublishRequest) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func _SeaweedMessaging_DeleteTopic_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteTopicRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SeaweedMessagingServer).DeleteTopic(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/messaging_pb.SeaweedMessaging/DeleteTopic", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SeaweedMessagingServer).DeleteTopic(ctx, req.(*DeleteTopicRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _SeaweedMessaging_ConfigureTopic_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ConfigureTopicRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SeaweedMessagingServer).ConfigureTopic(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/messaging_pb.SeaweedMessaging/ConfigureTopic", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SeaweedMessagingServer).ConfigureTopic(ctx, req.(*ConfigureTopicRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _SeaweedMessaging_GetTopicConfiguration_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetTopicConfigurationRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SeaweedMessagingServer).GetTopicConfiguration(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/messaging_pb.SeaweedMessaging/GetTopicConfiguration", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SeaweedMessagingServer).GetTopicConfiguration(ctx, req.(*GetTopicConfigurationRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _SeaweedMessaging_FindBroker_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(FindBrokerRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SeaweedMessagingServer).FindBroker(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/messaging_pb.SeaweedMessaging/FindBroker", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SeaweedMessagingServer).FindBroker(ctx, req.(*FindBrokerRequest)) 
+ } + return interceptor(ctx, in, info, handler) +} + +var _SeaweedMessaging_serviceDesc = grpc.ServiceDesc{ + ServiceName: "messaging_pb.SeaweedMessaging", + HandlerType: (*SeaweedMessagingServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "DeleteTopic", + Handler: _SeaweedMessaging_DeleteTopic_Handler, + }, + { + MethodName: "ConfigureTopic", + Handler: _SeaweedMessaging_ConfigureTopic_Handler, + }, + { + MethodName: "GetTopicConfiguration", + Handler: _SeaweedMessaging_GetTopicConfiguration_Handler, + }, + { + MethodName: "FindBroker", + Handler: _SeaweedMessaging_FindBroker_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "Subscribe", + Handler: _SeaweedMessaging_Subscribe_Handler, + ServerStreams: true, + ClientStreams: true, + }, + { + StreamName: "Publish", + Handler: _SeaweedMessaging_Publish_Handler, + ServerStreams: true, + ClientStreams: true, + }, + }, + Metadata: "messaging.proto", +} diff --git a/weed/pb/shared_values.go b/weed/pb/shared_values.go new file mode 100644 index 000000000..1af19e51a --- /dev/null +++ b/weed/pb/shared_values.go @@ -0,0 +1,5 @@ +package pb + +const ( + AdminShellClient = "adminShell" +) diff --git a/weed/pb/volume_info.go b/weed/pb/volume_info.go index b2edf9c5e..cae9e018f 100644 --- a/weed/pb/volume_info.go +++ b/weed/pb/volume_info.go @@ -15,39 +15,49 @@ import ( ) // MaybeLoadVolumeInfo load the file data as *volume_server_pb.VolumeInfo, the returned volumeInfo will not be nil -func MaybeLoadVolumeInfo(fileName string) (*volume_server_pb.VolumeInfo, bool) { +func MaybeLoadVolumeInfo(fileName string) (volumeInfo *volume_server_pb.VolumeInfo, hasRemoteFile bool, hasVolumeInfoFile bool, err error) { - volumeInfo := &volume_server_pb.VolumeInfo{} + volumeInfo = &volume_server_pb.VolumeInfo{} glog.V(1).Infof("maybeLoadVolumeInfo checks %s", fileName) if exists, canRead, _, _, _ := util.CheckFile(fileName); !exists || !canRead { if !exists { - return volumeInfo, false + return } + hasVolumeInfoFile = true if !canRead { glog.Warningf("can not read %s", fileName) + err = fmt.Errorf("can not read %s", fileName) + return } - return volumeInfo, false + return } + hasVolumeInfoFile = true + glog.V(1).Infof("maybeLoadVolumeInfo reads %s", fileName) tierData, readErr := ioutil.ReadFile(fileName) if readErr != nil { glog.Warningf("fail to read %s : %v", fileName, readErr) - return volumeInfo, false + err = fmt.Errorf("fail to read %s : %v", fileName, readErr) + return + } glog.V(1).Infof("maybeLoadVolumeInfo Unmarshal volume info %v", fileName) - if err := jsonpb.Unmarshal(bytes.NewReader(tierData), volumeInfo); err != nil { + if err = jsonpb.Unmarshal(bytes.NewReader(tierData), volumeInfo); err != nil { glog.Warningf("unmarshal error: %v", err) - return volumeInfo, false + err = fmt.Errorf("unmarshal error: %v", err) + return } if len(volumeInfo.GetFiles()) == 0 { - return volumeInfo, false + return } - return volumeInfo, true + hasRemoteFile = true + + return } func SaveVolumeInfo(fileName string, volumeInfo *volume_server_pb.VolumeInfo) error { diff --git a/weed/pb/volume_server.proto b/weed/pb/volume_server.proto index 9cf7272ef..f9836c402 100644 --- a/weed/pb/volume_server.proto +++ b/weed/pb/volume_server.proto @@ -1,6 +1,7 @@ syntax = "proto3"; package volume_server_pb; +option go_package = "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"; ////////////////////////////////////////////////// @@ -8,6 +9,7 @@ service VolumeServer { //Experts only: takes multiple fid parameters. 
This function does not propagate deletes to replicas. rpc BatchDelete (BatchDeleteRequest) returns (BatchDeleteResponse) { } + rpc VacuumVolumeCheck (VacuumVolumeCheckRequest) returns (VacuumVolumeCheckResponse) { } rpc VacuumVolumeCompact (VacuumVolumeCompactRequest) returns (VacuumVolumeCompactResponse) { @@ -35,6 +37,12 @@ service VolumeServer { } rpc VolumeMarkReadonly (VolumeMarkReadonlyRequest) returns (VolumeMarkReadonlyResponse) { } + rpc VolumeMarkWritable (VolumeMarkWritableRequest) returns (VolumeMarkWritableResponse) { + } + rpc VolumeConfigure (VolumeConfigureRequest) returns (VolumeConfigureResponse) { + } + rpc VolumeStatus (VolumeStatusRequest) returns (VolumeStatusResponse) { + } // copy the .idx .dat files, and mount this volume rpc VolumeCopy (VolumeCopyRequest) returns (VolumeCopyResponse) { @@ -44,6 +52,11 @@ service VolumeServer { rpc CopyFile (CopyFileRequest) returns (stream CopyFileResponse) { } + rpc ReadNeedleBlob (ReadNeedleBlobRequest) returns (ReadNeedleBlobResponse) { + } + rpc WriteNeedleBlob (WriteNeedleBlobRequest) returns (WriteNeedleBlobResponse) { + } + rpc VolumeTailSender (VolumeTailSenderRequest) returns (stream VolumeTailSenderResponse) { } rpc VolumeTailReceiver (VolumeTailReceiverRequest) returns (VolumeTailReceiverResponse) { @@ -75,16 +88,24 @@ service VolumeServer { rpc VolumeTierMoveDatFromRemote (VolumeTierMoveDatFromRemoteRequest) returns (stream VolumeTierMoveDatFromRemoteResponse) { } - // query + rpc VolumeServerStatus (VolumeServerStatusRequest) returns (VolumeServerStatusResponse) { + } + rpc VolumeServerLeave (VolumeServerLeaveRequest) returns (VolumeServerLeaveResponse) { + } + + // query rpc Query (QueryRequest) returns (stream QueriedStripe) { } + rpc VolumeNeedleStatus (VolumeNeedleStatusRequest) returns (VolumeNeedleStatusResponse) { + } } ////////////////////////////////////////////////// message BatchDeleteRequest { repeated string file_ids = 1; + bool skip_cookie_check = 2; } message BatchDeleteResponse { @@ -119,6 +140,7 @@ message VacuumVolumeCommitRequest { uint32 volume_id = 1; } message VacuumVolumeCommitResponse { + bool is_read_only = 1; } message VacuumVolumeCleanupRequest { @@ -140,6 +162,7 @@ message AllocateVolumeRequest { string replication = 4; string ttl = 5; uint32 memory_map_max_size_mb = 6; + string disk_type = 7; } message AllocateVolumeResponse { } @@ -189,12 +212,34 @@ message VolumeMarkReadonlyRequest { message VolumeMarkReadonlyResponse { } +message VolumeMarkWritableRequest { + uint32 volume_id = 1; +} +message VolumeMarkWritableResponse { +} + +message VolumeConfigureRequest { + uint32 volume_id = 1; + string replication = 2; +} +message VolumeConfigureResponse { + string error = 1; +} + +message VolumeStatusRequest { + uint32 volume_id = 1; +} +message VolumeStatusResponse { + bool is_read_only = 1; +} + message VolumeCopyRequest { uint32 volume_id = 1; string collection = 2; string replication = 3; string ttl = 4; string source_data_node = 5; + string disk_type = 6; } message VolumeCopyResponse { uint64 last_append_at_ns = 1; @@ -213,6 +258,25 @@ message CopyFileResponse { bytes file_content = 1; } +message ReadNeedleBlobRequest { + uint32 volume_id = 1; + uint64 needle_id = 2; + int64 offset = 3; // actual offset + int32 size = 4; +} +message ReadNeedleBlobResponse { + bytes needle_blob = 1; +} + +message WriteNeedleBlobRequest { + uint32 volume_id = 1; + uint64 needle_id = 2; + int32 size = 3; + bytes needle_blob = 4; +} +message WriteNeedleBlobResponse { +} + message VolumeTailSenderRequest { uint32 
volume_id = 1; uint64 since_ns = 2; @@ -323,6 +387,7 @@ message ReadVolumeFileStatusResponse { uint64 file_count = 6; uint32 compaction_revision = 7; string collection = 8; + string disk_type = 9; } message DiskStatus { @@ -330,6 +395,9 @@ message DiskStatus { uint64 all = 2; uint64 used = 3; uint64 free = 4; + float percent_free = 5; + float percent_used = 6; + string disk_type = 7; } message MemStatus { @@ -355,6 +423,7 @@ message RemoteFile { message VolumeInfo { repeated RemoteFile files = 1; uint32 version = 2; + string replication = 3; } message VolumeTierMoveDatToRemoteRequest { @@ -378,6 +447,19 @@ message VolumeTierMoveDatFromRemoteResponse { float processedPercentage = 2; } +message VolumeServerStatusRequest { + +} +message VolumeServerStatusResponse { + repeated DiskStatus disk_statuses = 1; + MemStatus memory_status = 2; +} + +message VolumeServerLeaveRequest { +} +message VolumeServerLeaveResponse { +} + // select on volume servers message QueryRequest { repeated string selections = 1; @@ -435,3 +517,16 @@ message QueryRequest { message QueriedStripe { bytes records = 1; } + +message VolumeNeedleStatusRequest { + uint32 volume_id = 1; + uint64 needle_id = 2; +} +message VolumeNeedleStatusResponse { + uint64 needle_id = 1; + uint32 cookie = 2; + uint32 size = 3; + uint64 last_modified = 4; + uint32 crc = 5; + string ttl = 6; +} diff --git a/weed/pb/volume_server_pb/volume_server.pb.go b/weed/pb/volume_server_pb/volume_server.pb.go index 1c2e10d8e..c642142ba 100644 --- a/weed/pb/volume_server_pb/volume_server.pb.go +++ b/weed/pb/volume_server_pb/volume_server.pb.go @@ -1,2114 +1,7262 @@ -// Code generated by protoc-gen-go. +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.25.0 +// protoc v3.12.3 // source: volume_server.proto -// DO NOT EDIT! - -/* -Package volume_server_pb is a generated protocol buffer package. 
- -It is generated from these files: - volume_server.proto - -It has these top-level messages: - BatchDeleteRequest - BatchDeleteResponse - DeleteResult - Empty - VacuumVolumeCheckRequest - VacuumVolumeCheckResponse - VacuumVolumeCompactRequest - VacuumVolumeCompactResponse - VacuumVolumeCommitRequest - VacuumVolumeCommitResponse - VacuumVolumeCleanupRequest - VacuumVolumeCleanupResponse - DeleteCollectionRequest - DeleteCollectionResponse - AllocateVolumeRequest - AllocateVolumeResponse - VolumeSyncStatusRequest - VolumeSyncStatusResponse - VolumeIncrementalCopyRequest - VolumeIncrementalCopyResponse - VolumeMountRequest - VolumeMountResponse - VolumeUnmountRequest - VolumeUnmountResponse - VolumeDeleteRequest - VolumeDeleteResponse - VolumeMarkReadonlyRequest - VolumeMarkReadonlyResponse - VolumeCopyRequest - VolumeCopyResponse - CopyFileRequest - CopyFileResponse - VolumeTailSenderRequest - VolumeTailSenderResponse - VolumeTailReceiverRequest - VolumeTailReceiverResponse - VolumeEcShardsGenerateRequest - VolumeEcShardsGenerateResponse - VolumeEcShardsRebuildRequest - VolumeEcShardsRebuildResponse - VolumeEcShardsCopyRequest - VolumeEcShardsCopyResponse - VolumeEcShardsDeleteRequest - VolumeEcShardsDeleteResponse - VolumeEcShardsMountRequest - VolumeEcShardsMountResponse - VolumeEcShardsUnmountRequest - VolumeEcShardsUnmountResponse - VolumeEcShardReadRequest - VolumeEcShardReadResponse - VolumeEcBlobDeleteRequest - VolumeEcBlobDeleteResponse - VolumeEcShardsToVolumeRequest - VolumeEcShardsToVolumeResponse - ReadVolumeFileStatusRequest - ReadVolumeFileStatusResponse - DiskStatus - MemStatus - RemoteFile - VolumeInfo - VolumeTierMoveDatToRemoteRequest - VolumeTierMoveDatToRemoteResponse - VolumeTierMoveDatFromRemoteRequest - VolumeTierMoveDatFromRemoteResponse - QueryRequest - QueriedStripe -*/ -package volume_server_pb -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" +package volume_server_pb import ( - context "golang.org/x/net/context" + context "context" + proto "github.com/golang/protobuf/proto" grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" ) -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package +// This is a compile-time assertion that a sufficiently up-to-date version +// of the legacy proto package is being used. 
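+// (ProtoPackageIsVersion4 is only defined by sufficiently recent releases of
+// the legacy github.com/golang/protobuf module — v1.4+, the first series backed
+// by the google.golang.org/protobuf runtime — so older copies fail to compile
+// against this file.)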
+const _ = proto.ProtoPackageIsVersion4 type BatchDeleteRequest struct { - FileIds []string `protobuf:"bytes,1,rep,name=file_ids,json=fileIds" json:"file_ids,omitempty"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + FileIds []string `protobuf:"bytes,1,rep,name=file_ids,json=fileIds,proto3" json:"file_ids,omitempty"` + SkipCookieCheck bool `protobuf:"varint,2,opt,name=skip_cookie_check,json=skipCookieCheck,proto3" json:"skip_cookie_check,omitempty"` +} + +func (x *BatchDeleteRequest) Reset() { + *x = BatchDeleteRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *BatchDeleteRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BatchDeleteRequest) ProtoMessage() {} + +func (x *BatchDeleteRequest) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -func (m *BatchDeleteRequest) Reset() { *m = BatchDeleteRequest{} } -func (m *BatchDeleteRequest) String() string { return proto.CompactTextString(m) } -func (*BatchDeleteRequest) ProtoMessage() {} -func (*BatchDeleteRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } +// Deprecated: Use BatchDeleteRequest.ProtoReflect.Descriptor instead. +func (*BatchDeleteRequest) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{0} +} -func (m *BatchDeleteRequest) GetFileIds() []string { - if m != nil { - return m.FileIds +func (x *BatchDeleteRequest) GetFileIds() []string { + if x != nil { + return x.FileIds } return nil } +func (x *BatchDeleteRequest) GetSkipCookieCheck() bool { + if x != nil { + return x.SkipCookieCheck + } + return false +} + type BatchDeleteResponse struct { - Results []*DeleteResult `protobuf:"bytes,1,rep,name=results" json:"results,omitempty"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Results []*DeleteResult `protobuf:"bytes,1,rep,name=results,proto3" json:"results,omitempty"` +} + +func (x *BatchDeleteResponse) Reset() { + *x = BatchDeleteResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *BatchDeleteResponse) Reset() { *m = BatchDeleteResponse{} } -func (m *BatchDeleteResponse) String() string { return proto.CompactTextString(m) } -func (*BatchDeleteResponse) ProtoMessage() {} -func (*BatchDeleteResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } +func (x *BatchDeleteResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BatchDeleteResponse) ProtoMessage() {} + +func (x *BatchDeleteResponse) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BatchDeleteResponse.ProtoReflect.Descriptor instead. 
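+// The legacy form returns the gzipped FileDescriptorProto plus the index path
+// of this message within it; new code should go through ProtoReflect() instead.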
+func (*BatchDeleteResponse) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{1} +} -func (m *BatchDeleteResponse) GetResults() []*DeleteResult { - if m != nil { - return m.Results +func (x *BatchDeleteResponse) GetResults() []*DeleteResult { + if x != nil { + return x.Results } return nil } type DeleteResult struct { - FileId string `protobuf:"bytes,1,opt,name=file_id,json=fileId" json:"file_id,omitempty"` - Status int32 `protobuf:"varint,2,opt,name=status" json:"status,omitempty"` - Error string `protobuf:"bytes,3,opt,name=error" json:"error,omitempty"` - Size uint32 `protobuf:"varint,4,opt,name=size" json:"size,omitempty"` - Version uint32 `protobuf:"varint,5,opt,name=version" json:"version,omitempty"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + FileId string `protobuf:"bytes,1,opt,name=file_id,json=fileId,proto3" json:"file_id,omitempty"` + Status int32 `protobuf:"varint,2,opt,name=status,proto3" json:"status,omitempty"` + Error string `protobuf:"bytes,3,opt,name=error,proto3" json:"error,omitempty"` + Size uint32 `protobuf:"varint,4,opt,name=size,proto3" json:"size,omitempty"` + Version uint32 `protobuf:"varint,5,opt,name=version,proto3" json:"version,omitempty"` } -func (m *DeleteResult) Reset() { *m = DeleteResult{} } -func (m *DeleteResult) String() string { return proto.CompactTextString(m) } -func (*DeleteResult) ProtoMessage() {} -func (*DeleteResult) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } +func (x *DeleteResult) Reset() { + *x = DeleteResult{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DeleteResult) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DeleteResult) ProtoMessage() {} + +func (x *DeleteResult) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} -func (m *DeleteResult) GetFileId() string { - if m != nil { - return m.FileId +// Deprecated: Use DeleteResult.ProtoReflect.Descriptor instead. 
+func (*DeleteResult) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{2} +} + +func (x *DeleteResult) GetFileId() string { + if x != nil { + return x.FileId } return "" } -func (m *DeleteResult) GetStatus() int32 { - if m != nil { - return m.Status +func (x *DeleteResult) GetStatus() int32 { + if x != nil { + return x.Status } return 0 } -func (m *DeleteResult) GetError() string { - if m != nil { - return m.Error +func (x *DeleteResult) GetError() string { + if x != nil { + return x.Error } return "" } -func (m *DeleteResult) GetSize() uint32 { - if m != nil { - return m.Size +func (x *DeleteResult) GetSize() uint32 { + if x != nil { + return x.Size } return 0 } -func (m *DeleteResult) GetVersion() uint32 { - if m != nil { - return m.Version +func (x *DeleteResult) GetVersion() uint32 { + if x != nil { + return x.Version } return 0 } type Empty struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *Empty) Reset() { + *x = Empty{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Empty) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Empty) ProtoMessage() {} + +func (x *Empty) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -func (m *Empty) Reset() { *m = Empty{} } -func (m *Empty) String() string { return proto.CompactTextString(m) } -func (*Empty) ProtoMessage() {} -func (*Empty) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } +// Deprecated: Use Empty.ProtoReflect.Descriptor instead. +func (*Empty) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{3} +} type VacuumVolumeCheckRequest struct { - VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` +} + +func (x *VacuumVolumeCheckRequest) Reset() { + *x = VacuumVolumeCheckRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *VacuumVolumeCheckRequest) Reset() { *m = VacuumVolumeCheckRequest{} } -func (m *VacuumVolumeCheckRequest) String() string { return proto.CompactTextString(m) } -func (*VacuumVolumeCheckRequest) ProtoMessage() {} -func (*VacuumVolumeCheckRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } +func (x *VacuumVolumeCheckRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VacuumVolumeCheckRequest) ProtoMessage() {} + +func (x *VacuumVolumeCheckRequest) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use VacuumVolumeCheckRequest.ProtoReflect.Descriptor instead. 
+func (*VacuumVolumeCheckRequest) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{4} +} -func (m *VacuumVolumeCheckRequest) GetVolumeId() uint32 { - if m != nil { - return m.VolumeId +func (x *VacuumVolumeCheckRequest) GetVolumeId() uint32 { + if x != nil { + return x.VolumeId } return 0 } type VacuumVolumeCheckResponse struct { - GarbageRatio float64 `protobuf:"fixed64,1,opt,name=garbage_ratio,json=garbageRatio" json:"garbage_ratio,omitempty"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + GarbageRatio float64 `protobuf:"fixed64,1,opt,name=garbage_ratio,json=garbageRatio,proto3" json:"garbage_ratio,omitempty"` } -func (m *VacuumVolumeCheckResponse) Reset() { *m = VacuumVolumeCheckResponse{} } -func (m *VacuumVolumeCheckResponse) String() string { return proto.CompactTextString(m) } -func (*VacuumVolumeCheckResponse) ProtoMessage() {} -func (*VacuumVolumeCheckResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} } +func (x *VacuumVolumeCheckResponse) Reset() { + *x = VacuumVolumeCheckResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *VacuumVolumeCheckResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VacuumVolumeCheckResponse) ProtoMessage() {} + +func (x *VacuumVolumeCheckResponse) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} -func (m *VacuumVolumeCheckResponse) GetGarbageRatio() float64 { - if m != nil { - return m.GarbageRatio +// Deprecated: Use VacuumVolumeCheckResponse.ProtoReflect.Descriptor instead. 
+func (*VacuumVolumeCheckResponse) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{5} +} + +func (x *VacuumVolumeCheckResponse) GetGarbageRatio() float64 { + if x != nil { + return x.GarbageRatio } return 0 } type VacuumVolumeCompactRequest struct { - VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` - Preallocate int64 `protobuf:"varint,2,opt,name=preallocate" json:"preallocate,omitempty"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` + Preallocate int64 `protobuf:"varint,2,opt,name=preallocate,proto3" json:"preallocate,omitempty"` +} + +func (x *VacuumVolumeCompactRequest) Reset() { + *x = VacuumVolumeCompactRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *VacuumVolumeCompactRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *VacuumVolumeCompactRequest) Reset() { *m = VacuumVolumeCompactRequest{} } -func (m *VacuumVolumeCompactRequest) String() string { return proto.CompactTextString(m) } -func (*VacuumVolumeCompactRequest) ProtoMessage() {} -func (*VacuumVolumeCompactRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} } +func (*VacuumVolumeCompactRequest) ProtoMessage() {} -func (m *VacuumVolumeCompactRequest) GetVolumeId() uint32 { - if m != nil { - return m.VolumeId +func (x *VacuumVolumeCompactRequest) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use VacuumVolumeCompactRequest.ProtoReflect.Descriptor instead. 
+func (*VacuumVolumeCompactRequest) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{6} +} + +func (x *VacuumVolumeCompactRequest) GetVolumeId() uint32 { + if x != nil { + return x.VolumeId } return 0 } -func (m *VacuumVolumeCompactRequest) GetPreallocate() int64 { - if m != nil { - return m.Preallocate +func (x *VacuumVolumeCompactRequest) GetPreallocate() int64 { + if x != nil { + return x.Preallocate } return 0 } type VacuumVolumeCompactResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields } -func (m *VacuumVolumeCompactResponse) Reset() { *m = VacuumVolumeCompactResponse{} } -func (m *VacuumVolumeCompactResponse) String() string { return proto.CompactTextString(m) } -func (*VacuumVolumeCompactResponse) ProtoMessage() {} -func (*VacuumVolumeCompactResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} } +func (x *VacuumVolumeCompactResponse) Reset() { + *x = VacuumVolumeCompactResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} -type VacuumVolumeCommitRequest struct { - VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` +func (x *VacuumVolumeCompactResponse) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *VacuumVolumeCommitRequest) Reset() { *m = VacuumVolumeCommitRequest{} } -func (m *VacuumVolumeCommitRequest) String() string { return proto.CompactTextString(m) } -func (*VacuumVolumeCommitRequest) ProtoMessage() {} -func (*VacuumVolumeCommitRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} } +func (*VacuumVolumeCompactResponse) ProtoMessage() {} -func (m *VacuumVolumeCommitRequest) GetVolumeId() uint32 { - if m != nil { - return m.VolumeId +func (x *VacuumVolumeCompactResponse) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return 0 + return mi.MessageOf(x) } -type VacuumVolumeCommitResponse struct { +// Deprecated: Use VacuumVolumeCompactResponse.ProtoReflect.Descriptor instead. 
+func (*VacuumVolumeCompactResponse) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{7} } -func (m *VacuumVolumeCommitResponse) Reset() { *m = VacuumVolumeCommitResponse{} } -func (m *VacuumVolumeCommitResponse) String() string { return proto.CompactTextString(m) } -func (*VacuumVolumeCommitResponse) ProtoMessage() {} -func (*VacuumVolumeCommitResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} } +type VacuumVolumeCommitRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -type VacuumVolumeCleanupRequest struct { - VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` + VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` } -func (m *VacuumVolumeCleanupRequest) Reset() { *m = VacuumVolumeCleanupRequest{} } -func (m *VacuumVolumeCleanupRequest) String() string { return proto.CompactTextString(m) } -func (*VacuumVolumeCleanupRequest) ProtoMessage() {} -func (*VacuumVolumeCleanupRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10} } - -func (m *VacuumVolumeCleanupRequest) GetVolumeId() uint32 { - if m != nil { - return m.VolumeId +func (x *VacuumVolumeCommitRequest) Reset() { + *x = VacuumVolumeCommitRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return 0 } -type VacuumVolumeCleanupResponse struct { +func (x *VacuumVolumeCommitRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *VacuumVolumeCleanupResponse) Reset() { *m = VacuumVolumeCleanupResponse{} } -func (m *VacuumVolumeCleanupResponse) String() string { return proto.CompactTextString(m) } -func (*VacuumVolumeCleanupResponse) ProtoMessage() {} -func (*VacuumVolumeCleanupResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11} } +func (*VacuumVolumeCommitRequest) ProtoMessage() {} -type DeleteCollectionRequest struct { - Collection string `protobuf:"bytes,1,opt,name=collection" json:"collection,omitempty"` +func (x *VacuumVolumeCommitRequest) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -func (m *DeleteCollectionRequest) Reset() { *m = DeleteCollectionRequest{} } -func (m *DeleteCollectionRequest) String() string { return proto.CompactTextString(m) } -func (*DeleteCollectionRequest) ProtoMessage() {} -func (*DeleteCollectionRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{12} } +// Deprecated: Use VacuumVolumeCommitRequest.ProtoReflect.Descriptor instead. 
+func (*VacuumVolumeCommitRequest) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{8} +} -func (m *DeleteCollectionRequest) GetCollection() string { - if m != nil { - return m.Collection +func (x *VacuumVolumeCommitRequest) GetVolumeId() uint32 { + if x != nil { + return x.VolumeId } - return "" + return 0 } -type DeleteCollectionResponse struct { +type VacuumVolumeCommitResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + IsReadOnly bool `protobuf:"varint,1,opt,name=is_read_only,json=isReadOnly,proto3" json:"is_read_only,omitempty"` } -func (m *DeleteCollectionResponse) Reset() { *m = DeleteCollectionResponse{} } -func (m *DeleteCollectionResponse) String() string { return proto.CompactTextString(m) } -func (*DeleteCollectionResponse) ProtoMessage() {} -func (*DeleteCollectionResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{13} } +func (x *VacuumVolumeCommitResponse) Reset() { + *x = VacuumVolumeCommitResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} -type AllocateVolumeRequest struct { - VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` - Collection string `protobuf:"bytes,2,opt,name=collection" json:"collection,omitempty"` - Preallocate int64 `protobuf:"varint,3,opt,name=preallocate" json:"preallocate,omitempty"` - Replication string `protobuf:"bytes,4,opt,name=replication" json:"replication,omitempty"` - Ttl string `protobuf:"bytes,5,opt,name=ttl" json:"ttl,omitempty"` - MemoryMapMaxSizeMb uint32 `protobuf:"varint,6,opt,name=memory_map_max_size_mb,json=memoryMapMaxSizeMb" json:"memory_map_max_size_mb,omitempty"` +func (x *VacuumVolumeCommitResponse) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *AllocateVolumeRequest) Reset() { *m = AllocateVolumeRequest{} } -func (m *AllocateVolumeRequest) String() string { return proto.CompactTextString(m) } -func (*AllocateVolumeRequest) ProtoMessage() {} -func (*AllocateVolumeRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{14} } +func (*VacuumVolumeCommitResponse) ProtoMessage() {} -func (m *AllocateVolumeRequest) GetVolumeId() uint32 { - if m != nil { - return m.VolumeId +func (x *VacuumVolumeCommitResponse) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return 0 + return mi.MessageOf(x) } -func (m *AllocateVolumeRequest) GetCollection() string { - if m != nil { - return m.Collection - } - return "" +// Deprecated: Use VacuumVolumeCommitResponse.ProtoReflect.Descriptor instead. 
+func (*VacuumVolumeCommitResponse) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{9} } -func (m *AllocateVolumeRequest) GetPreallocate() int64 { - if m != nil { - return m.Preallocate +func (x *VacuumVolumeCommitResponse) GetIsReadOnly() bool { + if x != nil { + return x.IsReadOnly } - return 0 + return false +} + +type VacuumVolumeCleanupRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` } -func (m *AllocateVolumeRequest) GetReplication() string { - if m != nil { - return m.Replication +func (x *VacuumVolumeCleanupRequest) Reset() { + *x = VacuumVolumeCleanupRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return "" } -func (m *AllocateVolumeRequest) GetTtl() string { - if m != nil { - return m.Ttl +func (x *VacuumVolumeCleanupRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VacuumVolumeCleanupRequest) ProtoMessage() {} + +func (x *VacuumVolumeCleanupRequest) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return "" + return mi.MessageOf(x) } -func (m *AllocateVolumeRequest) GetMemoryMapMaxSizeMb() uint32 { - if m != nil { - return m.MemoryMapMaxSizeMb +// Deprecated: Use VacuumVolumeCleanupRequest.ProtoReflect.Descriptor instead. +func (*VacuumVolumeCleanupRequest) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{10} +} + +func (x *VacuumVolumeCleanupRequest) GetVolumeId() uint32 { + if x != nil { + return x.VolumeId } return 0 } -type AllocateVolumeResponse struct { +type VacuumVolumeCleanupResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields } -func (m *AllocateVolumeResponse) Reset() { *m = AllocateVolumeResponse{} } -func (m *AllocateVolumeResponse) String() string { return proto.CompactTextString(m) } -func (*AllocateVolumeResponse) ProtoMessage() {} -func (*AllocateVolumeResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{15} } +func (x *VacuumVolumeCleanupResponse) Reset() { + *x = VacuumVolumeCleanupResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} -type VolumeSyncStatusRequest struct { - VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` +func (x *VacuumVolumeCleanupResponse) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *VolumeSyncStatusRequest) Reset() { *m = VolumeSyncStatusRequest{} } -func (m *VolumeSyncStatusRequest) String() string { return proto.CompactTextString(m) } -func (*VolumeSyncStatusRequest) ProtoMessage() {} -func (*VolumeSyncStatusRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{16} } +func (*VacuumVolumeCleanupResponse) ProtoMessage() {} -func (m *VolumeSyncStatusRequest) GetVolumeId() uint32 { - if m != nil { - return m.VolumeId +func (x *VacuumVolumeCleanupResponse) ProtoReflect() protoreflect.Message { + mi := 
&file_volume_server_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return 0 + return mi.MessageOf(x) } -type VolumeSyncStatusResponse struct { - VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` - Collection string `protobuf:"bytes,2,opt,name=collection" json:"collection,omitempty"` - Replication string `protobuf:"bytes,4,opt,name=replication" json:"replication,omitempty"` - Ttl string `protobuf:"bytes,5,opt,name=ttl" json:"ttl,omitempty"` - TailOffset uint64 `protobuf:"varint,6,opt,name=tail_offset,json=tailOffset" json:"tail_offset,omitempty"` - CompactRevision uint32 `protobuf:"varint,7,opt,name=compact_revision,json=compactRevision" json:"compact_revision,omitempty"` - IdxFileSize uint64 `protobuf:"varint,8,opt,name=idx_file_size,json=idxFileSize" json:"idx_file_size,omitempty"` +// Deprecated: Use VacuumVolumeCleanupResponse.ProtoReflect.Descriptor instead. +func (*VacuumVolumeCleanupResponse) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{11} } -func (m *VolumeSyncStatusResponse) Reset() { *m = VolumeSyncStatusResponse{} } -func (m *VolumeSyncStatusResponse) String() string { return proto.CompactTextString(m) } -func (*VolumeSyncStatusResponse) ProtoMessage() {} -func (*VolumeSyncStatusResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{17} } +type DeleteCollectionRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func (m *VolumeSyncStatusResponse) GetVolumeId() uint32 { - if m != nil { - return m.VolumeId - } - return 0 + Collection string `protobuf:"bytes,1,opt,name=collection,proto3" json:"collection,omitempty"` } -func (m *VolumeSyncStatusResponse) GetCollection() string { - if m != nil { - return m.Collection +func (x *DeleteCollectionRequest) Reset() { + *x = DeleteCollectionRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return "" } -func (m *VolumeSyncStatusResponse) GetReplication() string { - if m != nil { - return m.Replication +func (x *DeleteCollectionRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DeleteCollectionRequest) ProtoMessage() {} + +func (x *DeleteCollectionRequest) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[12] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return "" + return mi.MessageOf(x) +} + +// Deprecated: Use DeleteCollectionRequest.ProtoReflect.Descriptor instead. 
+func (*DeleteCollectionRequest) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{12} } -func (m *VolumeSyncStatusResponse) GetTtl() string { - if m != nil { - return m.Ttl +func (x *DeleteCollectionRequest) GetCollection() string { + if x != nil { + return x.Collection } return "" } -func (m *VolumeSyncStatusResponse) GetTailOffset() uint64 { - if m != nil { - return m.TailOffset +type DeleteCollectionResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *DeleteCollectionResponse) Reset() { + *x = DeleteCollectionResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return 0 } -func (m *VolumeSyncStatusResponse) GetCompactRevision() uint32 { - if m != nil { - return m.CompactRevision +func (x *DeleteCollectionResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DeleteCollectionResponse) ProtoMessage() {} + +func (x *DeleteCollectionResponse) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[13] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return 0 + return mi.MessageOf(x) +} + +// Deprecated: Use DeleteCollectionResponse.ProtoReflect.Descriptor instead. +func (*DeleteCollectionResponse) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{13} +} + +type AllocateVolumeRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` + Collection string `protobuf:"bytes,2,opt,name=collection,proto3" json:"collection,omitempty"` + Preallocate int64 `protobuf:"varint,3,opt,name=preallocate,proto3" json:"preallocate,omitempty"` + Replication string `protobuf:"bytes,4,opt,name=replication,proto3" json:"replication,omitempty"` + Ttl string `protobuf:"bytes,5,opt,name=ttl,proto3" json:"ttl,omitempty"` + MemoryMapMaxSizeMb uint32 `protobuf:"varint,6,opt,name=memory_map_max_size_mb,json=memoryMapMaxSizeMb,proto3" json:"memory_map_max_size_mb,omitempty"` + DiskType string `protobuf:"bytes,7,opt,name=disk_type,json=diskType,proto3" json:"disk_type,omitempty"` } -func (m *VolumeSyncStatusResponse) GetIdxFileSize() uint64 { - if m != nil { - return m.IdxFileSize +func (x *AllocateVolumeRequest) Reset() { + *x = AllocateVolumeRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return 0 } -type VolumeIncrementalCopyRequest struct { - VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` - SinceNs uint64 `protobuf:"varint,2,opt,name=since_ns,json=sinceNs" json:"since_ns,omitempty"` +func (x *AllocateVolumeRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *VolumeIncrementalCopyRequest) Reset() { *m = VolumeIncrementalCopyRequest{} } -func (m *VolumeIncrementalCopyRequest) String() string { return proto.CompactTextString(m) } -func (*VolumeIncrementalCopyRequest) ProtoMessage() {} -func (*VolumeIncrementalCopyRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{18} } +func 
(*AllocateVolumeRequest) ProtoMessage() {} -func (m *VolumeIncrementalCopyRequest) GetVolumeId() uint32 { - if m != nil { - return m.VolumeId +func (x *AllocateVolumeRequest) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[14] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return 0 + return mi.MessageOf(x) } -func (m *VolumeIncrementalCopyRequest) GetSinceNs() uint64 { - if m != nil { - return m.SinceNs +// Deprecated: Use AllocateVolumeRequest.ProtoReflect.Descriptor instead. +func (*AllocateVolumeRequest) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{14} +} + +func (x *AllocateVolumeRequest) GetVolumeId() uint32 { + if x != nil { + return x.VolumeId } return 0 } -type VolumeIncrementalCopyResponse struct { - FileContent []byte `protobuf:"bytes,1,opt,name=file_content,json=fileContent,proto3" json:"file_content,omitempty"` +func (x *AllocateVolumeRequest) GetCollection() string { + if x != nil { + return x.Collection + } + return "" } -func (m *VolumeIncrementalCopyResponse) Reset() { *m = VolumeIncrementalCopyResponse{} } -func (m *VolumeIncrementalCopyResponse) String() string { return proto.CompactTextString(m) } -func (*VolumeIncrementalCopyResponse) ProtoMessage() {} -func (*VolumeIncrementalCopyResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{19} } - -func (m *VolumeIncrementalCopyResponse) GetFileContent() []byte { - if m != nil { - return m.FileContent +func (x *AllocateVolumeRequest) GetPreallocate() int64 { + if x != nil { + return x.Preallocate } - return nil + return 0 } -type VolumeMountRequest struct { - VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` +func (x *AllocateVolumeRequest) GetReplication() string { + if x != nil { + return x.Replication + } + return "" } -func (m *VolumeMountRequest) Reset() { *m = VolumeMountRequest{} } -func (m *VolumeMountRequest) String() string { return proto.CompactTextString(m) } -func (*VolumeMountRequest) ProtoMessage() {} -func (*VolumeMountRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{20} } +func (x *AllocateVolumeRequest) GetTtl() string { + if x != nil { + return x.Ttl + } + return "" +} -func (m *VolumeMountRequest) GetVolumeId() uint32 { - if m != nil { - return m.VolumeId +func (x *AllocateVolumeRequest) GetMemoryMapMaxSizeMb() uint32 { + if x != nil { + return x.MemoryMapMaxSizeMb } return 0 } -type VolumeMountResponse struct { +func (x *AllocateVolumeRequest) GetDiskType() string { + if x != nil { + return x.DiskType + } + return "" +} + +type AllocateVolumeResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields } -func (m *VolumeMountResponse) Reset() { *m = VolumeMountResponse{} } -func (m *VolumeMountResponse) String() string { return proto.CompactTextString(m) } -func (*VolumeMountResponse) ProtoMessage() {} -func (*VolumeMountResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{21} } +func (x *AllocateVolumeResponse) Reset() { + *x = AllocateVolumeResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} -type VolumeUnmountRequest struct { - VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" 
json:"volume_id,omitempty"` +func (x *AllocateVolumeResponse) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *VolumeUnmountRequest) Reset() { *m = VolumeUnmountRequest{} } -func (m *VolumeUnmountRequest) String() string { return proto.CompactTextString(m) } -func (*VolumeUnmountRequest) ProtoMessage() {} -func (*VolumeUnmountRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{22} } +func (*AllocateVolumeResponse) ProtoMessage() {} -func (m *VolumeUnmountRequest) GetVolumeId() uint32 { - if m != nil { - return m.VolumeId +func (x *AllocateVolumeResponse) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[15] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return 0 + return mi.MessageOf(x) } -type VolumeUnmountResponse struct { +// Deprecated: Use AllocateVolumeResponse.ProtoReflect.Descriptor instead. +func (*AllocateVolumeResponse) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{15} } -func (m *VolumeUnmountResponse) Reset() { *m = VolumeUnmountResponse{} } -func (m *VolumeUnmountResponse) String() string { return proto.CompactTextString(m) } -func (*VolumeUnmountResponse) ProtoMessage() {} -func (*VolumeUnmountResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{23} } +type VolumeSyncStatusRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -type VolumeDeleteRequest struct { - VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` + VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` } -func (m *VolumeDeleteRequest) Reset() { *m = VolumeDeleteRequest{} } -func (m *VolumeDeleteRequest) String() string { return proto.CompactTextString(m) } -func (*VolumeDeleteRequest) ProtoMessage() {} -func (*VolumeDeleteRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{24} } - -func (m *VolumeDeleteRequest) GetVolumeId() uint32 { - if m != nil { - return m.VolumeId +func (x *VolumeSyncStatusRequest) Reset() { + *x = VolumeSyncStatusRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[16] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return 0 } -type VolumeDeleteResponse struct { +func (x *VolumeSyncStatusRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *VolumeDeleteResponse) Reset() { *m = VolumeDeleteResponse{} } -func (m *VolumeDeleteResponse) String() string { return proto.CompactTextString(m) } -func (*VolumeDeleteResponse) ProtoMessage() {} -func (*VolumeDeleteResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{25} } +func (*VolumeSyncStatusRequest) ProtoMessage() {} -type VolumeMarkReadonlyRequest struct { - VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` +func (x *VolumeSyncStatusRequest) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[16] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -func (m *VolumeMarkReadonlyRequest) Reset() { *m = VolumeMarkReadonlyRequest{} } -func (m *VolumeMarkReadonlyRequest) 
String() string { return proto.CompactTextString(m) } -func (*VolumeMarkReadonlyRequest) ProtoMessage() {} -func (*VolumeMarkReadonlyRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{26} } +// Deprecated: Use VolumeSyncStatusRequest.ProtoReflect.Descriptor instead. +func (*VolumeSyncStatusRequest) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{16} +} -func (m *VolumeMarkReadonlyRequest) GetVolumeId() uint32 { - if m != nil { - return m.VolumeId +func (x *VolumeSyncStatusRequest) GetVolumeId() uint32 { + if x != nil { + return x.VolumeId } return 0 } -type VolumeMarkReadonlyResponse struct { +type VolumeSyncStatusResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` + Collection string `protobuf:"bytes,2,opt,name=collection,proto3" json:"collection,omitempty"` + Replication string `protobuf:"bytes,4,opt,name=replication,proto3" json:"replication,omitempty"` + Ttl string `protobuf:"bytes,5,opt,name=ttl,proto3" json:"ttl,omitempty"` + TailOffset uint64 `protobuf:"varint,6,opt,name=tail_offset,json=tailOffset,proto3" json:"tail_offset,omitempty"` + CompactRevision uint32 `protobuf:"varint,7,opt,name=compact_revision,json=compactRevision,proto3" json:"compact_revision,omitempty"` + IdxFileSize uint64 `protobuf:"varint,8,opt,name=idx_file_size,json=idxFileSize,proto3" json:"idx_file_size,omitempty"` } -func (m *VolumeMarkReadonlyResponse) Reset() { *m = VolumeMarkReadonlyResponse{} } -func (m *VolumeMarkReadonlyResponse) String() string { return proto.CompactTextString(m) } -func (*VolumeMarkReadonlyResponse) ProtoMessage() {} -func (*VolumeMarkReadonlyResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{27} } +func (x *VolumeSyncStatusResponse) Reset() { + *x = VolumeSyncStatusResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[17] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} -type VolumeCopyRequest struct { - VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` - Collection string `protobuf:"bytes,2,opt,name=collection" json:"collection,omitempty"` - Replication string `protobuf:"bytes,3,opt,name=replication" json:"replication,omitempty"` - Ttl string `protobuf:"bytes,4,opt,name=ttl" json:"ttl,omitempty"` - SourceDataNode string `protobuf:"bytes,5,opt,name=source_data_node,json=sourceDataNode" json:"source_data_node,omitempty"` +func (x *VolumeSyncStatusResponse) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *VolumeCopyRequest) Reset() { *m = VolumeCopyRequest{} } -func (m *VolumeCopyRequest) String() string { return proto.CompactTextString(m) } -func (*VolumeCopyRequest) ProtoMessage() {} -func (*VolumeCopyRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{28} } +func (*VolumeSyncStatusResponse) ProtoMessage() {} -func (m *VolumeCopyRequest) GetVolumeId() uint32 { - if m != nil { - return m.VolumeId +func (x *VolumeSyncStatusResponse) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[17] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return 0 + return mi.MessageOf(x) +} + +// Deprecated: Use 
VolumeSyncStatusResponse.ProtoReflect.Descriptor instead. +func (*VolumeSyncStatusResponse) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{17} } -func (m *VolumeCopyRequest) GetCollection() string { - if m != nil { - return m.Collection +func (x *VolumeSyncStatusResponse) GetVolumeId() uint32 { + if x != nil { + return x.VolumeId } - return "" + return 0 } -func (m *VolumeCopyRequest) GetReplication() string { - if m != nil { - return m.Replication +func (x *VolumeSyncStatusResponse) GetCollection() string { + if x != nil { + return x.Collection } return "" } -func (m *VolumeCopyRequest) GetTtl() string { - if m != nil { - return m.Ttl +func (x *VolumeSyncStatusResponse) GetReplication() string { + if x != nil { + return x.Replication } return "" } -func (m *VolumeCopyRequest) GetSourceDataNode() string { - if m != nil { - return m.SourceDataNode +func (x *VolumeSyncStatusResponse) GetTtl() string { + if x != nil { + return x.Ttl } return "" } -type VolumeCopyResponse struct { - LastAppendAtNs uint64 `protobuf:"varint,1,opt,name=last_append_at_ns,json=lastAppendAtNs" json:"last_append_at_ns,omitempty"` +func (x *VolumeSyncStatusResponse) GetTailOffset() uint64 { + if x != nil { + return x.TailOffset + } + return 0 } -func (m *VolumeCopyResponse) Reset() { *m = VolumeCopyResponse{} } -func (m *VolumeCopyResponse) String() string { return proto.CompactTextString(m) } -func (*VolumeCopyResponse) ProtoMessage() {} -func (*VolumeCopyResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{29} } - -func (m *VolumeCopyResponse) GetLastAppendAtNs() uint64 { - if m != nil { - return m.LastAppendAtNs +func (x *VolumeSyncStatusResponse) GetCompactRevision() uint32 { + if x != nil { + return x.CompactRevision } return 0 } -type CopyFileRequest struct { - VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` - Ext string `protobuf:"bytes,2,opt,name=ext" json:"ext,omitempty"` - CompactionRevision uint32 `protobuf:"varint,3,opt,name=compaction_revision,json=compactionRevision" json:"compaction_revision,omitempty"` - StopOffset uint64 `protobuf:"varint,4,opt,name=stop_offset,json=stopOffset" json:"stop_offset,omitempty"` - Collection string `protobuf:"bytes,5,opt,name=collection" json:"collection,omitempty"` - IsEcVolume bool `protobuf:"varint,6,opt,name=is_ec_volume,json=isEcVolume" json:"is_ec_volume,omitempty"` - IgnoreSourceFileNotFound bool `protobuf:"varint,7,opt,name=ignore_source_file_not_found,json=ignoreSourceFileNotFound" json:"ignore_source_file_not_found,omitempty"` +func (x *VolumeSyncStatusResponse) GetIdxFileSize() uint64 { + if x != nil { + return x.IdxFileSize + } + return 0 } -func (m *CopyFileRequest) Reset() { *m = CopyFileRequest{} } -func (m *CopyFileRequest) String() string { return proto.CompactTextString(m) } -func (*CopyFileRequest) ProtoMessage() {} -func (*CopyFileRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{30} } +type VolumeIncrementalCopyRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` + SinceNs uint64 `protobuf:"varint,2,opt,name=since_ns,json=sinceNs,proto3" json:"since_ns,omitempty"` +} -func (m *CopyFileRequest) GetVolumeId() uint32 { - if m != nil { - return m.VolumeId +func (x *VolumeIncrementalCopyRequest) Reset() { + *x = VolumeIncrementalCopyRequest{} + if 
protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[18] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return 0 } -func (m *CopyFileRequest) GetExt() string { - if m != nil { - return m.Ext +func (x *VolumeIncrementalCopyRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VolumeIncrementalCopyRequest) ProtoMessage() {} + +func (x *VolumeIncrementalCopyRequest) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[18] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return "" + return mi.MessageOf(x) } -func (m *CopyFileRequest) GetCompactionRevision() uint32 { - if m != nil { - return m.CompactionRevision +// Deprecated: Use VolumeIncrementalCopyRequest.ProtoReflect.Descriptor instead. +func (*VolumeIncrementalCopyRequest) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{18} +} + +func (x *VolumeIncrementalCopyRequest) GetVolumeId() uint32 { + if x != nil { + return x.VolumeId } return 0 } -func (m *CopyFileRequest) GetStopOffset() uint64 { - if m != nil { - return m.StopOffset +func (x *VolumeIncrementalCopyRequest) GetSinceNs() uint64 { + if x != nil { + return x.SinceNs } return 0 } -func (m *CopyFileRequest) GetCollection() string { - if m != nil { - return m.Collection - } - return "" +type VolumeIncrementalCopyResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + FileContent []byte `protobuf:"bytes,1,opt,name=file_content,json=fileContent,proto3" json:"file_content,omitempty"` } -func (m *CopyFileRequest) GetIsEcVolume() bool { - if m != nil { - return m.IsEcVolume +func (x *VolumeIncrementalCopyResponse) Reset() { + *x = VolumeIncrementalCopyResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[19] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return false } -func (m *CopyFileRequest) GetIgnoreSourceFileNotFound() bool { - if m != nil { - return m.IgnoreSourceFileNotFound - } - return false +func (x *VolumeIncrementalCopyResponse) String() string { + return protoimpl.X.MessageStringOf(x) } -type CopyFileResponse struct { - FileContent []byte `protobuf:"bytes,1,opt,name=file_content,json=fileContent,proto3" json:"file_content,omitempty"` +func (*VolumeIncrementalCopyResponse) ProtoMessage() {} + +func (x *VolumeIncrementalCopyResponse) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[19] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -func (m *CopyFileResponse) Reset() { *m = CopyFileResponse{} } -func (m *CopyFileResponse) String() string { return proto.CompactTextString(m) } -func (*CopyFileResponse) ProtoMessage() {} -func (*CopyFileResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{31} } +// Deprecated: Use VolumeIncrementalCopyResponse.ProtoReflect.Descriptor instead. 
+func (*VolumeIncrementalCopyResponse) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{19} +} -func (m *CopyFileResponse) GetFileContent() []byte { - if m != nil { - return m.FileContent +func (x *VolumeIncrementalCopyResponse) GetFileContent() []byte { + if x != nil { + return x.FileContent } return nil } -type VolumeTailSenderRequest struct { - VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` - SinceNs uint64 `protobuf:"varint,2,opt,name=since_ns,json=sinceNs" json:"since_ns,omitempty"` - IdleTimeoutSeconds uint32 `protobuf:"varint,3,opt,name=idle_timeout_seconds,json=idleTimeoutSeconds" json:"idle_timeout_seconds,omitempty"` +type VolumeMountRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` +} + +func (x *VolumeMountRequest) Reset() { + *x = VolumeMountRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[20] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *VolumeMountRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *VolumeTailSenderRequest) Reset() { *m = VolumeTailSenderRequest{} } -func (m *VolumeTailSenderRequest) String() string { return proto.CompactTextString(m) } -func (*VolumeTailSenderRequest) ProtoMessage() {} -func (*VolumeTailSenderRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{32} } +func (*VolumeMountRequest) ProtoMessage() {} -func (m *VolumeTailSenderRequest) GetVolumeId() uint32 { - if m != nil { - return m.VolumeId +func (x *VolumeMountRequest) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[20] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return 0 + return mi.MessageOf(x) } -func (m *VolumeTailSenderRequest) GetSinceNs() uint64 { - if m != nil { - return m.SinceNs +// Deprecated: Use VolumeMountRequest.ProtoReflect.Descriptor instead. 
+func (*VolumeMountRequest) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{20} +} + +func (x *VolumeMountRequest) GetVolumeId() uint32 { + if x != nil { + return x.VolumeId } return 0 } -func (m *VolumeTailSenderRequest) GetIdleTimeoutSeconds() uint32 { - if m != nil { - return m.IdleTimeoutSeconds +type VolumeMountResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *VolumeMountResponse) Reset() { + *x = VolumeMountResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[21] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return 0 } -type VolumeTailSenderResponse struct { - NeedleHeader []byte `protobuf:"bytes,1,opt,name=needle_header,json=needleHeader,proto3" json:"needle_header,omitempty"` - NeedleBody []byte `protobuf:"bytes,2,opt,name=needle_body,json=needleBody,proto3" json:"needle_body,omitempty"` - IsLastChunk bool `protobuf:"varint,3,opt,name=is_last_chunk,json=isLastChunk" json:"is_last_chunk,omitempty"` +func (x *VolumeMountResponse) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *VolumeTailSenderResponse) Reset() { *m = VolumeTailSenderResponse{} } -func (m *VolumeTailSenderResponse) String() string { return proto.CompactTextString(m) } -func (*VolumeTailSenderResponse) ProtoMessage() {} -func (*VolumeTailSenderResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{33} } +func (*VolumeMountResponse) ProtoMessage() {} -func (m *VolumeTailSenderResponse) GetNeedleHeader() []byte { - if m != nil { - return m.NeedleHeader +func (x *VolumeMountResponse) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[21] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return nil + return mi.MessageOf(x) +} + +// Deprecated: Use VolumeMountResponse.ProtoReflect.Descriptor instead. +func (*VolumeMountResponse) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{21} +} + +type VolumeUnmountRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` } -func (m *VolumeTailSenderResponse) GetNeedleBody() []byte { - if m != nil { - return m.NeedleBody +func (x *VolumeUnmountRequest) Reset() { + *x = VolumeUnmountRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[22] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *VolumeUnmountRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VolumeUnmountRequest) ProtoMessage() {} + +func (x *VolumeUnmountRequest) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[22] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use VolumeUnmountRequest.ProtoReflect.Descriptor instead. 
+func (*VolumeUnmountRequest) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{22} +} + +func (x *VolumeUnmountRequest) GetVolumeId() uint32 { + if x != nil { + return x.VolumeId + } + return 0 +} + +type VolumeUnmountResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *VolumeUnmountResponse) Reset() { + *x = VolumeUnmountResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[23] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *VolumeUnmountResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VolumeUnmountResponse) ProtoMessage() {} + +func (x *VolumeUnmountResponse) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[23] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use VolumeUnmountResponse.ProtoReflect.Descriptor instead. +func (*VolumeUnmountResponse) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{23} +} + +type VolumeDeleteRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` +} + +func (x *VolumeDeleteRequest) Reset() { + *x = VolumeDeleteRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[24] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *VolumeDeleteRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VolumeDeleteRequest) ProtoMessage() {} + +func (x *VolumeDeleteRequest) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[24] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use VolumeDeleteRequest.ProtoReflect.Descriptor instead. +func (*VolumeDeleteRequest) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{24} +} + +func (x *VolumeDeleteRequest) GetVolumeId() uint32 { + if x != nil { + return x.VolumeId + } + return 0 +} + +type VolumeDeleteResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *VolumeDeleteResponse) Reset() { + *x = VolumeDeleteResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[25] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *VolumeDeleteResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VolumeDeleteResponse) ProtoMessage() {} + +func (x *VolumeDeleteResponse) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[25] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use VolumeDeleteResponse.ProtoReflect.Descriptor instead. 
+func (*VolumeDeleteResponse) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{25} +} + +type VolumeMarkReadonlyRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` +} + +func (x *VolumeMarkReadonlyRequest) Reset() { + *x = VolumeMarkReadonlyRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[26] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *VolumeMarkReadonlyRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VolumeMarkReadonlyRequest) ProtoMessage() {} + +func (x *VolumeMarkReadonlyRequest) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[26] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use VolumeMarkReadonlyRequest.ProtoReflect.Descriptor instead. +func (*VolumeMarkReadonlyRequest) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{26} +} + +func (x *VolumeMarkReadonlyRequest) GetVolumeId() uint32 { + if x != nil { + return x.VolumeId + } + return 0 +} + +type VolumeMarkReadonlyResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *VolumeMarkReadonlyResponse) Reset() { + *x = VolumeMarkReadonlyResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[27] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *VolumeMarkReadonlyResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VolumeMarkReadonlyResponse) ProtoMessage() {} + +func (x *VolumeMarkReadonlyResponse) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[27] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use VolumeMarkReadonlyResponse.ProtoReflect.Descriptor instead. 
+func (*VolumeMarkReadonlyResponse) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{27} +} + +type VolumeMarkWritableRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` +} + +func (x *VolumeMarkWritableRequest) Reset() { + *x = VolumeMarkWritableRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[28] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *VolumeMarkWritableRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VolumeMarkWritableRequest) ProtoMessage() {} + +func (x *VolumeMarkWritableRequest) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[28] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use VolumeMarkWritableRequest.ProtoReflect.Descriptor instead. +func (*VolumeMarkWritableRequest) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{28} +} + +func (x *VolumeMarkWritableRequest) GetVolumeId() uint32 { + if x != nil { + return x.VolumeId + } + return 0 +} + +type VolumeMarkWritableResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *VolumeMarkWritableResponse) Reset() { + *x = VolumeMarkWritableResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[29] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *VolumeMarkWritableResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VolumeMarkWritableResponse) ProtoMessage() {} + +func (x *VolumeMarkWritableResponse) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[29] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use VolumeMarkWritableResponse.ProtoReflect.Descriptor instead. 
+func (*VolumeMarkWritableResponse) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{29} +} + +type VolumeConfigureRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` + Replication string `protobuf:"bytes,2,opt,name=replication,proto3" json:"replication,omitempty"` +} + +func (x *VolumeConfigureRequest) Reset() { + *x = VolumeConfigureRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[30] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *VolumeConfigureRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VolumeConfigureRequest) ProtoMessage() {} + +func (x *VolumeConfigureRequest) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[30] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use VolumeConfigureRequest.ProtoReflect.Descriptor instead. +func (*VolumeConfigureRequest) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{30} +} + +func (x *VolumeConfigureRequest) GetVolumeId() uint32 { + if x != nil { + return x.VolumeId + } + return 0 +} + +func (x *VolumeConfigureRequest) GetReplication() string { + if x != nil { + return x.Replication + } + return "" +} + +type VolumeConfigureResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Error string `protobuf:"bytes,1,opt,name=error,proto3" json:"error,omitempty"` +} + +func (x *VolumeConfigureResponse) Reset() { + *x = VolumeConfigureResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[31] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *VolumeConfigureResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VolumeConfigureResponse) ProtoMessage() {} + +func (x *VolumeConfigureResponse) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[31] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use VolumeConfigureResponse.ProtoReflect.Descriptor instead. 
+func (*VolumeConfigureResponse) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{31} +} + +func (x *VolumeConfigureResponse) GetError() string { + if x != nil { + return x.Error + } + return "" +} + +type VolumeStatusRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` +} + +func (x *VolumeStatusRequest) Reset() { + *x = VolumeStatusRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[32] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *VolumeStatusRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VolumeStatusRequest) ProtoMessage() {} + +func (x *VolumeStatusRequest) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[32] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use VolumeStatusRequest.ProtoReflect.Descriptor instead. +func (*VolumeStatusRequest) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{32} +} + +func (x *VolumeStatusRequest) GetVolumeId() uint32 { + if x != nil { + return x.VolumeId + } + return 0 +} + +type VolumeStatusResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + IsReadOnly bool `protobuf:"varint,1,opt,name=is_read_only,json=isReadOnly,proto3" json:"is_read_only,omitempty"` +} + +func (x *VolumeStatusResponse) Reset() { + *x = VolumeStatusResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[33] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *VolumeStatusResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VolumeStatusResponse) ProtoMessage() {} + +func (x *VolumeStatusResponse) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[33] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use VolumeStatusResponse.ProtoReflect.Descriptor instead. 
+func (*VolumeStatusResponse) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{33} +} + +func (x *VolumeStatusResponse) GetIsReadOnly() bool { + if x != nil { + return x.IsReadOnly + } + return false +} + +type VolumeCopyRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` + Collection string `protobuf:"bytes,2,opt,name=collection,proto3" json:"collection,omitempty"` + Replication string `protobuf:"bytes,3,opt,name=replication,proto3" json:"replication,omitempty"` + Ttl string `protobuf:"bytes,4,opt,name=ttl,proto3" json:"ttl,omitempty"` + SourceDataNode string `protobuf:"bytes,5,opt,name=source_data_node,json=sourceDataNode,proto3" json:"source_data_node,omitempty"` + DiskType string `protobuf:"bytes,6,opt,name=disk_type,json=diskType,proto3" json:"disk_type,omitempty"` +} + +func (x *VolumeCopyRequest) Reset() { + *x = VolumeCopyRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[34] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *VolumeCopyRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VolumeCopyRequest) ProtoMessage() {} + +func (x *VolumeCopyRequest) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[34] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use VolumeCopyRequest.ProtoReflect.Descriptor instead. +func (*VolumeCopyRequest) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{34} +} + +func (x *VolumeCopyRequest) GetVolumeId() uint32 { + if x != nil { + return x.VolumeId + } + return 0 +} + +func (x *VolumeCopyRequest) GetCollection() string { + if x != nil { + return x.Collection + } + return "" +} + +func (x *VolumeCopyRequest) GetReplication() string { + if x != nil { + return x.Replication + } + return "" +} + +func (x *VolumeCopyRequest) GetTtl() string { + if x != nil { + return x.Ttl + } + return "" +} + +func (x *VolumeCopyRequest) GetSourceDataNode() string { + if x != nil { + return x.SourceDataNode + } + return "" +} + +func (x *VolumeCopyRequest) GetDiskType() string { + if x != nil { + return x.DiskType + } + return "" +} + +type VolumeCopyResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + LastAppendAtNs uint64 `protobuf:"varint,1,opt,name=last_append_at_ns,json=lastAppendAtNs,proto3" json:"last_append_at_ns,omitempty"` +} + +func (x *VolumeCopyResponse) Reset() { + *x = VolumeCopyResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[35] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *VolumeCopyResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VolumeCopyResponse) ProtoMessage() {} + +func (x *VolumeCopyResponse) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[35] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// 
Deprecated: Use VolumeCopyResponse.ProtoReflect.Descriptor instead. +func (*VolumeCopyResponse) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{35} +} + +func (x *VolumeCopyResponse) GetLastAppendAtNs() uint64 { + if x != nil { + return x.LastAppendAtNs + } + return 0 +} + +type CopyFileRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` + Ext string `protobuf:"bytes,2,opt,name=ext,proto3" json:"ext,omitempty"` + CompactionRevision uint32 `protobuf:"varint,3,opt,name=compaction_revision,json=compactionRevision,proto3" json:"compaction_revision,omitempty"` + StopOffset uint64 `protobuf:"varint,4,opt,name=stop_offset,json=stopOffset,proto3" json:"stop_offset,omitempty"` + Collection string `protobuf:"bytes,5,opt,name=collection,proto3" json:"collection,omitempty"` + IsEcVolume bool `protobuf:"varint,6,opt,name=is_ec_volume,json=isEcVolume,proto3" json:"is_ec_volume,omitempty"` + IgnoreSourceFileNotFound bool `protobuf:"varint,7,opt,name=ignore_source_file_not_found,json=ignoreSourceFileNotFound,proto3" json:"ignore_source_file_not_found,omitempty"` +} + +func (x *CopyFileRequest) Reset() { + *x = CopyFileRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[36] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CopyFileRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CopyFileRequest) ProtoMessage() {} + +func (x *CopyFileRequest) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[36] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CopyFileRequest.ProtoReflect.Descriptor instead. 
+func (*CopyFileRequest) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{36} +} + +func (x *CopyFileRequest) GetVolumeId() uint32 { + if x != nil { + return x.VolumeId + } + return 0 +} + +func (x *CopyFileRequest) GetExt() string { + if x != nil { + return x.Ext + } + return "" +} + +func (x *CopyFileRequest) GetCompactionRevision() uint32 { + if x != nil { + return x.CompactionRevision + } + return 0 +} + +func (x *CopyFileRequest) GetStopOffset() uint64 { + if x != nil { + return x.StopOffset + } + return 0 +} + +func (x *CopyFileRequest) GetCollection() string { + if x != nil { + return x.Collection + } + return "" +} + +func (x *CopyFileRequest) GetIsEcVolume() bool { + if x != nil { + return x.IsEcVolume + } + return false +} + +func (x *CopyFileRequest) GetIgnoreSourceFileNotFound() bool { + if x != nil { + return x.IgnoreSourceFileNotFound + } + return false +} + +type CopyFileResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + FileContent []byte `protobuf:"bytes,1,opt,name=file_content,json=fileContent,proto3" json:"file_content,omitempty"` +} + +func (x *CopyFileResponse) Reset() { + *x = CopyFileResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[37] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CopyFileResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CopyFileResponse) ProtoMessage() {} + +func (x *CopyFileResponse) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[37] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CopyFileResponse.ProtoReflect.Descriptor instead. +func (*CopyFileResponse) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{37} +} + +func (x *CopyFileResponse) GetFileContent() []byte { + if x != nil { + return x.FileContent + } + return nil +} + +type ReadNeedleBlobRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` + NeedleId uint64 `protobuf:"varint,2,opt,name=needle_id,json=needleId,proto3" json:"needle_id,omitempty"` + Offset int64 `protobuf:"varint,3,opt,name=offset,proto3" json:"offset,omitempty"` // actual offset + Size int32 `protobuf:"varint,4,opt,name=size,proto3" json:"size,omitempty"` +} + +func (x *ReadNeedleBlobRequest) Reset() { + *x = ReadNeedleBlobRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[38] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ReadNeedleBlobRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ReadNeedleBlobRequest) ProtoMessage() {} + +func (x *ReadNeedleBlobRequest) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[38] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ReadNeedleBlobRequest.ProtoReflect.Descriptor instead. 
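VolumeCopyRequest tells a receiving server which volume to pull and from which source node, and the response's LastAppendAtNs lets the caller resume tailing from exactly where the copy ended. CopyFileRequest/CopyFileResponse is the lower-level primitive: one volume file, selected by extension, streamed back in FileContent chunks. A sketch of pulling a volume's .idx file, assuming CopyFile is exposed as a server-streaming RPC on the generated client; the stop offset here is illustrative only:

package volumeops

import (
	"context"
	"io"

	"github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
)

// FetchIdxFile streams the .idx file of a volume into memory.
func FetchIdxFile(ctx context.Context, client volume_server_pb.VolumeServerClient, volumeId uint32) ([]byte, error) {
	stream, err := client.CopyFile(ctx, &volume_server_pb.CopyFileRequest{
		VolumeId:   volumeId,
		Ext:        ".idx",
		StopOffset: 1 << 40, // illustrative: large enough to cover the whole file
	})
	if err != nil {
		return nil, err
	}
	var content []byte
	for {
		resp, err := stream.Recv()
		if err == io.EOF {
			return content, nil
		}
		if err != nil {
			return nil, err
		}
		content = append(content, resp.FileContent...)
	}
}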
+func (*ReadNeedleBlobRequest) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{38} +} + +func (x *ReadNeedleBlobRequest) GetVolumeId() uint32 { + if x != nil { + return x.VolumeId + } + return 0 +} + +func (x *ReadNeedleBlobRequest) GetNeedleId() uint64 { + if x != nil { + return x.NeedleId + } + return 0 +} + +func (x *ReadNeedleBlobRequest) GetOffset() int64 { + if x != nil { + return x.Offset + } + return 0 +} + +func (x *ReadNeedleBlobRequest) GetSize() int32 { + if x != nil { + return x.Size + } + return 0 +} + +type ReadNeedleBlobResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + NeedleBlob []byte `protobuf:"bytes,1,opt,name=needle_blob,json=needleBlob,proto3" json:"needle_blob,omitempty"` +} + +func (x *ReadNeedleBlobResponse) Reset() { + *x = ReadNeedleBlobResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[39] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ReadNeedleBlobResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ReadNeedleBlobResponse) ProtoMessage() {} + +func (x *ReadNeedleBlobResponse) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[39] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ReadNeedleBlobResponse.ProtoReflect.Descriptor instead. +func (*ReadNeedleBlobResponse) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{39} +} + +func (x *ReadNeedleBlobResponse) GetNeedleBlob() []byte { + if x != nil { + return x.NeedleBlob + } + return nil +} + +type WriteNeedleBlobRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` + NeedleId uint64 `protobuf:"varint,2,opt,name=needle_id,json=needleId,proto3" json:"needle_id,omitempty"` + Size int32 `protobuf:"varint,3,opt,name=size,proto3" json:"size,omitempty"` + NeedleBlob []byte `protobuf:"bytes,4,opt,name=needle_blob,json=needleBlob,proto3" json:"needle_blob,omitempty"` +} + +func (x *WriteNeedleBlobRequest) Reset() { + *x = WriteNeedleBlobRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[40] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *WriteNeedleBlobRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*WriteNeedleBlobRequest) ProtoMessage() {} + +func (x *WriteNeedleBlobRequest) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[40] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use WriteNeedleBlobRequest.ProtoReflect.Descriptor instead. 
+func (*WriteNeedleBlobRequest) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{40} +} + +func (x *WriteNeedleBlobRequest) GetVolumeId() uint32 { + if x != nil { + return x.VolumeId + } + return 0 +} + +func (x *WriteNeedleBlobRequest) GetNeedleId() uint64 { + if x != nil { + return x.NeedleId + } + return 0 +} + +func (x *WriteNeedleBlobRequest) GetSize() int32 { + if x != nil { + return x.Size + } + return 0 +} + +func (x *WriteNeedleBlobRequest) GetNeedleBlob() []byte { + if x != nil { + return x.NeedleBlob + } + return nil +} + +type WriteNeedleBlobResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *WriteNeedleBlobResponse) Reset() { + *x = WriteNeedleBlobResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[41] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *WriteNeedleBlobResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*WriteNeedleBlobResponse) ProtoMessage() {} + +func (x *WriteNeedleBlobResponse) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[41] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use WriteNeedleBlobResponse.ProtoReflect.Descriptor instead. +func (*WriteNeedleBlobResponse) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{41} +} + +type VolumeTailSenderRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` + SinceNs uint64 `protobuf:"varint,2,opt,name=since_ns,json=sinceNs,proto3" json:"since_ns,omitempty"` + IdleTimeoutSeconds uint32 `protobuf:"varint,3,opt,name=idle_timeout_seconds,json=idleTimeoutSeconds,proto3" json:"idle_timeout_seconds,omitempty"` +} + +func (x *VolumeTailSenderRequest) Reset() { + *x = VolumeTailSenderRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[42] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *VolumeTailSenderRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VolumeTailSenderRequest) ProtoMessage() {} + +func (x *VolumeTailSenderRequest) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[42] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use VolumeTailSenderRequest.ProtoReflect.Descriptor instead. 
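ReadNeedleBlob and WriteNeedleBlob operate below the needle-parsing layer: the caller supplies the on-disk offset and size, and the server hands back (or accepts) the raw blob bytes unchanged, which is what replication repair needs. A sketch of copying a single needle between two servers, assuming both RPCs are unary on the generated client; the ids, offset, and size are placeholders a real caller would take from the needle map:

package volumeops

import (
	"context"

	"github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
)

// CopyNeedle reads a raw needle blob from src and writes it to dst.
func CopyNeedle(ctx context.Context, src, dst volume_server_pb.VolumeServerClient) error {
	read, err := src.ReadNeedleBlob(ctx, &volume_server_pb.ReadNeedleBlobRequest{
		VolumeId: 7,
		NeedleId: 0x1234,
		Offset:   2048, // actual offset in the volume file, per the field comment
		Size:     512,
	})
	if err != nil {
		return err
	}
	_, err = dst.WriteNeedleBlob(ctx, &volume_server_pb.WriteNeedleBlobRequest{
		VolumeId:   7,
		NeedleId:   0x1234,
		Size:       512,
		NeedleBlob: read.NeedleBlob,
	})
	return err
}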
+func (*VolumeTailSenderRequest) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{42} +} + +func (x *VolumeTailSenderRequest) GetVolumeId() uint32 { + if x != nil { + return x.VolumeId + } + return 0 +} + +func (x *VolumeTailSenderRequest) GetSinceNs() uint64 { + if x != nil { + return x.SinceNs + } + return 0 +} + +func (x *VolumeTailSenderRequest) GetIdleTimeoutSeconds() uint32 { + if x != nil { + return x.IdleTimeoutSeconds + } + return 0 +} + +type VolumeTailSenderResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + NeedleHeader []byte `protobuf:"bytes,1,opt,name=needle_header,json=needleHeader,proto3" json:"needle_header,omitempty"` + NeedleBody []byte `protobuf:"bytes,2,opt,name=needle_body,json=needleBody,proto3" json:"needle_body,omitempty"` + IsLastChunk bool `protobuf:"varint,3,opt,name=is_last_chunk,json=isLastChunk,proto3" json:"is_last_chunk,omitempty"` +} + +func (x *VolumeTailSenderResponse) Reset() { + *x = VolumeTailSenderResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[43] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *VolumeTailSenderResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VolumeTailSenderResponse) ProtoMessage() {} + +func (x *VolumeTailSenderResponse) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[43] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use VolumeTailSenderResponse.ProtoReflect.Descriptor instead. 
+func (*VolumeTailSenderResponse) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{43} +} + +func (x *VolumeTailSenderResponse) GetNeedleHeader() []byte { + if x != nil { + return x.NeedleHeader + } + return nil +} + +func (x *VolumeTailSenderResponse) GetNeedleBody() []byte { + if x != nil { + return x.NeedleBody + } + return nil +} + +func (x *VolumeTailSenderResponse) GetIsLastChunk() bool { + if x != nil { + return x.IsLastChunk + } + return false +} + +type VolumeTailReceiverRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` + SinceNs uint64 `protobuf:"varint,2,opt,name=since_ns,json=sinceNs,proto3" json:"since_ns,omitempty"` + IdleTimeoutSeconds uint32 `protobuf:"varint,3,opt,name=idle_timeout_seconds,json=idleTimeoutSeconds,proto3" json:"idle_timeout_seconds,omitempty"` + SourceVolumeServer string `protobuf:"bytes,4,opt,name=source_volume_server,json=sourceVolumeServer,proto3" json:"source_volume_server,omitempty"` +} + +func (x *VolumeTailReceiverRequest) Reset() { + *x = VolumeTailReceiverRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[44] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *VolumeTailReceiverRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VolumeTailReceiverRequest) ProtoMessage() {} + +func (x *VolumeTailReceiverRequest) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[44] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use VolumeTailReceiverRequest.ProtoReflect.Descriptor instead. 
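The tail sender/receiver pair implements follow-the-leader replication: VolumeTailSenderRequest names a volume and a starting timestamp (SinceNs), and each response carries a needle header and body, with IsLastChunk marking where a needle split across several messages ends. A sketch of consuming the stream, assuming VolumeTailSender is server-streaming on the generated client:

package volumeops

import (
	"context"
	"fmt"
	"io"

	"github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
)

// TailVolume prints every needle appended to a volume since sinceNs.
func TailVolume(ctx context.Context, client volume_server_pb.VolumeServerClient, sinceNs uint64) error {
	stream, err := client.VolumeTailSender(ctx, &volume_server_pb.VolumeTailSenderRequest{
		VolumeId:           7,
		SinceNs:            sinceNs,
		IdleTimeoutSeconds: 30, // give up after 30 idle seconds
	})
	if err != nil {
		return err
	}
	var body []byte
	for {
		resp, err := stream.Recv()
		if err == io.EOF {
			return nil
		}
		if err != nil {
			return err
		}
		body = append(body, resp.NeedleBody...)
		if resp.IsLastChunk {
			fmt.Printf("needle header % x, body %d bytes\n", resp.NeedleHeader, len(body))
			body = body[:0]
		}
	}
}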
+func (*VolumeTailReceiverRequest) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{44} +} + +func (x *VolumeTailReceiverRequest) GetVolumeId() uint32 { + if x != nil { + return x.VolumeId + } + return 0 +} + +func (x *VolumeTailReceiverRequest) GetSinceNs() uint64 { + if x != nil { + return x.SinceNs + } + return 0 +} + +func (x *VolumeTailReceiverRequest) GetIdleTimeoutSeconds() uint32 { + if x != nil { + return x.IdleTimeoutSeconds + } + return 0 +} + +func (x *VolumeTailReceiverRequest) GetSourceVolumeServer() string { + if x != nil { + return x.SourceVolumeServer + } + return "" +} + +type VolumeTailReceiverResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *VolumeTailReceiverResponse) Reset() { + *x = VolumeTailReceiverResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[45] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *VolumeTailReceiverResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VolumeTailReceiverResponse) ProtoMessage() {} + +func (x *VolumeTailReceiverResponse) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[45] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use VolumeTailReceiverResponse.ProtoReflect.Descriptor instead. +func (*VolumeTailReceiverResponse) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{45} +} + +type VolumeEcShardsGenerateRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` + Collection string `protobuf:"bytes,2,opt,name=collection,proto3" json:"collection,omitempty"` +} + +func (x *VolumeEcShardsGenerateRequest) Reset() { + *x = VolumeEcShardsGenerateRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[46] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *VolumeEcShardsGenerateRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VolumeEcShardsGenerateRequest) ProtoMessage() {} + +func (x *VolumeEcShardsGenerateRequest) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[46] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use VolumeEcShardsGenerateRequest.ProtoReflect.Descriptor instead. 
+func (*VolumeEcShardsGenerateRequest) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{46} +} + +func (x *VolumeEcShardsGenerateRequest) GetVolumeId() uint32 { + if x != nil { + return x.VolumeId + } + return 0 +} + +func (x *VolumeEcShardsGenerateRequest) GetCollection() string { + if x != nil { + return x.Collection + } + return "" +} + +type VolumeEcShardsGenerateResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *VolumeEcShardsGenerateResponse) Reset() { + *x = VolumeEcShardsGenerateResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[47] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *VolumeEcShardsGenerateResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VolumeEcShardsGenerateResponse) ProtoMessage() {} + +func (x *VolumeEcShardsGenerateResponse) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[47] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use VolumeEcShardsGenerateResponse.ProtoReflect.Descriptor instead. +func (*VolumeEcShardsGenerateResponse) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{47} +} + +type VolumeEcShardsRebuildRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` + Collection string `protobuf:"bytes,2,opt,name=collection,proto3" json:"collection,omitempty"` +} + +func (x *VolumeEcShardsRebuildRequest) Reset() { + *x = VolumeEcShardsRebuildRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[48] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *VolumeEcShardsRebuildRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VolumeEcShardsRebuildRequest) ProtoMessage() {} + +func (x *VolumeEcShardsRebuildRequest) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[48] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use VolumeEcShardsRebuildRequest.ProtoReflect.Descriptor instead. 
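VolumeEcShardsGenerate asks a server to erasure-code a volume into shards, and VolumeEcShardsRebuild recreates whichever shards are missing locally, reporting their ids back. A sketch of that maintenance flow, assuming both RPCs are unary on the generated client and using a hypothetical "pictures" collection:

package volumeops

import (
	"context"
	"fmt"

	"github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
)

// RebuildEcShards (re)generates EC shards for a volume, then rebuilds
// any missing ones and reports which shard ids were recreated.
func RebuildEcShards(ctx context.Context, client volume_server_pb.VolumeServerClient) error {
	if _, err := client.VolumeEcShardsGenerate(ctx, &volume_server_pb.VolumeEcShardsGenerateRequest{
		VolumeId:   7,
		Collection: "pictures",
	}); err != nil {
		return err
	}
	resp, err := client.VolumeEcShardsRebuild(ctx, &volume_server_pb.VolumeEcShardsRebuildRequest{
		VolumeId:   7,
		Collection: "pictures",
	})
	if err != nil {
		return err
	}
	fmt.Println("rebuilt shard ids:", resp.RebuiltShardIds)
	return nil
}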
+func (*VolumeEcShardsRebuildRequest) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{48} +} + +func (x *VolumeEcShardsRebuildRequest) GetVolumeId() uint32 { + if x != nil { + return x.VolumeId + } + return 0 +} + +func (x *VolumeEcShardsRebuildRequest) GetCollection() string { + if x != nil { + return x.Collection + } + return "" +} + +type VolumeEcShardsRebuildResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + RebuiltShardIds []uint32 `protobuf:"varint,1,rep,packed,name=rebuilt_shard_ids,json=rebuiltShardIds,proto3" json:"rebuilt_shard_ids,omitempty"` +} + +func (x *VolumeEcShardsRebuildResponse) Reset() { + *x = VolumeEcShardsRebuildResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[49] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *VolumeEcShardsRebuildResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VolumeEcShardsRebuildResponse) ProtoMessage() {} + +func (x *VolumeEcShardsRebuildResponse) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[49] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use VolumeEcShardsRebuildResponse.ProtoReflect.Descriptor instead. +func (*VolumeEcShardsRebuildResponse) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{49} +} + +func (x *VolumeEcShardsRebuildResponse) GetRebuiltShardIds() []uint32 { + if x != nil { + return x.RebuiltShardIds + } + return nil +} + +type VolumeEcShardsCopyRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` + Collection string `protobuf:"bytes,2,opt,name=collection,proto3" json:"collection,omitempty"` + ShardIds []uint32 `protobuf:"varint,3,rep,packed,name=shard_ids,json=shardIds,proto3" json:"shard_ids,omitempty"` + CopyEcxFile bool `protobuf:"varint,4,opt,name=copy_ecx_file,json=copyEcxFile,proto3" json:"copy_ecx_file,omitempty"` + SourceDataNode string `protobuf:"bytes,5,opt,name=source_data_node,json=sourceDataNode,proto3" json:"source_data_node,omitempty"` + CopyEcjFile bool `protobuf:"varint,6,opt,name=copy_ecj_file,json=copyEcjFile,proto3" json:"copy_ecj_file,omitempty"` + CopyVifFile bool `protobuf:"varint,7,opt,name=copy_vif_file,json=copyVifFile,proto3" json:"copy_vif_file,omitempty"` +} + +func (x *VolumeEcShardsCopyRequest) Reset() { + *x = VolumeEcShardsCopyRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[50] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *VolumeEcShardsCopyRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VolumeEcShardsCopyRequest) ProtoMessage() {} + +func (x *VolumeEcShardsCopyRequest) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[50] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use 
VolumeEcShardsCopyRequest.ProtoReflect.Descriptor instead. +func (*VolumeEcShardsCopyRequest) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{50} +} + +func (x *VolumeEcShardsCopyRequest) GetVolumeId() uint32 { + if x != nil { + return x.VolumeId + } + return 0 +} + +func (x *VolumeEcShardsCopyRequest) GetCollection() string { + if x != nil { + return x.Collection + } + return "" +} + +func (x *VolumeEcShardsCopyRequest) GetShardIds() []uint32 { + if x != nil { + return x.ShardIds + } + return nil +} + +func (x *VolumeEcShardsCopyRequest) GetCopyEcxFile() bool { + if x != nil { + return x.CopyEcxFile + } + return false +} + +func (x *VolumeEcShardsCopyRequest) GetSourceDataNode() string { + if x != nil { + return x.SourceDataNode + } + return "" +} + +func (x *VolumeEcShardsCopyRequest) GetCopyEcjFile() bool { + if x != nil { + return x.CopyEcjFile + } + return false +} + +func (x *VolumeEcShardsCopyRequest) GetCopyVifFile() bool { + if x != nil { + return x.CopyVifFile + } + return false +} + +type VolumeEcShardsCopyResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *VolumeEcShardsCopyResponse) Reset() { + *x = VolumeEcShardsCopyResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[51] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *VolumeEcShardsCopyResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VolumeEcShardsCopyResponse) ProtoMessage() {} + +func (x *VolumeEcShardsCopyResponse) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[51] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use VolumeEcShardsCopyResponse.ProtoReflect.Descriptor instead. +func (*VolumeEcShardsCopyResponse) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{51} +} + +type VolumeEcShardsDeleteRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` + Collection string `protobuf:"bytes,2,opt,name=collection,proto3" json:"collection,omitempty"` + ShardIds []uint32 `protobuf:"varint,3,rep,packed,name=shard_ids,json=shardIds,proto3" json:"shard_ids,omitempty"` +} + +func (x *VolumeEcShardsDeleteRequest) Reset() { + *x = VolumeEcShardsDeleteRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[52] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *VolumeEcShardsDeleteRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VolumeEcShardsDeleteRequest) ProtoMessage() {} + +func (x *VolumeEcShardsDeleteRequest) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[52] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use VolumeEcShardsDeleteRequest.ProtoReflect.Descriptor instead. 
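VolumeEcShardsCopy pulls selected shards from a source node, optionally together with the shared .ecx index, .ecj journal, and .vif volume-info files; VolumeEcShardsDelete drops shards from a server. Combined, they move shards between servers. A sketch under the same unary-client assumption; the source address is a placeholder:

package volumeops

import (
	"context"

	"github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
)

// MoveEcShards copies shards 0-2 of a volume onto dst, then removes
// them from src once the copy has succeeded.
func MoveEcShards(ctx context.Context, dst, src volume_server_pb.VolumeServerClient) error {
	shardIds := []uint32{0, 1, 2}
	if _, err := dst.VolumeEcShardsCopy(ctx, &volume_server_pb.VolumeEcShardsCopyRequest{
		VolumeId:       7,
		Collection:     "pictures",
		ShardIds:       shardIds,
		CopyEcxFile:    true,
		CopyEcjFile:    true,
		CopyVifFile:    true,
		SourceDataNode: "10.0.0.5:8080", // placeholder source volume server
	}); err != nil {
		return err
	}
	_, err := src.VolumeEcShardsDelete(ctx, &volume_server_pb.VolumeEcShardsDeleteRequest{
		VolumeId:   7,
		Collection: "pictures",
		ShardIds:   shardIds,
	})
	return err
}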
+func (*VolumeEcShardsDeleteRequest) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{52} +} + +func (x *VolumeEcShardsDeleteRequest) GetVolumeId() uint32 { + if x != nil { + return x.VolumeId + } + return 0 +} + +func (x *VolumeEcShardsDeleteRequest) GetCollection() string { + if x != nil { + return x.Collection + } + return "" +} + +func (x *VolumeEcShardsDeleteRequest) GetShardIds() []uint32 { + if x != nil { + return x.ShardIds + } + return nil +} + +type VolumeEcShardsDeleteResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *VolumeEcShardsDeleteResponse) Reset() { + *x = VolumeEcShardsDeleteResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[53] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *VolumeEcShardsDeleteResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VolumeEcShardsDeleteResponse) ProtoMessage() {} + +func (x *VolumeEcShardsDeleteResponse) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[53] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use VolumeEcShardsDeleteResponse.ProtoReflect.Descriptor instead. +func (*VolumeEcShardsDeleteResponse) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{53} +} + +type VolumeEcShardsMountRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` + Collection string `protobuf:"bytes,2,opt,name=collection,proto3" json:"collection,omitempty"` + ShardIds []uint32 `protobuf:"varint,3,rep,packed,name=shard_ids,json=shardIds,proto3" json:"shard_ids,omitempty"` +} + +func (x *VolumeEcShardsMountRequest) Reset() { + *x = VolumeEcShardsMountRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[54] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *VolumeEcShardsMountRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VolumeEcShardsMountRequest) ProtoMessage() {} + +func (x *VolumeEcShardsMountRequest) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[54] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use VolumeEcShardsMountRequest.ProtoReflect.Descriptor instead. 
+func (*VolumeEcShardsMountRequest) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{54} +} + +func (x *VolumeEcShardsMountRequest) GetVolumeId() uint32 { + if x != nil { + return x.VolumeId + } + return 0 +} + +func (x *VolumeEcShardsMountRequest) GetCollection() string { + if x != nil { + return x.Collection + } + return "" +} + +func (x *VolumeEcShardsMountRequest) GetShardIds() []uint32 { + if x != nil { + return x.ShardIds + } + return nil +} + +type VolumeEcShardsMountResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *VolumeEcShardsMountResponse) Reset() { + *x = VolumeEcShardsMountResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[55] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *VolumeEcShardsMountResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VolumeEcShardsMountResponse) ProtoMessage() {} + +func (x *VolumeEcShardsMountResponse) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[55] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use VolumeEcShardsMountResponse.ProtoReflect.Descriptor instead. +func (*VolumeEcShardsMountResponse) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{55} +} + +type VolumeEcShardsUnmountRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` + ShardIds []uint32 `protobuf:"varint,3,rep,packed,name=shard_ids,json=shardIds,proto3" json:"shard_ids,omitempty"` +} + +func (x *VolumeEcShardsUnmountRequest) Reset() { + *x = VolumeEcShardsUnmountRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[56] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *VolumeEcShardsUnmountRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VolumeEcShardsUnmountRequest) ProtoMessage() {} + +func (x *VolumeEcShardsUnmountRequest) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[56] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use VolumeEcShardsUnmountRequest.ProtoReflect.Descriptor instead. 
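Copying shard files onto a server is not enough to serve them; the mount/unmount pair toggles whether the server actually loads the shards. Note that VolumeEcShardsUnmountRequest carries no collection field, only the volume id and shard ids. A sketch continuing the move scenario above, under the same unary-client assumption:

package volumeops

import (
	"context"

	"github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
)

// SwitchEcShards mounts freshly copied shards on dst and unmounts the
// same shard ids on src, so only one server serves them.
func SwitchEcShards(ctx context.Context, dst, src volume_server_pb.VolumeServerClient, shardIds []uint32) error {
	if _, err := dst.VolumeEcShardsMount(ctx, &volume_server_pb.VolumeEcShardsMountRequest{
		VolumeId:   7,
		Collection: "pictures",
		ShardIds:   shardIds,
	}); err != nil {
		return err
	}
	_, err := src.VolumeEcShardsUnmount(ctx, &volume_server_pb.VolumeEcShardsUnmountRequest{
		VolumeId: 7,
		ShardIds: shardIds,
	})
	return err
}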
+func (*VolumeEcShardsUnmountRequest) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{56} +} + +func (x *VolumeEcShardsUnmountRequest) GetVolumeId() uint32 { + if x != nil { + return x.VolumeId + } + return 0 +} + +func (x *VolumeEcShardsUnmountRequest) GetShardIds() []uint32 { + if x != nil { + return x.ShardIds + } + return nil +} + +type VolumeEcShardsUnmountResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *VolumeEcShardsUnmountResponse) Reset() { + *x = VolumeEcShardsUnmountResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[57] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *VolumeEcShardsUnmountResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VolumeEcShardsUnmountResponse) ProtoMessage() {} + +func (x *VolumeEcShardsUnmountResponse) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[57] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use VolumeEcShardsUnmountResponse.ProtoReflect.Descriptor instead. +func (*VolumeEcShardsUnmountResponse) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{57} +} + +type VolumeEcShardReadRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` + ShardId uint32 `protobuf:"varint,2,opt,name=shard_id,json=shardId,proto3" json:"shard_id,omitempty"` + Offset int64 `protobuf:"varint,3,opt,name=offset,proto3" json:"offset,omitempty"` + Size int64 `protobuf:"varint,4,opt,name=size,proto3" json:"size,omitempty"` + FileKey uint64 `protobuf:"varint,5,opt,name=file_key,json=fileKey,proto3" json:"file_key,omitempty"` +} + +func (x *VolumeEcShardReadRequest) Reset() { + *x = VolumeEcShardReadRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[58] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *VolumeEcShardReadRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VolumeEcShardReadRequest) ProtoMessage() {} + +func (x *VolumeEcShardReadRequest) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[58] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use VolumeEcShardReadRequest.ProtoReflect.Descriptor instead. 
+func (*VolumeEcShardReadRequest) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{58} +} + +func (x *VolumeEcShardReadRequest) GetVolumeId() uint32 { + if x != nil { + return x.VolumeId + } + return 0 +} + +func (x *VolumeEcShardReadRequest) GetShardId() uint32 { + if x != nil { + return x.ShardId + } + return 0 +} + +func (x *VolumeEcShardReadRequest) GetOffset() int64 { + if x != nil { + return x.Offset + } + return 0 +} + +func (x *VolumeEcShardReadRequest) GetSize() int64 { + if x != nil { + return x.Size + } + return 0 +} + +func (x *VolumeEcShardReadRequest) GetFileKey() uint64 { + if x != nil { + return x.FileKey + } + return 0 +} + +type VolumeEcShardReadResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Data []byte `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"` + IsDeleted bool `protobuf:"varint,2,opt,name=is_deleted,json=isDeleted,proto3" json:"is_deleted,omitempty"` +} + +func (x *VolumeEcShardReadResponse) Reset() { + *x = VolumeEcShardReadResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[59] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *VolumeEcShardReadResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VolumeEcShardReadResponse) ProtoMessage() {} + +func (x *VolumeEcShardReadResponse) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[59] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use VolumeEcShardReadResponse.ProtoReflect.Descriptor instead. 
+func (*VolumeEcShardReadResponse) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{59} +} + +func (x *VolumeEcShardReadResponse) GetData() []byte { + if x != nil { + return x.Data } return nil } -func (m *VolumeTailSenderResponse) GetIsLastChunk() bool { - if m != nil { - return m.IsLastChunk +func (x *VolumeEcShardReadResponse) GetIsDeleted() bool { + if x != nil { + return x.IsDeleted + } + return false +} + +type VolumeEcBlobDeleteRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` + Collection string `protobuf:"bytes,2,opt,name=collection,proto3" json:"collection,omitempty"` + FileKey uint64 `protobuf:"varint,3,opt,name=file_key,json=fileKey,proto3" json:"file_key,omitempty"` + Version uint32 `protobuf:"varint,4,opt,name=version,proto3" json:"version,omitempty"` +} + +func (x *VolumeEcBlobDeleteRequest) Reset() { + *x = VolumeEcBlobDeleteRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[60] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *VolumeEcBlobDeleteRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VolumeEcBlobDeleteRequest) ProtoMessage() {} + +func (x *VolumeEcBlobDeleteRequest) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[60] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use VolumeEcBlobDeleteRequest.ProtoReflect.Descriptor instead. +func (*VolumeEcBlobDeleteRequest) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{60} +} + +func (x *VolumeEcBlobDeleteRequest) GetVolumeId() uint32 { + if x != nil { + return x.VolumeId + } + return 0 +} + +func (x *VolumeEcBlobDeleteRequest) GetCollection() string { + if x != nil { + return x.Collection + } + return "" +} + +func (x *VolumeEcBlobDeleteRequest) GetFileKey() uint64 { + if x != nil { + return x.FileKey + } + return 0 +} + +func (x *VolumeEcBlobDeleteRequest) GetVersion() uint32 { + if x != nil { + return x.Version + } + return 0 +} + +type VolumeEcBlobDeleteResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *VolumeEcBlobDeleteResponse) Reset() { + *x = VolumeEcBlobDeleteResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[61] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *VolumeEcBlobDeleteResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VolumeEcBlobDeleteResponse) ProtoMessage() {} + +func (x *VolumeEcBlobDeleteResponse) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[61] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use VolumeEcBlobDeleteResponse.ProtoReflect.Descriptor instead. 
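VolumeEcShardRead serves a byte range out of a single shard, and the response can also flag that the addressed file key has been deleted. A sketch of a ranged read, assuming the RPC is server-streaming so that large ranges arrive in Data chunks:

package volumeops

import (
	"context"
	"fmt"
	"io"

	"github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
)

// ReadShardRange reads a byte range from one EC shard of a volume.
func ReadShardRange(ctx context.Context, client volume_server_pb.VolumeServerClient, fileKey uint64) ([]byte, error) {
	stream, err := client.VolumeEcShardRead(ctx, &volume_server_pb.VolumeEcShardReadRequest{
		VolumeId: 7,
		ShardId:  2,
		Offset:   0,
		Size:     4096,
		FileKey:  fileKey, // lets the server report deletion for this entry
	})
	if err != nil {
		return nil, err
	}
	var data []byte
	for {
		resp, err := stream.Recv()
		if err == io.EOF {
			return data, nil
		}
		if err != nil {
			return nil, err
		}
		if resp.IsDeleted {
			return nil, fmt.Errorf("file key %x is deleted", fileKey)
		}
		data = append(data, resp.Data...)
	}
}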
+func (*VolumeEcBlobDeleteResponse) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{61} +} + +type VolumeEcShardsToVolumeRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` + Collection string `protobuf:"bytes,2,opt,name=collection,proto3" json:"collection,omitempty"` +} + +func (x *VolumeEcShardsToVolumeRequest) Reset() { + *x = VolumeEcShardsToVolumeRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[62] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *VolumeEcShardsToVolumeRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VolumeEcShardsToVolumeRequest) ProtoMessage() {} + +func (x *VolumeEcShardsToVolumeRequest) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[62] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use VolumeEcShardsToVolumeRequest.ProtoReflect.Descriptor instead. +func (*VolumeEcShardsToVolumeRequest) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{62} +} + +func (x *VolumeEcShardsToVolumeRequest) GetVolumeId() uint32 { + if x != nil { + return x.VolumeId + } + return 0 +} + +func (x *VolumeEcShardsToVolumeRequest) GetCollection() string { + if x != nil { + return x.Collection + } + return "" +} + +type VolumeEcShardsToVolumeResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *VolumeEcShardsToVolumeResponse) Reset() { + *x = VolumeEcShardsToVolumeResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[63] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *VolumeEcShardsToVolumeResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VolumeEcShardsToVolumeResponse) ProtoMessage() {} + +func (x *VolumeEcShardsToVolumeResponse) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[63] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use VolumeEcShardsToVolumeResponse.ProtoReflect.Descriptor instead. 
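VolumeEcShardsToVolume is the inverse of shard generation: once a server holds enough shards of a volume locally, this call decodes them back into a plain .dat/.idx volume. A sketch, again assuming a unary RPC on the generated client:

package volumeops

import (
	"context"

	"github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
)

// DecodeEcVolume turns locally held EC shards back into a normal volume.
func DecodeEcVolume(ctx context.Context, client volume_server_pb.VolumeServerClient) error {
	_, err := client.VolumeEcShardsToVolume(ctx, &volume_server_pb.VolumeEcShardsToVolumeRequest{
		VolumeId:   7,
		Collection: "pictures",
	})
	return err
}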
+func (*VolumeEcShardsToVolumeResponse) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{63} +} + +type ReadVolumeFileStatusRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` +} + +func (x *ReadVolumeFileStatusRequest) Reset() { + *x = ReadVolumeFileStatusRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[64] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ReadVolumeFileStatusRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ReadVolumeFileStatusRequest) ProtoMessage() {} + +func (x *ReadVolumeFileStatusRequest) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[64] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ReadVolumeFileStatusRequest.ProtoReflect.Descriptor instead. +func (*ReadVolumeFileStatusRequest) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{64} +} + +func (x *ReadVolumeFileStatusRequest) GetVolumeId() uint32 { + if x != nil { + return x.VolumeId + } + return 0 +} + +type ReadVolumeFileStatusResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` + IdxFileTimestampSeconds uint64 `protobuf:"varint,2,opt,name=idx_file_timestamp_seconds,json=idxFileTimestampSeconds,proto3" json:"idx_file_timestamp_seconds,omitempty"` + IdxFileSize uint64 `protobuf:"varint,3,opt,name=idx_file_size,json=idxFileSize,proto3" json:"idx_file_size,omitempty"` + DatFileTimestampSeconds uint64 `protobuf:"varint,4,opt,name=dat_file_timestamp_seconds,json=datFileTimestampSeconds,proto3" json:"dat_file_timestamp_seconds,omitempty"` + DatFileSize uint64 `protobuf:"varint,5,opt,name=dat_file_size,json=datFileSize,proto3" json:"dat_file_size,omitempty"` + FileCount uint64 `protobuf:"varint,6,opt,name=file_count,json=fileCount,proto3" json:"file_count,omitempty"` + CompactionRevision uint32 `protobuf:"varint,7,opt,name=compaction_revision,json=compactionRevision,proto3" json:"compaction_revision,omitempty"` + Collection string `protobuf:"bytes,8,opt,name=collection,proto3" json:"collection,omitempty"` + DiskType string `protobuf:"bytes,9,opt,name=disk_type,json=diskType,proto3" json:"disk_type,omitempty"` +} + +func (x *ReadVolumeFileStatusResponse) Reset() { + *x = ReadVolumeFileStatusResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[65] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ReadVolumeFileStatusResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ReadVolumeFileStatusResponse) ProtoMessage() {} + +func (x *ReadVolumeFileStatusResponse) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[65] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: 
Use ReadVolumeFileStatusResponse.ProtoReflect.Descriptor instead. +func (*ReadVolumeFileStatusResponse) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{65} +} + +func (x *ReadVolumeFileStatusResponse) GetVolumeId() uint32 { + if x != nil { + return x.VolumeId + } + return 0 +} + +func (x *ReadVolumeFileStatusResponse) GetIdxFileTimestampSeconds() uint64 { + if x != nil { + return x.IdxFileTimestampSeconds + } + return 0 +} + +func (x *ReadVolumeFileStatusResponse) GetIdxFileSize() uint64 { + if x != nil { + return x.IdxFileSize + } + return 0 +} + +func (x *ReadVolumeFileStatusResponse) GetDatFileTimestampSeconds() uint64 { + if x != nil { + return x.DatFileTimestampSeconds + } + return 0 +} + +func (x *ReadVolumeFileStatusResponse) GetDatFileSize() uint64 { + if x != nil { + return x.DatFileSize + } + return 0 +} + +func (x *ReadVolumeFileStatusResponse) GetFileCount() uint64 { + if x != nil { + return x.FileCount + } + return 0 +} + +func (x *ReadVolumeFileStatusResponse) GetCompactionRevision() uint32 { + if x != nil { + return x.CompactionRevision + } + return 0 +} + +func (x *ReadVolumeFileStatusResponse) GetCollection() string { + if x != nil { + return x.Collection + } + return "" +} + +func (x *ReadVolumeFileStatusResponse) GetDiskType() string { + if x != nil { + return x.DiskType + } + return "" +} + +type DiskStatus struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Dir string `protobuf:"bytes,1,opt,name=dir,proto3" json:"dir,omitempty"` + All uint64 `protobuf:"varint,2,opt,name=all,proto3" json:"all,omitempty"` + Used uint64 `protobuf:"varint,3,opt,name=used,proto3" json:"used,omitempty"` + Free uint64 `protobuf:"varint,4,opt,name=free,proto3" json:"free,omitempty"` + PercentFree float32 `protobuf:"fixed32,5,opt,name=percent_free,json=percentFree,proto3" json:"percent_free,omitempty"` + PercentUsed float32 `protobuf:"fixed32,6,opt,name=percent_used,json=percentUsed,proto3" json:"percent_used,omitempty"` + DiskType string `protobuf:"bytes,7,opt,name=disk_type,json=diskType,proto3" json:"disk_type,omitempty"` +} + +func (x *DiskStatus) Reset() { + *x = DiskStatus{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[66] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DiskStatus) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DiskStatus) ProtoMessage() {} + +func (x *DiskStatus) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[66] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DiskStatus.ProtoReflect.Descriptor instead. 
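ReadVolumeFileStatusResponse bundles the sizes and mtimes of a volume's .idx and .dat files with its entry count, compaction revision, collection, and disk type, which is enough to spot an index that lags its data file. A sketch of that check, assuming a unary RPC on the generated client:

package volumeops

import (
	"context"
	"fmt"

	"github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
)

// CheckVolumeFiles reports basic .idx/.dat health for one volume.
func CheckVolumeFiles(ctx context.Context, client volume_server_pb.VolumeServerClient, volumeId uint32) error {
	resp, err := client.ReadVolumeFileStatus(ctx, &volume_server_pb.ReadVolumeFileStatusRequest{VolumeId: volumeId})
	if err != nil {
		return err
	}
	if resp.IdxFileTimestampSeconds < resp.DatFileTimestampSeconds {
		fmt.Printf("volume %d: .idx (%d) older than .dat (%d)\n",
			resp.VolumeId, resp.IdxFileTimestampSeconds, resp.DatFileTimestampSeconds)
	}
	fmt.Printf("%d entries, .dat %d bytes, collection %q, disk %q\n",
		resp.FileCount, resp.DatFileSize, resp.Collection, resp.DiskType)
	return nil
}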
+func (*DiskStatus) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{66} +} + +func (x *DiskStatus) GetDir() string { + if x != nil { + return x.Dir + } + return "" +} + +func (x *DiskStatus) GetAll() uint64 { + if x != nil { + return x.All + } + return 0 +} + +func (x *DiskStatus) GetUsed() uint64 { + if x != nil { + return x.Used + } + return 0 +} + +func (x *DiskStatus) GetFree() uint64 { + if x != nil { + return x.Free + } + return 0 +} + +func (x *DiskStatus) GetPercentFree() float32 { + if x != nil { + return x.PercentFree + } + return 0 +} + +func (x *DiskStatus) GetPercentUsed() float32 { + if x != nil { + return x.PercentUsed + } + return 0 +} + +func (x *DiskStatus) GetDiskType() string { + if x != nil { + return x.DiskType + } + return "" +} + +type MemStatus struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Goroutines int32 `protobuf:"varint,1,opt,name=goroutines,proto3" json:"goroutines,omitempty"` + All uint64 `protobuf:"varint,2,opt,name=all,proto3" json:"all,omitempty"` + Used uint64 `protobuf:"varint,3,opt,name=used,proto3" json:"used,omitempty"` + Free uint64 `protobuf:"varint,4,opt,name=free,proto3" json:"free,omitempty"` + Self uint64 `protobuf:"varint,5,opt,name=self,proto3" json:"self,omitempty"` + Heap uint64 `protobuf:"varint,6,opt,name=heap,proto3" json:"heap,omitempty"` + Stack uint64 `protobuf:"varint,7,opt,name=stack,proto3" json:"stack,omitempty"` +} + +func (x *MemStatus) Reset() { + *x = MemStatus{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[67] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MemStatus) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MemStatus) ProtoMessage() {} + +func (x *MemStatus) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[67] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MemStatus.ProtoReflect.Descriptor instead. 
+func (*MemStatus) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{67} +} + +func (x *MemStatus) GetGoroutines() int32 { + if x != nil { + return x.Goroutines + } + return 0 +} + +func (x *MemStatus) GetAll() uint64 { + if x != nil { + return x.All + } + return 0 +} + +func (x *MemStatus) GetUsed() uint64 { + if x != nil { + return x.Used + } + return 0 +} + +func (x *MemStatus) GetFree() uint64 { + if x != nil { + return x.Free + } + return 0 +} + +func (x *MemStatus) GetSelf() uint64 { + if x != nil { + return x.Self + } + return 0 +} + +func (x *MemStatus) GetHeap() uint64 { + if x != nil { + return x.Heap + } + return 0 +} + +func (x *MemStatus) GetStack() uint64 { + if x != nil { + return x.Stack + } + return 0 +} + +// tiered storage on volume servers +type RemoteFile struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + BackendType string `protobuf:"bytes,1,opt,name=backend_type,json=backendType,proto3" json:"backend_type,omitempty"` + BackendId string `protobuf:"bytes,2,opt,name=backend_id,json=backendId,proto3" json:"backend_id,omitempty"` + Key string `protobuf:"bytes,3,opt,name=key,proto3" json:"key,omitempty"` + Offset uint64 `protobuf:"varint,4,opt,name=offset,proto3" json:"offset,omitempty"` + FileSize uint64 `protobuf:"varint,5,opt,name=file_size,json=fileSize,proto3" json:"file_size,omitempty"` + ModifiedTime uint64 `protobuf:"varint,6,opt,name=modified_time,json=modifiedTime,proto3" json:"modified_time,omitempty"` + Extension string `protobuf:"bytes,7,opt,name=extension,proto3" json:"extension,omitempty"` +} + +func (x *RemoteFile) Reset() { + *x = RemoteFile{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[68] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RemoteFile) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RemoteFile) ProtoMessage() {} + +func (x *RemoteFile) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[68] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RemoteFile.ProtoReflect.Descriptor instead.
+func (*RemoteFile) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{68} +} + +func (x *RemoteFile) GetBackendType() string { + if x != nil { + return x.BackendType + } + return "" +} + +func (x *RemoteFile) GetBackendId() string { + if x != nil { + return x.BackendId } - return false + return "" } -type VolumeTailReceiverRequest struct { - VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` - SinceNs uint64 `protobuf:"varint,2,opt,name=since_ns,json=sinceNs" json:"since_ns,omitempty"` - IdleTimeoutSeconds uint32 `protobuf:"varint,3,opt,name=idle_timeout_seconds,json=idleTimeoutSeconds" json:"idle_timeout_seconds,omitempty"` - SourceVolumeServer string `protobuf:"bytes,4,opt,name=source_volume_server,json=sourceVolumeServer" json:"source_volume_server,omitempty"` +func (x *RemoteFile) GetKey() string { + if x != nil { + return x.Key + } + return "" } -func (m *VolumeTailReceiverRequest) Reset() { *m = VolumeTailReceiverRequest{} } -func (m *VolumeTailReceiverRequest) String() string { return proto.CompactTextString(m) } -func (*VolumeTailReceiverRequest) ProtoMessage() {} -func (*VolumeTailReceiverRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{34} } - -func (m *VolumeTailReceiverRequest) GetVolumeId() uint32 { - if m != nil { - return m.VolumeId +func (x *RemoteFile) GetOffset() uint64 { + if x != nil { + return x.Offset } return 0 } -func (m *VolumeTailReceiverRequest) GetSinceNs() uint64 { - if m != nil { - return m.SinceNs +func (x *RemoteFile) GetFileSize() uint64 { + if x != nil { + return x.FileSize } return 0 } -func (m *VolumeTailReceiverRequest) GetIdleTimeoutSeconds() uint32 { - if m != nil { - return m.IdleTimeoutSeconds +func (x *RemoteFile) GetModifiedTime() uint64 { + if x != nil { + return x.ModifiedTime } return 0 } -func (m *VolumeTailReceiverRequest) GetSourceVolumeServer() string { - if m != nil { - return m.SourceVolumeServer +func (x *RemoteFile) GetExtension() string { + if x != nil { + return x.Extension } return "" } -type VolumeTailReceiverResponse struct { -} - -func (m *VolumeTailReceiverResponse) Reset() { *m = VolumeTailReceiverResponse{} } -func (m *VolumeTailReceiverResponse) String() string { return proto.CompactTextString(m) } -func (*VolumeTailReceiverResponse) ProtoMessage() {} -func (*VolumeTailReceiverResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{35} } +type VolumeInfo struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -type VolumeEcShardsGenerateRequest struct { - VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` - Collection string `protobuf:"bytes,2,opt,name=collection" json:"collection,omitempty"` + Files []*RemoteFile `protobuf:"bytes,1,rep,name=files,proto3" json:"files,omitempty"` + Version uint32 `protobuf:"varint,2,opt,name=version,proto3" json:"version,omitempty"` + Replication string `protobuf:"bytes,3,opt,name=replication,proto3" json:"replication,omitempty"` } -func (m *VolumeEcShardsGenerateRequest) Reset() { *m = VolumeEcShardsGenerateRequest{} } -func (m *VolumeEcShardsGenerateRequest) String() string { return proto.CompactTextString(m) } -func (*VolumeEcShardsGenerateRequest) ProtoMessage() {} -func (*VolumeEcShardsGenerateRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{36} } - -func (m *VolumeEcShardsGenerateRequest) GetVolumeId() uint32 { - if m != nil { - return m.VolumeId 
+func (x *VolumeInfo) Reset() { + *x = VolumeInfo{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[69] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return 0 } -func (m *VolumeEcShardsGenerateRequest) GetCollection() string { - if m != nil { - return m.Collection - } - return "" +func (x *VolumeInfo) String() string { + return protoimpl.X.MessageStringOf(x) } -type VolumeEcShardsGenerateResponse struct { -} +func (*VolumeInfo) ProtoMessage() {} -func (m *VolumeEcShardsGenerateResponse) Reset() { *m = VolumeEcShardsGenerateResponse{} } -func (m *VolumeEcShardsGenerateResponse) String() string { return proto.CompactTextString(m) } -func (*VolumeEcShardsGenerateResponse) ProtoMessage() {} -func (*VolumeEcShardsGenerateResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{37} } +func (x *VolumeInfo) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[69] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} -type VolumeEcShardsRebuildRequest struct { - VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` - Collection string `protobuf:"bytes,2,opt,name=collection" json:"collection,omitempty"` +// Deprecated: Use VolumeInfo.ProtoReflect.Descriptor instead. +func (*VolumeInfo) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{69} } -func (m *VolumeEcShardsRebuildRequest) Reset() { *m = VolumeEcShardsRebuildRequest{} } -func (m *VolumeEcShardsRebuildRequest) String() string { return proto.CompactTextString(m) } -func (*VolumeEcShardsRebuildRequest) ProtoMessage() {} -func (*VolumeEcShardsRebuildRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{38} } +func (x *VolumeInfo) GetFiles() []*RemoteFile { + if x != nil { + return x.Files + } + return nil +} -func (m *VolumeEcShardsRebuildRequest) GetVolumeId() uint32 { - if m != nil { - return m.VolumeId +func (x *VolumeInfo) GetVersion() uint32 { + if x != nil { + return x.Version } return 0 } -func (m *VolumeEcShardsRebuildRequest) GetCollection() string { - if m != nil { - return m.Collection +func (x *VolumeInfo) GetReplication() string { + if x != nil { + return x.Replication } return "" } -type VolumeEcShardsRebuildResponse struct { - RebuiltShardIds []uint32 `protobuf:"varint,1,rep,packed,name=rebuilt_shard_ids,json=rebuiltShardIds" json:"rebuilt_shard_ids,omitempty"` -} +type VolumeTierMoveDatToRemoteRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func (m *VolumeEcShardsRebuildResponse) Reset() { *m = VolumeEcShardsRebuildResponse{} } -func (m *VolumeEcShardsRebuildResponse) String() string { return proto.CompactTextString(m) } -func (*VolumeEcShardsRebuildResponse) ProtoMessage() {} -func (*VolumeEcShardsRebuildResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{39} } + VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` + Collection string `protobuf:"bytes,2,opt,name=collection,proto3" json:"collection,omitempty"` + DestinationBackendName string `protobuf:"bytes,3,opt,name=destination_backend_name,json=destinationBackendName,proto3" json:"destination_backend_name,omitempty"` + KeepLocalDatFile bool 
`protobuf:"varint,4,opt,name=keep_local_dat_file,json=keepLocalDatFile,proto3" json:"keep_local_dat_file,omitempty"` +} -func (m *VolumeEcShardsRebuildResponse) GetRebuiltShardIds() []uint32 { - if m != nil { - return m.RebuiltShardIds +func (x *VolumeTierMoveDatToRemoteRequest) Reset() { + *x = VolumeTierMoveDatToRemoteRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[70] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return nil } -type VolumeEcShardsCopyRequest struct { - VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` - Collection string `protobuf:"bytes,2,opt,name=collection" json:"collection,omitempty"` - ShardIds []uint32 `protobuf:"varint,3,rep,packed,name=shard_ids,json=shardIds" json:"shard_ids,omitempty"` - CopyEcxFile bool `protobuf:"varint,4,opt,name=copy_ecx_file,json=copyEcxFile" json:"copy_ecx_file,omitempty"` - SourceDataNode string `protobuf:"bytes,5,opt,name=source_data_node,json=sourceDataNode" json:"source_data_node,omitempty"` - CopyEcjFile bool `protobuf:"varint,6,opt,name=copy_ecj_file,json=copyEcjFile" json:"copy_ecj_file,omitempty"` - CopyVifFile bool `protobuf:"varint,7,opt,name=copy_vif_file,json=copyVifFile" json:"copy_vif_file,omitempty"` +func (x *VolumeTierMoveDatToRemoteRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *VolumeEcShardsCopyRequest) Reset() { *m = VolumeEcShardsCopyRequest{} } -func (m *VolumeEcShardsCopyRequest) String() string { return proto.CompactTextString(m) } -func (*VolumeEcShardsCopyRequest) ProtoMessage() {} -func (*VolumeEcShardsCopyRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{40} } +func (*VolumeTierMoveDatToRemoteRequest) ProtoMessage() {} -func (m *VolumeEcShardsCopyRequest) GetVolumeId() uint32 { - if m != nil { - return m.VolumeId +func (x *VolumeTierMoveDatToRemoteRequest) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[70] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return 0 + return mi.MessageOf(x) } -func (m *VolumeEcShardsCopyRequest) GetCollection() string { - if m != nil { - return m.Collection - } - return "" +// Deprecated: Use VolumeTierMoveDatToRemoteRequest.ProtoReflect.Descriptor instead. 
+func (*VolumeTierMoveDatToRemoteRequest) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{70} } -func (m *VolumeEcShardsCopyRequest) GetShardIds() []uint32 { - if m != nil { - return m.ShardIds +func (x *VolumeTierMoveDatToRemoteRequest) GetVolumeId() uint32 { + if x != nil { + return x.VolumeId } - return nil + return 0 } -func (m *VolumeEcShardsCopyRequest) GetCopyEcxFile() bool { - if m != nil { - return m.CopyEcxFile +func (x *VolumeTierMoveDatToRemoteRequest) GetCollection() string { + if x != nil { + return x.Collection } - return false + return "" } -func (m *VolumeEcShardsCopyRequest) GetSourceDataNode() string { - if m != nil { - return m.SourceDataNode +func (x *VolumeTierMoveDatToRemoteRequest) GetDestinationBackendName() string { + if x != nil { + return x.DestinationBackendName } return "" } -func (m *VolumeEcShardsCopyRequest) GetCopyEcjFile() bool { - if m != nil { - return m.CopyEcjFile +func (x *VolumeTierMoveDatToRemoteRequest) GetKeepLocalDatFile() bool { + if x != nil { + return x.KeepLocalDatFile } return false } -func (m *VolumeEcShardsCopyRequest) GetCopyVifFile() bool { - if m != nil { - return m.CopyVifFile +type VolumeTierMoveDatToRemoteResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Processed int64 `protobuf:"varint,1,opt,name=processed,proto3" json:"processed,omitempty"` + ProcessedPercentage float32 `protobuf:"fixed32,2,opt,name=processedPercentage,proto3" json:"processedPercentage,omitempty"` +} + +func (x *VolumeTierMoveDatToRemoteResponse) Reset() { + *x = VolumeTierMoveDatToRemoteResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[71] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return false } -type VolumeEcShardsCopyResponse struct { +func (x *VolumeTierMoveDatToRemoteResponse) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *VolumeEcShardsCopyResponse) Reset() { *m = VolumeEcShardsCopyResponse{} } -func (m *VolumeEcShardsCopyResponse) String() string { return proto.CompactTextString(m) } -func (*VolumeEcShardsCopyResponse) ProtoMessage() {} -func (*VolumeEcShardsCopyResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{41} } +func (*VolumeTierMoveDatToRemoteResponse) ProtoMessage() {} -type VolumeEcShardsDeleteRequest struct { - VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` - Collection string `protobuf:"bytes,2,opt,name=collection" json:"collection,omitempty"` - ShardIds []uint32 `protobuf:"varint,3,rep,packed,name=shard_ids,json=shardIds" json:"shard_ids,omitempty"` +func (x *VolumeTierMoveDatToRemoteResponse) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[71] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -func (m *VolumeEcShardsDeleteRequest) Reset() { *m = VolumeEcShardsDeleteRequest{} } -func (m *VolumeEcShardsDeleteRequest) String() string { return proto.CompactTextString(m) } -func (*VolumeEcShardsDeleteRequest) ProtoMessage() {} -func (*VolumeEcShardsDeleteRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{42} } +// Deprecated: Use VolumeTierMoveDatToRemoteResponse.ProtoReflect.Descriptor instead. 
+func (*VolumeTierMoveDatToRemoteResponse) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{71} +} -func (m *VolumeEcShardsDeleteRequest) GetVolumeId() uint32 { - if m != nil { - return m.VolumeId +func (x *VolumeTierMoveDatToRemoteResponse) GetProcessed() int64 { + if x != nil { + return x.Processed } return 0 } -func (m *VolumeEcShardsDeleteRequest) GetCollection() string { - if m != nil { - return m.Collection +func (x *VolumeTierMoveDatToRemoteResponse) GetProcessedPercentage() float32 { + if x != nil { + return x.ProcessedPercentage } - return "" + return 0 +} + +type VolumeTierMoveDatFromRemoteRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` + Collection string `protobuf:"bytes,2,opt,name=collection,proto3" json:"collection,omitempty"` + KeepRemoteDatFile bool `protobuf:"varint,3,opt,name=keep_remote_dat_file,json=keepRemoteDatFile,proto3" json:"keep_remote_dat_file,omitempty"` } -func (m *VolumeEcShardsDeleteRequest) GetShardIds() []uint32 { - if m != nil { - return m.ShardIds +func (x *VolumeTierMoveDatFromRemoteRequest) Reset() { + *x = VolumeTierMoveDatFromRemoteRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[72] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return nil } -type VolumeEcShardsDeleteResponse struct { +func (x *VolumeTierMoveDatFromRemoteRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *VolumeEcShardsDeleteResponse) Reset() { *m = VolumeEcShardsDeleteResponse{} } -func (m *VolumeEcShardsDeleteResponse) String() string { return proto.CompactTextString(m) } -func (*VolumeEcShardsDeleteResponse) ProtoMessage() {} -func (*VolumeEcShardsDeleteResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{43} } +func (*VolumeTierMoveDatFromRemoteRequest) ProtoMessage() {} -type VolumeEcShardsMountRequest struct { - VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` - Collection string `protobuf:"bytes,2,opt,name=collection" json:"collection,omitempty"` - ShardIds []uint32 `protobuf:"varint,3,rep,packed,name=shard_ids,json=shardIds" json:"shard_ids,omitempty"` +func (x *VolumeTierMoveDatFromRemoteRequest) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[72] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -func (m *VolumeEcShardsMountRequest) Reset() { *m = VolumeEcShardsMountRequest{} } -func (m *VolumeEcShardsMountRequest) String() string { return proto.CompactTextString(m) } -func (*VolumeEcShardsMountRequest) ProtoMessage() {} -func (*VolumeEcShardsMountRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{44} } +// Deprecated: Use VolumeTierMoveDatFromRemoteRequest.ProtoReflect.Descriptor instead. 
+func (*VolumeTierMoveDatFromRemoteRequest) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{72} +} -func (m *VolumeEcShardsMountRequest) GetVolumeId() uint32 { - if m != nil { - return m.VolumeId +func (x *VolumeTierMoveDatFromRemoteRequest) GetVolumeId() uint32 { + if x != nil { + return x.VolumeId } return 0 } -func (m *VolumeEcShardsMountRequest) GetCollection() string { - if m != nil { - return m.Collection +func (x *VolumeTierMoveDatFromRemoteRequest) GetCollection() string { + if x != nil { + return x.Collection } return "" } -func (m *VolumeEcShardsMountRequest) GetShardIds() []uint32 { - if m != nil { - return m.ShardIds +func (x *VolumeTierMoveDatFromRemoteRequest) GetKeepRemoteDatFile() bool { + if x != nil { + return x.KeepRemoteDatFile } - return nil -} - -type VolumeEcShardsMountResponse struct { -} - -func (m *VolumeEcShardsMountResponse) Reset() { *m = VolumeEcShardsMountResponse{} } -func (m *VolumeEcShardsMountResponse) String() string { return proto.CompactTextString(m) } -func (*VolumeEcShardsMountResponse) ProtoMessage() {} -func (*VolumeEcShardsMountResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{45} } - -type VolumeEcShardsUnmountRequest struct { - VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` - ShardIds []uint32 `protobuf:"varint,3,rep,packed,name=shard_ids,json=shardIds" json:"shard_ids,omitempty"` + return false } -func (m *VolumeEcShardsUnmountRequest) Reset() { *m = VolumeEcShardsUnmountRequest{} } -func (m *VolumeEcShardsUnmountRequest) String() string { return proto.CompactTextString(m) } -func (*VolumeEcShardsUnmountRequest) ProtoMessage() {} -func (*VolumeEcShardsUnmountRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{46} } +type VolumeTierMoveDatFromRemoteResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func (m *VolumeEcShardsUnmountRequest) GetVolumeId() uint32 { - if m != nil { - return m.VolumeId - } - return 0 + Processed int64 `protobuf:"varint,1,opt,name=processed,proto3" json:"processed,omitempty"` + ProcessedPercentage float32 `protobuf:"fixed32,2,opt,name=processedPercentage,proto3" json:"processedPercentage,omitempty"` } -func (m *VolumeEcShardsUnmountRequest) GetShardIds() []uint32 { - if m != nil { - return m.ShardIds +func (x *VolumeTierMoveDatFromRemoteResponse) Reset() { + *x = VolumeTierMoveDatFromRemoteResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[73] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return nil } -type VolumeEcShardsUnmountResponse struct { +func (x *VolumeTierMoveDatFromRemoteResponse) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *VolumeEcShardsUnmountResponse) Reset() { *m = VolumeEcShardsUnmountResponse{} } -func (m *VolumeEcShardsUnmountResponse) String() string { return proto.CompactTextString(m) } -func (*VolumeEcShardsUnmountResponse) ProtoMessage() {} -func (*VolumeEcShardsUnmountResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{47} } +func (*VolumeTierMoveDatFromRemoteResponse) ProtoMessage() {} -type VolumeEcShardReadRequest struct { - VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` - ShardId uint32 `protobuf:"varint,2,opt,name=shard_id,json=shardId" json:"shard_id,omitempty"` - Offset int64 `protobuf:"varint,3,opt,name=offset" 
json:"offset,omitempty"` - Size int64 `protobuf:"varint,4,opt,name=size" json:"size,omitempty"` - FileKey uint64 `protobuf:"varint,5,opt,name=file_key,json=fileKey" json:"file_key,omitempty"` +func (x *VolumeTierMoveDatFromRemoteResponse) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[73] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -func (m *VolumeEcShardReadRequest) Reset() { *m = VolumeEcShardReadRequest{} } -func (m *VolumeEcShardReadRequest) String() string { return proto.CompactTextString(m) } -func (*VolumeEcShardReadRequest) ProtoMessage() {} -func (*VolumeEcShardReadRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{48} } +// Deprecated: Use VolumeTierMoveDatFromRemoteResponse.ProtoReflect.Descriptor instead. +func (*VolumeTierMoveDatFromRemoteResponse) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{73} +} -func (m *VolumeEcShardReadRequest) GetVolumeId() uint32 { - if m != nil { - return m.VolumeId +func (x *VolumeTierMoveDatFromRemoteResponse) GetProcessed() int64 { + if x != nil { + return x.Processed } return 0 } -func (m *VolumeEcShardReadRequest) GetShardId() uint32 { - if m != nil { - return m.ShardId +func (x *VolumeTierMoveDatFromRemoteResponse) GetProcessedPercentage() float32 { + if x != nil { + return x.ProcessedPercentage } return 0 } -func (m *VolumeEcShardReadRequest) GetOffset() int64 { - if m != nil { - return m.Offset - } - return 0 +type VolumeServerStatusRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields } -func (m *VolumeEcShardReadRequest) GetSize() int64 { - if m != nil { - return m.Size +func (x *VolumeServerStatusRequest) Reset() { + *x = VolumeServerStatusRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[74] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return 0 } -func (m *VolumeEcShardReadRequest) GetFileKey() uint64 { - if m != nil { - return m.FileKey +func (x *VolumeServerStatusRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VolumeServerStatusRequest) ProtoMessage() {} + +func (x *VolumeServerStatusRequest) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[74] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return 0 + return mi.MessageOf(x) } -type VolumeEcShardReadResponse struct { - Data []byte `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"` - IsDeleted bool `protobuf:"varint,2,opt,name=is_deleted,json=isDeleted" json:"is_deleted,omitempty"` +// Deprecated: Use VolumeServerStatusRequest.ProtoReflect.Descriptor instead. 
+func (*VolumeServerStatusRequest) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{74} } -func (m *VolumeEcShardReadResponse) Reset() { *m = VolumeEcShardReadResponse{} } -func (m *VolumeEcShardReadResponse) String() string { return proto.CompactTextString(m) } -func (*VolumeEcShardReadResponse) ProtoMessage() {} -func (*VolumeEcShardReadResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{49} } +type VolumeServerStatusResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func (m *VolumeEcShardReadResponse) GetData() []byte { - if m != nil { - return m.Data - } - return nil + DiskStatuses []*DiskStatus `protobuf:"bytes,1,rep,name=disk_statuses,json=diskStatuses,proto3" json:"disk_statuses,omitempty"` + MemoryStatus *MemStatus `protobuf:"bytes,2,opt,name=memory_status,json=memoryStatus,proto3" json:"memory_status,omitempty"` } -func (m *VolumeEcShardReadResponse) GetIsDeleted() bool { - if m != nil { - return m.IsDeleted +func (x *VolumeServerStatusResponse) Reset() { + *x = VolumeServerStatusResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[75] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return false } -type VolumeEcBlobDeleteRequest struct { - VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` - Collection string `protobuf:"bytes,2,opt,name=collection" json:"collection,omitempty"` - FileKey uint64 `protobuf:"varint,3,opt,name=file_key,json=fileKey" json:"file_key,omitempty"` - Version uint32 `protobuf:"varint,4,opt,name=version" json:"version,omitempty"` +func (x *VolumeServerStatusResponse) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *VolumeEcBlobDeleteRequest) Reset() { *m = VolumeEcBlobDeleteRequest{} } -func (m *VolumeEcBlobDeleteRequest) String() string { return proto.CompactTextString(m) } -func (*VolumeEcBlobDeleteRequest) ProtoMessage() {} -func (*VolumeEcBlobDeleteRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{50} } +func (*VolumeServerStatusResponse) ProtoMessage() {} -func (m *VolumeEcBlobDeleteRequest) GetVolumeId() uint32 { - if m != nil { - return m.VolumeId +func (x *VolumeServerStatusResponse) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[75] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return 0 + return mi.MessageOf(x) } -func (m *VolumeEcBlobDeleteRequest) GetCollection() string { - if m != nil { - return m.Collection - } - return "" +// Deprecated: Use VolumeServerStatusResponse.ProtoReflect.Descriptor instead. 
+func (*VolumeServerStatusResponse) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{75} } -func (m *VolumeEcBlobDeleteRequest) GetFileKey() uint64 { - if m != nil { - return m.FileKey +func (x *VolumeServerStatusResponse) GetDiskStatuses() []*DiskStatus { + if x != nil { + return x.DiskStatuses } - return 0 + return nil } -func (m *VolumeEcBlobDeleteRequest) GetVersion() uint32 { - if m != nil { - return m.Version +func (x *VolumeServerStatusResponse) GetMemoryStatus() *MemStatus { + if x != nil { + return x.MemoryStatus } - return 0 + return nil } -type VolumeEcBlobDeleteResponse struct { +type VolumeServerLeaveRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields } -func (m *VolumeEcBlobDeleteResponse) Reset() { *m = VolumeEcBlobDeleteResponse{} } -func (m *VolumeEcBlobDeleteResponse) String() string { return proto.CompactTextString(m) } -func (*VolumeEcBlobDeleteResponse) ProtoMessage() {} -func (*VolumeEcBlobDeleteResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{51} } +func (x *VolumeServerLeaveRequest) Reset() { + *x = VolumeServerLeaveRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[76] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} -type VolumeEcShardsToVolumeRequest struct { - VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` - Collection string `protobuf:"bytes,2,opt,name=collection" json:"collection,omitempty"` +func (x *VolumeServerLeaveRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *VolumeEcShardsToVolumeRequest) Reset() { *m = VolumeEcShardsToVolumeRequest{} } -func (m *VolumeEcShardsToVolumeRequest) String() string { return proto.CompactTextString(m) } -func (*VolumeEcShardsToVolumeRequest) ProtoMessage() {} -func (*VolumeEcShardsToVolumeRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{52} } +func (*VolumeServerLeaveRequest) ProtoMessage() {} -func (m *VolumeEcShardsToVolumeRequest) GetVolumeId() uint32 { - if m != nil { - return m.VolumeId +func (x *VolumeServerLeaveRequest) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[76] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return 0 + return mi.MessageOf(x) } -func (m *VolumeEcShardsToVolumeRequest) GetCollection() string { - if m != nil { - return m.Collection - } - return "" +// Deprecated: Use VolumeServerLeaveRequest.ProtoReflect.Descriptor instead. 
+func (*VolumeServerLeaveRequest) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{76} } -type VolumeEcShardsToVolumeResponse struct { +type VolumeServerLeaveResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields } -func (m *VolumeEcShardsToVolumeResponse) Reset() { *m = VolumeEcShardsToVolumeResponse{} } -func (m *VolumeEcShardsToVolumeResponse) String() string { return proto.CompactTextString(m) } -func (*VolumeEcShardsToVolumeResponse) ProtoMessage() {} -func (*VolumeEcShardsToVolumeResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{53} } +func (x *VolumeServerLeaveResponse) Reset() { + *x = VolumeServerLeaveResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[77] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} -type ReadVolumeFileStatusRequest struct { - VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` +func (x *VolumeServerLeaveResponse) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *ReadVolumeFileStatusRequest) Reset() { *m = ReadVolumeFileStatusRequest{} } -func (m *ReadVolumeFileStatusRequest) String() string { return proto.CompactTextString(m) } -func (*ReadVolumeFileStatusRequest) ProtoMessage() {} -func (*ReadVolumeFileStatusRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{54} } +func (*VolumeServerLeaveResponse) ProtoMessage() {} -func (m *ReadVolumeFileStatusRequest) GetVolumeId() uint32 { - if m != nil { - return m.VolumeId +func (x *VolumeServerLeaveResponse) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[77] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return 0 + return mi.MessageOf(x) } -type ReadVolumeFileStatusResponse struct { - VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` - IdxFileTimestampSeconds uint64 `protobuf:"varint,2,opt,name=idx_file_timestamp_seconds,json=idxFileTimestampSeconds" json:"idx_file_timestamp_seconds,omitempty"` - IdxFileSize uint64 `protobuf:"varint,3,opt,name=idx_file_size,json=idxFileSize" json:"idx_file_size,omitempty"` - DatFileTimestampSeconds uint64 `protobuf:"varint,4,opt,name=dat_file_timestamp_seconds,json=datFileTimestampSeconds" json:"dat_file_timestamp_seconds,omitempty"` - DatFileSize uint64 `protobuf:"varint,5,opt,name=dat_file_size,json=datFileSize" json:"dat_file_size,omitempty"` - FileCount uint64 `protobuf:"varint,6,opt,name=file_count,json=fileCount" json:"file_count,omitempty"` - CompactionRevision uint32 `protobuf:"varint,7,opt,name=compaction_revision,json=compactionRevision" json:"compaction_revision,omitempty"` - Collection string `protobuf:"bytes,8,opt,name=collection" json:"collection,omitempty"` +// Deprecated: Use VolumeServerLeaveResponse.ProtoReflect.Descriptor instead. 
+func (*VolumeServerLeaveResponse) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{77} } -func (m *ReadVolumeFileStatusResponse) Reset() { *m = ReadVolumeFileStatusResponse{} } -func (m *ReadVolumeFileStatusResponse) String() string { return proto.CompactTextString(m) } -func (*ReadVolumeFileStatusResponse) ProtoMessage() {} -func (*ReadVolumeFileStatusResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{55} } +// select on volume servers +type QueryRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func (m *ReadVolumeFileStatusResponse) GetVolumeId() uint32 { - if m != nil { - return m.VolumeId - } - return 0 + Selections []string `protobuf:"bytes,1,rep,name=selections,proto3" json:"selections,omitempty"` + FromFileIds []string `protobuf:"bytes,2,rep,name=from_file_ids,json=fromFileIds,proto3" json:"from_file_ids,omitempty"` + Filter *QueryRequest_Filter `protobuf:"bytes,3,opt,name=filter,proto3" json:"filter,omitempty"` + InputSerialization *QueryRequest_InputSerialization `protobuf:"bytes,4,opt,name=input_serialization,json=inputSerialization,proto3" json:"input_serialization,omitempty"` + OutputSerialization *QueryRequest_OutputSerialization `protobuf:"bytes,5,opt,name=output_serialization,json=outputSerialization,proto3" json:"output_serialization,omitempty"` } -func (m *ReadVolumeFileStatusResponse) GetIdxFileTimestampSeconds() uint64 { - if m != nil { - return m.IdxFileTimestampSeconds +func (x *QueryRequest) Reset() { + *x = QueryRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[78] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return 0 } -func (m *ReadVolumeFileStatusResponse) GetIdxFileSize() uint64 { - if m != nil { - return m.IdxFileSize - } - return 0 +func (x *QueryRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *ReadVolumeFileStatusResponse) GetDatFileTimestampSeconds() uint64 { - if m != nil { - return m.DatFileTimestampSeconds - } - return 0 -} +func (*QueryRequest) ProtoMessage() {} -func (m *ReadVolumeFileStatusResponse) GetDatFileSize() uint64 { - if m != nil { - return m.DatFileSize +func (x *QueryRequest) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[78] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return 0 + return mi.MessageOf(x) } -func (m *ReadVolumeFileStatusResponse) GetFileCount() uint64 { - if m != nil { - return m.FileCount - } - return 0 +// Deprecated: Use QueryRequest.ProtoReflect.Descriptor instead. 
+func (*QueryRequest) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{78} } -func (m *ReadVolumeFileStatusResponse) GetCompactionRevision() uint32 { - if m != nil { - return m.CompactionRevision +func (x *QueryRequest) GetSelections() []string { + if x != nil { + return x.Selections } - return 0 + return nil } -func (m *ReadVolumeFileStatusResponse) GetCollection() string { - if m != nil { - return m.Collection +func (x *QueryRequest) GetFromFileIds() []string { + if x != nil { + return x.FromFileIds } - return "" -} - -type DiskStatus struct { - Dir string `protobuf:"bytes,1,opt,name=dir" json:"dir,omitempty"` - All uint64 `protobuf:"varint,2,opt,name=all" json:"all,omitempty"` - Used uint64 `protobuf:"varint,3,opt,name=used" json:"used,omitempty"` - Free uint64 `protobuf:"varint,4,opt,name=free" json:"free,omitempty"` + return nil } -func (m *DiskStatus) Reset() { *m = DiskStatus{} } -func (m *DiskStatus) String() string { return proto.CompactTextString(m) } -func (*DiskStatus) ProtoMessage() {} -func (*DiskStatus) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{56} } - -func (m *DiskStatus) GetDir() string { - if m != nil { - return m.Dir +func (x *QueryRequest) GetFilter() *QueryRequest_Filter { + if x != nil { + return x.Filter } - return "" + return nil } -func (m *DiskStatus) GetAll() uint64 { - if m != nil { - return m.All +func (x *QueryRequest) GetInputSerialization() *QueryRequest_InputSerialization { + if x != nil { + return x.InputSerialization } - return 0 + return nil } -func (m *DiskStatus) GetUsed() uint64 { - if m != nil { - return m.Used +func (x *QueryRequest) GetOutputSerialization() *QueryRequest_OutputSerialization { + if x != nil { + return x.OutputSerialization } - return 0 + return nil } -func (m *DiskStatus) GetFree() uint64 { - if m != nil { - return m.Free - } - return 0 -} +type QueriedStripe struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -type MemStatus struct { - Goroutines int32 `protobuf:"varint,1,opt,name=goroutines" json:"goroutines,omitempty"` - All uint64 `protobuf:"varint,2,opt,name=all" json:"all,omitempty"` - Used uint64 `protobuf:"varint,3,opt,name=used" json:"used,omitempty"` - Free uint64 `protobuf:"varint,4,opt,name=free" json:"free,omitempty"` - Self uint64 `protobuf:"varint,5,opt,name=self" json:"self,omitempty"` - Heap uint64 `protobuf:"varint,6,opt,name=heap" json:"heap,omitempty"` - Stack uint64 `protobuf:"varint,7,opt,name=stack" json:"stack,omitempty"` + Records []byte `protobuf:"bytes,1,opt,name=records,proto3" json:"records,omitempty"` } -func (m *MemStatus) Reset() { *m = MemStatus{} } -func (m *MemStatus) String() string { return proto.CompactTextString(m) } -func (*MemStatus) ProtoMessage() {} -func (*MemStatus) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{57} } - -func (m *MemStatus) GetGoroutines() int32 { - if m != nil { - return m.Goroutines +func (x *QueriedStripe) Reset() { + *x = QueriedStripe{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[79] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return 0 } -func (m *MemStatus) GetAll() uint64 { - if m != nil { - return m.All - } - return 0 +func (x *QueriedStripe) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *MemStatus) GetUsed() uint64 { - if m != nil { - return m.Used +func (*QueriedStripe) ProtoMessage() {} + +func (x *QueriedStripe) ProtoReflect() 
protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[79] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return 0 + return mi.MessageOf(x) } -func (m *MemStatus) GetFree() uint64 { - if m != nil { - return m.Free - } - return 0 +// Deprecated: Use QueriedStripe.ProtoReflect.Descriptor instead. +func (*QueriedStripe) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{79} } -func (m *MemStatus) GetSelf() uint64 { - if m != nil { - return m.Self +func (x *QueriedStripe) GetRecords() []byte { + if x != nil { + return x.Records } - return 0 + return nil } -func (m *MemStatus) GetHeap() uint64 { - if m != nil { - return m.Heap - } - return 0 +type VolumeNeedleStatusRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` + NeedleId uint64 `protobuf:"varint,2,opt,name=needle_id,json=needleId,proto3" json:"needle_id,omitempty"` } -func (m *MemStatus) GetStack() uint64 { - if m != nil { - return m.Stack +func (x *VolumeNeedleStatusRequest) Reset() { + *x = VolumeNeedleStatusRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[80] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return 0 } -// tired storage on volume servers -type RemoteFile struct { - BackendType string `protobuf:"bytes,1,opt,name=backend_type,json=backendType" json:"backend_type,omitempty"` - BackendId string `protobuf:"bytes,2,opt,name=backend_id,json=backendId" json:"backend_id,omitempty"` - Key string `protobuf:"bytes,3,opt,name=key" json:"key,omitempty"` - Offset uint64 `protobuf:"varint,4,opt,name=offset" json:"offset,omitempty"` - FileSize uint64 `protobuf:"varint,5,opt,name=file_size,json=fileSize" json:"file_size,omitempty"` - ModifiedTime uint64 `protobuf:"varint,6,opt,name=modified_time,json=modifiedTime" json:"modified_time,omitempty"` - Extension string `protobuf:"bytes,7,opt,name=extension" json:"extension,omitempty"` +func (x *VolumeNeedleStatusRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *RemoteFile) Reset() { *m = RemoteFile{} } -func (m *RemoteFile) String() string { return proto.CompactTextString(m) } -func (*RemoteFile) ProtoMessage() {} -func (*RemoteFile) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{58} } +func (*VolumeNeedleStatusRequest) ProtoMessage() {} -func (m *RemoteFile) GetBackendType() string { - if m != nil { - return m.BackendType +func (x *VolumeNeedleStatusRequest) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[80] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return "" -} - -func (m *RemoteFile) GetBackendId() string { - if m != nil { - return m.BackendId - } - return "" + return mi.MessageOf(x) } -func (m *RemoteFile) GetKey() string { - if m != nil { - return m.Key - } - return "" +// Deprecated: Use VolumeNeedleStatusRequest.ProtoReflect.Descriptor instead. 
+func (*VolumeNeedleStatusRequest) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{80} } -func (m *RemoteFile) GetOffset() uint64 { - if m != nil { - return m.Offset +func (x *VolumeNeedleStatusRequest) GetVolumeId() uint32 { + if x != nil { + return x.VolumeId } return 0 } -func (m *RemoteFile) GetFileSize() uint64 { - if m != nil { - return m.FileSize +func (x *VolumeNeedleStatusRequest) GetNeedleId() uint64 { + if x != nil { + return x.NeedleId } return 0 } -func (m *RemoteFile) GetModifiedTime() uint64 { - if m != nil { - return m.ModifiedTime - } - return 0 +type VolumeNeedleStatusResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + NeedleId uint64 `protobuf:"varint,1,opt,name=needle_id,json=needleId,proto3" json:"needle_id,omitempty"` + Cookie uint32 `protobuf:"varint,2,opt,name=cookie,proto3" json:"cookie,omitempty"` + Size uint32 `protobuf:"varint,3,opt,name=size,proto3" json:"size,omitempty"` + LastModified uint64 `protobuf:"varint,4,opt,name=last_modified,json=lastModified,proto3" json:"last_modified,omitempty"` + Crc uint32 `protobuf:"varint,5,opt,name=crc,proto3" json:"crc,omitempty"` + Ttl string `protobuf:"bytes,6,opt,name=ttl,proto3" json:"ttl,omitempty"` } -func (m *RemoteFile) GetExtension() string { - if m != nil { - return m.Extension +func (x *VolumeNeedleStatusResponse) Reset() { + *x = VolumeNeedleStatusResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[81] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return "" } -type VolumeInfo struct { - Files []*RemoteFile `protobuf:"bytes,1,rep,name=files" json:"files,omitempty"` - Version uint32 `protobuf:"varint,2,opt,name=version" json:"version,omitempty"` +func (x *VolumeNeedleStatusResponse) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *VolumeInfo) Reset() { *m = VolumeInfo{} } -func (m *VolumeInfo) String() string { return proto.CompactTextString(m) } -func (*VolumeInfo) ProtoMessage() {} -func (*VolumeInfo) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{59} } +func (*VolumeNeedleStatusResponse) ProtoMessage() {} -func (m *VolumeInfo) GetFiles() []*RemoteFile { - if m != nil { - return m.Files +func (x *VolumeNeedleStatusResponse) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[81] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return nil + return mi.MessageOf(x) } -func (m *VolumeInfo) GetVersion() uint32 { - if m != nil { - return m.Version - } - return 0 +// Deprecated: Use VolumeNeedleStatusResponse.ProtoReflect.Descriptor instead. 
+func (*VolumeNeedleStatusResponse) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{81} } -type VolumeTierMoveDatToRemoteRequest struct { - VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` - Collection string `protobuf:"bytes,2,opt,name=collection" json:"collection,omitempty"` - DestinationBackendName string `protobuf:"bytes,3,opt,name=destination_backend_name,json=destinationBackendName" json:"destination_backend_name,omitempty"` - KeepLocalDatFile bool `protobuf:"varint,4,opt,name=keep_local_dat_file,json=keepLocalDatFile" json:"keep_local_dat_file,omitempty"` +func (x *VolumeNeedleStatusResponse) GetNeedleId() uint64 { + if x != nil { + return x.NeedleId + } + return 0 } -func (m *VolumeTierMoveDatToRemoteRequest) Reset() { *m = VolumeTierMoveDatToRemoteRequest{} } -func (m *VolumeTierMoveDatToRemoteRequest) String() string { return proto.CompactTextString(m) } -func (*VolumeTierMoveDatToRemoteRequest) ProtoMessage() {} -func (*VolumeTierMoveDatToRemoteRequest) Descriptor() ([]byte, []int) { - return fileDescriptor0, []int{60} +func (x *VolumeNeedleStatusResponse) GetCookie() uint32 { + if x != nil { + return x.Cookie + } + return 0 } -func (m *VolumeTierMoveDatToRemoteRequest) GetVolumeId() uint32 { - if m != nil { - return m.VolumeId +func (x *VolumeNeedleStatusResponse) GetSize() uint32 { + if x != nil { + return x.Size } return 0 } -func (m *VolumeTierMoveDatToRemoteRequest) GetCollection() string { - if m != nil { - return m.Collection +func (x *VolumeNeedleStatusResponse) GetLastModified() uint64 { + if x != nil { + return x.LastModified } - return "" + return 0 } -func (m *VolumeTierMoveDatToRemoteRequest) GetDestinationBackendName() string { - if m != nil { - return m.DestinationBackendName +func (x *VolumeNeedleStatusResponse) GetCrc() uint32 { + if x != nil { + return x.Crc } - return "" + return 0 } -func (m *VolumeTierMoveDatToRemoteRequest) GetKeepLocalDatFile() bool { - if m != nil { - return m.KeepLocalDatFile +func (x *VolumeNeedleStatusResponse) GetTtl() string { + if x != nil { + return x.Ttl } - return false + return "" } -type VolumeTierMoveDatToRemoteResponse struct { - Processed int64 `protobuf:"varint,1,opt,name=processed" json:"processed,omitempty"` - ProcessedPercentage float32 `protobuf:"fixed32,2,opt,name=processedPercentage" json:"processedPercentage,omitempty"` -} +type QueryRequest_Filter struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func (m *VolumeTierMoveDatToRemoteResponse) Reset() { *m = VolumeTierMoveDatToRemoteResponse{} } -func (m *VolumeTierMoveDatToRemoteResponse) String() string { return proto.CompactTextString(m) } -func (*VolumeTierMoveDatToRemoteResponse) ProtoMessage() {} -func (*VolumeTierMoveDatToRemoteResponse) Descriptor() ([]byte, []int) { - return fileDescriptor0, []int{61} + Field string `protobuf:"bytes,1,opt,name=field,proto3" json:"field,omitempty"` + Operand string `protobuf:"bytes,2,opt,name=operand,proto3" json:"operand,omitempty"` + Value string `protobuf:"bytes,3,opt,name=value,proto3" json:"value,omitempty"` } -func (m *VolumeTierMoveDatToRemoteResponse) GetProcessed() int64 { - if m != nil { - return m.Processed +func (x *QueryRequest_Filter) Reset() { + *x = QueryRequest_Filter{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[82] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return 0 } -func (m 
*VolumeTierMoveDatToRemoteResponse) GetProcessedPercentage() float32 { - if m != nil { - return m.ProcessedPercentage - } - return 0 +func (x *QueryRequest_Filter) String() string { + return protoimpl.X.MessageStringOf(x) } -type VolumeTierMoveDatFromRemoteRequest struct { - VolumeId uint32 `protobuf:"varint,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` - Collection string `protobuf:"bytes,2,opt,name=collection" json:"collection,omitempty"` - KeepRemoteDatFile bool `protobuf:"varint,3,opt,name=keep_remote_dat_file,json=keepRemoteDatFile" json:"keep_remote_dat_file,omitempty"` -} +func (*QueryRequest_Filter) ProtoMessage() {} -func (m *VolumeTierMoveDatFromRemoteRequest) Reset() { *m = VolumeTierMoveDatFromRemoteRequest{} } -func (m *VolumeTierMoveDatFromRemoteRequest) String() string { return proto.CompactTextString(m) } -func (*VolumeTierMoveDatFromRemoteRequest) ProtoMessage() {} -func (*VolumeTierMoveDatFromRemoteRequest) Descriptor() ([]byte, []int) { - return fileDescriptor0, []int{62} +func (x *QueryRequest_Filter) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[82] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -func (m *VolumeTierMoveDatFromRemoteRequest) GetVolumeId() uint32 { - if m != nil { - return m.VolumeId - } - return 0 +// Deprecated: Use QueryRequest_Filter.ProtoReflect.Descriptor instead. +func (*QueryRequest_Filter) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{78, 0} } -func (m *VolumeTierMoveDatFromRemoteRequest) GetCollection() string { - if m != nil { - return m.Collection +func (x *QueryRequest_Filter) GetField() string { + if x != nil { + return x.Field } return "" } -func (m *VolumeTierMoveDatFromRemoteRequest) GetKeepRemoteDatFile() bool { - if m != nil { - return m.KeepRemoteDatFile +func (x *QueryRequest_Filter) GetOperand() string { + if x != nil { + return x.Operand } - return false + return "" } -type VolumeTierMoveDatFromRemoteResponse struct { - Processed int64 `protobuf:"varint,1,opt,name=processed" json:"processed,omitempty"` - ProcessedPercentage float32 `protobuf:"fixed32,2,opt,name=processedPercentage" json:"processedPercentage,omitempty"` +func (x *QueryRequest_Filter) GetValue() string { + if x != nil { + return x.Value + } + return "" } -func (m *VolumeTierMoveDatFromRemoteResponse) Reset() { *m = VolumeTierMoveDatFromRemoteResponse{} } -func (m *VolumeTierMoveDatFromRemoteResponse) String() string { return proto.CompactTextString(m) } -func (*VolumeTierMoveDatFromRemoteResponse) ProtoMessage() {} -func (*VolumeTierMoveDatFromRemoteResponse) Descriptor() ([]byte, []int) { - return fileDescriptor0, []int{63} -} +type QueryRequest_InputSerialization struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func (m *VolumeTierMoveDatFromRemoteResponse) GetProcessed() int64 { - if m != nil { - return m.Processed - } - return 0 + // NONE | GZIP | BZIP2 + CompressionType string `protobuf:"bytes,1,opt,name=compression_type,json=compressionType,proto3" json:"compression_type,omitempty"` + CsvInput *QueryRequest_InputSerialization_CSVInput `protobuf:"bytes,2,opt,name=csv_input,json=csvInput,proto3" json:"csv_input,omitempty"` + JsonInput *QueryRequest_InputSerialization_JSONInput `protobuf:"bytes,3,opt,name=json_input,json=jsonInput,proto3" 
json:"json_input,omitempty"` + ParquetInput *QueryRequest_InputSerialization_ParquetInput `protobuf:"bytes,4,opt,name=parquet_input,json=parquetInput,proto3" json:"parquet_input,omitempty"` } -func (m *VolumeTierMoveDatFromRemoteResponse) GetProcessedPercentage() float32 { - if m != nil { - return m.ProcessedPercentage +func (x *QueryRequest_InputSerialization) Reset() { + *x = QueryRequest_InputSerialization{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[83] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return 0 } -// select on volume servers -type QueryRequest struct { - Selections []string `protobuf:"bytes,1,rep,name=selections" json:"selections,omitempty"` - FromFileIds []string `protobuf:"bytes,2,rep,name=from_file_ids,json=fromFileIds" json:"from_file_ids,omitempty"` - Filter *QueryRequest_Filter `protobuf:"bytes,3,opt,name=filter" json:"filter,omitempty"` - InputSerialization *QueryRequest_InputSerialization `protobuf:"bytes,4,opt,name=input_serialization,json=inputSerialization" json:"input_serialization,omitempty"` - OutputSerialization *QueryRequest_OutputSerialization `protobuf:"bytes,5,opt,name=output_serialization,json=outputSerialization" json:"output_serialization,omitempty"` +func (x *QueryRequest_InputSerialization) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *QueryRequest) Reset() { *m = QueryRequest{} } -func (m *QueryRequest) String() string { return proto.CompactTextString(m) } -func (*QueryRequest) ProtoMessage() {} -func (*QueryRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{64} } +func (*QueryRequest_InputSerialization) ProtoMessage() {} -func (m *QueryRequest) GetSelections() []string { - if m != nil { - return m.Selections +func (x *QueryRequest_InputSerialization) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[83] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return nil + return mi.MessageOf(x) } -func (m *QueryRequest) GetFromFileIds() []string { - if m != nil { - return m.FromFileIds - } - return nil +// Deprecated: Use QueryRequest_InputSerialization.ProtoReflect.Descriptor instead. 
+func (*QueryRequest_InputSerialization) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{78, 1} } -func (m *QueryRequest) GetFilter() *QueryRequest_Filter { - if m != nil { - return m.Filter +func (x *QueryRequest_InputSerialization) GetCompressionType() string { + if x != nil { + return x.CompressionType } - return nil + return "" } -func (m *QueryRequest) GetInputSerialization() *QueryRequest_InputSerialization { - if m != nil { - return m.InputSerialization +func (x *QueryRequest_InputSerialization) GetCsvInput() *QueryRequest_InputSerialization_CSVInput { + if x != nil { + return x.CsvInput } return nil } -func (m *QueryRequest) GetOutputSerialization() *QueryRequest_OutputSerialization { - if m != nil { - return m.OutputSerialization +func (x *QueryRequest_InputSerialization) GetJsonInput() *QueryRequest_InputSerialization_JSONInput { + if x != nil { + return x.JsonInput } return nil } -type QueryRequest_Filter struct { - Field string `protobuf:"bytes,1,opt,name=field" json:"field,omitempty"` - Operand string `protobuf:"bytes,2,opt,name=operand" json:"operand,omitempty"` - Value string `protobuf:"bytes,3,opt,name=value" json:"value,omitempty"` -} - -func (m *QueryRequest_Filter) Reset() { *m = QueryRequest_Filter{} } -func (m *QueryRequest_Filter) String() string { return proto.CompactTextString(m) } -func (*QueryRequest_Filter) ProtoMessage() {} -func (*QueryRequest_Filter) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{64, 0} } - -func (m *QueryRequest_Filter) GetField() string { - if m != nil { - return m.Field +func (x *QueryRequest_InputSerialization) GetParquetInput() *QueryRequest_InputSerialization_ParquetInput { + if x != nil { + return x.ParquetInput } - return "" + return nil } -func (m *QueryRequest_Filter) GetOperand() string { - if m != nil { - return m.Operand - } - return "" +type QueryRequest_OutputSerialization struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + CsvOutput *QueryRequest_OutputSerialization_CSVOutput `protobuf:"bytes,2,opt,name=csv_output,json=csvOutput,proto3" json:"csv_output,omitempty"` + JsonOutput *QueryRequest_OutputSerialization_JSONOutput `protobuf:"bytes,3,opt,name=json_output,json=jsonOutput,proto3" json:"json_output,omitempty"` } -func (m *QueryRequest_Filter) GetValue() string { - if m != nil { - return m.Value +func (x *QueryRequest_OutputSerialization) Reset() { + *x = QueryRequest_OutputSerialization{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[84] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return "" } -type QueryRequest_InputSerialization struct { - // NONE | GZIP | BZIP2 - CompressionType string `protobuf:"bytes,1,opt,name=compression_type,json=compressionType" json:"compression_type,omitempty"` - CsvInput *QueryRequest_InputSerialization_CSVInput `protobuf:"bytes,2,opt,name=csv_input,json=csvInput" json:"csv_input,omitempty"` - JsonInput *QueryRequest_InputSerialization_JSONInput `protobuf:"bytes,3,opt,name=json_input,json=jsonInput" json:"json_input,omitempty"` - ParquetInput *QueryRequest_InputSerialization_ParquetInput `protobuf:"bytes,4,opt,name=parquet_input,json=parquetInput" json:"parquet_input,omitempty"` +func (x *QueryRequest_OutputSerialization) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *QueryRequest_InputSerialization) Reset() { *m = QueryRequest_InputSerialization{} } -func (m 
*QueryRequest_InputSerialization) String() string { return proto.CompactTextString(m) } -func (*QueryRequest_InputSerialization) ProtoMessage() {} -func (*QueryRequest_InputSerialization) Descriptor() ([]byte, []int) { - return fileDescriptor0, []int{64, 1} -} +func (*QueryRequest_OutputSerialization) ProtoMessage() {} -func (m *QueryRequest_InputSerialization) GetCompressionType() string { - if m != nil { - return m.CompressionType +func (x *QueryRequest_OutputSerialization) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[84] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return "" + return mi.MessageOf(x) } -func (m *QueryRequest_InputSerialization) GetCsvInput() *QueryRequest_InputSerialization_CSVInput { - if m != nil { - return m.CsvInput - } - return nil +// Deprecated: Use QueryRequest_OutputSerialization.ProtoReflect.Descriptor instead. +func (*QueryRequest_OutputSerialization) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{78, 2} } -func (m *QueryRequest_InputSerialization) GetJsonInput() *QueryRequest_InputSerialization_JSONInput { - if m != nil { - return m.JsonInput +func (x *QueryRequest_OutputSerialization) GetCsvOutput() *QueryRequest_OutputSerialization_CSVOutput { + if x != nil { + return x.CsvOutput } return nil } -func (m *QueryRequest_InputSerialization) GetParquetInput() *QueryRequest_InputSerialization_ParquetInput { - if m != nil { - return m.ParquetInput +func (x *QueryRequest_OutputSerialization) GetJsonOutput() *QueryRequest_OutputSerialization_JSONOutput { + if x != nil { + return x.JsonOutput } return nil } type QueryRequest_InputSerialization_CSVInput struct { - FileHeaderInfo string `protobuf:"bytes,1,opt,name=file_header_info,json=fileHeaderInfo" json:"file_header_info,omitempty"` - RecordDelimiter string `protobuf:"bytes,2,opt,name=record_delimiter,json=recordDelimiter" json:"record_delimiter,omitempty"` - FieldDelimiter string `protobuf:"bytes,3,opt,name=field_delimiter,json=fieldDelimiter" json:"field_delimiter,omitempty"` - QuoteCharactoer string `protobuf:"bytes,4,opt,name=quote_charactoer,json=quoteCharactoer" json:"quote_charactoer,omitempty"` - QuoteEscapeCharacter string `protobuf:"bytes,5,opt,name=quote_escape_character,json=quoteEscapeCharacter" json:"quote_escape_character,omitempty"` - Comments string `protobuf:"bytes,6,opt,name=comments" json:"comments,omitempty"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + FileHeaderInfo string `protobuf:"bytes,1,opt,name=file_header_info,json=fileHeaderInfo,proto3" json:"file_header_info,omitempty"` // Valid values: NONE | USE | IGNORE + RecordDelimiter string `protobuf:"bytes,2,opt,name=record_delimiter,json=recordDelimiter,proto3" json:"record_delimiter,omitempty"` // Default: \n + FieldDelimiter string `protobuf:"bytes,3,opt,name=field_delimiter,json=fieldDelimiter,proto3" json:"field_delimiter,omitempty"` // Default: , + QuoteCharactoer string `protobuf:"bytes,4,opt,name=quote_charactoer,json=quoteCharactoer,proto3" json:"quote_charactoer,omitempty"` // Default: " + QuoteEscapeCharacter string `protobuf:"bytes,5,opt,name=quote_escape_character,json=quoteEscapeCharacter,proto3" json:"quote_escape_character,omitempty"` // Default: " + Comments string `protobuf:"bytes,6,opt,name=comments,proto3" json:"comments,omitempty"` // Default: # // If 
true, records might contain record delimiters within quote characters - AllowQuotedRecordDelimiter bool `protobuf:"varint,7,opt,name=allow_quoted_record_delimiter,json=allowQuotedRecordDelimiter" json:"allow_quoted_record_delimiter,omitempty"` + AllowQuotedRecordDelimiter bool `protobuf:"varint,7,opt,name=allow_quoted_record_delimiter,json=allowQuotedRecordDelimiter,proto3" json:"allow_quoted_record_delimiter,omitempty"` // default False. +} + +func (x *QueryRequest_InputSerialization_CSVInput) Reset() { + *x = QueryRequest_InputSerialization_CSVInput{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[85] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *QueryRequest_InputSerialization_CSVInput) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *QueryRequest_InputSerialization_CSVInput) Reset() { - *m = QueryRequest_InputSerialization_CSVInput{} +func (*QueryRequest_InputSerialization_CSVInput) ProtoMessage() {} + +func (x *QueryRequest_InputSerialization_CSVInput) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[85] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -func (m *QueryRequest_InputSerialization_CSVInput) String() string { return proto.CompactTextString(m) } -func (*QueryRequest_InputSerialization_CSVInput) ProtoMessage() {} + +// Deprecated: Use QueryRequest_InputSerialization_CSVInput.ProtoReflect.Descriptor instead. func (*QueryRequest_InputSerialization_CSVInput) Descriptor() ([]byte, []int) { - return fileDescriptor0, []int{64, 1, 0} + return file_volume_server_proto_rawDescGZIP(), []int{78, 1, 0} +} + +func (x *QueryRequest_InputSerialization_CSVInput) GetFileHeaderInfo() string { + if x != nil { + return x.FileHeaderInfo + } + return "" } -func (m *QueryRequest_InputSerialization_CSVInput) GetFileHeaderInfo() string { - if m != nil { - return m.FileHeaderInfo +func (x *QueryRequest_InputSerialization_CSVInput) GetRecordDelimiter() string { + if x != nil { + return x.RecordDelimiter } return "" } -func (m *QueryRequest_InputSerialization_CSVInput) GetRecordDelimiter() string { - if m != nil { - return m.RecordDelimiter +func (x *QueryRequest_InputSerialization_CSVInput) GetFieldDelimiter() string { + if x != nil { + return x.FieldDelimiter } return "" } -func (m *QueryRequest_InputSerialization_CSVInput) GetFieldDelimiter() string { - if m != nil { - return m.FieldDelimiter +func (x *QueryRequest_InputSerialization_CSVInput) GetQuoteCharactoer() string { + if x != nil { + return x.QuoteCharactoer } return "" } -func (m *QueryRequest_InputSerialization_CSVInput) GetQuoteCharactoer() string { - if m != nil { - return m.QuoteCharactoer +func (x *QueryRequest_InputSerialization_CSVInput) GetQuoteEscapeCharacter() string { + if x != nil { + return x.QuoteEscapeCharacter } return "" } -func (m *QueryRequest_InputSerialization_CSVInput) GetQuoteEscapeCharacter() string { - if m != nil { - return m.QuoteEscapeCharacter +func (x *QueryRequest_InputSerialization_CSVInput) GetComments() string { + if x != nil { + return x.Comments } return "" } -func (m *QueryRequest_InputSerialization_CSVInput) GetComments() string { - if m != nil { - return m.Comments - } - return "" +func (x *QueryRequest_InputSerialization_CSVInput) GetAllowQuotedRecordDelimiter() bool { + if x != nil { + return 
x.AllowQuotedRecordDelimiter + } + return false +} + +type QueryRequest_InputSerialization_JSONInput struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` // Valid values: DOCUMENT | LINES } -func (m *QueryRequest_InputSerialization_CSVInput) GetAllowQuotedRecordDelimiter() bool { - if m != nil { - return m.AllowQuotedRecordDelimiter +func (x *QueryRequest_InputSerialization_JSONInput) Reset() { + *x = QueryRequest_InputSerialization_JSONInput{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[86] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return false } -type QueryRequest_InputSerialization_JSONInput struct { - Type string `protobuf:"bytes,1,opt,name=type" json:"type,omitempty"` +func (x *QueryRequest_InputSerialization_JSONInput) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *QueryRequest_InputSerialization_JSONInput) Reset() { - *m = QueryRequest_InputSerialization_JSONInput{} +func (*QueryRequest_InputSerialization_JSONInput) ProtoMessage() {} + +func (x *QueryRequest_InputSerialization_JSONInput) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[86] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -func (m *QueryRequest_InputSerialization_JSONInput) String() string { return proto.CompactTextString(m) } -func (*QueryRequest_InputSerialization_JSONInput) ProtoMessage() {} + +// Deprecated: Use QueryRequest_InputSerialization_JSONInput.ProtoReflect.Descriptor instead. 
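A minimal round-trip sketch (not part of the generated patch), assuming the generated package lives at github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb: the regenerated types satisfy proto.Message through their ProtoReflect methods, so they marshal with the new google.golang.org/protobuf runtime rather than the legacy github.com/golang/protobuf/proto package.

package main

import (
	"log"

	"google.golang.org/protobuf/proto"

	volume_server_pb "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
)

func main() {
	// Type accepts DOCUMENT | LINES, per the generated field comment.
	in := &volume_server_pb.QueryRequest_InputSerialization_JSONInput{Type: "LINES"}

	// Marshal through the new runtime; the message implements
	// proto.Message via the generated ProtoReflect method.
	data, err := proto.Marshal(in)
	if err != nil {
		log.Fatal(err)
	}

	out := &volume_server_pb.QueryRequest_InputSerialization_JSONInput{}
	if err := proto.Unmarshal(data, out); err != nil {
		log.Fatal(err)
	}
	log.Println(out.GetType()) // "LINES"
}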
func (*QueryRequest_InputSerialization_JSONInput) Descriptor() ([]byte, []int) { - return fileDescriptor0, []int{64, 1, 1} + return file_volume_server_proto_rawDescGZIP(), []int{78, 1, 1} } -func (m *QueryRequest_InputSerialization_JSONInput) GetType() string { - if m != nil { - return m.Type +func (x *QueryRequest_InputSerialization_JSONInput) GetType() string { + if x != nil { + return x.Type } return "" } type QueryRequest_InputSerialization_ParquetInput struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields } -func (m *QueryRequest_InputSerialization_ParquetInput) Reset() { - *m = QueryRequest_InputSerialization_ParquetInput{} -} -func (m *QueryRequest_InputSerialization_ParquetInput) String() string { - return proto.CompactTextString(m) -} -func (*QueryRequest_InputSerialization_ParquetInput) ProtoMessage() {} -func (*QueryRequest_InputSerialization_ParquetInput) Descriptor() ([]byte, []int) { - return fileDescriptor0, []int{64, 1, 2} +func (x *QueryRequest_InputSerialization_ParquetInput) Reset() { + *x = QueryRequest_InputSerialization_ParquetInput{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[87] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -type QueryRequest_OutputSerialization struct { - CsvOutput *QueryRequest_OutputSerialization_CSVOutput `protobuf:"bytes,2,opt,name=csv_output,json=csvOutput" json:"csv_output,omitempty"` - JsonOutput *QueryRequest_OutputSerialization_JSONOutput `protobuf:"bytes,3,opt,name=json_output,json=jsonOutput" json:"json_output,omitempty"` +func (x *QueryRequest_InputSerialization_ParquetInput) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *QueryRequest_OutputSerialization) Reset() { *m = QueryRequest_OutputSerialization{} } -func (m *QueryRequest_OutputSerialization) String() string { return proto.CompactTextString(m) } -func (*QueryRequest_OutputSerialization) ProtoMessage() {} -func (*QueryRequest_OutputSerialization) Descriptor() ([]byte, []int) { - return fileDescriptor0, []int{64, 2} -} +func (*QueryRequest_InputSerialization_ParquetInput) ProtoMessage() {} -func (m *QueryRequest_OutputSerialization) GetCsvOutput() *QueryRequest_OutputSerialization_CSVOutput { - if m != nil { - return m.CsvOutput +func (x *QueryRequest_InputSerialization_ParquetInput) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[87] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return nil + return mi.MessageOf(x) } -func (m *QueryRequest_OutputSerialization) GetJsonOutput() *QueryRequest_OutputSerialization_JSONOutput { - if m != nil { - return m.JsonOutput - } - return nil +// Deprecated: Use QueryRequest_InputSerialization_ParquetInput.ProtoReflect.Descriptor instead. 
+func (*QueryRequest_InputSerialization_ParquetInput) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{78, 1, 2} } type QueryRequest_OutputSerialization_CSVOutput struct { - QuoteFields string `protobuf:"bytes,1,opt,name=quote_fields,json=quoteFields" json:"quote_fields,omitempty"` - RecordDelimiter string `protobuf:"bytes,2,opt,name=record_delimiter,json=recordDelimiter" json:"record_delimiter,omitempty"` - FieldDelimiter string `protobuf:"bytes,3,opt,name=field_delimiter,json=fieldDelimiter" json:"field_delimiter,omitempty"` - QuoteCharactoer string `protobuf:"bytes,4,opt,name=quote_charactoer,json=quoteCharactoer" json:"quote_charactoer,omitempty"` - QuoteEscapeCharacter string `protobuf:"bytes,5,opt,name=quote_escape_character,json=quoteEscapeCharacter" json:"quote_escape_character,omitempty"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + QuoteFields string `protobuf:"bytes,1,opt,name=quote_fields,json=quoteFields,proto3" json:"quote_fields,omitempty"` // Valid values: ALWAYS | ASNEEDED + RecordDelimiter string `protobuf:"bytes,2,opt,name=record_delimiter,json=recordDelimiter,proto3" json:"record_delimiter,omitempty"` // Default: \n + FieldDelimiter string `protobuf:"bytes,3,opt,name=field_delimiter,json=fieldDelimiter,proto3" json:"field_delimiter,omitempty"` // Default: , + QuoteCharactoer string `protobuf:"bytes,4,opt,name=quote_charactoer,json=quoteCharactoer,proto3" json:"quote_charactoer,omitempty"` // Default: " + QuoteEscapeCharacter string `protobuf:"bytes,5,opt,name=quote_escape_character,json=quoteEscapeCharacter,proto3" json:"quote_escape_character,omitempty"` // Default: " } -func (m *QueryRequest_OutputSerialization_CSVOutput) Reset() { - *m = QueryRequest_OutputSerialization_CSVOutput{} +func (x *QueryRequest_OutputSerialization_CSVOutput) Reset() { + *x = QueryRequest_OutputSerialization_CSVOutput{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[88] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *QueryRequest_OutputSerialization_CSVOutput) String() string { - return proto.CompactTextString(m) + +func (x *QueryRequest_OutputSerialization_CSVOutput) String() string { + return protoimpl.X.MessageStringOf(x) } + func (*QueryRequest_OutputSerialization_CSVOutput) ProtoMessage() {} + +func (x *QueryRequest_OutputSerialization_CSVOutput) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[88] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use QueryRequest_OutputSerialization_CSVOutput.ProtoReflect.Descriptor instead. 
func (*QueryRequest_OutputSerialization_CSVOutput) Descriptor() ([]byte, []int) { - return fileDescriptor0, []int{64, 2, 0} + return file_volume_server_proto_rawDescGZIP(), []int{78, 2, 0} } -func (m *QueryRequest_OutputSerialization_CSVOutput) GetQuoteFields() string { - if m != nil { - return m.QuoteFields +func (x *QueryRequest_OutputSerialization_CSVOutput) GetQuoteFields() string { + if x != nil { + return x.QuoteFields } return "" } -func (m *QueryRequest_OutputSerialization_CSVOutput) GetRecordDelimiter() string { - if m != nil { - return m.RecordDelimiter +func (x *QueryRequest_OutputSerialization_CSVOutput) GetRecordDelimiter() string { + if x != nil { + return x.RecordDelimiter } return "" } -func (m *QueryRequest_OutputSerialization_CSVOutput) GetFieldDelimiter() string { - if m != nil { - return m.FieldDelimiter +func (x *QueryRequest_OutputSerialization_CSVOutput) GetFieldDelimiter() string { + if x != nil { + return x.FieldDelimiter } return "" } -func (m *QueryRequest_OutputSerialization_CSVOutput) GetQuoteCharactoer() string { - if m != nil { - return m.QuoteCharactoer +func (x *QueryRequest_OutputSerialization_CSVOutput) GetQuoteCharactoer() string { + if x != nil { + return x.QuoteCharactoer } return "" } -func (m *QueryRequest_OutputSerialization_CSVOutput) GetQuoteEscapeCharacter() string { - if m != nil { - return m.QuoteEscapeCharacter +func (x *QueryRequest_OutputSerialization_CSVOutput) GetQuoteEscapeCharacter() string { + if x != nil { + return x.QuoteEscapeCharacter } return "" } type QueryRequest_OutputSerialization_JSONOutput struct { - RecordDelimiter string `protobuf:"bytes,1,opt,name=record_delimiter,json=recordDelimiter" json:"record_delimiter,omitempty"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + RecordDelimiter string `protobuf:"bytes,1,opt,name=record_delimiter,json=recordDelimiter,proto3" json:"record_delimiter,omitempty"` } -func (m *QueryRequest_OutputSerialization_JSONOutput) Reset() { - *m = QueryRequest_OutputSerialization_JSONOutput{} +func (x *QueryRequest_OutputSerialization_JSONOutput) Reset() { + *x = QueryRequest_OutputSerialization_JSONOutput{} + if protoimpl.UnsafeEnabled { + mi := &file_volume_server_proto_msgTypes[89] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *QueryRequest_OutputSerialization_JSONOutput) String() string { - return proto.CompactTextString(m) + +func (x *QueryRequest_OutputSerialization_JSONOutput) String() string { + return protoimpl.X.MessageStringOf(x) } + func (*QueryRequest_OutputSerialization_JSONOutput) ProtoMessage() {} -func (*QueryRequest_OutputSerialization_JSONOutput) Descriptor() ([]byte, []int) { - return fileDescriptor0, []int{64, 2, 1} -} -func (m *QueryRequest_OutputSerialization_JSONOutput) GetRecordDelimiter() string { - if m != nil { - return m.RecordDelimiter +func (x *QueryRequest_OutputSerialization_JSONOutput) ProtoReflect() protoreflect.Message { + mi := &file_volume_server_proto_msgTypes[89] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return "" + return mi.MessageOf(x) } -type QueriedStripe struct { - Records []byte `protobuf:"bytes,1,opt,name=records,proto3" json:"records,omitempty"` +// Deprecated: Use QueryRequest_OutputSerialization_JSONOutput.ProtoReflect.Descriptor instead. 
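A construction sketch under the same assumed import path, showing how a caller might populate the nested Query serialization messages defined above. The regenerated getters each check the receiver for nil, so chained access on unset fields yields zero values instead of panicking.

package main

import (
	"fmt"

	volume_server_pb "github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
)

func main() {
	in := &volume_server_pb.QueryRequest_InputSerialization{
		CompressionType: "NONE", // NONE | GZIP | BZIP2, per the proto comment
		CsvInput: &volume_server_pb.QueryRequest_InputSerialization_CSVInput{
			FileHeaderInfo:  "USE", // Valid values: NONE | USE | IGNORE
			RecordDelimiter: "\n",
			FieldDelimiter:  ",",
		},
	}
	out := &volume_server_pb.QueryRequest_OutputSerialization{
		CsvOutput: &volume_server_pb.QueryRequest_OutputSerialization_CSVOutput{
			QuoteFields:     "ASNEEDED", // Valid values: ALWAYS | ASNEEDED
			RecordDelimiter: "\n",
		},
	}

	fmt.Println(in.GetCsvInput().GetFieldDelimiter())   // ","
	fmt.Println(out.GetCsvOutput().GetQuoteFields())    // "ASNEEDED"

	// Nil-safe chaining: json_input is unset, so GetJsonInput() returns a
	// nil *JSONInput, and GetType() on that nil receiver returns "".
	fmt.Printf("%q\n", in.GetJsonInput().GetType())
}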
+func (*QueryRequest_OutputSerialization_JSONOutput) Descriptor() ([]byte, []int) { + return file_volume_server_proto_rawDescGZIP(), []int{78, 2, 1} } -func (m *QueriedStripe) Reset() { *m = QueriedStripe{} } -func (m *QueriedStripe) String() string { return proto.CompactTextString(m) } -func (*QueriedStripe) ProtoMessage() {} -func (*QueriedStripe) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{65} } - -func (m *QueriedStripe) GetRecords() []byte { - if m != nil { - return m.Records +func (x *QueryRequest_OutputSerialization_JSONOutput) GetRecordDelimiter() string { + if x != nil { + return x.RecordDelimiter } - return nil + return "" } -func init() { - proto.RegisterType((*BatchDeleteRequest)(nil), "volume_server_pb.BatchDeleteRequest") - proto.RegisterType((*BatchDeleteResponse)(nil), "volume_server_pb.BatchDeleteResponse") - proto.RegisterType((*DeleteResult)(nil), "volume_server_pb.DeleteResult") - proto.RegisterType((*Empty)(nil), "volume_server_pb.Empty") - proto.RegisterType((*VacuumVolumeCheckRequest)(nil), "volume_server_pb.VacuumVolumeCheckRequest") - proto.RegisterType((*VacuumVolumeCheckResponse)(nil), "volume_server_pb.VacuumVolumeCheckResponse") - proto.RegisterType((*VacuumVolumeCompactRequest)(nil), "volume_server_pb.VacuumVolumeCompactRequest") - proto.RegisterType((*VacuumVolumeCompactResponse)(nil), "volume_server_pb.VacuumVolumeCompactResponse") - proto.RegisterType((*VacuumVolumeCommitRequest)(nil), "volume_server_pb.VacuumVolumeCommitRequest") - proto.RegisterType((*VacuumVolumeCommitResponse)(nil), "volume_server_pb.VacuumVolumeCommitResponse") - proto.RegisterType((*VacuumVolumeCleanupRequest)(nil), "volume_server_pb.VacuumVolumeCleanupRequest") - proto.RegisterType((*VacuumVolumeCleanupResponse)(nil), "volume_server_pb.VacuumVolumeCleanupResponse") - proto.RegisterType((*DeleteCollectionRequest)(nil), "volume_server_pb.DeleteCollectionRequest") - proto.RegisterType((*DeleteCollectionResponse)(nil), "volume_server_pb.DeleteCollectionResponse") - proto.RegisterType((*AllocateVolumeRequest)(nil), "volume_server_pb.AllocateVolumeRequest") - proto.RegisterType((*AllocateVolumeResponse)(nil), "volume_server_pb.AllocateVolumeResponse") - proto.RegisterType((*VolumeSyncStatusRequest)(nil), "volume_server_pb.VolumeSyncStatusRequest") - proto.RegisterType((*VolumeSyncStatusResponse)(nil), "volume_server_pb.VolumeSyncStatusResponse") - proto.RegisterType((*VolumeIncrementalCopyRequest)(nil), "volume_server_pb.VolumeIncrementalCopyRequest") - proto.RegisterType((*VolumeIncrementalCopyResponse)(nil), "volume_server_pb.VolumeIncrementalCopyResponse") - proto.RegisterType((*VolumeMountRequest)(nil), "volume_server_pb.VolumeMountRequest") - proto.RegisterType((*VolumeMountResponse)(nil), "volume_server_pb.VolumeMountResponse") - proto.RegisterType((*VolumeUnmountRequest)(nil), "volume_server_pb.VolumeUnmountRequest") - proto.RegisterType((*VolumeUnmountResponse)(nil), "volume_server_pb.VolumeUnmountResponse") - proto.RegisterType((*VolumeDeleteRequest)(nil), "volume_server_pb.VolumeDeleteRequest") - proto.RegisterType((*VolumeDeleteResponse)(nil), "volume_server_pb.VolumeDeleteResponse") - proto.RegisterType((*VolumeMarkReadonlyRequest)(nil), "volume_server_pb.VolumeMarkReadonlyRequest") - proto.RegisterType((*VolumeMarkReadonlyResponse)(nil), "volume_server_pb.VolumeMarkReadonlyResponse") - proto.RegisterType((*VolumeCopyRequest)(nil), "volume_server_pb.VolumeCopyRequest") - proto.RegisterType((*VolumeCopyResponse)(nil), "volume_server_pb.VolumeCopyResponse") - 
proto.RegisterType((*CopyFileRequest)(nil), "volume_server_pb.CopyFileRequest") - proto.RegisterType((*CopyFileResponse)(nil), "volume_server_pb.CopyFileResponse") - proto.RegisterType((*VolumeTailSenderRequest)(nil), "volume_server_pb.VolumeTailSenderRequest") - proto.RegisterType((*VolumeTailSenderResponse)(nil), "volume_server_pb.VolumeTailSenderResponse") - proto.RegisterType((*VolumeTailReceiverRequest)(nil), "volume_server_pb.VolumeTailReceiverRequest") - proto.RegisterType((*VolumeTailReceiverResponse)(nil), "volume_server_pb.VolumeTailReceiverResponse") - proto.RegisterType((*VolumeEcShardsGenerateRequest)(nil), "volume_server_pb.VolumeEcShardsGenerateRequest") - proto.RegisterType((*VolumeEcShardsGenerateResponse)(nil), "volume_server_pb.VolumeEcShardsGenerateResponse") - proto.RegisterType((*VolumeEcShardsRebuildRequest)(nil), "volume_server_pb.VolumeEcShardsRebuildRequest") - proto.RegisterType((*VolumeEcShardsRebuildResponse)(nil), "volume_server_pb.VolumeEcShardsRebuildResponse") - proto.RegisterType((*VolumeEcShardsCopyRequest)(nil), "volume_server_pb.VolumeEcShardsCopyRequest") - proto.RegisterType((*VolumeEcShardsCopyResponse)(nil), "volume_server_pb.VolumeEcShardsCopyResponse") - proto.RegisterType((*VolumeEcShardsDeleteRequest)(nil), "volume_server_pb.VolumeEcShardsDeleteRequest") - proto.RegisterType((*VolumeEcShardsDeleteResponse)(nil), "volume_server_pb.VolumeEcShardsDeleteResponse") - proto.RegisterType((*VolumeEcShardsMountRequest)(nil), "volume_server_pb.VolumeEcShardsMountRequest") - proto.RegisterType((*VolumeEcShardsMountResponse)(nil), "volume_server_pb.VolumeEcShardsMountResponse") - proto.RegisterType((*VolumeEcShardsUnmountRequest)(nil), "volume_server_pb.VolumeEcShardsUnmountRequest") - proto.RegisterType((*VolumeEcShardsUnmountResponse)(nil), "volume_server_pb.VolumeEcShardsUnmountResponse") - proto.RegisterType((*VolumeEcShardReadRequest)(nil), "volume_server_pb.VolumeEcShardReadRequest") - proto.RegisterType((*VolumeEcShardReadResponse)(nil), "volume_server_pb.VolumeEcShardReadResponse") - proto.RegisterType((*VolumeEcBlobDeleteRequest)(nil), "volume_server_pb.VolumeEcBlobDeleteRequest") - proto.RegisterType((*VolumeEcBlobDeleteResponse)(nil), "volume_server_pb.VolumeEcBlobDeleteResponse") - proto.RegisterType((*VolumeEcShardsToVolumeRequest)(nil), "volume_server_pb.VolumeEcShardsToVolumeRequest") - proto.RegisterType((*VolumeEcShardsToVolumeResponse)(nil), "volume_server_pb.VolumeEcShardsToVolumeResponse") - proto.RegisterType((*ReadVolumeFileStatusRequest)(nil), "volume_server_pb.ReadVolumeFileStatusRequest") - proto.RegisterType((*ReadVolumeFileStatusResponse)(nil), "volume_server_pb.ReadVolumeFileStatusResponse") - proto.RegisterType((*DiskStatus)(nil), "volume_server_pb.DiskStatus") - proto.RegisterType((*MemStatus)(nil), "volume_server_pb.MemStatus") - proto.RegisterType((*RemoteFile)(nil), "volume_server_pb.RemoteFile") - proto.RegisterType((*VolumeInfo)(nil), "volume_server_pb.VolumeInfo") - proto.RegisterType((*VolumeTierMoveDatToRemoteRequest)(nil), "volume_server_pb.VolumeTierMoveDatToRemoteRequest") - proto.RegisterType((*VolumeTierMoveDatToRemoteResponse)(nil), "volume_server_pb.VolumeTierMoveDatToRemoteResponse") - proto.RegisterType((*VolumeTierMoveDatFromRemoteRequest)(nil), "volume_server_pb.VolumeTierMoveDatFromRemoteRequest") - proto.RegisterType((*VolumeTierMoveDatFromRemoteResponse)(nil), "volume_server_pb.VolumeTierMoveDatFromRemoteResponse") - proto.RegisterType((*QueryRequest)(nil), "volume_server_pb.QueryRequest") - 
proto.RegisterType((*QueryRequest_Filter)(nil), "volume_server_pb.QueryRequest.Filter") - proto.RegisterType((*QueryRequest_InputSerialization)(nil), "volume_server_pb.QueryRequest.InputSerialization") - proto.RegisterType((*QueryRequest_InputSerialization_CSVInput)(nil), "volume_server_pb.QueryRequest.InputSerialization.CSVInput") - proto.RegisterType((*QueryRequest_InputSerialization_JSONInput)(nil), "volume_server_pb.QueryRequest.InputSerialization.JSONInput") - proto.RegisterType((*QueryRequest_InputSerialization_ParquetInput)(nil), "volume_server_pb.QueryRequest.InputSerialization.ParquetInput") - proto.RegisterType((*QueryRequest_OutputSerialization)(nil), "volume_server_pb.QueryRequest.OutputSerialization") - proto.RegisterType((*QueryRequest_OutputSerialization_CSVOutput)(nil), "volume_server_pb.QueryRequest.OutputSerialization.CSVOutput") - proto.RegisterType((*QueryRequest_OutputSerialization_JSONOutput)(nil), "volume_server_pb.QueryRequest.OutputSerialization.JSONOutput") - proto.RegisterType((*QueriedStripe)(nil), "volume_server_pb.QueriedStripe") +var File_volume_server_proto protoreflect.FileDescriptor + +var file_volume_server_proto_rawDesc = []byte{ + 0x0a, 0x13, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x10, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, + 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x22, 0x5b, 0x0a, 0x12, 0x42, 0x61, 0x74, 0x63, 0x68, + 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x19, 0x0a, + 0x08, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, + 0x07, 0x66, 0x69, 0x6c, 0x65, 0x49, 0x64, 0x73, 0x12, 0x2a, 0x0a, 0x11, 0x73, 0x6b, 0x69, 0x70, + 0x5f, 0x63, 0x6f, 0x6f, 0x6b, 0x69, 0x65, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x0f, 0x73, 0x6b, 0x69, 0x70, 0x43, 0x6f, 0x6f, 0x6b, 0x69, 0x65, 0x43, + 0x68, 0x65, 0x63, 0x6b, 0x22, 0x4f, 0x0a, 0x13, 0x42, 0x61, 0x74, 0x63, 0x68, 0x44, 0x65, 0x6c, + 0x65, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x38, 0x0a, 0x07, 0x72, + 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x76, + 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, + 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x07, 0x72, 0x65, + 0x73, 0x75, 0x6c, 0x74, 0x73, 0x22, 0x83, 0x01, 0x0a, 0x0c, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, + 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x17, 0x0a, 0x07, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x69, + 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x66, 0x69, 0x6c, 0x65, 0x49, 0x64, 0x12, + 0x16, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, + 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x12, 0x0a, + 0x04, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x04, 0x73, 0x69, 0x7a, + 0x65, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, + 0x28, 0x0d, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x07, 0x0a, 0x05, 0x45, + 0x6d, 0x70, 0x74, 0x79, 0x22, 0x37, 0x0a, 0x18, 0x56, 0x61, 0x63, 0x75, 0x75, 0x6d, 0x56, 0x6f, + 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x1b, 
0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x22, 0x40, 0x0a, + 0x19, 0x56, 0x61, 0x63, 0x75, 0x75, 0x6d, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x68, 0x65, + 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x67, 0x61, + 0x72, 0x62, 0x61, 0x67, 0x65, 0x5f, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x01, 0x52, 0x0c, 0x67, 0x61, 0x72, 0x62, 0x61, 0x67, 0x65, 0x52, 0x61, 0x74, 0x69, 0x6f, 0x22, + 0x5b, 0x0a, 0x1a, 0x56, 0x61, 0x63, 0x75, 0x75, 0x6d, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, + 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, + 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, + 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x12, 0x20, 0x0a, 0x0b, 0x70, 0x72, + 0x65, 0x61, 0x6c, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, + 0x0b, 0x70, 0x72, 0x65, 0x61, 0x6c, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x65, 0x22, 0x1d, 0x0a, 0x1b, + 0x56, 0x61, 0x63, 0x75, 0x75, 0x6d, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6f, 0x6d, 0x70, + 0x61, 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x38, 0x0a, 0x19, 0x56, + 0x61, 0x63, 0x75, 0x75, 0x6d, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6f, 0x6d, 0x6d, 0x69, + 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, + 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, + 0x75, 0x6d, 0x65, 0x49, 0x64, 0x22, 0x3e, 0x0a, 0x1a, 0x56, 0x61, 0x63, 0x75, 0x75, 0x6d, 0x56, + 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x20, 0x0a, 0x0c, 0x69, 0x73, 0x5f, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6f, + 0x6e, 0x6c, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x69, 0x73, 0x52, 0x65, 0x61, + 0x64, 0x4f, 0x6e, 0x6c, 0x79, 0x22, 0x39, 0x0a, 0x1a, 0x56, 0x61, 0x63, 0x75, 0x75, 0x6d, 0x56, + 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6c, 0x65, 0x61, 0x6e, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, + 0x22, 0x1d, 0x0a, 0x1b, 0x56, 0x61, 0x63, 0x75, 0x75, 0x6d, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, + 0x43, 0x6c, 0x65, 0x61, 0x6e, 0x75, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, + 0x39, 0x0a, 0x17, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, + 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, + 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x1a, 0x0a, 0x18, 0x44, 0x65, + 0x6c, 0x65, 0x74, 0x65, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xfb, 0x01, 0x0a, 0x15, 0x41, 0x6c, 0x6c, 0x6f, 0x63, + 0x61, 0x74, 0x65, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x12, 0x1e, 0x0a, + 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 
0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x20, 0x0a, + 0x0b, 0x70, 0x72, 0x65, 0x61, 0x6c, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x03, 0x52, 0x0b, 0x70, 0x72, 0x65, 0x61, 0x6c, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x65, 0x12, + 0x20, 0x0a, 0x0b, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x74, 0x74, 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, + 0x74, 0x74, 0x6c, 0x12, 0x32, 0x0a, 0x16, 0x6d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x5f, 0x6d, 0x61, + 0x70, 0x5f, 0x6d, 0x61, 0x78, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x5f, 0x6d, 0x62, 0x18, 0x06, 0x20, + 0x01, 0x28, 0x0d, 0x52, 0x12, 0x6d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x4d, 0x61, 0x70, 0x4d, 0x61, + 0x78, 0x53, 0x69, 0x7a, 0x65, 0x4d, 0x62, 0x12, 0x1b, 0x0a, 0x09, 0x64, 0x69, 0x73, 0x6b, 0x5f, + 0x74, 0x79, 0x70, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x64, 0x69, 0x73, 0x6b, + 0x54, 0x79, 0x70, 0x65, 0x22, 0x18, 0x0a, 0x16, 0x41, 0x6c, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x65, + 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x36, + 0x0a, 0x17, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x53, 0x79, 0x6e, 0x63, 0x53, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, + 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, + 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x22, 0xfb, 0x01, 0x0a, 0x18, 0x56, 0x6f, 0x6c, 0x75, 0x6d, + 0x65, 0x53, 0x79, 0x6e, 0x63, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, + 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x12, 0x20, 0x0a, 0x0b, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x74, 0x74, 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x03, 0x74, 0x74, 0x6c, 0x12, 0x1f, 0x0a, 0x0b, 0x74, 0x61, 0x69, 0x6c, 0x5f, 0x6f, 0x66, 0x66, + 0x73, 0x65, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0a, 0x74, 0x61, 0x69, 0x6c, 0x4f, + 0x66, 0x66, 0x73, 0x65, 0x74, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, + 0x5f, 0x72, 0x65, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0d, 0x52, + 0x0f, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x52, 0x65, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, + 0x12, 0x22, 0x0a, 0x0d, 0x69, 0x64, 0x78, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x73, 0x69, 0x7a, + 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x69, 0x64, 0x78, 0x46, 0x69, 0x6c, 0x65, + 0x53, 0x69, 0x7a, 0x65, 0x22, 0x56, 0x0a, 0x1c, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x6e, + 0x63, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x43, 0x6f, 0x70, 0x79, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, + 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 
0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, + 0x64, 0x12, 0x19, 0x0a, 0x08, 0x73, 0x69, 0x6e, 0x63, 0x65, 0x5f, 0x6e, 0x73, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x04, 0x52, 0x07, 0x73, 0x69, 0x6e, 0x63, 0x65, 0x4e, 0x73, 0x22, 0x42, 0x0a, 0x1d, + 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x6e, 0x63, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x61, + 0x6c, 0x43, 0x6f, 0x70, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x21, 0x0a, + 0x0c, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x66, 0x69, 0x6c, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, + 0x22, 0x31, 0x0a, 0x12, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x4d, 0x6f, 0x75, 0x6e, 0x74, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, + 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, + 0x65, 0x49, 0x64, 0x22, 0x15, 0x0a, 0x13, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x4d, 0x6f, 0x75, + 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x33, 0x0a, 0x14, 0x56, 0x6f, + 0x6c, 0x75, 0x6d, 0x65, 0x55, 0x6e, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x22, + 0x17, 0x0a, 0x15, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x55, 0x6e, 0x6d, 0x6f, 0x75, 0x6e, 0x74, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x32, 0x0a, 0x13, 0x56, 0x6f, 0x6c, 0x75, + 0x6d, 0x65, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x22, 0x16, 0x0a, 0x14, + 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x38, 0x0a, 0x19, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x4d, 0x61, + 0x72, 0x6b, 0x52, 0x65, 0x61, 0x64, 0x6f, 0x6e, 0x6c, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x22, 0x1c, + 0x0a, 0x1a, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x4d, 0x61, 0x72, 0x6b, 0x52, 0x65, 0x61, 0x64, + 0x6f, 0x6e, 0x6c, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x38, 0x0a, 0x19, + 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x4d, 0x61, 0x72, 0x6b, 0x57, 0x72, 0x69, 0x74, 0x61, 0x62, + 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, + 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, + 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x22, 0x1c, 0x0a, 0x1a, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, + 0x4d, 0x61, 0x72, 0x6b, 0x57, 0x72, 0x69, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x57, 0x0a, 0x16, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, + 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x12, 0x20, 0x0a, 0x0b, 0x72, + 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 
0x28, 0x09, + 0x52, 0x0b, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x2f, 0x0a, + 0x17, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, + 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x22, 0x32, + 0x0a, 0x13, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, + 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, + 0x49, 0x64, 0x22, 0x38, 0x0a, 0x14, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x53, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x20, 0x0a, 0x0c, 0x69, 0x73, + 0x5f, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x0a, 0x69, 0x73, 0x52, 0x65, 0x61, 0x64, 0x4f, 0x6e, 0x6c, 0x79, 0x22, 0xcb, 0x01, 0x0a, + 0x11, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6f, 0x70, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x12, + 0x1e, 0x0a, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, + 0x20, 0x0a, 0x0b, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x74, 0x74, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, + 0x74, 0x74, 0x6c, 0x12, 0x28, 0x0a, 0x10, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x64, 0x61, + 0x74, 0x61, 0x5f, 0x6e, 0x6f, 0x64, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x44, 0x61, 0x74, 0x61, 0x4e, 0x6f, 0x64, 0x65, 0x12, 0x1b, 0x0a, + 0x09, 0x64, 0x69, 0x73, 0x6b, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x08, 0x64, 0x69, 0x73, 0x6b, 0x54, 0x79, 0x70, 0x65, 0x22, 0x3f, 0x0a, 0x12, 0x56, 0x6f, + 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6f, 0x70, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x29, 0x0a, 0x11, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x61, 0x70, 0x70, 0x65, 0x6e, 0x64, 0x5f, + 0x61, 0x74, 0x5f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0e, 0x6c, 0x61, 0x73, + 0x74, 0x41, 0x70, 0x70, 0x65, 0x6e, 0x64, 0x41, 0x74, 0x4e, 0x73, 0x22, 0x94, 0x02, 0x0a, 0x0f, + 0x43, 0x6f, 0x70, 0x79, 0x46, 0x69, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x12, 0x10, 0x0a, 0x03, + 0x65, 0x78, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x65, 0x78, 0x74, 0x12, 0x2f, + 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x76, + 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x12, 0x63, 0x6f, 0x6d, + 0x70, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x12, + 0x1f, 0x0a, 0x0b, 0x73, 0x74, 0x6f, 0x70, 0x5f, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x18, 0x04, + 0x20, 0x01, 
0x28, 0x04, 0x52, 0x0a, 0x73, 0x74, 0x6f, 0x70, 0x4f, 0x66, 0x66, 0x73, 0x65, 0x74, + 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x12, 0x20, 0x0a, 0x0c, 0x69, 0x73, 0x5f, 0x65, 0x63, 0x5f, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, + 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x69, 0x73, 0x45, 0x63, 0x56, 0x6f, 0x6c, 0x75, + 0x6d, 0x65, 0x12, 0x3e, 0x0a, 0x1c, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x5f, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x66, 0x6f, 0x75, + 0x6e, 0x64, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x18, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, + 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x4e, 0x6f, 0x74, 0x46, 0x6f, 0x75, + 0x6e, 0x64, 0x22, 0x35, 0x0a, 0x10, 0x43, 0x6f, 0x70, 0x79, 0x46, 0x69, 0x6c, 0x65, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x63, + 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x66, 0x69, + 0x6c, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x22, 0x7d, 0x0a, 0x15, 0x52, 0x65, 0x61, + 0x64, 0x4e, 0x65, 0x65, 0x64, 0x6c, 0x65, 0x42, 0x6c, 0x6f, 0x62, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x12, + 0x1b, 0x0a, 0x09, 0x6e, 0x65, 0x65, 0x64, 0x6c, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x04, 0x52, 0x08, 0x6e, 0x65, 0x65, 0x64, 0x6c, 0x65, 0x49, 0x64, 0x12, 0x16, 0x0a, 0x06, + 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x06, 0x6f, 0x66, + 0x66, 0x73, 0x65, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x05, 0x52, 0x04, 0x73, 0x69, 0x7a, 0x65, 0x22, 0x39, 0x0a, 0x16, 0x52, 0x65, 0x61, 0x64, + 0x4e, 0x65, 0x65, 0x64, 0x6c, 0x65, 0x42, 0x6c, 0x6f, 0x62, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x6e, 0x65, 0x65, 0x64, 0x6c, 0x65, 0x5f, 0x62, 0x6c, 0x6f, + 0x62, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0a, 0x6e, 0x65, 0x65, 0x64, 0x6c, 0x65, 0x42, + 0x6c, 0x6f, 0x62, 0x22, 0x87, 0x01, 0x0a, 0x16, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4e, 0x65, 0x65, + 0x64, 0x6c, 0x65, 0x42, 0x6c, 0x6f, 0x62, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, + 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x6e, + 0x65, 0x65, 0x64, 0x6c, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, + 0x6e, 0x65, 0x65, 0x64, 0x6c, 0x65, 0x49, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x73, 0x69, 0x7a, 0x65, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x04, 0x73, 0x69, 0x7a, 0x65, 0x12, 0x1f, 0x0a, 0x0b, + 0x6e, 0x65, 0x65, 0x64, 0x6c, 0x65, 0x5f, 0x62, 0x6c, 0x6f, 0x62, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x0c, 0x52, 0x0a, 0x6e, 0x65, 0x65, 0x64, 0x6c, 0x65, 0x42, 0x6c, 0x6f, 0x62, 0x22, 0x19, 0x0a, + 0x17, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4e, 0x65, 0x65, 0x64, 0x6c, 0x65, 0x42, 0x6c, 0x6f, 0x62, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x83, 0x01, 0x0a, 0x17, 0x56, 0x6f, 0x6c, + 0x75, 0x6d, 0x65, 0x54, 0x61, 0x69, 0x6c, 0x53, 0x65, 0x6e, 0x64, 0x65, 0x72, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 
0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, + 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, + 0x64, 0x12, 0x19, 0x0a, 0x08, 0x73, 0x69, 0x6e, 0x63, 0x65, 0x5f, 0x6e, 0x73, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x04, 0x52, 0x07, 0x73, 0x69, 0x6e, 0x63, 0x65, 0x4e, 0x73, 0x12, 0x30, 0x0a, 0x14, + 0x69, 0x64, 0x6c, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x5f, 0x73, 0x65, 0x63, + 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x12, 0x69, 0x64, 0x6c, 0x65, + 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x22, 0x84, + 0x01, 0x0a, 0x18, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x54, 0x61, 0x69, 0x6c, 0x53, 0x65, 0x6e, + 0x64, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x6e, + 0x65, 0x65, 0x64, 0x6c, 0x65, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0c, 0x52, 0x0c, 0x6e, 0x65, 0x65, 0x64, 0x6c, 0x65, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, + 0x12, 0x1f, 0x0a, 0x0b, 0x6e, 0x65, 0x65, 0x64, 0x6c, 0x65, 0x5f, 0x62, 0x6f, 0x64, 0x79, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0a, 0x6e, 0x65, 0x65, 0x64, 0x6c, 0x65, 0x42, 0x6f, 0x64, + 0x79, 0x12, 0x22, 0x0a, 0x0d, 0x69, 0x73, 0x5f, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x63, 0x68, 0x75, + 0x6e, 0x6b, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x69, 0x73, 0x4c, 0x61, 0x73, 0x74, + 0x43, 0x68, 0x75, 0x6e, 0x6b, 0x22, 0xb7, 0x01, 0x0a, 0x19, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, + 0x54, 0x61, 0x69, 0x6c, 0x52, 0x65, 0x63, 0x65, 0x69, 0x76, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, + 0x12, 0x19, 0x0a, 0x08, 0x73, 0x69, 0x6e, 0x63, 0x65, 0x5f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x04, 0x52, 0x07, 0x73, 0x69, 0x6e, 0x63, 0x65, 0x4e, 0x73, 0x12, 0x30, 0x0a, 0x14, 0x69, + 0x64, 0x6c, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x5f, 0x73, 0x65, 0x63, 0x6f, + 0x6e, 0x64, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x12, 0x69, 0x64, 0x6c, 0x65, 0x54, + 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x12, 0x30, 0x0a, + 0x14, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, + 0x65, 0x72, 0x76, 0x65, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x22, + 0x1c, 0x0a, 0x1a, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x54, 0x61, 0x69, 0x6c, 0x52, 0x65, 0x63, + 0x65, 0x69, 0x76, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x5c, 0x0a, + 0x1d, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x47, + 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, + 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x12, 0x1e, 0x0a, 0x0a, 0x63, + 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x20, 0x0a, 0x1e, 0x56, + 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x47, 0x65, 0x6e, + 0x65, 0x72, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 
0x6e, 0x73, 0x65, 0x22, 0x5b, 0x0a, + 0x1c, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x52, + 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, + 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, + 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, + 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, + 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x4b, 0x0a, 0x1d, 0x56, 0x6f, + 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x52, 0x65, 0x62, 0x75, + 0x69, 0x6c, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2a, 0x0a, 0x11, 0x72, + 0x65, 0x62, 0x75, 0x69, 0x6c, 0x74, 0x5f, 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x69, 0x64, 0x73, + 0x18, 0x01, 0x20, 0x03, 0x28, 0x0d, 0x52, 0x0f, 0x72, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x74, 0x53, + 0x68, 0x61, 0x72, 0x64, 0x49, 0x64, 0x73, 0x22, 0x8b, 0x02, 0x0a, 0x19, 0x56, 0x6f, 0x6c, 0x75, + 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x43, 0x6f, 0x70, 0x79, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, + 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, + 0x49, 0x64, 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x12, 0x1b, 0x0a, 0x09, 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x69, 0x64, 0x73, 0x18, + 0x03, 0x20, 0x03, 0x28, 0x0d, 0x52, 0x08, 0x73, 0x68, 0x61, 0x72, 0x64, 0x49, 0x64, 0x73, 0x12, + 0x22, 0x0a, 0x0d, 0x63, 0x6f, 0x70, 0x79, 0x5f, 0x65, 0x63, 0x78, 0x5f, 0x66, 0x69, 0x6c, 0x65, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x63, 0x6f, 0x70, 0x79, 0x45, 0x63, 0x78, 0x46, + 0x69, 0x6c, 0x65, 0x12, 0x28, 0x0a, 0x10, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x64, 0x61, + 0x74, 0x61, 0x5f, 0x6e, 0x6f, 0x64, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x44, 0x61, 0x74, 0x61, 0x4e, 0x6f, 0x64, 0x65, 0x12, 0x22, 0x0a, + 0x0d, 0x63, 0x6f, 0x70, 0x79, 0x5f, 0x65, 0x63, 0x6a, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x18, 0x06, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x63, 0x6f, 0x70, 0x79, 0x45, 0x63, 0x6a, 0x46, 0x69, 0x6c, + 0x65, 0x12, 0x22, 0x0a, 0x0d, 0x63, 0x6f, 0x70, 0x79, 0x5f, 0x76, 0x69, 0x66, 0x5f, 0x66, 0x69, + 0x6c, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x63, 0x6f, 0x70, 0x79, 0x56, 0x69, + 0x66, 0x46, 0x69, 0x6c, 0x65, 0x22, 0x1c, 0x0a, 0x1a, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, + 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x43, 0x6f, 0x70, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x22, 0x77, 0x0a, 0x1b, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, + 0x68, 0x61, 0x72, 0x64, 0x73, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x12, + 0x1e, 0x0a, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, + 0x1b, 0x0a, 0x09, 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x03, 
0x20, 0x03, + 0x28, 0x0d, 0x52, 0x08, 0x73, 0x68, 0x61, 0x72, 0x64, 0x49, 0x64, 0x73, 0x22, 0x1e, 0x0a, 0x1c, + 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x44, 0x65, + 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x76, 0x0a, 0x1a, + 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x4d, 0x6f, + 0x75, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, + 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, + 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6f, 0x6c, + 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1b, 0x0a, 0x09, 0x73, 0x68, 0x61, 0x72, 0x64, + 0x5f, 0x69, 0x64, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0d, 0x52, 0x08, 0x73, 0x68, 0x61, 0x72, + 0x64, 0x49, 0x64, 0x73, 0x22, 0x1d, 0x0a, 0x1b, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, + 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x4d, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x22, 0x58, 0x0a, 0x1c, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, + 0x68, 0x61, 0x72, 0x64, 0x73, 0x55, 0x6e, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, + 0x12, 0x1b, 0x0a, 0x09, 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x03, 0x20, + 0x03, 0x28, 0x0d, 0x52, 0x08, 0x73, 0x68, 0x61, 0x72, 0x64, 0x49, 0x64, 0x73, 0x22, 0x1f, 0x0a, + 0x1d, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x55, + 0x6e, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x99, + 0x01, 0x0a, 0x18, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, + 0x52, 0x65, 0x61, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x76, + 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, + 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x12, 0x19, 0x0a, 0x08, 0x73, 0x68, 0x61, 0x72, + 0x64, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x07, 0x73, 0x68, 0x61, 0x72, + 0x64, 0x49, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x03, 0x52, 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x73, + 0x69, 0x7a, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x04, 0x73, 0x69, 0x7a, 0x65, 0x12, + 0x19, 0x0a, 0x08, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x04, 0x52, 0x07, 0x66, 0x69, 0x6c, 0x65, 0x4b, 0x65, 0x79, 0x22, 0x4e, 0x0a, 0x19, 0x56, 0x6f, + 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x61, 0x64, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x12, 0x1d, 0x0a, 0x0a, 0x69, + 0x73, 0x5f, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x09, 0x69, 0x73, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x22, 0x8d, 0x01, 0x0a, 0x19, 0x56, + 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x42, 0x6c, 0x6f, 0x62, 0x44, 0x65, 0x6c, 0x65, 0x74, + 0x65, 0x52, 
0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, + 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, + 0x75, 0x6d, 0x65, 0x49, 0x64, 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x19, 0x0a, 0x08, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x6b, 0x65, + 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x07, 0x66, 0x69, 0x6c, 0x65, 0x4b, 0x65, 0x79, + 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x0d, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x1c, 0x0a, 0x1a, 0x56, 0x6f, + 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x42, 0x6c, 0x6f, 0x62, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x5c, 0x0a, 0x1d, 0x56, 0x6f, 0x6c, 0x75, + 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x54, 0x6f, 0x56, 0x6f, 0x6c, 0x75, + 0x6d, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, + 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, + 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x20, 0x0a, 0x1e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, + 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x54, 0x6f, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x3a, 0x0a, 0x1b, 0x52, 0x65, 0x61, 0x64, + 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, + 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, + 0x6d, 0x65, 0x49, 0x64, 0x22, 0x8a, 0x03, 0x0a, 0x1c, 0x52, 0x65, 0x61, 0x64, 0x56, 0x6f, 0x6c, + 0x75, 0x6d, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, + 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, + 0x49, 0x64, 0x12, 0x3b, 0x0a, 0x1a, 0x69, 0x64, 0x78, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x74, + 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x5f, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x17, 0x69, 0x64, 0x78, 0x46, 0x69, 0x6c, 0x65, 0x54, + 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x12, + 0x22, 0x0a, 0x0d, 0x69, 0x64, 0x78, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x69, 0x64, 0x78, 0x46, 0x69, 0x6c, 0x65, 0x53, + 0x69, 0x7a, 0x65, 0x12, 0x3b, 0x0a, 0x1a, 0x64, 0x61, 0x74, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x5f, + 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x5f, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, + 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x17, 0x64, 0x61, 0x74, 0x46, 0x69, 0x6c, 0x65, + 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, + 0x12, 0x22, 0x0a, 0x0d, 0x64, 0x61, 0x74, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x73, 0x69, 0x7a, + 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 
0x04, 0x52, 0x0b, 0x64, 0x61, 0x74, 0x46, 0x69, 0x6c, 0x65, + 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x63, 0x6f, 0x75, + 0x6e, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x66, 0x69, 0x6c, 0x65, 0x43, 0x6f, + 0x75, 0x6e, 0x74, 0x12, 0x2f, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x5f, 0x72, 0x65, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0d, + 0x52, 0x12, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x76, 0x69, + 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1b, 0x0a, 0x09, 0x64, 0x69, 0x73, 0x6b, 0x5f, 0x74, 0x79, 0x70, + 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x64, 0x69, 0x73, 0x6b, 0x54, 0x79, 0x70, + 0x65, 0x22, 0xbb, 0x01, 0x0a, 0x0a, 0x44, 0x69, 0x73, 0x6b, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x12, 0x10, 0x0a, 0x03, 0x64, 0x69, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x64, + 0x69, 0x72, 0x12, 0x10, 0x0a, 0x03, 0x61, 0x6c, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, + 0x03, 0x61, 0x6c, 0x6c, 0x12, 0x12, 0x0a, 0x04, 0x75, 0x73, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x04, 0x52, 0x04, 0x75, 0x73, 0x65, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x66, 0x72, 0x65, 0x65, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x04, 0x66, 0x72, 0x65, 0x65, 0x12, 0x21, 0x0a, 0x0c, + 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x5f, 0x66, 0x72, 0x65, 0x65, 0x18, 0x05, 0x20, 0x01, + 0x28, 0x02, 0x52, 0x0b, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x46, 0x72, 0x65, 0x65, 0x12, + 0x21, 0x0a, 0x0c, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x5f, 0x75, 0x73, 0x65, 0x64, 0x18, + 0x06, 0x20, 0x01, 0x28, 0x02, 0x52, 0x0b, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x55, 0x73, + 0x65, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x64, 0x69, 0x73, 0x6b, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, + 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x64, 0x69, 0x73, 0x6b, 0x54, 0x79, 0x70, 0x65, 0x22, + 0xa3, 0x01, 0x0a, 0x09, 0x4d, 0x65, 0x6d, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x1e, 0x0a, + 0x0a, 0x67, 0x6f, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x05, 0x52, 0x0a, 0x67, 0x6f, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x65, 0x73, 0x12, 0x10, 0x0a, + 0x03, 0x61, 0x6c, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x03, 0x61, 0x6c, 0x6c, 0x12, + 0x12, 0x0a, 0x04, 0x75, 0x73, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x04, 0x75, + 0x73, 0x65, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x66, 0x72, 0x65, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x04, 0x52, 0x04, 0x66, 0x72, 0x65, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x73, 0x65, 0x6c, 0x66, 0x18, + 0x05, 0x20, 0x01, 0x28, 0x04, 0x52, 0x04, 0x73, 0x65, 0x6c, 0x66, 0x12, 0x12, 0x0a, 0x04, 0x68, + 0x65, 0x61, 0x70, 0x18, 0x06, 0x20, 0x01, 0x28, 0x04, 0x52, 0x04, 0x68, 0x65, 0x61, 0x70, 0x12, + 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x63, 0x6b, 0x18, 0x07, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, + 0x73, 0x74, 0x61, 0x63, 0x6b, 0x22, 0xd8, 0x01, 0x0a, 0x0a, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, + 0x46, 0x69, 0x6c, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x5f, + 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x62, 0x61, 0x63, 0x6b, + 0x65, 0x6e, 0x64, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x62, 0x61, 0x63, 0x6b, 0x65, + 0x6e, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 
0x09, 0x52, 0x09, 0x62, 0x61, 0x63, + 0x6b, 0x65, 0x6e, 0x64, 0x49, 0x64, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x16, 0x0a, 0x06, 0x6f, 0x66, 0x66, 0x73, + 0x65, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, + 0x12, 0x1b, 0x0a, 0x09, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x04, 0x52, 0x08, 0x66, 0x69, 0x6c, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x23, 0x0a, + 0x0d, 0x6d, 0x6f, 0x64, 0x69, 0x66, 0x69, 0x65, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x06, + 0x20, 0x01, 0x28, 0x04, 0x52, 0x0c, 0x6d, 0x6f, 0x64, 0x69, 0x66, 0x69, 0x65, 0x64, 0x54, 0x69, + 0x6d, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, + 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, + 0x22, 0x7c, 0x0a, 0x0a, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x32, + 0x0a, 0x05, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, + 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, + 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x52, 0x05, 0x66, 0x69, 0x6c, + 0x65, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0d, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x20, 0x0a, 0x0b, + 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0b, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xc8, + 0x01, 0x0a, 0x20, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x54, 0x69, 0x65, 0x72, 0x4d, 0x6f, 0x76, + 0x65, 0x44, 0x61, 0x74, 0x54, 0x6f, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, + 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x12, 0x38, 0x0a, 0x18, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, + 0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x16, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, + 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x2d, 0x0a, 0x13, 0x6b, 0x65, + 0x65, 0x70, 0x5f, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x64, 0x61, 0x74, 0x5f, 0x66, 0x69, 0x6c, + 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x6b, 0x65, 0x65, 0x70, 0x4c, 0x6f, 0x63, + 0x61, 0x6c, 0x44, 0x61, 0x74, 0x46, 0x69, 0x6c, 0x65, 0x22, 0x73, 0x0a, 0x21, 0x56, 0x6f, 0x6c, + 0x75, 0x6d, 0x65, 0x54, 0x69, 0x65, 0x72, 0x4d, 0x6f, 0x76, 0x65, 0x44, 0x61, 0x74, 0x54, 0x6f, + 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1c, + 0x0a, 0x09, 0x70, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x03, 0x52, 0x09, 0x70, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x65, 0x64, 0x12, 0x30, 0x0a, 0x13, + 0x70, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x65, 0x64, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, + 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x02, 0x52, 0x13, 0x70, 0x72, 0x6f, 
0x63, 0x65, + 0x73, 0x73, 0x65, 0x64, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x61, 0x67, 0x65, 0x22, 0x92, + 0x01, 0x0a, 0x22, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x54, 0x69, 0x65, 0x72, 0x4d, 0x6f, 0x76, + 0x65, 0x44, 0x61, 0x74, 0x46, 0x72, 0x6f, 0x6d, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, + 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, + 0x49, 0x64, 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x12, 0x2f, 0x0a, 0x14, 0x6b, 0x65, 0x65, 0x70, 0x5f, 0x72, 0x65, 0x6d, 0x6f, 0x74, + 0x65, 0x5f, 0x64, 0x61, 0x74, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x11, 0x6b, 0x65, 0x65, 0x70, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x44, 0x61, 0x74, 0x46, + 0x69, 0x6c, 0x65, 0x22, 0x75, 0x0a, 0x23, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x54, 0x69, 0x65, + 0x72, 0x4d, 0x6f, 0x76, 0x65, 0x44, 0x61, 0x74, 0x46, 0x72, 0x6f, 0x6d, 0x52, 0x65, 0x6d, 0x6f, + 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x70, 0x72, + 0x6f, 0x63, 0x65, 0x73, 0x73, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x70, + 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x65, 0x64, 0x12, 0x30, 0x0a, 0x13, 0x70, 0x72, 0x6f, 0x63, + 0x65, 0x73, 0x73, 0x65, 0x64, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x61, 0x67, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x02, 0x52, 0x13, 0x70, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x65, 0x64, + 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x61, 0x67, 0x65, 0x22, 0x1b, 0x0a, 0x19, 0x56, 0x6f, + 0x6c, 0x75, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0xa1, 0x01, 0x0a, 0x1a, 0x56, 0x6f, 0x6c, 0x75, + 0x6d, 0x65, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x41, 0x0a, 0x0d, 0x64, 0x69, 0x73, 0x6b, 0x5f, 0x73, + 0x74, 0x61, 0x74, 0x75, 0x73, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, + 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, + 0x2e, 0x44, 0x69, 0x73, 0x6b, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x0c, 0x64, 0x69, 0x73, + 0x6b, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x65, 0x73, 0x12, 0x40, 0x0a, 0x0d, 0x6d, 0x65, 0x6d, + 0x6f, 0x72, 0x79, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1b, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, + 0x5f, 0x70, 0x62, 0x2e, 0x4d, 0x65, 0x6d, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x0c, 0x6d, + 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0x1a, 0x0a, 0x18, 0x56, + 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x4c, 0x65, 0x61, 0x76, 0x65, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x1b, 0x0a, 0x19, 0x56, 0x6f, 0x6c, 0x75, 0x6d, + 0x65, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x4c, 0x65, 0x61, 0x76, 0x65, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xf8, 0x0c, 0x0a, 0x0c, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1e, 0x0a, 0x0a, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x73, 0x65, 0x6c, 0x65, 0x63, + 0x74, 0x69, 
0x6f, 0x6e, 0x73, 0x12, 0x22, 0x0a, 0x0d, 0x66, 0x72, 0x6f, 0x6d, 0x5f, 0x66, 0x69, + 0x6c, 0x65, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0b, 0x66, 0x72, + 0x6f, 0x6d, 0x46, 0x69, 0x6c, 0x65, 0x49, 0x64, 0x73, 0x12, 0x3d, 0x0a, 0x06, 0x66, 0x69, 0x6c, + 0x74, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x76, 0x6f, 0x6c, 0x75, + 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x51, 0x75, 0x65, + 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, + 0x52, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x62, 0x0a, 0x13, 0x69, 0x6e, 0x70, 0x75, + 0x74, 0x5f, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, + 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x53, 0x65, 0x72, 0x69, 0x61, + 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x12, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x53, + 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x65, 0x0a, 0x14, + 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x5f, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x76, 0x6f, 0x6c, + 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x51, 0x75, + 0x65, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x4f, 0x75, 0x74, 0x70, 0x75, + 0x74, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, + 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x1a, 0x4e, 0x0a, 0x06, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x14, 0x0a, + 0x05, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x66, 0x69, + 0x65, 0x6c, 0x64, 0x12, 0x18, 0x0a, 0x07, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x6e, 0x64, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x6e, 0x64, 0x12, 0x14, 0x0a, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x1a, 0xd5, 0x05, 0x0a, 0x12, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x53, 0x65, 0x72, + 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x6f, + 0x6d, 0x70, 0x72, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x63, 0x6f, 0x6d, 0x70, 0x72, 0x65, 0x73, 0x73, 0x69, 0x6f, + 0x6e, 0x54, 0x79, 0x70, 0x65, 0x12, 0x57, 0x0a, 0x09, 0x63, 0x73, 0x76, 0x5f, 0x69, 0x6e, 0x70, + 0x75, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3a, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, + 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x51, 0x75, 0x65, 0x72, + 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x53, 0x65, + 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x43, 0x53, 0x56, 0x49, + 0x6e, 0x70, 0x75, 0x74, 0x52, 0x08, 0x63, 0x73, 0x76, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x12, 0x5a, + 0x0a, 0x0a, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x3b, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, + 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 
0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x2e, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4a, 0x53, 0x4f, 0x4e, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x52, + 0x09, 0x6a, 0x73, 0x6f, 0x6e, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x12, 0x63, 0x0a, 0x0d, 0x70, 0x61, + 0x72, 0x71, 0x75, 0x65, 0x74, 0x5f, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x3e, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, + 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x2e, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x50, 0x61, 0x72, 0x71, 0x75, 0x65, 0x74, 0x49, 0x6e, 0x70, 0x75, + 0x74, 0x52, 0x0c, 0x70, 0x61, 0x72, 0x71, 0x75, 0x65, 0x74, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x1a, + 0xc8, 0x02, 0x0a, 0x08, 0x43, 0x53, 0x56, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x12, 0x28, 0x0a, 0x10, + 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, 0x69, 0x6e, 0x66, 0x6f, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x66, 0x69, 0x6c, 0x65, 0x48, 0x65, 0x61, 0x64, + 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x29, 0x0a, 0x10, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, + 0x5f, 0x64, 0x65, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0f, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x44, 0x65, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x65, + 0x72, 0x12, 0x27, 0x0a, 0x0f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x64, 0x65, 0x6c, 0x69, 0x6d, + 0x69, 0x74, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x66, 0x69, 0x65, 0x6c, + 0x64, 0x44, 0x65, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x65, 0x72, 0x12, 0x29, 0x0a, 0x10, 0x71, 0x75, + 0x6f, 0x74, 0x65, 0x5f, 0x63, 0x68, 0x61, 0x72, 0x61, 0x63, 0x74, 0x6f, 0x65, 0x72, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x71, 0x75, 0x6f, 0x74, 0x65, 0x43, 0x68, 0x61, 0x72, 0x61, + 0x63, 0x74, 0x6f, 0x65, 0x72, 0x12, 0x34, 0x0a, 0x16, 0x71, 0x75, 0x6f, 0x74, 0x65, 0x5f, 0x65, + 0x73, 0x63, 0x61, 0x70, 0x65, 0x5f, 0x63, 0x68, 0x61, 0x72, 0x61, 0x63, 0x74, 0x65, 0x72, 0x18, + 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x14, 0x71, 0x75, 0x6f, 0x74, 0x65, 0x45, 0x73, 0x63, 0x61, + 0x70, 0x65, 0x43, 0x68, 0x61, 0x72, 0x61, 0x63, 0x74, 0x65, 0x72, 0x12, 0x1a, 0x0a, 0x08, 0x63, + 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x63, + 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x41, 0x0a, 0x1d, 0x61, 0x6c, 0x6c, 0x6f, 0x77, + 0x5f, 0x71, 0x75, 0x6f, 0x74, 0x65, 0x64, 0x5f, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x5f, 0x64, + 0x65, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x65, 0x72, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x1a, + 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x51, 0x75, 0x6f, 0x74, 0x65, 0x64, 0x52, 0x65, 0x63, 0x6f, 0x72, + 0x64, 0x44, 0x65, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x65, 0x72, 0x1a, 0x1f, 0x0a, 0x09, 0x4a, 0x53, + 0x4f, 0x4e, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x1a, 0x0e, 0x0a, 0x0c, 0x50, + 0x61, 0x72, 0x71, 0x75, 0x65, 0x74, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x1a, 0xf1, 0x03, 0x0a, 0x13, + 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x12, 0x5b, 0x0a, 0x0a, 0x63, 0x73, 0x76, 0x5f, 0x6f, 0x75, 0x74, 0x70, 0x75, + 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 
0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, + 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x53, 0x65, + 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x43, 0x53, 0x56, 0x4f, + 0x75, 0x74, 0x70, 0x75, 0x74, 0x52, 0x09, 0x63, 0x73, 0x76, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, + 0x12, 0x5e, 0x0a, 0x0b, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3d, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, + 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x53, 0x65, 0x72, 0x69, + 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4a, 0x53, 0x4f, 0x4e, 0x4f, 0x75, + 0x74, 0x70, 0x75, 0x74, 0x52, 0x0a, 0x6a, 0x73, 0x6f, 0x6e, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, + 0x1a, 0xe3, 0x01, 0x0a, 0x09, 0x43, 0x53, 0x56, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x12, 0x21, + 0x0a, 0x0c, 0x71, 0x75, 0x6f, 0x74, 0x65, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x71, 0x75, 0x6f, 0x74, 0x65, 0x46, 0x69, 0x65, 0x6c, 0x64, + 0x73, 0x12, 0x29, 0x0a, 0x10, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x5f, 0x64, 0x65, 0x6c, 0x69, + 0x6d, 0x69, 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x72, 0x65, 0x63, + 0x6f, 0x72, 0x64, 0x44, 0x65, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x65, 0x72, 0x12, 0x27, 0x0a, 0x0f, + 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x64, 0x65, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x65, 0x72, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x6c, 0x69, + 0x6d, 0x69, 0x74, 0x65, 0x72, 0x12, 0x29, 0x0a, 0x10, 0x71, 0x75, 0x6f, 0x74, 0x65, 0x5f, 0x63, + 0x68, 0x61, 0x72, 0x61, 0x63, 0x74, 0x6f, 0x65, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0f, 0x71, 0x75, 0x6f, 0x74, 0x65, 0x43, 0x68, 0x61, 0x72, 0x61, 0x63, 0x74, 0x6f, 0x65, 0x72, + 0x12, 0x34, 0x0a, 0x16, 0x71, 0x75, 0x6f, 0x74, 0x65, 0x5f, 0x65, 0x73, 0x63, 0x61, 0x70, 0x65, + 0x5f, 0x63, 0x68, 0x61, 0x72, 0x61, 0x63, 0x74, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x14, 0x71, 0x75, 0x6f, 0x74, 0x65, 0x45, 0x73, 0x63, 0x61, 0x70, 0x65, 0x43, 0x68, 0x61, + 0x72, 0x61, 0x63, 0x74, 0x65, 0x72, 0x1a, 0x37, 0x0a, 0x0a, 0x4a, 0x53, 0x4f, 0x4e, 0x4f, 0x75, + 0x74, 0x70, 0x75, 0x74, 0x12, 0x29, 0x0a, 0x10, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x5f, 0x64, + 0x65, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, + 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x44, 0x65, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x65, 0x72, 0x22, + 0x29, 0x0a, 0x0d, 0x51, 0x75, 0x65, 0x72, 0x69, 0x65, 0x64, 0x53, 0x74, 0x72, 0x69, 0x70, 0x65, + 0x12, 0x18, 0x0a, 0x07, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0c, 0x52, 0x07, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x73, 0x22, 0x55, 0x0a, 0x19, 0x56, 0x6f, + 0x6c, 0x75, 0x6d, 0x65, 0x4e, 0x65, 0x65, 0x64, 0x6c, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, + 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, + 0x6d, 0x65, 0x49, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x6e, 0x65, 0x65, 0x64, 0x6c, 0x65, 0x5f, 0x69, + 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x6e, 0x65, 0x65, 0x64, 0x6c, 
0x65, 0x49, + 0x64, 0x22, 0xae, 0x01, 0x0a, 0x1a, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x4e, 0x65, 0x65, 0x64, + 0x6c, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x1b, 0x0a, 0x09, 0x6e, 0x65, 0x65, 0x64, 0x6c, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x04, 0x52, 0x08, 0x6e, 0x65, 0x65, 0x64, 0x6c, 0x65, 0x49, 0x64, 0x12, 0x16, 0x0a, + 0x06, 0x63, 0x6f, 0x6f, 0x6b, 0x69, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x63, + 0x6f, 0x6f, 0x6b, 0x69, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x0d, 0x52, 0x04, 0x73, 0x69, 0x7a, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x6c, 0x61, 0x73, + 0x74, 0x5f, 0x6d, 0x6f, 0x64, 0x69, 0x66, 0x69, 0x65, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, + 0x52, 0x0c, 0x6c, 0x61, 0x73, 0x74, 0x4d, 0x6f, 0x64, 0x69, 0x66, 0x69, 0x65, 0x64, 0x12, 0x10, + 0x0a, 0x03, 0x63, 0x72, 0x63, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x03, 0x63, 0x72, 0x63, + 0x12, 0x10, 0x0a, 0x03, 0x74, 0x74, 0x6c, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x74, + 0x74, 0x6c, 0x32, 0xa9, 0x21, 0x0a, 0x0c, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x53, 0x65, 0x72, + 0x76, 0x65, 0x72, 0x12, 0x5c, 0x0a, 0x0b, 0x42, 0x61, 0x74, 0x63, 0x68, 0x44, 0x65, 0x6c, 0x65, + 0x74, 0x65, 0x12, 0x24, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, + 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x42, 0x61, 0x74, 0x63, 0x68, 0x44, 0x65, 0x6c, 0x65, 0x74, + 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, + 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x42, 0x61, 0x74, 0x63, + 0x68, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, + 0x00, 0x12, 0x6e, 0x0a, 0x11, 0x56, 0x61, 0x63, 0x75, 0x75, 0x6d, 0x56, 0x6f, 0x6c, 0x75, 0x6d, + 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x2a, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, + 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x61, 0x63, 0x75, 0x75, 0x6d, + 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x2b, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, + 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x61, 0x63, 0x75, 0x75, 0x6d, 0x56, 0x6f, 0x6c, 0x75, + 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, + 0x00, 0x12, 0x74, 0x0a, 0x13, 0x56, 0x61, 0x63, 0x75, 0x75, 0x6d, 0x56, 0x6f, 0x6c, 0x75, 0x6d, + 0x65, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x12, 0x2c, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, + 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x61, 0x63, 0x75, + 0x75, 0x6d, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2d, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, + 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x61, 0x63, 0x75, 0x75, 0x6d, + 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x71, 0x0a, 0x12, 0x56, 0x61, 0x63, 0x75, 0x75, + 0x6d, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x12, 0x2b, 0x2e, + 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, + 0x2e, 0x56, 0x61, 0x63, 0x75, 0x75, 0x6d, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6f, 0x6d, + 0x6d, 0x69, 
0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2c, 0x2e, 0x76, 0x6f, 0x6c, + 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x61, + 0x63, 0x75, 0x75, 0x6d, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x74, 0x0a, 0x13, 0x56, 0x61, + 0x63, 0x75, 0x75, 0x6d, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6c, 0x65, 0x61, 0x6e, 0x75, + 0x70, 0x12, 0x2c, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, + 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x61, 0x63, 0x75, 0x75, 0x6d, 0x56, 0x6f, 0x6c, 0x75, 0x6d, + 0x65, 0x43, 0x6c, 0x65, 0x61, 0x6e, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x2d, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, + 0x70, 0x62, 0x2e, 0x56, 0x61, 0x63, 0x75, 0x75, 0x6d, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, + 0x6c, 0x65, 0x61, 0x6e, 0x75, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, + 0x12, 0x6b, 0x0a, 0x10, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x29, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, + 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, 0x6f, + 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x2a, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, + 0x70, 0x62, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x65, 0x0a, + 0x0e, 0x41, 0x6c, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x65, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x12, + 0x27, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, + 0x70, 0x62, 0x2e, 0x41, 0x6c, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x65, 0x56, 0x6f, 0x6c, 0x75, 0x6d, + 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x28, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, + 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x41, 0x6c, 0x6c, 0x6f, + 0x63, 0x61, 0x74, 0x65, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x22, 0x00, 0x12, 0x6b, 0x0a, 0x10, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x53, 0x79, + 0x6e, 0x63, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x29, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, + 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, + 0x6d, 0x65, 0x53, 0x79, 0x6e, 0x63, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x2a, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, + 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x53, 0x79, 0x6e, + 0x63, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, + 0x00, 0x12, 0x7c, 0x0a, 0x15, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x6e, 0x63, 0x72, 0x65, + 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x43, 0x6f, 0x70, 0x79, 0x12, 0x2e, 0x2e, 0x76, 0x6f, 0x6c, + 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, + 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x6e, 0x63, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x43, + 0x6f, 0x70, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2f, 0x2e, 0x76, 0x6f, 0x6c, + 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 
0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, + 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x6e, 0x63, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x43, + 0x6f, 0x70, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x30, 0x01, 0x12, + 0x5c, 0x0a, 0x0b, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x4d, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x24, + 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, + 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x4d, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, + 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x4d, 0x6f, + 0x75, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x62, 0x0a, + 0x0d, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x55, 0x6e, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x26, + 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, + 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x55, 0x6e, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, + 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, + 0x55, 0x6e, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, + 0x00, 0x12, 0x5f, 0x0a, 0x0c, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x44, 0x65, 0x6c, 0x65, 0x74, + 0x65, 0x12, 0x25, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, + 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x44, 0x65, 0x6c, 0x65, 0x74, + 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, + 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, + 0x6d, 0x65, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x22, 0x00, 0x12, 0x71, 0x0a, 0x12, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x4d, 0x61, 0x72, 0x6b, + 0x52, 0x65, 0x61, 0x64, 0x6f, 0x6e, 0x6c, 0x79, 0x12, 0x2b, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, + 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, + 0x6d, 0x65, 0x4d, 0x61, 0x72, 0x6b, 0x52, 0x65, 0x61, 0x64, 0x6f, 0x6e, 0x6c, 0x79, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2c, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, + 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x4d, + 0x61, 0x72, 0x6b, 0x52, 0x65, 0x61, 0x64, 0x6f, 0x6e, 0x6c, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x71, 0x0a, 0x12, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x4d, + 0x61, 0x72, 0x6b, 0x57, 0x72, 0x69, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x2b, 0x2e, 0x76, 0x6f, + 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, + 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x4d, 0x61, 0x72, 0x6b, 0x57, 0x72, 0x69, 0x74, 0x61, 0x62, 0x6c, + 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2c, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, + 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, + 0x6d, 0x65, 0x4d, 0x61, 0x72, 0x6b, 0x57, 0x72, 0x69, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x68, 0x0a, 0x0f, 0x56, 0x6f, 0x6c, 0x75, + 0x6d, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 
0x65, 0x12, 0x28, 0x2e, 0x76, 0x6f, + 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, + 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, + 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x22, 0x00, 0x12, 0x5f, 0x0a, 0x0c, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x53, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x12, 0x25, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, + 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x53, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x76, 0x6f, 0x6c, 0x75, + 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, + 0x75, 0x6d, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x22, 0x00, 0x12, 0x59, 0x0a, 0x0a, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6f, 0x70, + 0x79, 0x12, 0x23, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, + 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6f, 0x70, 0x79, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x24, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, + 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, + 0x43, 0x6f, 0x70, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x77, + 0x0a, 0x14, 0x52, 0x65, 0x61, 0x64, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x46, 0x69, 0x6c, 0x65, + 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x2d, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, + 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x56, 0x6f, + 0x6c, 0x75, 0x6d, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2e, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, + 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x56, 0x6f, 0x6c, + 0x75, 0x6d, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x55, 0x0a, 0x08, 0x43, 0x6f, 0x70, 0x79, 0x46, + 0x69, 0x6c, 0x65, 0x12, 0x21, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, + 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x43, 0x6f, 0x70, 0x79, 0x46, 0x69, 0x6c, 0x65, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, + 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x43, 0x6f, 0x70, 0x79, 0x46, 0x69, + 0x6c, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x30, 0x01, 0x12, 0x65, + 0x0a, 0x0e, 0x52, 0x65, 0x61, 0x64, 0x4e, 0x65, 0x65, 0x64, 0x6c, 0x65, 0x42, 0x6c, 0x6f, 0x62, + 0x12, 0x27, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, + 0x5f, 0x70, 0x62, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x4e, 0x65, 0x65, 0x64, 0x6c, 0x65, 0x42, 0x6c, + 0x6f, 0x62, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x28, 0x2e, 0x76, 0x6f, 0x6c, 0x75, + 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x52, 0x65, 0x61, + 0x64, 0x4e, 0x65, 0x65, 0x64, 0x6c, 0x65, 0x42, 0x6c, 0x6f, 0x62, 0x52, 0x65, 0x73, 
0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x68, 0x0a, 0x0f, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4e, 0x65, + 0x65, 0x64, 0x6c, 0x65, 0x42, 0x6c, 0x6f, 0x62, 0x12, 0x28, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, + 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x57, 0x72, 0x69, 0x74, + 0x65, 0x4e, 0x65, 0x65, 0x64, 0x6c, 0x65, 0x42, 0x6c, 0x6f, 0x62, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, + 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4e, 0x65, 0x65, 0x64, 0x6c, + 0x65, 0x42, 0x6c, 0x6f, 0x62, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, + 0x6d, 0x0a, 0x10, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x54, 0x61, 0x69, 0x6c, 0x53, 0x65, 0x6e, + 0x64, 0x65, 0x72, 0x12, 0x29, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, + 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x54, 0x61, 0x69, + 0x6c, 0x53, 0x65, 0x6e, 0x64, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2a, + 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, + 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x54, 0x61, 0x69, 0x6c, 0x53, 0x65, 0x6e, 0x64, + 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x30, 0x01, 0x12, 0x71, + 0x0a, 0x12, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x54, 0x61, 0x69, 0x6c, 0x52, 0x65, 0x63, 0x65, + 0x69, 0x76, 0x65, 0x72, 0x12, 0x2b, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, + 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x54, 0x61, + 0x69, 0x6c, 0x52, 0x65, 0x63, 0x65, 0x69, 0x76, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x2c, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, + 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x54, 0x61, 0x69, 0x6c, 0x52, + 0x65, 0x63, 0x65, 0x69, 0x76, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, + 0x00, 0x12, 0x7d, 0x0a, 0x16, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, + 0x72, 0x64, 0x73, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x12, 0x2f, 0x2e, 0x76, 0x6f, + 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, + 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x47, 0x65, 0x6e, + 0x65, 0x72, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x30, 0x2e, 0x76, + 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, + 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x47, 0x65, + 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, + 0x12, 0x7a, 0x0a, 0x15, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, + 0x64, 0x73, 0x52, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x12, 0x2e, 0x2e, 0x76, 0x6f, 0x6c, 0x75, + 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, + 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x52, 0x65, 0x62, 0x75, 0x69, + 0x6c, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2f, 0x2e, 0x76, 0x6f, 0x6c, 0x75, + 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, + 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x52, 0x65, 0x62, 0x75, 0x69, + 0x6c, 0x64, 
0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x71, 0x0a, 0x12, + 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x43, 0x6f, + 0x70, 0x79, 0x12, 0x2b, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, + 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, + 0x61, 0x72, 0x64, 0x73, 0x43, 0x6f, 0x70, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x2c, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, + 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, + 0x73, 0x43, 0x6f, 0x70, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, + 0x77, 0x0a, 0x14, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, + 0x73, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x12, 0x2d, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, + 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, + 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2e, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, + 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, + 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x74, 0x0a, 0x13, 0x56, 0x6f, 0x6c, 0x75, + 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x4d, 0x6f, 0x75, 0x6e, 0x74, 0x12, + 0x2c, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, + 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, + 0x73, 0x4d, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2d, 0x2e, + 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, + 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x4d, + 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x7a, + 0x0a, 0x15, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, + 0x55, 0x6e, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x2e, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, + 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, + 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x55, 0x6e, 0x6d, 0x6f, 0x75, 0x6e, 0x74, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2f, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, + 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, + 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x55, 0x6e, 0x6d, 0x6f, 0x75, 0x6e, 0x74, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x70, 0x0a, 0x11, 0x56, 0x6f, + 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x61, 0x64, 0x12, + 0x2a, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, + 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, + 0x52, 0x65, 0x61, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2b, 0x2e, 0x76, 0x6f, + 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, + 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 
0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x61, 0x64, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x30, 0x01, 0x12, 0x71, 0x0a, 0x12, + 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x42, 0x6c, 0x6f, 0x62, 0x44, 0x65, 0x6c, 0x65, + 0x74, 0x65, 0x12, 0x2b, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, + 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x42, 0x6c, + 0x6f, 0x62, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x2c, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, + 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x42, 0x6c, 0x6f, 0x62, 0x44, + 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, + 0x7d, 0x0a, 0x16, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, + 0x73, 0x54, 0x6f, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x12, 0x2f, 0x2e, 0x76, 0x6f, 0x6c, 0x75, + 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, + 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x54, 0x6f, 0x56, 0x6f, 0x6c, + 0x75, 0x6d, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x30, 0x2e, 0x76, 0x6f, 0x6c, + 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, + 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x54, 0x6f, 0x56, 0x6f, + 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x88, + 0x01, 0x0a, 0x19, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x54, 0x69, 0x65, 0x72, 0x4d, 0x6f, 0x76, + 0x65, 0x44, 0x61, 0x74, 0x54, 0x6f, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x12, 0x32, 0x2e, 0x76, + 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, + 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x54, 0x69, 0x65, 0x72, 0x4d, 0x6f, 0x76, 0x65, 0x44, 0x61, + 0x74, 0x54, 0x6f, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x33, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, + 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x54, 0x69, 0x65, 0x72, 0x4d, 0x6f, + 0x76, 0x65, 0x44, 0x61, 0x74, 0x54, 0x6f, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x30, 0x01, 0x12, 0x8e, 0x01, 0x0a, 0x1b, 0x56, 0x6f, + 0x6c, 0x75, 0x6d, 0x65, 0x54, 0x69, 0x65, 0x72, 0x4d, 0x6f, 0x76, 0x65, 0x44, 0x61, 0x74, 0x46, + 0x72, 0x6f, 0x6d, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x12, 0x34, 0x2e, 0x76, 0x6f, 0x6c, 0x75, + 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, + 0x75, 0x6d, 0x65, 0x54, 0x69, 0x65, 0x72, 0x4d, 0x6f, 0x76, 0x65, 0x44, 0x61, 0x74, 0x46, 0x72, + 0x6f, 0x6d, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x35, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, + 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x54, 0x69, 0x65, 0x72, 0x4d, 0x6f, 0x76, + 0x65, 0x44, 0x61, 0x74, 0x46, 0x72, 0x6f, 0x6d, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x30, 0x01, 0x12, 0x71, 0x0a, 0x12, 0x56, 0x6f, + 0x6c, 0x75, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x12, 0x2b, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 
0x73, 0x65, 0x72, 0x76, 0x65, 0x72, + 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, + 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2c, 0x2e, + 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, + 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x6e, 0x0a, + 0x11, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x4c, 0x65, 0x61, + 0x76, 0x65, 0x12, 0x2a, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, + 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x76, + 0x65, 0x72, 0x4c, 0x65, 0x61, 0x76, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2b, + 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, + 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x4c, 0x65, + 0x61, 0x76, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x4c, 0x0a, + 0x05, 0x51, 0x75, 0x65, 0x72, 0x79, 0x12, 0x1e, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, + 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, + 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x69, 0x65, + 0x64, 0x53, 0x74, 0x72, 0x69, 0x70, 0x65, 0x22, 0x00, 0x30, 0x01, 0x12, 0x71, 0x0a, 0x12, 0x56, + 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x4e, 0x65, 0x65, 0x64, 0x6c, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, + 0x73, 0x12, 0x2b, 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, + 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x4e, 0x65, 0x65, 0x64, 0x6c, + 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2c, + 0x2e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, + 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x4e, 0x65, 0x65, 0x64, 0x6c, 0x65, 0x53, 0x74, + 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x42, 0x39, + 0x5a, 0x37, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x68, 0x72, + 0x69, 0x73, 0x6c, 0x75, 0x73, 0x66, 0x2f, 0x73, 0x65, 0x61, 0x77, 0x65, 0x65, 0x64, 0x66, 0x73, + 0x2f, 0x77, 0x65, 0x65, 0x64, 0x2f, 0x70, 0x62, 0x2f, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, + 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x33, +} + +var ( + file_volume_server_proto_rawDescOnce sync.Once + file_volume_server_proto_rawDescData = file_volume_server_proto_rawDesc +) + +func file_volume_server_proto_rawDescGZIP() []byte { + file_volume_server_proto_rawDescOnce.Do(func() { + file_volume_server_proto_rawDescData = protoimpl.X.CompressGZIP(file_volume_server_proto_rawDescData) + }) + return file_volume_server_proto_rawDescData +} + +var file_volume_server_proto_msgTypes = make([]protoimpl.MessageInfo, 90) +var file_volume_server_proto_goTypes = []interface{}{ + (*BatchDeleteRequest)(nil), // 0: volume_server_pb.BatchDeleteRequest + (*BatchDeleteResponse)(nil), // 1: volume_server_pb.BatchDeleteResponse + (*DeleteResult)(nil), // 2: volume_server_pb.DeleteResult + (*Empty)(nil), // 3: 
volume_server_pb.Empty + (*VacuumVolumeCheckRequest)(nil), // 4: volume_server_pb.VacuumVolumeCheckRequest + (*VacuumVolumeCheckResponse)(nil), // 5: volume_server_pb.VacuumVolumeCheckResponse + (*VacuumVolumeCompactRequest)(nil), // 6: volume_server_pb.VacuumVolumeCompactRequest + (*VacuumVolumeCompactResponse)(nil), // 7: volume_server_pb.VacuumVolumeCompactResponse + (*VacuumVolumeCommitRequest)(nil), // 8: volume_server_pb.VacuumVolumeCommitRequest + (*VacuumVolumeCommitResponse)(nil), // 9: volume_server_pb.VacuumVolumeCommitResponse + (*VacuumVolumeCleanupRequest)(nil), // 10: volume_server_pb.VacuumVolumeCleanupRequest + (*VacuumVolumeCleanupResponse)(nil), // 11: volume_server_pb.VacuumVolumeCleanupResponse + (*DeleteCollectionRequest)(nil), // 12: volume_server_pb.DeleteCollectionRequest + (*DeleteCollectionResponse)(nil), // 13: volume_server_pb.DeleteCollectionResponse + (*AllocateVolumeRequest)(nil), // 14: volume_server_pb.AllocateVolumeRequest + (*AllocateVolumeResponse)(nil), // 15: volume_server_pb.AllocateVolumeResponse + (*VolumeSyncStatusRequest)(nil), // 16: volume_server_pb.VolumeSyncStatusRequest + (*VolumeSyncStatusResponse)(nil), // 17: volume_server_pb.VolumeSyncStatusResponse + (*VolumeIncrementalCopyRequest)(nil), // 18: volume_server_pb.VolumeIncrementalCopyRequest + (*VolumeIncrementalCopyResponse)(nil), // 19: volume_server_pb.VolumeIncrementalCopyResponse + (*VolumeMountRequest)(nil), // 20: volume_server_pb.VolumeMountRequest + (*VolumeMountResponse)(nil), // 21: volume_server_pb.VolumeMountResponse + (*VolumeUnmountRequest)(nil), // 22: volume_server_pb.VolumeUnmountRequest + (*VolumeUnmountResponse)(nil), // 23: volume_server_pb.VolumeUnmountResponse + (*VolumeDeleteRequest)(nil), // 24: volume_server_pb.VolumeDeleteRequest + (*VolumeDeleteResponse)(nil), // 25: volume_server_pb.VolumeDeleteResponse + (*VolumeMarkReadonlyRequest)(nil), // 26: volume_server_pb.VolumeMarkReadonlyRequest + (*VolumeMarkReadonlyResponse)(nil), // 27: volume_server_pb.VolumeMarkReadonlyResponse + (*VolumeMarkWritableRequest)(nil), // 28: volume_server_pb.VolumeMarkWritableRequest + (*VolumeMarkWritableResponse)(nil), // 29: volume_server_pb.VolumeMarkWritableResponse + (*VolumeConfigureRequest)(nil), // 30: volume_server_pb.VolumeConfigureRequest + (*VolumeConfigureResponse)(nil), // 31: volume_server_pb.VolumeConfigureResponse + (*VolumeStatusRequest)(nil), // 32: volume_server_pb.VolumeStatusRequest + (*VolumeStatusResponse)(nil), // 33: volume_server_pb.VolumeStatusResponse + (*VolumeCopyRequest)(nil), // 34: volume_server_pb.VolumeCopyRequest + (*VolumeCopyResponse)(nil), // 35: volume_server_pb.VolumeCopyResponse + (*CopyFileRequest)(nil), // 36: volume_server_pb.CopyFileRequest + (*CopyFileResponse)(nil), // 37: volume_server_pb.CopyFileResponse + (*ReadNeedleBlobRequest)(nil), // 38: volume_server_pb.ReadNeedleBlobRequest + (*ReadNeedleBlobResponse)(nil), // 39: volume_server_pb.ReadNeedleBlobResponse + (*WriteNeedleBlobRequest)(nil), // 40: volume_server_pb.WriteNeedleBlobRequest + (*WriteNeedleBlobResponse)(nil), // 41: volume_server_pb.WriteNeedleBlobResponse + (*VolumeTailSenderRequest)(nil), // 42: volume_server_pb.VolumeTailSenderRequest + (*VolumeTailSenderResponse)(nil), // 43: volume_server_pb.VolumeTailSenderResponse + (*VolumeTailReceiverRequest)(nil), // 44: volume_server_pb.VolumeTailReceiverRequest + (*VolumeTailReceiverResponse)(nil), // 45: volume_server_pb.VolumeTailReceiverResponse + (*VolumeEcShardsGenerateRequest)(nil), // 46: 
volume_server_pb.VolumeEcShardsGenerateRequest + (*VolumeEcShardsGenerateResponse)(nil), // 47: volume_server_pb.VolumeEcShardsGenerateResponse + (*VolumeEcShardsRebuildRequest)(nil), // 48: volume_server_pb.VolumeEcShardsRebuildRequest + (*VolumeEcShardsRebuildResponse)(nil), // 49: volume_server_pb.VolumeEcShardsRebuildResponse + (*VolumeEcShardsCopyRequest)(nil), // 50: volume_server_pb.VolumeEcShardsCopyRequest + (*VolumeEcShardsCopyResponse)(nil), // 51: volume_server_pb.VolumeEcShardsCopyResponse + (*VolumeEcShardsDeleteRequest)(nil), // 52: volume_server_pb.VolumeEcShardsDeleteRequest + (*VolumeEcShardsDeleteResponse)(nil), // 53: volume_server_pb.VolumeEcShardsDeleteResponse + (*VolumeEcShardsMountRequest)(nil), // 54: volume_server_pb.VolumeEcShardsMountRequest + (*VolumeEcShardsMountResponse)(nil), // 55: volume_server_pb.VolumeEcShardsMountResponse + (*VolumeEcShardsUnmountRequest)(nil), // 56: volume_server_pb.VolumeEcShardsUnmountRequest + (*VolumeEcShardsUnmountResponse)(nil), // 57: volume_server_pb.VolumeEcShardsUnmountResponse + (*VolumeEcShardReadRequest)(nil), // 58: volume_server_pb.VolumeEcShardReadRequest + (*VolumeEcShardReadResponse)(nil), // 59: volume_server_pb.VolumeEcShardReadResponse + (*VolumeEcBlobDeleteRequest)(nil), // 60: volume_server_pb.VolumeEcBlobDeleteRequest + (*VolumeEcBlobDeleteResponse)(nil), // 61: volume_server_pb.VolumeEcBlobDeleteResponse + (*VolumeEcShardsToVolumeRequest)(nil), // 62: volume_server_pb.VolumeEcShardsToVolumeRequest + (*VolumeEcShardsToVolumeResponse)(nil), // 63: volume_server_pb.VolumeEcShardsToVolumeResponse + (*ReadVolumeFileStatusRequest)(nil), // 64: volume_server_pb.ReadVolumeFileStatusRequest + (*ReadVolumeFileStatusResponse)(nil), // 65: volume_server_pb.ReadVolumeFileStatusResponse + (*DiskStatus)(nil), // 66: volume_server_pb.DiskStatus + (*MemStatus)(nil), // 67: volume_server_pb.MemStatus + (*RemoteFile)(nil), // 68: volume_server_pb.RemoteFile + (*VolumeInfo)(nil), // 69: volume_server_pb.VolumeInfo + (*VolumeTierMoveDatToRemoteRequest)(nil), // 70: volume_server_pb.VolumeTierMoveDatToRemoteRequest + (*VolumeTierMoveDatToRemoteResponse)(nil), // 71: volume_server_pb.VolumeTierMoveDatToRemoteResponse + (*VolumeTierMoveDatFromRemoteRequest)(nil), // 72: volume_server_pb.VolumeTierMoveDatFromRemoteRequest + (*VolumeTierMoveDatFromRemoteResponse)(nil), // 73: volume_server_pb.VolumeTierMoveDatFromRemoteResponse + (*VolumeServerStatusRequest)(nil), // 74: volume_server_pb.VolumeServerStatusRequest + (*VolumeServerStatusResponse)(nil), // 75: volume_server_pb.VolumeServerStatusResponse + (*VolumeServerLeaveRequest)(nil), // 76: volume_server_pb.VolumeServerLeaveRequest + (*VolumeServerLeaveResponse)(nil), // 77: volume_server_pb.VolumeServerLeaveResponse + (*QueryRequest)(nil), // 78: volume_server_pb.QueryRequest + (*QueriedStripe)(nil), // 79: volume_server_pb.QueriedStripe + (*VolumeNeedleStatusRequest)(nil), // 80: volume_server_pb.VolumeNeedleStatusRequest + (*VolumeNeedleStatusResponse)(nil), // 81: volume_server_pb.VolumeNeedleStatusResponse + (*QueryRequest_Filter)(nil), // 82: volume_server_pb.QueryRequest.Filter + (*QueryRequest_InputSerialization)(nil), // 83: volume_server_pb.QueryRequest.InputSerialization + (*QueryRequest_OutputSerialization)(nil), // 84: volume_server_pb.QueryRequest.OutputSerialization + (*QueryRequest_InputSerialization_CSVInput)(nil), // 85: volume_server_pb.QueryRequest.InputSerialization.CSVInput + (*QueryRequest_InputSerialization_JSONInput)(nil), // 86: 
volume_server_pb.QueryRequest.InputSerialization.JSONInput + (*QueryRequest_InputSerialization_ParquetInput)(nil), // 87: volume_server_pb.QueryRequest.InputSerialization.ParquetInput + (*QueryRequest_OutputSerialization_CSVOutput)(nil), // 88: volume_server_pb.QueryRequest.OutputSerialization.CSVOutput + (*QueryRequest_OutputSerialization_JSONOutput)(nil), // 89: volume_server_pb.QueryRequest.OutputSerialization.JSONOutput +} +var file_volume_server_proto_depIdxs = []int32{ + 2, // 0: volume_server_pb.BatchDeleteResponse.results:type_name -> volume_server_pb.DeleteResult + 68, // 1: volume_server_pb.VolumeInfo.files:type_name -> volume_server_pb.RemoteFile + 66, // 2: volume_server_pb.VolumeServerStatusResponse.disk_statuses:type_name -> volume_server_pb.DiskStatus + 67, // 3: volume_server_pb.VolumeServerStatusResponse.memory_status:type_name -> volume_server_pb.MemStatus + 82, // 4: volume_server_pb.QueryRequest.filter:type_name -> volume_server_pb.QueryRequest.Filter + 83, // 5: volume_server_pb.QueryRequest.input_serialization:type_name -> volume_server_pb.QueryRequest.InputSerialization + 84, // 6: volume_server_pb.QueryRequest.output_serialization:type_name -> volume_server_pb.QueryRequest.OutputSerialization + 85, // 7: volume_server_pb.QueryRequest.InputSerialization.csv_input:type_name -> volume_server_pb.QueryRequest.InputSerialization.CSVInput + 86, // 8: volume_server_pb.QueryRequest.InputSerialization.json_input:type_name -> volume_server_pb.QueryRequest.InputSerialization.JSONInput + 87, // 9: volume_server_pb.QueryRequest.InputSerialization.parquet_input:type_name -> volume_server_pb.QueryRequest.InputSerialization.ParquetInput + 88, // 10: volume_server_pb.QueryRequest.OutputSerialization.csv_output:type_name -> volume_server_pb.QueryRequest.OutputSerialization.CSVOutput + 89, // 11: volume_server_pb.QueryRequest.OutputSerialization.json_output:type_name -> volume_server_pb.QueryRequest.OutputSerialization.JSONOutput + 0, // 12: volume_server_pb.VolumeServer.BatchDelete:input_type -> volume_server_pb.BatchDeleteRequest + 4, // 13: volume_server_pb.VolumeServer.VacuumVolumeCheck:input_type -> volume_server_pb.VacuumVolumeCheckRequest + 6, // 14: volume_server_pb.VolumeServer.VacuumVolumeCompact:input_type -> volume_server_pb.VacuumVolumeCompactRequest + 8, // 15: volume_server_pb.VolumeServer.VacuumVolumeCommit:input_type -> volume_server_pb.VacuumVolumeCommitRequest + 10, // 16: volume_server_pb.VolumeServer.VacuumVolumeCleanup:input_type -> volume_server_pb.VacuumVolumeCleanupRequest + 12, // 17: volume_server_pb.VolumeServer.DeleteCollection:input_type -> volume_server_pb.DeleteCollectionRequest + 14, // 18: volume_server_pb.VolumeServer.AllocateVolume:input_type -> volume_server_pb.AllocateVolumeRequest + 16, // 19: volume_server_pb.VolumeServer.VolumeSyncStatus:input_type -> volume_server_pb.VolumeSyncStatusRequest + 18, // 20: volume_server_pb.VolumeServer.VolumeIncrementalCopy:input_type -> volume_server_pb.VolumeIncrementalCopyRequest + 20, // 21: volume_server_pb.VolumeServer.VolumeMount:input_type -> volume_server_pb.VolumeMountRequest + 22, // 22: volume_server_pb.VolumeServer.VolumeUnmount:input_type -> volume_server_pb.VolumeUnmountRequest + 24, // 23: volume_server_pb.VolumeServer.VolumeDelete:input_type -> volume_server_pb.VolumeDeleteRequest + 26, // 24: volume_server_pb.VolumeServer.VolumeMarkReadonly:input_type -> volume_server_pb.VolumeMarkReadonlyRequest + 28, // 25: volume_server_pb.VolumeServer.VolumeMarkWritable:input_type -> 
volume_server_pb.VolumeMarkWritableRequest + 30, // 26: volume_server_pb.VolumeServer.VolumeConfigure:input_type -> volume_server_pb.VolumeConfigureRequest + 32, // 27: volume_server_pb.VolumeServer.VolumeStatus:input_type -> volume_server_pb.VolumeStatusRequest + 34, // 28: volume_server_pb.VolumeServer.VolumeCopy:input_type -> volume_server_pb.VolumeCopyRequest + 64, // 29: volume_server_pb.VolumeServer.ReadVolumeFileStatus:input_type -> volume_server_pb.ReadVolumeFileStatusRequest + 36, // 30: volume_server_pb.VolumeServer.CopyFile:input_type -> volume_server_pb.CopyFileRequest + 38, // 31: volume_server_pb.VolumeServer.ReadNeedleBlob:input_type -> volume_server_pb.ReadNeedleBlobRequest + 40, // 32: volume_server_pb.VolumeServer.WriteNeedleBlob:input_type -> volume_server_pb.WriteNeedleBlobRequest + 42, // 33: volume_server_pb.VolumeServer.VolumeTailSender:input_type -> volume_server_pb.VolumeTailSenderRequest + 44, // 34: volume_server_pb.VolumeServer.VolumeTailReceiver:input_type -> volume_server_pb.VolumeTailReceiverRequest + 46, // 35: volume_server_pb.VolumeServer.VolumeEcShardsGenerate:input_type -> volume_server_pb.VolumeEcShardsGenerateRequest + 48, // 36: volume_server_pb.VolumeServer.VolumeEcShardsRebuild:input_type -> volume_server_pb.VolumeEcShardsRebuildRequest + 50, // 37: volume_server_pb.VolumeServer.VolumeEcShardsCopy:input_type -> volume_server_pb.VolumeEcShardsCopyRequest + 52, // 38: volume_server_pb.VolumeServer.VolumeEcShardsDelete:input_type -> volume_server_pb.VolumeEcShardsDeleteRequest + 54, // 39: volume_server_pb.VolumeServer.VolumeEcShardsMount:input_type -> volume_server_pb.VolumeEcShardsMountRequest + 56, // 40: volume_server_pb.VolumeServer.VolumeEcShardsUnmount:input_type -> volume_server_pb.VolumeEcShardsUnmountRequest + 58, // 41: volume_server_pb.VolumeServer.VolumeEcShardRead:input_type -> volume_server_pb.VolumeEcShardReadRequest + 60, // 42: volume_server_pb.VolumeServer.VolumeEcBlobDelete:input_type -> volume_server_pb.VolumeEcBlobDeleteRequest + 62, // 43: volume_server_pb.VolumeServer.VolumeEcShardsToVolume:input_type -> volume_server_pb.VolumeEcShardsToVolumeRequest + 70, // 44: volume_server_pb.VolumeServer.VolumeTierMoveDatToRemote:input_type -> volume_server_pb.VolumeTierMoveDatToRemoteRequest + 72, // 45: volume_server_pb.VolumeServer.VolumeTierMoveDatFromRemote:input_type -> volume_server_pb.VolumeTierMoveDatFromRemoteRequest + 74, // 46: volume_server_pb.VolumeServer.VolumeServerStatus:input_type -> volume_server_pb.VolumeServerStatusRequest + 76, // 47: volume_server_pb.VolumeServer.VolumeServerLeave:input_type -> volume_server_pb.VolumeServerLeaveRequest + 78, // 48: volume_server_pb.VolumeServer.Query:input_type -> volume_server_pb.QueryRequest + 80, // 49: volume_server_pb.VolumeServer.VolumeNeedleStatus:input_type -> volume_server_pb.VolumeNeedleStatusRequest + 1, // 50: volume_server_pb.VolumeServer.BatchDelete:output_type -> volume_server_pb.BatchDeleteResponse + 5, // 51: volume_server_pb.VolumeServer.VacuumVolumeCheck:output_type -> volume_server_pb.VacuumVolumeCheckResponse + 7, // 52: volume_server_pb.VolumeServer.VacuumVolumeCompact:output_type -> volume_server_pb.VacuumVolumeCompactResponse + 9, // 53: volume_server_pb.VolumeServer.VacuumVolumeCommit:output_type -> volume_server_pb.VacuumVolumeCommitResponse + 11, // 54: volume_server_pb.VolumeServer.VacuumVolumeCleanup:output_type -> volume_server_pb.VacuumVolumeCleanupResponse + 13, // 55: volume_server_pb.VolumeServer.DeleteCollection:output_type -> 
volume_server_pb.DeleteCollectionResponse + 15, // 56: volume_server_pb.VolumeServer.AllocateVolume:output_type -> volume_server_pb.AllocateVolumeResponse + 17, // 57: volume_server_pb.VolumeServer.VolumeSyncStatus:output_type -> volume_server_pb.VolumeSyncStatusResponse + 19, // 58: volume_server_pb.VolumeServer.VolumeIncrementalCopy:output_type -> volume_server_pb.VolumeIncrementalCopyResponse + 21, // 59: volume_server_pb.VolumeServer.VolumeMount:output_type -> volume_server_pb.VolumeMountResponse + 23, // 60: volume_server_pb.VolumeServer.VolumeUnmount:output_type -> volume_server_pb.VolumeUnmountResponse + 25, // 61: volume_server_pb.VolumeServer.VolumeDelete:output_type -> volume_server_pb.VolumeDeleteResponse + 27, // 62: volume_server_pb.VolumeServer.VolumeMarkReadonly:output_type -> volume_server_pb.VolumeMarkReadonlyResponse + 29, // 63: volume_server_pb.VolumeServer.VolumeMarkWritable:output_type -> volume_server_pb.VolumeMarkWritableResponse + 31, // 64: volume_server_pb.VolumeServer.VolumeConfigure:output_type -> volume_server_pb.VolumeConfigureResponse + 33, // 65: volume_server_pb.VolumeServer.VolumeStatus:output_type -> volume_server_pb.VolumeStatusResponse + 35, // 66: volume_server_pb.VolumeServer.VolumeCopy:output_type -> volume_server_pb.VolumeCopyResponse + 65, // 67: volume_server_pb.VolumeServer.ReadVolumeFileStatus:output_type -> volume_server_pb.ReadVolumeFileStatusResponse + 37, // 68: volume_server_pb.VolumeServer.CopyFile:output_type -> volume_server_pb.CopyFileResponse + 39, // 69: volume_server_pb.VolumeServer.ReadNeedleBlob:output_type -> volume_server_pb.ReadNeedleBlobResponse + 41, // 70: volume_server_pb.VolumeServer.WriteNeedleBlob:output_type -> volume_server_pb.WriteNeedleBlobResponse + 43, // 71: volume_server_pb.VolumeServer.VolumeTailSender:output_type -> volume_server_pb.VolumeTailSenderResponse + 45, // 72: volume_server_pb.VolumeServer.VolumeTailReceiver:output_type -> volume_server_pb.VolumeTailReceiverResponse + 47, // 73: volume_server_pb.VolumeServer.VolumeEcShardsGenerate:output_type -> volume_server_pb.VolumeEcShardsGenerateResponse + 49, // 74: volume_server_pb.VolumeServer.VolumeEcShardsRebuild:output_type -> volume_server_pb.VolumeEcShardsRebuildResponse + 51, // 75: volume_server_pb.VolumeServer.VolumeEcShardsCopy:output_type -> volume_server_pb.VolumeEcShardsCopyResponse + 53, // 76: volume_server_pb.VolumeServer.VolumeEcShardsDelete:output_type -> volume_server_pb.VolumeEcShardsDeleteResponse + 55, // 77: volume_server_pb.VolumeServer.VolumeEcShardsMount:output_type -> volume_server_pb.VolumeEcShardsMountResponse + 57, // 78: volume_server_pb.VolumeServer.VolumeEcShardsUnmount:output_type -> volume_server_pb.VolumeEcShardsUnmountResponse + 59, // 79: volume_server_pb.VolumeServer.VolumeEcShardRead:output_type -> volume_server_pb.VolumeEcShardReadResponse + 61, // 80: volume_server_pb.VolumeServer.VolumeEcBlobDelete:output_type -> volume_server_pb.VolumeEcBlobDeleteResponse + 63, // 81: volume_server_pb.VolumeServer.VolumeEcShardsToVolume:output_type -> volume_server_pb.VolumeEcShardsToVolumeResponse + 71, // 82: volume_server_pb.VolumeServer.VolumeTierMoveDatToRemote:output_type -> volume_server_pb.VolumeTierMoveDatToRemoteResponse + 73, // 83: volume_server_pb.VolumeServer.VolumeTierMoveDatFromRemote:output_type -> volume_server_pb.VolumeTierMoveDatFromRemoteResponse + 75, // 84: volume_server_pb.VolumeServer.VolumeServerStatus:output_type -> volume_server_pb.VolumeServerStatusResponse + 77, // 85: 
volume_server_pb.VolumeServer.VolumeServerLeave:output_type -> volume_server_pb.VolumeServerLeaveResponse + 79, // 86: volume_server_pb.VolumeServer.Query:output_type -> volume_server_pb.QueriedStripe + 81, // 87: volume_server_pb.VolumeServer.VolumeNeedleStatus:output_type -> volume_server_pb.VolumeNeedleStatusResponse + 50, // [50:88] is the sub-list for method output_type + 12, // [12:50] is the sub-list for method input_type + 12, // [12:12] is the sub-list for extension type_name + 12, // [12:12] is the sub-list for extension extendee + 0, // [0:12] is the sub-list for field type_name +} + +func init() { file_volume_server_proto_init() } +func file_volume_server_proto_init() { + if File_volume_server_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_volume_server_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BatchDeleteRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BatchDeleteResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DeleteResult); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Empty); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VacuumVolumeCheckRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VacuumVolumeCheckResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VacuumVolumeCompactRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VacuumVolumeCompactResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VacuumVolumeCommitRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VacuumVolumeCommitResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := 
v.(*VacuumVolumeCleanupRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VacuumVolumeCleanupResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DeleteCollectionRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DeleteCollectionResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AllocateVolumeRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AllocateVolumeResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeSyncStatusRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeSyncStatusResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeIncrementalCopyRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeIncrementalCopyResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeMountRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeMountResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeUnmountRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeUnmountResponse); i { + case 0: + return &v.state + 
case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeDeleteRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeDeleteResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeMarkReadonlyRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeMarkReadonlyResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[28].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeMarkWritableRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[29].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeMarkWritableResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[30].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeConfigureRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[31].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeConfigureResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[32].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeStatusRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[33].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeStatusResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[34].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeCopyRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[35].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeCopyResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[36].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CopyFileRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } 
+ } + file_volume_server_proto_msgTypes[37].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CopyFileResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[38].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ReadNeedleBlobRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[39].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ReadNeedleBlobResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[40].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*WriteNeedleBlobRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[41].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*WriteNeedleBlobResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[42].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeTailSenderRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[43].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeTailSenderResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[44].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeTailReceiverRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[45].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeTailReceiverResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[46].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeEcShardsGenerateRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[47].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeEcShardsGenerateResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[48].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeEcShardsRebuildRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[49].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeEcShardsRebuildResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + 
file_volume_server_proto_msgTypes[50].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeEcShardsCopyRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[51].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeEcShardsCopyResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[52].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeEcShardsDeleteRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[53].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeEcShardsDeleteResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[54].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeEcShardsMountRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[55].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeEcShardsMountResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[56].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeEcShardsUnmountRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[57].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeEcShardsUnmountResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[58].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeEcShardReadRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[59].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeEcShardReadResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[60].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeEcBlobDeleteRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[61].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeEcBlobDeleteResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[62].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeEcShardsToVolumeRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + 
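The exporter closures generated above exist only for builds where `protoimpl.UnsafeEnabled` is false: each one gives the protobuf runtime reflective access to a message's unexported `state`, `sizeCache`, and `unknownFields` bookkeeping fields. The `goTypes` and `depIdxs` tables earlier in this file serve the same registration step: `depIdxs` is a single flat `[]int32` whose trailing markers (`[0:12]` field type_names, `[12:50]` method input types, `[50:88]` method output types) tell the `TypeBuilder` how to slice it. A minimal sketch of the exporter pattern with an invented message type (`fakeMessage` is illustrative, not part of the generated code):

```go
// Sketch of the Exporter pattern used by the generated code; the real
// file wires these closures into protoimpl.TypeBuilder.
package main

import "fmt"

// fakeMessage stands in for a generated message type: the runtime
// needs access to these unexported bookkeeping fields.
type fakeMessage struct {
	state         int // placeholder for protoimpl.MessageState
	sizeCache     int // placeholder for protoimpl.SizeCache
	unknownFields []byte
}

// exporter mirrors the generated closures: given a message value and a
// field index, it returns a pointer to the requested internal field so
// the reflection-based (non-unsafe) code path can read and write it.
func exporter(v interface{}, i int) interface{} {
	switch v := v.(*fakeMessage); i {
	case 0:
		return &v.state
	case 1:
		return &v.sizeCache
	case 2:
		return &v.unknownFields
	default:
		return nil
	}
}

func main() {
	m := &fakeMessage{}
	// The runtime would use the returned pointer to mutate the field.
	if p, ok := exporter(m, 0).(*int); ok {
		*p = 42
	}
	fmt.Println(m.state) // prints 42
}
```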
file_volume_server_proto_msgTypes[63].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeEcShardsToVolumeResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[64].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ReadVolumeFileStatusRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[65].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ReadVolumeFileStatusResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[66].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DiskStatus); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[67].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MemStatus); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[68].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RemoteFile); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[69].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeInfo); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[70].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeTierMoveDatToRemoteRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[71].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeTierMoveDatToRemoteResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[72].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeTierMoveDatFromRemoteRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[73].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeTierMoveDatFromRemoteResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[74].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeServerStatusRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[75].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeServerStatusResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[76].Exporter = func(v 
interface{}, i int) interface{} { + switch v := v.(*VolumeServerLeaveRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[77].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeServerLeaveResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[78].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*QueryRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[79].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*QueriedStripe); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[80].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeNeedleStatusRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[81].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeNeedleStatusResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[82].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*QueryRequest_Filter); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[83].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*QueryRequest_InputSerialization); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[84].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*QueryRequest_OutputSerialization); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[85].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*QueryRequest_InputSerialization_CSVInput); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[86].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*QueryRequest_InputSerialization_JSONInput); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[87].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*QueryRequest_InputSerialization_ParquetInput); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[88].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*QueryRequest_OutputSerialization_CSVOutput); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_volume_server_proto_msgTypes[89].Exporter = 
func(v interface{}, i int) interface{} { + switch v := v.(*QueryRequest_OutputSerialization_JSONOutput); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_volume_server_proto_rawDesc, + NumEnums: 0, + NumMessages: 90, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_volume_server_proto_goTypes, + DependencyIndexes: file_volume_server_proto_depIdxs, + MessageInfos: file_volume_server_proto_msgTypes, + }.Build() + File_volume_server_proto = out.File + file_volume_server_proto_rawDesc = nil + file_volume_server_proto_goTypes = nil + file_volume_server_proto_depIdxs = nil } // Reference imports to suppress errors if they are not otherwise used. var _ context.Context -var _ grpc.ClientConn +var _ grpc.ClientConnInterface // This is a compile-time assertion to ensure that this generated file // is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 - -// Client API for VolumeServer service +const _ = grpc.SupportPackageIsVersion6 +// VolumeServerClient is the client API for VolumeServer service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. type VolumeServerClient interface { - // Experts only: takes multiple fid parameters. This function does not propagate deletes to replicas. + //Experts only: takes multiple fid parameters. This function does not propagate deletes to replicas. BatchDelete(ctx context.Context, in *BatchDeleteRequest, opts ...grpc.CallOption) (*BatchDeleteResponse, error) VacuumVolumeCheck(ctx context.Context, in *VacuumVolumeCheckRequest, opts ...grpc.CallOption) (*VacuumVolumeCheckResponse, error) VacuumVolumeCompact(ctx context.Context, in *VacuumVolumeCompactRequest, opts ...grpc.CallOption) (*VacuumVolumeCompactResponse, error) @@ -2122,10 +7270,15 @@ type VolumeServerClient interface { VolumeUnmount(ctx context.Context, in *VolumeUnmountRequest, opts ...grpc.CallOption) (*VolumeUnmountResponse, error) VolumeDelete(ctx context.Context, in *VolumeDeleteRequest, opts ...grpc.CallOption) (*VolumeDeleteResponse, error) VolumeMarkReadonly(ctx context.Context, in *VolumeMarkReadonlyRequest, opts ...grpc.CallOption) (*VolumeMarkReadonlyResponse, error) + VolumeMarkWritable(ctx context.Context, in *VolumeMarkWritableRequest, opts ...grpc.CallOption) (*VolumeMarkWritableResponse, error) + VolumeConfigure(ctx context.Context, in *VolumeConfigureRequest, opts ...grpc.CallOption) (*VolumeConfigureResponse, error) + VolumeStatus(ctx context.Context, in *VolumeStatusRequest, opts ...grpc.CallOption) (*VolumeStatusResponse, error) // copy the .idx .dat files, and mount this volume VolumeCopy(ctx context.Context, in *VolumeCopyRequest, opts ...grpc.CallOption) (*VolumeCopyResponse, error) ReadVolumeFileStatus(ctx context.Context, in *ReadVolumeFileStatusRequest, opts ...grpc.CallOption) (*ReadVolumeFileStatusResponse, error) CopyFile(ctx context.Context, in *CopyFileRequest, opts ...grpc.CallOption) (VolumeServer_CopyFileClient, error) + ReadNeedleBlob(ctx context.Context, in *ReadNeedleBlobRequest, opts ...grpc.CallOption) (*ReadNeedleBlobResponse, error) + WriteNeedleBlob(ctx context.Context, in *WriteNeedleBlobRequest, opts ...grpc.CallOption) (*WriteNeedleBlobResponse, error) 
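The regenerated client targets `grpc.ClientConnInterface` (gated by `SupportPackageIsVersion6`) instead of the concrete `*grpc.ClientConn`, and the interface mixes unary RPCs with server-streaming ones. A minimal usage sketch for one of the unary methods added in this diff; the address, dial options, and the exact request/response field names (`VolumeId`, `IsReadOnly`, per the proto definitions) should be treated as assumptions:

```go
// Dial a volume server and call the new VolumeStatus RPC; the
// localhost:18080 address is illustrative only.
package main

import (
	"context"
	"log"
	"time"

	"google.golang.org/grpc"

	"github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	// *grpc.ClientConn satisfies the grpc.ClientConnInterface that
	// NewVolumeServerClient now accepts.
	conn, err := grpc.Dial("localhost:18080", grpc.WithInsecure())
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	client := volume_server_pb.NewVolumeServerClient(conn)

	resp, err := client.VolumeStatus(ctx, &volume_server_pb.VolumeStatusRequest{VolumeId: 1})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("volume 1 read-only: %v", resp.IsReadOnly)
}
```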
VolumeTailSender(ctx context.Context, in *VolumeTailSenderRequest, opts ...grpc.CallOption) (VolumeServer_VolumeTailSenderClient, error) VolumeTailReceiver(ctx context.Context, in *VolumeTailReceiverRequest, opts ...grpc.CallOption) (*VolumeTailReceiverResponse, error) // erasure coding @@ -2141,21 +7294,24 @@ type VolumeServerClient interface { // tiered storage VolumeTierMoveDatToRemote(ctx context.Context, in *VolumeTierMoveDatToRemoteRequest, opts ...grpc.CallOption) (VolumeServer_VolumeTierMoveDatToRemoteClient, error) VolumeTierMoveDatFromRemote(ctx context.Context, in *VolumeTierMoveDatFromRemoteRequest, opts ...grpc.CallOption) (VolumeServer_VolumeTierMoveDatFromRemoteClient, error) - // query + VolumeServerStatus(ctx context.Context, in *VolumeServerStatusRequest, opts ...grpc.CallOption) (*VolumeServerStatusResponse, error) + VolumeServerLeave(ctx context.Context, in *VolumeServerLeaveRequest, opts ...grpc.CallOption) (*VolumeServerLeaveResponse, error) + // query Query(ctx context.Context, in *QueryRequest, opts ...grpc.CallOption) (VolumeServer_QueryClient, error) + VolumeNeedleStatus(ctx context.Context, in *VolumeNeedleStatusRequest, opts ...grpc.CallOption) (*VolumeNeedleStatusResponse, error) } type volumeServerClient struct { - cc *grpc.ClientConn + cc grpc.ClientConnInterface } -func NewVolumeServerClient(cc *grpc.ClientConn) VolumeServerClient { +func NewVolumeServerClient(cc grpc.ClientConnInterface) VolumeServerClient { return &volumeServerClient{cc} } func (c *volumeServerClient) BatchDelete(ctx context.Context, in *BatchDeleteRequest, opts ...grpc.CallOption) (*BatchDeleteResponse, error) { out := new(BatchDeleteResponse) - err := grpc.Invoke(ctx, "/volume_server_pb.VolumeServer/BatchDelete", in, out, c.cc, opts...) + err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/BatchDelete", in, out, opts...) if err != nil { return nil, err } @@ -2164,7 +7320,7 @@ func (c *volumeServerClient) BatchDelete(ctx context.Context, in *BatchDeleteReq func (c *volumeServerClient) VacuumVolumeCheck(ctx context.Context, in *VacuumVolumeCheckRequest, opts ...grpc.CallOption) (*VacuumVolumeCheckResponse, error) { out := new(VacuumVolumeCheckResponse) - err := grpc.Invoke(ctx, "/volume_server_pb.VolumeServer/VacuumVolumeCheck", in, out, c.cc, opts...) + err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/VacuumVolumeCheck", in, out, opts...) if err != nil { return nil, err } @@ -2173,7 +7329,7 @@ func (c *volumeServerClient) VacuumVolumeCheck(ctx context.Context, in *VacuumVo func (c *volumeServerClient) VacuumVolumeCompact(ctx context.Context, in *VacuumVolumeCompactRequest, opts ...grpc.CallOption) (*VacuumVolumeCompactResponse, error) { out := new(VacuumVolumeCompactResponse) - err := grpc.Invoke(ctx, "/volume_server_pb.VolumeServer/VacuumVolumeCompact", in, out, c.cc, opts...) + err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/VacuumVolumeCompact", in, out, opts...) if err != nil { return nil, err } @@ -2182,7 +7338,7 @@ func (c *volumeServerClient) VacuumVolumeCompact(ctx context.Context, in *Vacuum func (c *volumeServerClient) VacuumVolumeCommit(ctx context.Context, in *VacuumVolumeCommitRequest, opts ...grpc.CallOption) (*VacuumVolumeCommitResponse, error) { out := new(VacuumVolumeCommitResponse) - err := grpc.Invoke(ctx, "/volume_server_pb.VolumeServer/VacuumVolumeCommit", in, out, c.cc, opts...) + err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/VacuumVolumeCommit", in, out, opts...) 
if err != nil { return nil, err } @@ -2191,7 +7347,7 @@ func (c *volumeServerClient) VacuumVolumeCommit(ctx context.Context, in *VacuumV func (c *volumeServerClient) VacuumVolumeCleanup(ctx context.Context, in *VacuumVolumeCleanupRequest, opts ...grpc.CallOption) (*VacuumVolumeCleanupResponse, error) { out := new(VacuumVolumeCleanupResponse) - err := grpc.Invoke(ctx, "/volume_server_pb.VolumeServer/VacuumVolumeCleanup", in, out, c.cc, opts...) + err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/VacuumVolumeCleanup", in, out, opts...) if err != nil { return nil, err } @@ -2200,7 +7356,7 @@ func (c *volumeServerClient) VacuumVolumeCleanup(ctx context.Context, in *Vacuum func (c *volumeServerClient) DeleteCollection(ctx context.Context, in *DeleteCollectionRequest, opts ...grpc.CallOption) (*DeleteCollectionResponse, error) { out := new(DeleteCollectionResponse) - err := grpc.Invoke(ctx, "/volume_server_pb.VolumeServer/DeleteCollection", in, out, c.cc, opts...) + err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/DeleteCollection", in, out, opts...) if err != nil { return nil, err } @@ -2209,7 +7365,7 @@ func (c *volumeServerClient) DeleteCollection(ctx context.Context, in *DeleteCol func (c *volumeServerClient) AllocateVolume(ctx context.Context, in *AllocateVolumeRequest, opts ...grpc.CallOption) (*AllocateVolumeResponse, error) { out := new(AllocateVolumeResponse) - err := grpc.Invoke(ctx, "/volume_server_pb.VolumeServer/AllocateVolume", in, out, c.cc, opts...) + err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/AllocateVolume", in, out, opts...) if err != nil { return nil, err } @@ -2218,7 +7374,7 @@ func (c *volumeServerClient) AllocateVolume(ctx context.Context, in *AllocateVol func (c *volumeServerClient) VolumeSyncStatus(ctx context.Context, in *VolumeSyncStatusRequest, opts ...grpc.CallOption) (*VolumeSyncStatusResponse, error) { out := new(VolumeSyncStatusResponse) - err := grpc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeSyncStatus", in, out, c.cc, opts...) + err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeSyncStatus", in, out, opts...) if err != nil { return nil, err } @@ -2226,7 +7382,7 @@ func (c *volumeServerClient) VolumeSyncStatus(ctx context.Context, in *VolumeSyn } func (c *volumeServerClient) VolumeIncrementalCopy(ctx context.Context, in *VolumeIncrementalCopyRequest, opts ...grpc.CallOption) (VolumeServer_VolumeIncrementalCopyClient, error) { - stream, err := grpc.NewClientStream(ctx, &_VolumeServer_serviceDesc.Streams[0], c.cc, "/volume_server_pb.VolumeServer/VolumeIncrementalCopy", opts...) + stream, err := c.cc.NewStream(ctx, &_VolumeServer_serviceDesc.Streams[0], "/volume_server_pb.VolumeServer/VolumeIncrementalCopy", opts...) if err != nil { return nil, err } @@ -2259,7 +7415,7 @@ func (x *volumeServerVolumeIncrementalCopyClient) Recv() (*VolumeIncrementalCopy func (c *volumeServerClient) VolumeMount(ctx context.Context, in *VolumeMountRequest, opts ...grpc.CallOption) (*VolumeMountResponse, error) { out := new(VolumeMountResponse) - err := grpc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeMount", in, out, c.cc, opts...) + err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeMount", in, out, opts...) 
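Every unary stub in this file follows the same shape: allocate the response, then invoke by the full `/package.Service/Method` name. The mechanical change throughout the diff is from the package-level `grpc.Invoke(ctx, method, in, out, c.cc, opts...)` to the connection method `c.cc.Invoke(ctx, method, in, out, opts...)`. A sketch of what a generated stub reduces to, calling `VolumeMount` through `Invoke` directly (the package name and the `VolumeId` field are assumptions based on the proto):

```go
// What each generated unary stub reduces to: calling VolumeMount via
// Invoke without the generated client. Connection setup is assumed to
// happen elsewhere.
package volumeclient

import (
	"context"

	"google.golang.org/grpc"

	"github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
)

func volumeMount(ctx context.Context, cc grpc.ClientConnInterface, id uint32) (*volume_server_pb.VolumeMountResponse, error) {
	in := &volume_server_pb.VolumeMountRequest{VolumeId: id}
	out := new(volume_server_pb.VolumeMountResponse)
	// Full method name: "/<proto package>.<service>/<method>" — exactly
	// the string the generated stubs pass to c.cc.Invoke.
	if err := cc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeMount", in, out); err != nil {
		return nil, err
	}
	return out, nil
}
```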
if err != nil { return nil, err } @@ -2268,7 +7424,7 @@ func (c *volumeServerClient) VolumeMount(ctx context.Context, in *VolumeMountReq func (c *volumeServerClient) VolumeUnmount(ctx context.Context, in *VolumeUnmountRequest, opts ...grpc.CallOption) (*VolumeUnmountResponse, error) { out := new(VolumeUnmountResponse) - err := grpc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeUnmount", in, out, c.cc, opts...) + err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeUnmount", in, out, opts...) if err != nil { return nil, err } @@ -2277,7 +7433,7 @@ func (c *volumeServerClient) VolumeUnmount(ctx context.Context, in *VolumeUnmoun func (c *volumeServerClient) VolumeDelete(ctx context.Context, in *VolumeDeleteRequest, opts ...grpc.CallOption) (*VolumeDeleteResponse, error) { out := new(VolumeDeleteResponse) - err := grpc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeDelete", in, out, c.cc, opts...) + err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeDelete", in, out, opts...) if err != nil { return nil, err } @@ -2286,7 +7442,34 @@ func (c *volumeServerClient) VolumeDelete(ctx context.Context, in *VolumeDeleteR func (c *volumeServerClient) VolumeMarkReadonly(ctx context.Context, in *VolumeMarkReadonlyRequest, opts ...grpc.CallOption) (*VolumeMarkReadonlyResponse, error) { out := new(VolumeMarkReadonlyResponse) - err := grpc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeMarkReadonly", in, out, c.cc, opts...) + err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeMarkReadonly", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *volumeServerClient) VolumeMarkWritable(ctx context.Context, in *VolumeMarkWritableRequest, opts ...grpc.CallOption) (*VolumeMarkWritableResponse, error) { + out := new(VolumeMarkWritableResponse) + err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeMarkWritable", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *volumeServerClient) VolumeConfigure(ctx context.Context, in *VolumeConfigureRequest, opts ...grpc.CallOption) (*VolumeConfigureResponse, error) { + out := new(VolumeConfigureResponse) + err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeConfigure", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *volumeServerClient) VolumeStatus(ctx context.Context, in *VolumeStatusRequest, opts ...grpc.CallOption) (*VolumeStatusResponse, error) { + out := new(VolumeStatusResponse) + err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeStatus", in, out, opts...) if err != nil { return nil, err } @@ -2295,7 +7478,7 @@ func (c *volumeServerClient) VolumeMarkReadonly(ctx context.Context, in *VolumeM func (c *volumeServerClient) VolumeCopy(ctx context.Context, in *VolumeCopyRequest, opts ...grpc.CallOption) (*VolumeCopyResponse, error) { out := new(VolumeCopyResponse) - err := grpc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeCopy", in, out, c.cc, opts...) + err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeCopy", in, out, opts...) if err != nil { return nil, err } @@ -2304,7 +7487,7 @@ func (c *volumeServerClient) VolumeCopy(ctx context.Context, in *VolumeCopyReque func (c *volumeServerClient) ReadVolumeFileStatus(ctx context.Context, in *ReadVolumeFileStatusRequest, opts ...grpc.CallOption) (*ReadVolumeFileStatusResponse, error) { out := new(ReadVolumeFileStatusResponse) - err := grpc.Invoke(ctx, "/volume_server_pb.VolumeServer/ReadVolumeFileStatus", in, out, c.cc, opts...) 
+ err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/ReadVolumeFileStatus", in, out, opts...) if err != nil { return nil, err } @@ -2312,7 +7495,7 @@ func (c *volumeServerClient) ReadVolumeFileStatus(ctx context.Context, in *ReadV } func (c *volumeServerClient) CopyFile(ctx context.Context, in *CopyFileRequest, opts ...grpc.CallOption) (VolumeServer_CopyFileClient, error) { - stream, err := grpc.NewClientStream(ctx, &_VolumeServer_serviceDesc.Streams[1], c.cc, "/volume_server_pb.VolumeServer/CopyFile", opts...) + stream, err := c.cc.NewStream(ctx, &_VolumeServer_serviceDesc.Streams[1], "/volume_server_pb.VolumeServer/CopyFile", opts...) if err != nil { return nil, err } @@ -2343,8 +7526,26 @@ func (x *volumeServerCopyFileClient) Recv() (*CopyFileResponse, error) { return m, nil } +func (c *volumeServerClient) ReadNeedleBlob(ctx context.Context, in *ReadNeedleBlobRequest, opts ...grpc.CallOption) (*ReadNeedleBlobResponse, error) { + out := new(ReadNeedleBlobResponse) + err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/ReadNeedleBlob", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *volumeServerClient) WriteNeedleBlob(ctx context.Context, in *WriteNeedleBlobRequest, opts ...grpc.CallOption) (*WriteNeedleBlobResponse, error) { + out := new(WriteNeedleBlobResponse) + err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/WriteNeedleBlob", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + func (c *volumeServerClient) VolumeTailSender(ctx context.Context, in *VolumeTailSenderRequest, opts ...grpc.CallOption) (VolumeServer_VolumeTailSenderClient, error) { - stream, err := grpc.NewClientStream(ctx, &_VolumeServer_serviceDesc.Streams[2], c.cc, "/volume_server_pb.VolumeServer/VolumeTailSender", opts...) + stream, err := c.cc.NewStream(ctx, &_VolumeServer_serviceDesc.Streams[2], "/volume_server_pb.VolumeServer/VolumeTailSender", opts...) if err != nil { return nil, err } @@ -2377,7 +7578,7 @@ func (x *volumeServerVolumeTailSenderClient) Recv() (*VolumeTailSenderResponse, func (c *volumeServerClient) VolumeTailReceiver(ctx context.Context, in *VolumeTailReceiverRequest, opts ...grpc.CallOption) (*VolumeTailReceiverResponse, error) { out := new(VolumeTailReceiverResponse) - err := grpc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeTailReceiver", in, out, c.cc, opts...) + err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeTailReceiver", in, out, opts...) if err != nil { return nil, err } @@ -2386,7 +7587,7 @@ func (c *volumeServerClient) VolumeTailReceiver(ctx context.Context, in *VolumeT func (c *volumeServerClient) VolumeEcShardsGenerate(ctx context.Context, in *VolumeEcShardsGenerateRequest, opts ...grpc.CallOption) (*VolumeEcShardsGenerateResponse, error) { out := new(VolumeEcShardsGenerateResponse) - err := grpc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeEcShardsGenerate", in, out, c.cc, opts...) + err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeEcShardsGenerate", in, out, opts...) if err != nil { return nil, err } @@ -2395,7 +7596,7 @@ func (c *volumeServerClient) VolumeEcShardsGenerate(ctx context.Context, in *Vol func (c *volumeServerClient) VolumeEcShardsRebuild(ctx context.Context, in *VolumeEcShardsRebuildRequest, opts ...grpc.CallOption) (*VolumeEcShardsRebuildResponse, error) { out := new(VolumeEcShardsRebuildResponse) - err := grpc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeEcShardsRebuild", in, out, c.cc, opts...) 
+ err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeEcShardsRebuild", in, out, opts...) if err != nil { return nil, err } @@ -2404,7 +7605,7 @@ func (c *volumeServerClient) VolumeEcShardsRebuild(ctx context.Context, in *Volu func (c *volumeServerClient) VolumeEcShardsCopy(ctx context.Context, in *VolumeEcShardsCopyRequest, opts ...grpc.CallOption) (*VolumeEcShardsCopyResponse, error) { out := new(VolumeEcShardsCopyResponse) - err := grpc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeEcShardsCopy", in, out, c.cc, opts...) + err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeEcShardsCopy", in, out, opts...) if err != nil { return nil, err } @@ -2413,7 +7614,7 @@ func (c *volumeServerClient) VolumeEcShardsCopy(ctx context.Context, in *VolumeE func (c *volumeServerClient) VolumeEcShardsDelete(ctx context.Context, in *VolumeEcShardsDeleteRequest, opts ...grpc.CallOption) (*VolumeEcShardsDeleteResponse, error) { out := new(VolumeEcShardsDeleteResponse) - err := grpc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeEcShardsDelete", in, out, c.cc, opts...) + err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeEcShardsDelete", in, out, opts...) if err != nil { return nil, err } @@ -2422,7 +7623,7 @@ func (c *volumeServerClient) VolumeEcShardsDelete(ctx context.Context, in *Volum func (c *volumeServerClient) VolumeEcShardsMount(ctx context.Context, in *VolumeEcShardsMountRequest, opts ...grpc.CallOption) (*VolumeEcShardsMountResponse, error) { out := new(VolumeEcShardsMountResponse) - err := grpc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeEcShardsMount", in, out, c.cc, opts...) + err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeEcShardsMount", in, out, opts...) if err != nil { return nil, err } @@ -2431,7 +7632,7 @@ func (c *volumeServerClient) VolumeEcShardsMount(ctx context.Context, in *Volume func (c *volumeServerClient) VolumeEcShardsUnmount(ctx context.Context, in *VolumeEcShardsUnmountRequest, opts ...grpc.CallOption) (*VolumeEcShardsUnmountResponse, error) { out := new(VolumeEcShardsUnmountResponse) - err := grpc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeEcShardsUnmount", in, out, c.cc, opts...) + err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeEcShardsUnmount", in, out, opts...) if err != nil { return nil, err } @@ -2439,7 +7640,7 @@ func (c *volumeServerClient) VolumeEcShardsUnmount(ctx context.Context, in *Volu } func (c *volumeServerClient) VolumeEcShardRead(ctx context.Context, in *VolumeEcShardReadRequest, opts ...grpc.CallOption) (VolumeServer_VolumeEcShardReadClient, error) { - stream, err := grpc.NewClientStream(ctx, &_VolumeServer_serviceDesc.Streams[3], c.cc, "/volume_server_pb.VolumeServer/VolumeEcShardRead", opts...) + stream, err := c.cc.NewStream(ctx, &_VolumeServer_serviceDesc.Streams[3], "/volume_server_pb.VolumeServer/VolumeEcShardRead", opts...) if err != nil { return nil, err } @@ -2472,7 +7673,7 @@ func (x *volumeServerVolumeEcShardReadClient) Recv() (*VolumeEcShardReadResponse func (c *volumeServerClient) VolumeEcBlobDelete(ctx context.Context, in *VolumeEcBlobDeleteRequest, opts ...grpc.CallOption) (*VolumeEcBlobDeleteResponse, error) { out := new(VolumeEcBlobDeleteResponse) - err := grpc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeEcBlobDelete", in, out, c.cc, opts...) + err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeEcBlobDelete", in, out, opts...) 
if err != nil { return nil, err } @@ -2481,7 +7682,7 @@ func (c *volumeServerClient) VolumeEcBlobDelete(ctx context.Context, in *VolumeE func (c *volumeServerClient) VolumeEcShardsToVolume(ctx context.Context, in *VolumeEcShardsToVolumeRequest, opts ...grpc.CallOption) (*VolumeEcShardsToVolumeResponse, error) { out := new(VolumeEcShardsToVolumeResponse) - err := grpc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeEcShardsToVolume", in, out, c.cc, opts...) + err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeEcShardsToVolume", in, out, opts...) if err != nil { return nil, err } @@ -2489,7 +7690,7 @@ func (c *volumeServerClient) VolumeEcShardsToVolume(ctx context.Context, in *Vol } func (c *volumeServerClient) VolumeTierMoveDatToRemote(ctx context.Context, in *VolumeTierMoveDatToRemoteRequest, opts ...grpc.CallOption) (VolumeServer_VolumeTierMoveDatToRemoteClient, error) { - stream, err := grpc.NewClientStream(ctx, &_VolumeServer_serviceDesc.Streams[4], c.cc, "/volume_server_pb.VolumeServer/VolumeTierMoveDatToRemote", opts...) + stream, err := c.cc.NewStream(ctx, &_VolumeServer_serviceDesc.Streams[4], "/volume_server_pb.VolumeServer/VolumeTierMoveDatToRemote", opts...) if err != nil { return nil, err } @@ -2521,7 +7722,7 @@ func (x *volumeServerVolumeTierMoveDatToRemoteClient) Recv() (*VolumeTierMoveDat } func (c *volumeServerClient) VolumeTierMoveDatFromRemote(ctx context.Context, in *VolumeTierMoveDatFromRemoteRequest, opts ...grpc.CallOption) (VolumeServer_VolumeTierMoveDatFromRemoteClient, error) { - stream, err := grpc.NewClientStream(ctx, &_VolumeServer_serviceDesc.Streams[5], c.cc, "/volume_server_pb.VolumeServer/VolumeTierMoveDatFromRemote", opts...) + stream, err := c.cc.NewStream(ctx, &_VolumeServer_serviceDesc.Streams[5], "/volume_server_pb.VolumeServer/VolumeTierMoveDatFromRemote", opts...) if err != nil { return nil, err } @@ -2552,8 +7753,26 @@ func (x *volumeServerVolumeTierMoveDatFromRemoteClient) Recv() (*VolumeTierMoveD return m, nil } +func (c *volumeServerClient) VolumeServerStatus(ctx context.Context, in *VolumeServerStatusRequest, opts ...grpc.CallOption) (*VolumeServerStatusResponse, error) { + out := new(VolumeServerStatusResponse) + err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeServerStatus", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *volumeServerClient) VolumeServerLeave(ctx context.Context, in *VolumeServerLeaveRequest, opts ...grpc.CallOption) (*VolumeServerLeaveResponse, error) { + out := new(VolumeServerLeaveResponse) + err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeServerLeave", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + func (c *volumeServerClient) Query(ctx context.Context, in *QueryRequest, opts ...grpc.CallOption) (VolumeServer_QueryClient, error) { - stream, err := grpc.NewClientStream(ctx, &_VolumeServer_serviceDesc.Streams[6], c.cc, "/volume_server_pb.VolumeServer/Query", opts...) + stream, err := c.cc.NewStream(ctx, &_VolumeServer_serviceDesc.Streams[6], "/volume_server_pb.VolumeServer/Query", opts...) 
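`Query` — like `VolumeIncrementalCopy`, `CopyFile`, `VolumeTailSender`, `VolumeEcShardRead`, and the two tier-move RPCs — is server-streaming: the generated method opens a stream with `c.cc.NewStream`, sends the single request, half-closes the send side, and returns a typed wrapper whose `Recv` yields one message per response until `io.EOF`. A consumption sketch, assuming `client` was built with `NewVolumeServerClient` and that `QueriedStripe`'s payload field is `Records` per the proto:

```go
// Drain a server-streaming RPC: Recv until io.EOF signals a clean end.
package volumeclient

import (
	"context"
	"io"
	"log"

	"github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
)

func runQuery(ctx context.Context, client volume_server_pb.VolumeServerClient, req *volume_server_pb.QueryRequest) error {
	stream, err := client.Query(ctx, req)
	if err != nil {
		return err
	}
	for {
		stripe, err := stream.Recv()
		if err == io.EOF {
			return nil // server finished the stream
		}
		if err != nil {
			return err
		}
		log.Printf("got %d bytes of query output", len(stripe.Records))
	}
}
```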
if err != nil { return nil, err } @@ -2584,10 +7803,18 @@ func (x *volumeServerQueryClient) Recv() (*QueriedStripe, error) { return m, nil } -// Server API for VolumeServer service +func (c *volumeServerClient) VolumeNeedleStatus(ctx context.Context, in *VolumeNeedleStatusRequest, opts ...grpc.CallOption) (*VolumeNeedleStatusResponse, error) { + out := new(VolumeNeedleStatusResponse) + err := c.cc.Invoke(ctx, "/volume_server_pb.VolumeServer/VolumeNeedleStatus", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} +// VolumeServerServer is the server API for VolumeServer service. type VolumeServerServer interface { - // Experts only: takes multiple fid parameters. This function does not propagate deletes to replicas. + //Experts only: takes multiple fid parameters. This function does not propagate deletes to replicas. BatchDelete(context.Context, *BatchDeleteRequest) (*BatchDeleteResponse, error) VacuumVolumeCheck(context.Context, *VacuumVolumeCheckRequest) (*VacuumVolumeCheckResponse, error) VacuumVolumeCompact(context.Context, *VacuumVolumeCompactRequest) (*VacuumVolumeCompactResponse, error) @@ -2601,10 +7828,15 @@ type VolumeServerServer interface { VolumeUnmount(context.Context, *VolumeUnmountRequest) (*VolumeUnmountResponse, error) VolumeDelete(context.Context, *VolumeDeleteRequest) (*VolumeDeleteResponse, error) VolumeMarkReadonly(context.Context, *VolumeMarkReadonlyRequest) (*VolumeMarkReadonlyResponse, error) + VolumeMarkWritable(context.Context, *VolumeMarkWritableRequest) (*VolumeMarkWritableResponse, error) + VolumeConfigure(context.Context, *VolumeConfigureRequest) (*VolumeConfigureResponse, error) + VolumeStatus(context.Context, *VolumeStatusRequest) (*VolumeStatusResponse, error) // copy the .idx .dat files, and mount this volume VolumeCopy(context.Context, *VolumeCopyRequest) (*VolumeCopyResponse, error) ReadVolumeFileStatus(context.Context, *ReadVolumeFileStatusRequest) (*ReadVolumeFileStatusResponse, error) CopyFile(*CopyFileRequest, VolumeServer_CopyFileServer) error + ReadNeedleBlob(context.Context, *ReadNeedleBlobRequest) (*ReadNeedleBlobResponse, error) + WriteNeedleBlob(context.Context, *WriteNeedleBlobRequest) (*WriteNeedleBlobResponse, error) VolumeTailSender(*VolumeTailSenderRequest, VolumeServer_VolumeTailSenderServer) error VolumeTailReceiver(context.Context, *VolumeTailReceiverRequest) (*VolumeTailReceiverResponse, error) // erasure coding @@ -2620,8 +7852,130 @@ type VolumeServerServer interface { // tiered storage VolumeTierMoveDatToRemote(*VolumeTierMoveDatToRemoteRequest, VolumeServer_VolumeTierMoveDatToRemoteServer) error VolumeTierMoveDatFromRemote(*VolumeTierMoveDatFromRemoteRequest, VolumeServer_VolumeTierMoveDatFromRemoteServer) error - // query + VolumeServerStatus(context.Context, *VolumeServerStatusRequest) (*VolumeServerStatusResponse, error) + VolumeServerLeave(context.Context, *VolumeServerLeaveRequest) (*VolumeServerLeaveResponse, error) + // query Query(*QueryRequest, VolumeServer_QueryServer) error + VolumeNeedleStatus(context.Context, *VolumeNeedleStatusRequest) (*VolumeNeedleStatusResponse, error) +} + +// UnimplementedVolumeServerServer can be embedded to have forward compatible implementations. 
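
// The embeddable stub declared next is protoc-gen-go's standard
// forward-compatibility device: a concrete server embeds
// UnimplementedVolumeServerServer, overrides only the RPCs it actually
// serves, and every method added by a later regeneration (VolumeStatus,
// VolumeServerLeave, VolumeNeedleStatus, ...) degrades to a
// codes.Unimplemented error instead of a compile failure. A hedged sketch;
// the server type and the empty response value are illustrative only.
type demoVolumeServer struct {
	volume_server_pb.UnimplementedVolumeServerServer // unimplemented RPCs answer codes.Unimplemented
}

// Override just the methods this server actually supports.
func (s *demoVolumeServer) VolumeStatus(ctx context.Context, req *volume_server_pb.VolumeStatusRequest) (*volume_server_pb.VolumeStatusResponse, error) {
	return &volume_server_pb.VolumeStatusResponse{}, nil
}

// Registration is unchanged:
//	grpcServer := grpc.NewServer()
//	volume_server_pb.RegisterVolumeServerServer(grpcServer, &demoVolumeServer{})
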
+type UnimplementedVolumeServerServer struct { +} + +func (*UnimplementedVolumeServerServer) BatchDelete(context.Context, *BatchDeleteRequest) (*BatchDeleteResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method BatchDelete not implemented") +} +func (*UnimplementedVolumeServerServer) VacuumVolumeCheck(context.Context, *VacuumVolumeCheckRequest) (*VacuumVolumeCheckResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method VacuumVolumeCheck not implemented") +} +func (*UnimplementedVolumeServerServer) VacuumVolumeCompact(context.Context, *VacuumVolumeCompactRequest) (*VacuumVolumeCompactResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method VacuumVolumeCompact not implemented") +} +func (*UnimplementedVolumeServerServer) VacuumVolumeCommit(context.Context, *VacuumVolumeCommitRequest) (*VacuumVolumeCommitResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method VacuumVolumeCommit not implemented") +} +func (*UnimplementedVolumeServerServer) VacuumVolumeCleanup(context.Context, *VacuumVolumeCleanupRequest) (*VacuumVolumeCleanupResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method VacuumVolumeCleanup not implemented") +} +func (*UnimplementedVolumeServerServer) DeleteCollection(context.Context, *DeleteCollectionRequest) (*DeleteCollectionResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method DeleteCollection not implemented") +} +func (*UnimplementedVolumeServerServer) AllocateVolume(context.Context, *AllocateVolumeRequest) (*AllocateVolumeResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method AllocateVolume not implemented") +} +func (*UnimplementedVolumeServerServer) VolumeSyncStatus(context.Context, *VolumeSyncStatusRequest) (*VolumeSyncStatusResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method VolumeSyncStatus not implemented") +} +func (*UnimplementedVolumeServerServer) VolumeIncrementalCopy(*VolumeIncrementalCopyRequest, VolumeServer_VolumeIncrementalCopyServer) error { + return status.Errorf(codes.Unimplemented, "method VolumeIncrementalCopy not implemented") +} +func (*UnimplementedVolumeServerServer) VolumeMount(context.Context, *VolumeMountRequest) (*VolumeMountResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method VolumeMount not implemented") +} +func (*UnimplementedVolumeServerServer) VolumeUnmount(context.Context, *VolumeUnmountRequest) (*VolumeUnmountResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method VolumeUnmount not implemented") +} +func (*UnimplementedVolumeServerServer) VolumeDelete(context.Context, *VolumeDeleteRequest) (*VolumeDeleteResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method VolumeDelete not implemented") +} +func (*UnimplementedVolumeServerServer) VolumeMarkReadonly(context.Context, *VolumeMarkReadonlyRequest) (*VolumeMarkReadonlyResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method VolumeMarkReadonly not implemented") +} +func (*UnimplementedVolumeServerServer) VolumeMarkWritable(context.Context, *VolumeMarkWritableRequest) (*VolumeMarkWritableResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method VolumeMarkWritable not implemented") +} +func (*UnimplementedVolumeServerServer) VolumeConfigure(context.Context, *VolumeConfigureRequest) (*VolumeConfigureResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method VolumeConfigure not implemented") +} +func 
(*UnimplementedVolumeServerServer) VolumeStatus(context.Context, *VolumeStatusRequest) (*VolumeStatusResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method VolumeStatus not implemented") +} +func (*UnimplementedVolumeServerServer) VolumeCopy(context.Context, *VolumeCopyRequest) (*VolumeCopyResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method VolumeCopy not implemented") +} +func (*UnimplementedVolumeServerServer) ReadVolumeFileStatus(context.Context, *ReadVolumeFileStatusRequest) (*ReadVolumeFileStatusResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ReadVolumeFileStatus not implemented") +} +func (*UnimplementedVolumeServerServer) CopyFile(*CopyFileRequest, VolumeServer_CopyFileServer) error { + return status.Errorf(codes.Unimplemented, "method CopyFile not implemented") +} +func (*UnimplementedVolumeServerServer) ReadNeedleBlob(context.Context, *ReadNeedleBlobRequest) (*ReadNeedleBlobResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ReadNeedleBlob not implemented") +} +func (*UnimplementedVolumeServerServer) WriteNeedleBlob(context.Context, *WriteNeedleBlobRequest) (*WriteNeedleBlobResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method WriteNeedleBlob not implemented") +} +func (*UnimplementedVolumeServerServer) VolumeTailSender(*VolumeTailSenderRequest, VolumeServer_VolumeTailSenderServer) error { + return status.Errorf(codes.Unimplemented, "method VolumeTailSender not implemented") +} +func (*UnimplementedVolumeServerServer) VolumeTailReceiver(context.Context, *VolumeTailReceiverRequest) (*VolumeTailReceiverResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method VolumeTailReceiver not implemented") +} +func (*UnimplementedVolumeServerServer) VolumeEcShardsGenerate(context.Context, *VolumeEcShardsGenerateRequest) (*VolumeEcShardsGenerateResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method VolumeEcShardsGenerate not implemented") +} +func (*UnimplementedVolumeServerServer) VolumeEcShardsRebuild(context.Context, *VolumeEcShardsRebuildRequest) (*VolumeEcShardsRebuildResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method VolumeEcShardsRebuild not implemented") +} +func (*UnimplementedVolumeServerServer) VolumeEcShardsCopy(context.Context, *VolumeEcShardsCopyRequest) (*VolumeEcShardsCopyResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method VolumeEcShardsCopy not implemented") +} +func (*UnimplementedVolumeServerServer) VolumeEcShardsDelete(context.Context, *VolumeEcShardsDeleteRequest) (*VolumeEcShardsDeleteResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method VolumeEcShardsDelete not implemented") +} +func (*UnimplementedVolumeServerServer) VolumeEcShardsMount(context.Context, *VolumeEcShardsMountRequest) (*VolumeEcShardsMountResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method VolumeEcShardsMount not implemented") +} +func (*UnimplementedVolumeServerServer) VolumeEcShardsUnmount(context.Context, *VolumeEcShardsUnmountRequest) (*VolumeEcShardsUnmountResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method VolumeEcShardsUnmount not implemented") +} +func (*UnimplementedVolumeServerServer) VolumeEcShardRead(*VolumeEcShardReadRequest, VolumeServer_VolumeEcShardReadServer) error { + return status.Errorf(codes.Unimplemented, "method VolumeEcShardRead not implemented") +} +func (*UnimplementedVolumeServerServer) 
VolumeEcBlobDelete(context.Context, *VolumeEcBlobDeleteRequest) (*VolumeEcBlobDeleteResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method VolumeEcBlobDelete not implemented") +} +func (*UnimplementedVolumeServerServer) VolumeEcShardsToVolume(context.Context, *VolumeEcShardsToVolumeRequest) (*VolumeEcShardsToVolumeResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method VolumeEcShardsToVolume not implemented") +} +func (*UnimplementedVolumeServerServer) VolumeTierMoveDatToRemote(*VolumeTierMoveDatToRemoteRequest, VolumeServer_VolumeTierMoveDatToRemoteServer) error { + return status.Errorf(codes.Unimplemented, "method VolumeTierMoveDatToRemote not implemented") +} +func (*UnimplementedVolumeServerServer) VolumeTierMoveDatFromRemote(*VolumeTierMoveDatFromRemoteRequest, VolumeServer_VolumeTierMoveDatFromRemoteServer) error { + return status.Errorf(codes.Unimplemented, "method VolumeTierMoveDatFromRemote not implemented") +} +func (*UnimplementedVolumeServerServer) VolumeServerStatus(context.Context, *VolumeServerStatusRequest) (*VolumeServerStatusResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method VolumeServerStatus not implemented") +} +func (*UnimplementedVolumeServerServer) VolumeServerLeave(context.Context, *VolumeServerLeaveRequest) (*VolumeServerLeaveResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method VolumeServerLeave not implemented") +} +func (*UnimplementedVolumeServerServer) Query(*QueryRequest, VolumeServer_QueryServer) error { + return status.Errorf(codes.Unimplemented, "method Query not implemented") +} +func (*UnimplementedVolumeServerServer) VolumeNeedleStatus(context.Context, *VolumeNeedleStatusRequest) (*VolumeNeedleStatusResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method VolumeNeedleStatus not implemented") } func RegisterVolumeServerServer(s *grpc.Server, srv VolumeServerServer) { @@ -2865,6 +8219,60 @@ func _VolumeServer_VolumeMarkReadonly_Handler(srv interface{}, ctx context.Conte return interceptor(ctx, in, info, handler) } +func _VolumeServer_VolumeMarkWritable_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(VolumeMarkWritableRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(VolumeServerServer).VolumeMarkWritable(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/volume_server_pb.VolumeServer/VolumeMarkWritable", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(VolumeServerServer).VolumeMarkWritable(ctx, req.(*VolumeMarkWritableRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _VolumeServer_VolumeConfigure_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(VolumeConfigureRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(VolumeServerServer).VolumeConfigure(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/volume_server_pb.VolumeServer/VolumeConfigure", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(VolumeServerServer).VolumeConfigure(ctx, req.(*VolumeConfigureRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _VolumeServer_VolumeStatus_Handler(srv interface{}, ctx 
context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(VolumeStatusRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(VolumeServerServer).VolumeStatus(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/volume_server_pb.VolumeServer/VolumeStatus", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(VolumeServerServer).VolumeStatus(ctx, req.(*VolumeStatusRequest)) + } + return interceptor(ctx, in, info, handler) +} + func _VolumeServer_VolumeCopy_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(VolumeCopyRequest) if err := dec(in); err != nil { @@ -2922,6 +8330,42 @@ func (x *volumeServerCopyFileServer) Send(m *CopyFileResponse) error { return x.ServerStream.SendMsg(m) } +func _VolumeServer_ReadNeedleBlob_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ReadNeedleBlobRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(VolumeServerServer).ReadNeedleBlob(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/volume_server_pb.VolumeServer/ReadNeedleBlob", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(VolumeServerServer).ReadNeedleBlob(ctx, req.(*ReadNeedleBlobRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _VolumeServer_WriteNeedleBlob_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(WriteNeedleBlobRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(VolumeServerServer).WriteNeedleBlob(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/volume_server_pb.VolumeServer/WriteNeedleBlob", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(VolumeServerServer).WriteNeedleBlob(ctx, req.(*WriteNeedleBlobRequest)) + } + return interceptor(ctx, in, info, handler) +} + func _VolumeServer_VolumeTailSender_Handler(srv interface{}, stream grpc.ServerStream) error { m := new(VolumeTailSenderRequest) if err := stream.RecvMsg(m); err != nil { @@ -3168,6 +8612,42 @@ func (x *volumeServerVolumeTierMoveDatFromRemoteServer) Send(m *VolumeTierMoveDa return x.ServerStream.SendMsg(m) } +func _VolumeServer_VolumeServerStatus_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(VolumeServerStatusRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(VolumeServerServer).VolumeServerStatus(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/volume_server_pb.VolumeServer/VolumeServerStatus", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(VolumeServerServer).VolumeServerStatus(ctx, req.(*VolumeServerStatusRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _VolumeServer_VolumeServerLeave_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := 
new(VolumeServerLeaveRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(VolumeServerServer).VolumeServerLeave(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/volume_server_pb.VolumeServer/VolumeServerLeave", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(VolumeServerServer).VolumeServerLeave(ctx, req.(*VolumeServerLeaveRequest)) + } + return interceptor(ctx, in, info, handler) +} + func _VolumeServer_Query_Handler(srv interface{}, stream grpc.ServerStream) error { m := new(QueryRequest) if err := stream.RecvMsg(m); err != nil { @@ -3189,6 +8669,24 @@ func (x *volumeServerQueryServer) Send(m *QueriedStripe) error { return x.ServerStream.SendMsg(m) } +func _VolumeServer_VolumeNeedleStatus_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(VolumeNeedleStatusRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(VolumeServerServer).VolumeNeedleStatus(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/volume_server_pb.VolumeServer/VolumeNeedleStatus", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(VolumeServerServer).VolumeNeedleStatus(ctx, req.(*VolumeNeedleStatusRequest)) + } + return interceptor(ctx, in, info, handler) +} + var _VolumeServer_serviceDesc = grpc.ServiceDesc{ ServiceName: "volume_server_pb.VolumeServer", HandlerType: (*VolumeServerServer)(nil), @@ -3241,6 +8739,18 @@ var _VolumeServer_serviceDesc = grpc.ServiceDesc{ MethodName: "VolumeMarkReadonly", Handler: _VolumeServer_VolumeMarkReadonly_Handler, }, + { + MethodName: "VolumeMarkWritable", + Handler: _VolumeServer_VolumeMarkWritable_Handler, + }, + { + MethodName: "VolumeConfigure", + Handler: _VolumeServer_VolumeConfigure_Handler, + }, + { + MethodName: "VolumeStatus", + Handler: _VolumeServer_VolumeStatus_Handler, + }, { MethodName: "VolumeCopy", Handler: _VolumeServer_VolumeCopy_Handler, @@ -3249,6 +8759,14 @@ var _VolumeServer_serviceDesc = grpc.ServiceDesc{ MethodName: "ReadVolumeFileStatus", Handler: _VolumeServer_ReadVolumeFileStatus_Handler, }, + { + MethodName: "ReadNeedleBlob", + Handler: _VolumeServer_ReadNeedleBlob_Handler, + }, + { + MethodName: "WriteNeedleBlob", + Handler: _VolumeServer_WriteNeedleBlob_Handler, + }, { MethodName: "VolumeTailReceiver", Handler: _VolumeServer_VolumeTailReceiver_Handler, @@ -3285,6 +8803,18 @@ var _VolumeServer_serviceDesc = grpc.ServiceDesc{ MethodName: "VolumeEcShardsToVolume", Handler: _VolumeServer_VolumeEcShardsToVolume_Handler, }, + { + MethodName: "VolumeServerStatus", + Handler: _VolumeServer_VolumeServerStatus_Handler, + }, + { + MethodName: "VolumeServerLeave", + Handler: _VolumeServer_VolumeServerLeave_Handler, + }, + { + MethodName: "VolumeNeedleStatus", + Handler: _VolumeServer_VolumeNeedleStatus_Handler, + }, }, Streams: []grpc.StreamDesc{ { @@ -3325,191 +8855,3 @@ var _VolumeServer_serviceDesc = grpc.ServiceDesc{ }, Metadata: "volume_server.proto", } - -func init() { proto.RegisterFile("volume_server.proto", fileDescriptor0) } - -var fileDescriptor0 = []byte{ - // 2905 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xd4, 0x3a, 0x4d, 0x73, 0xdc, 0xc6, - 0xb1, 0x5c, 0x2e, 0x3f, 0x76, 0x7b, 0x49, 0x91, 0x1a, 0xd2, 0xd4, 0x1a, 0xa2, 0x24, 0x1a, 0xf2, - 0x87, 0x24, 
0x5b, 0x94, 0x4c, 0xdb, 0xcf, 0x7e, 0xf6, 0xb3, 0xdf, 0x93, 0x28, 0xe9, 0x45, 0xb1, - 0x45, 0xd9, 0xa0, 0xac, 0x38, 0xb6, 0x2b, 0xa8, 0x21, 0x30, 0x2b, 0xc2, 0x04, 0x30, 0x10, 0x30, - 0x4b, 0x6b, 0x55, 0xc9, 0xc9, 0x39, 0xa4, 0x2a, 0x95, 0x1c, 0x52, 0xb9, 0xe4, 0x9c, 0x7b, 0xae, - 0xf9, 0x03, 0x39, 0xf8, 0x0f, 0xa4, 0x2a, 0xa7, 0x5c, 0x72, 0xce, 0x21, 0xb7, 0x54, 0xe5, 0x92, - 0x9a, 0x2f, 0x2c, 0x3e, 0xb9, 0xa0, 0xc5, 0x54, 0x2a, 0xb7, 0x41, 0x4f, 0x7f, 0x4c, 0xf7, 0x74, - 0xf7, 0x4c, 0x4f, 0x03, 0x56, 0x0e, 0xa9, 0x3f, 0x0c, 0x88, 0x9d, 0x90, 0xf8, 0x90, 0xc4, 0x9b, - 0x51, 0x4c, 0x19, 0x45, 0xcb, 0x39, 0xa0, 0x1d, 0xed, 0x99, 0xd7, 0x00, 0xdd, 0xc4, 0xcc, 0xd9, - 0xbf, 0x45, 0x7c, 0xc2, 0x88, 0x45, 0x1e, 0x0f, 0x49, 0xc2, 0xd0, 0xf3, 0xd0, 0x19, 0x78, 0x3e, - 0xb1, 0x3d, 0x37, 0xe9, 0xb7, 0x36, 0xda, 0x97, 0xba, 0xd6, 0x3c, 0xff, 0xbe, 0xeb, 0x26, 0xe6, - 0x7d, 0x58, 0xc9, 0x11, 0x24, 0x11, 0x0d, 0x13, 0x82, 0xde, 0x81, 0xf9, 0x98, 0x24, 0x43, 0x9f, - 0x49, 0x82, 0xde, 0xd6, 0xf9, 0xcd, 0xa2, 0xac, 0xcd, 0x94, 0x64, 0xe8, 0x33, 0x4b, 0xa3, 0x9b, - 0xdf, 0xb4, 0x60, 0x21, 0x3b, 0x83, 0xce, 0xc0, 0xbc, 0x12, 0xde, 0x6f, 0x6d, 0xb4, 0x2e, 0x75, - 0xad, 0x39, 0x29, 0x1b, 0xad, 0xc1, 0x5c, 0xc2, 0x30, 0x1b, 0x26, 0xfd, 0xe9, 0x8d, 0xd6, 0xa5, - 0x59, 0x4b, 0x7d, 0xa1, 0x55, 0x98, 0x25, 0x71, 0x4c, 0xe3, 0x7e, 0x5b, 0xa0, 0xcb, 0x0f, 0x84, - 0x60, 0x26, 0xf1, 0x9e, 0x92, 0xfe, 0xcc, 0x46, 0xeb, 0xd2, 0xa2, 0x25, 0xc6, 0xa8, 0x0f, 0xf3, - 0x87, 0x24, 0x4e, 0x3c, 0x1a, 0xf6, 0x67, 0x05, 0x58, 0x7f, 0x9a, 0xf3, 0x30, 0x7b, 0x3b, 0x88, - 0xd8, 0xc8, 0x7c, 0x1b, 0xfa, 0x0f, 0xb1, 0x33, 0x1c, 0x06, 0x0f, 0xc5, 0xf2, 0xb7, 0xf7, 0x89, - 0x73, 0xa0, 0xcd, 0x72, 0x16, 0xba, 0x4a, 0x29, 0xb5, 0xb6, 0x45, 0xab, 0x23, 0x01, 0x77, 0x5d, - 0xf3, 0xff, 0xe0, 0xf9, 0x0a, 0x42, 0x65, 0x9e, 0x8b, 0xb0, 0xf8, 0x08, 0xc7, 0x7b, 0xf8, 0x11, - 0xb1, 0x63, 0xcc, 0x3c, 0x2a, 0xa8, 0x5b, 0xd6, 0x82, 0x02, 0x5a, 0x1c, 0x66, 0x7e, 0x01, 0x46, - 0x8e, 0x03, 0x0d, 0x22, 0xec, 0xb0, 0x26, 0xc2, 0xd1, 0x06, 0xf4, 0xa2, 0x98, 0x60, 0xdf, 0xa7, - 0x0e, 0x66, 0x44, 0xd8, 0xa7, 0x6d, 0x65, 0x41, 0xe6, 0x39, 0x38, 0x5b, 0xc9, 0x5c, 0x2e, 0xd0, - 0x7c, 0xa7, 0xb0, 0x7a, 0x1a, 0x04, 0x5e, 0x23, 0xd1, 0xe6, 0x7a, 0x69, 0xd5, 0x82, 0x52, 0xf1, - 0xfd, 0xef, 0xc2, 0xac, 0x4f, 0x70, 0x38, 0x8c, 0x1a, 0x31, 0x2e, 0xae, 0x58, 0x93, 0xa6, 0x9c, - 0xcf, 0x48, 0xb7, 0xd9, 0xa6, 0xbe, 0x4f, 0x1c, 0xe6, 0xd1, 0x50, 0xb3, 0x3d, 0x0f, 0xe0, 0xa4, - 0x40, 0xe5, 0x44, 0x19, 0x88, 0x69, 0x40, 0xbf, 0x4c, 0xaa, 0xd8, 0xfe, 0xb9, 0x05, 0xcf, 0xdd, - 0x50, 0x46, 0x93, 0x82, 0x1b, 0x6d, 0x40, 0x5e, 0xe4, 0x74, 0x51, 0x64, 0x71, 0x83, 0xda, 0xa5, - 0x0d, 0xe2, 0x18, 0x31, 0x89, 0x7c, 0xcf, 0xc1, 0x82, 0xc5, 0x8c, 0x60, 0x91, 0x05, 0xa1, 0x65, - 0x68, 0x33, 0xe6, 0x0b, 0xcf, 0xed, 0x5a, 0x7c, 0x88, 0xb6, 0x60, 0x2d, 0x20, 0x01, 0x8d, 0x47, - 0x76, 0x80, 0x23, 0x3b, 0xc0, 0x4f, 0x6c, 0xee, 0xe6, 0x76, 0xb0, 0xd7, 0x9f, 0x13, 0xeb, 0x43, - 0x72, 0xf6, 0x1e, 0x8e, 0xee, 0xe1, 0x27, 0xbb, 0xde, 0x53, 0x72, 0x6f, 0xcf, 0xec, 0xc3, 0x5a, - 0x51, 0x3f, 0xa5, 0xfa, 0x7f, 0xc1, 0x19, 0x09, 0xd9, 0x1d, 0x85, 0xce, 0xae, 0x88, 0xad, 0x46, - 0x1b, 0xf5, 0x8f, 0x16, 0xf4, 0xcb, 0x84, 0xca, 0xf3, 0x9f, 0xd5, 0x6a, 0xc7, 0xb6, 0xc9, 0x05, - 0xe8, 0x31, 0xec, 0xf9, 0x36, 0x1d, 0x0c, 0x12, 0xc2, 0x84, 0x21, 0x66, 0x2c, 0xe0, 0xa0, 0xfb, - 0x02, 0x82, 0x2e, 0xc3, 0xb2, 0x23, 0xbd, 0xdf, 0x8e, 0xc9, 0xa1, 0x27, 0xb2, 0xc1, 0xbc, 0x58, - 0xd8, 0x92, 0xa3, 0xa3, 0x42, 0x82, 0x91, 0x09, 0x8b, 0x9e, 0xfb, 0xc4, 0x16, 0xe9, 0x48, 0x24, - 0x93, 0x8e, 0xe0, 0xd6, 0xf3, 0xdc, 
0x27, 0x77, 0x3c, 0x9f, 0x70, 0x8b, 0x9a, 0x0f, 0x61, 0x5d, - 0x2a, 0x7f, 0x37, 0x74, 0x62, 0x12, 0x90, 0x90, 0x61, 0x7f, 0x9b, 0x46, 0xa3, 0x46, 0x6e, 0xf3, - 0x3c, 0x74, 0x12, 0x2f, 0x74, 0x88, 0x1d, 0xca, 0xa4, 0x36, 0x63, 0xcd, 0x8b, 0xef, 0x9d, 0xc4, - 0xbc, 0x09, 0xe7, 0x6a, 0xf8, 0x2a, 0xcb, 0xbe, 0x00, 0x0b, 0x62, 0x61, 0x0e, 0x0d, 0x19, 0x09, - 0x99, 0xe0, 0xbd, 0x60, 0xf5, 0x38, 0x6c, 0x5b, 0x82, 0xcc, 0xd7, 0x01, 0x49, 0x1e, 0xf7, 0xe8, - 0x30, 0x6c, 0x16, 0xce, 0xcf, 0xc1, 0x4a, 0x8e, 0x44, 0xf9, 0xc6, 0x1b, 0xb0, 0x2a, 0xc1, 0x9f, - 0x86, 0x41, 0x63, 0x5e, 0x67, 0xe0, 0xb9, 0x02, 0x91, 0xe2, 0xb6, 0xa5, 0x85, 0xe4, 0x8f, 0x9d, - 0x23, 0x99, 0xad, 0xe9, 0x15, 0xe4, 0x4f, 0x1e, 0x91, 0xb9, 0xe4, 0x82, 0x71, 0x7c, 0x60, 0x11, - 0xec, 0xd2, 0xd0, 0x1f, 0x35, 0xce, 0x5c, 0x15, 0x94, 0x8a, 0xef, 0xef, 0x5a, 0x70, 0x5a, 0xa7, - 0xb4, 0x86, 0xbb, 0x79, 0x4c, 0x77, 0x6e, 0xd7, 0xba, 0xf3, 0xcc, 0xd8, 0x9d, 0x2f, 0xc1, 0x72, - 0x42, 0x87, 0xb1, 0x43, 0x6c, 0x17, 0x33, 0x6c, 0x87, 0xd4, 0x25, 0xca, 0xdb, 0x4f, 0x49, 0xf8, - 0x2d, 0xcc, 0xf0, 0x0e, 0x75, 0x89, 0xf9, 0xbf, 0x7a, 0xb3, 0x73, 0x5e, 0x72, 0x19, 0x4e, 0xfb, - 0x38, 0x61, 0x36, 0x8e, 0x22, 0x12, 0xba, 0x36, 0x66, 0xdc, 0xd5, 0x5a, 0xc2, 0xd5, 0x4e, 0xf1, - 0x89, 0x1b, 0x02, 0x7e, 0x83, 0xed, 0x24, 0xe6, 0xaf, 0xa7, 0x61, 0x89, 0xd3, 0x72, 0xd7, 0x6e, - 0xa4, 0xef, 0x32, 0xb4, 0xc9, 0x13, 0xa6, 0x14, 0xe5, 0x43, 0x74, 0x0d, 0x56, 0x54, 0x0c, 0x79, - 0x34, 0x1c, 0x87, 0x57, 0x5b, 0x66, 0xa3, 0xf1, 0x54, 0x1a, 0x61, 0x17, 0xa0, 0x97, 0x30, 0x1a, - 0xe9, 0x68, 0x9d, 0x91, 0xd1, 0xca, 0x41, 0x2a, 0x5a, 0xf3, 0x36, 0x9d, 0xad, 0xb0, 0xe9, 0x82, - 0x97, 0xd8, 0xc4, 0xb1, 0xe5, 0xaa, 0x44, 0xbc, 0x77, 0x2c, 0xf0, 0x92, 0xdb, 0x8e, 0xb4, 0x06, - 0xfa, 0x00, 0xd6, 0xbd, 0x47, 0x21, 0x8d, 0x89, 0xad, 0x0c, 0x29, 0xa2, 0x26, 0xa4, 0xcc, 0x1e, - 0xd0, 0x61, 0xe8, 0x8a, 0xd8, 0xef, 0x58, 0x7d, 0x89, 0xb3, 0x2b, 0x50, 0xb8, 0x05, 0x76, 0x28, - 0xbb, 0xc3, 0xe7, 0xcd, 0xb7, 0x60, 0x79, 0x6c, 0x95, 0xe6, 0xb1, 0xf7, 0x4d, 0x4b, 0xa7, 0xd3, - 0x07, 0xd8, 0xf3, 0x77, 0x49, 0xe8, 0x92, 0xf8, 0x19, 0x73, 0x02, 0xba, 0x0e, 0xab, 0x9e, 0xeb, - 0x13, 0x9b, 0x79, 0x01, 0xa1, 0x43, 0x66, 0x27, 0xc4, 0xa1, 0xa1, 0x9b, 0x68, 0xfb, 0xf2, 0xb9, - 0x07, 0x72, 0x6a, 0x57, 0xce, 0x98, 0x3f, 0x4d, 0x73, 0x73, 0x76, 0x15, 0xe3, 0x5b, 0x49, 0x48, - 0x08, 0x67, 0xb8, 0x4f, 0xb0, 0x4b, 0x62, 0xa5, 0xc6, 0x82, 0x04, 0x7e, 0x4f, 0xc0, 0xf8, 0x0e, - 0x29, 0xa4, 0x3d, 0xea, 0x8e, 0xc4, 0x8a, 0x16, 0x2c, 0x90, 0xa0, 0x9b, 0xd4, 0x1d, 0x89, 0x24, - 0x99, 0xd8, 0xc2, 0xc9, 0x9c, 0xfd, 0x61, 0x78, 0x20, 0x56, 0xd3, 0xb1, 0x7a, 0x5e, 0xf2, 0x11, - 0x4e, 0xd8, 0x36, 0x07, 0x99, 0xbf, 0x6f, 0xe9, 0x28, 0xe5, 0xcb, 0xb0, 0x88, 0x43, 0xbc, 0xc3, - 0x7f, 0x83, 0x39, 0x38, 0x85, 0x72, 0x82, 0xdc, 0xed, 0x54, 0x05, 0x1c, 0x92, 0x73, 0xea, 0x2c, - 0x13, 0x33, 0xe3, 0x24, 0x91, 0x5f, 0xb8, 0x4a, 0x12, 0x5f, 0xea, 0x24, 0x7d, 0xdb, 0xd9, 0xdd, - 0xc7, 0xb1, 0x9b, 0xfc, 0x3f, 0x09, 0x49, 0x8c, 0xd9, 0x89, 0x5c, 0x1a, 0xcc, 0x0d, 0x38, 0x5f, - 0xc7, 0x5d, 0xc9, 0xff, 0x42, 0x1f, 0x3e, 0x1a, 0xc3, 0x22, 0x7b, 0x43, 0xcf, 0x77, 0x4f, 0x44, - 0xfc, 0x87, 0x45, 0xe5, 0x52, 0xe6, 0xca, 0x7f, 0xae, 0xc0, 0xe9, 0x58, 0x80, 0x98, 0x9d, 0x70, - 0x84, 0xb4, 0x5e, 0x58, 0xb4, 0x96, 0xd4, 0x84, 0x20, 0xe4, 0x75, 0xc3, 0xcf, 0xa7, 0xb5, 0x07, - 0x68, 0x6e, 0x27, 0x96, 0x56, 0xcf, 0x42, 0x77, 0x2c, 0xbe, 0x2d, 0xc4, 0x77, 0x12, 0x25, 0x97, - 0x7b, 0xa7, 0x43, 0xa3, 0x91, 0x4d, 0x1c, 0x79, 0x8e, 0x8b, 0xad, 0xee, 0x58, 0x3d, 0x0e, 0xbc, - 0xed, 0x88, 0x63, 0xbc, 0x79, 0x8e, 0xcd, 0x70, 0xfb, 0x4a, 
0x72, 0x9b, 0xcb, 0x72, 0xfb, 0x4a, - 0x70, 0xd3, 0x38, 0x87, 0xde, 0x40, 0xe2, 0xcc, 0x8f, 0x71, 0x1e, 0x7a, 0x03, 0x8e, 0x33, 0xf6, - 0xaa, 0xbc, 0x31, 0xd4, 0xae, 0x7e, 0x0d, 0x67, 0xf3, 0xb3, 0xcd, 0x8f, 0xc9, 0x67, 0x32, 0x96, - 0x79, 0xbe, 0xe8, 0x4e, 0x85, 0xb3, 0xf6, 0xb0, 0xb8, 0xec, 0xc6, 0xf7, 0x8a, 0x67, 0x5b, 0xd7, - 0xb9, 0xa2, 0x41, 0xf2, 0x97, 0x93, 0xcf, 0x8a, 0xcb, 0x3e, 0xc6, 0x25, 0xe5, 0x68, 0xc1, 0x17, - 0x8a, 0x21, 0x50, 0xbc, 0xc9, 0xfc, 0x26, 0xcd, 0xaf, 0x0a, 0x83, 0xdf, 0x23, 0x1a, 0xe7, 0x35, - 0x25, 0x57, 0x98, 0x63, 0xd1, 0x9a, 0x57, 0x62, 0x79, 0xa1, 0xab, 0xce, 0x43, 0x59, 0x27, 0xa8, - 0xaf, 0x5c, 0x49, 0xdb, 0x56, 0x25, 0xad, 0x2e, 0xd5, 0x0f, 0xc8, 0x48, 0xf8, 0xec, 0x8c, 0x2c, - 0xd5, 0x3f, 0x24, 0x23, 0x73, 0xa7, 0x10, 0x71, 0x72, 0x69, 0x2a, 0x76, 0x11, 0xcc, 0x70, 0x67, - 0x57, 0x29, 0x5f, 0x8c, 0xd1, 0x39, 0x00, 0x2f, 0xb1, 0x5d, 0xb1, 0xe7, 0x72, 0x51, 0x1d, 0xab, - 0xeb, 0x29, 0x27, 0x70, 0xcd, 0x5f, 0xb4, 0xc6, 0x0c, 0x6f, 0xfa, 0x74, 0xef, 0x04, 0xbd, 0x32, - 0xab, 0x45, 0x3b, 0xa7, 0x45, 0xb6, 0x66, 0x9f, 0xc9, 0xd7, 0xec, 0x99, 0x20, 0xca, 0x2e, 0xa7, - 0x2e, 0x35, 0x3f, 0xa0, 0x27, 0x57, 0xcf, 0x95, 0x53, 0xf3, 0x98, 0xbb, 0x92, 0xff, 0x2e, 0x9c, - 0xe5, 0x06, 0x97, 0x50, 0x51, 0x2d, 0x34, 0xaf, 0xa8, 0xfe, 0x3a, 0x0d, 0xeb, 0xd5, 0xc4, 0x4d, - 0xaa, 0xaa, 0xf7, 0xc0, 0x48, 0xab, 0x16, 0x7e, 0x34, 0x26, 0x0c, 0x07, 0x51, 0x7a, 0x38, 0xca, - 0x33, 0xf4, 0x8c, 0x2a, 0x61, 0x1e, 0xe8, 0x79, 0x7d, 0x42, 0x96, 0x4a, 0x9e, 0x76, 0xa9, 0xe4, - 0xe1, 0x02, 0x5c, 0xcc, 0xea, 0x04, 0xc8, 0x3b, 0xdc, 0x19, 0x17, 0xb3, 0x3a, 0x01, 0x29, 0xb1, - 0x10, 0x20, 0xbd, 0xb6, 0xa7, 0xf0, 0x85, 0x80, 0x73, 0x00, 0xea, 0x7a, 0x35, 0x0c, 0x75, 0x09, - 0xd7, 0x95, 0x97, 0xab, 0x61, 0x58, 0x7b, 0xcb, 0x9c, 0xaf, 0xbd, 0x65, 0xe6, 0x77, 0xb3, 0x53, - 0xda, 0xcd, 0xcf, 0x00, 0x6e, 0x79, 0xc9, 0x81, 0x34, 0x32, 0xbf, 0xd6, 0xba, 0x5e, 0xac, 0xde, - 0x0d, 0xf8, 0x90, 0x43, 0xb0, 0xef, 0x2b, 0xd3, 0xf1, 0x21, 0x0f, 0x9f, 0x61, 0x42, 0x5c, 0x65, - 0x1d, 0x31, 0xe6, 0xb0, 0x41, 0x4c, 0x88, 0x32, 0x80, 0x18, 0x9b, 0xbf, 0x6d, 0x41, 0xf7, 0x1e, - 0x09, 0x14, 0xe7, 0xf3, 0x00, 0x8f, 0x68, 0x4c, 0x87, 0xcc, 0x0b, 0x89, 0xbc, 0x85, 0xcf, 0x5a, - 0x19, 0xc8, 0x77, 0x97, 0x23, 0x52, 0x03, 0xf1, 0x07, 0xca, 0x98, 0x62, 0xcc, 0x61, 0xfb, 0x04, - 0x47, 0xca, 0x7e, 0x62, 0x8c, 0x56, 0x61, 0x36, 0x61, 0xd8, 0x39, 0x10, 0xc6, 0x9a, 0xb1, 0xe4, - 0x87, 0xf9, 0xa7, 0x16, 0x80, 0x45, 0x02, 0xca, 0x84, 0xaf, 0xf1, 0xdb, 0xed, 0x1e, 0x76, 0x0e, - 0x78, 0xbd, 0xc0, 0x46, 0x11, 0x51, 0x96, 0xe8, 0x29, 0xd8, 0x83, 0x51, 0x24, 0x76, 0x48, 0xa3, - 0xa8, 0xfc, 0xd5, 0xb5, 0xba, 0x0a, 0x22, 0x2b, 0x03, 0x1d, 0xca, 0x5d, 0x8b, 0x0f, 0x33, 0x39, - 0x4d, 0x2e, 0x5b, 0xe7, 0xb4, 0xb3, 0xd0, 0x2d, 0xba, 0x82, 0x48, 0x05, 0xc2, 0x0f, 0x2e, 0xc2, - 0x62, 0x40, 0x5d, 0x6f, 0xe0, 0x11, 0x57, 0x38, 0x9a, 0x52, 0x65, 0x41, 0x03, 0xb9, 0x73, 0xa1, - 0x75, 0xe8, 0x92, 0x27, 0x8c, 0x84, 0xa9, 0x0f, 0x74, 0xad, 0x31, 0xc0, 0xfc, 0x1c, 0x40, 0x97, - 0xd1, 0x03, 0x8a, 0xb6, 0x60, 0x96, 0x33, 0xd7, 0x8f, 0x94, 0xeb, 0xe5, 0x47, 0xca, 0xb1, 0x19, - 0x2c, 0x89, 0x9a, 0x4d, 0x40, 0xd3, 0xf9, 0x04, 0xf4, 0x6d, 0x0b, 0x36, 0xd4, 0xe5, 0xd0, 0x23, - 0xf1, 0x3d, 0x7a, 0xc8, 0x2f, 0x0a, 0x0f, 0xa8, 0x64, 0x71, 0x22, 0x79, 0xf1, 0x1d, 0xe8, 0xbb, - 0x24, 0x61, 0x5e, 0x28, 0xca, 0x43, 0x5b, 0x9b, 0x3c, 0xc4, 0x01, 0x51, 0xc6, 0x5d, 0xcb, 0xcc, - 0xdf, 0x94, 0xd3, 0x3b, 0x38, 0x20, 0xe8, 0x2a, 0xac, 0x1c, 0x10, 0x12, 0xd9, 0x3e, 0x75, 0xb0, - 0x6f, 0xeb, 0x88, 0x53, 0xb7, 0x9f, 0x65, 0x3e, 0xf5, 0x11, 0x9f, 0xb9, 0x25, 0xa3, 
0xce, 0x4c, - 0xe0, 0x85, 0x23, 0x34, 0x51, 0x59, 0x67, 0x1d, 0xba, 0x51, 0x4c, 0x1d, 0x92, 0x70, 0x8f, 0x6c, - 0x89, 0x43, 0x68, 0x0c, 0x40, 0xd7, 0x61, 0x25, 0xfd, 0xf8, 0x98, 0xc4, 0x0e, 0x09, 0x19, 0x7e, - 0x24, 0xdf, 0x22, 0xa7, 0xad, 0xaa, 0x29, 0xf3, 0x57, 0x2d, 0x30, 0x4b, 0x52, 0xef, 0xc4, 0x34, - 0x38, 0x41, 0x0b, 0x5e, 0x83, 0x55, 0x61, 0x87, 0x58, 0xb0, 0x1c, 0x1b, 0x42, 0x16, 0x29, 0xa7, - 0xf9, 0x9c, 0x94, 0xa6, 0x2d, 0x31, 0x84, 0x8b, 0x47, 0xae, 0xe9, 0x5f, 0x64, 0x8b, 0xbf, 0x2f, - 0xc0, 0xc2, 0x27, 0x43, 0x12, 0x8f, 0x32, 0x8f, 0x98, 0x09, 0x51, 0x5a, 0xe8, 0x57, 0xf8, 0x0c, - 0x84, 0xe7, 0xd1, 0x41, 0x4c, 0x03, 0x3b, 0x7d, 0xa8, 0x9f, 0x16, 0x28, 0x3d, 0x0e, 0xbc, 0x23, - 0x1f, 0xeb, 0xd1, 0xfb, 0x30, 0x37, 0xf0, 0x7c, 0x46, 0xe4, 0xd3, 0x78, 0x6f, 0xeb, 0xa5, 0xb2, - 0xbf, 0x67, 0x65, 0x6e, 0xde, 0x11, 0xc8, 0x96, 0x22, 0x42, 0x7b, 0xb0, 0xe2, 0x85, 0x91, 0x28, - 0xac, 0x62, 0x0f, 0xfb, 0xde, 0xd3, 0xf1, 0x33, 0x5c, 0x6f, 0xeb, 0xf5, 0x09, 0xbc, 0xee, 0x72, - 0xca, 0xdd, 0x2c, 0xa1, 0x85, 0xbc, 0x12, 0x0c, 0x11, 0x58, 0xa5, 0x43, 0x56, 0x16, 0x32, 0x2b, - 0x84, 0x6c, 0x4d, 0x10, 0x72, 0x5f, 0x90, 0xe6, 0xa5, 0xac, 0xd0, 0x32, 0xd0, 0xd8, 0x81, 0x39, - 0xa9, 0x1c, 0xcf, 0x80, 0x03, 0x8f, 0xf8, 0xba, 0xb9, 0x20, 0x3f, 0x78, 0x90, 0xd3, 0x88, 0xc4, - 0x38, 0xd4, 0xc9, 0x4c, 0x7f, 0x72, 0xfc, 0x43, 0xec, 0x0f, 0x75, 0xbc, 0xc9, 0x0f, 0xe3, 0x8f, - 0xb3, 0x80, 0xca, 0x1a, 0xea, 0xb7, 0xc5, 0x98, 0x24, 0x3c, 0x41, 0x64, 0xb3, 0xe7, 0x52, 0x06, - 0x2e, 0x32, 0xe8, 0x0f, 0xa0, 0xeb, 0x24, 0x87, 0xb6, 0x30, 0x89, 0x90, 0xd9, 0xdb, 0x7a, 0xf7, - 0xd8, 0x26, 0xdd, 0xdc, 0xde, 0x7d, 0x28, 0xa0, 0x56, 0xc7, 0x49, 0x0e, 0xc5, 0x08, 0x7d, 0x0e, - 0xf0, 0x55, 0x42, 0x43, 0xc5, 0x59, 0x6e, 0xfc, 0x7b, 0xc7, 0xe7, 0xfc, 0xfd, 0xdd, 0xfb, 0x3b, - 0x92, 0x75, 0x97, 0xb3, 0x93, 0xbc, 0x1d, 0x58, 0x8c, 0x70, 0xfc, 0x78, 0x48, 0x98, 0x62, 0x2f, - 0x7d, 0xe1, 0x83, 0xe3, 0xb3, 0xff, 0x58, 0xb2, 0x91, 0x12, 0x16, 0xa2, 0xcc, 0x97, 0xf1, 0xed, - 0x34, 0x74, 0xb4, 0x5e, 0xbc, 0x36, 0x13, 0x1e, 0x2e, 0x5f, 0x28, 0x6c, 0x2f, 0x1c, 0x50, 0x65, - 0xd1, 0x53, 0x1c, 0x2e, 0x1f, 0x29, 0x44, 0x6e, 0xbf, 0x0c, 0xcb, 0x31, 0x71, 0x68, 0xec, 0xf2, - 0x1b, 0xac, 0x17, 0x78, 0xdc, 0xed, 0xe5, 0x5e, 0x2e, 0x49, 0xf8, 0x2d, 0x0d, 0x46, 0xaf, 0xc0, - 0x92, 0xd8, 0xf6, 0x0c, 0x66, 0x5b, 0xf3, 0x24, 0x7e, 0x06, 0xf1, 0x32, 0x2c, 0x3f, 0x1e, 0xf2, - 0xbc, 0xe1, 0xec, 0xe3, 0x18, 0x3b, 0x8c, 0xa6, 0x6f, 0x05, 0x4b, 0x02, 0xbe, 0x9d, 0x82, 0xd1, - 0x9b, 0xb0, 0x26, 0x51, 0x49, 0xe2, 0xe0, 0x28, 0xa5, 0x20, 0xb1, 0x2a, 0x25, 0x57, 0xc5, 0xec, - 0x6d, 0x31, 0xb9, 0xad, 0xe7, 0x90, 0x01, 0x1d, 0x87, 0x06, 0x01, 0x09, 0x59, 0x22, 0x0e, 0xb7, - 0xae, 0x95, 0x7e, 0xa3, 0x1b, 0x70, 0x0e, 0xfb, 0x3e, 0xfd, 0xda, 0x16, 0x94, 0xae, 0x5d, 0xd2, - 0x4e, 0x16, 0x96, 0x86, 0x40, 0xfa, 0x44, 0xe0, 0x58, 0x79, 0x45, 0x8d, 0x0b, 0xd0, 0x4d, 0xf7, - 0x91, 0xdf, 0x07, 0x32, 0x0e, 0x29, 0xc6, 0xc6, 0x29, 0x58, 0xc8, 0xee, 0x84, 0xf1, 0xb7, 0x36, - 0xac, 0x54, 0x04, 0x15, 0xfa, 0x02, 0x80, 0x7b, 0xab, 0x0c, 0x2d, 0xe5, 0xae, 0xff, 0x73, 0xfc, - 0xe0, 0xe4, 0xfe, 0x2a, 0xc1, 0x16, 0xf7, 0x7e, 0x39, 0x44, 0x3f, 0x82, 0x9e, 0xf0, 0x58, 0xc5, - 0x5d, 0xba, 0xec, 0xfb, 0xdf, 0x81, 0x3b, 0xd7, 0x55, 0xb1, 0x17, 0x31, 0x20, 0xc7, 0xc6, 0x5f, - 0x5a, 0xd0, 0x4d, 0x05, 0xf3, 0xdb, 0x8d, 0xdc, 0x28, 0xb1, 0xd7, 0x89, 0xbe, 0xdd, 0x08, 0xd8, - 0x1d, 0x01, 0xfa, 0x8f, 0x74, 0x25, 0xe3, 0x6d, 0x80, 0xb1, 0xfe, 0x95, 0x2a, 0xb4, 0x2a, 0x55, - 0x30, 0x2f, 0xc3, 0x22, 0xb7, 0xac, 0x47, 0xdc, 0x5d, 0x16, 0x7b, 0x91, 0x68, 0x93, 0x4a, 0x9c, - 0x44, 0x95, 
0x87, 0xfa, 0x73, 0xeb, 0x0f, 0x06, 0x2c, 0x64, 0x9f, 0xc7, 0xd0, 0x97, 0xd0, 0xcb, - 0xb4, 0x83, 0xd1, 0x8b, 0xe5, 0x4d, 0x2b, 0xb7, 0x97, 0x8d, 0x97, 0x26, 0x60, 0xa9, 0x0a, 0x6a, - 0x0a, 0x85, 0x70, 0xba, 0xd4, 0x53, 0x45, 0x57, 0xca, 0xd4, 0x75, 0x1d, 0x5b, 0xe3, 0xd5, 0x46, - 0xb8, 0xa9, 0x3c, 0x06, 0x2b, 0x15, 0x4d, 0x52, 0xf4, 0xda, 0x04, 0x2e, 0xb9, 0x46, 0xad, 0x71, - 0xb5, 0x21, 0x76, 0x2a, 0xf5, 0x31, 0xa0, 0x72, 0x07, 0x15, 0xbd, 0x3a, 0x91, 0xcd, 0xb8, 0x43, - 0x6b, 0xbc, 0xd6, 0x0c, 0xb9, 0x56, 0x51, 0xd9, 0x5b, 0x9d, 0xa8, 0x68, 0xae, 0x7b, 0x3b, 0x51, - 0xd1, 0x42, 0xc3, 0x76, 0x0a, 0x1d, 0xc0, 0x72, 0xb1, 0xef, 0x8a, 0x2e, 0xd7, 0xfd, 0x27, 0x50, - 0x6a, 0xeb, 0x1a, 0x57, 0x9a, 0xa0, 0xa6, 0xc2, 0x08, 0x9c, 0xca, 0xf7, 0x39, 0xd1, 0x2b, 0x65, - 0xfa, 0xca, 0x4e, 0xaf, 0x71, 0x69, 0x32, 0x62, 0x56, 0xa7, 0x62, 0xef, 0xb3, 0x4a, 0xa7, 0x9a, - 0xc6, 0x6a, 0x95, 0x4e, 0x75, 0xad, 0x54, 0x73, 0x0a, 0xfd, 0x58, 0x37, 0xd4, 0x0a, 0x3d, 0x41, - 0xb4, 0x59, 0xc7, 0xa6, 0xba, 0x29, 0x69, 0x5c, 0x6b, 0x8c, 0xaf, 0x65, 0x5f, 0x6f, 0xf1, 0x58, - 0xcf, 0xb4, 0x06, 0xab, 0x62, 0xbd, 0xdc, 0x6c, 0xac, 0x8a, 0xf5, 0xaa, 0xfe, 0xe2, 0x14, 0xda, - 0x83, 0xc5, 0x5c, 0xb3, 0x10, 0xbd, 0x5c, 0x47, 0x99, 0x7f, 0xdd, 0x33, 0x5e, 0x99, 0x88, 0x97, - 0xca, 0xb0, 0x75, 0xf6, 0x52, 0xe9, 0xaa, 0x76, 0x71, 0xf9, 0x7c, 0xf5, 0xf2, 0x24, 0xb4, 0x5c, - 0x28, 0x97, 0x5a, 0x8a, 0x95, 0xa1, 0x5c, 0xd7, 0xb2, 0xac, 0x0c, 0xe5, 0xfa, 0x2e, 0xe5, 0x14, - 0xfa, 0xa1, 0x2e, 0x70, 0x85, 0x23, 0x5c, 0xac, 0xa3, 0xce, 0xee, 0xfe, 0x8b, 0x47, 0x23, 0xa5, - 0xac, 0xbf, 0x86, 0xd5, 0xaa, 0x57, 0x28, 0x74, 0xb5, 0xaa, 0x6c, 0xae, 0x7d, 0xea, 0x32, 0x36, - 0x9b, 0xa2, 0xa7, 0x82, 0x3f, 0x85, 0x8e, 0x6e, 0xb9, 0xa1, 0x17, 0xca, 0xd4, 0x85, 0x26, 0xa5, - 0x61, 0x1e, 0x85, 0x92, 0x71, 0xe0, 0x40, 0xc7, 0xea, 0xb8, 0x17, 0x56, 0x1f, 0xab, 0xa5, 0xae, - 0x5d, 0x7d, 0xac, 0x96, 0x5b, 0x6b, 0x42, 0x5c, 0xea, 0x0c, 0xd9, 0xd6, 0x51, 0xbd, 0x33, 0x54, - 0x74, 0xc6, 0xea, 0x9d, 0xa1, 0xb2, 0x1b, 0x35, 0x85, 0x7e, 0x02, 0x6b, 0xd5, 0x1d, 0x23, 0x54, - 0x1b, 0xf1, 0x35, 0x9d, 0x2b, 0xe3, 0x7a, 0x73, 0x82, 0x54, 0xfc, 0x53, 0x9d, 0x9f, 0x0a, 0x1d, - 0xa3, 0xfa, 0xfc, 0x54, 0xdd, 0xb7, 0x32, 0xae, 0x35, 0xc6, 0x2f, 0x87, 0x5e, 0xb6, 0xa5, 0x52, - 0x6f, 0xed, 0x8a, 0x2e, 0x54, 0xbd, 0xb5, 0x2b, 0xbb, 0x34, 0x22, 0x3e, 0xaa, 0xda, 0x25, 0x55, - 0xf1, 0x71, 0x44, 0x3f, 0xc7, 0xd8, 0x6c, 0x8a, 0x9e, 0x3b, 0xbe, 0xcb, 0xfd, 0x10, 0x34, 0x71, - 0xfd, 0xb9, 0xcc, 0x7c, 0xb5, 0x21, 0x76, 0xfd, 0xee, 0xea, 0x4c, 0x3d, 0x51, 0x81, 0x42, 0xc6, - 0xbe, 0xd6, 0x18, 0x3f, 0x95, 0x1d, 0xe9, 0x9f, 0x31, 0x32, 0xbd, 0x0c, 0x74, 0x65, 0x02, 0x9f, - 0x4c, 0x2f, 0xc6, 0x78, 0xb5, 0x11, 0x6e, 0x55, 0xf4, 0x66, 0xbb, 0x0b, 0x47, 0xf9, 0x53, 0xa9, - 0x25, 0x72, 0x94, 0x3f, 0x55, 0x34, 0x2c, 0x2a, 0xa2, 0x57, 0x37, 0x15, 0x26, 0x47, 0x6f, 0xa1, - 0xb9, 0x31, 0x39, 0x7a, 0x4b, 0xfd, 0x8a, 0x29, 0xf4, 0xb3, 0x71, 0x93, 0xbe, 0xfc, 0x08, 0x88, - 0xb6, 0x6a, 0x53, 0x51, 0xed, 0xdb, 0xa7, 0xf1, 0xc6, 0xb1, 0x68, 0x32, 0xc6, 0xff, 0x65, 0x4b, - 0x77, 0xfc, 0x2a, 0x5f, 0xe1, 0xd0, 0x9b, 0x0d, 0x18, 0x97, 0x1e, 0x12, 0x8d, 0xb7, 0x8e, 0x49, - 0x95, 0x59, 0xd0, 0x47, 0x30, 0x2b, 0xaa, 0x4f, 0x74, 0xfe, 0xe8, 0xb2, 0xd4, 0xb8, 0x50, 0x3d, - 0x9f, 0x16, 0x57, 0x9c, 0xdb, 0xde, 0x9c, 0xf8, 0x1d, 0xf7, 0x8d, 0x7f, 0x06, 0x00, 0x00, 0xff, - 0xff, 0x96, 0x31, 0x6f, 0x58, 0xa5, 0x2b, 0x00, 0x00, -} diff --git a/weed/replication/repl_util/replication_util.go b/weed/replication/repl_util/replication_util.go new file mode 100644 index 000000000..519a9a201 --- /dev/null +++ 
b/weed/replication/repl_util/replication_util.go @@ -0,0 +1,42 @@ +package repl_util + +import ( + "github.com/chrislusf/seaweedfs/weed/filer" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/replication/source" + "github.com/chrislusf/seaweedfs/weed/util" +) + +func CopyFromChunkViews(chunkViews []*filer.ChunkView, filerSource *source.FilerSource, writeFunc func(data []byte) error) error { + + for _, chunk := range chunkViews { + + fileUrls, err := filerSource.LookupFileId(chunk.FileId) + if err != nil { + return err + } + + var writeErr error + var shouldRetry bool + + for _, fileUrl := range fileUrls { + shouldRetry, err = util.ReadUrlAsStream(fileUrl, nil, false, chunk.IsFullChunk(), chunk.Offset, int(chunk.Size), func(data []byte) { + writeErr = writeFunc(data) + }) + if err != nil { + glog.V(1).Infof("read from %s: %v", fileUrl, err) + } else if writeErr != nil { + glog.V(1).Infof("copy from %s: %v", fileUrl, writeErr) + } else { + break + } + } + if shouldRetry && err != nil { + return err + } + if writeErr != nil { + return writeErr + } + } + return nil +} diff --git a/weed/replication/replicator.go b/weed/replication/replicator.go index 7353cdc91..d7e609c68 100644 --- a/weed/replication/replicator.go +++ b/weed/replication/replicator.go @@ -3,8 +3,10 @@ package replication import ( "context" "fmt" - "path/filepath" + "github.com/chrislusf/seaweedfs/weed/pb" + "google.golang.org/grpc" "strings" + "time" "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" @@ -18,10 +20,10 @@ type Replicator struct { source *source.FilerSource } -func NewReplicator(sourceConfig util.Configuration, dataSink sink.ReplicationSink) *Replicator { +func NewReplicator(sourceConfig util.Configuration, configPrefix string, dataSink sink.ReplicationSink) *Replicator { source := &source.FilerSource{} - source.Initialize(sourceConfig) + source.Initialize(sourceConfig, configPrefix) dataSink.SetSourceFiler(source) @@ -32,37 +34,64 @@ func NewReplicator(sourceConfig util.Configuration, dataSink sink.ReplicationSin } func (r *Replicator) Replicate(ctx context.Context, key string, message *filer_pb.EventNotification) error { + if message.IsFromOtherCluster && r.sink.GetName() == "filer" { + return nil + } if !strings.HasPrefix(key, r.source.Dir) { glog.V(4).Infof("skipping %v outside of %v", key, r.source.Dir) return nil } - newKey := filepath.ToSlash(filepath.Join(r.sink.GetSinkToDirectory(), key[len(r.source.Dir):])) + var dateKey string + if r.sink.IsIncremental() { + var mTime int64 + if message.NewEntry != nil { + mTime = message.NewEntry.Attributes.Mtime + } else if message.OldEntry != nil { + mTime = message.OldEntry.Attributes.Mtime + } + dateKey = time.Unix(mTime, 0).Format("2006-01-02") + } + newKey := util.Join(r.sink.GetSinkToDirectory(), dateKey, key[len(r.source.Dir):]) glog.V(3).Infof("replicate %s => %s", key, newKey) key = newKey if message.OldEntry != nil && message.NewEntry == nil { glog.V(4).Infof("deleting %v", key) - return r.sink.DeleteEntry(ctx, key, message.OldEntry.IsDirectory, message.DeleteChunks) + return r.sink.DeleteEntry(key, message.OldEntry.IsDirectory, message.DeleteChunks, message.Signatures) } if message.OldEntry == nil && message.NewEntry != nil { glog.V(4).Infof("creating %v", key) - return r.sink.CreateEntry(ctx, key, message.NewEntry) + return r.sink.CreateEntry(key, message.NewEntry, message.Signatures) } if message.OldEntry == nil && message.NewEntry == nil { glog.V(0).Infof("weird message %+v", 
message) return nil } - foundExisting, err := r.sink.UpdateEntry(ctx, key, message.OldEntry, message.NewParentPath, message.NewEntry, message.DeleteChunks) + foundExisting, err := r.sink.UpdateEntry(key, message.OldEntry, message.NewParentPath, message.NewEntry, message.DeleteChunks, message.Signatures) if foundExisting { glog.V(4).Infof("updated %v", key) return err } - err = r.sink.DeleteEntry(ctx, key, message.OldEntry.IsDirectory, false) + err = r.sink.DeleteEntry(key, message.OldEntry.IsDirectory, false, message.Signatures) if err != nil { return fmt.Errorf("delete old entry %v: %v", key, err) } glog.V(4).Infof("creating missing %v", key) - return r.sink.CreateEntry(ctx, key, message.NewEntry) + return r.sink.CreateEntry(key, message.NewEntry, message.Signatures) +} + +func ReadFilerSignature(grpcDialOption grpc.DialOption, filer string) (filerSignature int32, readErr error) { + if readErr = pb.WithFilerClient(filer, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error { + if resp, err := client.GetFilerConfiguration(context.Background(), &filer_pb.GetFilerConfigurationRequest{}); err != nil { + return fmt.Errorf("GetFilerConfiguration %s: %v", filer, err) + } else { + filerSignature = resp.Signature + } + return nil + }); readErr != nil { + return 0, readErr + } + return filerSignature, nil } diff --git a/weed/replication/sink/azuresink/azure_sink.go b/weed/replication/sink/azuresink/azure_sink.go index 6381908a1..d13a1049b 100644 --- a/weed/replication/sink/azuresink/azure_sink.go +++ b/weed/replication/sink/azuresink/azure_sink.go @@ -4,11 +4,12 @@ import ( "bytes" "context" "fmt" + "github.com/chrislusf/seaweedfs/weed/replication/repl_util" "net/url" "strings" "github.com/Azure/azure-storage-blob-go/azblob" - "github.com/chrislusf/seaweedfs/weed/filer2" + "github.com/chrislusf/seaweedfs/weed/filer" "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" "github.com/chrislusf/seaweedfs/weed/replication/sink" @@ -17,10 +18,11 @@ import ( ) type AzureSink struct { - containerURL azblob.ContainerURL - container string - dir string - filerSource *source.FilerSource + containerURL azblob.ContainerURL + container string + dir string + filerSource *source.FilerSource + isIncremental bool } func init() { @@ -35,12 +37,17 @@ func (g *AzureSink) GetSinkToDirectory() string { return g.dir } -func (g *AzureSink) Initialize(configuration util.Configuration) error { +func (g *AzureSink) IsIncremental() bool { + return g.isIncremental +} + +func (g *AzureSink) Initialize(configuration util.Configuration, prefix string) error { + g.isIncremental = configuration.GetBool(prefix + "is_incremental") return g.initialize( - configuration.GetString("account_name"), - configuration.GetString("account_key"), - configuration.GetString("container"), - configuration.GetString("directory"), + configuration.GetString(prefix+"account_name"), + configuration.GetString(prefix+"account_key"), + configuration.GetString(prefix+"container"), + configuration.GetString(prefix+"directory"), ) } @@ -70,7 +77,7 @@ func (g *AzureSink) initialize(accountName, accountKey, container, dir string) e return nil } -func (g *AzureSink) DeleteEntry(ctx context.Context, key string, isDirectory, deleteIncludeChunks bool) error { +func (g *AzureSink) DeleteEntry(key string, isDirectory, deleteIncludeChunks bool, signatures []int32) error { key = cleanKey(key) @@ -78,7 +85,7 @@ func (g *AzureSink) DeleteEntry(ctx context.Context, key string, isDirectory, de key = key + "/" } - if _, err := 
g.containerURL.NewBlobURL(key).Delete(ctx, + if _, err := g.containerURL.NewBlobURL(key).Delete(context.Background(), azblob.DeleteSnapshotsOptionInclude, azblob.BlobAccessConditions{}); err != nil { return fmt.Errorf("azure delete %s/%s: %v", g.container, key, err) } @@ -87,7 +94,7 @@ func (g *AzureSink) DeleteEntry(ctx context.Context, key string, isDirectory, de } -func (g *AzureSink) CreateEntry(ctx context.Context, key string, entry *filer_pb.Entry) error { +func (g *AzureSink) CreateEntry(key string, entry *filer_pb.Entry, signatures []int32) error { key = cleanKey(key) @@ -95,44 +102,32 @@ func (g *AzureSink) CreateEntry(ctx context.Context, key string, entry *filer_pb return nil } - totalSize := filer2.TotalSize(entry.Chunks) - chunkViews := filer2.ViewFromChunks(entry.Chunks, 0, int(totalSize)) + totalSize := filer.FileSize(entry) + chunkViews := filer.ViewFromChunks(g.filerSource.LookupFileId, entry.Chunks, 0, int64(totalSize)) // Create a URL that references a to-be-created blob in your // Azure Storage account's container. appendBlobURL := g.containerURL.NewAppendBlobURL(key) - _, err := appendBlobURL.Create(ctx, azblob.BlobHTTPHeaders{}, azblob.Metadata{}, azblob.BlobAccessConditions{}) + _, err := appendBlobURL.Create(context.Background(), azblob.BlobHTTPHeaders{}, azblob.Metadata{}, azblob.BlobAccessConditions{}) if err != nil { return err } - for _, chunk := range chunkViews { - - fileUrl, err := g.filerSource.LookupFileId(ctx, chunk.FileId) - if err != nil { - return err - } - - var writeErr error - _, readErr := util.ReadUrlAsStream(fileUrl, chunk.Offset, int(chunk.Size), func(data []byte) { - _, writeErr = appendBlobURL.AppendBlock(ctx, bytes.NewReader(data), azblob.AppendBlobAccessConditions{}, nil) - }) - - if readErr != nil { - return readErr - } - if writeErr != nil { - return writeErr - } + writeFunc := func(data []byte) error { + _, writeErr := appendBlobURL.AppendBlock(context.Background(), bytes.NewReader(data), azblob.AppendBlobAccessConditions{}, nil) + return writeErr + } + if err := repl_util.CopyFromChunkViews(chunkViews, g.filerSource, writeFunc); err != nil { + return err } return nil } -func (g *AzureSink) UpdateEntry(ctx context.Context, key string, oldEntry *filer_pb.Entry, newParentPath string, newEntry *filer_pb.Entry, deleteIncludeChunks bool) (foundExistingEntry bool, err error) { +func (g *AzureSink) UpdateEntry(key string, oldEntry *filer_pb.Entry, newParentPath string, newEntry *filer_pb.Entry, deleteIncludeChunks bool, signatures []int32) (foundExistingEntry bool, err error) { key = cleanKey(key) // TODO improve efficiency return false, nil diff --git a/weed/replication/sink/b2sink/b2_sink.go b/weed/replication/sink/b2sink/b2_sink.go index 35c2230fa..90a0bb2e8 100644 --- a/weed/replication/sink/b2sink/b2_sink.go +++ b/weed/replication/sink/b2sink/b2_sink.go @@ -2,9 +2,10 @@ package B2Sink import ( "context" + "github.com/chrislusf/seaweedfs/weed/replication/repl_util" "strings" - "github.com/chrislusf/seaweedfs/weed/filer2" + "github.com/chrislusf/seaweedfs/weed/filer" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" "github.com/chrislusf/seaweedfs/weed/replication/sink" "github.com/chrislusf/seaweedfs/weed/replication/source" @@ -13,10 +14,11 @@ import ( ) type B2Sink struct { - client *b2.Client - bucket string - dir string - filerSource *source.FilerSource + client *b2.Client + bucket string + dir string + filerSource *source.FilerSource + isIncremental bool } func init() { @@ -31,12 +33,17 @@ func (g *B2Sink) GetSinkToDirectory() string { 
return g.dir } -func (g *B2Sink) Initialize(configuration util.Configuration) error { +func (g *B2Sink) IsIncremental() bool { + return g.isIncremental +} + +func (g *B2Sink) Initialize(configuration util.Configuration, prefix string) error { + g.isIncremental = configuration.GetBool(prefix + "is_incremental") return g.initialize( - configuration.GetString("b2_account_id"), - configuration.GetString("b2_master_application_key"), - configuration.GetString("bucket"), - configuration.GetString("directory"), + configuration.GetString(prefix+"b2_account_id"), + configuration.GetString(prefix+"b2_master_application_key"), + configuration.GetString(prefix+"bucket"), + configuration.GetString(prefix+"directory"), ) } @@ -45,8 +52,7 @@ func (g *B2Sink) SetSourceFiler(s *source.FilerSource) { } func (g *B2Sink) initialize(accountId, accountKey, bucket, dir string) error { - ctx := context.Background() - client, err := b2.NewClient(ctx, accountId, accountKey) + client, err := b2.NewClient(context.Background(), accountId, accountKey) if err != nil { return err } @@ -58,7 +64,7 @@ func (g *B2Sink) initialize(accountId, accountKey, bucket, dir string) error { return nil } -func (g *B2Sink) DeleteEntry(ctx context.Context, key string, isDirectory, deleteIncludeChunks bool) error { +func (g *B2Sink) DeleteEntry(key string, isDirectory, deleteIncludeChunks bool, signatures []int32) error { key = cleanKey(key) @@ -66,18 +72,18 @@ func (g *B2Sink) DeleteEntry(ctx context.Context, key string, isDirectory, delet key = key + "/" } - bucket, err := g.client.Bucket(ctx, g.bucket) + bucket, err := g.client.Bucket(context.Background(), g.bucket) if err != nil { return err } targetObject := bucket.Object(key) - return targetObject.Delete(ctx) + return targetObject.Delete(context.Background()) } -func (g *B2Sink) CreateEntry(ctx context.Context, key string, entry *filer_pb.Entry) error { +func (g *B2Sink) CreateEntry(key string, entry *filer_pb.Entry, signatures []int32) error { key = cleanKey(key) @@ -85,46 +91,33 @@ func (g *B2Sink) CreateEntry(ctx context.Context, key string, entry *filer_pb.En return nil } - totalSize := filer2.TotalSize(entry.Chunks) - chunkViews := filer2.ViewFromChunks(entry.Chunks, 0, int(totalSize)) + totalSize := filer.FileSize(entry) + chunkViews := filer.ViewFromChunks(g.filerSource.LookupFileId, entry.Chunks, 0, int64(totalSize)) - bucket, err := g.client.Bucket(ctx, g.bucket) + bucket, err := g.client.Bucket(context.Background(), g.bucket) if err != nil { return err } targetObject := bucket.Object(key) - writer := targetObject.NewWriter(ctx) - - for _, chunk := range chunkViews { - - fileUrl, err := g.filerSource.LookupFileId(ctx, chunk.FileId) - if err != nil { - return err - } - - var writeErr error - _, readErr := util.ReadUrlAsStream(fileUrl, chunk.Offset, int(chunk.Size), func(data []byte) { - _, err := writer.Write(data) - if err != nil { - writeErr = err - } - }) - - if readErr != nil { - return readErr - } - if writeErr != nil { - return writeErr - } + writer := targetObject.NewWriter(context.Background()) + writeFunc := func(data []byte) error { + _, writeErr := writer.Write(data) + return writeErr } - return writer.Close() + defer writer.Close() + + if err := repl_util.CopyFromChunkViews(chunkViews, g.filerSource, writeFunc); err != nil { + return err + } + + return nil } -func (g *B2Sink) UpdateEntry(ctx context.Context, key string, oldEntry *filer_pb.Entry, newParentPath string, newEntry *filer_pb.Entry, deleteIncludeChunks bool) (foundExistingEntry bool, err error) { +func 
(g *B2Sink) UpdateEntry(key string, oldEntry *filer_pb.Entry, newParentPath string, newEntry *filer_pb.Entry, deleteIncludeChunks bool, signatures []int32) (foundExistingEntry bool, err error) {
 	key = cleanKey(key)
diff --git a/weed/replication/sink/filersink/README.txt b/weed/replication/sink/filersink/README.txt
new file mode 100644
index 000000000..4ba0fc752
--- /dev/null
+++ b/weed/replication/sink/filersink/README.txt
@@ -0,0 +1,12 @@
+How replication works
+======
+
+All metadata changes within the current cluster are notified to a message queue.
+
+If a metadata change originates from another cluster, it is not re-published to the message queue.
+
+So active<=>active replication is possible.
+
+
+All metadata changes are published as metadata change events.
+So all mounts listening for metadata changes will get updated.
\ No newline at end of file
diff --git a/weed/replication/sink/filersink/fetch_write.go b/weed/replication/sink/filersink/fetch_write.go
index 97e9671a3..a7392d856 100644
--- a/weed/replication/sink/filersink/fetch_write.go
+++ b/weed/replication/sink/filersink/fetch_write.go
@@ -3,41 +3,46 @@ package filersink
 import (
 	"context"
 	"fmt"
-	"google.golang.org/grpc"
-	"strings"
+	"github.com/chrislusf/seaweedfs/weed/util"
 	"sync"
+	"google.golang.org/grpc"
+
 	"github.com/chrislusf/seaweedfs/weed/glog"
 	"github.com/chrislusf/seaweedfs/weed/operation"
+	"github.com/chrislusf/seaweedfs/weed/pb"
 	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
 	"github.com/chrislusf/seaweedfs/weed/security"
-	"github.com/chrislusf/seaweedfs/weed/util"
 )
 
-func (fs *FilerSink) replicateChunks(ctx context.Context, sourceChunks []*filer_pb.FileChunk) (replicatedChunks []*filer_pb.FileChunk, err error) {
+func (fs *FilerSink) replicateChunks(sourceChunks []*filer_pb.FileChunk, path string) (replicatedChunks []*filer_pb.FileChunk, err error) {
 	if len(sourceChunks) == 0 {
 		return
 	}
+
+	replicatedChunks = make([]*filer_pb.FileChunk, len(sourceChunks))
+
 	var wg sync.WaitGroup
-	for _, sourceChunk := range sourceChunks {
+	for chunkIndex, sourceChunk := range sourceChunks {
 		wg.Add(1)
-		go func(chunk *filer_pb.FileChunk) {
+		go func(chunk *filer_pb.FileChunk, index int) {
 			defer wg.Done()
-			replicatedChunk, e := fs.replicateOneChunk(ctx, chunk)
+			replicatedChunk, e := fs.replicateOneChunk(chunk, path)
 			if e != nil {
 				err = e
+				return
 			}
-			replicatedChunks = append(replicatedChunks, replicatedChunk)
-		}(sourceChunk)
+			replicatedChunks[index] = replicatedChunk
+		}(sourceChunk, chunkIndex)
 	}
 	wg.Wait()
 
 	return
}
 
-func (fs *FilerSink) replicateOneChunk(ctx context.Context, sourceChunk *filer_pb.FileChunk) (*filer_pb.FileChunk, error) {
+func (fs *FilerSink) replicateOneChunk(sourceChunk *filer_pb.FileChunk, path string) (*filer_pb.FileChunk, error) {
 
-	fileId, err := fs.fetchAndWrite(ctx, sourceChunk)
+	fileId, err := fs.fetchAndWrite(sourceChunk, path)
 	if err != nil {
 		return nil, fmt.Errorf("copy %s: %v", sourceChunk.GetFileIdString(), err)
 	}
@@ -49,21 +54,23 @@ func (fs *FilerSink) replicateOneChunk(ctx context.Context, sourceChunk *filer_p
 		Mtime:        sourceChunk.Mtime,
 		ETag:         sourceChunk.ETag,
 		SourceFileId: sourceChunk.GetFileIdString(),
+		CipherKey:    sourceChunk.CipherKey,
+		IsCompressed: sourceChunk.IsCompressed,
 	}, nil
}
 
-func (fs *FilerSink) fetchAndWrite(ctx context.Context, sourceChunk *filer_pb.FileChunk) (fileId string, err error) {
+func (fs *FilerSink) fetchAndWrite(sourceChunk *filer_pb.FileChunk, path string) (fileId string, err error) {
 
-	filename, header, readCloser, err := 
fs.filerSource.ReadPart(ctx, sourceChunk.GetFileIdString())
+	filename, header, resp, err := fs.filerSource.ReadPart(sourceChunk.GetFileIdString())
 	if err != nil {
 		return "", fmt.Errorf("read part %s: %v", sourceChunk.GetFileIdString(), err)
 	}
-	defer readCloser.Close()
+	defer util.CloseResponse(resp)
 
 	var host string
 	var auth security.EncodedJwt
 
-	if err := fs.withFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error {
+	if err := fs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
 
 		request := &filer_pb.AssignVolumeRequest{
 			Count: 1,
@@ -71,13 +78,18 @@ func (fs *FilerSink) fetchAndWrite(ctx context.Context, sourceChunk *filer_pb.Fi
 			Collection: fs.collection,
 			TtlSec:     fs.ttlSec,
 			DataCenter: fs.dataCenter,
+			DiskType:   fs.diskType,
+			Path:       path,
 		}
 
-		resp, err := client.AssignVolume(ctx, request)
+		resp, err := client.AssignVolume(context.Background(), request)
 		if err != nil {
 			glog.V(0).Infof("assign volume failure %v: %v", request, err)
 			return err
 		}
+		if resp.Error != "" {
+			return fmt.Errorf("assign volume failure %v: %v", request, resp.Error)
+		}
 
 		fileId, host, auth = resp.FileId, resp.Url, security.EncodedJwt(resp.Auth)
 
@@ -87,13 +99,16 @@ func (fs *FilerSink) fetchAndWrite(ctx context.Context, sourceChunk *filer_pb.Fi
 	}
 
 	fileUrl := fmt.Sprintf("http://%s/%s", host, fileId)
+	if fs.writeChunkByFiler {
+		fileUrl = fmt.Sprintf("http://%s/?proxyChunkId=%s", fs.address, fileId)
+	}
 
 	glog.V(4).Infof("replicating %s to %s header:%+v", filename, fileUrl, header)
 
-	uploadResult, err := operation.Upload(fileUrl, filename, readCloser,
-		"gzip" == header.Get("Content-Encoding"), header.Get("Content-Type"), nil, auth)
+	// fetch the data as is, regardless of whether it is encrypted or not
+	uploadResult, err, _ := operation.Upload(fileUrl, filename, false, resp.Body, "gzip" == header.Get("Content-Encoding"), header.Get("Content-Type"), nil, auth)
 	if err != nil {
-		glog.V(0).Infof("upload data %v to %s: %v", filename, fileUrl, err)
+		glog.V(0).Infof("upload source data %v to %s: %v", sourceChunk.GetFileIdString(), fileUrl, err)
 		return "", fmt.Errorf("upload data: %v", err)
 	}
 	if uploadResult.Error != "" {
@@ -104,19 +119,16 @@ func (fs *FilerSink) fetchAndWrite(ctx context.Context, sourceChunk *filer_pb.Fi
 	return
}
 
-func (fs *FilerSink) withFilerClient(ctx context.Context, fn func(filer_pb.SeaweedFilerClient) error) error {
+var _ = filer_pb.FilerClient(&FilerSink{})
 
-	return util.WithCachedGrpcClient(ctx, func(grpcConnection *grpc.ClientConn) error {
+func (fs *FilerSink) WithFilerClient(fn func(filer_pb.SeaweedFilerClient) error) error {
+
+	return pb.WithCachedGrpcClient(func(grpcConnection *grpc.ClientConn) error {
 		client := filer_pb.NewSeaweedFilerClient(grpcConnection)
 		return fn(client)
 	}, fs.grpcAddress, fs.grpcDialOption)
}
-
-func volumeId(fileId string) string {
-	lastCommaIndex := strings.LastIndex(fileId, ",")
-	if lastCommaIndex > 0 {
-		return fileId[:lastCommaIndex]
-	}
-	return fileId
+func (fs *FilerSink) AdjustedUrl(location *filer_pb.Location) string {
+	return location.Url
}
diff --git a/weed/replication/sink/filersink/filer_sink.go b/weed/replication/sink/filersink/filer_sink.go
index f99c7fdf6..d7c5fccc3 100644
--- a/weed/replication/sink/filersink/filer_sink.go
+++ b/weed/replication/sink/filersink/filer_sink.go
@@ -3,11 +3,14 @@ package filersink
 import (
 	"context"
 	"fmt"
-	"github.com/chrislusf/seaweedfs/weed/security"
-	"github.com/spf13/viper"
+	"github.com/chrislusf/seaweedfs/weed/pb"
+	"github.com/chrislusf/seaweedfs/weed/wdclient"
+
 	"google.golang.org/grpc"
 
-	
"github.com/chrislusf/seaweedfs/weed/filer2" + "github.com/chrislusf/seaweedfs/weed/security" + + "github.com/chrislusf/seaweedfs/weed/filer" "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" "github.com/chrislusf/seaweedfs/weed/replication/sink" @@ -16,14 +19,18 @@ import ( ) type FilerSink struct { - filerSource *source.FilerSource - grpcAddress string - dir string - replication string - collection string - ttlSec int32 - dataCenter string - grpcDialOption grpc.DialOption + filerSource *source.FilerSource + grpcAddress string + dir string + replication string + collection string + ttlSec int32 + diskType string + dataCenter string + grpcDialOption grpc.DialOption + address string + writeChunkByFiler bool + isIncremental bool } func init() { @@ -38,58 +45,63 @@ func (fs *FilerSink) GetSinkToDirectory() string { return fs.dir } -func (fs *FilerSink) Initialize(configuration util.Configuration) error { - return fs.initialize( - configuration.GetString("grpcAddress"), - configuration.GetString("directory"), - configuration.GetString("replication"), - configuration.GetString("collection"), - configuration.GetInt("ttlSec"), - ) +func (fs *FilerSink) IsIncremental() bool { + return fs.isIncremental +} + +func (fs *FilerSink) Initialize(configuration util.Configuration, prefix string) error { + fs.isIncremental = configuration.GetBool(prefix + "is_incremental") + return fs.DoInitialize( + "", + configuration.GetString(prefix+"grpcAddress"), + configuration.GetString(prefix+"directory"), + configuration.GetString(prefix+"replication"), + configuration.GetString(prefix+"collection"), + configuration.GetInt(prefix+"ttlSec"), + configuration.GetString(prefix+"disk"), + security.LoadClientTLS(util.GetViper(), "grpc.client"), + false) } func (fs *FilerSink) SetSourceFiler(s *source.FilerSource) { fs.filerSource = s } -func (fs *FilerSink) initialize(grpcAddress string, dir string, - replication string, collection string, ttlSec int) (err error) { +func (fs *FilerSink) DoInitialize(address, grpcAddress string, dir string, + replication string, collection string, ttlSec int, diskType string, grpcDialOption grpc.DialOption, writeChunkByFiler bool) (err error) { + fs.address = address + if fs.address == "" { + fs.address = pb.GrpcAddressToServerAddress(grpcAddress) + } fs.grpcAddress = grpcAddress fs.dir = dir fs.replication = replication fs.collection = collection fs.ttlSec = int32(ttlSec) - fs.grpcDialOption = security.LoadClientTLS(viper.Sub("grpc"), "client") + fs.diskType = diskType + fs.grpcDialOption = grpcDialOption + fs.writeChunkByFiler = writeChunkByFiler return nil } -func (fs *FilerSink) DeleteEntry(ctx context.Context, key string, isDirectory, deleteIncludeChunks bool) error { - return fs.withFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { - - dir, name := filer2.FullPath(key).DirAndName() - - request := &filer_pb.DeleteEntryRequest{ - Directory: dir, - Name: name, - IsDeleteData: deleteIncludeChunks, - } +func (fs *FilerSink) DeleteEntry(key string, isDirectory, deleteIncludeChunks bool, signatures []int32) error { - glog.V(1).Infof("delete entry: %v", request) - _, err := client.DeleteEntry(ctx, request) - if err != nil { - glog.V(0).Infof("delete entry %s: %v", key, err) - return fmt.Errorf("delete entry %s: %v", key, err) - } + dir, name := util.FullPath(key).DirAndName() - return nil - }) + glog.V(4).Infof("delete entry: %v", key) + err := filer_pb.Remove(fs, dir, name, deleteIncludeChunks, true, true, true, signatures) + if 
err != nil { + glog.V(0).Infof("delete entry %s: %v", key, err) + return fmt.Errorf("delete entry %s: %v", key, err) + } + return nil } -func (fs *FilerSink) CreateEntry(ctx context.Context, key string, entry *filer_pb.Entry) error { +func (fs *FilerSink) CreateEntry(key string, entry *filer_pb.Entry, signatures []int32) error { - return fs.withFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { + return fs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { - dir, name := filer2.FullPath(key).DirAndName() + dir, name := util.FullPath(key).DirAndName() // look up existing entry lookupRequest := &filer_pb.LookupDirectoryEntryRequest{ @@ -97,21 +109,21 @@ func (fs *FilerSink) CreateEntry(ctx context.Context, key string, entry *filer_p Name: name, } glog.V(1).Infof("lookup: %v", lookupRequest) - if resp, err := client.LookupDirectoryEntry(ctx, lookupRequest); err == nil { - if filer2.ETag(resp.Entry.Chunks) == filer2.ETag(entry.Chunks) { - glog.V(0).Infof("already replicated %s", key) + if resp, err := filer_pb.LookupEntry(client, lookupRequest); err == nil { + if filer.ETag(resp.Entry) == filer.ETag(entry) { + glog.V(3).Infof("already replicated %s", key) return nil } } - replicatedChunks, err := fs.replicateChunks(ctx, entry.Chunks) + replicatedChunks, err := fs.replicateChunks(entry.Chunks, key) if err != nil { - glog.V(0).Infof("replicate entry chunks %s: %v", key, err) - return fmt.Errorf("replicate entry chunks %s: %v", key, err) + // only warning here since the source chunk may have been deleted already + glog.Warningf("replicate entry chunks %s: %v", key, err) } - glog.V(0).Infof("replicated %s %+v ===> %+v", key, entry.Chunks, replicatedChunks) + glog.V(4).Infof("replicated %s %+v ===> %+v", key, entry.Chunks, replicatedChunks) request := &filer_pb.CreateEntryRequest{ Directory: dir, @@ -120,11 +132,14 @@ func (fs *FilerSink) CreateEntry(ctx context.Context, key string, entry *filer_p IsDirectory: entry.IsDirectory, Attributes: entry.Attributes, Chunks: replicatedChunks, + Content: entry.Content, }, + IsFromOtherCluster: true, + Signatures: signatures, } - glog.V(1).Infof("create: %v", request) - if _, err := client.CreateEntry(ctx, request); err != nil { + glog.V(3).Infof("create: %v", request) + if err := filer_pb.CreateEntry(client, request); err != nil { glog.V(0).Infof("create entry %s: %v", key, err) return fmt.Errorf("create entry %s: %v", key, err) } @@ -133,13 +148,13 @@ func (fs *FilerSink) CreateEntry(ctx context.Context, key string, entry *filer_p }) } -func (fs *FilerSink) UpdateEntry(ctx context.Context, key string, oldEntry *filer_pb.Entry, newParentPath string, newEntry *filer_pb.Entry, deleteIncludeChunks bool) (foundExistingEntry bool, err error) { +func (fs *FilerSink) UpdateEntry(key string, oldEntry *filer_pb.Entry, newParentPath string, newEntry *filer_pb.Entry, deleteIncludeChunks bool, signatures []int32) (foundExistingEntry bool, err error) { - dir, name := filer2.FullPath(key).DirAndName() + dir, name := util.FullPath(key).DirAndName() // read existing entry var existingEntry *filer_pb.Entry - err = fs.withFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { + err = fs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { request := &filer_pb.LookupDirectoryEntryRequest{ Directory: dir, @@ -147,7 +162,7 @@ func (fs *FilerSink) UpdateEntry(ctx context.Context, key string, oldEntry *file } glog.V(4).Infof("lookup entry: %v", request) - resp, err := client.LookupDirectoryEntry(ctx, request) + resp, err := 
filer_pb.LookupEntry(client, request) if err != nil { glog.V(0).Infof("lookup %s: %v", key, err) return err @@ -162,28 +177,31 @@ func (fs *FilerSink) UpdateEntry(ctx context.Context, key string, oldEntry *file return false, fmt.Errorf("lookup %s: %v", key, err) } - glog.V(0).Infof("oldEntry %+v, newEntry %+v, existingEntry: %+v", oldEntry, newEntry, existingEntry) + glog.V(4).Infof("oldEntry %+v, newEntry %+v, existingEntry: %+v", oldEntry, newEntry, existingEntry) if existingEntry.Attributes.Mtime > newEntry.Attributes.Mtime { // skip if already changed // this usually happens when the messages are not ordered - glog.V(0).Infof("late updates %s", key) - } else if filer2.ETag(newEntry.Chunks) == filer2.ETag(existingEntry.Chunks) { + glog.V(2).Infof("late updates %s", key) + } else if filer.ETag(newEntry) == filer.ETag(existingEntry) { // skip if no change // this usually happens when retrying the replication - glog.V(0).Infof("already replicated %s", key) + glog.V(3).Infof("already replicated %s", key) } else { // find out what changed - deletedChunks, newChunks := compareChunks(oldEntry, newEntry) + deletedChunks, newChunks, err := compareChunks(filer.LookupFn(fs), oldEntry, newEntry) + if err != nil { + return true, fmt.Errorf("replicate %s compare chunks error: %v", key, err) + } // delete the chunks that are deleted from the source if deleteIncludeChunks { // remove the deleted chunks. Actual data deletion happens in filer UpdateEntry FindUnusedFileChunks - existingEntry.Chunks = filer2.MinusChunks(existingEntry.Chunks, deletedChunks) + existingEntry.Chunks = filer.DoMinusChunks(existingEntry.Chunks, deletedChunks) } // replicate the chunks that are new in the source - replicatedChunks, err := fs.replicateChunks(ctx, newChunks) + replicatedChunks, err := fs.replicateChunks(newChunks, key) if err != nil { return true, fmt.Errorf("replicate %s chunks error: %v", key, err) } @@ -191,14 +209,16 @@ func (fs *FilerSink) UpdateEntry(ctx context.Context, key string, oldEntry *file } // save updated meta data - return true, fs.withFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { + return true, fs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { request := &filer_pb.UpdateEntryRequest{ - Directory: newParentPath, - Entry: existingEntry, + Directory: newParentPath, + Entry: existingEntry, + IsFromOtherCluster: true, + Signatures: signatures, } - if _, err := client.UpdateEntry(ctx, request); err != nil { + if _, err := client.UpdateEntry(context.Background(), request); err != nil { return fmt.Errorf("update existingEntry %s: %v", key, err) } @@ -206,8 +226,21 @@ func (fs *FilerSink) UpdateEntry(ctx context.Context, key string, oldEntry *file }) } -func compareChunks(oldEntry, newEntry *filer_pb.Entry) (deletedChunks, newChunks []*filer_pb.FileChunk) { - deletedChunks = filer2.MinusChunks(oldEntry.Chunks, newEntry.Chunks) - newChunks = filer2.MinusChunks(newEntry.Chunks, oldEntry.Chunks) +func compareChunks(lookupFileIdFn wdclient.LookupFileIdFunctionType, oldEntry, newEntry *filer_pb.Entry) (deletedChunks, newChunks []*filer_pb.FileChunk, err error) { + aData, aMeta, aErr := filer.ResolveChunkManifest(lookupFileIdFn, oldEntry.Chunks) + if aErr != nil { + return nil, nil, aErr + } + bData, bMeta, bErr := filer.ResolveChunkManifest(lookupFileIdFn, newEntry.Chunks) + if bErr != nil { + return nil, nil, bErr + } + + deletedChunks = append(deletedChunks, filer.DoMinusChunks(aData, bData)...) + deletedChunks = append(deletedChunks, filer.DoMinusChunks(aMeta, bMeta)...) 
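+ // conversely, chunks that appear only in the new entry still need to be replicated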
+ + newChunks = append(newChunks, filer.DoMinusChunks(bData, aData)...) + newChunks = append(newChunks, filer.DoMinusChunks(bMeta, aMeta)...) + return } diff --git a/weed/replication/sink/gcssink/gcs_sink.go b/weed/replication/sink/gcssink/gcs_sink.go index abd7c49b9..5cf5b7317 100644 --- a/weed/replication/sink/gcssink/gcs_sink.go +++ b/weed/replication/sink/gcssink/gcs_sink.go @@ -3,23 +3,26 @@ package gcssink import ( "context" "fmt" + "github.com/chrislusf/seaweedfs/weed/replication/repl_util" "os" "cloud.google.com/go/storage" - "github.com/chrislusf/seaweedfs/weed/filer2" + "google.golang.org/api/option" + + "github.com/chrislusf/seaweedfs/weed/filer" "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" "github.com/chrislusf/seaweedfs/weed/replication/sink" "github.com/chrislusf/seaweedfs/weed/replication/source" "github.com/chrislusf/seaweedfs/weed/util" - "google.golang.org/api/option" ) type GcsSink struct { - client *storage.Client - bucket string - dir string - filerSource *source.FilerSource + client *storage.Client + bucket string + dir string + filerSource *source.FilerSource + isIncremental bool } func init() { @@ -34,11 +37,16 @@ func (g *GcsSink) GetSinkToDirectory() string { return g.dir } -func (g *GcsSink) Initialize(configuration util.Configuration) error { +func (g *GcsSink) IsIncremental() bool { + return g.isIncremental +} + +func (g *GcsSink) Initialize(configuration util.Configuration, prefix string) error { + g.isIncremental = configuration.GetBool(prefix + "is_incremental") return g.initialize( - configuration.GetString("google_application_credentials"), - configuration.GetString("bucket"), - configuration.GetString("directory"), + configuration.GetString(prefix+"google_application_credentials"), + configuration.GetString(prefix+"bucket"), + configuration.GetString(prefix+"directory"), ) } @@ -50,7 +58,6 @@ func (g *GcsSink) initialize(google_application_credentials, bucketName, dir str g.bucket = bucketName g.dir = dir - ctx := context.Background() // Creates a client. 
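+ // fall back to the GOOGLE_APPLICATION_CREDENTIALS environment variable when no key file is configured in replication.toml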
if google_application_credentials == "" { var found bool @@ -59,7 +66,7 @@ func (g *GcsSink) initialize(google_application_credentials, bucketName, dir str glog.Fatalf("need to specific GOOGLE_APPLICATION_CREDENTIALS env variable or google_application_credentials in replication.toml") } } - client, err := storage.NewClient(ctx, option.WithCredentialsFile(google_application_credentials)) + client, err := storage.NewClient(context.Background(), option.WithCredentialsFile(google_application_credentials)) if err != nil { glog.Fatalf("Failed to create client: %v", err) } @@ -69,13 +76,13 @@ func (g *GcsSink) initialize(google_application_credentials, bucketName, dir str return nil } -func (g *GcsSink) DeleteEntry(ctx context.Context, key string, isDirectory, deleteIncludeChunks bool) error { +func (g *GcsSink) DeleteEntry(key string, isDirectory, deleteIncludeChunks bool, signatures []int32) error { if isDirectory { key = key + "/" } - if err := g.client.Bucket(g.bucket).Object(key).Delete(ctx); err != nil { + if err := g.client.Bucket(g.bucket).Object(key).Delete(context.Background()); err != nil { return fmt.Errorf("gcs delete %s%s: %v", g.bucket, key, err) } @@ -83,35 +90,24 @@ func (g *GcsSink) DeleteEntry(ctx context.Context, key string, isDirectory, dele } -func (g *GcsSink) CreateEntry(ctx context.Context, key string, entry *filer_pb.Entry) error { +func (g *GcsSink) CreateEntry(key string, entry *filer_pb.Entry, signatures []int32) error { if entry.IsDirectory { return nil } - totalSize := filer2.TotalSize(entry.Chunks) - chunkViews := filer2.ViewFromChunks(entry.Chunks, 0, int(totalSize)) - - wc := g.client.Bucket(g.bucket).Object(key).NewWriter(ctx) + totalSize := filer.FileSize(entry) + chunkViews := filer.ViewFromChunks(g.filerSource.LookupFileId, entry.Chunks, 0, int64(totalSize)) - for _, chunk := range chunkViews { - - fileUrl, err := g.filerSource.LookupFileId(ctx, chunk.FileId) - if err != nil { - return err - } - - _, err = util.ReadUrlAsStream(fileUrl, chunk.Offset, int(chunk.Size), func(data []byte) { - wc.Write(data) - }) - - if err != nil { - return err - } + wc := g.client.Bucket(g.bucket).Object(key).NewWriter(context.Background()) + defer wc.Close() + writeFunc := func(data []byte) error { + _, writeErr := wc.Write(data) + return writeErr } - if err := wc.Close(); err != nil { + if err := repl_util.CopyFromChunkViews(chunkViews, g.filerSource, writeFunc); err != nil { return err } @@ -119,7 +115,7 @@ func (g *GcsSink) CreateEntry(ctx context.Context, key string, entry *filer_pb.E } -func (g *GcsSink) UpdateEntry(ctx context.Context, key string, oldEntry *filer_pb.Entry, newParentPath string, newEntry *filer_pb.Entry, deleteIncludeChunks bool) (foundExistingEntry bool, err error) { +func (g *GcsSink) UpdateEntry(key string, oldEntry *filer_pb.Entry, newParentPath string, newEntry *filer_pb.Entry, deleteIncludeChunks bool, signatures []int32) (foundExistingEntry bool, err error) { // TODO improve efficiency return false, nil } diff --git a/weed/replication/sink/localsink/local_incremental_sink.go b/weed/replication/sink/localsink/local_incremental_sink.go new file mode 100644 index 000000000..a1d49e28a --- /dev/null +++ b/weed/replication/sink/localsink/local_incremental_sink.go @@ -0,0 +1,17 @@ +package localsink + +import ( + "github.com/chrislusf/seaweedfs/weed/replication/sink" +) + +type LocalIncSink struct { + LocalSink +} + +func (localincsink *LocalIncSink) GetName() string { + return "local_incremental" +} + +func init() { + sink.Sinks = append(sink.Sinks, 
&LocalIncSink{}) +} diff --git a/weed/replication/sink/localsink/local_sink.go b/weed/replication/sink/localsink/local_sink.go new file mode 100644 index 000000000..2b9b3e69a --- /dev/null +++ b/weed/replication/sink/localsink/local_sink.go @@ -0,0 +1,105 @@ +package localsink + +import ( + "github.com/chrislusf/seaweedfs/weed/filer" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/replication/repl_util" + "github.com/chrislusf/seaweedfs/weed/replication/sink" + "github.com/chrislusf/seaweedfs/weed/replication/source" + "github.com/chrislusf/seaweedfs/weed/util" + "io/ioutil" + "os" + "path/filepath" + "strings" +) + +type LocalSink struct { + Dir string + filerSource *source.FilerSource +} + +func init() { + sink.Sinks = append(sink.Sinks, &LocalSink{}) +} + +func (localsink *LocalSink) SetSourceFiler(s *source.FilerSource) { + localsink.filerSource = s +} + +func (localsink *LocalSink) GetName() string { + return "local" +} + +func (localsink *LocalSink) isMultiPartEntry(key string) bool { + return strings.HasSuffix(key, ".part") && strings.Contains(key, "/.uploads/") +} + +func (localsink *LocalSink) initialize(dir string) error { + localsink.Dir = dir + return nil +} + +func (localsink *LocalSink) Initialize(configuration util.Configuration, prefix string) error { + dir := configuration.GetString(prefix + "directory") + glog.V(4).Infof("sink.local.directory: %v", dir) + return localsink.initialize(dir) +} + +func (localsink *LocalSink) GetSinkToDirectory() string { + return localsink.Dir +} + +func (localsink *LocalSink) IsIncremental() bool { + return true +} + +func (localsink *LocalSink) DeleteEntry(key string, isDirectory, deleteIncludeChunks bool, signatures []int32) error { + if localsink.isMultiPartEntry(key) { + return nil + } + glog.V(4).Infof("Delete Entry key: %s", key) + if err := os.Remove(key); err != nil { + glog.V(0).Infof("remove entry key %s: %s", key, err) + } + return nil +} + +func (localsink *LocalSink) CreateEntry(key string, entry *filer_pb.Entry, signatures []int32) error { + if entry.IsDirectory || localsink.isMultiPartEntry(key) { + return nil + } + glog.V(4).Infof("Create Entry key: %s", key) + + totalSize := filer.FileSize(entry) + chunkViews := filer.ViewFromChunks(localsink.filerSource.LookupFileId, entry.Chunks, 0, int64(totalSize)) + + dir := filepath.Dir(key) + + if _, err := os.Stat(dir); os.IsNotExist(err) { + glog.V(4).Infof("Create Directory key: %s", dir) + if err = os.MkdirAll(dir, 0755); err != nil { + return err + } + } + + writeFunc := func(data []byte) error { + writeErr := ioutil.WriteFile(key, data, 0755) + return writeErr + } + + if err := repl_util.CopyFromChunkViews(chunkViews, localsink.filerSource, writeFunc); err != nil { + return err + } + + return nil +} + +func (localsink *LocalSink) UpdateEntry(key string, oldEntry *filer_pb.Entry, newParentPath string, newEntry *filer_pb.Entry, deleteIncludeChunks bool, signatures []int32) (foundExistingEntry bool, err error) { + if localsink.isMultiPartEntry(key) { + return true, nil + } + glog.V(4).Infof("Update Entry key: %s", key) + // do delete and create + return false, nil +} diff --git a/weed/replication/sink/replication_sink.go b/weed/replication/sink/replication_sink.go index dd54f0005..4ffd09462 100644 --- a/weed/replication/sink/replication_sink.go +++ b/weed/replication/sink/replication_sink.go @@ -1,7 +1,6 @@ package sink import ( - "context" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" 
"github.com/chrislusf/seaweedfs/weed/replication/source" "github.com/chrislusf/seaweedfs/weed/util" @@ -9,12 +8,13 @@ import ( type ReplicationSink interface { GetName() string - Initialize(configuration util.Configuration) error - DeleteEntry(ctx context.Context, key string, isDirectory, deleteIncludeChunks bool) error - CreateEntry(ctx context.Context, key string, entry *filer_pb.Entry) error - UpdateEntry(ctx context.Context, key string, oldEntry *filer_pb.Entry, newParentPath string, newEntry *filer_pb.Entry, deleteIncludeChunks bool) (foundExistingEntry bool, err error) + Initialize(configuration util.Configuration, prefix string) error + DeleteEntry(key string, isDirectory, deleteIncludeChunks bool, signatures []int32) error + CreateEntry(key string, entry *filer_pb.Entry, signatures []int32) error + UpdateEntry(key string, oldEntry *filer_pb.Entry, newParentPath string, newEntry *filer_pb.Entry, deleteIncludeChunks bool, signatures []int32) (foundExistingEntry bool, err error) GetSinkToDirectory() string SetSourceFiler(s *source.FilerSource) + IsIncremental() bool } var ( diff --git a/weed/replication/sink/s3sink/s3_sink.go b/weed/replication/sink/s3sink/s3_sink.go index 4cff341d0..9a36573e3 100644 --- a/weed/replication/sink/s3sink/s3_sink.go +++ b/weed/replication/sink/s3sink/s3_sink.go @@ -11,7 +11,8 @@ import ( "github.com/aws/aws-sdk-go/aws/session" "github.com/aws/aws-sdk-go/service/s3" "github.com/aws/aws-sdk-go/service/s3/s3iface" - "github.com/chrislusf/seaweedfs/weed/filer2" + + "github.com/chrislusf/seaweedfs/weed/filer" "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" "github.com/chrislusf/seaweedfs/weed/replication/sink" @@ -20,11 +21,13 @@ import ( ) type S3Sink struct { - conn s3iface.S3API - region string - bucket string - dir string - filerSource *source.FilerSource + conn s3iface.S3API + region string + bucket string + dir string + endpoint string + filerSource *source.FilerSource + isIncremental bool } func init() { @@ -39,16 +42,24 @@ func (s3sink *S3Sink) GetSinkToDirectory() string { return s3sink.dir } -func (s3sink *S3Sink) Initialize(configuration util.Configuration) error { - glog.V(0).Infof("sink.s3.region: %v", configuration.GetString("region")) - glog.V(0).Infof("sink.s3.bucket: %v", configuration.GetString("bucket")) - glog.V(0).Infof("sink.s3.directory: %v", configuration.GetString("directory")) +func (s3sink *S3Sink) IsIncremental() bool { + return s3sink.isIncremental +} + +func (s3sink *S3Sink) Initialize(configuration util.Configuration, prefix string) error { + glog.V(0).Infof("sink.s3.region: %v", configuration.GetString(prefix+"region")) + glog.V(0).Infof("sink.s3.bucket: %v", configuration.GetString(prefix+"bucket")) + glog.V(0).Infof("sink.s3.directory: %v", configuration.GetString(prefix+"directory")) + glog.V(0).Infof("sink.s3.endpoint: %v", configuration.GetString(prefix+"endpoint")) + glog.V(0).Infof("sink.s3.is_incremental: %v", configuration.GetString(prefix+"is_incremental")) + s3sink.isIncremental = configuration.GetBool(prefix + "is_incremental") return s3sink.initialize( - configuration.GetString("aws_access_key_id"), - configuration.GetString("aws_secret_access_key"), - configuration.GetString("region"), - configuration.GetString("bucket"), - configuration.GetString("directory"), + configuration.GetString(prefix+"aws_access_key_id"), + configuration.GetString(prefix+"aws_secret_access_key"), + configuration.GetString(prefix+"region"), + configuration.GetString(prefix+"bucket"), + 
configuration.GetString(prefix+"directory"), + configuration.GetString(prefix+"endpoint"), ) } @@ -56,13 +67,16 @@ func (s3sink *S3Sink) SetSourceFiler(s *source.FilerSource) { s3sink.filerSource = s } -func (s3sink *S3Sink) initialize(awsAccessKeyId, awsSecretAccessKey, region, bucket, dir string) error { +func (s3sink *S3Sink) initialize(awsAccessKeyId, awsSecretAccessKey, region, bucket, dir, endpoint string) error { s3sink.region = region s3sink.bucket = bucket s3sink.dir = dir + s3sink.endpoint = endpoint config := &aws.Config{ - Region: aws.String(s3sink.region), + Region: aws.String(s3sink.region), + Endpoint: aws.String(s3sink.endpoint), + S3ForcePathStyle: aws.Bool(true), } if awsAccessKeyId != "" && awsSecretAccessKey != "" { config.Credentials = credentials.NewStaticCredentials(awsAccessKeyId, awsSecretAccessKey, "") @@ -77,7 +91,7 @@ func (s3sink *S3Sink) initialize(awsAccessKeyId, awsSecretAccessKey, region, buc return nil } -func (s3sink *S3Sink) DeleteEntry(ctx context.Context, key string, isDirectory, deleteIncludeChunks bool) error { +func (s3sink *S3Sink) DeleteEntry(key string, isDirectory, deleteIncludeChunks bool, signatures []int32) error { key = cleanKey(key) @@ -89,8 +103,7 @@ func (s3sink *S3Sink) DeleteEntry(ctx context.Context, key string, isDirectory, } -func (s3sink *S3Sink) CreateEntry(ctx context.Context, key string, entry *filer_pb.Entry) error { - +func (s3sink *S3Sink) CreateEntry(key string, entry *filer_pb.Entry, signatures []int32) error { key = cleanKey(key) if entry.IsDirectory { @@ -99,38 +112,40 @@ func (s3sink *S3Sink) CreateEntry(ctx context.Context, key string, entry *filer_ uploadId, err := s3sink.createMultipartUpload(key, entry) if err != nil { - return err + return fmt.Errorf("createMultipartUpload: %v", err) } - totalSize := filer2.TotalSize(entry.Chunks) - chunkViews := filer2.ViewFromChunks(entry.Chunks, 0, int(totalSize)) + totalSize := filer.FileSize(entry) + chunkViews := filer.ViewFromChunks(s3sink.filerSource.LookupFileId, entry.Chunks, 0, int64(totalSize)) + + parts := make([]*s3.CompletedPart, len(chunkViews)) - var parts []*s3.CompletedPart var wg sync.WaitGroup for chunkIndex, chunk := range chunkViews { partId := chunkIndex + 1 wg.Add(1) - go func(chunk *filer2.ChunkView) { + go func(chunk *filer.ChunkView, index int) { defer wg.Done() - if part, uploadErr := s3sink.uploadPart(ctx, key, uploadId, partId, chunk); uploadErr != nil { + if part, uploadErr := s3sink.uploadPart(key, uploadId, partId, chunk); uploadErr != nil { err = uploadErr + glog.Errorf("uploadPart: %v", uploadErr) } else { - parts = append(parts, part) + parts[index] = part } - }(chunk) + }(chunk, chunkIndex) } wg.Wait() if err != nil { s3sink.abortMultipartUpload(key, uploadId) - return err + return fmt.Errorf("uploadPart: %v", err) } - return s3sink.completeMultipartUpload(ctx, key, uploadId, parts) + return s3sink.completeMultipartUpload(context.Background(), key, uploadId, parts) } -func (s3sink *S3Sink) UpdateEntry(ctx context.Context, key string, oldEntry *filer_pb.Entry, newParentPath string, newEntry *filer_pb.Entry, deleteIncludeChunks bool) (foundExistingEntry bool, err error) { +func (s3sink *S3Sink) UpdateEntry(key string, oldEntry *filer_pb.Entry, newParentPath string, newEntry *filer_pb.Entry, deleteIncludeChunks bool, signatures []int32) (foundExistingEntry bool, err error) { key = cleanKey(key) // TODO improve efficiency return false, nil diff --git a/weed/replication/sink/s3sink/s3_write.go b/weed/replication/sink/s3sink/s3_write.go index 
0a190b27d..3dde52616 100644 --- a/weed/replication/sink/s3sink/s3_write.go +++ b/weed/replication/sink/s3sink/s3_write.go @@ -9,7 +9,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/service/s3" - "github.com/chrislusf/seaweedfs/weed/filer2" + "github.com/chrislusf/seaweedfs/weed/filer" "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" "github.com/chrislusf/seaweedfs/weed/util" @@ -24,7 +24,7 @@ func (s3sink *S3Sink) deleteObject(key string) error { result, err := s3sink.conn.DeleteObject(input) if err == nil { - glog.V(0).Infof("[%s] delete %s: %v", s3sink.bucket, key, result) + glog.V(2).Infof("[%s] delete %s: %v", s3sink.bucket, key, result) } else { glog.Errorf("[%s] delete %s: %v", s3sink.bucket, key, err) } @@ -43,7 +43,7 @@ func (s3sink *S3Sink) createMultipartUpload(key string, entry *filer_pb.Entry) ( result, err := s3sink.conn.CreateMultipartUpload(input) if err == nil { - glog.V(0).Infof("[%s] createMultipartUpload %s: %v", s3sink.bucket, key, result) + glog.V(2).Infof("[%s] createMultipartUpload %s: %v", s3sink.bucket, key, result) } else { glog.Errorf("[%s] createMultipartUpload %s: %v", s3sink.bucket, key, err) return "", err @@ -94,19 +94,20 @@ func (s3sink *S3Sink) completeMultipartUpload(ctx context.Context, key, uploadId result, err := s3sink.conn.CompleteMultipartUpload(input) if err == nil { - glog.V(0).Infof("[%s] completeMultipartUpload %s: %v", s3sink.bucket, key, result) + glog.V(2).Infof("[%s] completeMultipartUpload %s: %v", s3sink.bucket, key, result) } else { glog.Errorf("[%s] completeMultipartUpload %s: %v", s3sink.bucket, key, err) + return fmt.Errorf("[%s] completeMultipartUpload %s: %v", s3sink.bucket, key, err) } - return err + return nil } // To upload a part -func (s3sink *S3Sink) uploadPart(ctx context.Context, key, uploadId string, partId int, chunk *filer2.ChunkView) (*s3.CompletedPart, error) { +func (s3sink *S3Sink) uploadPart(key, uploadId string, partId int, chunk *filer.ChunkView) (*s3.CompletedPart, error) { var readSeeker io.ReadSeeker - readSeeker, err := s3sink.buildReadSeeker(ctx, chunk) + readSeeker, err := s3sink.buildReadSeeker(chunk) if err != nil { glog.Errorf("[%s] uploadPart %s %d read: %v", s3sink.bucket, key, partId, err) return nil, fmt.Errorf("[%s] uploadPart %s %d read: %v", s3sink.bucket, key, partId, err) @@ -122,7 +123,7 @@ func (s3sink *S3Sink) uploadPart(ctx context.Context, key, uploadId string, part result, err := s3sink.conn.UploadPart(input) if err == nil { - glog.V(0).Infof("[%s] uploadPart %s %d upload: %v", s3sink.bucket, key, partId, result) + glog.V(2).Infof("[%s] uploadPart %s %d upload: %v", s3sink.bucket, key, partId, result) } else { glog.Errorf("[%s] uploadPart %s %d upload: %v", s3sink.bucket, key, partId, err) } @@ -156,12 +157,19 @@ func (s3sink *S3Sink) uploadPartCopy(key, uploadId string, partId int64, copySou return err } -func (s3sink *S3Sink) buildReadSeeker(ctx context.Context, chunk *filer2.ChunkView) (io.ReadSeeker, error) { - fileUrl, err := s3sink.filerSource.LookupFileId(ctx, chunk.FileId) +func (s3sink *S3Sink) buildReadSeeker(chunk *filer.ChunkView) (io.ReadSeeker, error) { + fileUrls, err := s3sink.filerSource.LookupFileId(chunk.FileId) if err != nil { return nil, err } buf := make([]byte, chunk.Size) - util.ReadUrl(fileUrl, chunk.Offset, int(chunk.Size), buf, true) + for _, fileUrl := range fileUrls { + _, err = util.ReadUrl(fileUrl, chunk.CipherKey, chunk.IsGzipped, false, chunk.Offset, 
int(chunk.Size), buf) + if err != nil { + glog.V(1).Infof("read from %s: %v", fileUrl, err) + } else { + break + } + } return bytes.NewReader(buf), nil } diff --git a/weed/replication/source/filer_source.go b/weed/replication/source/filer_source.go index d7b5ebc4d..e2e3575dc 100644 --- a/weed/replication/source/filer_source.go +++ b/weed/replication/source/filer_source.go @@ -3,13 +3,15 @@ package source import ( "context" "fmt" - "github.com/chrislusf/seaweedfs/weed/security" - "github.com/spf13/viper" - "google.golang.org/grpc" "io" "net/http" "strings" + "google.golang.org/grpc" + + "github.com/chrislusf/seaweedfs/weed/pb" + "github.com/chrislusf/seaweedfs/weed/security" + "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" "github.com/chrislusf/seaweedfs/weed/util" @@ -23,32 +25,41 @@ type FilerSource struct { grpcAddress string grpcDialOption grpc.DialOption Dir string + address string + proxyByFiler bool } -func (fs *FilerSource) Initialize(configuration util.Configuration) error { - return fs.initialize( - configuration.GetString("grpcAddress"), - configuration.GetString("directory"), +func (fs *FilerSource) Initialize(configuration util.Configuration, prefix string) error { + return fs.DoInitialize( + "", + configuration.GetString(prefix+"grpcAddress"), + configuration.GetString(prefix+"directory"), + false, ) } -func (fs *FilerSource) initialize(grpcAddress string, dir string) (err error) { +func (fs *FilerSource) DoInitialize(address, grpcAddress string, dir string, readChunkFromFiler bool) (err error) { + fs.address = address + if fs.address == "" { + fs.address = pb.GrpcAddressToServerAddress(grpcAddress) + } fs.grpcAddress = grpcAddress fs.Dir = dir - fs.grpcDialOption = security.LoadClientTLS(viper.Sub("grpc"), "client") + fs.grpcDialOption = security.LoadClientTLS(util.GetViper(), "grpc.client") + fs.proxyByFiler = readChunkFromFiler return nil } -func (fs *FilerSource) LookupFileId(ctx context.Context, part string) (fileUrl string, err error) { +func (fs *FilerSource) LookupFileId(part string) (fileUrls []string, err error) { vid2Locations := make(map[string]*filer_pb.Locations) vid := volumeId(part) - err = fs.withFilerClient(ctx, fs.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error { + err = fs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { glog.V(4).Infof("read lookup volume id locations: %v", vid) - resp, err := client.LookupVolume(ctx, &filer_pb.LookupVolumeRequest{ + resp, err := client.LookupVolume(context.Background(), &filer_pb.LookupVolumeRequest{ VolumeIds: []string{vid}, }) if err != nil { @@ -62,42 +73,65 @@ func (fs *FilerSource) LookupFileId(ctx context.Context, part string) (fileUrl s if err != nil { glog.V(1).Infof("LookupFileId volume id %s: %v", vid, err) - return "", fmt.Errorf("LookupFileId volume id %s: %v", vid, err) + return nil, fmt.Errorf("LookupFileId volume id %s: %v", vid, err) } locations := vid2Locations[vid] if locations == nil || len(locations.Locations) == 0 { glog.V(1).Infof("LookupFileId locate volume id %s: %v", vid, err) - return "", fmt.Errorf("LookupFileId locate volume id %s: %v", vid, err) + return nil, fmt.Errorf("LookupFileId locate volume id %s: %v", vid, err) } - fileUrl = fmt.Sprintf("http://%s/%s", locations.Locations[0].Url, part) + if !fs.proxyByFiler { + for _, loc := range locations.Locations { + fileUrls = append(fileUrls, fmt.Sprintf("http://%s/%s?readDeleted=true", loc.Url, part)) + } + } else { + fileUrls = append(fileUrls, 
fmt.Sprintf("http://%s/?proxyChunkId=%s", fs.address, part)) + } return } -func (fs *FilerSource) ReadPart(ctx context.Context, part string) (filename string, header http.Header, readCloser io.ReadCloser, err error) { +func (fs *FilerSource) ReadPart(fileId string) (filename string, header http.Header, resp *http.Response, err error) { - fileUrl, err := fs.LookupFileId(ctx, part) + if fs.proxyByFiler { + return util.DownloadFile("http://" + fs.address + "/?proxyChunkId=" + fileId) + } + + fileUrls, err := fs.LookupFileId(fileId) if err != nil { return "", nil, nil, err } - filename, header, readCloser, err = util.DownloadFile(fileUrl) + for _, fileUrl := range fileUrls { + filename, header, resp, err = util.DownloadFile(fileUrl) + if err != nil { + glog.V(1).Infof("fail to read from %s: %v", fileUrl, err) + } else { + break + } + } - return filename, header, readCloser, err + return filename, header, resp, err } -func (fs *FilerSource) withFilerClient(ctx context.Context, grpcDialOption grpc.DialOption, fn func(filer_pb.SeaweedFilerClient) error) error { +var _ = filer_pb.FilerClient(&FilerSource{}) + +func (fs *FilerSource) WithFilerClient(fn func(filer_pb.SeaweedFilerClient) error) error { - return util.WithCachedGrpcClient(ctx, func(grpcConnection *grpc.ClientConn) error { + return pb.WithCachedGrpcClient(func(grpcConnection *grpc.ClientConn) error { client := filer_pb.NewSeaweedFilerClient(grpcConnection) return fn(client) }, fs.grpcAddress, fs.grpcDialOption) } +func (fs *FilerSource) AdjustedUrl(location *filer_pb.Location) string { + return location.Url +} + func volumeId(fileId string) string { lastCommaIndex := strings.LastIndex(fileId, ",") if lastCommaIndex > 0 { diff --git a/weed/replication/sub/notification_aws_sqs.go b/weed/replication/sub/notification_aws_sqs.go index bed26c79c..642834c72 100644 --- a/weed/replication/sub/notification_aws_sqs.go +++ b/weed/replication/sub/notification_aws_sqs.go @@ -27,14 +27,14 @@ func (k *AwsSqsInput) GetName() string { return "aws_sqs" } -func (k *AwsSqsInput) Initialize(configuration util.Configuration) error { - glog.V(0).Infof("replication.notification.aws_sqs.region: %v", configuration.GetString("region")) - glog.V(0).Infof("replication.notification.aws_sqs.sqs_queue_name: %v", configuration.GetString("sqs_queue_name")) +func (k *AwsSqsInput) Initialize(configuration util.Configuration, prefix string) error { + glog.V(0).Infof("replication.notification.aws_sqs.region: %v", configuration.GetString(prefix+"region")) + glog.V(0).Infof("replication.notification.aws_sqs.sqs_queue_name: %v", configuration.GetString(prefix+"sqs_queue_name")) return k.initialize( - configuration.GetString("aws_access_key_id"), - configuration.GetString("aws_secret_access_key"), - configuration.GetString("region"), - configuration.GetString("sqs_queue_name"), + configuration.GetString(prefix+"aws_access_key_id"), + configuration.GetString(prefix+"aws_secret_access_key"), + configuration.GetString(prefix+"region"), + configuration.GetString(prefix+"sqs_queue_name"), ) } @@ -68,7 +68,7 @@ func (k *AwsSqsInput) initialize(awsAccessKeyId, awsSecretAccessKey, region, que return nil } -func (k *AwsSqsInput) ReceiveMessage() (key string, message *filer_pb.EventNotification, err error) { +func (k *AwsSqsInput) ReceiveMessage() (key string, message *filer_pb.EventNotification, onSuccessFn func(), onFailureFn func(), err error) { // receive message result, err := k.svc.ReceiveMessage(&sqs.ReceiveMessageInput{ @@ -92,7 +92,9 @@ func (k *AwsSqsInput) ReceiveMessage() (key 
string, message *filer_pb.EventNotif } // process the message - key = *result.Messages[0].Attributes["key"] + // fmt.Printf("messages: %+v\n", result.Messages[0]) + keyValue := result.Messages[0].MessageAttributes["key"] + key = *keyValue.StringValue text := *result.Messages[0].Body message = &filer_pb.EventNotification{} err = proto.UnmarshalText(text, message) diff --git a/weed/replication/sub/notification_gocdk_pub_sub.go b/weed/replication/sub/notification_gocdk_pub_sub.go index eddba9ff8..b16eec2e1 100644 --- a/weed/replication/sub/notification_gocdk_pub_sub.go +++ b/weed/replication/sub/notification_gocdk_pub_sub.go @@ -2,13 +2,20 @@ package sub import ( "context" - "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" "github.com/chrislusf/seaweedfs/weed/util" "github.com/golang/protobuf/proto" + "github.com/streadway/amqp" "gocloud.dev/pubsub" _ "gocloud.dev/pubsub/awssnssqs" + "gocloud.dev/pubsub/rabbitpubsub" + "net/url" + "os" + "path" + "strings" + "time" + // _ "gocloud.dev/pubsub/azuresb" _ "gocloud.dev/pubsub/gcppubsub" _ "gocloud.dev/pubsub/natspubsub" @@ -19,32 +26,139 @@ func init() { NotificationInputs = append(NotificationInputs, &GoCDKPubSubInput{}) } +func getPath(rawUrl string) string { + parsedUrl, _ := url.Parse(rawUrl) + return path.Join(parsedUrl.Host, parsedUrl.Path) +} + +func QueueDeclareAndBind(conn *amqp.Connection, exchangeUrl string, queueUrl string) error { + exchangeName := getPath(exchangeUrl) + queueName := getPath(queueUrl) + exchangeNameDLX := "DLX." + exchangeName + queueNameDLX := "DLX." + queueName + ch, err := conn.Channel() + if err != nil { + glog.Error(err) + return err + } + defer ch.Close() + if err := ch.ExchangeDeclare( + exchangeNameDLX, "fanout", false, false, false, false, nil); err != nil { + glog.Error(err) + return err + } + if err := ch.ExchangeDeclare( + exchangeName, "fanout", false, false, false, false, nil); err != nil { + glog.Error(err) + return err + } + if _, err := ch.QueueDeclare( + queueName, false, false, false, false, + amqp.Table{"x-dead-letter-exchange": exchangeNameDLX}); err != nil { + glog.Error(err) + return err + } + if err := ch.QueueBind(queueName, "", exchangeName, false, nil); err != nil { + glog.Error(err) + return err + } + if _, err := ch.QueueDeclare( + queueNameDLX, false, false, false, false, + amqp.Table{"x-dead-letter-exchange": exchangeName, "x-message-ttl": 600000}); err != nil { + glog.Error(err) + return err + } + if err := ch.QueueBind(queueNameDLX, "", exchangeNameDLX, false, nil); err != nil { + glog.Error(err) + return err + } + return nil +} + type GoCDKPubSubInput struct { - sub *pubsub.Subscription + sub *pubsub.Subscription + subURL string } func (k *GoCDKPubSubInput) GetName() string { return "gocdk_pub_sub" } -func (k *GoCDKPubSubInput) Initialize(config util.Configuration) error { - subURL := config.GetString("sub_url") - glog.V(0).Infof("notification.gocdk_pub_sub.sub_url: %v", subURL) - sub, err := pubsub.OpenSubscription(context.Background(), subURL) +func (k *GoCDKPubSubInput) Initialize(configuration util.Configuration, prefix string) error { + topicUrl := configuration.GetString(prefix + "topic_url") + k.subURL = configuration.GetString(prefix + "sub_url") + glog.V(0).Infof("notification.gocdk_pub_sub.sub_url: %v", k.subURL) + sub, err := pubsub.OpenSubscription(context.Background(), k.subURL) if err != nil { return err } + var conn *amqp.Connection + if sub.As(&conn) { + ch, err := conn.Channel() + if err != nil { + return err + } + defer 
ch.Close() + _, err = ch.QueueInspect(getPath(k.subURL)) + if err != nil { + if strings.HasPrefix(err.Error(), "Exception (404) Reason") { + if err := QueueDeclareAndBind(conn, topicUrl, k.subURL); err != nil { + return err + } + } else { + return err + } + } + } k.sub = sub return nil } -func (k *GoCDKPubSubInput) ReceiveMessage() (key string, message *filer_pb.EventNotification, err error) { - msg, err := k.sub.Receive(context.Background()) +func (k *GoCDKPubSubInput) ReceiveMessage() (key string, message *filer_pb.EventNotification, onSuccessFn func(), onFailureFn func(), err error) { + ctx := context.Background() + msg, err := k.sub.Receive(ctx) + if err != nil { + var conn *amqp.Connection + if k.sub.As(&conn) && conn.IsClosed() { + conn.Close() + k.sub.Shutdown(ctx) + conn, err = amqp.Dial(os.Getenv("RABBIT_SERVER_URL")) + if err != nil { + glog.Error(err) + time.Sleep(time.Second) + return + } + k.sub = rabbitpubsub.OpenSubscription(conn, getPath(k.subURL), nil) + return + } + // This is permanent cached sub err + glog.Fatal(err) + } + onFailureFn = func() { + if msg.Nackable() { + isRedelivered := false + var delivery amqp.Delivery + if msg.As(&delivery) { + isRedelivered = delivery.Redelivered + glog.Warningf("onFailureFn() metadata: %+v, redelivered: %v", msg.Metadata, delivery.Redelivered) + } + if isRedelivered { + if err := delivery.Nack(false, false); err != nil { + glog.Error(err) + } + } else { + msg.Nack() + } + } + } + onSuccessFn = func() { + msg.Ack() + } key = msg.Metadata["key"] message = &filer_pb.EventNotification{} err = proto.Unmarshal(msg.Body, message) if err != nil { - return "", nil, err + return "", nil, onSuccessFn, onFailureFn, err } - return key, message, nil + return key, message, onSuccessFn, onFailureFn, nil } diff --git a/weed/replication/sub/notification_google_pub_sub.go b/weed/replication/sub/notification_google_pub_sub.go index ad6b42a2e..f7c767d4a 100644 --- a/weed/replication/sub/notification_google_pub_sub.go +++ b/weed/replication/sub/notification_google_pub_sub.go @@ -27,13 +27,13 @@ func (k *GooglePubSubInput) GetName() string { return "google_pub_sub" } -func (k *GooglePubSubInput) Initialize(configuration util.Configuration) error { - glog.V(0).Infof("notification.google_pub_sub.project_id: %v", configuration.GetString("project_id")) - glog.V(0).Infof("notification.google_pub_sub.topic: %v", configuration.GetString("topic")) +func (k *GooglePubSubInput) Initialize(configuration util.Configuration, prefix string) error { + glog.V(0).Infof("notification.google_pub_sub.project_id: %v", configuration.GetString(prefix+"project_id")) + glog.V(0).Infof("notification.google_pub_sub.topic: %v", configuration.GetString(prefix+"topic")) return k.initialize( - configuration.GetString("google_application_credentials"), - configuration.GetString("project_id"), - configuration.GetString("topic"), + configuration.GetString(prefix+"google_application_credentials"), + configuration.GetString(prefix+"project_id"), + configuration.GetString(prefix+"topic"), ) } @@ -85,16 +85,22 @@ func (k *GooglePubSubInput) initialize(google_application_credentials, projectId go k.sub.Receive(ctx, func(ctx context.Context, m *pubsub.Message) { k.messageChan <- m - m.Ack() }) return err } -func (k *GooglePubSubInput) ReceiveMessage() (key string, message *filer_pb.EventNotification, err error) { +func (k *GooglePubSubInput) ReceiveMessage() (key string, message *filer_pb.EventNotification, onSuccessFn func(), onFailureFn func(), err error) { m := <-k.messageChan + onSuccessFn 
= func() { + m.Ack() + } + onFailureFn = func() { + m.Nack() + } + // process the message key = m.Attributes["key"] message = &filer_pb.EventNotification{} diff --git a/weed/replication/sub/notification_kafka.go b/weed/replication/sub/notification_kafka.go index 1a86a8307..622a759ea 100644 --- a/weed/replication/sub/notification_kafka.go +++ b/weed/replication/sub/notification_kafka.go @@ -28,14 +28,14 @@ func (k *KafkaInput) GetName() string { return "kafka" } -func (k *KafkaInput) Initialize(configuration util.Configuration) error { - glog.V(0).Infof("replication.notification.kafka.hosts: %v\n", configuration.GetStringSlice("hosts")) - glog.V(0).Infof("replication.notification.kafka.topic: %v\n", configuration.GetString("topic")) +func (k *KafkaInput) Initialize(configuration util.Configuration, prefix string) error { + glog.V(0).Infof("replication.notification.kafka.hosts: %v\n", configuration.GetStringSlice(prefix+"hosts")) + glog.V(0).Infof("replication.notification.kafka.topic: %v\n", configuration.GetString(prefix+"topic")) return k.initialize( - configuration.GetStringSlice("hosts"), - configuration.GetString("topic"), - configuration.GetString("offsetFile"), - configuration.GetInt("offsetSaveIntervalSeconds"), + configuration.GetStringSlice(prefix+"hosts"), + configuration.GetString(prefix+"topic"), + configuration.GetString(prefix+"offsetFile"), + configuration.GetInt(prefix+"offsetSaveIntervalSeconds"), ) } @@ -97,7 +97,7 @@ func (k *KafkaInput) initialize(hosts []string, topic string, offsetFile string, return nil } -func (k *KafkaInput) ReceiveMessage() (key string, message *filer_pb.EventNotification, err error) { +func (k *KafkaInput) ReceiveMessage() (key string, message *filer_pb.EventNotification, onSuccessFn func(), onFailureFn func(), err error) { msg := <-k.messageChan diff --git a/weed/replication/sub/notifications.go b/weed/replication/sub/notifications.go index 66fbef824..d5a910db9 100644 --- a/weed/replication/sub/notifications.go +++ b/weed/replication/sub/notifications.go @@ -9,8 +9,8 @@ type NotificationInput interface { // GetName gets the name to locate the configuration in sync.toml file GetName() string // Initialize initializes the file store - Initialize(configuration util.Configuration) error - ReceiveMessage() (key string, message *filer_pb.EventNotification, err error) + Initialize(configuration util.Configuration, prefix string) error + ReceiveMessage() (key string, message *filer_pb.EventNotification, onSuccessFn func(), onFailureFn func(), err error) } var ( diff --git a/weed/s3api/auth_credentials.go b/weed/s3api/auth_credentials.go new file mode 100644 index 000000000..b8af6381a --- /dev/null +++ b/weed/s3api/auth_credentials.go @@ -0,0 +1,275 @@ +package s3api + +import ( + "fmt" + "github.com/chrislusf/seaweedfs/weed/filer" + "github.com/chrislusf/seaweedfs/weed/s3api/s3_constants" + "io/ioutil" + "net/http" + + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/iam_pb" + xhttp "github.com/chrislusf/seaweedfs/weed/s3api/http" + "github.com/chrislusf/seaweedfs/weed/s3api/s3err" +) + +type Action string + +type Iam interface { + Check(f http.HandlerFunc, actions ...Action) http.HandlerFunc +} + +type IdentityAccessManagement struct { + identities []*Identity + domain string +} + +type Identity struct { + Name string + Credentials []*Credential + Actions []Action +} + +type Credential struct { + AccessKey string + SecretKey string +} + +func NewIdentityAccessManagement(option *S3ApiServerOption) 
*IdentityAccessManagement { + iam := &IdentityAccessManagement{ + domain: option.DomainName, + } + if option.Config != "" { + if err := iam.loadS3ApiConfigurationFromFile(option.Config); err != nil { + glog.Fatalf("fail to load config file %s: %v", option.Config, err) + } + } else { + if err := iam.loadS3ApiConfigurationFromFiler(option); err != nil { + glog.Warningf("fail to load config: %v", err) + } + } + return iam +} + +func (iam *IdentityAccessManagement) loadS3ApiConfigurationFromFiler(option *S3ApiServerOption) error { + content, err := filer.ReadContent(option.Filer, filer.IamConfigDirecotry, filer.IamIdentityFile) + if err != nil { + return fmt.Errorf("read S3 config: %v", err) + } + return iam.loadS3ApiConfigurationFromBytes(content) +} + +func (iam *IdentityAccessManagement) loadS3ApiConfigurationFromFile(fileName string) error { + content, readErr := ioutil.ReadFile(fileName) + if readErr != nil { + glog.Warningf("fail to read %s : %v", fileName, readErr) + return fmt.Errorf("fail to read %s : %v", fileName, readErr) + } + return iam.loadS3ApiConfigurationFromBytes(content) +} + +func (iam *IdentityAccessManagement) loadS3ApiConfigurationFromBytes(content []byte) error { + s3ApiConfiguration := &iam_pb.S3ApiConfiguration{} + if err := filer.ParseS3ConfigurationFromBytes(content, s3ApiConfiguration); err != nil { + glog.Warningf("unmarshal error: %v", err) + return fmt.Errorf("unmarshal error: %v", err) + } + if err := iam.loadS3ApiConfiguration(s3ApiConfiguration); err != nil { + return err + } + return nil +} + +func (iam *IdentityAccessManagement) loadS3ApiConfiguration(config *iam_pb.S3ApiConfiguration) error { + var identities []*Identity + for _, ident := range config.Identities { + t := &Identity{ + Name: ident.Name, + Credentials: nil, + Actions: nil, + } + for _, action := range ident.Actions { + t.Actions = append(t.Actions, Action(action)) + } + for _, cred := range ident.Credentials { + t.Credentials = append(t.Credentials, &Credential{ + AccessKey: cred.AccessKey, + SecretKey: cred.SecretKey, + }) + } + identities = append(identities, t) + } + + // atomically switch + iam.identities = identities + return nil +} + +func (iam *IdentityAccessManagement) isEnabled() bool { + + return len(iam.identities) > 0 +} + +func (iam *IdentityAccessManagement) lookupByAccessKey(accessKey string) (identity *Identity, cred *Credential, found bool) { + + for _, ident := range iam.identities { + for _, cred := range ident.Credentials { + if cred.AccessKey == accessKey { + return ident, cred, true + } + } + } + return nil, nil, false +} + +func (iam *IdentityAccessManagement) lookupAnonymous() (identity *Identity, found bool) { + + for _, ident := range iam.identities { + if ident.Name == "anonymous" { + return ident, true + } + } + return nil, false +} + +func (iam *IdentityAccessManagement) Auth(f http.HandlerFunc, action Action) http.HandlerFunc { + + if !iam.isEnabled() { + return f + } + + return func(w http.ResponseWriter, r *http.Request) { + identity, errCode := iam.authRequest(r, action) + if errCode == s3err.ErrNone { + if identity != nil && identity.Name != "" { + r.Header.Set(xhttp.AmzIdentityId, identity.Name) + if identity.isAdmin() { + r.Header.Set(xhttp.AmzIsAdmin, "true") + } + } + f(w, r) + return + } + writeErrorResponse(w, errCode, r.URL) + } +} + +// check whether the request has valid access keys +func (iam *IdentityAccessManagement) authRequest(r *http.Request, action Action) (*Identity, s3err.ErrorCode) { + var identity *Identity + var s3Err s3err.ErrorCode + 
var found bool + switch getRequestAuthType(r) { + case authTypeStreamingSigned: + return identity, s3err.ErrNone + case authTypeUnknown: + glog.V(3).Infof("unknown auth type") + return identity, s3err.ErrAccessDenied + case authTypePresignedV2, authTypeSignedV2: + glog.V(3).Infof("v2 auth type") + identity, s3Err = iam.isReqAuthenticatedV2(r) + case authTypeSigned, authTypePresigned: + glog.V(3).Infof("v4 auth type") + identity, s3Err = iam.reqSignatureV4Verify(r) + case authTypePostPolicy: + glog.V(3).Infof("post policy auth type") + return identity, s3err.ErrNone + case authTypeJWT: + glog.V(3).Infof("jwt auth type") + return identity, s3err.ErrNotImplemented + case authTypeAnonymous: + identity, found = iam.lookupAnonymous() + if !found { + return identity, s3err.ErrAccessDenied + } + default: + return identity, s3err.ErrNotImplemented + } + + if s3Err != s3err.ErrNone { + return identity, s3Err + } + + glog.V(3).Infof("user name: %v actions: %v", identity.Name, identity.Actions) + + bucket, _ := getBucketAndObject(r) + + if !identity.canDo(action, bucket) { + return identity, s3err.ErrAccessDenied + } + + return identity, s3err.ErrNone + +} + +func (iam *IdentityAccessManagement) authUser(r *http.Request) (*Identity, s3err.ErrorCode) { + var identity *Identity + var s3Err s3err.ErrorCode + var found bool + switch getRequestAuthType(r) { + case authTypeStreamingSigned: + return identity, s3err.ErrNone + case authTypeUnknown: + glog.V(3).Infof("unknown auth type") + return identity, s3err.ErrAccessDenied + case authTypePresignedV2, authTypeSignedV2: + glog.V(3).Infof("v2 auth type") + identity, s3Err = iam.isReqAuthenticatedV2(r) + case authTypeSigned, authTypePresigned: + glog.V(3).Infof("v4 auth type") + identity, s3Err = iam.reqSignatureV4Verify(r) + case authTypePostPolicy: + glog.V(3).Infof("post policy auth type") + return identity, s3err.ErrNone + case authTypeJWT: + glog.V(3).Infof("jwt auth type") + return identity, s3err.ErrNotImplemented + case authTypeAnonymous: + identity, found = iam.lookupAnonymous() + if !found { + return identity, s3err.ErrAccessDenied + } + default: + return identity, s3err.ErrNotImplemented + } + + glog.V(3).Infof("auth error: %v", s3Err) + if s3Err != s3err.ErrNone { + return identity, s3Err + } + return identity, s3err.ErrNone +} + +func (identity *Identity) canDo(action Action, bucket string) bool { + if identity.isAdmin() { + return true + } + for _, a := range identity.Actions { + if a == action { + return true + } + } + if bucket == "" { + return false + } + limitedByBucket := string(action) + ":" + bucket + adminLimitedByBucket := s3_constants.ACTION_ADMIN + ":" + bucket + for _, a := range identity.Actions { + if string(a) == limitedByBucket { + return true + } + if string(a) == adminLimitedByBucket { + return true + } + } + return false +} + +func (identity *Identity) isAdmin() bool { + for _, a := range identity.Actions { + if a == "Admin" { + return true + } + } + return false +} diff --git a/weed/s3api/auth_credentials_subscribe.go b/weed/s3api/auth_credentials_subscribe.go new file mode 100644 index 000000000..ea4b69550 --- /dev/null +++ b/weed/s3api/auth_credentials_subscribe.go @@ -0,0 +1,70 @@ +package s3api + +import ( + "context" + "fmt" + "github.com/chrislusf/seaweedfs/weed/filer" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "io" + "time" +) + +func (s3a *S3ApiServer) subscribeMetaEvents(clientName string, prefix string, lastTsNs int64) error { + + processEventFn := func(resp 
*filer_pb.SubscribeMetadataResponse) error { + + message := resp.EventNotification + if message.NewEntry == nil { + return nil + } + + dir := resp.Directory + + if message.NewParentPath != "" { + dir = message.NewParentPath + } + if dir == filer.IamConfigDirecotry && message.NewEntry.Name == filer.IamIdentityFile { + if err := s3a.iam.loadS3ApiConfigurationFromBytes(message.NewEntry.Content); err != nil { + return err + } + glog.V(0).Infof("updated %s/%s", filer.IamConfigDirecotry, filer.IamIdentityFile) + } + + return nil + } + + for { + err := s3a.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + stream, err := client.SubscribeMetadata(ctx, &filer_pb.SubscribeMetadataRequest{ + ClientName: clientName, + PathPrefix: prefix, + SinceNs: lastTsNs, + }) + if err != nil { + return fmt.Errorf("subscribe: %v", err) + } + + for { + resp, listenErr := stream.Recv() + if listenErr == io.EOF { + return nil + } + if listenErr != nil { + return listenErr + } + + if err := processEventFn(resp); err != nil { + glog.Fatalf("process %v: %v", resp, err) + } + lastTsNs = resp.TsNs + } + }) + if err != nil { + glog.Errorf("subscribing filer meta change: %v", err) + } + time.Sleep(time.Second) + } +} diff --git a/weed/s3api/auth_credentials_test.go b/weed/s3api/auth_credentials_test.go new file mode 100644 index 000000000..0383ddbcd --- /dev/null +++ b/weed/s3api/auth_credentials_test.go @@ -0,0 +1,69 @@ +package s3api + +import ( + . "github.com/chrislusf/seaweedfs/weed/s3api/s3_constants" + "testing" + + "github.com/golang/protobuf/jsonpb" + + "github.com/chrislusf/seaweedfs/weed/pb/iam_pb" +) + +func TestIdentityListFileFormat(t *testing.T) { + + s3ApiConfiguration := &iam_pb.S3ApiConfiguration{} + + identity1 := &iam_pb.Identity{ + Name: "some_name", + Credentials: []*iam_pb.Credential{ + { + AccessKey: "some_access_key1", + SecretKey: "some_secret_key2", + }, + }, + Actions: []string{ + ACTION_ADMIN, + ACTION_READ, + ACTION_WRITE, + }, + } + identity2 := &iam_pb.Identity{ + Name: "some_read_only_user", + Credentials: []*iam_pb.Credential{ + { + AccessKey: "some_access_key1", + SecretKey: "some_secret_key1", + }, + }, + Actions: []string{ + ACTION_READ, + }, + } + identity3 := &iam_pb.Identity{ + Name: "some_normal_user", + Credentials: []*iam_pb.Credential{ + { + AccessKey: "some_access_key2", + SecretKey: "some_secret_key2", + }, + }, + Actions: []string{ + ACTION_READ, + ACTION_WRITE, + }, + } + + s3ApiConfiguration.Identities = append(s3ApiConfiguration.Identities, identity1) + s3ApiConfiguration.Identities = append(s3ApiConfiguration.Identities, identity2) + s3ApiConfiguration.Identities = append(s3ApiConfiguration.Identities, identity3) + + m := jsonpb.Marshaler{ + EmitDefaults: true, + Indent: " ", + } + + text, _ := m.MarshalToString(s3ApiConfiguration) + + println(text) + +} diff --git a/weed/s3api/auth_signature_v2.go b/weed/s3api/auth_signature_v2.go new file mode 100644 index 000000000..5694a96ac --- /dev/null +++ b/weed/s3api/auth_signature_v2.go @@ -0,0 +1,427 @@ +/* + * The following code tries to reverse engineer the Amazon S3 APIs, + * and is mostly copied from minio implementation. + */ + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +// implied. See the License for the specific language governing +// permissions and limitations under the License. + +package s3api + +import ( + "crypto/hmac" + "crypto/sha1" + "crypto/subtle" + "encoding/base64" + "fmt" + "github.com/chrislusf/seaweedfs/weed/s3api/s3err" + "net" + "net/http" + "net/url" + "path" + "sort" + "strconv" + "strings" + "time" +) + +// Whitelist of resources that will be used in the query string for signature-V2 calculation. +// The list must be kept alphabetically sorted. +var resourceList = []string{ + "acl", + "delete", + "lifecycle", + "location", + "logging", + "notification", + "partNumber", + "policy", + "requestPayment", + "response-cache-control", + "response-content-disposition", + "response-content-encoding", + "response-content-language", + "response-content-type", + "response-expires", + "torrent", + "uploadId", + "uploads", + "versionId", + "versioning", + "versions", + "website", +} + +// Verify if the request has a valid AWS Signature Version '2'. +func (iam *IdentityAccessManagement) isReqAuthenticatedV2(r *http.Request) (*Identity, s3err.ErrorCode) { + if isRequestSignatureV2(r) { + return iam.doesSignV2Match(r) + } + return iam.doesPresignV2SignatureMatch(r) +} + +func (iam *IdentityAccessManagement) doesPolicySignatureV2Match(formValues http.Header) s3err.ErrorCode { + accessKey := formValues.Get("AWSAccessKeyId") + _, cred, found := iam.lookupByAccessKey(accessKey) + if !found { + return s3err.ErrInvalidAccessKeyID + } + policy := formValues.Get("Policy") + signature := formValues.Get("Signature") + if !compareSignatureV2(signature, calculateSignatureV2(policy, cred.SecretKey)) { + return s3err.ErrSignatureDoesNotMatch + } + return s3err.ErrNone +} + +// Authorization = "AWS" + " " + AWSAccessKeyId + ":" + Signature; +// Signature = Base64( HMAC-SHA1( YourSecretKey, UTF-8-Encoding-Of( StringToSign ) ) ); +// +// StringToSign = HTTP-Verb + "\n" + +// Content-Md5 + "\n" + +// Content-Type + "\n" + +// Date + "\n" + +// CanonicalizedProtocolHeaders + +// CanonicalizedResource; +// +// CanonicalizedResource = [ "/" + Bucket ] + +// <HTTP-Request-URI, from the protocol name up to the query string> + +// [ subresource, if present. For example "?acl", "?location", "?logging", or "?torrent"]; +// +// CanonicalizedProtocolHeaders = <described below> + +// doesSignV2Match - Verify authorization header with calculated header in accordance with +// - http://docs.aws.amazon.com/AmazonS3/latest/dev/auth-request-sig-v2.html +// returns true if it matches, false otherwise; if the error is not nil then it is always false. + +func validateV2AuthHeader(v2Auth string) (accessKey string, errCode s3err.ErrorCode) { + if v2Auth == "" { + return "", s3err.ErrAuthHeaderEmpty + } + // Verify if the header algorithm is supported or not. + if !strings.HasPrefix(v2Auth, signV2Algorithm) { + return "", s3err.ErrSignatureVersionNotSupported + } + + // Below is the V2 Signed Auth header format, splitting on `space` (after the `AWS` string). + // Authorization = "AWS" + " " + AWSAccessKeyId + ":" + Signature + authFields := strings.Split(v2Auth, " ") + if len(authFields) != 2 { + return "", s3err.ErrMissingFields + } + + // Then we split on ":", which separates the `AWSAccessKeyId` and the `Signature` string.
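// For example, the AWS Signature V2 header from the S3 developer docs
//
//     Authorization: AWS AKIAIOSFODNN7EXAMPLE:bWq2s1WEIj+Ydj0vQ697zp+IXMU=
//
// splits here into the access key id "AKIAIOSFODNN7EXAMPLE" and the
// base64-encoded signature (both are AWS's published example values).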
+ keySignFields := strings.Split(strings.TrimSpace(authFields[1]), ":") + if len(keySignFields) != 2 { + return "", s3err.ErrMissingFields + } + + return keySignFields[0], s3err.ErrNone +} + +func (iam *IdentityAccessManagement) doesSignV2Match(r *http.Request) (*Identity, s3err.ErrorCode) { + v2Auth := r.Header.Get("Authorization") + + accessKey, apiError := validateV2AuthHeader(v2Auth) + if apiError != s3err.ErrNone { + return nil, apiError + } + + // Access credentials. + // Validate if access key id same. + ident, cred, found := iam.lookupByAccessKey(accessKey) + if !found { + return nil, s3err.ErrInvalidAccessKeyID + } + + // r.RequestURI will have raw encoded URI as sent by the client. + tokens := strings.SplitN(r.RequestURI, "?", 2) + encodedResource := tokens[0] + encodedQuery := "" + if len(tokens) == 2 { + encodedQuery = tokens[1] + } + + unescapedQueries, err := unescapeQueries(encodedQuery) + if err != nil { + return nil, s3err.ErrInvalidQueryParams + } + + encodedResource, err = getResource(encodedResource, r.Host, iam.domain) + if err != nil { + return nil, s3err.ErrInvalidRequest + } + + prefix := fmt.Sprintf("%s %s:", signV2Algorithm, cred.AccessKey) + if !strings.HasPrefix(v2Auth, prefix) { + return nil, s3err.ErrSignatureDoesNotMatch + } + v2Auth = v2Auth[len(prefix):] + expectedAuth := signatureV2(cred, r.Method, encodedResource, strings.Join(unescapedQueries, "&"), r.Header) + if !compareSignatureV2(v2Auth, expectedAuth) { + return nil, s3err.ErrSignatureDoesNotMatch + } + return ident, s3err.ErrNone +} + +// doesPresignV2SignatureMatch - Verify query headers with presigned signature +// - http://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html#RESTAuthenticationQueryStringAuth +// returns ErrNone if matches. S3 errors otherwise. +func (iam *IdentityAccessManagement) doesPresignV2SignatureMatch(r *http.Request) (*Identity, s3err.ErrorCode) { + + // r.RequestURI will have raw encoded URI as sent by the client. + tokens := strings.SplitN(r.RequestURI, "?", 2) + encodedResource := tokens[0] + encodedQuery := "" + if len(tokens) == 2 { + encodedQuery = tokens[1] + } + + var ( + filteredQueries []string + gotSignature string + expires string + accessKey string + err error + ) + + var unescapedQueries []string + unescapedQueries, err = unescapeQueries(encodedQuery) + if err != nil { + return nil, s3err.ErrInvalidQueryParams + } + + // Extract the necessary values from presigned query, construct a list of new filtered queries. + for _, query := range unescapedQueries { + keyval := strings.SplitN(query, "=", 2) + if len(keyval) != 2 { + return nil, s3err.ErrInvalidQueryParams + } + switch keyval[0] { + case "AWSAccessKeyId": + accessKey = keyval[1] + case "Signature": + gotSignature = keyval[1] + case "Expires": + expires = keyval[1] + default: + filteredQueries = append(filteredQueries, query) + } + } + + // Invalid values returns error. + if accessKey == "" || gotSignature == "" || expires == "" { + return nil, s3err.ErrInvalidQueryParams + } + + // Validate if access key id same. + ident, cred, found := iam.lookupByAccessKey(accessKey) + if !found { + return nil, s3err.ErrInvalidAccessKeyID + } + + // Make sure the request has not expired. + expiresInt, err := strconv.ParseInt(expires, 10, 64) + if err != nil { + return nil, s3err.ErrMalformedExpires + } + + // Check if the presigned URL has expired. 
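// Note: with V2 presigning, Expires carries an absolute Unix timestamp in
// seconds rather than a validity duration, so the check below compares it
// against the current clock instead of against the signing time.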
+ if expiresInt < time.Now().UTC().Unix() { + return nil, s3err.ErrExpiredPresignRequest + } + + encodedResource, err = getResource(encodedResource, r.Host, iam.domain) + if err != nil { + return nil, s3err.ErrInvalidRequest + } + + expectedSignature := preSignatureV2(cred, r.Method, encodedResource, strings.Join(filteredQueries, "&"), r.Header, expires) + if !compareSignatureV2(gotSignature, expectedSignature) { + return nil, s3err.ErrSignatureDoesNotMatch + } + + return ident, s3err.ErrNone +} + +// Escape encodedQuery string into unescaped list of query params, returns error +// if any while unescaping the values. +func unescapeQueries(encodedQuery string) (unescapedQueries []string, err error) { + for _, query := range strings.Split(encodedQuery, "&") { + var unescapedQuery string + unescapedQuery, err = url.QueryUnescape(query) + if err != nil { + return nil, err + } + unescapedQueries = append(unescapedQueries, unescapedQuery) + } + return unescapedQueries, nil +} + +// Returns "/bucketName/objectName" for path-style or virtual-host-style requests. +func getResource(path string, host string, domain string) (string, error) { + if domain == "" { + return path, nil + } + // If virtual-host-style is enabled construct the "resource" properly. + if strings.Contains(host, ":") { + // In bucket.mydomain.com:9000, strip out :9000 + var err error + if host, _, err = net.SplitHostPort(host); err != nil { + return "", err + } + } + if !strings.HasSuffix(host, "."+domain) { + return path, nil + } + bucket := strings.TrimSuffix(host, "."+domain) + return "/" + pathJoin(bucket, path), nil +} + +// pathJoin - like path.Join() but retains trailing "/" of the last element +func pathJoin(elem ...string) string { + trailingSlash := "" + if len(elem) > 0 { + if strings.HasSuffix(elem[len(elem)-1], "/") { + trailingSlash = "/" + } + } + return path.Join(elem...) + trailingSlash +} + +// Return the signature v2 of a given request. +func signatureV2(cred *Credential, method string, encodedResource string, encodedQuery string, headers http.Header) string { + stringToSign := getStringToSignV2(method, encodedResource, encodedQuery, headers, "") + signature := calculateSignatureV2(stringToSign, cred.SecretKey) + return signature +} + +// Return string to sign under two different conditions. +// - if expires string is set then string to sign includes date instead of the Date header. +// - if expires string is empty then string to sign includes date header instead. +func getStringToSignV2(method string, encodedResource, encodedQuery string, headers http.Header, expires string) string { + canonicalHeaders := canonicalizedAmzHeadersV2(headers) + if len(canonicalHeaders) > 0 { + canonicalHeaders += "\n" + } + + date := expires // Date is set to expires date for presign operations. + if date == "" { + // If expires date is empty then request header Date is used. + date = headers.Get("Date") + } + + // From the Amazon docs: + // + // StringToSign = HTTP-Verb + "\n" + + // Content-Md5 + "\n" + + // Content-Type + "\n" + + // Date/Expires + "\n" + + // CanonicalizedProtocolHeaders + + // CanonicalizedResource; + stringToSign := strings.Join([]string{ + method, + headers.Get("Content-MD5"), + headers.Get("Content-Type"), + date, + canonicalHeaders, + }, "\n") + + return stringToSign + canonicalizedResourceV2(encodedResource, encodedQuery) +} + +// Return canonical resource string. 
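// For example, "/mybucket/photo.jpg?uploads&versionId=3&foo=bar" canonicalizes
// to "/mybucket/photo.jpg?uploads&versionId=3": "foo" is not a sub-resource in
// resourceList and is dropped, while the kept parameters come out in
// resourceList's sorted order.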
+func canonicalizedResourceV2(encodedResource, encodedQuery string) string { + queries := strings.Split(encodedQuery, "&") + keyval := make(map[string]string) + for _, query := range queries { + key := query + val := "" + index := strings.Index(query, "=") + if index != -1 { + key = query[:index] + val = query[index+1:] + } + keyval[key] = val + } + + var canonicalQueries []string + for _, key := range resourceList { + val, ok := keyval[key] + if !ok { + continue + } + if val == "" { + canonicalQueries = append(canonicalQueries, key) + continue + } + canonicalQueries = append(canonicalQueries, key+"="+val) + } + + // The queries will be already sorted as resourceList is sorted, if canonicalQueries + // is empty strings.Join returns empty. + canonicalQuery := strings.Join(canonicalQueries, "&") + if canonicalQuery != "" { + return encodedResource + "?" + canonicalQuery + } + return encodedResource +} + +// Return canonical headers. +func canonicalizedAmzHeadersV2(headers http.Header) string { + var keys []string + keyval := make(map[string]string) + for key := range headers { + lkey := strings.ToLower(key) + if !strings.HasPrefix(lkey, "x-amz-") { + continue + } + keys = append(keys, lkey) + keyval[lkey] = strings.Join(headers[key], ",") + } + sort.Strings(keys) + var canonicalHeaders []string + for _, key := range keys { + canonicalHeaders = append(canonicalHeaders, key+":"+keyval[key]) + } + return strings.Join(canonicalHeaders, "\n") +} + +func calculateSignatureV2(stringToSign string, secret string) string { + hm := hmac.New(sha1.New, []byte(secret)) + hm.Write([]byte(stringToSign)) + return base64.StdEncoding.EncodeToString(hm.Sum(nil)) +} + +// compareSignatureV2 returns true if and only if both signatures +// are equal. The signatures are expected to be base64 encoded strings +// according to the AWS S3 signature V2 spec. +func compareSignatureV2(sig1, sig2 string) bool { + // Decode signature string to binary byte-sequence representation is required + // as Base64 encoding of a value is not unique: + // For example "aGVsbG8=" and "aGVsbG8=\r" will result in the same byte slice. + signature1, err := base64.StdEncoding.DecodeString(sig1) + if err != nil { + return false + } + signature2, err := base64.StdEncoding.DecodeString(sig2) + if err != nil { + return false + } + return subtle.ConstantTimeCompare(signature1, signature2) == 1 +} + +// Return signature-v2 for the presigned request. +func preSignatureV2(cred *Credential, method string, encodedResource string, encodedQuery string, headers http.Header, expires string) string { + stringToSign := getStringToSignV2(method, encodedResource, encodedQuery, headers, expires) + return calculateSignatureV2(stringToSign, cred.SecretKey) +} diff --git a/weed/s3api/auth_signature_v4.go b/weed/s3api/auth_signature_v4.go new file mode 100644 index 000000000..0df26e6fc --- /dev/null +++ b/weed/s3api/auth_signature_v4.go @@ -0,0 +1,770 @@ +/* + * The following code tries to reverse engineer the Amazon S3 APIs, + * and is mostly copied from minio implementation. + */ + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +// implied. 
See the License for the specific language governing +// permissions and limitations under the License. + +package s3api + +import ( + "bytes" + "crypto/hmac" + "crypto/sha256" + "crypto/subtle" + "encoding/hex" + "github.com/chrislusf/seaweedfs/weed/s3api/s3err" + "io/ioutil" + "net/http" + "net/url" + "regexp" + "sort" + "strconv" + "strings" + "time" + "unicode/utf8" +) + +func (iam *IdentityAccessManagement) reqSignatureV4Verify(r *http.Request) (*Identity, s3err.ErrorCode) { + sha256sum := getContentSha256Cksum(r) + switch { + case isRequestSignatureV4(r): + return iam.doesSignatureMatch(sha256sum, r) + case isRequestPresignedSignatureV4(r): + return iam.doesPresignedSignatureMatch(sha256sum, r) + } + return nil, s3err.ErrAccessDenied +} + +// Streaming AWS Signature Version '4' constants. +const ( + emptySHA256 = "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" + streamingContentSHA256 = "STREAMING-AWS4-HMAC-SHA256-PAYLOAD" + signV4ChunkedAlgorithm = "AWS4-HMAC-SHA256-PAYLOAD" + + // http Header "x-amz-content-sha256" == "UNSIGNED-PAYLOAD" indicates that the + // client did not calculate sha256 of the payload. + unsignedPayload = "UNSIGNED-PAYLOAD" +) + +// Returns SHA256 for calculating canonical-request. +func getContentSha256Cksum(r *http.Request) string { + var ( + defaultSha256Cksum string + v []string + ok bool + ) + + // For a presigned request we look at the query param for sha256. + if isRequestPresignedSignatureV4(r) { + // X-Amz-Content-Sha256, if not set in presigned requests, checksum + // will default to 'UNSIGNED-PAYLOAD'. + defaultSha256Cksum = unsignedPayload + v, ok = r.URL.Query()["X-Amz-Content-Sha256"] + if !ok { + v, ok = r.Header["X-Amz-Content-Sha256"] + } + } else { + // X-Amz-Content-Sha256, if not set in signed requests, checksum + // will default to sha256([]byte("")). + defaultSha256Cksum = emptySHA256 + v, ok = r.Header["X-Amz-Content-Sha256"] + } + + // We found 'X-Amz-Content-Sha256' return the captured value. + if ok { + return v[0] + } + + // We couldn't find 'X-Amz-Content-Sha256'. + return defaultSha256Cksum +} + +// Verify authorization header - http://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html +func (iam *IdentityAccessManagement) doesSignatureMatch(hashedPayload string, r *http.Request) (*Identity, s3err.ErrorCode) { + + // Copy request. + req := *r + + // Save authorization header. + v4Auth := req.Header.Get("Authorization") + + // Parse signature version '4' header. + signV4Values, err := parseSignV4(v4Auth) + if err != s3err.ErrNone { + return nil, err + } + + // Extract all the signed headers along with its values. + extractedSignedHeaders, errCode := extractSignedHeaders(signV4Values.SignedHeaders, r) + if errCode != s3err.ErrNone { + return nil, errCode + } + + // Verify if the access key id matches. + identity, cred, found := iam.lookupByAccessKey(signV4Values.Credential.accessKey) + if !found { + return nil, s3err.ErrInvalidAccessKeyID + } + + // Extract date, if not present throw error. + var date string + if date = req.Header.Get(http.CanonicalHeaderKey("X-Amz-Date")); date == "" { + if date = r.Header.Get("Date"); date == "" { + return nil, s3err.ErrMissingDateHeader + } + } + // Parse date header. + t, e := time.Parse(iso8601Format, date) + if e != nil { + return nil, s3err.ErrMalformedDate + } + + // Query string. 
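// url.Values.Encode sorts by key, so the canonical query string below is
// deterministic regardless of the order in which the client sent the
// parameters.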
+ queryStr := req.URL.Query().Encode() + + // Get hashed Payload + if signV4Values.Credential.scope.service != "s3" && hashedPayload == emptySHA256 && r.Body != nil { + buf, _ := ioutil.ReadAll(r.Body) + r.Body = ioutil.NopCloser(bytes.NewBuffer(buf)) + b, _ := ioutil.ReadAll(bytes.NewBuffer(buf)) + if len(b) != 0 { + bodyHash := sha256.Sum256(b) + hashedPayload = hex.EncodeToString(bodyHash[:]) + } + } + + // Get canonical request. + canonicalRequest := getCanonicalRequest(extractedSignedHeaders, hashedPayload, queryStr, req.URL.Path, req.Method) + + // Get string to sign from canonical request. + stringToSign := getStringToSign(canonicalRequest, t, signV4Values.Credential.getScope()) + + // Get hmac signing key. + signingKey := getSigningKey(cred.SecretKey, + signV4Values.Credential.scope.date, + signV4Values.Credential.scope.region, + signV4Values.Credential.scope.service) + + // Calculate signature. + newSignature := getSignature(signingKey, stringToSign) + + // Verify if signature match. + if !compareSignatureV4(newSignature, signV4Values.Signature) { + return nil, s3err.ErrSignatureDoesNotMatch + } + + // Return error none. + return identity, s3err.ErrNone +} + +// credentialHeader data type represents structured form of Credential +// string from authorization header. +type credentialHeader struct { + accessKey string + scope struct { + date time.Time + region string + service string + request string + } +} + +// signValues data type represents structured form of AWS Signature V4 header. +type signValues struct { + Credential credentialHeader + SignedHeaders []string + Signature string +} + +// Return scope string. +func (c credentialHeader) getScope() string { + return strings.Join([]string{ + c.scope.date.Format(yyyymmdd), + c.scope.region, + c.scope.service, + c.scope.request, + }, "/") +} + +// Authorization: algorithm Credential=accessKeyID/credScope, \ +// SignedHeaders=signedHeaders, Signature=signature +// +func parseSignV4(v4Auth string) (sv signValues, aec s3err.ErrorCode) { + // Replace all spaced strings, some clients can send spaced + // parameters and some won't. So we pro-actively remove any spaces + // to make parsing easier. + v4Auth = strings.Replace(v4Auth, " ", "", -1) + if v4Auth == "" { + return sv, s3err.ErrAuthHeaderEmpty + } + + // Verify if the header algorithm is supported or not. + if !strings.HasPrefix(v4Auth, signV4Algorithm) { + return sv, s3err.ErrSignatureVersionNotSupported + } + + // Strip off the Algorithm prefix. + v4Auth = strings.TrimPrefix(v4Auth, signV4Algorithm) + authFields := strings.Split(strings.TrimSpace(v4Auth), ",") + if len(authFields) != 3 { + return sv, s3err.ErrMissingFields + } + + // Initialize signature version '4' structured header. + signV4Values := signValues{} + + var err s3err.ErrorCode + // Save credentail values. + signV4Values.Credential, err = parseCredentialHeader(authFields[0]) + if err != s3err.ErrNone { + return sv, err + } + + // Save signed headers. + signV4Values.SignedHeaders, err = parseSignedHeader(authFields[1]) + if err != s3err.ErrNone { + return sv, err + } + + // Save signature. + signV4Values.Signature, err = parseSignature(authFields[2]) + if err != s3err.ErrNone { + return sv, err + } + + // Return the structure here. + return signV4Values, s3err.ErrNone +} + +// parse credentialHeader string into its structured form. 
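// For example:
//
//     Credential=AKIAIOSFODNN7EXAMPLE/20130524/us-east-1/s3/aws4_request
//
// yields the access key id plus a four-part scope: date ("20130524"), region
// ("us-east-1"), service ("s3") and the literal request type ("aws4_request").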
+func parseCredentialHeader(credElement string) (ch credentialHeader, aec s3err.ErrorCode) { + creds := strings.Split(strings.TrimSpace(credElement), "=") + if len(creds) != 2 { + return ch, s3err.ErrMissingFields + } + if creds[0] != "Credential" { + return ch, s3err.ErrMissingCredTag + } + credElements := strings.Split(strings.TrimSpace(creds[1]), "/") + if len(credElements) != 5 { + return ch, s3err.ErrCredMalformed + } + // Save access key id. + cred := credentialHeader{ + accessKey: credElements[0], + } + var e error + cred.scope.date, e = time.Parse(yyyymmdd, credElements[1]) + if e != nil { + return ch, s3err.ErrMalformedCredentialDate + } + + cred.scope.region = credElements[2] + cred.scope.service = credElements[3] // "s3" + cred.scope.request = credElements[4] // "aws4_request" + return cred, s3err.ErrNone +} + +// Parse slice of signed headers from signed headers tag. +func parseSignedHeader(signedHdrElement string) ([]string, s3err.ErrorCode) { + signedHdrFields := strings.Split(strings.TrimSpace(signedHdrElement), "=") + if len(signedHdrFields) != 2 { + return nil, s3err.ErrMissingFields + } + if signedHdrFields[0] != "SignedHeaders" { + return nil, s3err.ErrMissingSignHeadersTag + } + if signedHdrFields[1] == "" { + return nil, s3err.ErrMissingFields + } + signedHeaders := strings.Split(signedHdrFields[1], ";") + return signedHeaders, s3err.ErrNone +} + +// Parse signature from signature tag. +func parseSignature(signElement string) (string, s3err.ErrorCode) { + signFields := strings.Split(strings.TrimSpace(signElement), "=") + if len(signFields) != 2 { + return "", s3err.ErrMissingFields + } + if signFields[0] != "Signature" { + return "", s3err.ErrMissingSignTag + } + if signFields[1] == "" { + return "", s3err.ErrMissingFields + } + signature := signFields[1] + return signature, s3err.ErrNone +} + +// doesPolicySignatureMatch - Verify query headers with post policy +// - http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-HTTPPOSTConstructPolicy.html +// returns ErrNone if the signature matches. +func (iam *IdentityAccessManagement) doesPolicySignatureV4Match(formValues http.Header) s3err.ErrorCode { + + // Parse credential tag. + credHeader, err := parseCredentialHeader("Credential=" + formValues.Get("X-Amz-Credential")) + if err != s3err.ErrNone { + return s3err.ErrMissingFields + } + + _, cred, found := iam.lookupByAccessKey(credHeader.accessKey) + if !found { + return s3err.ErrInvalidAccessKeyID + } + + // Get signing key. + signingKey := getSigningKey(cred.SecretKey, credHeader.scope.date, credHeader.scope.region, credHeader.scope.service) + + // Get signature. + newSignature := getSignature(signingKey, formValues.Get("Policy")) + + // Verify signature. + if !compareSignatureV4(newSignature, formValues.Get("X-Amz-Signature")) { + return s3err.ErrSignatureDoesNotMatch + } + + // Success. + return s3err.ErrNone +} + +// check query headers with presigned signature +// - http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-query-string-auth.html +func (iam *IdentityAccessManagement) doesPresignedSignatureMatch(hashedPayload string, r *http.Request) (*Identity, s3err.ErrorCode) { + + // Copy request + req := *r + + // Parse request query string. + pSignValues, err := parsePreSignV4(req.URL.Query()) + if err != s3err.ErrNone { + return nil, err + } + + // Verify if the access key id matches. 
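// For presigned requests the access key arrives in the X-Amz-Credential query
// parameter rather than in an Authorization header; it was parsed into
// pSignValues above.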
+ identity, cred, found := iam.lookupByAccessKey(pSignValues.Credential.accessKey) + if !found { + return nil, s3err.ErrInvalidAccessKeyID + } + + // Extract all the signed headers along with its values. + extractedSignedHeaders, errCode := extractSignedHeaders(pSignValues.SignedHeaders, r) + if errCode != s3err.ErrNone { + return nil, errCode + } + // Construct new query. + query := make(url.Values) + if req.URL.Query().Get("X-Amz-Content-Sha256") != "" { + query.Set("X-Amz-Content-Sha256", hashedPayload) + } + + query.Set("X-Amz-Algorithm", signV4Algorithm) + + now := time.Now().UTC() + + // If the host which signed the request is slightly ahead in time (by less than globalMaxSkewTime) the + // request should still be allowed. + if pSignValues.Date.After(now.Add(15 * time.Minute)) { + return nil, s3err.ErrRequestNotReadyYet + } + + if now.Sub(pSignValues.Date) > pSignValues.Expires { + return nil, s3err.ErrExpiredPresignRequest + } + + // Save the date and expires. + t := pSignValues.Date + expireSeconds := int(pSignValues.Expires / time.Second) + + // Construct the query. + query.Set("X-Amz-Date", t.Format(iso8601Format)) + query.Set("X-Amz-Expires", strconv.Itoa(expireSeconds)) + query.Set("X-Amz-SignedHeaders", getSignedHeaders(extractedSignedHeaders)) + query.Set("X-Amz-Credential", cred.AccessKey+"/"+getScope(t, pSignValues.Credential.scope.region)) + + // Save other headers available in the request parameters. + for k, v := range req.URL.Query() { + + // Handle the metadata in presigned put query string + if strings.Contains(strings.ToLower(k), "x-amz-meta-") { + query.Set(k, v[0]) + } + + if strings.HasPrefix(strings.ToLower(k), "x-amz") { + continue + } + query[k] = v + } + + // Get the encoded query. + encodedQuery := query.Encode() + + // Verify if date query is same. + if req.URL.Query().Get("X-Amz-Date") != query.Get("X-Amz-Date") { + return nil, s3err.ErrSignatureDoesNotMatch + } + // Verify if expires query is same. + if req.URL.Query().Get("X-Amz-Expires") != query.Get("X-Amz-Expires") { + return nil, s3err.ErrSignatureDoesNotMatch + } + // Verify if signed headers query is same. + if req.URL.Query().Get("X-Amz-SignedHeaders") != query.Get("X-Amz-SignedHeaders") { + return nil, s3err.ErrSignatureDoesNotMatch + } + // Verify if credential query is same. + if req.URL.Query().Get("X-Amz-Credential") != query.Get("X-Amz-Credential") { + return nil, s3err.ErrSignatureDoesNotMatch + } + // Verify if sha256 payload query is same. + if req.URL.Query().Get("X-Amz-Content-Sha256") != "" { + if req.URL.Query().Get("X-Amz-Content-Sha256") != query.Get("X-Amz-Content-Sha256") { + return nil, s3err.ErrContentSHA256Mismatch + } + } + + /// Verify finally if signature is same. + + // Get canonical request. + presignedCanonicalReq := getCanonicalRequest(extractedSignedHeaders, hashedPayload, encodedQuery, req.URL.Path, req.Method) + + // Get string to sign from canonical request. + presignedStringToSign := getStringToSign(presignedCanonicalReq, t, pSignValues.Credential.getScope()) + + // Get hmac presigned signing key. + presignedSigningKey := getSigningKey(cred.SecretKey, + pSignValues.Credential.scope.date, + pSignValues.Credential.scope.region, + pSignValues.Credential.scope.service) + + // Get new signature. + newSignature := getSignature(presignedSigningKey, presignedStringToSign) + + // Verify signature. 
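// Every other component of the presigned URL has already been cross-checked
// above, so a mismatch at this point implicates the signature itself or the
// secret key used to compute it.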
+ if !compareSignatureV4(req.URL.Query().Get("X-Amz-Signature"), newSignature) { + return nil, s3err.ErrSignatureDoesNotMatch + } + return identity, s3err.ErrNone +} + +func contains(list []string, elem string) bool { + for _, t := range list { + if t == elem { + return true + } + } + return false +} + +// preSignValues data type represents the structured form of an AWS Signature V4 query string. +type preSignValues struct { + signValues + Date time.Time + Expires time.Duration +} + +// Parses signature version '4' query string of the following form. +// +// querystring = X-Amz-Algorithm=algorithm +// querystring += &X-Amz-Credential= urlencode(accessKey + '/' + credential_scope) +// querystring += &X-Amz-Date=date +// querystring += &X-Amz-Expires=timeout interval +// querystring += &X-Amz-SignedHeaders=signed_headers +// querystring += &X-Amz-Signature=signature +// +// verifies if any of the necessary query params are missing in the presigned request. +func doesV4PresignParamsExist(query url.Values) s3err.ErrorCode { + v4PresignQueryParams := []string{"X-Amz-Algorithm", "X-Amz-Credential", "X-Amz-Signature", "X-Amz-Date", "X-Amz-SignedHeaders", "X-Amz-Expires"} + for _, v4PresignQueryParam := range v4PresignQueryParams { + if _, ok := query[v4PresignQueryParam]; !ok { + return s3err.ErrInvalidQueryParams + } + } + return s3err.ErrNone +} + +// Parses all the presigned signature values into separate elements. +func parsePreSignV4(query url.Values) (psv preSignValues, aec s3err.ErrorCode) { + var err s3err.ErrorCode + // Verify whether the required query params exist. + err = doesV4PresignParamsExist(query) + if err != s3err.ErrNone { + return psv, err + } + + // Verify if the query algorithm is supported or not. + if query.Get("X-Amz-Algorithm") != signV4Algorithm { + return psv, s3err.ErrInvalidQuerySignatureAlgo + } + + // Initialize signature version '4' structured header. + preSignV4Values := preSignValues{} + + // Save credential. + preSignV4Values.Credential, err = parseCredentialHeader("Credential=" + query.Get("X-Amz-Credential")) + if err != s3err.ErrNone { + return psv, err + } + + var e error + // Save date in native time.Time. + preSignV4Values.Date, e = time.Parse(iso8601Format, query.Get("X-Amz-Date")) + if e != nil { + return psv, s3err.ErrMalformedPresignedDate + } + + // Save expires in native time.Duration. + preSignV4Values.Expires, e = time.ParseDuration(query.Get("X-Amz-Expires") + "s") + if e != nil { + return psv, s3err.ErrMalformedExpires + } + + if preSignV4Values.Expires < 0 { + return psv, s3err.ErrNegativeExpires + } + + // Check that the expiry time is at most 7 days (604800 seconds). + if preSignV4Values.Expires.Seconds() > 604800 { + return psv, s3err.ErrMaximumExpires + } + + // Save signed headers. + preSignV4Values.SignedHeaders, err = parseSignedHeader("SignedHeaders=" + query.Get("X-Amz-SignedHeaders")) + if err != s3err.ErrNone { + return psv, err + } + + // Save signature. + preSignV4Values.Signature, err = parseSignature("Signature=" + query.Get("X-Amz-Signature")) + if err != s3err.ErrNone { + return psv, err + } + + // Return the structured form of the signature query string. + return preSignV4Values, s3err.ErrNone +} + +// extractSignedHeaders extracts the signed headers from the Authorization header. +func extractSignedHeaders(signedHeaders []string, r *http.Request) (http.Header, s3err.ErrorCode) { + reqHeaders := r.Header + // Find whether "host" is part of the list of signed headers; + // if not, return ErrUnsignedHeaders. "host" is mandatory.
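// (The SigV4 spec requires "host" to be signed on every request; Go moves it
// out of the header map into r.Host, which is why it gets special-cased below.)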
+ if !contains(signedHeaders, "host") { + return nil, s3err.ErrUnsignedHeaders + } + extractedSignedHeaders := make(http.Header) + for _, header := range signedHeaders { + // `host` will not be found in the headers, but can be found in r.Host. + // Nevertheless the list of signed headers is required to contain host. + val, ok := reqHeaders[http.CanonicalHeaderKey(header)] + if ok { + for _, enc := range val { + extractedSignedHeaders.Add(header, enc) + } + continue + } + switch header { + case "expect": + // Golang's http server strips off the 'Expect' header; if the + // client sent it as part of the signed headers we need to + // handle it, otherwise we would see a signature mismatch. + // `aws-cli` sets this as part of signed headers. + // + // According to + // http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.20 + // the Expect header is always of the form: + // + // Expect = "Expect" ":" 1#expectation + // expectation = "100-continue" | expectation-extension + // + // So it is safe to assume that '100-continue' is what would + // be sent; for the time being keep this workaround. + // Adding a *TODO* to remove this later when the Golang server + // doesn't filter out the 'Expect' header. + extractedSignedHeaders.Set(header, "100-continue") + case "host": + // Go's http server removes "host" from Request.Header. + extractedSignedHeaders.Set(header, r.Host) + case "transfer-encoding": + for _, enc := range r.TransferEncoding { + extractedSignedHeaders.Add(header, enc) + } + case "content-length": + // The Signature-V4 spec excludes Content-Length from the signed headers list for signature calculation, + // but some clients deviate from this rule. Hence we consider Content-Length for signature + // calculation to be compatible with such clients. + extractedSignedHeaders.Set(header, strconv.FormatInt(r.ContentLength, 10)) + default: + return nil, s3err.ErrUnsignedHeaders + } + } + return extractedSignedHeaders, s3err.ErrNone +} + +// getSignedHeaders generates an alphabetically sorted, semicolon-separated list of lowercase request header names. +func getSignedHeaders(signedHeaders http.Header) string { + var headers []string + for k := range signedHeaders { + headers = append(headers, strings.ToLower(k)) + } + sort.Strings(headers) + return strings.Join(headers, ";") +} + +// getScope generates a string of a specific date, an AWS region, and a service. +func getScope(t time.Time, region string) string { + scope := strings.Join([]string{ + t.Format(yyyymmdd), + region, + "s3", + "aws4_request", + }, "/") + return scope +} + +// getCanonicalRequest generates a canonical request of style +// +// canonicalRequest = +// <HTTPMethod>\n +// <CanonicalURI>\n +// <CanonicalQueryString>\n +// <CanonicalHeaders>\n +// <SignedHeaders>\n +// <HashedPayload> +// +func getCanonicalRequest(extractedSignedHeaders http.Header, payload, queryStr, urlPath, method string) string { + rawQuery := strings.Replace(queryStr, "+", "%20", -1) + encodedPath := encodePath(urlPath) + canonicalRequest := strings.Join([]string{ + method, + encodedPath, + rawQuery, + getCanonicalHeaders(extractedSignedHeaders), + getSignedHeaders(extractedSignedHeaders), + payload, + }, "\n") + return canonicalRequest +} + +// getStringToSign generates a string to sign based on selected query values.
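// Its layout is, for example:
//
//     AWS4-HMAC-SHA256
//     20130524T000000Z
//     20130524/us-east-1/s3/aws4_request
//     <hex-encoded SHA-256 of the canonical request>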
+func getStringToSign(canonicalRequest string, t time.Time, scope string) string { + stringToSign := signV4Algorithm + "\n" + t.Format(iso8601Format) + "\n" + stringToSign = stringToSign + scope + "\n" + canonicalRequestBytes := sha256.Sum256([]byte(canonicalRequest)) + stringToSign = stringToSign + hex.EncodeToString(canonicalRequestBytes[:]) + return stringToSign +} + +// sumHMAC calculate hmac between two input byte array. +func sumHMAC(key []byte, data []byte) []byte { + hash := hmac.New(sha256.New, key) + hash.Write(data) + return hash.Sum(nil) +} + +// getSigningKey hmac seed to calculate final signature. +func getSigningKey(secretKey string, t time.Time, region string, service string) []byte { + date := sumHMAC([]byte("AWS4"+secretKey), []byte(t.Format(yyyymmdd))) + regionBytes := sumHMAC(date, []byte(region)) + serviceBytes := sumHMAC(regionBytes, []byte(service)) + signingKey := sumHMAC(serviceBytes, []byte("aws4_request")) + return signingKey +} + +// getSignature final signature in hexadecimal form. +func getSignature(signingKey []byte, stringToSign string) string { + return hex.EncodeToString(sumHMAC(signingKey, []byte(stringToSign))) +} + +// getCanonicalHeaders generate a list of request headers with their values +func getCanonicalHeaders(signedHeaders http.Header) string { + var headers []string + vals := make(http.Header) + for k, vv := range signedHeaders { + headers = append(headers, strings.ToLower(k)) + vals[strings.ToLower(k)] = vv + } + sort.Strings(headers) + + var buf bytes.Buffer + for _, k := range headers { + buf.WriteString(k) + buf.WriteByte(':') + for idx, v := range vals[k] { + if idx > 0 { + buf.WriteByte(',') + } + buf.WriteString(signV4TrimAll(v)) + } + buf.WriteByte('\n') + } + return buf.String() +} + +// Trim leading and trailing spaces and replace sequential spaces with one space, following Trimall() +// in http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html +func signV4TrimAll(input string) string { + // Compress adjacent spaces (a space is determined by + // unicode.IsSpace() internally here) to one space and return + return strings.Join(strings.Fields(input), " ") +} + +// if object matches reserved string, no need to encode them +var reservedObjectNames = regexp.MustCompile("^[a-zA-Z0-9-_.~/]+$") + +// EncodePath encode the strings from UTF-8 byte representations to HTML hex escape sequences +// +// This is necessary since regular url.Parse() and url.Encode() functions do not support UTF-8 +// non english characters cannot be parsed due to the nature in which url.Encode() is written +// +// This function on the other hand is a direct replacement for url.Encode() technique to support +// pretty much every UTF-8 character. 
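An aside before the path-encoding helper below: the key-derivation chain implemented by getSigningKey above is easy to sanity-check in isolation. A minimal standalone sketch, not part of the patch, using the placeholder secret key from the AWS documentation:

package main

import (
	"crypto/hmac"
	"crypto/sha256"
	"encoding/hex"
	"fmt"
)

// hmacSHA256 mirrors sumHMAC from auth_signature_v4.go.
func hmacSHA256(key, data []byte) []byte {
	h := hmac.New(sha256.New, key)
	h.Write(data)
	return h.Sum(nil)
}

func main() {
	// Example inputs only; the secret is AWS's documented placeholder key.
	secret := "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY"
	dateKey := hmacSHA256([]byte("AWS4"+secret), []byte("20130524"))
	regionKey := hmacSHA256(dateKey, []byte("us-east-1"))
	serviceKey := hmacSHA256(regionKey, []byte("s3"))
	signingKey := hmacSHA256(serviceKey, []byte("aws4_request"))
	fmt.Println(hex.EncodeToString(signingKey)) // the per-day, per-region, per-service signing key
}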
+func encodePath(pathName string) string { + if reservedObjectNames.MatchString(pathName) { + return pathName + } + var encodedPathname string + for _, s := range pathName { + if 'A' <= s && s <= 'Z' || 'a' <= s && s <= 'z' || '0' <= s && s <= '9' { // §2.3 Unreserved characters (mark) + encodedPathname = encodedPathname + string(s) + continue + } + switch s { + case '-', '_', '.', '~', '/': // §2.3 Unreserved characters (mark) + encodedPathname = encodedPathname + string(s) + continue + default: + len := utf8.RuneLen(s) + if len < 0 { + // if utf8 cannot convert return the same string as is + return pathName + } + u := make([]byte, len) + utf8.EncodeRune(u, s) + for _, r := range u { + hex := hex.EncodeToString([]byte{r}) + encodedPathname = encodedPathname + "%" + strings.ToUpper(hex) + } + } + } + return encodedPathname +} + +// compareSignatureV4 returns true if and only if both signatures +// are equal. The signatures are expected to be HEX encoded strings +// according to the AWS S3 signature V4 spec. +func compareSignatureV4(sig1, sig2 string) bool { + // The CTC using []byte(str) works because the hex encoding + // is unique for a sequence of bytes. See also compareSignatureV2. + return subtle.ConstantTimeCompare([]byte(sig1), []byte(sig2)) == 1 +} diff --git a/weed/s3api/auto_signature_v4_test.go b/weed/s3api/auto_signature_v4_test.go new file mode 100644 index 000000000..b47cd5f2d --- /dev/null +++ b/weed/s3api/auto_signature_v4_test.go @@ -0,0 +1,421 @@ +package s3api + +import ( + "bytes" + "crypto/md5" + "crypto/sha256" + "encoding/base64" + "encoding/hex" + "errors" + "fmt" + "github.com/chrislusf/seaweedfs/weed/s3api/s3err" + "io" + "io/ioutil" + "net/http" + "net/url" + "sort" + "strconv" + "strings" + "testing" + "time" + "unicode/utf8" +) + +// TestIsRequestPresignedSignatureV4 - Test validates the logic for presign signature verision v4 detection. +func TestIsRequestPresignedSignatureV4(t *testing.T) { + testCases := []struct { + inputQueryKey string + inputQueryValue string + expectedResult bool + }{ + // Test case - 1. + // Test case with query key ""X-Amz-Credential" set. + {"", "", false}, + // Test case - 2. + {"X-Amz-Credential", "", true}, + // Test case - 3. + {"X-Amz-Content-Sha256", "", false}, + } + + for i, testCase := range testCases { + // creating an input HTTP request. + // Only the query parameters are relevant for this particular test. + inputReq, err := http.NewRequest("GET", "http://example.com", nil) + if err != nil { + t.Fatalf("Error initializing input HTTP request: %v", err) + } + q := inputReq.URL.Query() + q.Add(testCase.inputQueryKey, testCase.inputQueryValue) + inputReq.URL.RawQuery = q.Encode() + + actualResult := isRequestPresignedSignatureV4(inputReq) + if testCase.expectedResult != actualResult { + t.Errorf("Test %d: Expected the result to `%v`, but instead got `%v`", i+1, testCase.expectedResult, actualResult) + } + } +} + +// Tests is requested authenticated function, tests replies for s3 errors. +func TestIsReqAuthenticated(t *testing.T) { + option := S3ApiServerOption{} + iam := NewIdentityAccessManagement(&option) + iam.identities = []*Identity{ + { + Name: "someone", + Credentials: []*Credential{ + { + AccessKey: "access_key_1", + SecretKey: "secret_key_1", + }, + }, + Actions: nil, + }, + } + + // List of test cases for validating http request authentication. + testCases := []struct { + req *http.Request + s3Error s3err.ErrorCode + }{ + // When request is unsigned, access denied is returned. 
+ {mustNewRequest("GET", "http://127.0.0.1:9000", 0, nil, t), s3err.ErrAccessDenied}, + // When request is properly signed, error is none. + {mustNewSignedRequest("GET", "http://127.0.0.1:9000", 0, nil, t), s3err.ErrNone}, + } + + // Validates all testcases. + for i, testCase := range testCases { + if _, s3Error := iam.reqSignatureV4Verify(testCase.req); s3Error != testCase.s3Error { + ioutil.ReadAll(testCase.req.Body) + t.Fatalf("Test %d: Unexpected S3 error: want %d - got %d", i, testCase.s3Error, s3Error) + } + } +} + +func TestCheckAdminRequestAuthType(t *testing.T) { + option := S3ApiServerOption{} + iam := NewIdentityAccessManagement(&option) + iam.identities = []*Identity{ + { + Name: "someone", + Credentials: []*Credential{ + { + AccessKey: "access_key_1", + SecretKey: "secret_key_1", + }, + }, + Actions: nil, + }, + } + + testCases := []struct { + Request *http.Request + ErrCode s3err.ErrorCode + }{ + {Request: mustNewRequest("GET", "http://127.0.0.1:9000", 0, nil, t), ErrCode: s3err.ErrAccessDenied}, + {Request: mustNewSignedRequest("GET", "http://127.0.0.1:9000", 0, nil, t), ErrCode: s3err.ErrNone}, + {Request: mustNewPresignedRequest("GET", "http://127.0.0.1:9000", 0, nil, t), ErrCode: s3err.ErrNone}, + } + for i, testCase := range testCases { + if _, s3Error := iam.reqSignatureV4Verify(testCase.Request); s3Error != testCase.ErrCode { + t.Errorf("Test %d: Unexpected s3error returned wanted %d, got %d", i, testCase.ErrCode, s3Error) + } + } +} + +// Provides a fully populated http request instance, fails otherwise. +func mustNewRequest(method string, urlStr string, contentLength int64, body io.ReadSeeker, t *testing.T) *http.Request { + req, err := newTestRequest(method, urlStr, contentLength, body) + if err != nil { + t.Fatalf("Unable to initialize new http request %s", err) + } + return req +} + +// This is similar to mustNewRequest but additionally the request +// is signed with AWS Signature V4, fails if not able to do so. +func mustNewSignedRequest(method string, urlStr string, contentLength int64, body io.ReadSeeker, t *testing.T) *http.Request { + req := mustNewRequest(method, urlStr, contentLength, body, t) + cred := &Credential{"access_key_1", "secret_key_1"} + if err := signRequestV4(req, cred.AccessKey, cred.SecretKey); err != nil { + t.Fatalf("Unable to inititalized new signed http request %s", err) + } + return req +} + +// This is similar to mustNewRequest but additionally the request +// is presigned with AWS Signature V4, fails if not able to do so. +func mustNewPresignedRequest(method string, urlStr string, contentLength int64, body io.ReadSeeker, t *testing.T) *http.Request { + req := mustNewRequest(method, urlStr, contentLength, body, t) + cred := &Credential{"access_key_1", "secret_key_1"} + if err := preSignV4(req, cred.AccessKey, cred.SecretKey, int64(10*time.Minute.Seconds())); err != nil { + t.Fatalf("Unable to inititalized new signed http request %s", err) + } + return req +} + +// Returns new HTTP request object. +func newTestRequest(method, urlStr string, contentLength int64, body io.ReadSeeker) (*http.Request, error) { + if method == "" { + method = "POST" + } + + // Save for subsequent use + var hashedPayload string + var md5Base64 string + switch { + case body == nil: + hashedPayload = getSHA256Hash([]byte{}) + default: + payloadBytes, err := ioutil.ReadAll(body) + if err != nil { + return nil, err + } + hashedPayload = getSHA256Hash(payloadBytes) + md5Base64 = getMD5HashBase64(payloadBytes) + } + // Seek back to beginning. 
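// Seek(0, 0) is offset zero relative to io.SeekStart, rewinding the body so
// the request transmits the same bytes that were just hashed.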
+ if body != nil { + body.Seek(0, 0) + } else { + body = bytes.NewReader([]byte("")) + } + req, err := http.NewRequest(method, urlStr, body) + if err != nil { + return nil, err + } + if md5Base64 != "" { + req.Header.Set("Content-Md5", md5Base64) + } + req.Header.Set("x-amz-content-sha256", hashedPayload) + + // Add Content-Length + req.ContentLength = contentLength + + return req, nil +} + +// getSHA256Hash returns the SHA-256 hash in hex encoding of given data. +func getSHA256Hash(data []byte) string { + return hex.EncodeToString(getSHA256Sum(data)) +} + +// getMD5HashBase64 returns the MD5 hash in base64 encoding of given data. +func getMD5HashBase64(data []byte) string { + return base64.StdEncoding.EncodeToString(getMD5Sum(data)) +} + +// getSHA256Sum returns the SHA-256 sum of given data. +func getSHA256Sum(data []byte) []byte { + hash := sha256.New() + hash.Write(data) + return hash.Sum(nil) +} + +// getMD5Sum returns the MD5 sum of given data. +func getMD5Sum(data []byte) []byte { + hash := md5.New() + hash.Write(data) + return hash.Sum(nil) +} + +// getMD5Hash returns the MD5 hash in hex encoding of given data. +func getMD5Hash(data []byte) string { + return hex.EncodeToString(getMD5Sum(data)) +} + +var ignoredHeaders = map[string]bool{ + "Authorization": true, + "Content-Type": true, + "Content-Length": true, + "User-Agent": true, +} + +// Sign the given request using Signature V4. +func signRequestV4(req *http.Request, accessKey, secretKey string) error { + // Get hashed payload. + hashedPayload := req.Header.Get("x-amz-content-sha256") + if hashedPayload == "" { + return fmt.Errorf("Invalid hashed payload") + } + + currTime := time.Now() + + // Set x-amz-date. + req.Header.Set("x-amz-date", currTime.Format(iso8601Format)) + + // Get header map. + headerMap := make(map[string][]string) + for k, vv := range req.Header { + // If the request header key is not in the ignored headers, then add it. + if _, ok := ignoredHeaders[http.CanonicalHeaderKey(k)]; !ok { + headerMap[strings.ToLower(k)] = vv + } + } + + // Get header keys. + headers := []string{"host"} + for k := range headerMap { + headers = append(headers, k) + } + sort.Strings(headers) + + region := "us-east-1" + + // Get canonical headers. + var buf bytes.Buffer + for _, k := range headers { + buf.WriteString(k) + buf.WriteByte(':') + switch { + case k == "host": + buf.WriteString(req.URL.Host) + fallthrough + default: + for idx, v := range headerMap[k] { + if idx > 0 { + buf.WriteByte(',') + } + buf.WriteString(v) + } + buf.WriteByte('\n') + } + } + canonicalHeaders := buf.String() + + // Get signed headers. + signedHeaders := strings.Join(headers, ";") + + // Get canonical query string. + req.URL.RawQuery = strings.Replace(req.URL.Query().Encode(), "+", "%20", -1) + + // Get canonical URI. + canonicalURI := EncodePath(req.URL.Path) + + // Get canonical request. + // canonicalRequest = + // <HTTPMethod>\n + // <CanonicalURI>\n + // <CanonicalQueryString>\n + // <CanonicalHeaders>\n + // <SignedHeaders>\n + // <HashedPayload> + canonicalRequest := strings.Join([]string{ + req.Method, + canonicalURI, + req.URL.RawQuery, + canonicalHeaders, + signedHeaders, + hashedPayload, + }, "\n") + + // Get scope.
+ scope := strings.Join([]string{ + currTime.Format(yyyymmdd), + region, + "s3", + "aws4_request", + }, "/") + + stringToSign := "AWS4-HMAC-SHA256" + "\n" + currTime.Format(iso8601Format) + "\n" + stringToSign = stringToSign + scope + "\n" + stringToSign = stringToSign + getSHA256Hash([]byte(canonicalRequest)) + + date := sumHMAC([]byte("AWS4"+secretKey), []byte(currTime.Format(yyyymmdd))) + regionHMAC := sumHMAC(date, []byte(region)) + service := sumHMAC(regionHMAC, []byte("s3")) + signingKey := sumHMAC(service, []byte("aws4_request")) + + signature := hex.EncodeToString(sumHMAC(signingKey, []byte(stringToSign))) + + // final Authorization header + parts := []string{ + "AWS4-HMAC-SHA256" + " Credential=" + accessKey + "/" + scope, + "SignedHeaders=" + signedHeaders, + "Signature=" + signature, + } + auth := strings.Join(parts, ", ") + req.Header.Set("Authorization", auth) + + return nil +} + +// preSignV4 presign the request, in accordance with +// http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-query-string-auth.html. +func preSignV4(req *http.Request, accessKeyID, secretAccessKey string, expires int64) error { + // Presign is not needed for anonymous credentials. + if accessKeyID == "" || secretAccessKey == "" { + return errors.New("Presign cannot be generated without access and secret keys") + } + + region := "us-east-1" + date := time.Now().UTC() + scope := getScope(date, region) + credential := fmt.Sprintf("%s/%s", accessKeyID, scope) + + // Set URL query. + query := req.URL.Query() + query.Set("X-Amz-Algorithm", signV4Algorithm) + query.Set("X-Amz-Date", date.Format(iso8601Format)) + query.Set("X-Amz-Expires", strconv.FormatInt(expires, 10)) + query.Set("X-Amz-SignedHeaders", "host") + query.Set("X-Amz-Credential", credential) + query.Set("X-Amz-Content-Sha256", unsignedPayload) + + // "host" is the only header required to be signed for Presigned URLs. + extractedSignedHeaders := make(http.Header) + extractedSignedHeaders.Set("host", req.Host) + + queryStr := strings.Replace(query.Encode(), "+", "%20", -1) + canonicalRequest := getCanonicalRequest(extractedSignedHeaders, unsignedPayload, queryStr, req.URL.Path, req.Method) + stringToSign := getStringToSign(canonicalRequest, date, scope) + signingKey := getSigningKey(secretAccessKey, date, region, "s3") + signature := getSignature(signingKey, stringToSign) + + req.URL.RawQuery = query.Encode() + + // Add signature header to RawQuery. + req.URL.RawQuery += "&X-Amz-Signature=" + url.QueryEscape(signature) + + // Construct the final presigned URL. + return nil +} + +// EncodePath encode the strings from UTF-8 byte representations to HTML hex escape sequences +// +// This is necessary since regular url.Parse() and url.Encode() functions do not support UTF-8 +// non english characters cannot be parsed due to the nature in which url.Encode() is written +// +// This function on the other hand is a direct replacement for url.Encode() technique to support +// pretty much every UTF-8 character. 
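Before the EncodePath helper (a test-side copy of encodePath from auth_signature_v4.go), a sketch of how these helpers compose into a test. The test name and bucket/object values are hypothetical; it mirrors TestCheckAdminRequestAuthType above:

func TestPresignedPutIsAccepted(t *testing.T) { // hypothetical test, for illustration
	option := S3ApiServerOption{}
	iam := NewIdentityAccessManagement(&option)
	iam.identities = []*Identity{
		{
			Name: "someone",
			Credentials: []*Credential{
				{AccessKey: "access_key_1", SecretKey: "secret_key_1"},
			},
		},
	}

	// mustNewPresignedRequest presigns with access_key_1/secret_key_1 for 10 minutes.
	req := mustNewPresignedRequest("PUT", "http://127.0.0.1:9000/bucket/object", 0, nil, t)
	if _, errCode := iam.reqSignatureV4Verify(req); errCode != s3err.ErrNone {
		t.Fatalf("expected ErrNone, got %d", errCode)
	}
}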
+func EncodePath(pathName string) string { + if reservedObjectNames.MatchString(pathName) { + return pathName + } + var encodedPathname string + for _, s := range pathName { + if 'A' <= s && s <= 'Z' || 'a' <= s && s <= 'z' || '0' <= s && s <= '9' { // §2.3 Unreserved characters (mark) + encodedPathname = encodedPathname + string(s) + continue + } + switch s { + case '-', '_', '.', '~', '/': // §2.3 Unreserved characters (mark) + encodedPathname = encodedPathname + string(s) + continue + default: + len := utf8.RuneLen(s) + if len < 0 { + // if utf8 cannot convert return the same string as is + return pathName + } + u := make([]byte, len) + utf8.EncodeRune(u, s) + for _, r := range u { + hex := hex.EncodeToString([]byte{r}) + encodedPathname = encodedPathname + "%" + strings.ToUpper(hex) + } + } + } + return encodedPathname +} diff --git a/weed/s3api/chunked_reader_v4.go b/weed/s3api/chunked_reader_v4.go index 061fd4a92..b163ec2f6 100644 --- a/weed/s3api/chunked_reader_v4.go +++ b/weed/s3api/chunked_reader_v4.go @@ -21,17 +21,116 @@ package s3api import ( "bufio" "bytes" + "crypto/sha256" + "encoding/hex" "errors" - "github.com/dustin/go-humanize" + "github.com/chrislusf/seaweedfs/weed/s3api/s3err" + "hash" "io" "net/http" -) + "time" -// Streaming AWS Signature Version '4' constants. -const ( - streamingContentSHA256 = "STREAMING-AWS4-HMAC-SHA256-PAYLOAD" + "github.com/dustin/go-humanize" ) +// getChunkSignature - get chunk signature. +func getChunkSignature(secretKey string, seedSignature string, region string, date time.Time, hashedChunk string) string { + + // Calculate string to sign. + stringToSign := signV4ChunkedAlgorithm + "\n" + + date.Format(iso8601Format) + "\n" + + getScope(date, region) + "\n" + + seedSignature + "\n" + + emptySHA256 + "\n" + + hashedChunk + + // Get hmac signing key. + signingKey := getSigningKey(secretKey, date, region, "s3") + + // Calculate signature. + newSignature := getSignature(signingKey, stringToSign) + + return newSignature +} + +// calculateSeedSignature - Calculate seed signature in accordance with +// - http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-streaming.html +// returns signature, error otherwise if the signature mismatches or any other +// error while parsing and validating. +func (iam *IdentityAccessManagement) calculateSeedSignature(r *http.Request) (cred *Credential, signature string, region string, date time.Time, errCode s3err.ErrorCode) { + + // Copy request. + req := *r + + // Save authorization header. + v4Auth := req.Header.Get("Authorization") + + // Parse signature version '4' header. + signV4Values, errCode := parseSignV4(v4Auth) + if errCode != s3err.ErrNone { + return nil, "", "", time.Time{}, errCode + } + + // Payload streaming. + payload := streamingContentSHA256 + + // Payload for STREAMING signature should be 'STREAMING-AWS4-HMAC-SHA256-PAYLOAD' + if payload != req.Header.Get("X-Amz-Content-Sha256") { + return nil, "", "", time.Time{}, s3err.ErrContentSHA256Mismatch + } + + // Extract all the signed headers along with its values. + extractedSignedHeaders, errCode := extractSignedHeaders(signV4Values.SignedHeaders, r) + if errCode != s3err.ErrNone { + return nil, "", "", time.Time{}, errCode + } + // Verify if the access key id matches. + _, cred, found := iam.lookupByAccessKey(signV4Values.Credential.accessKey) + if !found { + return nil, "", "", time.Time{}, s3err.ErrInvalidAccessKeyID + } + + // Verify if region is valid. 
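// The region is taken from the credential scope as-is; no allow-list is
// enforced here, the value simply feeds the signing-key derivation below.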
+ region = signV4Values.Credential.scope.region + + // Extract date, if not present throw error. + var dateStr string + if dateStr = req.Header.Get(http.CanonicalHeaderKey("x-amz-date")); dateStr == "" { + if dateStr = r.Header.Get("Date"); dateStr == "" { + return nil, "", "", time.Time{}, s3err.ErrMissingDateHeader + } + } + // Parse date header. + var err error + date, err = time.Parse(iso8601Format, dateStr) + if err != nil { + return nil, "", "", time.Time{}, s3err.ErrMalformedDate + } + + // Query string. + queryStr := req.URL.Query().Encode() + + // Get canonical request. + canonicalRequest := getCanonicalRequest(extractedSignedHeaders, payload, queryStr, req.URL.Path, req.Method) + + // Get string to sign from canonical request. + stringToSign := getStringToSign(canonicalRequest, date, signV4Values.Credential.getScope()) + + // Get hmac signing key. + signingKey := getSigningKey(cred.SecretKey, signV4Values.Credential.scope.date, region, "s3") + + // Calculate signature. + newSignature := getSignature(signingKey, stringToSign) + + // Verify if signature match. + if !compareSignatureV4(newSignature, signV4Values.Signature) { + return nil, "", "", time.Time{}, s3err.ErrSignatureDoesNotMatch + } + + // Return caculated signature. + return cred, newSignature, region, date, s3err.ErrNone +} + const maxLineLength = 4 * humanize.KiByte // assumed <= bufio.defaultBufSize 4KiB // lineTooLong is generated as chunk header is bigger than 4KiB. @@ -43,22 +142,36 @@ var errMalformedEncoding = errors.New("malformed chunked encoding") // newSignV4ChunkedReader returns a new s3ChunkedReader that translates the data read from r // out of HTTP "chunked" format before returning it. // The s3ChunkedReader returns io.EOF when the final 0-length chunk is read. -func newSignV4ChunkedReader(req *http.Request) io.ReadCloser { - return &s3ChunkedReader{ - reader: bufio.NewReader(req.Body), - state: readChunkHeader, +func (iam *IdentityAccessManagement) newSignV4ChunkedReader(req *http.Request) (io.ReadCloser, s3err.ErrorCode) { + ident, seedSignature, region, seedDate, errCode := iam.calculateSeedSignature(req) + if errCode != s3err.ErrNone { + return nil, errCode } + return &s3ChunkedReader{ + cred: ident, + reader: bufio.NewReader(req.Body), + seedSignature: seedSignature, + seedDate: seedDate, + region: region, + chunkSHA256Writer: sha256.New(), + state: readChunkHeader, + }, s3err.ErrNone } // Represents the overall state that is required for decoding a // AWS Signature V4 chunked reader. type s3ChunkedReader struct { - reader *bufio.Reader - state chunkState - lastChunk bool - chunkSignature string - n uint64 // Unread bytes in chunk - err error + cred *Credential + reader *bufio.Reader + seedSignature string + seedDate time.Time + region string + state chunkState + lastChunk bool + chunkSignature string + chunkSHA256Writer hash.Hash // Calculates sha256 of chunk data. + n uint64 // Unread bytes in chunk + err error } // Read chunk reads the chunk token signature portion. @@ -157,6 +270,9 @@ func (cr *s3ChunkedReader) Read(buf []byte) (n int, err error) { return 0, cr.err } + // Calculate sha256. + cr.chunkSHA256Writer.Write(rbuf[:n0]) + // Update the bytes read into request buffer so far. n += n0 buf = buf[n0:] @@ -169,6 +285,19 @@ func (cr *s3ChunkedReader) Read(buf []byte) (n int, err error) { continue } case verifyChunk: + // Calculate the hashed chunk. + hashedChunk := hex.EncodeToString(cr.chunkSHA256Writer.Sum(nil)) + // Calculate the chunk signature. 
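// Each chunk's string-to-sign folds in the previous signature (the seed), so
// chunks verify as a chain: altering any chunk invalidates every signature
// after it, and chunks must be consumed in order.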
+ newSignature := getChunkSignature(cr.cred.SecretKey, cr.seedSignature, cr.region, cr.seedDate, hashedChunk) + if !compareSignatureV4(cr.chunkSignature, newSignature) { + // Chunk signature doesn't match we return signature does not match. + cr.err = errors.New("chunk signature does not match") + return 0, cr.err + } + // Newly calculated signature becomes the seed for the next chunk + // this follows the chaining. + cr.seedSignature = newSignature + cr.chunkSHA256Writer.Reset() if cr.lastChunk { cr.state = eofChunk } else { diff --git a/weed/s3api/filer_multipart.go b/weed/s3api/filer_multipart.go index d3bde66ee..f882592c1 100644 --- a/weed/s3api/filer_multipart.go +++ b/weed/s3api/filer_multipart.go @@ -1,9 +1,9 @@ package s3api import ( - "context" "encoding/xml" "fmt" + "github.com/chrislusf/seaweedfs/weed/s3api/s3err" "path/filepath" "strconv" "strings" @@ -11,10 +11,11 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/s3" - "github.com/chrislusf/seaweedfs/weed/filer2" + "github.com/google/uuid" + + "github.com/chrislusf/seaweedfs/weed/filer" "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" - "github.com/google/uuid" ) type InitiateMultipartUploadResult struct { @@ -22,18 +23,21 @@ type InitiateMultipartUploadResult struct { s3.CreateMultipartUploadOutput } -func (s3a *S3ApiServer) createMultipartUpload(ctx context.Context, input *s3.CreateMultipartUploadInput) (output *InitiateMultipartUploadResult, code ErrorCode) { +func (s3a *S3ApiServer) createMultipartUpload(input *s3.CreateMultipartUploadInput) (output *InitiateMultipartUploadResult, code s3err.ErrorCode) { + + glog.V(2).Infof("createMultipartUpload input %v", input) + uploadId, _ := uuid.NewRandom() uploadIdString := uploadId.String() - if err := s3a.mkdir(ctx, s3a.genUploadsFolder(*input.Bucket), uploadIdString, func(entry *filer_pb.Entry) { + if err := s3a.mkdir(s3a.genUploadsFolder(*input.Bucket), uploadIdString, func(entry *filer_pb.Entry) { if entry.Extended == nil { entry.Extended = make(map[string][]byte) } entry.Extended["key"] = []byte(*input.Key) }); err != nil { glog.Errorf("NewMultipartUpload error: %v", err) - return nil, ErrInternalError + return nil, s3err.ErrInternalError } output = &InitiateMultipartUploadResult{ @@ -52,14 +56,16 @@ type CompleteMultipartUploadResult struct { s3.CompleteMultipartUploadOutput } -func (s3a *S3ApiServer) completeMultipartUpload(ctx context.Context, input *s3.CompleteMultipartUploadInput) (output *CompleteMultipartUploadResult, code ErrorCode) { +func (s3a *S3ApiServer) completeMultipartUpload(input *s3.CompleteMultipartUploadInput) (output *CompleteMultipartUploadResult, code s3err.ErrorCode) { + + glog.V(2).Infof("completeMultipartUpload input %v", input) uploadDirectory := s3a.genUploadsFolder(*input.Bucket) + "/" + *input.UploadId - entries, err := s3a.list(ctx, uploadDirectory, "", "", false, 0) - if err != nil { - glog.Errorf("completeMultipartUpload %s %s error: %v", *input.Bucket, *input.UploadId, err) - return nil, ErrNoSuchUpload + entries, _, err := s3a.list(uploadDirectory, "", "", false, 0) + if err != nil || len(entries) == 0 { + glog.Errorf("completeMultipartUpload %s %s error: %v, entries:%d", *input.Bucket, *input.UploadId, err, len(entries)) + return nil, s3err.ErrNoSuchUpload } var finalParts []*filer_pb.FileChunk @@ -69,11 +75,12 @@ func (s3a *S3ApiServer) completeMultipartUpload(ctx context.Context, input *s3.C if strings.HasSuffix(entry.Name, ".part") && !entry.IsDirectory { for _, 
chunk := range entry.Chunks { p := &filer_pb.FileChunk{ - FileId: chunk.GetFileIdString(), - Offset: offset, - Size: chunk.Size, - Mtime: chunk.Mtime, - ETag: chunk.ETag, + FileId: chunk.GetFileIdString(), + Offset: offset, + Size: chunk.Size, + Mtime: chunk.Mtime, + CipherKey: chunk.CipherKey, + ETag: chunk.ETag, } finalParts = append(finalParts, p) offset += int64(chunk.Size) @@ -96,78 +103,103 @@ func (s3a *S3ApiServer) completeMultipartUpload(ctx context.Context, input *s3.C dirName = dirName[:len(dirName)-1] } - err = s3a.mkFile(ctx, dirName, entryName, finalParts) + err = s3a.mkFile(dirName, entryName, finalParts) if err != nil { glog.Errorf("completeMultipartUpload %s/%s error: %v", dirName, entryName, err) - return nil, ErrInternalError + return nil, s3err.ErrInternalError } output = &CompleteMultipartUploadResult{ CompleteMultipartUploadOutput: s3.CompleteMultipartUploadOutput{ Location: aws.String(fmt.Sprintf("http://%s%s/%s", s3a.option.Filer, dirName, entryName)), Bucket: input.Bucket, - ETag: aws.String("\"" + filer2.ETag(finalParts) + "\""), + ETag: aws.String("\"" + filer.ETagChunks(finalParts) + "\""), Key: objectKey(input.Key), }, } - if err = s3a.rm(ctx, s3a.genUploadsFolder(*input.Bucket), *input.UploadId, true, false, true); err != nil { + if err = s3a.rm(s3a.genUploadsFolder(*input.Bucket), *input.UploadId, false, true); err != nil { glog.V(1).Infof("completeMultipartUpload cleanup %s upload %s: %v", *input.Bucket, *input.UploadId, err) } return } -func (s3a *S3ApiServer) abortMultipartUpload(ctx context.Context, input *s3.AbortMultipartUploadInput) (output *s3.AbortMultipartUploadOutput, code ErrorCode) { +func (s3a *S3ApiServer) abortMultipartUpload(input *s3.AbortMultipartUploadInput) (output *s3.AbortMultipartUploadOutput, code s3err.ErrorCode) { + + glog.V(2).Infof("abortMultipartUpload input %v", input) - exists, err := s3a.exists(ctx, s3a.genUploadsFolder(*input.Bucket), *input.UploadId, true) + exists, err := s3a.exists(s3a.genUploadsFolder(*input.Bucket), *input.UploadId, true) if err != nil { glog.V(1).Infof("bucket %s abort upload %s: %v", *input.Bucket, *input.UploadId, err) - return nil, ErrNoSuchUpload + return nil, s3err.ErrNoSuchUpload } if exists { - err = s3a.rm(ctx, s3a.genUploadsFolder(*input.Bucket), *input.UploadId, true, true, true) + err = s3a.rm(s3a.genUploadsFolder(*input.Bucket), *input.UploadId, true, true) } if err != nil { glog.V(1).Infof("bucket %s remove upload %s: %v", *input.Bucket, *input.UploadId, err) - return nil, ErrInternalError + return nil, s3err.ErrInternalError } - return &s3.AbortMultipartUploadOutput{}, ErrNone + return &s3.AbortMultipartUploadOutput{}, s3err.ErrNone } type ListMultipartUploadsResult struct { XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ListMultipartUploadsResult"` - s3.ListMultipartUploadsOutput + + // copied from s3.ListMultipartUploadsOutput, the Uploads is not converting to <Upload></Upload> + Bucket *string `type:"string"` + Delimiter *string `type:"string"` + EncodingType *string `type:"string" enum:"EncodingType"` + IsTruncated *bool `type:"boolean"` + KeyMarker *string `type:"string"` + MaxUploads *int64 `type:"integer"` + NextKeyMarker *string `type:"string"` + NextUploadIdMarker *string `type:"string"` + Prefix *string `type:"string"` + UploadIdMarker *string `type:"string"` + Upload []*s3.MultipartUpload `locationName:"Upload" type:"list" flattened:"true"` } -func (s3a *S3ApiServer) listMultipartUploads(ctx context.Context, input *s3.ListMultipartUploadsInput) (output *ListMultipartUploadsResult,
code ErrorCode) { +func (s3a *S3ApiServer) listMultipartUploads(input *s3.ListMultipartUploadsInput) (output *ListMultipartUploadsResult, code s3err.ErrorCode) { + // https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html + + glog.V(2).Infof("listMultipartUploads input %v", input) output = &ListMultipartUploadsResult{ - ListMultipartUploadsOutput: s3.ListMultipartUploadsOutput{ - Bucket: input.Bucket, - Delimiter: input.Delimiter, - EncodingType: input.EncodingType, - KeyMarker: input.KeyMarker, - MaxUploads: input.MaxUploads, - Prefix: input.Prefix, - }, + Bucket: input.Bucket, + Delimiter: input.Delimiter, + EncodingType: input.EncodingType, + KeyMarker: input.KeyMarker, + MaxUploads: input.MaxUploads, + Prefix: input.Prefix, } - entries, err := s3a.list(ctx, s3a.genUploadsFolder(*input.Bucket), *input.Prefix, *input.KeyMarker, true, int(*input.MaxUploads)) + entries, isLast, err := s3a.list(s3a.genUploadsFolder(*input.Bucket), "", *input.UploadIdMarker, false, uint32(*input.MaxUploads)) if err != nil { glog.Errorf("listMultipartUploads %s error: %v", *input.Bucket, err) return } + output.IsTruncated = aws.Bool(!isLast) for _, entry := range entries { if entry.Extended != nil { - key := entry.Extended["key"] - output.Uploads = append(output.Uploads, &s3.MultipartUpload{ - Key: objectKey(aws.String(string(key))), + key := string(entry.Extended["key"]) + if *input.KeyMarker != "" && *input.KeyMarker != key { + continue + } + if *input.Prefix != "" && !strings.HasPrefix(key, *input.Prefix) { + continue + } + output.Upload = append(output.Upload, &s3.MultipartUpload{ + Key: objectKey(aws.String(key)), UploadId: aws.String(entry.Name), }) + if !isLast { + output.NextUploadIdMarker = aws.String(entry.Name) + } } } @@ -176,27 +208,41 @@ func (s3a *S3ApiServer) listMultipartUploads(ctx context.Context, input *s3.List type ListPartsResult struct { XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ListPartsResult"` - s3.ListPartsOutput + + // copied from s3.ListPartsOutput, the Parts is not converting to <Part></Part> + Bucket *string `type:"string"` + IsTruncated *bool `type:"boolean"` + Key *string `min:"1" type:"string"` + MaxParts *int64 `type:"integer"` + NextPartNumberMarker *int64 `type:"integer"` + PartNumberMarker *int64 `type:"integer"` + Part []*s3.Part `locationName:"Part" type:"list" flattened:"true"` + StorageClass *string `type:"string" enum:"StorageClass"` + UploadId *string `type:"string"` } -func (s3a *S3ApiServer) listObjectParts(ctx context.Context, input *s3.ListPartsInput) (output *ListPartsResult, code ErrorCode) { +func (s3a *S3ApiServer) listObjectParts(input *s3.ListPartsInput) (output *ListPartsResult, code s3err.ErrorCode) { + // https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html + + glog.V(2).Infof("listObjectParts input %v", input) + output = &ListPartsResult{ - ListPartsOutput: s3.ListPartsOutput{ - Bucket: input.Bucket, - Key: objectKey(input.Key), - UploadId: input.UploadId, - MaxParts: input.MaxParts, // the maximum number of parts to return. - PartNumberMarker: input.PartNumberMarker, // the part number starts after this, exclusive - }, + Bucket: input.Bucket, + Key: objectKey(input.Key), + UploadId: input.UploadId, + MaxParts: input.MaxParts, // the maximum number of parts to return.
+ PartNumberMarker: input.PartNumberMarker, // the part number starts after this, exclusive + StorageClass: aws.String("STANDARD"), } - entries, err := s3a.list(ctx, s3a.genUploadsFolder(*input.Bucket)+"/"+*input.UploadId, - "", fmt.Sprintf("%04d.part", *input.PartNumberMarker), false, int(*input.MaxParts)) + entries, isLast, err := s3a.list(s3a.genUploadsFolder(*input.Bucket)+"/"+*input.UploadId, "", fmt.Sprintf("%04d.part", *input.PartNumberMarker), false, uint32(*input.MaxParts)) if err != nil { glog.Errorf("listObjectParts %s %s error: %v", *input.Bucket, *input.UploadId, err) - return nil, ErrNoSuchUpload + return nil, s3err.ErrNoSuchUpload } + output.IsTruncated = aws.Bool(!isLast) + for _, entry := range entries { if strings.HasSuffix(entry.Name, ".part") && !entry.IsDirectory { partNumberString := entry.Name[:len(entry.Name)-len(".part")] @@ -205,12 +251,15 @@ func (s3a *S3ApiServer) listObjectParts(ctx context.Context, input *s3.ListParts glog.Errorf("listObjectParts %s %s parse %s: %v", *input.Bucket, *input.UploadId, entry.Name, err) continue } - output.Parts = append(output.Parts, &s3.Part{ + output.Part = append(output.Part, &s3.Part{ PartNumber: aws.Int64(int64(partNumber)), - LastModified: aws.Time(time.Unix(entry.Attributes.Mtime, 0)), - Size: aws.Int64(int64(filer2.TotalSize(entry.Chunks))), - ETag: aws.String("\"" + filer2.ETag(entry.Chunks) + "\""), + LastModified: aws.Time(time.Unix(entry.Attributes.Mtime, 0).UTC()), + Size: aws.Int64(int64(filer.FileSize(entry))), + ETag: aws.String("\"" + filer.ETag(entry) + "\""), }) + if !isLast { + output.NextPartNumberMarker = aws.Int64(int64(partNumber)) + } } } diff --git a/weed/s3api/filer_multipart_test.go b/weed/s3api/filer_multipart_test.go index 835665dd6..f2568b6bc 100644 --- a/weed/s3api/filer_multipart_test.go +++ b/weed/s3api/filer_multipart_test.go @@ -4,6 +4,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/s3" "testing" + "time" ) func TestInitiateMultipartUploadResult(t *testing.T) { @@ -24,3 +25,25 @@ func TestInitiateMultipartUploadResult(t *testing.T) { } } + +func TestListPartsResult(t *testing.T) { + + expected := `<?xml version="1.0" encoding="UTF-8"?> +<ListPartsResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/"><Part><ETag>&#34;12345678&#34;</ETag><LastModified>1970-01-01T00:00:00Z</LastModified><PartNumber>1</PartNumber><Size>123</Size></Part></ListPartsResult>` + response := &ListPartsResult{ + Part: []*s3.Part{ + { + PartNumber: aws.Int64(int64(1)), + LastModified: aws.Time(time.Unix(0, 0).UTC()), + Size: aws.Int64(int64(123)), + ETag: aws.String("\"12345678\""), + }, + }, + } + + encoded := string(encodeResponse(response)) + if encoded != expected { + t.Errorf("unexpected output: %s\nexpecting:%s", encoded, expected) + } + +} diff --git a/weed/s3api/filer_util.go b/weed/s3api/filer_util.go index ed9612d35..1803332a3 100644 --- a/weed/s3api/filer_util.go +++ b/weed/s3api/filer_util.go @@ -3,164 +3,91 @@ package s3api import ( "context" "fmt" - "io" - "os" - "strings" - "time" - "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/util" + "strings" ) -func (s3a *S3ApiServer) mkdir(ctx context.Context, parentDirectoryPath string, dirName string, fn func(entry *filer_pb.Entry)) error { - return s3a.withFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { - - entry := &filer_pb.Entry{ - Name: dirName, - IsDirectory: true, - Attributes: &filer_pb.FuseAttributes{ - Mtime: time.Now().Unix(), - Crtime: time.Now().Unix(), - FileMode: uint32(0777 | os.ModeDir), - Uid: OS_UID, - Gid: OS_GID, - }, - } +func (s3a *S3ApiServer) mkdir(parentDirectoryPath string, dirName string, fn func(entry *filer_pb.Entry))
error { - if fn != nil { - fn(entry) - } + return filer_pb.Mkdir(s3a, parentDirectoryPath, dirName, fn) - request := &filer_pb.CreateEntryRequest{ - Directory: parentDirectoryPath, - Entry: entry, - } +} - glog.V(1).Infof("mkdir: %v", request) - if _, err := client.CreateEntry(ctx, request); err != nil { - glog.V(0).Infof("mkdir %v: %v", request, err) - return fmt.Errorf("mkdir %s/%s: %v", parentDirectoryPath, dirName, err) - } +func (s3a *S3ApiServer) mkFile(parentDirectoryPath string, fileName string, chunks []*filer_pb.FileChunk) error { - return nil - }) -} + return filer_pb.MkFile(s3a, parentDirectoryPath, fileName, chunks) -func (s3a *S3ApiServer) mkFile(ctx context.Context, parentDirectoryPath string, fileName string, chunks []*filer_pb.FileChunk) error { - return s3a.withFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { - - entry := &filer_pb.Entry{ - Name: fileName, - IsDirectory: false, - Attributes: &filer_pb.FuseAttributes{ - Mtime: time.Now().Unix(), - Crtime: time.Now().Unix(), - FileMode: uint32(0770), - Uid: OS_UID, - Gid: OS_GID, - }, - Chunks: chunks, - } +} - request := &filer_pb.CreateEntryRequest{ - Directory: parentDirectoryPath, - Entry: entry, - } +func (s3a *S3ApiServer) list(parentDirectoryPath, prefix, startFrom string, inclusive bool, limit uint32) (entries []*filer_pb.Entry, isLast bool, err error) { - glog.V(1).Infof("create file: %s/%s", parentDirectoryPath, fileName) - if _, err := client.CreateEntry(ctx, request); err != nil { - glog.V(0).Infof("create file %v:%v", request, err) - return fmt.Errorf("create file %s/%s: %v", parentDirectoryPath, fileName, err) + err = filer_pb.List(s3a, parentDirectoryPath, prefix, func(entry *filer_pb.Entry, isLastEntry bool) error { + entries = append(entries, entry) + if isLastEntry { + isLast = true } - return nil - }) -} + }, startFrom, inclusive, limit) -func (s3a *S3ApiServer) list(ctx context.Context, parentDirectoryPath, prefix, startFrom string, inclusive bool, limit int) (entries []*filer_pb.Entry, err error) { - - err = s3a.withFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { + if len(entries) == 0 { + isLast = true + } - request := &filer_pb.ListEntriesRequest{ - Directory: parentDirectoryPath, - Prefix: prefix, - StartFromFileName: startFrom, - InclusiveStartFrom: inclusive, - Limit: uint32(limit), - } + return - glog.V(4).Infof("read directory: %v", request) - stream, err := client.ListEntries(ctx, request) - if err != nil { - glog.V(0).Infof("read directory %v: %v", request, err) - return fmt.Errorf("list dir %v: %v", parentDirectoryPath, err) - } +} - for { - resp, recvErr := stream.Recv() - if recvErr != nil { - if recvErr == io.EOF { - break - } else { - return recvErr - } - } +func (s3a *S3ApiServer) rm(parentDirectoryPath, entryName string, isDeleteData, isRecursive bool) error { - entries = append(entries, resp.Entry) + return s3a.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { + err := doDeleteEntry(client, parentDirectoryPath, entryName, isDeleteData, isRecursive) + if err != nil { + return err } return nil }) - return - } -func (s3a *S3ApiServer) rm(ctx context.Context, parentDirectoryPath string, entryName string, isDirectory, isDeleteData, isRecursive bool) error { - - return s3a.withFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { - - request := &filer_pb.DeleteEntryRequest{ - Directory: parentDirectoryPath, - Name: entryName, - IsDeleteData: isDeleteData, - IsRecursive: isRecursive, - } +func doDeleteEntry(client 
filer_pb.SeaweedFilerClient, parentDirectoryPath string, entryName string, isDeleteData bool, isRecursive bool) error { + request := &filer_pb.DeleteEntryRequest{ + Directory: parentDirectoryPath, + Name: entryName, + IsDeleteData: isDeleteData, + IsRecursive: isRecursive, + } - glog.V(1).Infof("delete entry %v/%v: %v", parentDirectoryPath, entryName, request) - if _, err := client.DeleteEntry(ctx, request); err != nil { - glog.V(0).Infof("delete entry %v: %v", request, err) - return fmt.Errorf("delete entry %s/%s: %v", parentDirectoryPath, entryName, err) + glog.V(1).Infof("delete entry %v/%v: %v", parentDirectoryPath, entryName, request) + if resp, err := client.DeleteEntry(context.Background(), request); err != nil { + glog.V(0).Infof("delete entry %v: %v", request, err) + return fmt.Errorf("delete entry %s/%s: %v", parentDirectoryPath, entryName, err) + } else { + if resp.Error != "" { + return fmt.Errorf("delete entry %s/%s: %v", parentDirectoryPath, entryName, resp.Error) } - - return nil - }) - + } + return nil } -func (s3a *S3ApiServer) exists(ctx context.Context, parentDirectoryPath string, entryName string, isDirectory bool) (exists bool, err error) { +func (s3a *S3ApiServer) exists(parentDirectoryPath string, entryName string, isDirectory bool) (exists bool, err error) { - err = s3a.withFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { + return filer_pb.Exists(s3a, parentDirectoryPath, entryName, isDirectory) - request := &filer_pb.LookupDirectoryEntryRequest{ - Directory: parentDirectoryPath, - Name: entryName, - } +} - glog.V(4).Infof("exists entry %v/%v: %v", parentDirectoryPath, entryName, request) - resp, err := client.LookupDirectoryEntry(ctx, request) - if err != nil { - glog.V(0).Infof("exists entry %v: %v", request, err) - return fmt.Errorf("exists entry %s/%s: %v", parentDirectoryPath, entryName, err) - } +func (s3a *S3ApiServer) touch(parentDirectoryPath string, entryName string, entry *filer_pb.Entry) (err error) { - exists = resp.Entry.IsDirectory == isDirectory + return filer_pb.Touch(s3a, parentDirectoryPath, entryName, entry) - return nil - }) +} - return +func (s3a *S3ApiServer) getEntry(parentDirectoryPath, entryName string) (entry *filer_pb.Entry, err error) { + fullPath := util.NewFullPath(parentDirectoryPath, entryName) + return filer_pb.GetEntry(s3a, fullPath) } func objectKey(key *string) *string { diff --git a/weed/s3api/filer_util_tags.go b/weed/s3api/filer_util_tags.go new file mode 100644 index 000000000..75d3b37d0 --- /dev/null +++ b/weed/s3api/filer_util_tags.go @@ -0,0 +1,105 @@ +package s3api + +import ( + "strings" + + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + xhttp "github.com/chrislusf/seaweedfs/weed/s3api/http" +) + +const ( + S3TAG_PREFIX = xhttp.AmzObjectTagging + "-" +) + +func (s3a *S3ApiServer) getTags(parentDirectoryPath string, entryName string) (tags map[string]string, err error) { + + err = s3a.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { + + resp, err := filer_pb.LookupEntry(client, &filer_pb.LookupDirectoryEntryRequest{ + Directory: parentDirectoryPath, + Name: entryName, + }) + if err != nil { + return err + } + tags = make(map[string]string) + for k, v := range resp.Entry.Extended { + if strings.HasPrefix(k, S3TAG_PREFIX) { + tags[k[len(S3TAG_PREFIX):]] = string(v) + } + } + return nil + }) + return +} + +func (s3a *S3ApiServer) setTags(parentDirectoryPath string, entryName string, tags map[string]string) (err error) { + + return s3a.WithFilerClient(func(client 
filer_pb.SeaweedFilerClient) error { + + resp, err := filer_pb.LookupEntry(client, &filer_pb.LookupDirectoryEntryRequest{ + Directory: parentDirectoryPath, + Name: entryName, + }) + if err != nil { + return err + } + + for k, _ := range resp.Entry.Extended { + if strings.HasPrefix(k, S3TAG_PREFIX) { + delete(resp.Entry.Extended, k) + } + } + + if resp.Entry.Extended == nil { + resp.Entry.Extended = make(map[string][]byte) + } + for k, v := range tags { + resp.Entry.Extended[S3TAG_PREFIX+k] = []byte(v) + } + + return filer_pb.UpdateEntry(client, &filer_pb.UpdateEntryRequest{ + Directory: parentDirectoryPath, + Entry: resp.Entry, + IsFromOtherCluster: false, + Signatures: nil, + }) + + }) + +} + +func (s3a *S3ApiServer) rmTags(parentDirectoryPath string, entryName string) (err error) { + + return s3a.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { + + resp, err := filer_pb.LookupEntry(client, &filer_pb.LookupDirectoryEntryRequest{ + Directory: parentDirectoryPath, + Name: entryName, + }) + if err != nil { + return err + } + + hasDeletion := false + for k, _ := range resp.Entry.Extended { + if strings.HasPrefix(k, S3TAG_PREFIX) { + delete(resp.Entry.Extended, k) + hasDeletion = true + } + } + + if !hasDeletion { + return nil + } + + return filer_pb.UpdateEntry(client, &filer_pb.UpdateEntryRequest{ + Directory: parentDirectoryPath, + Entry: resp.Entry, + IsFromOtherCluster: false, + Signatures: nil, + }) + + }) + +} diff --git a/weed/s3api/http/header.go b/weed/s3api/http/header.go new file mode 100644 index 000000000..6614b0af0 --- /dev/null +++ b/weed/s3api/http/header.go @@ -0,0 +1,36 @@ +/* + * MinIO Cloud Storage, (C) 2019 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package http + +// Standard S3 HTTP request constants +const ( + // S3 storage class + AmzStorageClass = "x-amz-storage-class" + + // S3 user-defined metadata + AmzUserMetaPrefix = "X-Amz-Meta-" + + // S3 object tagging + AmzObjectTagging = "X-Amz-Tagging" + AmzTagCount = "x-amz-tagging-count" +) + +// Non-Standard S3 HTTP request constants +const ( + AmzIdentityId = "s3-identity-id" + AmzIsAdmin = "s3-is-admin" // only set to http request header as a context +) diff --git a/weed/s3api/policy/post-policy.go b/weed/s3api/policy/post-policy.go new file mode 100644 index 000000000..5ef8d397d --- /dev/null +++ b/weed/s3api/policy/post-policy.go @@ -0,0 +1,321 @@ +package policy + +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import ( + "encoding/base64" + "fmt" + "github.com/chrislusf/seaweedfs/weed/s3api/s3err" + "net/http" + "strings" + "time" +) + +// expirationDateFormat date format for expiration key in json policy. +const expirationDateFormat = "2006-01-02T15:04:05.999Z" + +// policyCondition explanation: +// http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-HTTPPOSTConstructPolicy.html +// +// Example: +// +// policyCondition { +// matchType: "$eq", +// key: "$Content-Type", +// value: "image/png", +// } +// +type policyCondition struct { + matchType string + condition string + value string +} + +// PostPolicy - Provides strict static type conversion and validation +// for Amazon S3's POST policy JSON string. +type PostPolicy struct { + // Expiration date and time of the POST policy. + expiration time.Time + // Collection of different policy conditions. + conditions []policyCondition + // ContentLengthRange minimum and maximum allowable size for the + // uploaded content. + contentLengthRange struct { + min int64 + max int64 + } + + // Post form data. + formData map[string]string +} + +// NewPostPolicy - Instantiate new post policy. +func NewPostPolicy() *PostPolicy { + p := &PostPolicy{} + p.conditions = make([]policyCondition, 0) + p.formData = make(map[string]string) + return p +} + +// SetExpires - Sets expiration time for the new policy. +func (p *PostPolicy) SetExpires(t time.Time) error { + if t.IsZero() { + return errInvalidArgument("No expiry time set.") + } + p.expiration = t + return nil +} + +// SetKey - Sets an object name for the policy based upload. +func (p *PostPolicy) SetKey(key string) error { + if strings.TrimSpace(key) == "" || key == "" { + return errInvalidArgument("Object name is empty.") + } + policyCond := policyCondition{ + matchType: "eq", + condition: "$key", + value: key, + } + if err := p.addNewPolicy(policyCond); err != nil { + return err + } + p.formData["key"] = key + return nil +} + +// SetKeyStartsWith - Sets an object name that a policy based upload +// can start with. +func (p *PostPolicy) SetKeyStartsWith(keyStartsWith string) error { + if strings.TrimSpace(keyStartsWith) == "" || keyStartsWith == "" { + return errInvalidArgument("Object prefix is empty.") + } + policyCond := policyCondition{ + matchType: "starts-with", + condition: "$key", + value: keyStartsWith, + } + if err := p.addNewPolicy(policyCond); err != nil { + return err + } + p.formData["key"] = keyStartsWith + return nil +} + +// SetBucket - Sets the bucket to which objects will be uploaded.
+func (p *PostPolicy) SetBucket(bucketName string) error { + if strings.TrimSpace(bucketName) == "" || bucketName == "" { + return errInvalidArgument("Bucket name is empty.") + } + policyCond := policyCondition{ + matchType: "eq", + condition: "$bucket", + value: bucketName, + } + if err := p.addNewPolicy(policyCond); err != nil { + return err + } + p.formData["bucket"] = bucketName + return nil +} + +// SetCondition - Sets condition for credentials, date and algorithm +func (p *PostPolicy) SetCondition(matchType, condition, value string) error { + if strings.TrimSpace(value) == "" || value == "" { + return errInvalidArgument("No value specified for condition") + } + + policyCond := policyCondition{ + matchType: matchType, + condition: "$" + condition, + value: value, + } + if condition == "X-Amz-Credential" || condition == "X-Amz-Date" || condition == "X-Amz-Algorithm" { + if err := p.addNewPolicy(policyCond); err != nil { + return err + } + p.formData[condition] = value + return nil + } + return errInvalidArgument("Invalid condition in policy") +} + +// SetContentType - Sets content-type of the object for this policy +// based upload. +func (p *PostPolicy) SetContentType(contentType string) error { + if strings.TrimSpace(contentType) == "" || contentType == "" { + return errInvalidArgument("No content type specified.") + } + policyCond := policyCondition{ + matchType: "eq", + condition: "$Content-Type", + value: contentType, + } + if err := p.addNewPolicy(policyCond); err != nil { + return err + } + p.formData["Content-Type"] = contentType + return nil +} + +// SetContentLengthRange - Set new min and max content length +// condition for all incoming uploads. +func (p *PostPolicy) SetContentLengthRange(min, max int64) error { + if min > max { + return errInvalidArgument("Minimum limit is larger than maximum limit.") + } + if min < 0 { + return errInvalidArgument("Minimum limit cannot be negative.") + } + if max < 0 { + return errInvalidArgument("Maximum limit cannot be negative.") + } + p.contentLengthRange.min = min + p.contentLengthRange.max = max + return nil +} + +// SetSuccessActionRedirect - Sets the redirect success url of the object for this policy +// based upload. +func (p *PostPolicy) SetSuccessActionRedirect(redirect string) error { + if strings.TrimSpace(redirect) == "" || redirect == "" { + return errInvalidArgument("Redirect is empty") + } + policyCond := policyCondition{ + matchType: "eq", + condition: "$success_action_redirect", + value: redirect, + } + if err := p.addNewPolicy(policyCond); err != nil { + return err + } + p.formData["success_action_redirect"] = redirect + return nil +} + +// SetSuccessStatusAction - Sets the status success code of the object for this policy +// based upload. +func (p *PostPolicy) SetSuccessStatusAction(status string) error { + if strings.TrimSpace(status) == "" || status == "" { + return errInvalidArgument("Status is empty") + } + policyCond := policyCondition{ + matchType: "eq", + condition: "$success_action_status", + value: status, + } + if err := p.addNewPolicy(policyCond); err != nil { + return err + } + p.formData["success_action_status"] = status + return nil +} + +// SetUserMetadata - Set user metadata as a key/value couple. +// Can be retrieved through a HEAD request or an event. 
+func (p *PostPolicy) SetUserMetadata(key string, value string) error { + if strings.TrimSpace(key) == "" || key == "" { + return errInvalidArgument("Key is empty") + } + if strings.TrimSpace(value) == "" || value == "" { + return errInvalidArgument("Value is empty") + } + headerName := fmt.Sprintf("x-amz-meta-%s", key) + policyCond := policyCondition{ + matchType: "eq", + condition: fmt.Sprintf("$%s", headerName), + value: value, + } + if err := p.addNewPolicy(policyCond); err != nil { + return err + } + p.formData[headerName] = value + return nil +} + +// SetUserData - Set user data as a key/value couple. +// Can be retrieved through a HEAD request or an event. +func (p *PostPolicy) SetUserData(key string, value string) error { + if key == "" { + return errInvalidArgument("Key is empty") + } + if value == "" { + return errInvalidArgument("Value is empty") + } + headerName := fmt.Sprintf("x-amz-%s", key) + policyCond := policyCondition{ + matchType: "eq", + condition: fmt.Sprintf("$%s", headerName), + value: value, + } + if err := p.addNewPolicy(policyCond); err != nil { + return err + } + p.formData[headerName] = value + return nil +} + +// addNewPolicy - internal helper to validate adding new policies. +func (p *PostPolicy) addNewPolicy(policyCond policyCondition) error { + if policyCond.matchType == "" || policyCond.condition == "" || policyCond.value == "" { + return errInvalidArgument("Policy fields are empty.") + } + p.conditions = append(p.conditions, policyCond) + return nil +} + +// String function for printing policy in json formatted string. +func (p PostPolicy) String() string { + return string(p.marshalJSON()) +} + +// marshalJSON - Provides Marshaled JSON in bytes. +func (p PostPolicy) marshalJSON() []byte { + expirationStr := `"expiration":"` + p.expiration.Format(expirationDateFormat) + `"` + var conditionsStr string + conditions := []string{} + for _, po := range p.conditions { + conditions = append(conditions, fmt.Sprintf("[\"%s\",\"%s\",\"%s\"]", po.matchType, po.condition, po.value)) + } + if p.contentLengthRange.min != 0 || p.contentLengthRange.max != 0 { + conditions = append(conditions, fmt.Sprintf("[\"content-length-range\", %d, %d]", + p.contentLengthRange.min, p.contentLengthRange.max)) + } + if len(conditions) > 0 { + conditionsStr = `"conditions":[` + strings.Join(conditions, ",") + "]" + } + retStr := "{" + retStr = retStr + expirationStr + "," + retStr = retStr + conditionsStr + retStr = retStr + "}" + return []byte(retStr) +} + +// base64 - Produces base64 of PostPolicy's Marshaled json. +func (p PostPolicy) base64() string { + return base64.StdEncoding.EncodeToString(p.marshalJSON()) +} + +// errInvalidArgument - Invalid argument response. +func errInvalidArgument(message string) error { + return s3err.RESTErrorResponse{ + StatusCode: http.StatusBadRequest, + Code: "InvalidArgument", + Message: message, + RequestID: "minio", + } +} diff --git a/weed/s3api/policy/post-policy_test.go b/weed/s3api/policy/post-policy_test.go new file mode 100644 index 000000000..ce241b723 --- /dev/null +++ b/weed/s3api/policy/post-policy_test.go @@ -0,0 +1,378 @@ +package policy + +/* + * MinIO Cloud Storage, (C) 2016, 2017, 2018 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import ( + "bytes" + "crypto/hmac" + "crypto/sha1" + "crypto/sha256" + "encoding/base64" + "encoding/hex" + "fmt" + "mime/multipart" + "net/http" + "net/url" + "regexp" + "strings" + "time" + "unicode/utf8" +) + +const ( + iso8601DateFormat = "20060102T150405Z" + iso8601TimeFormat = "2006-01-02T15:04:05.000Z" // Reply date format with millisecond precision. +) + +func newPostPolicyBytesV4WithContentRange(credential, bucketName, objectKey string, expiration time.Time) []byte { + t := time.Now().UTC() + // Add the expiration date. + expirationStr := fmt.Sprintf(`"expiration": "%s"`, expiration.Format(iso8601TimeFormat)) + // Add the bucket condition, only accept buckets equal to the one passed. + bucketConditionStr := fmt.Sprintf(`["eq", "$bucket", "%s"]`, bucketName) + // Add the key condition, only accept keys equal to the one passed. + keyConditionStr := fmt.Sprintf(`["eq", "$key", "%s/upload.txt"]`, objectKey) + // Add content length condition, only accept content sizes of a given length. + contentLengthCondStr := `["content-length-range", 1024, 1048576]` + // Add the algorithm condition, only accept AWS SignV4 Sha256. + algorithmConditionStr := `["eq", "$x-amz-algorithm", "AWS4-HMAC-SHA256"]` + // Add the date condition, only accept the current date. + dateConditionStr := fmt.Sprintf(`["eq", "$x-amz-date", "%s"]`, t.Format(iso8601DateFormat)) + // Add the credential string, only accept the credential passed. + credentialConditionStr := fmt.Sprintf(`["eq", "$x-amz-credential", "%s"]`, credential) + // Add the meta-uuid string, set to 1234 + uuidConditionStr := fmt.Sprintf(`["eq", "$x-amz-meta-uuid", "%s"]`, "1234") + + // Combine all conditions into one string. + conditionStr := fmt.Sprintf(`"conditions":[%s, %s, %s, %s, %s, %s, %s]`, bucketConditionStr, + keyConditionStr, contentLengthCondStr, algorithmConditionStr, dateConditionStr, credentialConditionStr, uuidConditionStr) + retStr := "{" + retStr = retStr + expirationStr + "," + retStr = retStr + conditionStr + retStr = retStr + "}" + + return []byte(retStr) +} + +// newPostPolicyBytesV4 - creates a bare bones postpolicy string with key and bucket matches. +func newPostPolicyBytesV4(credential, bucketName, objectKey string, expiration time.Time) []byte { + t := time.Now().UTC() + // Add the expiration date. + expirationStr := fmt.Sprintf(`"expiration": "%s"`, expiration.Format(iso8601TimeFormat)) + // Add the bucket condition, only accept buckets equal to the one passed. + bucketConditionStr := fmt.Sprintf(`["eq", "$bucket", "%s"]`, bucketName) + // Add the key condition, only accept keys equal to the one passed. + keyConditionStr := fmt.Sprintf(`["eq", "$key", "%s/upload.txt"]`, objectKey) + // Add the algorithm condition, only accept AWS SignV4 Sha256. + algorithmConditionStr := `["eq", "$x-amz-algorithm", "AWS4-HMAC-SHA256"]` + // Add the date condition, only accept the current date. + dateConditionStr := fmt.Sprintf(`["eq", "$x-amz-date", "%s"]`, t.Format(iso8601DateFormat)) + // Add the credential string, only accept the credential passed.
+ credentialConditionStr := fmt.Sprintf(`["eq", "$x-amz-credential", "%s"]`, credential) + // Add the meta-uuid string, set to 1234 + uuidConditionStr := fmt.Sprintf(`["eq", "$x-amz-meta-uuid", "%s"]`, "1234") + + // Combine all conditions into one string. + conditionStr := fmt.Sprintf(`"conditions":[%s, %s, %s, %s, %s, %s]`, bucketConditionStr, keyConditionStr, algorithmConditionStr, dateConditionStr, credentialConditionStr, uuidConditionStr) + retStr := "{" + retStr = retStr + expirationStr + "," + retStr = retStr + conditionStr + retStr = retStr + "}" + + return []byte(retStr) +} + +// newPostPolicyBytesV2 - creates a bare bones postpolicy string with key and bucket matches. +func newPostPolicyBytesV2(bucketName, objectKey string, expiration time.Time) []byte { + // Add the expiration date. + expirationStr := fmt.Sprintf(`"expiration": "%s"`, expiration.Format(iso8601TimeFormat)) + // Add the bucket condition, only accept buckets equal to the one passed. + bucketConditionStr := fmt.Sprintf(`["eq", "$bucket", "%s"]`, bucketName) + // Add the key condition, only accept keys equal to the one passed. + keyConditionStr := fmt.Sprintf(`["starts-with", "$key", "%s/upload.txt"]`, objectKey) + + // Combine all conditions into one string. + conditionStr := fmt.Sprintf(`"conditions":[%s, %s]`, bucketConditionStr, keyConditionStr) + retStr := "{" + retStr = retStr + expirationStr + "," + retStr = retStr + conditionStr + retStr = retStr + "}" + + return []byte(retStr) +} + +// Wrapper for calling TestPostPolicyBucketHandler tests for both Erasure multiple disks and single node setup. + +// testPostPolicyBucketHandler - Tests validate post policy handler uploading objects. + +// Wrapper for calling TestPostPolicyBucketHandlerRedirect tests for both Erasure multiple disks and single node setup. + +// testPostPolicyBucketHandlerRedirect tests POST Object when success_action_redirect is specified + +// postPresignSignatureV4 - presigned signature for PostPolicy requests. +func postPresignSignatureV4(policyBase64 string, t time.Time, secretAccessKey, location string) string { + // Get signing key. + signingkey := getSigningKey(secretAccessKey, t, location) + // Calculate signature. + signature := getSignature(signingkey, policyBase64) + return signature +} + +// copied from auth_signature_v4.go to break import loop +// sumHMAC calculate hmac between two input byte array. +func sumHMAC(key []byte, data []byte) []byte { + hash := hmac.New(sha256.New, key) + hash.Write(data) + return hash.Sum(nil) +} + +// copied from auth_signature_v4.go to break import loop +// getSigningKey hmac seed to calculate final signature. +func getSigningKey(secretKey string, t time.Time, region string) []byte { + date := sumHMAC([]byte("AWS4"+secretKey), []byte(t.Format("20060102"))) + regionBytes := sumHMAC(date, []byte(region)) + service := sumHMAC(regionBytes, []byte("s3")) + signingKey := sumHMAC(service, []byte("aws4_request")) + return signingKey +} + +// copied from auth_signature_v4.go to break import loop +// getSignature final signature in hexadecimal form.
+func getSignature(signingKey []byte, stringToSign string) string { + return hex.EncodeToString(sumHMAC(signingKey, []byte(stringToSign))) +} + +// copied from auth_signature_v4.go to break import loop +func calculateSignatureV2(stringToSign string, secret string) string { + hm := hmac.New(sha1.New, []byte(secret)) + hm.Write([]byte(stringToSign)) + return base64.StdEncoding.EncodeToString(hm.Sum(nil)) +} + +func newPostRequestV2(endPoint, bucketName, objectName string, accessKey, secretKey string) (*http.Request, error) { + // Expire the request five minutes from now. + expirationTime := time.Now().UTC().Add(time.Minute * 5) + // Create a new post policy. + policy := newPostPolicyBytesV2(bucketName, objectName, expirationTime) + // Only need the encoding. + encodedPolicy := base64.StdEncoding.EncodeToString(policy) + + // Presign with V2 signature based on the policy. + signature := calculateSignatureV2(encodedPolicy, secretKey) + + formData := map[string]string{ + "AWSAccessKeyId": accessKey, + "bucket": bucketName, + "key": objectName + "/${filename}", + "policy": encodedPolicy, + "signature": signature, + } + + // Create the multipart form. + var buf bytes.Buffer + w := multipart.NewWriter(&buf) + + // Set the normal formData + for k, v := range formData { + w.WriteField(k, v) + } + // Set the File formData + writer, err := w.CreateFormFile("file", "upload.txt") + if err != nil { + return nil, err + } + writer.Write([]byte("hello world")) + // Close before creating the new request. + w.Close() + + // Set the body equal to the created policy. + reader := bytes.NewReader(buf.Bytes()) + + req, err := http.NewRequest(http.MethodPost, makeTestTargetURL(endPoint, bucketName, "", nil), reader) + if err != nil { + return nil, err + } + + // Set form content-type. + req.Header.Set("Content-Type", w.FormDataContentType()) + return req, nil +} + +func buildGenericPolicy(t time.Time, accessKey, region, bucketName, objectName string, contentLengthRange bool) []byte { + // Expire the request five minutes from now. + expirationTime := t.Add(time.Minute * 5) + + credStr := getCredentialString(accessKey, region, t) + // Create a new post policy. + policy := newPostPolicyBytesV4(credStr, bucketName, objectName, expirationTime) + if contentLengthRange { + policy = newPostPolicyBytesV4WithContentRange(credStr, bucketName, objectName, expirationTime) + } + return policy +} + +func newPostRequestV4Generic(endPoint, bucketName, objectName string, objData []byte, accessKey, secretKey string, region string, + t time.Time, policy []byte, addFormData map[string]string, corruptedB64 bool, corruptedMultipart bool) (*http.Request, error) { + // Get the user credential. + credStr := getCredentialString(accessKey, region, t) + + // Only need the encoding. + encodedPolicy := base64.StdEncoding.EncodeToString(policy) + + if corruptedB64 { + encodedPolicy = "%!~&" + encodedPolicy + } + + // Presign with V4 signature based on the policy. + signature := postPresignSignatureV4(encodedPolicy, t, secretKey, region) + + formData := map[string]string{ + "bucket": bucketName, + "key": objectName + "/${filename}", + "x-amz-credential": credStr, + "policy": encodedPolicy, + "x-amz-signature": signature, + "x-amz-date": t.Format(iso8601DateFormat), + "x-amz-algorithm": "AWS4-HMAC-SHA256", + "x-amz-meta-uuid": "1234", + "Content-Encoding": "gzip", + } + + // Add form data + for k, v := range addFormData { + formData[k] = v + } + + // Create the multipart form.
+ var buf bytes.Buffer + w := multipart.NewWriter(&buf) + + // Set the normal formData + for k, v := range formData { + w.WriteField(k, v) + } + // Set the File formData, but skip it if we want to send an incomplete multipart request + if !corruptedMultipart { + writer, err := w.CreateFormFile("file", "upload.txt") + if err != nil { + return nil, err + } + writer.Write(objData) + // Close before creating the new request. + w.Close() + } + + // Set the body equal to the created policy. + reader := bytes.NewReader(buf.Bytes()) + + req, err := http.NewRequest(http.MethodPost, makeTestTargetURL(endPoint, bucketName, "", nil), reader) + if err != nil { + return nil, err + } + + // Set form content-type. + req.Header.Set("Content-Type", w.FormDataContentType()) + return req, nil +} + +func newPostRequestV4WithContentLength(endPoint, bucketName, objectName string, objData []byte, accessKey, secretKey string) (*http.Request, error) { + t := time.Now().UTC() + region := "us-east-1" + policy := buildGenericPolicy(t, accessKey, region, bucketName, objectName, true) + return newPostRequestV4Generic(endPoint, bucketName, objectName, objData, accessKey, secretKey, region, t, policy, nil, false, false) +} + +func newPostRequestV4(endPoint, bucketName, objectName string, objData []byte, accessKey, secretKey string) (*http.Request, error) { + t := time.Now().UTC() + region := "us-east-1" + policy := buildGenericPolicy(t, accessKey, region, bucketName, objectName, false) + return newPostRequestV4Generic(endPoint, bucketName, objectName, objData, accessKey, secretKey, region, t, policy, nil, false, false) +} + +// construct URL for http requests for bucket operations. +func makeTestTargetURL(endPoint, bucketName, objectName string, queryValues url.Values) string { + urlStr := endPoint + "/" + if bucketName != "" { + urlStr = urlStr + bucketName + "/" + } + if objectName != "" { + urlStr = urlStr + EncodePath(objectName) + } + if len(queryValues) > 0 { + urlStr = urlStr + "?" + queryValues.Encode() + } + return urlStr +} + +// if object matches reserved string, no need to encode them +var reservedObjectNames = regexp.MustCompile("^[a-zA-Z0-9-_.~/]+$") + +// EncodePath encodes the strings from UTF-8 byte representations to HTML hex escape sequences +// +// This is necessary since regular url.Parse() and url.Encode() functions do not support UTF-8; +// non-English characters cannot be parsed due to the nature in which url.Encode() is written +// +// This function on the other hand is a direct replacement for url.Encode() technique to support +// pretty much every UTF-8 character. +func EncodePath(pathName string) string { + if reservedObjectNames.MatchString(pathName) { + return pathName + } + var encodedPathname string + for _, s := range pathName { + if 'A' <= s && s <= 'Z' || 'a' <= s && s <= 'z' || '0' <= s && s <= '9' { // §2.3 Unreserved characters (mark) + encodedPathname = encodedPathname + string(s) + continue + } + switch s { + case '-', '_', '.', '~', '/': // §2.3 Unreserved characters (mark) + encodedPathname = encodedPathname + string(s) + continue + default: + len := utf8.RuneLen(s) + if len < 0 { + // if utf8 cannot convert return the same string as is + return pathName + } + u := make([]byte, len) + utf8.EncodeRune(u, s) + for _, r := range u { + hex := hex.EncodeToString([]byte{r}) + encodedPathname = encodedPathname + "%" + strings.ToUpper(hex) + } + } + } + return encodedPathname +} + +// getCredentialString generates a credential string.
+func getCredentialString(accessKeyID, location string, t time.Time) string { + return accessKeyID + "/" + getScope(t, location) +} + +// getScope generates a string of a specific date, an AWS region, and a service. +func getScope(t time.Time, region string) string { + scope := strings.Join([]string{ + t.Format("20060102"), + region, + string("s3"), + "aws4_request", + }, "/") + return scope +} diff --git a/weed/s3api/policy/postpolicyform.go b/weed/s3api/policy/postpolicyform.go new file mode 100644 index 000000000..3a6f3a882 --- /dev/null +++ b/weed/s3api/policy/postpolicyform.go @@ -0,0 +1,276 @@ +package policy + +/* + * MinIO Cloud Storage, (C) 2015, 2016, 2017 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import ( + "encoding/json" + "errors" + "fmt" + "net/http" + "reflect" + "strconv" + "strings" + "time" +) + +// startsWithConds - map which indicates if a given condition supports starts-with policy operator +var startsWithConds = map[string]bool{ + "$acl": true, + "$bucket": false, + "$cache-control": true, + "$content-type": true, + "$content-disposition": true, + "$content-encoding": true, + "$expires": true, + "$key": true, + "$success_action_redirect": true, + "$redirect": true, + "$success_action_status": false, + "$x-amz-algorithm": false, + "$x-amz-credential": false, + "$x-amz-date": false, +} + +// Add policy conditionals. +const ( + policyCondEqual = "eq" + policyCondStartsWith = "starts-with" + policyCondContentLength = "content-length-range" +) + +// toString - Safely convert interface to string without causing panic. +func toString(val interface{}) string { + switch v := val.(type) { + case string: + return v + default: + return "" + } +} + +// toLowerString - safely convert interface to lower string +func toLowerString(val interface{}) string { + return strings.ToLower(toString(val)) +} + +// toInteger - Safely convert interface to integer without causing panic. +func toInteger(val interface{}) (int64, error) { + switch v := val.(type) { + case float64: + return int64(v), nil + case int64: + return v, nil + case int: + return int64(v), nil + case string: + i, err := strconv.Atoi(v) + return int64(i), err + default: + return 0, errors.New("Invalid number format") + } +} + +// isString - Safely check if val is of type string without causing panic. +func isString(val interface{}) bool { + _, ok := val.(string) + return ok +} + +// contentLengthRange - policy content-length-range field. +type contentLengthRange struct { + Min int64 + Max int64 + Valid bool // If content-length-range was part of policy +} + +// PostPolicyForm provides strict static type conversion and validation for Amazon S3's POST policy JSON string. +type PostPolicyForm struct { + Expiration time.Time // Expiration date and time of the POST policy. + Conditions struct { // Conditional policy structure.
+ Policies []struct { + Operator string + Key string + Value string + } + ContentLengthRange contentLengthRange + } +} + +// ParsePostPolicyForm - Parse JSON policy string into typed PostPolicyForm structure. +func ParsePostPolicyForm(policy string) (ppf PostPolicyForm, e error) { + // Convert po into interfaces and + // perform strict type conversion using reflection. + var rawPolicy struct { + Expiration string `json:"expiration"` + Conditions []interface{} `json:"conditions"` + } + + err := json.Unmarshal([]byte(policy), &rawPolicy) + if err != nil { + return ppf, err + } + + parsedPolicy := PostPolicyForm{} + + // Parse expiry time. + parsedPolicy.Expiration, err = time.Parse(time.RFC3339Nano, rawPolicy.Expiration) + if err != nil { + return ppf, err + } + + // Parse conditions. + for _, val := range rawPolicy.Conditions { + switch condt := val.(type) { + case map[string]interface{}: // Handle key:value map types. + for k, v := range condt { + if !isString(v) { // Pre-check value type. + // All values must be of type string. + return parsedPolicy, fmt.Errorf("Unknown type %s of conditional field value %s found in POST policy form", reflect.TypeOf(condt).String(), condt) + } + // {"acl": "public-read" } is an alternate way to indicate - [ "eq", "$acl", "public-read" ] + // In this case we will just collapse this into "eq" for all use cases. + parsedPolicy.Conditions.Policies = append(parsedPolicy.Conditions.Policies, struct { + Operator string + Key string + Value string + }{ + policyCondEqual, "$" + strings.ToLower(k), toString(v), + }) + } + case []interface{}: // Handle array types. + if len(condt) != 3 { // Return error if we have insufficient elements. + return parsedPolicy, fmt.Errorf("Malformed conditional fields %s of type %s found in POST policy form", condt, reflect.TypeOf(condt).String()) + } + switch toLowerString(condt[0]) { + case policyCondEqual, policyCondStartsWith: + for _, v := range condt { // Pre-check all values for type. + if !isString(v) { + // All values must be of type string. + return parsedPolicy, fmt.Errorf("Unknown type %s of conditional field value %s found in POST policy form", reflect.TypeOf(condt).String(), condt) + } + } + operator, matchType, value := toLowerString(condt[0]), toLowerString(condt[1]), toString(condt[2]) + if !strings.HasPrefix(matchType, "$") { + return parsedPolicy, fmt.Errorf("Invalid according to Policy: Policy Condition failed: [%s, %s, %s]", operator, matchType, value) + } + parsedPolicy.Conditions.Policies = append(parsedPolicy.Conditions.Policies, struct { + Operator string + Key string + Value string + }{ + operator, matchType, value, + }) + case policyCondContentLength: + min, err := toInteger(condt[1]) + if err != nil { + return parsedPolicy, err + } + + max, err := toInteger(condt[2]) + if err != nil { + return parsedPolicy, err + } + + parsedPolicy.Conditions.ContentLengthRange = contentLengthRange{ + Min: min, + Max: max, + Valid: true, + } + default: + // Condition should be valid. 
+ return parsedPolicy, fmt.Errorf("Unknown type %s of conditional field value %s found in POST policy form", + reflect.TypeOf(condt).String(), condt) + } + default: + return parsedPolicy, fmt.Errorf("Unknown field %s of type %s found in POST policy form", + condt, reflect.TypeOf(condt).String()) + } + } + return parsedPolicy, nil +} + +// checkPolicyCond returns a boolean to indicate if a condition is satisified according +// to the passed operator +func checkPolicyCond(op string, input1, input2 string) bool { + switch op { + case policyCondEqual: + return input1 == input2 + case policyCondStartsWith: + return strings.HasPrefix(input1, input2) + } + return false +} + +// CheckPostPolicy - apply policy conditions and validate input values. +// (http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-HTTPPOSTConstructPolicy.html) +func CheckPostPolicy(formValues http.Header, postPolicyForm PostPolicyForm) error { + // Check if policy document expiry date is still not reached + if !postPolicyForm.Expiration.After(time.Now().UTC()) { + return fmt.Errorf("Invalid according to Policy: Policy expired") + } + // map to store the metadata + metaMap := make(map[string]string) + for _, policy := range postPolicyForm.Conditions.Policies { + if strings.HasPrefix(policy.Key, "$x-amz-meta-") { + formCanonicalName := http.CanonicalHeaderKey(strings.TrimPrefix(policy.Key, "$")) + metaMap[formCanonicalName] = policy.Value + } + } + // Check if any extra metadata field is passed as input + for key := range formValues { + if strings.HasPrefix(key, "X-Amz-Meta-") { + if _, ok := metaMap[key]; !ok { + return fmt.Errorf("Invalid according to Policy: Extra input fields: %s", key) + } + } + } + + // Flag to indicate if all policies conditions are satisfied + var condPassed bool + + // Iterate over policy conditions and check them against received form fields + for _, policy := range postPolicyForm.Conditions.Policies { + // Form fields names are in canonical format, convert conditions names + // to canonical for simplification purpose, so `$key` will become `Key` + formCanonicalName := http.CanonicalHeaderKey(strings.TrimPrefix(policy.Key, "$")) + // Operator for the current policy condition + op := policy.Operator + // If the current policy condition is known + if startsWithSupported, condFound := startsWithConds[policy.Key]; condFound { + // Check if the current condition supports starts-with operator + if op == policyCondStartsWith && !startsWithSupported { + return fmt.Errorf("Invalid according to Policy: Policy Condition failed") + } + // Check if current policy condition is satisfied + condPassed = checkPolicyCond(op, formValues.Get(formCanonicalName), policy.Value) + if !condPassed { + return fmt.Errorf("Invalid according to Policy: Policy Condition failed") + } + } else { + // This covers all conditions X-Amz-Meta-* and X-Amz-* + if strings.HasPrefix(policy.Key, "$x-amz-meta-") || strings.HasPrefix(policy.Key, "$x-amz-") { + // Check if policy condition is satisfied + condPassed = checkPolicyCond(op, formValues.Get(formCanonicalName), policy.Value) + if !condPassed { + return fmt.Errorf("Invalid according to Policy: Policy Condition failed: [%s, %s, %s]", op, policy.Key, policy.Value) + } + } + } + } + + return nil +} diff --git a/weed/s3api/policy/postpolicyform_test.go b/weed/s3api/policy/postpolicyform_test.go new file mode 100644 index 000000000..1a9d78b0e --- /dev/null +++ b/weed/s3api/policy/postpolicyform_test.go @@ -0,0 +1,106 @@ +package policy + +/* + * MinIO Cloud Storage, (C) 2016 MinIO, Inc. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import ( + "encoding/base64" + "fmt" + "net/http" + "testing" + "time" +) + +// Test Post Policy parsing and checking conditions +func TestPostPolicyForm(t *testing.T) { + pp := NewPostPolicy() + pp.SetBucket("testbucket") + pp.SetContentType("image/jpeg") + pp.SetUserMetadata("uuid", "14365123651274") + pp.SetKeyStartsWith("user/user1/filename") + pp.SetContentLengthRange(1048579, 10485760) + pp.SetSuccessStatusAction("201") + + type testCase struct { + Bucket string + Key string + XAmzDate string + XAmzAlgorithm string + XAmzCredential string + XAmzMetaUUID string + ContentType string + SuccessActionStatus string + Policy string + Expired bool + expectedErr error + } + + testCases := []testCase{ + // Everything is fine with this test + {Bucket: "testbucket", Key: "user/user1/filename/${filename}/myfile.txt", XAmzMetaUUID: "14365123651274", SuccessActionStatus: "201", XAmzCredential: "KVGKMDUQ23TCZXTLTHLP/20160727/us-east-1/s3/aws4_request", XAmzDate: "20160727T000000Z", XAmzAlgorithm: "AWS4-HMAC-SHA256", ContentType: "image/jpeg", expectedErr: nil}, + // Expired policy document + {Bucket: "testbucket", Key: "user/user1/filename/${filename}/myfile.txt", XAmzMetaUUID: "14365123651274", SuccessActionStatus: "201", XAmzCredential: "KVGKMDUQ23TCZXTLTHLP/20160727/us-east-1/s3/aws4_request", XAmzDate: "20160727T000000Z", XAmzAlgorithm: "AWS4-HMAC-SHA256", ContentType: "image/jpeg", Expired: true, expectedErr: fmt.Errorf("Invalid according to Policy: Policy expired")}, + // Different AMZ date + {Bucket: "testbucket", Key: "user/user1/filename/${filename}/myfile.txt", XAmzMetaUUID: "14365123651274", XAmzDate: "2017T000000Z", XAmzAlgorithm: "AWS4-HMAC-SHA256", ContentType: "image/jpeg", expectedErr: fmt.Errorf("Invalid according to Policy: Policy Condition failed")}, + // Key which doesn't start with user/user1/filename + {Bucket: "testbucket", Key: "myfile.txt", XAmzDate: "20160727T000000Z", XAmzMetaUUID: "14365123651274", XAmzAlgorithm: "AWS4-HMAC-SHA256", ContentType: "image/jpeg", expectedErr: fmt.Errorf("Invalid according to Policy: Policy Condition failed")}, + // Incorrect bucket name. 
+ {Bucket: "incorrect", Key: "user/user1/filename/myfile.txt", XAmzMetaUUID: "14365123651274", XAmzDate: "20160727T000000Z", XAmzAlgorithm: "AWS4-HMAC-SHA256", ContentType: "image/jpeg", expectedErr: fmt.Errorf("Invalid according to Policy: Policy Condition failed")}, + // Incorrect key name + {Bucket: "testbucket", Key: "incorrect", XAmzDate: "20160727T000000Z", XAmzMetaUUID: "14365123651274", XAmzAlgorithm: "AWS4-HMAC-SHA256", ContentType: "image/jpeg", expectedErr: fmt.Errorf("Invalid according to Policy: Policy Condition failed")}, + // Incorrect date + {Bucket: "testbucket", Key: "user/user1/filename/${filename}/myfile.txt", XAmzMetaUUID: "14365123651274", XAmzDate: "incorrect", XAmzAlgorithm: "AWS4-HMAC-SHA256", ContentType: "image/jpeg", expectedErr: fmt.Errorf("Invalid according to Policy: Policy Condition failed")}, + // Incorrect ContentType + {Bucket: "testbucket", Key: "user/user1/filename/${filename}/myfile.txt", XAmzMetaUUID: "14365123651274", XAmzDate: "20160727T000000Z", XAmzAlgorithm: "AWS4-HMAC-SHA256", ContentType: "incorrect", expectedErr: fmt.Errorf("Invalid according to Policy: Policy Condition failed")}, + // Incorrect Metadata + {Bucket: "testbucket", Key: "user/user1/filename/${filename}/myfile.txt", XAmzMetaUUID: "151274", SuccessActionStatus: "201", XAmzCredential: "KVGKMDUQ23TCZXTLTHLP/20160727/us-east-1/s3/aws4_request", XAmzDate: "20160727T000000Z", XAmzAlgorithm: "AWS4-HMAC-SHA256", ContentType: "image/jpeg", expectedErr: fmt.Errorf("Invalid according to Policy: Policy Condition failed: [eq, $x-amz-meta-uuid, 14365123651274]")}, + } + // Validate all the test cases. + for i, tt := range testCases { + formValues := make(http.Header) + formValues.Set("Bucket", tt.Bucket) + formValues.Set("Key", tt.Key) + formValues.Set("Content-Type", tt.ContentType) + formValues.Set("X-Amz-Date", tt.XAmzDate) + formValues.Set("X-Amz-Meta-Uuid", tt.XAmzMetaUUID) + formValues.Set("X-Amz-Algorithm", tt.XAmzAlgorithm) + formValues.Set("X-Amz-Credential", tt.XAmzCredential) + if tt.Expired { + // Expired already. + pp.SetExpires(time.Now().UTC().AddDate(0, 0, -10)) + } else { + // Expires in 10 days. 
+ pp.SetExpires(time.Now().UTC().AddDate(0, 0, 10)) + } + + formValues.Set("Policy", base64.StdEncoding.EncodeToString([]byte(pp.String()))) + formValues.Set("Success_action_status", tt.SuccessActionStatus) + policyBytes, err := base64.StdEncoding.DecodeString(base64.StdEncoding.EncodeToString([]byte(pp.String()))) + if err != nil { + t.Fatal(err) + } + + postPolicyForm, err := ParsePostPolicyForm(string(policyBytes)) + if err != nil { + t.Fatal(err) + } + + err = CheckPostPolicy(formValues, postPolicyForm) + if err != nil && tt.expectedErr != nil && err.Error() != tt.expectedErr.Error() { + t.Fatalf("Test %d:, Expected %s, got %s", i+1, tt.expectedErr.Error(), err.Error()) + } + } +} diff --git a/weed/s3api/s3_constants/s3_actions.go b/weed/s3api/s3_constants/s3_actions.go new file mode 100644 index 000000000..4e484ac98 --- /dev/null +++ b/weed/s3api/s3_constants/s3_actions.go @@ -0,0 +1,9 @@ +package s3_constants + +const ( + ACTION_READ = "Read" + ACTION_WRITE = "Write" + ACTION_ADMIN = "Admin" + ACTION_TAGGING = "Tagging" + ACTION_LIST = "List" +) diff --git a/weed/s3api/s3api_auth.go b/weed/s3api/s3api_auth.go index b680fe1e1..bf5cf5fab 100644 --- a/weed/s3api/s3api_auth.go +++ b/weed/s3api/s3api_auth.go @@ -9,6 +9,8 @@ import ( const ( signV4Algorithm = "AWS4-HMAC-SHA256" signV2Algorithm = "AWS" + iso8601Format = "20060102T150405Z" + yyyymmdd = "20060102" ) // Verify if request has JWT. @@ -23,8 +25,8 @@ func isRequestSignatureV4(r *http.Request) bool { // Verify if request has AWS Signature Version '2'. func isRequestSignatureV2(r *http.Request) bool { - return (!strings.HasPrefix(r.Header.Get("Authorization"), signV4Algorithm) && - strings.HasPrefix(r.Header.Get("Authorization"), signV2Algorithm)) + return !strings.HasPrefix(r.Header.Get("Authorization"), signV4Algorithm) && + strings.HasPrefix(r.Header.Get("Authorization"), signV2Algorithm) } // Verify if request has AWS PreSign Version '4'. 
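The two layout constants added to s3api_auth.go above, iso8601Format ("20060102T150405Z") and yyyymmdd ("20060102"), are Go reference-time layouts; the trailing Z is matched as a literal character, so time.Parse accepts the zulu-suffixed X-Amz-Date values exercised by the post-policy test. A minimal standalone sketch of parsing such a header — the sample date is borrowed from the test cases above; this is illustrative code, not part of the patch:

package main

import (
	"fmt"
	"time"
)

const (
	iso8601Format = "20060102T150405Z" // layout added in s3api_auth.go; trailing Z is literal
	yyyymmdd      = "20060102"         // day-only layout, e.g. for the credential scope date
)

func main() {
	// A well-formed X-Amz-Date value, as used in the test cases above.
	amzDate := "20160727T000000Z"
	t, err := time.Parse(iso8601Format, amzDate)
	if err != nil {
		// A malformed value such as "2017T000000Z" fails to parse here.
		fmt.Println("invalid X-Amz-Date:", err)
		return
	}
	fmt.Println(t.Format(yyyymmdd)) // prints 20160727
}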
diff --git a/weed/s3api/s3api_bucket_handlers.go b/weed/s3api/s3api_bucket_handlers.go index 492d94616..48e8cb047 100644 --- a/weed/s3api/s3api_bucket_handlers.go +++ b/weed/s3api/s3api_bucket_handlers.go @@ -4,21 +4,19 @@ import ( "context" "encoding/xml" "fmt" + "github.com/chrislusf/seaweedfs/weed/s3api/s3_constants" "math" "net/http" - "os" "time" + xhttp "github.com/chrislusf/seaweedfs/weed/s3api/http" + "github.com/chrislusf/seaweedfs/weed/s3api/s3err" + "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/s3" + "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" - "github.com/gorilla/mux" -) - -var ( - OS_UID = uint32(os.Getuid()) - OS_GID = uint32(os.Getgid()) ) type ListAllMyBucketsResult struct { @@ -29,29 +27,44 @@ type ListAllMyBucketsResult struct { func (s3a *S3ApiServer) ListBucketsHandler(w http.ResponseWriter, r *http.Request) { + var identity *Identity + var s3Err s3err.ErrorCode + if s3a.iam.isEnabled() { + identity, s3Err = s3a.iam.authUser(r) + if s3Err != s3err.ErrNone { + writeErrorResponse(w, s3Err, r.URL) + return + } + } + var response ListAllMyBucketsResult - entries, err := s3a.list(context.Background(), s3a.option.BucketsPath, "", "", false, math.MaxInt32) + entries, _, err := s3a.list(s3a.option.BucketsPath, "", "", false, math.MaxInt32) if err != nil { - writeErrorResponse(w, ErrInternalError, r.URL) + writeErrorResponse(w, s3err.ErrInternalError, r.URL) return } + identityId := r.Header.Get(xhttp.AmzIdentityId) + var buckets []*s3.Bucket for _, entry := range entries { if entry.IsDirectory { + if identity != nil && !identity.canDo(s3_constants.ACTION_LIST, entry.Name) { + continue + } buckets = append(buckets, &s3.Bucket{ Name: aws.String(entry.Name), - CreationDate: aws.Time(time.Unix(entry.Attributes.Crtime, 0)), + CreationDate: aws.Time(time.Unix(entry.Attributes.Crtime, 0).UTC()), }) } } response = ListAllMyBucketsResult{ Owner: &s3.Owner{ - ID: aws.String(""), - DisplayName: aws.String(""), + ID: aws.String(identityId), + DisplayName: aws.String(identityId), }, Buckets: buckets, } @@ -61,12 +74,51 @@ func (s3a *S3ApiServer) ListBucketsHandler(w http.ResponseWriter, r *http.Reques func (s3a *S3ApiServer) PutBucketHandler(w http.ResponseWriter, r *http.Request) { - vars := mux.Vars(r) - bucket := vars["bucket"] + bucket, _ := getBucketAndObject(r) + + // avoid duplicated buckets + errCode := s3err.ErrNone + if err := s3a.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { + if resp, err := client.CollectionList(context.Background(), &filer_pb.CollectionListRequest{ + IncludeEcVolumes: true, + IncludeNormalVolumes: true, + }); err != nil { + glog.Errorf("list collection: %v", err) + return fmt.Errorf("list collections: %v", err) + } else { + for _, c := range resp.Collections { + if bucket == c.Name { + errCode = s3err.ErrBucketAlreadyExists + break + } + } + } + return nil + }); err != nil { + writeErrorResponse(w, s3err.ErrInternalError, r.URL) + return + } + if exist, err := s3a.exists(s3a.option.BucketsPath, bucket, true); err == nil && exist { + errCode = s3err.ErrBucketAlreadyExists + } + if errCode != s3err.ErrNone { + writeErrorResponse(w, errCode, r.URL) + return + } + + fn := func(entry *filer_pb.Entry) { + if identityId := r.Header.Get(xhttp.AmzIdentityId); identityId != "" { + if entry.Extended == nil { + entry.Extended = make(map[string][]byte) + } + entry.Extended[xhttp.AmzIdentityId] = []byte(identityId) + } + } // create the folder for bucket, but lazily create actual 
collection - if err := s3a.mkdir(context.Background(), s3a.option.BucketsPath, bucket, nil); err != nil { - writeErrorResponse(w, ErrInternalError, r.URL) + if err := s3a.mkdir(s3a.option.BucketsPath, bucket, fn); err != nil { + glog.Errorf("PutBucketHandler mkdir: %v", err) + writeErrorResponse(w, s3err.ErrInternalError, r.URL) return } @@ -75,11 +127,14 @@ func (s3a *S3ApiServer) PutBucketHandler(w http.ResponseWriter, r *http.Request) func (s3a *S3ApiServer) DeleteBucketHandler(w http.ResponseWriter, r *http.Request) { - vars := mux.Vars(r) - bucket := vars["bucket"] + bucket, _ := getBucketAndObject(r) - ctx := context.Background() - err := s3a.withFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { + if err := s3a.checkBucket(r, bucket); err != s3err.ErrNone { + writeErrorResponse(w, err, r.URL) + return + } + + err := s3a.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { // delete collection deleteCollectionRequest := &filer_pb.DeleteCollectionRequest{ @@ -87,17 +142,17 @@ func (s3a *S3ApiServer) DeleteBucketHandler(w http.ResponseWriter, r *http.Reque } glog.V(1).Infof("delete collection: %v", deleteCollectionRequest) - if _, err := client.DeleteCollection(ctx, deleteCollectionRequest); err != nil { + if _, err := client.DeleteCollection(context.Background(), deleteCollectionRequest); err != nil { return fmt.Errorf("delete collection %s: %v", bucket, err) } return nil }) - err = s3a.rm(ctx, s3a.option.BucketsPath, bucket, true, false, true) + err = s3a.rm(s3a.option.BucketsPath, bucket, false, true) if err != nil { - writeErrorResponse(w, ErrInternalError, r.URL) + writeErrorResponse(w, s3err.ErrInternalError, r.URL) return } @@ -106,30 +161,42 @@ func (s3a *S3ApiServer) DeleteBucketHandler(w http.ResponseWriter, r *http.Reque func (s3a *S3ApiServer) HeadBucketHandler(w http.ResponseWriter, r *http.Request) { - vars := mux.Vars(r) - bucket := vars["bucket"] - - ctx := context.Background() + bucket, _ := getBucketAndObject(r) - err := s3a.withFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { + if err := s3a.checkBucket(r, bucket); err != s3err.ErrNone { + writeErrorResponse(w, err, r.URL) + return + } - request := &filer_pb.LookupDirectoryEntryRequest{ - Directory: s3a.option.BucketsPath, - Name: bucket, - } + writeSuccessResponseEmpty(w) +} - glog.V(1).Infof("lookup bucket: %v", request) - if _, err := client.LookupDirectoryEntry(ctx, request); err != nil { - return fmt.Errorf("lookup bucket %s/%s: %v", s3a.option.BucketsPath, bucket, err) - } +func (s3a *S3ApiServer) checkBucket(r *http.Request, bucket string) s3err.ErrorCode { + entry, err := s3a.getEntry(s3a.option.BucketsPath, bucket) + if entry == nil || err == filer_pb.ErrNotFound { + return s3err.ErrNoSuchBucket + } - return nil - }) + if !s3a.hasAccess(r, entry) { + return s3err.ErrAccessDenied + } + return s3err.ErrNone +} - if err != nil { - writeErrorResponse(w, ErrNoSuchBucket, r.URL) - return +func (s3a *S3ApiServer) hasAccess(r *http.Request, entry *filer_pb.Entry) bool { + isAdmin := r.Header.Get(xhttp.AmzIsAdmin) != "" + if isAdmin { + return true + } + if entry.Extended == nil { + return true } - writeSuccessResponseEmpty(w) + identityId := r.Header.Get(xhttp.AmzIdentityId) + if id, ok := entry.Extended[xhttp.AmzIdentityId]; ok { + if identityId != string(id) { + return false + } + } + return true } diff --git a/weed/s3api/s3api_errors.go b/weed/s3api/s3api_errors.go deleted file mode 100644 index 7ba55ed28..000000000 --- a/weed/s3api/s3api_errors.go +++ /dev/null @@ 
-1,131 +0,0 @@ -package s3api - -import ( - "encoding/xml" - "net/http" -) - -// APIError structure -type APIError struct { - Code string - Description string - HTTPStatusCode int -} - -// RESTErrorResponse - error response format -type RESTErrorResponse struct { - XMLName xml.Name `xml:"Error" json:"-"` - Code string `xml:"Code" json:"Code"` - Message string `xml:"Message" json:"Message"` - Resource string `xml:"Resource" json:"Resource"` - RequestID string `xml:"RequestId" json:"RequestId"` -} - -// ErrorCode type of error status. -type ErrorCode int - -// Error codes, see full list at http://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html -const ( - ErrNone ErrorCode = iota - ErrMethodNotAllowed - ErrBucketNotEmpty - ErrBucketAlreadyExists - ErrBucketAlreadyOwnedByYou - ErrNoSuchBucket - ErrNoSuchUpload - ErrInvalidBucketName - ErrInvalidDigest - ErrInvalidMaxKeys - ErrInvalidMaxUploads - ErrInvalidMaxParts - ErrInvalidPartNumberMarker - ErrInvalidPart - ErrInternalError - ErrNotImplemented -) - -// error code to APIError structure, these fields carry respective -// descriptions for all the error responses. -var errorCodeResponse = map[ErrorCode]APIError{ - ErrMethodNotAllowed: { - Code: "MethodNotAllowed", - Description: "The specified method is not allowed against this resource.", - HTTPStatusCode: http.StatusMethodNotAllowed, - }, - ErrBucketNotEmpty: { - Code: "BucketNotEmpty", - Description: "The bucket you tried to delete is not empty", - HTTPStatusCode: http.StatusConflict, - }, - ErrBucketAlreadyExists: { - Code: "BucketAlreadyExists", - Description: "The requested bucket name is not available. The bucket namespace is shared by all users of the system. Please select a different name and try again.", - HTTPStatusCode: http.StatusConflict, - }, - ErrBucketAlreadyOwnedByYou: { - Code: "BucketAlreadyOwnedByYou", - Description: "Your previous request to create the named bucket succeeded and you already own it.", - HTTPStatusCode: http.StatusConflict, - }, - ErrInvalidBucketName: { - Code: "InvalidBucketName", - Description: "The specified bucket is not valid.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrInvalidDigest: { - Code: "InvalidDigest", - Description: "The Content-Md5 you specified is not valid.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrInvalidMaxUploads: { - Code: "InvalidArgument", - Description: "Argument max-uploads must be an integer between 0 and 2147483647", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrInvalidMaxKeys: { - Code: "InvalidArgument", - Description: "Argument maxKeys must be an integer between 0 and 2147483647", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrInvalidMaxParts: { - Code: "InvalidArgument", - Description: "Argument max-parts must be an integer between 0 and 2147483647", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrInvalidPartNumberMarker: { - Code: "InvalidArgument", - Description: "Argument partNumberMarker must be an integer.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrNoSuchBucket: { - Code: "NoSuchBucket", - Description: "The specified bucket does not exist", - HTTPStatusCode: http.StatusNotFound, - }, - ErrNoSuchUpload: { - Code: "NoSuchUpload", - Description: "The specified multipart upload does not exist. 
The upload ID may be invalid, or the upload may have been aborted or completed.", - HTTPStatusCode: http.StatusNotFound, - }, - ErrInternalError: { - Code: "InternalError", - Description: "We encountered an internal error, please try again.", - HTTPStatusCode: http.StatusInternalServerError, - }, - - ErrInvalidPart: { - Code: "InvalidPart", - Description: "One or more of the specified parts could not be found. The part may not have been uploaded, or the specified entity tag may not match the part's entity tag.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrNotImplemented: { - Code: "NotImplemented", - Description: "A header you provided implies functionality that is not implemented", - HTTPStatusCode: http.StatusNotImplemented, - }, -} - -// getAPIError provides API Error for input API error code. -func getAPIError(code ErrorCode) APIError { - return errorCodeResponse[code] -} diff --git a/weed/s3api/s3api_handlers.go b/weed/s3api/s3api_handlers.go index 127be07e3..6935c75bd 100644 --- a/weed/s3api/s3api_handlers.go +++ b/weed/s3api/s3api_handlers.go @@ -2,17 +2,20 @@ package s3api import ( "bytes" - "context" "encoding/base64" "encoding/xml" "fmt" - "github.com/chrislusf/seaweedfs/weed/glog" - "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" - "github.com/chrislusf/seaweedfs/weed/util" - "google.golang.org/grpc" + "github.com/chrislusf/seaweedfs/weed/s3api/s3err" "net/http" "net/url" + "strconv" "time" + + "google.golang.org/grpc" + + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" ) type mimeType string @@ -37,30 +40,35 @@ func encodeResponse(response interface{}) []byte { return bytesBuffer.Bytes() } -func (s3a *S3ApiServer) withFilerClient(ctx context.Context, fn func(filer_pb.SeaweedFilerClient) error) error { +var _ = filer_pb.FilerClient(&S3ApiServer{}) + +func (s3a *S3ApiServer) WithFilerClient(fn func(filer_pb.SeaweedFilerClient) error) error { - return util.WithCachedGrpcClient(ctx, func(grpcConnection *grpc.ClientConn) error { + return pb.WithCachedGrpcClient(func(grpcConnection *grpc.ClientConn) error { client := filer_pb.NewSeaweedFilerClient(grpcConnection) return fn(client) }, s3a.option.FilerGrpcAddress, s3a.option.GrpcDialOption) } +func (s3a *S3ApiServer) AdjustedUrl(location *filer_pb.Location) string { + return location.Url +} // If none of the http routes match respond with MethodNotAllowed func notFoundHandler(w http.ResponseWriter, r *http.Request) { glog.V(0).Infof("unsupported %s %s", r.Method, r.RequestURI) - writeErrorResponse(w, ErrMethodNotAllowed, r.URL) + writeErrorResponse(w, s3err.ErrMethodNotAllowed, r.URL) } -func writeErrorResponse(w http.ResponseWriter, errorCode ErrorCode, reqURL *url.URL) { - apiError := getAPIError(errorCode) +func writeErrorResponse(w http.ResponseWriter, errorCode s3err.ErrorCode, reqURL *url.URL) { + apiError := s3err.GetAPIError(errorCode) errorResponse := getRESTErrorResponse(apiError, reqURL.Path) encodedErrorResponse := encodeResponse(errorResponse) writeResponse(w, apiError.HTTPStatusCode, encodedErrorResponse, mimeXML) } -func getRESTErrorResponse(err APIError, resource string) RESTErrorResponse { - return RESTErrorResponse{ +func getRESTErrorResponse(err s3err.APIError, resource string) s3err.RESTErrorResponse { + return s3err.RESTErrorResponse{ Code: err.Code, Message: err.Description, Resource: resource, @@ -70,13 +78,19 @@ func getRESTErrorResponse(err APIError, resource string) RESTErrorResponse { func writeResponse(w 
http.ResponseWriter, statusCode int, response []byte, mType mimeType) { setCommonHeaders(w) + if response != nil { + w.Header().Set("Content-Length", strconv.Itoa(len(response))) + } if mType != mimeNone { w.Header().Set("Content-Type", string(mType)) } w.WriteHeader(statusCode) if response != nil { glog.V(4).Infof("status %d %s: %s", statusCode, mType, string(response)) - w.Write(response) + _, err := w.Write(response) + if err != nil { + glog.V(0).Infof("write err: %v", err) + } w.(http.Flusher).Flush() } } diff --git a/weed/s3api/s3api_object_copy_handlers.go b/weed/s3api/s3api_object_copy_handlers.go new file mode 100644 index 000000000..84a85fd78 --- /dev/null +++ b/weed/s3api/s3api_object_copy_handlers.go @@ -0,0 +1,174 @@ +package s3api + +import ( + "fmt" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/s3api/s3err" + weed_server "github.com/chrislusf/seaweedfs/weed/server" + "net/http" + "net/url" + "strconv" + "strings" + "time" + + "github.com/chrislusf/seaweedfs/weed/util" +) + +func (s3a *S3ApiServer) CopyObjectHandler(w http.ResponseWriter, r *http.Request) { + + dstBucket, dstObject := getBucketAndObject(r) + + // Copy source path. + cpSrcPath, err := url.QueryUnescape(r.Header.Get("X-Amz-Copy-Source")) + if err != nil { + // Save unescaped string as is. + cpSrcPath = r.Header.Get("X-Amz-Copy-Source") + } + + srcBucket, srcObject := pathToBucketAndObject(cpSrcPath) + + if (srcBucket == dstBucket && srcObject == dstObject || cpSrcPath == "") && isReplace(r) { + fullPath := util.FullPath(fmt.Sprintf("%s/%s%s", s3a.option.BucketsPath, dstBucket, dstObject)) + dir, name := fullPath.DirAndName() + entry, err := s3a.getEntry(dir, name) + if err != nil { + writeErrorResponse(w, s3err.ErrInvalidCopySource, r.URL) + } + entry.Extended = weed_server.SaveAmzMetaData(r, entry.Extended, isReplace(r)) + err = s3a.touch(dir, name, entry) + if err != nil { + writeErrorResponse(w, s3err.ErrInvalidCopySource, r.URL) + } + writeSuccessResponseXML(w, encodeResponse(CopyObjectResult{ + ETag: fmt.Sprintf("%x", entry.Attributes.Md5), + LastModified: time.Now().UTC(), + })) + return + } + + // If source object is empty or bucket is empty, reply back invalid copy source. 
+ if srcObject == "" || srcBucket == "" { + writeErrorResponse(w, s3err.ErrInvalidCopySource, r.URL) + return + } + + if srcBucket == dstBucket && srcObject == dstObject { + writeErrorResponse(w, s3err.ErrInvalidCopyDest, r.URL) + return + } + + dstUrl := fmt.Sprintf("http://%s%s/%s%s?collection=%s", + s3a.option.Filer, s3a.option.BucketsPath, dstBucket, dstObject, dstBucket) + srcUrl := fmt.Sprintf("http://%s%s/%s%s", + s3a.option.Filer, s3a.option.BucketsPath, srcBucket, srcObject) + + _, _, resp, err := util.DownloadFile(srcUrl) + if err != nil { + writeErrorResponse(w, s3err.ErrInvalidCopySource, r.URL) + return + } + defer util.CloseResponse(resp) + + glog.V(2).Infof("copy from %s to %s", srcUrl, dstUrl) + etag, errCode := s3a.putToFiler(r, dstUrl, resp.Body) + + if errCode != s3err.ErrNone { + writeErrorResponse(w, errCode, r.URL) + return + } + + setEtag(w, etag) + + response := CopyObjectResult{ + ETag: etag, + LastModified: time.Now().UTC(), + } + + writeSuccessResponseXML(w, encodeResponse(response)) + +} + +func pathToBucketAndObject(path string) (bucket, object string) { + path = strings.TrimPrefix(path, "/") + parts := strings.SplitN(path, "/", 2) + if len(parts) == 2 { + return parts[0], "/" + parts[1] + } + return parts[0], "/" +} + +type CopyPartResult struct { + LastModified time.Time `xml:"LastModified"` + ETag string `xml:"ETag"` +} + +func (s3a *S3ApiServer) CopyObjectPartHandler(w http.ResponseWriter, r *http.Request) { + // https://docs.aws.amazon.com/AmazonS3/latest/dev/CopyingObjctsUsingRESTMPUapi.html + // https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html + dstBucket, _ := getBucketAndObject(r) + + // Copy source path. + cpSrcPath, err := url.QueryUnescape(r.Header.Get("X-Amz-Copy-Source")) + if err != nil { + // Save unescaped string as is. + cpSrcPath = r.Header.Get("X-Amz-Copy-Source") + } + + srcBucket, srcObject := pathToBucketAndObject(cpSrcPath) + // If source object is empty or bucket is empty, reply back invalid copy source. 
+ if srcObject == "" || srcBucket == "" { + writeErrorResponse(w, s3err.ErrInvalidCopySource, r.URL) + return + } + + uploadID := r.URL.Query().Get("uploadId") + partIDString := r.URL.Query().Get("partNumber") + + partID, err := strconv.Atoi(partIDString) + if err != nil { + writeErrorResponse(w, s3err.ErrInvalidPart, r.URL) + return + } + + // check partID with maximum part ID for multipart objects + if partID > globalMaxPartID { + writeErrorResponse(w, s3err.ErrInvalidMaxParts, r.URL) + return + } + + rangeHeader := r.Header.Get("x-amz-copy-source-range") + + dstUrl := fmt.Sprintf("http://%s%s/%s/%04d.part?collection=%s", + s3a.option.Filer, s3a.genUploadsFolder(dstBucket), uploadID, partID, dstBucket) + srcUrl := fmt.Sprintf("http://%s%s/%s%s", + s3a.option.Filer, s3a.option.BucketsPath, srcBucket, srcObject) + + dataReader, err := util.ReadUrlAsReaderCloser(srcUrl, rangeHeader) + if err != nil { + writeErrorResponse(w, s3err.ErrInvalidCopySource, r.URL) + return + } + defer dataReader.Close() + + glog.V(2).Infof("copy from %s to %s", srcUrl, dstUrl) + etag, errCode := s3a.putToFiler(r, dstUrl, dataReader) + + if errCode != s3err.ErrNone { + writeErrorResponse(w, errCode, r.URL) + return + } + + setEtag(w, etag) + + response := CopyPartResult{ + ETag: etag, + LastModified: time.Now().UTC(), + } + + writeSuccessResponseXML(w, encodeResponse(response)) + +} + +func isReplace(r *http.Request) bool { + return r.Header.Get("X-Amz-Metadata-Directive") == "REPLACE" +} diff --git a/weed/s3api/s3api_object_handlers.go b/weed/s3api/s3api_object_handlers.go index 44e93d297..f1a539ac5 100644 --- a/weed/s3api/s3api_object_handlers.go +++ b/weed/s3api/s3api_object_handlers.go @@ -3,15 +3,24 @@ package s3api import ( "crypto/md5" "encoding/json" + "encoding/xml" "fmt" + "github.com/chrislusf/seaweedfs/weed/filer" "io" "io/ioutil" "net/http" + "net/url" + "sort" "strings" - "github.com/chrislusf/seaweedfs/weed/glog" - "github.com/chrislusf/seaweedfs/weed/server" + "github.com/chrislusf/seaweedfs/weed/s3api/s3err" + "github.com/gorilla/mux" + + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + weed_server "github.com/chrislusf/seaweedfs/weed/server" + "github.com/chrislusf/seaweedfs/weed/util" ) var ( @@ -20,6 +29,7 @@ var ( func init() { client = &http.Client{Transport: &http.Transport{ + MaxIdleConns: 1024, MaxIdleConnsPerHost: 1024, }} } @@ -28,50 +38,73 @@ func (s3a *S3ApiServer) PutObjectHandler(w http.ResponseWriter, r *http.Request) // http://docs.aws.amazon.com/AmazonS3/latest/dev/UploadingObjects.html - vars := mux.Vars(r) - bucket := vars["bucket"] - object := getObject(vars) + bucket, object := getBucketAndObject(r) _, err := validateContentMd5(r.Header) if err != nil { - writeErrorResponse(w, ErrInvalidDigest, r.URL) + writeErrorResponse(w, s3err.ErrInvalidDigest, r.URL) return } - rAuthType := getRequestAuthType(r) dataReader := r.Body - if rAuthType == authTypeStreamingSigned { - dataReader = newSignV4ChunkedReader(r) + if s3a.iam.isEnabled() { + rAuthType := getRequestAuthType(r) + var s3ErrCode s3err.ErrorCode + switch rAuthType { + case authTypeStreamingSigned: + dataReader, s3ErrCode = s3a.iam.newSignV4ChunkedReader(r) + case authTypeSignedV2, authTypePresignedV2: + _, s3ErrCode = s3a.iam.isReqAuthenticatedV2(r) + case authTypePresigned, authTypeSigned: + _, s3ErrCode = s3a.iam.reqSignatureV4Verify(r) + } + if s3ErrCode != s3err.ErrNone { + writeErrorResponse(w, s3ErrCode, r.URL) + return + } } + defer dataReader.Close() - uploadUrl := 
fmt.Sprintf("http://%s%s/%s%s?collection=%s", - s3a.option.Filer, s3a.option.BucketsPath, bucket, object, bucket) + if strings.HasSuffix(object, "/") { + if err := s3a.mkdir(s3a.option.BucketsPath, bucket+object, nil); err != nil { + writeErrorResponse(w, s3err.ErrInternalError, r.URL) + return + } + } else { + uploadUrl := fmt.Sprintf("http://%s%s/%s%s", s3a.option.Filer, s3a.option.BucketsPath, bucket, urlPathEscape(object)) - etag, errCode := s3a.putToFiler(r, uploadUrl, dataReader) + etag, errCode := s3a.putToFiler(r, uploadUrl, dataReader) - if errCode != ErrNone { - writeErrorResponse(w, errCode, r.URL) - return - } + if errCode != s3err.ErrNone { + writeErrorResponse(w, errCode, r.URL) + return + } - setEtag(w, etag) + setEtag(w, etag) + } writeSuccessResponseEmpty(w) } +func urlPathEscape(object string) string { + var escapedParts []string + for _, part := range strings.Split(object, "/") { + escapedParts = append(escapedParts, url.PathEscape(part)) + } + return strings.Join(escapedParts, "/") +} + func (s3a *S3ApiServer) GetObjectHandler(w http.ResponseWriter, r *http.Request) { - vars := mux.Vars(r) - bucket := vars["bucket"] - object := getObject(vars) + bucket, object := getBucketAndObject(r) if strings.HasSuffix(r.URL.Path, "/") { - writeErrorResponse(w, ErrNotImplemented, r.URL) + writeErrorResponse(w, s3err.ErrNotImplemented, r.URL) return } destUrl := fmt.Sprintf("http://%s%s/%s%s", - s3a.option.Filer, s3a.option.BucketsPath, bucket, object) + s3a.option.Filer, s3a.option.BucketsPath, bucket, urlPathEscape(object)) s3a.proxyToFiler(w, r, destUrl, passThroughResponse) @@ -79,12 +112,10 @@ func (s3a *S3ApiServer) GetObjectHandler(w http.ResponseWriter, r *http.Request) func (s3a *S3ApiServer) HeadObjectHandler(w http.ResponseWriter, r *http.Request) { - vars := mux.Vars(r) - bucket := vars["bucket"] - object := getObject(vars) + bucket, object := getBucketAndObject(r) destUrl := fmt.Sprintf("http://%s%s/%s%s", - s3a.option.Filer, s3a.option.BucketsPath, bucket, object) + s3a.option.Filer, s3a.option.BucketsPath, bucket, urlPathEscape(object)) s3a.proxyToFiler(w, r, destUrl, passThroughResponse) @@ -92,29 +123,152 @@ func (s3a *S3ApiServer) HeadObjectHandler(w http.ResponseWriter, r *http.Request func (s3a *S3ApiServer) DeleteObjectHandler(w http.ResponseWriter, r *http.Request) { - vars := mux.Vars(r) - bucket := vars["bucket"] - object := getObject(vars) + bucket, object := getBucketAndObject(r) - destUrl := fmt.Sprintf("http://%s%s/%s%s", - s3a.option.Filer, s3a.option.BucketsPath, bucket, object) + destUrl := fmt.Sprintf("http://%s%s/%s%s?recursive=true", + s3a.option.Filer, s3a.option.BucketsPath, bucket, urlPathEscape(object)) - s3a.proxyToFiler(w, r, destUrl, func(proxyResonse *http.Response, w http.ResponseWriter) { - for k, v := range proxyResonse.Header { + s3a.proxyToFiler(w, r, destUrl, func(proxyResponse *http.Response, w http.ResponseWriter) { + for k, v := range proxyResponse.Header { w.Header()[k] = v } w.WriteHeader(http.StatusNoContent) }) +} + +// / ObjectIdentifier carries key name for the object to delete. +type ObjectIdentifier struct { + ObjectName string `xml:"Key"` +} + +// DeleteObjectsRequest - xml carrying the object key names which needs to be deleted. +type DeleteObjectsRequest struct { + // Element to enable quiet mode for the request + Quiet bool + // List of objects to be deleted + Objects []ObjectIdentifier `xml:"Object"` +} + +// DeleteError structure. 
+type DeleteError struct { + Code string + Message string + Key string +} + +// DeleteObjectsResponse container for multiple object deletes. +type DeleteObjectsResponse struct { + XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ DeleteResult" json:"-"` + // Collection of all deleted objects + DeletedObjects []ObjectIdentifier `xml:"Deleted,omitempty"` + + // Collection of errors deleting certain objects. + Errors []DeleteError `xml:"Error,omitempty"` } // DeleteMultipleObjectsHandler - Delete multiple objects func (s3a *S3ApiServer) DeleteMultipleObjectsHandler(w http.ResponseWriter, r *http.Request) { - // TODO - writeErrorResponse(w, ErrNotImplemented, r.URL) + + bucket, _ := getBucketAndObject(r) + + deleteXMLBytes, err := ioutil.ReadAll(r.Body) + if err != nil { + writeErrorResponse(w, s3err.ErrInternalError, r.URL) + return + } + + deleteObjects := &DeleteObjectsRequest{} + if err := xml.Unmarshal(deleteXMLBytes, deleteObjects); err != nil { + writeErrorResponse(w, s3err.ErrMalformedXML, r.URL) + return + } + + var deletedObjects []ObjectIdentifier + var deleteErrors []DeleteError + + directoriesWithDeletion := make(map[string]int) + + s3a.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { + + // delete file entries + for _, object := range deleteObjects.Objects { + + lastSeparator := strings.LastIndex(object.ObjectName, "/") + parentDirectoryPath, entryName, isDeleteData, isRecursive := "", object.ObjectName, true, false + if lastSeparator > 0 && lastSeparator+1 < len(object.ObjectName) { + entryName = object.ObjectName[lastSeparator+1:] + parentDirectoryPath = "/" + object.ObjectName[:lastSeparator] + } + parentDirectoryPath = fmt.Sprintf("%s/%s%s", s3a.option.BucketsPath, bucket, parentDirectoryPath) + + err := doDeleteEntry(client, parentDirectoryPath, entryName, isDeleteData, isRecursive) + if err == nil { + directoriesWithDeletion[parentDirectoryPath]++ + deletedObjects = append(deletedObjects, object) + } else if strings.Contains(err.Error(), filer.MsgFailDelNonEmptyFolder) { + deletedObjects = append(deletedObjects, object) + } else { + delete(directoriesWithDeletion, parentDirectoryPath) + deleteErrors = append(deleteErrors, DeleteError{ + Code: "", + Message: err.Error(), + Key: object.ObjectName, + }) + } + } + + // purge empty folders, only checking folders with deletions + for len(directoriesWithDeletion) > 0 { + directoriesWithDeletion = s3a.doDeleteEmptyDirectories(client, directoriesWithDeletion) + } + + return nil + }) + + deleteResp := DeleteObjectsResponse{} + if !deleteObjects.Quiet { + deleteResp.DeletedObjects = deletedObjects + } + deleteResp.Errors = deleteErrors + + writeSuccessResponseXML(w, encodeResponse(deleteResp)) + } -func (s3a *S3ApiServer) proxyToFiler(w http.ResponseWriter, r *http.Request, destUrl string, responseFn func(proxyResonse *http.Response, w http.ResponseWriter)) { +func (s3a *S3ApiServer) doDeleteEmptyDirectories(client filer_pb.SeaweedFilerClient, directoriesWithDeletion map[string]int) (newDirectoriesWithDeletion map[string]int) { + var allDirs []string + for dir, _ := range directoriesWithDeletion { + allDirs = append(allDirs, dir) + } + sort.Slice(allDirs, func(i, j int) bool { + return len(allDirs[i]) > len(allDirs[j]) + }) + newDirectoriesWithDeletion = make(map[string]int) + for _, dir := range allDirs { + parentDir, dirName := util.FullPath(dir).DirAndName() + if parentDir == s3a.option.BucketsPath { + continue + } + if err := doDeleteEntry(client, parentDir, dirName, false, false); err != nil { + 
glog.V(4).Infof("directory %s has %d deletion but still not empty: %v", dir, directoriesWithDeletion[dir], err) + } else { + newDirectoriesWithDeletion[parentDir]++ + } + } + return +} + +var passThroughHeaders = []string{ + "response-cache-control", + "response-content-disposition", + "response-content-encoding", + "response-content-language", + "response-content-type", + "response-expires", +} + +func (s3a *S3ApiServer) proxyToFiler(w http.ResponseWriter, r *http.Request, destUrl string, responseFn func(proxyResponse *http.Response, w http.ResponseWriter)) { glog.V(2).Infof("s3 proxying %s to %s", r.Method, destUrl) @@ -122,15 +276,27 @@ func (s3a *S3ApiServer) proxyToFiler(w http.ResponseWriter, r *http.Request, des if err != nil { glog.Errorf("NewRequest %s: %v", destUrl, err) - writeErrorResponse(w, ErrInternalError, r.URL) + writeErrorResponse(w, s3err.ErrInternalError, r.URL) return } proxyReq.Header.Set("Host", s3a.option.Filer) proxyReq.Header.Set("X-Forwarded-For", r.RemoteAddr) - proxyReq.Header.Set("Etag-MD5", "True") for header, values := range r.Header { + // handle s3 related headers + passed := false + for _, h := range passThroughHeaders { + if strings.ToLower(header) == h && len(values) > 0 { + proxyReq.Header.Add(header[len("response-"):], values[0]) + passed = true + break + } + } + if passed { + continue + } + // handle other headers for _, value := range values { proxyReq.Header.Add(header, value) } @@ -140,31 +306,44 @@ func (s3a *S3ApiServer) proxyToFiler(w http.ResponseWriter, r *http.Request, des if postErr != nil { glog.Errorf("post to filer: %v", postErr) - writeErrorResponse(w, ErrInternalError, r.URL) + writeErrorResponse(w, s3err.ErrInternalError, r.URL) return } - defer resp.Body.Close() + defer util.CloseResponse(resp) + + if (resp.ContentLength == -1 || resp.StatusCode == 404) && resp.StatusCode != 304 { + if r.Method != "DELETE" { + writeErrorResponse(w, s3err.ErrNoSuchKey, r.URL) + return + } + } responseFn(resp, w) + } -func passThroughResponse(proxyResonse *http.Response, w http.ResponseWriter) { - for k, v := range proxyResonse.Header { + +func passThroughResponse(proxyResponse *http.Response, w http.ResponseWriter) { + for k, v := range proxyResponse.Header { w.Header()[k] = v } - w.WriteHeader(proxyResonse.StatusCode) - io.Copy(w, proxyResonse.Body) + if proxyResponse.Header.Get("Content-Range") != "" && proxyResponse.StatusCode == 200 { + w.WriteHeader(http.StatusPartialContent) + } else { + w.WriteHeader(proxyResponse.StatusCode) + } + io.Copy(w, proxyResponse.Body) } -func (s3a *S3ApiServer) putToFiler(r *http.Request, uploadUrl string, dataReader io.ReadCloser) (etag string, code ErrorCode) { +func (s3a *S3ApiServer) putToFiler(r *http.Request, uploadUrl string, dataReader io.Reader) (etag string, code s3err.ErrorCode) { hash := md5.New() - var body io.Reader = io.TeeReader(dataReader, hash) + var body = io.TeeReader(dataReader, hash) proxyReq, err := http.NewRequest("PUT", uploadUrl, body) if err != nil { glog.Errorf("NewRequest %s: %v", uploadUrl, err) - return "", ErrInternalError + return "", s3err.ErrInternalError } proxyReq.Header.Set("Host", s3a.option.Filer) @@ -178,11 +357,9 @@ func (s3a *S3ApiServer) putToFiler(r *http.Request, uploadUrl string, dataReader resp, postErr := client.Do(proxyReq) - dataReader.Close() - if postErr != nil { glog.Errorf("post to filer: %v", postErr) - return "", ErrInternalError + return "", s3err.ErrInternalError } defer resp.Body.Close() @@ -190,21 +367,21 @@ func (s3a *S3ApiServer) putToFiler(r 
*http.Request, uploadUrl string, dataReader resp_body, ra_err := ioutil.ReadAll(resp.Body) if ra_err != nil { - glog.Errorf("upload to filer response read: %v", ra_err) - return etag, ErrInternalError + glog.Errorf("upload to filer response read %d: %v", resp.StatusCode, ra_err) + return etag, s3err.ErrInternalError } var ret weed_server.FilerPostResult unmarshal_err := json.Unmarshal(resp_body, &ret) if unmarshal_err != nil { glog.Errorf("failing to read upload to %s : %v", uploadUrl, string(resp_body)) - return "", ErrInternalError + return "", s3err.ErrInternalError } if ret.Error != "" { glog.Errorf("upload to filer error: %v", ret.Error) - return "", ErrInternalError + return "", filerErrorToS3Error(ret.Error) } - return etag, ErrNone + return etag, s3err.ErrNone } func setEtag(w http.ResponseWriter, etag string) { @@ -217,10 +394,20 @@ func setEtag(w http.ResponseWriter, etag string) { } } -func getObject(vars map[string]string) string { - object := vars["object"] +func getBucketAndObject(r *http.Request) (bucket, object string) { + vars := mux.Vars(r) + bucket = vars["bucket"] + object = vars["object"] if !strings.HasPrefix(object, "/") { object = "/" + object } - return object + + return +} + +func filerErrorToS3Error(errString string) s3err.ErrorCode { + if strings.HasPrefix(errString, "existing ") && strings.HasSuffix(errString, "is a directory") { + return s3err.ErrExistingObjectIsDirectory + } + return s3err.ErrInternalError } diff --git a/weed/s3api/s3api_object_handlers_postpolicy.go b/weed/s3api/s3api_object_handlers_postpolicy.go new file mode 100644 index 000000000..035302ae6 --- /dev/null +++ b/weed/s3api/s3api_object_handlers_postpolicy.go @@ -0,0 +1,241 @@ +package s3api + +import ( + "bytes" + "encoding/base64" + "errors" + "fmt" + "github.com/chrislusf/seaweedfs/weed/s3api/policy" + "github.com/chrislusf/seaweedfs/weed/s3api/s3err" + "github.com/dustin/go-humanize" + "github.com/gorilla/mux" + "io" + "io/ioutil" + "mime/multipart" + "net/http" + "net/url" + "strings" +) + +func (s3a *S3ApiServer) PostPolicyBucketHandler(w http.ResponseWriter, r *http.Request) { + + // https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-HTTPPOSTConstructPolicy.html + // https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-post-example.html + + bucket := mux.Vars(r)["bucket"] + + reader, err := r.MultipartReader() + if err != nil { + writeErrorResponse(w, s3err.ErrMalformedPOSTRequest, r.URL) + return + } + form, err := reader.ReadForm(int64(5 * humanize.MiByte)) + if err != nil { + writeErrorResponse(w, s3err.ErrMalformedPOSTRequest, r.URL) + return + } + defer form.RemoveAll() + + fileBody, fileName, fileSize, formValues, err := extractPostPolicyFormValues(form) + if err != nil { + writeErrorResponse(w, s3err.ErrMalformedPOSTRequest, r.URL) + return + } + if fileBody == nil { + writeErrorResponse(w, s3err.ErrPOSTFileRequired, r.URL) + return + } + defer fileBody.Close() + + formValues.Set("Bucket", bucket) + + if fileName != "" && strings.Contains(formValues.Get("Key"), "${filename}") { + formValues.Set("Key", strings.Replace(formValues.Get("Key"), "${filename}", fileName, -1)) + } + object := formValues.Get("Key") + + successRedirect := formValues.Get("success_action_redirect") + successStatus := formValues.Get("success_action_status") + var redirectURL *url.URL + if successRedirect != "" { + redirectURL, err = url.Parse(successRedirect) + if err != nil { + writeErrorResponse(w, s3err.ErrMalformedPOSTRequest, r.URL) + return + } + } + + // Verify policy signature. 
+ errCode := s3a.iam.doesPolicySignatureMatch(formValues) + if errCode != s3err.ErrNone { + writeErrorResponse(w, errCode, r.URL) + return + } + + policyBytes, err := base64.StdEncoding.DecodeString(formValues.Get("Policy")) + if err != nil { + writeErrorResponse(w, s3err.ErrMalformedPOSTRequest, r.URL) + return + } + + // Handle policy if it is set. + if len(policyBytes) > 0 { + + postPolicyForm, err := policy.ParsePostPolicyForm(string(policyBytes)) + if err != nil { + writeErrorResponse(w, s3err.ErrPostPolicyConditionInvalidFormat, r.URL) + return + } + + // Make sure formValues adhere to policy restrictions. + if err = policy.CheckPostPolicy(formValues, postPolicyForm); err != nil { + w.Header().Set("Location", r.URL.Path) + w.WriteHeader(http.StatusTemporaryRedirect) + return + } + + // Ensure that the object size is within expected range, also the file size + // should not exceed the maximum single Put size (5 GiB) + lengthRange := postPolicyForm.Conditions.ContentLengthRange + if lengthRange.Valid { + if fileSize < lengthRange.Min { + writeErrorResponse(w, s3err.ErrEntityTooSmall, r.URL) + return + } + + if fileSize > lengthRange.Max { + writeErrorResponse(w, s3err.ErrEntityTooLarge, r.URL) + return + } + } + } + + uploadUrl := fmt.Sprintf("http://%s%s/%s%s", s3a.option.Filer, s3a.option.BucketsPath, bucket, urlPathEscape(object)) + + etag, errCode := s3a.putToFiler(r, uploadUrl, fileBody) + + if errCode != s3err.ErrNone { + writeErrorResponse(w, errCode, r.URL) + return + } + + if successRedirect != "" { + // Replace raw query params.. + redirectURL.RawQuery = getRedirectPostRawQuery(bucket, object, etag) + w.Header().Set("Location", redirectURL.String()) + writeResponse(w, http.StatusSeeOther, nil, mimeNone) + return + } + + setEtag(w, etag) + + // Decide what http response to send depending on success_action_status parameter + switch successStatus { + case "201": + resp := encodeResponse(PostResponse{ + Bucket: bucket, + Key: object, + ETag: `"` + etag + `"`, + Location: w.Header().Get("Location"), + }) + writeResponse(w, http.StatusCreated, resp, mimeXML) + case "200": + writeResponse(w, http.StatusOK, nil, mimeNone) + default: + writeSuccessResponseEmpty(w) + } + +} + +// Extract form fields and file data from a HTTP POST Policy +func extractPostPolicyFormValues(form *multipart.Form) (filePart io.ReadCloser, fileName string, fileSize int64, formValues http.Header, err error) { + /// HTML Form values + fileName = "" + + // Canonicalize the form values into http.Header. + formValues = make(http.Header) + for k, v := range form.Value { + formValues[http.CanonicalHeaderKey(k)] = v + } + + // Validate form values. + if err = validateFormFieldSize(formValues); err != nil { + return nil, "", 0, nil, err + } + + // this means that filename="" was not specified for file key and Go has + // an ugly way of handling this situation. 
Refer here + // https://golang.org/src/mime/multipart/formdata.go#L61 + if len(form.File) == 0 { + var b = &bytes.Buffer{} + for _, v := range formValues["File"] { + b.WriteString(v) + } + fileSize = int64(b.Len()) + filePart = ioutil.NopCloser(b) + return filePart, fileName, fileSize, formValues, nil + } + + // Iterate until we find a valid File field and break + for k, v := range form.File { + canonicalFormName := http.CanonicalHeaderKey(k) + if canonicalFormName == "File" { + if len(v) == 0 { + return nil, "", 0, nil, errors.New("Invalid arguments specified") + } + // Fetch fileHeader which has the uploaded file information + fileHeader := v[0] + // Set filename + fileName = fileHeader.Filename + // Open the uploaded part + filePart, err = fileHeader.Open() + if err != nil { + return nil, "", 0, nil, err + } + // Compute file size + fileSize, err = filePart.(io.Seeker).Seek(0, 2) + if err != nil { + return nil, "", 0, nil, err + } + // Reset Seek to the beginning + _, err = filePart.(io.Seeker).Seek(0, 0) + if err != nil { + return nil, "", 0, nil, err + } + // File found and ready for reading + break + } + } + return filePart, fileName, fileSize, formValues, nil +} + +// Validate form field size for s3 specification requirement. +func validateFormFieldSize(formValues http.Header) error { + // Iterate over form values + for k := range formValues { + // Check if value's field exceeds S3 limit + if int64(len(formValues.Get(k))) > int64(1*humanize.MiByte) { + return errors.New("Data size larger than expected") + } + } + + // Success. + return nil +} + +func getRedirectPostRawQuery(bucket, key, etag string) string { + redirectValues := make(url.Values) + redirectValues.Set("bucket", bucket) + redirectValues.Set("key", key) + redirectValues.Set("etag", "\""+etag+"\"") + return redirectValues.Encode() +} + +// Check to see if Policy is signed correctly. +func (iam *IdentityAccessManagement) doesPolicySignatureMatch(formValues http.Header) s3err.ErrorCode { + // For SignV2 - Signature field will be valid + if _, ok := formValues["Signature"]; ok { + return iam.doesPolicySignatureV2Match(formValues) + } + return iam.doesPolicySignatureV4Match(formValues) +} diff --git a/weed/s3api/s3api_object_multipart_handlers.go b/weed/s3api/s3api_object_multipart_handlers.go index 72a25e4a5..4ddb24e31 100644 --- a/weed/s3api/s3api_object_multipart_handlers.go +++ b/weed/s3api/s3api_object_multipart_handlers.go @@ -1,65 +1,61 @@ package s3api import ( - "context" "fmt" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/s3" - "github.com/gorilla/mux" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/s3api/s3err" "net/http" "net/url" "strconv" "strings" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/s3" ) const ( - maxObjectList = 1000 // Limit number of objects in a listObjectsResponse. - maxUploadsList = 1000 // Limit number of uploads in a listUploadsResponse. - maxPartsList = 1000 // Limit number of parts in a listPartsResponse. - globalMaxPartID = 10000 + maxObjectListSizeLimit = 10000 // Limit number of objects in a listObjectsResponse. + maxUploadsList = 10000 // Limit number of uploads in a listUploadsResponse. + maxPartsList = 10000 // Limit number of parts in a listPartsResponse. + globalMaxPartID = 100000 ) // NewMultipartUploadHandler - New multipart upload.
func (s3a *S3ApiServer) NewMultipartUploadHandler(w http.ResponseWriter, r *http.Request) { - var object, bucket string - vars := mux.Vars(r) - bucket = vars["bucket"] - object = vars["object"] + bucket, object := getBucketAndObject(r) - response, errCode := s3a.createMultipartUpload(context.Background(), &s3.CreateMultipartUploadInput{ + response, errCode := s3a.createMultipartUpload(&s3.CreateMultipartUploadInput{ Bucket: aws.String(bucket), Key: objectKey(aws.String(object)), }) - if errCode != ErrNone { + glog.V(2).Info("NewMultipartUploadHandler", string(encodeResponse(response)), errCode) + + if errCode != s3err.ErrNone { writeErrorResponse(w, errCode, r.URL) return } - // println("NewMultipartUploadHandler", string(encodeResponse(response))) - writeSuccessResponseXML(w, encodeResponse(response)) } // CompleteMultipartUploadHandler - Completes multipart upload. func (s3a *S3ApiServer) CompleteMultipartUploadHandler(w http.ResponseWriter, r *http.Request) { - vars := mux.Vars(r) - bucket := vars["bucket"] - object := getObject(vars) + bucket, object := getBucketAndObject(r) // Get upload id. uploadID, _, _, _ := getObjectResources(r.URL.Query()) - response, errCode := s3a.completeMultipartUpload(context.Background(), &s3.CompleteMultipartUploadInput{ + response, errCode := s3a.completeMultipartUpload(&s3.CompleteMultipartUploadInput{ Bucket: aws.String(bucket), Key: objectKey(aws.String(object)), UploadId: aws.String(uploadID), }) - // println("CompleteMultipartUploadHandler", string(encodeResponse(response)), errCode) + glog.V(2).Info("CompleteMultipartUploadHandler", string(encodeResponse(response)), errCode) - if errCode != ErrNone { + if errCode != s3err.ErrNone { writeErrorResponse(w, errCode, r.URL) return } @@ -70,25 +66,23 @@ func (s3a *S3ApiServer) CompleteMultipartUploadHandler(w http.ResponseWriter, r // AbortMultipartUploadHandler - Aborts multipart upload. func (s3a *S3ApiServer) AbortMultipartUploadHandler(w http.ResponseWriter, r *http.Request) { - vars := mux.Vars(r) - bucket := vars["bucket"] - object := getObject(vars) + bucket, object := getBucketAndObject(r) // Get upload id. uploadID, _, _, _ := getObjectResources(r.URL.Query()) - response, errCode := s3a.abortMultipartUpload(context.Background(), &s3.AbortMultipartUploadInput{ + response, errCode := s3a.abortMultipartUpload(&s3.AbortMultipartUploadInput{ Bucket: aws.String(bucket), Key: objectKey(aws.String(object)), UploadId: aws.String(uploadID), }) - if errCode != ErrNone { + if errCode != s3err.ErrNone { writeErrorResponse(w, errCode, r.URL) return } - // println("AbortMultipartUploadHandler", string(encodeResponse(response))) + glog.V(2).Info("AbortMultipartUploadHandler", string(encodeResponse(response))) writeSuccessResponseXML(w, encodeResponse(response)) @@ -96,23 +90,22 @@ func (s3a *S3ApiServer) AbortMultipartUploadHandler(w http.ResponseWriter, r *ht // ListMultipartUploadsHandler - Lists multipart uploads. func (s3a *S3ApiServer) ListMultipartUploadsHandler(w http.ResponseWriter, r *http.Request) { - vars := mux.Vars(r) - bucket := vars["bucket"] + bucket, _ := getBucketAndObject(r) prefix, keyMarker, uploadIDMarker, delimiter, maxUploads, encodingType := getBucketMultipartResources(r.URL.Query()) if maxUploads < 0 { - writeErrorResponse(w, ErrInvalidMaxUploads, r.URL) + writeErrorResponse(w, s3err.ErrInvalidMaxUploads, r.URL) return } if keyMarker != "" { // Marker not common with prefix is not implemented. 
if !strings.HasPrefix(keyMarker, prefix) { - writeErrorResponse(w, ErrNotImplemented, r.URL) + writeErrorResponse(w, s3err.ErrNotImplemented, r.URL) return } } - response, errCode := s3a.listMultipartUploads(context.Background(), &s3.ListMultipartUploadsInput{ + response, errCode := s3a.listMultipartUploads(&s3.ListMultipartUploadsInput{ Bucket: aws.String(bucket), Delimiter: aws.String(delimiter), EncodingType: aws.String(encodingType), @@ -122,34 +115,33 @@ func (s3a *S3ApiServer) ListMultipartUploadsHandler(w http.ResponseWriter, r *ht UploadIdMarker: aws.String(uploadIDMarker), }) - if errCode != ErrNone { + glog.V(2).Info("ListMultipartUploadsHandler", string(encodeResponse(response)), errCode) + + if errCode != s3err.ErrNone { writeErrorResponse(w, errCode, r.URL) return } // TODO handle encodingType - // println("ListMultipartUploadsHandler", string(encodeResponse(response))) writeSuccessResponseXML(w, encodeResponse(response)) } // ListObjectPartsHandler - Lists object parts in a multipart upload. func (s3a *S3ApiServer) ListObjectPartsHandler(w http.ResponseWriter, r *http.Request) { - vars := mux.Vars(r) - bucket := vars["bucket"] - object := getObject(vars) + bucket, object := getBucketAndObject(r) uploadID, partNumberMarker, maxParts, _ := getObjectResources(r.URL.Query()) if partNumberMarker < 0 { - writeErrorResponse(w, ErrInvalidPartNumberMarker, r.URL) + writeErrorResponse(w, s3err.ErrInvalidPartNumberMarker, r.URL) return } if maxParts < 0 { - writeErrorResponse(w, ErrInvalidMaxParts, r.URL) + writeErrorResponse(w, s3err.ErrInvalidMaxParts, r.URL) return } - response, errCode := s3a.listObjectParts(context.Background(), &s3.ListPartsInput{ + response, errCode := s3a.listObjectParts(&s3.ListPartsInput{ Bucket: aws.String(bucket), Key: objectKey(aws.String(object)), MaxParts: aws.Int64(int64(maxParts)), @@ -157,55 +149,64 @@ func (s3a *S3ApiServer) ListObjectPartsHandler(w http.ResponseWriter, r *http.Re UploadId: aws.String(uploadID), }) - if errCode != ErrNone { + glog.V(2).Info("ListObjectPartsHandler", string(encodeResponse(response)), errCode) + + if errCode != s3err.ErrNone { writeErrorResponse(w, errCode, r.URL) return } - // println("ListObjectPartsHandler", string(encodeResponse(response))) - writeSuccessResponseXML(w, encodeResponse(response)) } // PutObjectPartHandler - Put an object part in a multipart upload. 
func (s3a *S3ApiServer) PutObjectPartHandler(w http.ResponseWriter, r *http.Request) { - vars := mux.Vars(r) - bucket := vars["bucket"] - - rAuthType := getRequestAuthType(r) - - ctx := context.Background() + bucket, _ := getBucketAndObject(r) uploadID := r.URL.Query().Get("uploadId") - exists, err := s3a.exists(ctx, s3a.genUploadsFolder(bucket), uploadID, true) + exists, err := s3a.exists(s3a.genUploadsFolder(bucket), uploadID, true) if !exists { - writeErrorResponse(w, ErrNoSuchUpload, r.URL) + writeErrorResponse(w, s3err.ErrNoSuchUpload, r.URL) return } partIDString := r.URL.Query().Get("partNumber") partID, err := strconv.Atoi(partIDString) if err != nil { - writeErrorResponse(w, ErrInvalidPart, r.URL) + writeErrorResponse(w, s3err.ErrInvalidPart, r.URL) return } if partID > globalMaxPartID { - writeErrorResponse(w, ErrInvalidMaxParts, r.URL) + writeErrorResponse(w, s3err.ErrInvalidMaxParts, r.URL) return } dataReader := r.Body - if rAuthType == authTypeStreamingSigned { - dataReader = newSignV4ChunkedReader(r) + if s3a.iam.isEnabled() { + rAuthType := getRequestAuthType(r) + var s3ErrCode s3err.ErrorCode + switch rAuthType { + case authTypeStreamingSigned: + dataReader, s3ErrCode = s3a.iam.newSignV4ChunkedReader(r) + case authTypeSignedV2, authTypePresignedV2: + _, s3ErrCode = s3a.iam.isReqAuthenticatedV2(r) + case authTypePresigned, authTypeSigned: + _, s3ErrCode = s3a.iam.reqSignatureV4Verify(r) + } + if s3ErrCode != s3err.ErrNone { + writeErrorResponse(w, s3ErrCode, r.URL) + return + } } + defer dataReader.Close() uploadUrl := fmt.Sprintf("http://%s%s/%s/%04d.part?collection=%s", - s3a.option.Filer, s3a.genUploadsFolder(bucket), uploadID, partID-1, bucket) + s3a.option.Filer, s3a.genUploadsFolder(bucket), uploadID, partID, bucket) etag, errCode := s3a.putToFiler(r, uploadUrl, dataReader) - if errCode != ErrNone { + if errCode != s3err.ErrNone { writeErrorResponse(w, errCode, r.URL) return } diff --git a/weed/s3api/s3api_object_tagging_handlers.go b/weed/s3api/s3api_object_tagging_handlers.go new file mode 100644 index 000000000..94719834c --- /dev/null +++ b/weed/s3api/s3api_object_tagging_handlers.go @@ -0,0 +1,117 @@ +package s3api + +import ( + "encoding/xml" + "fmt" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/s3api/s3err" + "github.com/chrislusf/seaweedfs/weed/util" + "io" + "io/ioutil" + "net/http" +) + +// GetObjectTaggingHandler - GET object tagging +// API reference: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html +func (s3a *S3ApiServer) GetObjectTaggingHandler(w http.ResponseWriter, r *http.Request) { + + bucket, object := getBucketAndObject(r) + + target := util.FullPath(fmt.Sprintf("%s/%s%s", s3a.option.BucketsPath, bucket, object)) + dir, name := target.DirAndName() + + tags, err := s3a.getTags(dir, name) + if err != nil { + if err == filer_pb.ErrNotFound { + glog.Errorf("GetObjectTaggingHandler %s: %v", r.URL, err) + writeErrorResponse(w, s3err.ErrNoSuchKey, r.URL) + } else { + glog.Errorf("GetObjectTaggingHandler %s: %v", r.URL, err) + writeErrorResponse(w, s3err.ErrInternalError, r.URL) + } + return + } + + writeSuccessResponseXML(w, encodeResponse(FromTags(tags))) + +} + +// PutObjectTaggingHandler Put object tagging +// API reference: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObjectTagging.html +func (s3a *S3ApiServer) PutObjectTaggingHandler(w http.ResponseWriter, r *http.Request) { + + bucket, object := getBucketAndObject(r) + + 
target := util.FullPath(fmt.Sprintf("%s/%s%s", s3a.option.BucketsPath, bucket, object)) + dir, name := target.DirAndName() + + tagging := &Tagging{} + input, err := ioutil.ReadAll(io.LimitReader(r.Body, r.ContentLength)) + if err != nil { + glog.Errorf("PutObjectTaggingHandler read input %s: %v", r.URL, err) + writeErrorResponse(w, s3err.ErrInternalError, r.URL) + return + } + if err = xml.Unmarshal(input, tagging); err != nil { + glog.Errorf("PutObjectTaggingHandler Unmarshal %s: %v", r.URL, err) + writeErrorResponse(w, s3err.ErrMalformedXML, r.URL) + return + } + tags := tagging.ToTags() + if len(tags) > 10 { + glog.Errorf("PutObjectTaggingHandler tags %s: %d tags more than 10", r.URL, len(tags)) + writeErrorResponse(w, s3err.ErrInvalidTag, r.URL) + return + } + for k, v := range tags { + if len(k) > 128 { + glog.Errorf("PutObjectTaggingHandler tags %s: tag key %s longer than 128", r.URL, k) + writeErrorResponse(w, s3err.ErrInvalidTag, r.URL) + return + } + if len(v) > 256 { + glog.Errorf("PutObjectTaggingHandler tags %s: tag value %s longer than 256", r.URL, v) + writeErrorResponse(w, s3err.ErrInvalidTag, r.URL) + return + } + } + + if err = s3a.setTags(dir, name, tagging.ToTags()); err != nil { + if err == filer_pb.ErrNotFound { + glog.Errorf("PutObjectTaggingHandler setTags %s: %v", r.URL, err) + writeErrorResponse(w, s3err.ErrNoSuchKey, r.URL) + } else { + glog.Errorf("PutObjectTaggingHandler setTags %s: %v", r.URL, err) + writeErrorResponse(w, s3err.ErrInternalError, r.URL) + } + return + } + + w.WriteHeader(http.StatusNoContent) + +} + +// DeleteObjectTaggingHandler Delete object tagging +// API reference: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObjectTagging.html +func (s3a *S3ApiServer) DeleteObjectTaggingHandler(w http.ResponseWriter, r *http.Request) { + + bucket, object := getBucketAndObject(r) + + target := util.FullPath(fmt.Sprintf("%s/%s%s", s3a.option.BucketsPath, bucket, object)) + dir, name := target.DirAndName() + + err := s3a.rmTags(dir, name) + if err != nil { + if err == filer_pb.ErrNotFound { + glog.Errorf("DeleteObjectTaggingHandler %s: %v", r.URL, err) + writeErrorResponse(w, s3err.ErrNoSuchKey, r.URL) + } else { + glog.Errorf("DeleteObjectTaggingHandler %s: %v", r.URL, err) + writeErrorResponse(w, s3err.ErrInternalError, r.URL) + } + return + } + + w.WriteHeader(http.StatusNoContent) +} diff --git a/weed/s3api/s3api_objects_list_handlers.go b/weed/s3api/s3api_objects_list_handlers.go index aa6849cbd..739cdd8f9 100644 --- a/weed/s3api/s3api_objects_list_handlers.go +++ b/weed/s3api/s3api_objects_list_handlers.go @@ -2,7 +2,9 @@ package s3api import ( "context" + "encoding/xml" "fmt" + "github.com/chrislusf/seaweedfs/weed/glog" "io" "net/http" "net/url" @@ -11,51 +13,72 @@ import ( "strings" "time" - "github.com/chrislusf/seaweedfs/weed/filer2" - "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/filer" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" - "github.com/gorilla/mux" + xhttp "github.com/chrislusf/seaweedfs/weed/s3api/http" + "github.com/chrislusf/seaweedfs/weed/s3api/s3err" ) -const ( - maxObjectListSizeLimit = 1000 // Limit number of objects in a listObjectsResponse. 
-) +type ListBucketResultV2 struct { + XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ListBucketResult"` + Name string `xml:"Name"` + Prefix string `xml:"Prefix"` + MaxKeys int `xml:"MaxKeys"` + Delimiter string `xml:"Delimiter,omitempty"` + IsTruncated bool `xml:"IsTruncated"` + Contents []ListEntry `xml:"Contents,omitempty"` + CommonPrefixes []PrefixEntry `xml:"CommonPrefixes,omitempty"` + ContinuationToken string `xml:"ContinuationToken,omitempty"` + NextContinuationToken string `xml:"NextContinuationToken,omitempty"` + KeyCount int `xml:"KeyCount"` + StartAfter string `xml:"StartAfter,omitempty"` +} func (s3a *S3ApiServer) ListObjectsV2Handler(w http.ResponseWriter, r *http.Request) { // https://docs.aws.amazon.com/AmazonS3/latest/API/v2-RESTBucketGET.html // collect parameters - vars := mux.Vars(r) - bucket := vars["bucket"] - - glog.V(4).Infof("read v2: %v", vars) + bucket, _ := getBucketAndObject(r) - originalPrefix, marker, startAfter, delimiter, _, maxKeys := getListObjectsV2Args(r.URL.Query()) + originalPrefix, continuationToken, startAfter, delimiter, _, maxKeys := getListObjectsV2Args(r.URL.Query()) if maxKeys < 0 { - writeErrorResponse(w, ErrInvalidMaxKeys, r.URL) + writeErrorResponse(w, s3err.ErrInvalidMaxKeys, r.URL) return } if delimiter != "" && delimiter != "/" { - writeErrorResponse(w, ErrNotImplemented, r.URL) + writeErrorResponse(w, s3err.ErrNotImplemented, r.URL) return } - if marker == "" { + marker := continuationToken + if continuationToken == "" { marker = startAfter } - ctx := context.Background() - - response, err := s3a.listFilerEntries(ctx, bucket, originalPrefix, maxKeys, marker) + response, err := s3a.listFilerEntries(bucket, originalPrefix, maxKeys, marker, delimiter) if err != nil { - writeErrorResponse(w, ErrInternalError, r.URL) + writeErrorResponse(w, s3err.ErrInternalError, r.URL) return } + responseV2 := &ListBucketResultV2{ + XMLName: response.XMLName, + Name: response.Name, + CommonPrefixes: response.CommonPrefixes, + Contents: response.Contents, + ContinuationToken: continuationToken, + Delimiter: response.Delimiter, + IsTruncated: response.IsTruncated, + KeyCount: len(response.Contents) + len(response.CommonPrefixes), + MaxKeys: response.MaxKeys, + NextContinuationToken: response.NextMarker, + Prefix: response.Prefix, + StartAfter: startAfter, + } - writeSuccessResponseXML(w, encodeResponse(response)) + writeSuccessResponseXML(w, encodeResponse(responseV2)) } func (s3a *S3ApiServer) ListObjectsV1Handler(w http.ResponseWriter, r *http.Request) { @@ -63,121 +86,203 @@ func (s3a *S3ApiServer) ListObjectsV1Handler(w http.ResponseWriter, r *http.Requ // https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGET.html // collect parameters - vars := mux.Vars(r) - bucket := vars["bucket"] - - ctx := context.Background() + bucket, _ := getBucketAndObject(r) originalPrefix, marker, delimiter, maxKeys := getListObjectsV1Args(r.URL.Query()) if maxKeys < 0 { - writeErrorResponse(w, ErrInvalidMaxKeys, r.URL) + writeErrorResponse(w, s3err.ErrInvalidMaxKeys, r.URL) return } if delimiter != "" && delimiter != "/" { - writeErrorResponse(w, ErrNotImplemented, r.URL) + writeErrorResponse(w, s3err.ErrNotImplemented, r.URL) return } - response, err := s3a.listFilerEntries(ctx, bucket, originalPrefix, maxKeys, marker) + response, err := s3a.listFilerEntries(bucket, originalPrefix, maxKeys, marker, delimiter) if err != nil { - writeErrorResponse(w, ErrInternalError, r.URL) + writeErrorResponse(w, s3err.ErrInternalError, r.URL) return } 
writeSuccessResponseXML(w, encodeResponse(response)) } -func (s3a *S3ApiServer) listFilerEntries(ctx context.Context, bucket, originalPrefix string, maxKeys int, marker string) (response ListBucketResult, err error) { - +func (s3a *S3ApiServer) listFilerEntries(bucket string, originalPrefix string, maxKeys int, marker string, delimiter string) (response ListBucketResult, err error) { // convert full path prefix into directory name and prefix for entry name - dir, prefix := filepath.Split(originalPrefix) - if strings.HasPrefix(dir, "/") { - dir = dir[1:] + reqDir, prefix := filepath.Split(originalPrefix) + if strings.HasPrefix(reqDir, "/") { + reqDir = reqDir[1:] + } + bucketPrefix := fmt.Sprintf("%s/%s/", s3a.option.BucketsPath, bucket) + reqDir = fmt.Sprintf("%s%s", bucketPrefix, reqDir) + if strings.HasSuffix(reqDir, "/") { + // remove trailing "/" + reqDir = reqDir[:len(reqDir)-1] } - // check filer - err = s3a.withFilerClient(ctx, func(client filer_pb.SeaweedFilerClient) error { - - request := &filer_pb.ListEntriesRequest{ - Directory: fmt.Sprintf("%s/%s/%s", s3a.option.BucketsPath, bucket, dir), - Prefix: prefix, - Limit: uint32(maxKeys + 1), - StartFromFileName: marker, - InclusiveStartFrom: false, - } - - stream, err := client.ListEntries(ctx, request) - if err != nil { - return fmt.Errorf("list buckets: %v", err) - } + var contents []ListEntry + var commonPrefixes []PrefixEntry + var isTruncated bool + var doErr error + var nextMarker string - var contents []ListEntry - var commonPrefixes []PrefixEntry - var counter int - var lastEntryName string - var isTruncated bool - - for { - resp, recvErr := stream.Recv() - if recvErr != nil { - if recvErr == io.EOF { - break - } else { - return recvErr - } - } + // check filer + err = s3a.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error { - entry := resp.Entry - counter++ - if counter > maxKeys { - isTruncated = true - break - } - lastEntryName = entry.Name + _, isTruncated, nextMarker, doErr = s3a.doListFilerEntries(client, reqDir, prefix, maxKeys, marker, delimiter, func(dir string, entry *filer_pb.Entry) { if entry.IsDirectory { - if entry.Name != ".uploads" { + if delimiter == "/" { commonPrefixes = append(commonPrefixes, PrefixEntry{ - Prefix: fmt.Sprintf("%s%s/", dir, entry.Name), + Prefix: fmt.Sprintf("%s/%s/", dir, entry.Name)[len(bucketPrefix):], }) } } else { + storageClass := "STANDARD" + if v, ok := entry.Extended[xhttp.AmzStorageClass]; ok { + storageClass = string(v) + } contents = append(contents, ListEntry{ - Key: fmt.Sprintf("%s%s", dir, entry.Name), - LastModified: time.Unix(entry.Attributes.Mtime, 0), - ETag: "\"" + filer2.ETag(entry.Chunks) + "\"", - Size: int64(filer2.TotalSize(entry.Chunks)), + Key: fmt.Sprintf("%s/%s", dir, entry.Name)[len(bucketPrefix):], + LastModified: time.Unix(entry.Attributes.Mtime, 0).UTC(), + ETag: "\"" + filer.ETag(entry) + "\"", + Size: int64(filer.FileSize(entry)), Owner: CanonicalUser{ ID: fmt.Sprintf("%x", entry.Attributes.Uid), DisplayName: entry.Attributes.UserName, }, - StorageClass: "STANDARD", + StorageClass: StorageClass(storageClass), }) } + }) + if doErr != nil { + return doErr + } + if !isTruncated { + nextMarker = "" } response = ListBucketResult{ Name: bucket, Prefix: originalPrefix, Marker: marker, - NextMarker: lastEntryName, + NextMarker: nextMarker, MaxKeys: maxKeys, - Delimiter: "/", + Delimiter: delimiter, IsTruncated: isTruncated, Contents: contents, CommonPrefixes: commonPrefixes, } - glog.V(4).Infof("read directory: %v, found: %v, %+v", request, counter, 
response) - return nil }) return } +func (s3a *S3ApiServer) doListFilerEntries(client filer_pb.SeaweedFilerClient, dir, prefix string, maxKeys int, marker, delimiter string, eachEntryFn func(dir string, entry *filer_pb.Entry)) (counter int, isTruncated bool, nextMarker string, err error) { + // invariants + // prefix and marker should be under dir, marker may contain "/" + // maxKeys should be updated for each recursion + + if prefix == "/" && delimiter == "/" { + return + } + if maxKeys <= 0 { + return + } + + if strings.Contains(marker, "/") { + sepIndex := strings.Index(marker, "/") + subDir, subMarker := marker[0:sepIndex], marker[sepIndex+1:] + // println("doListFilerEntries dir", dir+"/"+subDir, "subMarker", subMarker, "maxKeys", maxKeys) + subCounter, subIsTruncated, subNextMarker, subErr := s3a.doListFilerEntries(client, dir+"/"+subDir, "", maxKeys, subMarker, delimiter, eachEntryFn) + if subErr != nil { + err = subErr + return + } + isTruncated = isTruncated || subIsTruncated + maxKeys -= subCounter + nextMarker = subDir + "/" + subNextMarker + // finished processing this sub directory + marker = subDir + } + + // now marker is also a direct child of dir + request := &filer_pb.ListEntriesRequest{ + Directory: dir, + Prefix: prefix, + Limit: uint32(maxKeys + 1), + StartFromFileName: marker, + InclusiveStartFrom: false, + } + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + stream, listErr := client.ListEntries(ctx, request) + if listErr != nil { + err = fmt.Errorf("list entries %+v: %v", request, listErr) + return + } + + for { + resp, recvErr := stream.Recv() + if recvErr != nil { + if recvErr == io.EOF { + break + } else { + err = fmt.Errorf("iterating entries %+v: %v", request, recvErr) + return + } + } + if counter >= maxKeys { + isTruncated = true + return + } + entry := resp.Entry + nextMarker = entry.Name + if entry.IsDirectory { + // println("ListEntries", dir, "dir:", entry.Name) + if entry.Name != ".uploads" { // FIXME no need to apply to all directories.
this extra also affects maxKeys + if delimiter != "/" { + eachEntryFn(dir, entry) + // println("doListFilerEntries2 dir", dir+"/"+entry.Name, "maxKeys", maxKeys-counter) + subCounter, subIsTruncated, subNextMarker, subErr := s3a.doListFilerEntries(client, dir+"/"+entry.Name, "", maxKeys-counter, "", delimiter, eachEntryFn) + if subErr != nil { + err = fmt.Errorf("doListFilerEntries2: %v", subErr) + return + } + // println("doListFilerEntries2 dir", dir+"/"+entry.Name, "maxKeys", maxKeys-counter, "subCounter", subCounter, "subNextMarker", subNextMarker, "subIsTruncated", subIsTruncated) + counter += subCounter + nextMarker = entry.Name + "/" + subNextMarker + if subIsTruncated { + isTruncated = true + return + } + } else { + var isEmpty bool + if !s3a.option.AllowEmptyFolder { + if isEmpty, err = s3a.isDirectoryAllEmpty(client, dir, entry.Name); err != nil { + glog.Errorf("check empty folder %s: %v", dir, err) + } + } + if !isEmpty { + eachEntryFn(dir, entry) + counter++ + } + } + } + } else { + // println("ListEntries", dir, "file:", entry.Name) + eachEntryFn(dir, entry) + counter++ + } + } + return +} + func getListObjectsV2Args(values url.Values) (prefix, token, startAfter, delimiter string, fetchOwner bool, maxkeys int) { prefix = values.Get("prefix") token = values.Get("continuation-token") @@ -203,3 +308,57 @@ func getListObjectsV1Args(values url.Values) (prefix, marker, delimiter string, } return } + +func (s3a *S3ApiServer) isDirectoryAllEmpty(filerClient filer_pb.SeaweedFilerClient, parentDir, name string) (isEmpty bool, err error) { + // println("+ isDirectoryAllEmpty", dir, name) + glog.V(4).Infof("+ isEmpty %s/%s", parentDir, name) + defer glog.V(4).Infof("- isEmpty %s/%s %v", parentDir, name, isEmpty) + var fileCounter int + var subDirs []string + currentDir := parentDir + "/" + name + var startFrom string + var isExhausted bool + var foundEntry bool + for fileCounter == 0 && !isExhausted && err == nil { + err = filer_pb.SeaweedList(filerClient, currentDir, "", func(entry *filer_pb.Entry, isLast bool) error { + foundEntry = true + if entry.IsDirectory { + subDirs = append(subDirs, entry.Name) + } else { + fileCounter++ + } + startFrom = entry.Name + isExhausted = isExhausted || isLast + glog.V(4).Infof(" * %s/%s isLast: %t", currentDir, startFrom, isLast) + return nil + }, startFrom, false, 8) + if !foundEntry { + break + } + } + + if err != nil { + return false, err + } + + if fileCounter > 0 { + return false, nil + } + + for _, subDir := range subDirs { + isSubEmpty, subErr := s3a.isDirectoryAllEmpty(filerClient, currentDir, subDir) + if subErr != nil { + return false, subErr + } + if !isSubEmpty { + return false, nil + } + } + + glog.V(1).Infof("deleting empty folder %s", currentDir) + if err = doDeleteEntry(filerClient, parentDir, name, true, true); err != nil { + return + } + + return true, nil +} diff --git a/weed/s3api/s3api_server.go b/weed/s3api/s3api_server.go index edf634444..54df29492 100644 --- a/weed/s3api/s3api_server.go +++ b/weed/s3api/s3api_server.go @@ -1,30 +1,43 @@ package s3api import ( + "fmt" + "github.com/chrislusf/seaweedfs/weed/filer" + . 
"github.com/chrislusf/seaweedfs/weed/s3api/s3_constants" + "net/http" + "strings" + "time" + "github.com/gorilla/mux" "google.golang.org/grpc" - "net/http" ) type S3ApiServerOption struct { Filer string + Port int FilerGrpcAddress string + Config string DomainName string BucketsPath string GrpcDialOption grpc.DialOption + AllowEmptyFolder bool } type S3ApiServer struct { option *S3ApiServerOption + iam *IdentityAccessManagement } func NewS3ApiServer(router *mux.Router, option *S3ApiServerOption) (s3ApiServer *S3ApiServer, err error) { s3ApiServer = &S3ApiServer{ option: option, + iam: NewIdentityAccessManagement(option), } s3ApiServer.registerRouter(router) + go s3ApiServer.subscribeMetaEvents("s3", filer.IamConfigDirecotry+"/"+filer.IamIdentityFile, time.Now().UnixNano()) + return s3ApiServer, nil } @@ -33,55 +46,70 @@ func (s3a *S3ApiServer) registerRouter(router *mux.Router) { apiRouter := router.PathPrefix("/").Subrouter() var routers []*mux.Router if s3a.option.DomainName != "" { - routers = append(routers, apiRouter.Host("{bucket:.+}."+s3a.option.DomainName).Subrouter()) + domainNames := strings.Split(s3a.option.DomainName, ",") + for _, domainName := range domainNames { + routers = append(routers, apiRouter.Host( + fmt.Sprintf("%s.%s:%d", "{bucket:.+}", domainName, s3a.option.Port)).Subrouter()) + routers = append(routers, apiRouter.Host( + fmt.Sprintf("%s.%s", "{bucket:.+}", domainName)).Subrouter()) + } } routers = append(routers, apiRouter.PathPrefix("/{bucket}").Subrouter()) for _, bucket := range routers { // HeadObject - bucket.Methods("HEAD").Path("/{object:.+}").HandlerFunc(s3a.HeadObjectHandler) + bucket.Methods("HEAD").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.HeadObjectHandler, ACTION_READ), "GET")) // HeadBucket - bucket.Methods("HEAD").HandlerFunc(s3a.HeadBucketHandler) + bucket.Methods("HEAD").HandlerFunc(track(s3a.iam.Auth(s3a.HeadBucketHandler, ACTION_ADMIN), "GET")) + // CopyObjectPart + bucket.Methods("PUT").Path("/{object:.+}").HeadersRegexp("X-Amz-Copy-Source", `.*?(\/|%2F).*?`).HandlerFunc(track(s3a.iam.Auth(s3a.CopyObjectPartHandler, ACTION_WRITE), "PUT")).Queries("partNumber", "{partNumber:[0-9]+}", "uploadId", "{uploadId:.*}") // PutObjectPart - bucket.Methods("PUT").Path("/{object:.+}").HandlerFunc(s3a.PutObjectPartHandler).Queries("partNumber", "{partNumber:[0-9]+}", "uploadId", "{uploadId:.*}") + bucket.Methods("PUT").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.PutObjectPartHandler, ACTION_WRITE), "PUT")).Queries("partNumber", "{partNumber:[0-9]+}", "uploadId", "{uploadId:.*}") // CompleteMultipartUpload - bucket.Methods("POST").Path("/{object:.+}").HandlerFunc(s3a.CompleteMultipartUploadHandler).Queries("uploadId", "{uploadId:.*}") + bucket.Methods("POST").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.CompleteMultipartUploadHandler, ACTION_WRITE), "POST")).Queries("uploadId", "{uploadId:.*}") // NewMultipartUpload - bucket.Methods("POST").Path("/{object:.+}").HandlerFunc(s3a.NewMultipartUploadHandler).Queries("uploads", "") + bucket.Methods("POST").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.NewMultipartUploadHandler, ACTION_WRITE), "POST")).Queries("uploads", "") // AbortMultipartUpload - bucket.Methods("DELETE").Path("/{object:.+}").HandlerFunc(s3a.AbortMultipartUploadHandler).Queries("uploadId", "{uploadId:.*}") + bucket.Methods("DELETE").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.AbortMultipartUploadHandler, ACTION_WRITE), "DELETE")).Queries("uploadId", "{uploadId:.*}") // ListObjectParts - 
bucket.Methods("GET").Path("/{object:.+}").HandlerFunc(s3a.ListObjectPartsHandler).Queries("uploadId", "{uploadId:.*}") + bucket.Methods("GET").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.ListObjectPartsHandler, ACTION_READ), "GET")).Queries("uploadId", "{uploadId:.*}") // ListMultipartUploads - bucket.Methods("GET").HandlerFunc(s3a.ListMultipartUploadsHandler).Queries("uploads", "") + bucket.Methods("GET").HandlerFunc(track(s3a.iam.Auth(s3a.ListMultipartUploadsHandler, ACTION_READ), "GET")).Queries("uploads", "") + // GetObjectTagging + bucket.Methods("GET").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.GetObjectTaggingHandler, ACTION_READ), "GET")).Queries("tagging", "") + // PutObjectTagging + bucket.Methods("PUT").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.PutObjectTaggingHandler, ACTION_TAGGING), "PUT")).Queries("tagging", "") + // DeleteObjectTagging + bucket.Methods("DELETE").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.DeleteObjectTaggingHandler, ACTION_TAGGING), "DELETE")).Queries("tagging", "") + + // CopyObject + bucket.Methods("PUT").Path("/{object:.+}").HeadersRegexp("X-Amz-Copy-Source", ".*?(\\/|%2F).*?").HandlerFunc(track(s3a.iam.Auth(s3a.CopyObjectHandler, ACTION_WRITE), "COPY")) // PutObject - bucket.Methods("PUT").Path("/{object:.+}").HandlerFunc(s3a.PutObjectHandler) + bucket.Methods("PUT").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.PutObjectHandler, ACTION_WRITE), "PUT")) // PutBucket - bucket.Methods("PUT").HandlerFunc(s3a.PutBucketHandler) + bucket.Methods("PUT").HandlerFunc(track(s3a.iam.Auth(s3a.PutBucketHandler, ACTION_ADMIN), "PUT")) // DeleteObject - bucket.Methods("DELETE").Path("/{object:.+}").HandlerFunc(s3a.DeleteObjectHandler) + bucket.Methods("DELETE").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.DeleteObjectHandler, ACTION_WRITE), "DELETE")) // DeleteBucket - bucket.Methods("DELETE").HandlerFunc(s3a.DeleteBucketHandler) + bucket.Methods("DELETE").HandlerFunc(track(s3a.iam.Auth(s3a.DeleteBucketHandler, ACTION_WRITE), "DELETE")) // ListObjectsV2 - bucket.Methods("GET").HandlerFunc(s3a.ListObjectsV2Handler).Queries("list-type", "2") + bucket.Methods("GET").HandlerFunc(track(s3a.iam.Auth(s3a.ListObjectsV2Handler, ACTION_LIST), "LIST")).Queries("list-type", "2") // GetObject, but directory listing is not supported - bucket.Methods("GET").Path("/{object:.+}").HandlerFunc(s3a.GetObjectHandler) + bucket.Methods("GET").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.GetObjectHandler, ACTION_READ), "GET")) // ListObjectsV1 (Legacy) - bucket.Methods("GET").HandlerFunc(s3a.ListObjectsV1Handler) + bucket.Methods("GET").HandlerFunc(track(s3a.iam.Auth(s3a.ListObjectsV1Handler, ACTION_LIST), "LIST")) + + // PostPolicy + bucket.Methods("POST").HeadersRegexp("Content-Type", "multipart/form-data*").HandlerFunc(track(s3a.iam.Auth(s3a.PostPolicyBucketHandler, ACTION_WRITE), "POST")) // DeleteMultipleObjects - bucket.Methods("POST").HandlerFunc(s3a.DeleteMultipleObjectsHandler).Queries("delete", "") + bucket.Methods("POST").HandlerFunc(track(s3a.iam.Auth(s3a.DeleteMultipleObjectsHandler, ACTION_WRITE), "DELETE")).Queries("delete", "") /* - // CopyObject - bucket.Methods("PUT").Path("/{object:.+}").HeadersRegexp("X-Amz-Copy-Source", ".*?(\\/|%2F).*?").HandlerFunc(s3a.CopyObjectHandler) - - // CopyObjectPart - bucket.Methods("PUT").Path("/{object:.+}").HeadersRegexp("X-Amz-Copy-Source", ".*?(\\/|%2F).*?").HandlerFunc(s3a.CopyObjectPartHandler).Queries("partNumber", "{partNumber:[0-9]+}", "uploadId", 
"{uploadId:.*}") // not implemented // GetBucketLocation @@ -96,14 +124,12 @@ func (s3a *S3ApiServer) registerRouter(router *mux.Router) { bucket.Methods("PUT").HandlerFunc(s3a.PutBucketPolicyHandler).Queries("policy", "") // DeleteBucketPolicy bucket.Methods("DELETE").HandlerFunc(s3a.DeleteBucketPolicyHandler).Queries("policy", "") - // PostPolicy - bucket.Methods("POST").HeadersRegexp("Content-Type", "multipart/form-data*").HandlerFunc(s3a.PostPolicyBucketHandler) */ } // ListBuckets - apiRouter.Methods("GET").Path("/").HandlerFunc(s3a.ListBucketsHandler) + apiRouter.Methods("GET").Path("/").HandlerFunc(track(s3a.ListBucketsHandler, "LIST")) // NotFound apiRouter.NotFoundHandler = http.HandlerFunc(notFoundHandler) diff --git a/weed/s3api/s3api_test.go b/weed/s3api/s3api_test.go new file mode 100644 index 000000000..026766beb --- /dev/null +++ b/weed/s3api/s3api_test.go @@ -0,0 +1,32 @@ +package s3api + +import ( + "testing" + "time" +) + +func TestCopyObjectResponse(t *testing.T) { + + // https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html + + response := CopyObjectResult{ + ETag: "12345678", + LastModified: time.Now(), + } + + println(string(encodeResponse(response))) + +} + +func TestCopyPartResponse(t *testing.T) { + + // https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html + + response := CopyPartResult{ + ETag: "12345678", + LastModified: time.Now(), + } + + println(string(encodeResponse(response))) + +} diff --git a/weed/s3api/s3err/s3-error.go b/weed/s3api/s3err/s3-error.go new file mode 100644 index 000000000..224378ec5 --- /dev/null +++ b/weed/s3api/s3err/s3-error.go @@ -0,0 +1,61 @@ +package s3err + +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +// Non exhaustive list of AWS S3 standard error responses - +// http://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html +var s3ErrorResponseMap = map[string]string{ + "AccessDenied": "Access Denied.", + "BadDigest": "The Content-Md5 you specified did not match what we received.", + "EntityTooSmall": "Your proposed upload is smaller than the minimum allowed object size.", + "EntityTooLarge": "Your proposed upload exceeds the maximum allowed object size.", + "IncompleteBody": "You did not provide the number of bytes specified by the Content-Length HTTP header.", + "InternalError": "We encountered an internal error, please try again.", + "InvalidAccessKeyId": "The access key ID you provided does not exist in our records.", + "InvalidBucketName": "The specified bucket is not valid.", + "InvalidDigest": "The Content-Md5 you specified is not valid.", + "InvalidRange": "The requested range is not satisfiable", + "MalformedXML": "The XML you provided was not well-formed or did not validate against our published schema.", + "MissingContentLength": "You must provide the Content-Length HTTP header.", + "MissingContentMD5": "Missing required header for this request: Content-Md5.", + "MissingRequestBodyError": "Request body is empty.", + "NoSuchBucket": "The specified bucket does not exist.", + "NoSuchBucketPolicy": "The bucket policy does not exist", + "NoSuchKey": "The specified key does not exist.", + "NoSuchUpload": "The specified multipart upload does not exist. The upload ID may be invalid, or the upload may have been aborted or completed.", + "NotImplemented": "A header you provided implies functionality that is not implemented", + "PreconditionFailed": "At least one of the pre-conditions you specified did not hold", + "RequestTimeTooSkewed": "The difference between the request time and the server's time is too large.", + "SignatureDoesNotMatch": "The request signature we calculated does not match the signature you provided. Check your key and signing method.", + "MethodNotAllowed": "The specified method is not allowed against this resource.", + "InvalidPart": "One or more of the specified parts could not be found.", + "InvalidPartOrder": "The list of parts was not in ascending order. The parts list must be specified in order by part number.", + "InvalidObjectState": "The operation is not valid for the current state of the object.", + "AuthorizationHeaderMalformed": "The authorization header is malformed; the region is wrong.", + "MalformedPOSTRequest": "The body of your POST request is not well-formed multipart/form-data.", + "BucketNotEmpty": "The bucket you tried to delete is not empty", + "AllAccessDisabled": "All access to this bucket has been disabled.", + "MalformedPolicy": "Policy has invalid resource.", + "MissingFields": "Missing fields in request.", + "AuthorizationQueryParametersError": "Error parsing the X-Amz-Credential parameter; the Credential is mal-formed; expecting \"/YYYYMMDD/REGION/SERVICE/aws4_request\".", + "MalformedDate": "Invalid date format header, expected to be in ISO8601, RFC1123 or RFC1123Z time format.", + "BucketAlreadyOwnedByYou": "Your previous request to create the named bucket succeeded and you already own it.", + "InvalidDuration": "Duration provided in the request is invalid.", + "XAmzContentSHA256Mismatch": "The provided 'x-amz-content-sha256' header does not match what was computed.", + // Add new API errors here. 
+} diff --git a/weed/s3api/s3err/s3api_errors.go b/weed/s3api/s3err/s3api_errors.go new file mode 100644 index 000000000..a3f7bb25e --- /dev/null +++ b/weed/s3api/s3err/s3api_errors.go @@ -0,0 +1,359 @@ +package s3err + +import ( + "encoding/xml" + "fmt" + "net/http" +) + +// APIError structure +type APIError struct { + Code string + Description string + HTTPStatusCode int +} + +// RESTErrorResponse - error response format +type RESTErrorResponse struct { + XMLName xml.Name `xml:"Error" json:"-"` + Code string `xml:"Code" json:"Code"` + Message string `xml:"Message" json:"Message"` + Resource string `xml:"Resource" json:"Resource"` + RequestID string `xml:"RequestId" json:"RequestId"` + + // Underlying HTTP status code for the returned error + StatusCode int `xml:"-" json:"-"` +} + +// Error - Returns S3 error string. +func (e RESTErrorResponse) Error() string { + if e.Message == "" { + msg, ok := s3ErrorResponseMap[e.Code] + if !ok { + msg = fmt.Sprintf("Error response code %s.", e.Code) + } + return msg + } + return e.Message +} + +// ErrorCode type of error status. +type ErrorCode int + +// Error codes, see full list at http://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html +const ( + ErrNone ErrorCode = iota + ErrAccessDenied + ErrMethodNotAllowed + ErrBucketNotEmpty + ErrBucketAlreadyExists + ErrBucketAlreadyOwnedByYou + ErrNoSuchBucket + ErrNoSuchKey + ErrNoSuchUpload + ErrInvalidBucketName + ErrInvalidDigest + ErrInvalidMaxKeys + ErrInvalidMaxUploads + ErrInvalidMaxParts + ErrInvalidPartNumberMarker + ErrInvalidPart + ErrInternalError + ErrInvalidCopyDest + ErrInvalidCopySource + ErrInvalidTag + ErrAuthHeaderEmpty + ErrSignatureVersionNotSupported + ErrMalformedPOSTRequest + ErrPOSTFileRequired + ErrPostPolicyConditionInvalidFormat + ErrEntityTooSmall + ErrEntityTooLarge + ErrMissingFields + ErrMissingCredTag + ErrCredMalformed + ErrMalformedXML + ErrMalformedDate + ErrMalformedPresignedDate + ErrMalformedCredentialDate + ErrMissingSignHeadersTag + ErrMissingSignTag + ErrUnsignedHeaders + ErrInvalidQueryParams + ErrInvalidQuerySignatureAlgo + ErrExpiredPresignRequest + ErrMalformedExpires + ErrNegativeExpires + ErrMaximumExpires + ErrSignatureDoesNotMatch + ErrContentSHA256Mismatch + ErrInvalidAccessKeyID + ErrRequestNotReadyYet + ErrMissingDateHeader + ErrInvalidRequest + ErrNotImplemented + + ErrExistingObjectIsDirectory +) + +// error code to APIError structure, these fields carry respective +// descriptions for all the error responses. +var errorCodeResponse = map[ErrorCode]APIError{ + ErrAccessDenied: { + Code: "AccessDenied", + Description: "Access Denied.", + HTTPStatusCode: http.StatusForbidden, + }, + ErrMethodNotAllowed: { + Code: "MethodNotAllowed", + Description: "The specified method is not allowed against this resource.", + HTTPStatusCode: http.StatusMethodNotAllowed, + }, + ErrBucketNotEmpty: { + Code: "BucketNotEmpty", + Description: "The bucket you tried to delete is not empty", + HTTPStatusCode: http.StatusConflict, + }, + ErrBucketAlreadyExists: { + Code: "BucketAlreadyExists", + Description: "The requested bucket name is not available. The bucket namespace is shared by all users of the system. 
Please select a different name and try again.", + HTTPStatusCode: http.StatusConflict, + }, + ErrBucketAlreadyOwnedByYou: { + Code: "BucketAlreadyOwnedByYou", + Description: "Your previous request to create the named bucket succeeded and you already own it.", + HTTPStatusCode: http.StatusConflict, + }, + ErrInvalidBucketName: { + Code: "InvalidBucketName", + Description: "The specified bucket is not valid.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrInvalidDigest: { + Code: "InvalidDigest", + Description: "The Content-Md5 you specified is not valid.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrInvalidMaxUploads: { + Code: "InvalidArgument", + Description: "Argument max-uploads must be an integer between 0 and 2147483647", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrInvalidMaxKeys: { + Code: "InvalidArgument", + Description: "Argument maxKeys must be an integer between 0 and 2147483647", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrInvalidMaxParts: { + Code: "InvalidArgument", + Description: "Argument max-parts must be an integer between 0 and 2147483647", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrInvalidPartNumberMarker: { + Code: "InvalidArgument", + Description: "Argument partNumberMarker must be an integer.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrNoSuchBucket: { + Code: "NoSuchBucket", + Description: "The specified bucket does not exist", + HTTPStatusCode: http.StatusNotFound, + }, + ErrNoSuchKey: { + Code: "NoSuchKey", + Description: "The specified key does not exist.", + HTTPStatusCode: http.StatusNotFound, + }, + ErrNoSuchUpload: { + Code: "NoSuchUpload", + Description: "The specified multipart upload does not exist. The upload ID may be invalid, or the upload may have been aborted or completed.", + HTTPStatusCode: http.StatusNotFound, + }, + ErrInternalError: { + Code: "InternalError", + Description: "We encountered an internal error, please try again.", + HTTPStatusCode: http.StatusInternalServerError, + }, + + ErrInvalidPart: { + Code: "InvalidPart", + Description: "One or more of the specified parts could not be found. The part may not have been uploaded, or the specified entity tag may not match the part's entity tag.", + HTTPStatusCode: http.StatusBadRequest, + }, + + ErrInvalidCopyDest: { + Code: "InvalidRequest", + Description: "This copy request is illegal because it is trying to copy an object to itself without changing the object's metadata, storage class, website redirect location or encryption attributes.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrInvalidCopySource: { + Code: "InvalidArgument", + Description: "Copy Source must mention the source bucket and key: sourcebucket/sourcekey.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrInvalidTag: { + Code: "InvalidArgument", + Description: "The Tag value you have provided is invalid", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrMalformedXML: { + Code: "MalformedXML", + Description: "The XML you provided was not well-formed or did not validate against our published schema.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrAuthHeaderEmpty: { + Code: "InvalidArgument", + Description: "Authorization header is invalid -- one and only one ' ' (space) required.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrSignatureVersionNotSupported: { + Code: "InvalidRequest", + Description: "The authorization mechanism you have provided is not supported. 
Please use AWS4-HMAC-SHA256.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrMalformedPOSTRequest: { + Code: "MalformedPOSTRequest", + Description: "The body of your POST request is not well-formed multipart/form-data.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrPOSTFileRequired: { + Code: "InvalidArgument", + Description: "POST requires exactly one file upload per request.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrPostPolicyConditionInvalidFormat: { + Code: "PostPolicyInvalidKeyName", + Description: "Invalid according to Policy: Policy Condition failed", + HTTPStatusCode: http.StatusForbidden, + }, + ErrEntityTooSmall: { + Code: "EntityTooSmall", + Description: "Your proposed upload is smaller than the minimum allowed object size.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrEntityTooLarge: { + Code: "EntityTooLarge", + Description: "Your proposed upload exceeds the maximum allowed object size.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrMissingFields: { + Code: "MissingFields", + Description: "Missing fields in request.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrMissingCredTag: { + Code: "InvalidRequest", + Description: "Missing Credential field for this request.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrCredMalformed: { + Code: "AuthorizationQueryParametersError", + Description: "Error parsing the X-Amz-Credential parameter; the Credential is mal-formed; expecting \"/YYYYMMDD/REGION/SERVICE/aws4_request\".", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrMalformedDate: { + Code: "MalformedDate", + Description: "Invalid date format header, expected to be in ISO8601, RFC1123 or RFC1123Z time format.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrMalformedPresignedDate: { + Code: "AuthorizationQueryParametersError", + Description: "X-Amz-Date must be in the ISO8601 Long Format \"yyyyMMdd'T'HHmmss'Z'\"", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrMissingSignHeadersTag: { + Code: "InvalidArgument", + Description: "Signature header missing SignedHeaders field.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrMissingSignTag: { + Code: "AccessDenied", + Description: "Signature header missing Signature field.", + HTTPStatusCode: http.StatusBadRequest, + }, + + ErrUnsignedHeaders: { + Code: "AccessDenied", + Description: "There were headers present in the request which were not signed", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrInvalidQueryParams: { + Code: "AuthorizationQueryParametersError", + Description: "Query-string authentication version 4 requires the X-Amz-Algorithm, X-Amz-Credential, X-Amz-Signature, X-Amz-Date, X-Amz-SignedHeaders, and X-Amz-Expires parameters.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrInvalidQuerySignatureAlgo: { + Code: "AuthorizationQueryParametersError", + Description: "X-Amz-Algorithm only supports \"AWS4-HMAC-SHA256\".", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrExpiredPresignRequest: { + Code: "AccessDenied", + Description: "Request has expired", + HTTPStatusCode: http.StatusForbidden, + }, + ErrMalformedExpires: { + Code: "AuthorizationQueryParametersError", + Description: "X-Amz-Expires should be a number", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrNegativeExpires: { + Code: "AuthorizationQueryParametersError", + Description: "X-Amz-Expires must be non-negative", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrMaximumExpires: { + Code: "AuthorizationQueryParametersError", + Description: "X-Amz-Expires must be less than a week (in 
seconds); that is, the given X-Amz-Expires must be less than 604800 seconds", + HTTPStatusCode: http.StatusBadRequest, + }, + + ErrInvalidAccessKeyID: { + Code: "InvalidAccessKeyId", + Description: "The access key ID you provided does not exist in our records.", + HTTPStatusCode: http.StatusForbidden, + }, + + ErrRequestNotReadyYet: { + Code: "AccessDenied", + Description: "Request is not valid yet", + HTTPStatusCode: http.StatusForbidden, + }, + + ErrSignatureDoesNotMatch: { + Code: "SignatureDoesNotMatch", + Description: "The request signature we calculated does not match the signature you provided. Check your key and signing method.", + HTTPStatusCode: http.StatusForbidden, + }, + + ErrContentSHA256Mismatch: { + Code: "XAmzContentSHA256Mismatch", + Description: "The provided 'x-amz-content-sha256' header does not match what was computed.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrMissingDateHeader: { + Code: "AccessDenied", + Description: "AWS authentication requires a valid Date or x-amz-date header", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrInvalidRequest: { + Code: "InvalidRequest", + Description: "Invalid Request", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrNotImplemented: { + Code: "NotImplemented", + Description: "A header you provided implies functionality that is not implemented", + HTTPStatusCode: http.StatusNotImplemented, + }, + ErrExistingObjectIsDirectory: { + Code: "ExistingObjectIsDirectory", + Description: "Existing Object is a directory.", + HTTPStatusCode: http.StatusConflict, + }, +} + +// GetAPIError provides API Error for input API error code. +func GetAPIError(code ErrorCode) APIError { + return errorCodeResponse[code] +} diff --git a/weed/s3api/stats.go b/weed/s3api/stats.go new file mode 100644 index 000000000..b667b32a0 --- /dev/null +++ b/weed/s3api/stats.go @@ -0,0 +1,38 @@ +package s3api + +import ( + stats_collect "github.com/chrislusf/seaweedfs/weed/stats" + "github.com/chrislusf/seaweedfs/weed/util" + "net/http" + "strconv" + "time" +) + +type StatusRecorder struct { + http.ResponseWriter + Status int +} + +func NewStatusResponseWriter(w http.ResponseWriter) *StatusRecorder { + return &StatusRecorder{w, http.StatusOK} +} + +func (r *StatusRecorder) WriteHeader(status int) { + r.Status = status + r.ResponseWriter.WriteHeader(status) +} + +func (r *StatusRecorder) Flush() { + r.ResponseWriter.(http.Flusher).Flush() +} + +func track(f http.HandlerFunc, action string) http.HandlerFunc { + return func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Server", "SeaweedFS S3 "+util.VERSION) + recorder := NewStatusResponseWriter(w) + start := time.Now() + f(recorder, r) + stats_collect.S3RequestHistogram.WithLabelValues(action).Observe(time.Since(start).Seconds()) + stats_collect.S3RequestCounter.WithLabelValues(action, strconv.Itoa(recorder.Status)).Inc() + } +} diff --git a/weed/s3api/tags.go b/weed/s3api/tags.go new file mode 100644 index 000000000..9ff7d1fba --- /dev/null +++ b/weed/s3api/tags.go @@ -0,0 +1,38 @@ +package s3api + +import ( + "encoding/xml" +) + +type Tag struct { + Key string `xml:"Key"` + Value string `xml:"Value"` +} + +type TagSet struct { + Tag []Tag `xml:"Tag"` +} + +type Tagging struct { + XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Tagging"` + TagSet TagSet `xml:"TagSet"` +} + +func (t *Tagging) ToTags() map[string]string { + output := make(map[string]string) + for _, tag := range t.TagSet.Tag { + output[tag.Key] = tag.Value + } + return output +} + +func FromTags(tags 
map[string]string) (t *Tagging) { + t = &Tagging{} + for k, v := range tags { + t.TagSet.Tag = append(t.TagSet.Tag, Tag{ + Key: k, + Value: v, + }) + } + return +} diff --git a/weed/s3api/tags_test.go b/weed/s3api/tags_test.go new file mode 100644 index 000000000..887843d6f --- /dev/null +++ b/weed/s3api/tags_test.go @@ -0,0 +1,50 @@ +package s3api + +import ( + "encoding/xml" + "github.com/stretchr/testify/assert" + "testing" +) + +func TestXMLUnmarshall(t *testing.T) { + + input := `<?xml version="1.0" encoding="UTF-8"?> +<Tagging xmlns="http://s3.amazonaws.com/doc/2006-03-01/"> + <TagSet> + <Tag> + <Key>key1</Key> + <Value>value1</Value> + </Tag> + </TagSet> +</Tagging> +` + + tags := &Tagging{} + + xml.Unmarshal([]byte(input), tags) + + assert.Equal(t, len(tags.TagSet.Tag), 1) + assert.Equal(t, tags.TagSet.Tag[0].Key, "key1") + assert.Equal(t, tags.TagSet.Tag[0].Value, "value1") + +} + +func TestXMLMarshall(t *testing.T) { + tags := &Tagging{ + TagSet: TagSet{ + []Tag{ + { + Key: "key1", + Value: "value1", + }, + }, + }, + } + + actual := string(encodeResponse(tags)) + + expected := `<?xml version="1.0" encoding="UTF-8"?> +<Tagging xmlns="http://s3.amazonaws.com/doc/2006-03-01/"><TagSet><Tag><Key>key1</Key><Value>value1</Value></Tag></TagSet></Tagging>` + assert.Equal(t, expected, actual) + +} diff --git a/weed/security/guard.go b/weed/security/guard.go index 17fe2ea9e..87ec91ec1 100644 --- a/weed/security/guard.go +++ b/weed/security/guard.go @@ -62,7 +62,7 @@ func NewGuard(whiteList []string, signingKey string, expiresAfterSec int, readSi return g } -func (g *Guard) WhiteList(f func(w http.ResponseWriter, r *http.Request)) func(w http.ResponseWriter, r *http.Request) { +func (g *Guard) WhiteList(f http.HandlerFunc) http.HandlerFunc { if !g.isWriteActive { //if no security needed, just skip all checking return f diff --git a/weed/security/tls.go b/weed/security/tls.go index e81ba4831..7d3ffcdca 100644 --- a/weed/security/tls.go +++ b/weed/security/tls.go @@ -1,31 +1,46 @@ package security import ( + "context" "crypto/tls" "crypto/x509" - "github.com/spf13/viper" + "github.com/chrislusf/seaweedfs/weed/util" + grpc_auth "github.com/grpc-ecosystem/go-grpc-middleware/auth" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/peer" + "google.golang.org/grpc/status" "io/ioutil" + "strings" - "github.com/chrislusf/seaweedfs/weed/glog" "google.golang.org/grpc" "google.golang.org/grpc/credentials" + + "github.com/chrislusf/seaweedfs/weed/glog" ) -func LoadServerTLS(config *viper.Viper, component string) grpc.ServerOption { +type Authenticator struct { + AllowedWildcardDomain string + AllowedCommonNames map[string]bool +} + +func LoadServerTLS(config *util.ViperProxy, component string) (grpc.ServerOption, grpc.ServerOption) { if config == nil { - return nil + return nil, nil } // load cert/key, ca cert cert, err := tls.LoadX509KeyPair(config.GetString(component+".cert"), config.GetString(component+".key")) if err != nil { - glog.Errorf("load cert/key error: %v", err) - return nil + glog.V(1).Infof("load cert: %s / key: %s error: %v", + config.GetString(component+".cert"), + config.GetString(component+".key"), + err) + return nil, nil } - caCert, err := ioutil.ReadFile(config.GetString("ca")) + caCert, err := ioutil.ReadFile(config.GetString("grpc.ca")) if err != nil { - glog.Errorf("read ca cert file error: %v", err) - return nil + glog.V(1).Infof("read ca cert file %s error: %v", config.GetString("grpc.ca"), err) + return nil, nil } caCertPool := x509.NewCertPool() caCertPool.AppendCertsFromPEM(caCert) @@ -35,23 +50,41 @@ func LoadServerTLS(config *viper.Viper, component string) grpc.ServerOption { ClientAuth: tls.RequireAndVerifyClientCert, }) - return grpc.Creds(ta) + allowedCommonNames := config.GetString(component + ".allowed_commonNames") + allowedWildcardDomain := 
config.GetString("grpc.allowed_wildcard_domain") + if allowedCommonNames != "" || allowedWildcardDomain != "" { + allowedCommonNamesMap := make(map[string]bool) + for _, s := range strings.Split(allowedCommonNames, ",") { + allowedCommonNamesMap[s] = true + } + auther := Authenticator{ + AllowedCommonNames: allowedCommonNamesMap, + AllowedWildcardDomain: allowedWildcardDomain, + } + return grpc.Creds(ta), grpc.UnaryInterceptor(grpc_auth.UnaryServerInterceptor(auther.Authenticate)) + } + return grpc.Creds(ta), nil } -func LoadClientTLS(config *viper.Viper, component string) grpc.DialOption { +func LoadClientTLS(config *util.ViperProxy, component string) grpc.DialOption { if config == nil { return grpc.WithInsecure() } + certFileName, keyFileName, caFileName := config.GetString(component+".cert"), config.GetString(component+".key"), config.GetString("grpc.ca") + if certFileName == "" || keyFileName == "" || caFileName == "" { + return grpc.WithInsecure() + } + // load cert/key, cacert - cert, err := tls.LoadX509KeyPair(config.GetString(component+".cert"), config.GetString(component+".key")) + cert, err := tls.LoadX509KeyPair(certFileName, keyFileName) if err != nil { - glog.Errorf("load cert/key error: %v", err) + glog.V(1).Infof("load cert/key error: %v", err) return grpc.WithInsecure() } - caCert, err := ioutil.ReadFile(config.GetString("ca")) + caCert, err := ioutil.ReadFile(caFileName) if err != nil { - glog.Errorf("read ca cert file error: %v", err) + glog.V(1).Infof("read ca cert file error: %v", err) return grpc.WithInsecure() } caCertPool := x509.NewCertPool() @@ -64,3 +97,28 @@ func LoadClientTLS(config *viper.Viper, component string) grpc.DialOption { }) return grpc.WithTransportCredentials(ta) } + +func (a Authenticator) Authenticate(ctx context.Context) (newCtx context.Context, err error) { + p, ok := peer.FromContext(ctx) + if !ok { + return ctx, status.Error(codes.Unauthenticated, "no peer found") + } + + tlsAuth, ok := p.AuthInfo.(credentials.TLSInfo) + if !ok { + return ctx, status.Error(codes.Unauthenticated, "unexpected peer transport credentials") + } + if len(tlsAuth.State.VerifiedChains) == 0 || len(tlsAuth.State.VerifiedChains[0]) == 0 { + return ctx, status.Error(codes.Unauthenticated, "could not verify peer certificate") + } + + commonName := tlsAuth.State.VerifiedChains[0][0].Subject.CommonName + if a.AllowedWildcardDomain != "" && strings.HasSuffix(commonName, a.AllowedWildcardDomain) { + return ctx, nil + } + if _, ok := a.AllowedCommonNames[commonName]; ok { + return ctx, nil + } + + return ctx, status.Errorf(codes.Unauthenticated, "invalid subject common name: %s", commonName) +} diff --git a/weed/sequence/snowflake_sequencer.go b/weed/sequence/snowflake_sequencer.go new file mode 100644 index 000000000..300449fa0 --- /dev/null +++ b/weed/sequence/snowflake_sequencer.go @@ -0,0 +1,46 @@ +package sequence + +import ( + "fmt" + "hash/fnv" + + "github.com/bwmarrin/snowflake" + "github.com/chrislusf/seaweedfs/weed/glog" +) + +// a simple snowflake Sequencer +type SnowflakeSequencer struct { + node *snowflake.Node +} + +func NewSnowflakeSequencer(nodeid string) (*SnowflakeSequencer, error) { + nodeid_hash := hash(nodeid) & 0x3ff + glog.V(0).Infof("use snowflake seq id generator, nodeid:%s hex_of_nodeid: %x", nodeid, nodeid_hash) + node, err := snowflake.NewNode(int64(nodeid_hash)) + if err != nil { + fmt.Println(err) + return nil, err + } + + sequencer := &SnowflakeSequencer{node: node} + return sequencer, nil +} + +func hash(s string) uint32 { + h := fnv.New32a() + 
diff --git a/weed/server/common.go b/weed/server/common.go index 888ddec49..5c5f1b8eb 100644 --- a/weed/server/common.go +++ b/weed/server/common.go @@ -1,10 +1,11 @@ package weed_server import ( - "bytes" "encoding/json" "errors" "fmt" + "io" + "mime/multipart" "net/http" "path/filepath" "strconv" @@ -37,14 +38,22 @@ func init() { func writeJson(w http.ResponseWriter, r *http.Request, httpStatus int, obj interface{}) (err error) { var bytes []byte - if r.FormValue("pretty") != "" { - bytes, err = json.MarshalIndent(obj, "", " ") - } else { - bytes, err = json.Marshal(obj) + if obj != nil { + if r.FormValue("pretty") != "" { + bytes, err = json.MarshalIndent(obj, "", " ") + } else { + bytes, err = json.Marshal(obj) + } } if err != nil { return } + + if httpStatus >= 400 { + glog.V(0).Infof("response method:%s URL:%s with httpStatus:%d and JSON:%s", + r.Method, r.URL.String(), httpStatus, string(bytes)) + } + callback := r.FormValue("callback") if callback == "" { w.Header().Set("Content-Type", "application/json") @@ -77,7 +86,8 @@ func writeJson(w http.ResponseWriter, r *http.Request, httpStatus int, obj inter // wrapper for writeJson - just logs errors func writeJsonQuiet(w http.ResponseWriter, r *http.Request, httpStatus int, obj interface{}) { if err := writeJson(w, r, httpStatus, obj); err != nil { - glog.V(0).Infof("error writing JSON %+v status %d: %v", obj, httpStatus, err) + glog.V(0).Infof("error writing JSON status %d: %v", httpStatus, err) + glog.V(1).Infof("JSON content: %+v", obj) } } func writeJsonError(w http.ResponseWriter, r *http.Request, httpStatus int, err error) { @@ -90,7 +100,7 @@ func debug(params ...interface{}) { glog.V(4).Infoln(params...) 
} -func submitForClientHandler(w http.ResponseWriter, r *http.Request, masterUrl string, grpcDialOption grpc.DialOption) { +func submitForClientHandler(w http.ResponseWriter, r *http.Request, masterFn operation.GetMasterFn, grpcDialOption grpc.DialOption) { m := make(map[string]interface{}) if r.Method != "POST" { writeJsonError(w, r, http.StatusMethodNotAllowed, errors.New("Only submit via POST!")) @@ -98,13 +108,13 @@ func submitForClientHandler(w http.ResponseWriter, r *http.Request, masterUrl st } debug("parsing upload file...") - fname, data, mimeType, pairMap, isGzipped, originalDataSize, lastModified, _, _, pe := needle.ParseUpload(r) + pu, pe := needle.ParseUpload(r, 256*1024*1024) if pe != nil { writeJsonError(w, r, http.StatusBadRequest, pe) return } - debug("assigning file id for", fname) + debug("assigning file id for", pu.FileName) r.ParseForm() count := uint64(1) if r.FormValue("count") != "" { @@ -117,32 +127,34 @@ func submitForClientHandler(w http.ResponseWriter, r *http.Request, masterUrl st ar := &operation.VolumeAssignRequest{ Count: count, DataCenter: r.FormValue("dataCenter"), + Rack: r.FormValue("rack"), Replication: r.FormValue("replication"), Collection: r.FormValue("collection"), Ttl: r.FormValue("ttl"), + DiskType: r.FormValue("disk"), } - assignResult, ae := operation.Assign(masterUrl, grpcDialOption, ar) + assignResult, ae := operation.Assign(masterFn, grpcDialOption, ar) if ae != nil { writeJsonError(w, r, http.StatusInternalServerError, ae) return } url := "http://" + assignResult.Url + "/" + assignResult.Fid - if lastModified != 0 { - url = url + "?ts=" + strconv.FormatUint(lastModified, 10) + if pu.ModifiedTime != 0 { + url = url + "?ts=" + strconv.FormatUint(pu.ModifiedTime, 10) } debug("upload file to store", url) - uploadResult, err := operation.Upload(url, fname, bytes.NewReader(data), isGzipped, mimeType, pairMap, assignResult.Auth) + uploadResult, err := operation.UploadData(url, pu.FileName, false, pu.Data, pu.IsGzipped, pu.MimeType, pu.PairMap, assignResult.Auth) if err != nil { writeJsonError(w, r, http.StatusInternalServerError, err) return } - m["fileName"] = fname + m["fileName"] = pu.FileName m["fid"] = assignResult.Fid m["fileUrl"] = assignResult.PublicUrl + "/" + assignResult.Fid - m["size"] = originalDataSize + m["size"] = pu.OriginalDataSize m["eTag"] = uploadResult.ETag writeJsonQuiet(w, r, http.StatusCreated, m) return @@ -183,19 +195,19 @@ func parseURLPath(path string) (vid, fid, filename, ext string, isVolumeIdOnly b func statsHealthHandler(w http.ResponseWriter, r *http.Request) { m := make(map[string]interface{}) - m["Version"] = util.VERSION + m["Version"] = util.Version() writeJsonQuiet(w, r, http.StatusOK, m) } func statsCounterHandler(w http.ResponseWriter, r *http.Request) { m := make(map[string]interface{}) - m["Version"] = util.VERSION + m["Version"] = util.Version() m["Counters"] = serverStats writeJsonQuiet(w, r, http.StatusOK, m) } func statsMemoryHandler(w http.ResponseWriter, r *http.Request) { m := make(map[string]interface{}) - m["Version"] = util.VERSION + m["Version"] = util.Version() m["Memory"] = stats.MemStat() writeJsonQuiet(w, r, http.StatusOK, m) } @@ -209,3 +221,106 @@ func handleStaticResources2(r *mux.Router) { r.Handle("/favicon.ico", http.FileServer(statikFS)) r.PathPrefix("/seaweedfsstatic/").Handler(http.StripPrefix("/seaweedfsstatic", http.FileServer(statikFS))) } + +func adjustHeaderContentDisposition(w http.ResponseWriter, r *http.Request, filename string) { + if filename != "" { + contentDisposition 
:= "inline" + if r.FormValue("dl") != "" { + if dl, _ := strconv.ParseBool(r.FormValue("dl")); dl { + contentDisposition = "attachment" + } + } + w.Header().Set("Content-Disposition", contentDisposition+`; filename="`+fileNameEscaper.Replace(filename)+`"`) + } +} + +func processRangeRequest(r *http.Request, w http.ResponseWriter, totalSize int64, mimeType string, writeFn func(writer io.Writer, offset int64, size int64) error) { + rangeReq := r.Header.Get("Range") + + if rangeReq == "" { + w.Header().Set("Content-Length", strconv.FormatInt(totalSize, 10)) + if err := writeFn(w, 0, totalSize); err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + return + } + + //the rest is dealing with partial content request + //mostly copy from src/pkg/net/http/fs.go + ranges, err := parseRange(rangeReq, totalSize) + if err != nil { + http.Error(w, err.Error(), http.StatusRequestedRangeNotSatisfiable) + return + } + if sumRangesSize(ranges) > totalSize { + // The total number of bytes in all the ranges + // is larger than the size of the file by + // itself, so this is probably an attack, or a + // dumb client. Ignore the range request. + return + } + if len(ranges) == 0 { + return + } + if len(ranges) == 1 { + // RFC 2616, Section 14.16: + // "When an HTTP message includes the content of a single + // range (for example, a response to a request for a + // single range, or to a request for a set of ranges + // that overlap without any holes), this content is + // transmitted with a Content-Range header, and a + // Content-Length header showing the number of bytes + // actually transferred. + // ... + // A response to a request for a single range MUST NOT + // be sent using the multipart/byteranges media type." + ra := ranges[0] + w.Header().Set("Content-Length", strconv.FormatInt(ra.length, 10)) + w.Header().Set("Content-Range", ra.contentRange(totalSize)) + + err = writeFn(w, ra.start, ra.length) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + return + } + + // process multiple ranges + for _, ra := range ranges { + if ra.start > totalSize { + http.Error(w, "Out of Range", http.StatusRequestedRangeNotSatisfiable) + return + } + } + sendSize := rangesMIMESize(ranges, mimeType, totalSize) + pr, pw := io.Pipe() + mw := multipart.NewWriter(pw) + w.Header().Set("Content-Type", "multipart/byteranges; boundary="+mw.Boundary()) + sendContent := pr + defer pr.Close() // cause writing goroutine to fail and exit if CopyN doesn't finish. 
+ go func() { + for _, ra := range ranges { + part, e := mw.CreatePart(ra.mimeHeader(mimeType, totalSize)) + if e != nil { + pw.CloseWithError(e) + return + } + if e = writeFn(part, ra.start, ra.length); e != nil { + pw.CloseWithError(e) + return + } + } + mw.Close() + pw.Close() + }() + if w.Header().Get("Content-Encoding") == "" { + w.Header().Set("Content-Length", strconv.FormatInt(sendSize, 10)) + } + w.WriteHeader(http.StatusPartialContent) + if _, err := io.CopyN(w, sendContent, sendSize); err != nil { + http.Error(w, "Internal Error", http.StatusInternalServerError) + return + } +} diff --git a/weed/server/filer_grpc_server.go b/weed/server/filer_grpc_server.go index a84feec2d..3821de6a9 100644 --- a/weed/server/filer_grpc_server.go +++ b/weed/server/filer_grpc_server.go @@ -6,89 +6,98 @@ import ( "os" "path/filepath" "strconv" - "strings" "time" - "github.com/chrislusf/seaweedfs/weed/filer2" + "github.com/chrislusf/seaweedfs/weed/filer" "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/operation" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" "github.com/chrislusf/seaweedfs/weed/pb/master_pb" + "github.com/chrislusf/seaweedfs/weed/storage/needle" + "github.com/chrislusf/seaweedfs/weed/util" ) func (fs *FilerServer) LookupDirectoryEntry(ctx context.Context, req *filer_pb.LookupDirectoryEntryRequest) (*filer_pb.LookupDirectoryEntryResponse, error) { - entry, err := fs.filer.FindEntry(ctx, filer2.FullPath(filepath.ToSlash(filepath.Join(req.Directory, req.Name)))) + glog.V(4).Infof("LookupDirectoryEntry %s", filepath.Join(req.Directory, req.Name)) + + entry, err := fs.filer.FindEntry(ctx, util.JoinPath(req.Directory, req.Name)) + if err == filer_pb.ErrNotFound { + return &filer_pb.LookupDirectoryEntryResponse{}, err + } if err != nil { + glog.V(3).Infof("LookupDirectoryEntry %s: %+v, ", filepath.Join(req.Directory, req.Name), err) return nil, err } return &filer_pb.LookupDirectoryEntryResponse{ Entry: &filer_pb.Entry{ - Name: req.Name, - IsDirectory: entry.IsDirectory(), - Attributes: filer2.EntryAttributeToPb(entry), - Chunks: entry.Chunks, - Extended: entry.Extended, + Name: req.Name, + IsDirectory: entry.IsDirectory(), + Attributes: filer.EntryAttributeToPb(entry), + Chunks: entry.Chunks, + Extended: entry.Extended, + HardLinkId: entry.HardLinkId, + HardLinkCounter: entry.HardLinkCounter, + Content: entry.Content, }, }, nil } -func (fs *FilerServer) ListEntries(req *filer_pb.ListEntriesRequest, stream filer_pb.SeaweedFiler_ListEntriesServer) error { +func (fs *FilerServer) ListEntries(req *filer_pb.ListEntriesRequest, stream filer_pb.SeaweedFiler_ListEntriesServer) (err error) { + + glog.V(4).Infof("ListEntries %v", req) limit := int(req.Limit) if limit == 0 { limit = fs.option.DirListingLimit } - paginationLimit := filer2.PaginationSize + paginationLimit := filer.PaginationSize if limit < paginationLimit { paginationLimit = limit } lastFileName := req.StartFromFileName includeLastFile := req.InclusiveStartFrom + var listErr error for limit > 0 { - entries, err := fs.filer.ListDirectoryEntries(stream.Context(), filer2.FullPath(req.Directory), lastFileName, includeLastFile, paginationLimit) - if err != nil { - return err - } - if len(entries) == 0 { - return nil - } - - includeLastFile = false - - for _, entry := range entries { - - lastFileName = entry.Name() - - if req.Prefix != "" { - if !strings.HasPrefix(entry.Name(), req.Prefix) { - continue - } - } - - if err := stream.Send(&filer_pb.ListEntriesResponse{ + var hasEntries bool + lastFileName, 
listErr = fs.filer.StreamListDirectoryEntries(stream.Context(), util.FullPath(req.Directory), lastFileName, includeLastFile, int64(paginationLimit), req.Prefix, "", "", func(entry *filer.Entry) bool { + hasEntries = true + if err = stream.Send(&filer_pb.ListEntriesResponse{ Entry: &filer_pb.Entry{ - Name: entry.Name(), - IsDirectory: entry.IsDirectory(), - Chunks: entry.Chunks, - Attributes: filer2.EntryAttributeToPb(entry), - Extended: entry.Extended, + Name: entry.Name(), + IsDirectory: entry.IsDirectory(), + Chunks: entry.Chunks, + Attributes: filer.EntryAttributeToPb(entry), + Extended: entry.Extended, + HardLinkId: entry.HardLinkId, + HardLinkCounter: entry.HardLinkCounter, + Content: entry.Content, }, }); err != nil { - return err + return false } + limit-- if limit == 0 { - return nil + return false } - } + return true + }) - if len(entries) < paginationLimit { - break + if listErr != nil { + return listErr + } + if err != nil { + return err } + if !hasEntries { + return nil + } + + includeLastFile = false } @@ -126,46 +135,75 @@ func (fs *FilerServer) LookupVolume(ctx context.Context, req *filer_pb.LookupVol return resp, nil } +func (fs *FilerServer) lookupFileId(fileId string) (targetUrls []string, err error) { + fid, err := needle.ParseFileIdFromString(fileId) + if err != nil { + return nil, err + } + locations, found := fs.filer.MasterClient.GetLocations(uint32(fid.VolumeId)) + if !found || len(locations) == 0 { + return nil, fmt.Errorf("not found volume %d in %s", fid.VolumeId, fileId) + } + for _, loc := range locations { + targetUrls = append(targetUrls, fmt.Sprintf("http://%s/%s", loc.Url, fileId)) + } + return +} + func (fs *FilerServer) CreateEntry(ctx context.Context, req *filer_pb.CreateEntryRequest) (resp *filer_pb.CreateEntryResponse, err error) { - fullpath := filer2.FullPath(filepath.ToSlash(filepath.Join(req.Directory, req.Entry.Name))) - chunks, garbages := filer2.CompactFileChunks(req.Entry.Chunks) + glog.V(4).Infof("CreateEntry %v/%v", req.Directory, req.Entry.Name) - if req.Entry.Attributes == nil { - return nil, fmt.Errorf("can not create entry with empty attributes") - } + resp = &filer_pb.CreateEntryResponse{} - err = fs.filer.CreateEntry(ctx, &filer2.Entry{ - FullPath: fullpath, - Attr: filer2.PbToEntryAttribute(req.Entry.Attributes), - Chunks: chunks, - }) + chunks, garbage, err2 := fs.cleanupChunks(util.Join(req.Directory, req.Entry.Name), nil, req.Entry) + if err2 != nil { + return &filer_pb.CreateEntryResponse{}, fmt.Errorf("CreateEntry cleanupChunks %s %s: %v", req.Directory, req.Entry.Name, err2) + } - if err == nil { - fs.filer.DeleteChunks(garbages) + createErr := fs.filer.CreateEntry(ctx, &filer.Entry{ + FullPath: util.JoinPath(req.Directory, req.Entry.Name), + Attr: filer.PbToEntryAttribute(req.Entry.Attributes), + Chunks: chunks, + Extended: req.Entry.Extended, + HardLinkId: filer.HardLinkId(req.Entry.HardLinkId), + HardLinkCounter: req.Entry.HardLinkCounter, + Content: req.Entry.Content, + }, req.OExcl, req.IsFromOtherCluster, req.Signatures) + + if createErr == nil { + fs.filer.DeleteChunks(garbage) + } else { + glog.V(3).Infof("CreateEntry %s: %v", filepath.Join(req.Directory, req.Entry.Name), createErr) + resp.Error = createErr.Error() } - return &filer_pb.CreateEntryResponse{}, err + return } func (fs *FilerServer) UpdateEntry(ctx context.Context, req *filer_pb.UpdateEntryRequest) (*filer_pb.UpdateEntryResponse, error) { - fullpath := filepath.ToSlash(filepath.Join(req.Directory, req.Entry.Name)) - entry, err := fs.filer.FindEntry(ctx, 
filer2.FullPath(fullpath)) + glog.V(4).Infof("UpdateEntry %v", req) + + fullpath := util.Join(req.Directory, req.Entry.Name) + entry, err := fs.filer.FindEntry(ctx, util.FullPath(fullpath)) if err != nil { return &filer_pb.UpdateEntryResponse{}, fmt.Errorf("not found %s: %v", fullpath, err) } - // remove old chunks if not included in the new ones - unusedChunks := filer2.MinusChunks(entry.Chunks, req.Entry.Chunks) - - chunks, garbages := filer2.CompactFileChunks(req.Entry.Chunks) + chunks, garbage, err2 := fs.cleanupChunks(fullpath, entry, req.Entry) + if err2 != nil { + return &filer_pb.UpdateEntryResponse{}, fmt.Errorf("UpdateEntry cleanupChunks %s: %v", fullpath, err2) + } - newEntry := &filer2.Entry{ - FullPath: filer2.FullPath(filepath.ToSlash(filepath.Join(req.Directory, req.Entry.Name))), - Attr: entry.Attr, - Extended: req.Entry.Extended, - Chunks: chunks, + newEntry := &filer.Entry{ + FullPath: util.JoinPath(req.Directory, req.Entry.Name), + Attr: entry.Attr, + Extended: req.Entry.Extended, + Chunks: chunks, + HardLinkId: filer.HardLinkId(req.Entry.HardLinkId), + HardLinkCounter: req.Entry.HardLinkCounter, + Content: req.Entry.Content, } glog.V(3).Infof("updating %s: %+v, chunks %d: %v => %+v, chunks %d: %v, extended: %v => %v", @@ -188,76 +226,166 @@ func (fs *FilerServer) UpdateEntry(ctx context.Context, req *filer_pb.UpdateEntr } - if filer2.EqualEntry(entry, newEntry) { + if filer.EqualEntry(entry, newEntry) { return &filer_pb.UpdateEntryResponse{}, err } if err = fs.filer.UpdateEntry(ctx, entry, newEntry); err == nil { - fs.filer.DeleteChunks(unusedChunks) - fs.filer.DeleteChunks(garbages) - } + fs.filer.DeleteChunks(garbage) + + fs.filer.NotifyUpdateEvent(ctx, entry, newEntry, true, req.IsFromOtherCluster, req.Signatures) - fs.filer.NotifyUpdateEvent(entry, newEntry, true) + } else { + glog.V(3).Infof("UpdateEntry %s: %v", filepath.Join(req.Directory, req.Entry.Name), err) + } return &filer_pb.UpdateEntryResponse{}, err } -func (fs *FilerServer) DeleteEntry(ctx context.Context, req *filer_pb.DeleteEntryRequest) (resp *filer_pb.DeleteEntryResponse, err error) { - err = fs.filer.DeleteEntryMetaAndData(ctx, filer2.FullPath(filepath.ToSlash(filepath.Join(req.Directory, req.Name))), req.IsRecursive, req.IgnoreRecursiveError, req.IsDeleteData) - return &filer_pb.DeleteEntryResponse{}, err -} +func (fs *FilerServer) cleanupChunks(fullpath string, existingEntry *filer.Entry, newEntry *filer_pb.Entry) (chunks, garbage []*filer_pb.FileChunk, err error) { -func (fs *FilerServer) AssignVolume(ctx context.Context, req *filer_pb.AssignVolumeRequest) (resp *filer_pb.AssignVolumeResponse, err error) { + // remove old chunks if not included in the new ones + if existingEntry != nil { + garbage, err = filer.MinusChunks(fs.lookupFileId, existingEntry.Chunks, newEntry.Chunks) + if err != nil { + return newEntry.Chunks, nil, fmt.Errorf("MinusChunks: %v", err) + } + } - ttlStr := "" - if req.TtlSec > 0 { - ttlStr = strconv.Itoa(int(req.TtlSec)) + // files with manifest chunks are usually large and append only, skip calculating covered chunks + manifestChunks, nonManifestChunks := filer.SeparateManifestChunks(newEntry.Chunks) + + chunks, coveredChunks := filer.CompactFileChunks(fs.lookupFileId, nonManifestChunks) + garbage = append(garbage, coveredChunks...) 
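For context on the compaction above: deciding which chunks are garbage is interval arithmetic, since a chunk is fully hidden once chunks with a later mtime cover its whole byte range. A minimal standalone sketch of that test, with simplified stand-in types rather than the real filer_pb.FileChunk and CompactFileChunks machinery:

package main

import (
	"fmt"
	"sort"
)

// chunk is a simplified stand-in for filer_pb.FileChunk: a byte interval
// plus the write time that decides which chunk wins on overlap.
type chunk struct {
	offset, size int64
	mtime        int64
}

// coversWhole reports whether the spans, taken together, cover all of c.
func coversWhole(c chunk, spans []chunk) bool {
	sort.Slice(spans, func(i, j int) bool { return spans[i].offset < spans[j].offset })
	cursor := c.offset
	for _, s := range spans {
		if s.offset > cursor {
			return false // a gap of c is still visible
		}
		if end := s.offset + s.size; end > cursor {
			cursor = end
		}
	}
	return cursor >= c.offset+c.size
}

// compact splits chunks into still-visible ones and garbage that is
// completely overwritten by newer chunks.
func compact(chunks []chunk) (live, garbage []chunk) {
	for _, c := range chunks {
		var newer []chunk
		for _, d := range chunks {
			if d.mtime > c.mtime && d.offset < c.offset+c.size && d.offset+d.size > c.offset {
				newer = append(newer, d)
			}
		}
		if len(newer) > 0 && coversWhole(c, newer) {
			garbage = append(garbage, c)
		} else {
			live = append(live, c)
		}
	}
	return
}

func main() {
	live, garbage := compact([]chunk{
		{offset: 0, size: 100, mtime: 1}, // fully overwritten by the two writes below
		{offset: 0, size: 50, mtime: 2},
		{offset: 50, size: 50, mtime: 3},
	})
	fmt.Println(len(live), len(garbage)) // 2 1
}

The real implementation additionally has to resolve chunk manifests and look up file ids, which is why it takes the fs.lookupFileId callback seen above.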
+ + if newEntry.Attributes != nil { + so := fs.detectStorageOption(fullpath, + newEntry.Attributes.Collection, + newEntry.Attributes.Replication, + newEntry.Attributes.TtlSec, + newEntry.Attributes.DiskType, + "", + "", + ) + chunks, err = filer.MaybeManifestize(fs.saveAsChunk(so), chunks) + if err != nil { + // not good, but should be ok + glog.V(0).Infof("MaybeManifestize: %v", err) + } } - var altRequest *operation.VolumeAssignRequest + chunks = append(chunks, manifestChunks...) + + return +} - dataCenter := req.DataCenter - if dataCenter == "" { - dataCenter = fs.option.DataCenter +func (fs *FilerServer) AppendToEntry(ctx context.Context, req *filer_pb.AppendToEntryRequest) (*filer_pb.AppendToEntryResponse, error) { + + glog.V(4).Infof("AppendToEntry %v", req) + + fullpath := util.NewFullPath(req.Directory, req.EntryName) + var offset int64 = 0 + entry, err := fs.filer.FindEntry(ctx, fullpath) + if err == filer_pb.ErrNotFound { + entry = &filer.Entry{ + FullPath: fullpath, + Attr: filer.Attr{ + Crtime: time.Now(), + Mtime: time.Now(), + Mode: os.FileMode(0644), + Uid: OS_UID, + Gid: OS_GID, + }, + } + } else { + offset = int64(filer.TotalSize(entry.Chunks)) } - assignRequest := &operation.VolumeAssignRequest{ - Count: uint64(req.Count), - Replication: req.Replication, - Collection: req.Collection, - Ttl: ttlStr, - DataCenter: dataCenter, + for _, chunk := range req.Chunks { + chunk.Offset = offset + offset += int64(chunk.Size) } - if dataCenter != "" { - altRequest = &operation.VolumeAssignRequest{ - Count: uint64(req.Count), - Replication: req.Replication, - Collection: req.Collection, - Ttl: ttlStr, - DataCenter: "", - } + + entry.Chunks = append(entry.Chunks, req.Chunks...) + so := fs.detectStorageOption(string(fullpath), entry.Collection, entry.Replication, entry.TtlSec, entry.DiskType, "", "") + entry.Chunks, err = filer.MaybeManifestize(fs.saveAsChunk(so), entry.Chunks) + if err != nil { + // not good, but should be ok + glog.V(0).Infof("MaybeManifestize: %v", err) } - assignResult, err := operation.Assign(fs.filer.GetMaster(), fs.grpcDialOption, assignRequest, altRequest) + + err = fs.filer.CreateEntry(context.Background(), entry, false, false, nil) + + return &filer_pb.AppendToEntryResponse{}, err +} + +func (fs *FilerServer) DeleteEntry(ctx context.Context, req *filer_pb.DeleteEntryRequest) (resp *filer_pb.DeleteEntryResponse, err error) { + + glog.V(4).Infof("DeleteEntry %v", req) + + err = fs.filer.DeleteEntryMetaAndData(ctx, util.JoinPath(req.Directory, req.Name), req.IsRecursive, req.IgnoreRecursiveError, req.IsDeleteData, req.IsFromOtherCluster, req.Signatures) + resp = &filer_pb.DeleteEntryResponse{} + if err != nil && err != filer_pb.ErrNotFound { + resp.Error = err.Error() + } + return resp, nil +} + +func (fs *FilerServer) AssignVolume(ctx context.Context, req *filer_pb.AssignVolumeRequest) (resp *filer_pb.AssignVolumeResponse, err error) { + + so := fs.detectStorageOption(req.Path, req.Collection, req.Replication, req.TtlSec, req.DiskType, req.DataCenter, req.Rack) + + assignRequest, altRequest := so.ToAssignRequests(int(req.Count)) + + assignResult, err := operation.Assign(fs.filer.GetMaster, fs.grpcDialOption, assignRequest, altRequest) if err != nil { - return nil, fmt.Errorf("assign volume: %v", err) + glog.V(3).Infof("AssignVolume: %v", err) + return &filer_pb.AssignVolumeResponse{Error: fmt.Sprintf("assign volume: %v", err)}, nil } if assignResult.Error != "" { - return nil, fmt.Errorf("assign volume result: %v", assignResult.Error) + 
glog.V(3).Infof("AssignVolume error: %v", assignResult.Error) + return &filer_pb.AssignVolumeResponse{Error: fmt.Sprintf("assign volume result: %v", assignResult.Error)}, nil } return &filer_pb.AssignVolumeResponse{ - FileId: assignResult.Fid, - Count: int32(assignResult.Count), - Url: assignResult.Url, - PublicUrl: assignResult.PublicUrl, - Auth: string(assignResult.Auth), - }, err + FileId: assignResult.Fid, + Count: int32(assignResult.Count), + Url: assignResult.Url, + PublicUrl: assignResult.PublicUrl, + Auth: string(assignResult.Auth), + Collection: so.Collection, + Replication: so.Replication, + }, nil +} + +func (fs *FilerServer) CollectionList(ctx context.Context, req *filer_pb.CollectionListRequest) (resp *filer_pb.CollectionListResponse, err error) { + + glog.V(4).Infof("CollectionList %v", req) + resp = &filer_pb.CollectionListResponse{} + + err = fs.filer.MasterClient.WithClient(func(client master_pb.SeaweedClient) error { + masterResp, err := client.CollectionList(context.Background(), &master_pb.CollectionListRequest{ + IncludeNormalVolumes: req.IncludeNormalVolumes, + IncludeEcVolumes: req.IncludeEcVolumes, + }) + if err != nil { + return err + } + for _, c := range masterResp.Collections { + resp.Collections = append(resp.Collections, &filer_pb.Collection{Name: c.Name}) + } + return nil + }) + + return } func (fs *FilerServer) DeleteCollection(ctx context.Context, req *filer_pb.DeleteCollectionRequest) (resp *filer_pb.DeleteCollectionResponse, err error) { - err = fs.filer.MasterClient.WithClient(ctx, func(client master_pb.SeaweedClient) error { - _, err := client.CollectionDelete(ctx, &master_pb.CollectionDeleteRequest{ + glog.V(4).Infof("DeleteCollection %v", req) + + err = fs.filer.MasterClient.WithClient(func(client master_pb.SeaweedClient) error { + _, err := client.CollectionDelete(context.Background(), &master_pb.CollectionDeleteRequest{ Name: req.GetCollection(), }) return err @@ -268,13 +396,23 @@ func (fs *FilerServer) DeleteCollection(ctx context.Context, req *filer_pb.Delet func (fs *FilerServer) Statistics(ctx context.Context, req *filer_pb.StatisticsRequest) (resp *filer_pb.StatisticsResponse, err error) { - input := &master_pb.StatisticsRequest{ - Replication: req.Replication, - Collection: req.Collection, - Ttl: req.Ttl, - } + var output *master_pb.StatisticsResponse + + err = fs.filer.MasterClient.WithClient(func(masterClient master_pb.SeaweedClient) error { + grpcResponse, grpcErr := masterClient.Statistics(context.Background(), &master_pb.StatisticsRequest{ + Replication: req.Replication, + Collection: req.Collection, + Ttl: req.Ttl, + DiskType: req.DiskType, + }) + if grpcErr != nil { + return grpcErr + } + + output = grpcResponse + return nil + }) - output, err := operation.Statistics(fs.filer.GetMaster(), fs.grpcDialOption, input) if err != nil { return nil, err } @@ -288,10 +426,91 @@ func (fs *FilerServer) Statistics(ctx context.Context, req *filer_pb.StatisticsR func (fs *FilerServer) GetFilerConfiguration(ctx context.Context, req *filer_pb.GetFilerConfigurationRequest) (resp *filer_pb.GetFilerConfigurationResponse, err error) { - return &filer_pb.GetFilerConfigurationResponse{ - Masters: fs.option.Masters, - Collection: fs.option.Collection, - Replication: fs.option.DefaultReplication, - MaxMb: uint32(fs.option.MaxMB), - }, nil + t := &filer_pb.GetFilerConfigurationResponse{ + Masters: fs.option.Masters, + Collection: fs.option.Collection, + Replication: fs.option.DefaultReplication, + MaxMb: uint32(fs.option.MaxMB), + DirBuckets: 
fs.filer.DirBucketsPath, + Cipher: fs.filer.Cipher, + Signature: fs.filer.Signature, + MetricsAddress: fs.metricsAddress, + MetricsIntervalSec: int32(fs.metricsIntervalSec), + } + + glog.V(4).Infof("GetFilerConfiguration: %v", t) + + return t, nil +} + +func (fs *FilerServer) KeepConnected(stream filer_pb.SeaweedFiler_KeepConnectedServer) error { + + req, err := stream.Recv() + if err != nil { + return err + } + + clientName := fmt.Sprintf("%s:%d", req.Name, req.GrpcPort) + m := make(map[string]bool) + for _, tp := range req.Resources { + m[tp] = true + } + fs.brokersLock.Lock() + fs.brokers[clientName] = m + glog.V(0).Infof("+ broker %v", clientName) + fs.brokersLock.Unlock() + + defer func() { + fs.brokersLock.Lock() + delete(fs.brokers, clientName) + glog.V(0).Infof("- broker %v: %v", clientName, err) + fs.brokersLock.Unlock() + }() + + for { + if err := stream.Send(&filer_pb.KeepConnectedResponse{}); err != nil { + glog.V(0).Infof("send broker %v: %+v", clientName, err) + return err + } + // println("replied") + + if _, err := stream.Recv(); err != nil { + glog.V(0).Infof("recv broker %v: %v", clientName, err) + return err + } + // println("received") + } + +} + +func (fs *FilerServer) LocateBroker(ctx context.Context, req *filer_pb.LocateBrokerRequest) (resp *filer_pb.LocateBrokerResponse, err error) { + + resp = &filer_pb.LocateBrokerResponse{} + + fs.brokersLock.Lock() + defer fs.brokersLock.Unlock() + + var localBrokers []*filer_pb.LocateBrokerResponse_Resource + + for b, m := range fs.brokers { + if _, found := m[req.Resource]; found { + resp.Found = true + resp.Resources = []*filer_pb.LocateBrokerResponse_Resource{ + { + GrpcAddresses: b, + ResourceCount: int32(len(m)), + }, + } + return + } + localBrokers = append(localBrokers, &filer_pb.LocateBrokerResponse_Resource{ + GrpcAddresses: b, + ResourceCount: int32(len(m)), + }) + } + + resp.Resources = localBrokers + + return resp, nil + } diff --git a/weed/server/filer_grpc_server_kv.go b/weed/server/filer_grpc_server_kv.go new file mode 100644 index 000000000..3cb47115e --- /dev/null +++ b/weed/server/filer_grpc_server_kv.go @@ -0,0 +1,42 @@ +package weed_server + +import ( + "context" + "github.com/chrislusf/seaweedfs/weed/filer" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" +) + +func (fs *FilerServer) KvGet(ctx context.Context, req *filer_pb.KvGetRequest) (*filer_pb.KvGetResponse, error) { + + value, err := fs.filer.Store.KvGet(ctx, req.Key) + if err == filer.ErrKvNotFound { + return &filer_pb.KvGetResponse{}, nil + } + + if err != nil { + return &filer_pb.KvGetResponse{Error: err.Error()}, nil + } + + return &filer_pb.KvGetResponse{ + Value: value, + }, nil + +} + +// KvPut sets the key~value. 
If the value is empty, the KV entry is deleted instead of stored. +func (fs *FilerServer) KvPut(ctx context.Context, req *filer_pb.KvPutRequest) (*filer_pb.KvPutResponse, error) { + + if len(req.Value) == 0 { + if err := fs.filer.Store.KvDelete(ctx, req.Key); err != nil { + return &filer_pb.KvPutResponse{Error: err.Error()}, nil + } + return &filer_pb.KvPutResponse{}, nil + } + + err := fs.filer.Store.KvPut(ctx, req.Key, req.Value) + if err != nil { + return &filer_pb.KvPutResponse{Error: err.Error()}, nil + } + + return &filer_pb.KvPutResponse{}, nil + +} diff --git a/weed/server/filer_grpc_server_rename.go b/weed/server/filer_grpc_server_rename.go index dfa59e7fe..eadb970d5 100644 --- a/weed/server/filer_grpc_server_rename.go +++ b/weed/server/filer_grpc_server_rename.go @@ -3,61 +3,67 @@ package weed_server import ( "context" "fmt" - "github.com/chrislusf/seaweedfs/weed/filer2" + "path/filepath" + + "github.com/chrislusf/seaweedfs/weed/filer" "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" - "path/filepath" + "github.com/chrislusf/seaweedfs/weed/util" ) func (fs *FilerServer) AtomicRenameEntry(ctx context.Context, req *filer_pb.AtomicRenameEntryRequest) (*filer_pb.AtomicRenameEntryResponse, error) { glog.V(1).Infof("AtomicRenameEntry %v", req) + oldParent := util.FullPath(filepath.ToSlash(req.OldDirectory)) + newParent := util.FullPath(filepath.ToSlash(req.NewDirectory)) + + if err := fs.filer.CanRename(oldParent, newParent); err != nil { + return nil, err + } + ctx, err := fs.filer.BeginTransaction(ctx) if err != nil { return nil, err } - oldParent := filer2.FullPath(filepath.ToSlash(req.OldDirectory)) - oldEntry, err := fs.filer.FindEntry(ctx, oldParent.Child(req.OldName)) if err != nil { fs.filer.RollbackTransaction(ctx) return nil, fmt.Errorf("%s/%s not found: %v", req.OldDirectory, req.OldName, err) } - var events MoveEvents - moveErr := fs.moveEntry(ctx, oldParent, oldEntry, filer2.FullPath(filepath.ToSlash(req.NewDirectory)), req.NewName, &events) + moveErr := fs.moveEntry(ctx, oldParent, oldEntry, newParent, req.NewName) if moveErr != nil { fs.filer.RollbackTransaction(ctx) - return nil, fmt.Errorf("%s/%s move error: %v", req.OldDirectory, req.OldName, err) + return nil, fmt.Errorf("%s/%s move error: %v", req.OldDirectory, req.OldName, moveErr) } else { if commitError := fs.filer.CommitTransaction(ctx); commitError != nil { fs.filer.RollbackTransaction(ctx) - return nil, fmt.Errorf("%s/%s move commit error: %v", req.OldDirectory, req.OldName, err) + return nil, fmt.Errorf("%s/%s move commit error: %v", req.OldDirectory, req.OldName, commitError) } } - for _, entry := range events.newEntries { - fs.filer.NotifyUpdateEvent(nil, entry, false) - } - for _, entry := range events.oldEntries { - fs.filer.NotifyUpdateEvent(entry, nil, false) - } - return &filer_pb.AtomicRenameEntryResponse{}, nil } -func (fs *FilerServer) moveEntry(ctx context.Context, oldParent filer2.FullPath, entry *filer2.Entry, newParent filer2.FullPath, newName string, events *MoveEvents) error { - if entry.IsDirectory() { - if err := fs.moveFolderSubEntries(ctx, oldParent, entry, newParent, newName, events); err != nil { - return err +func (fs *FilerServer) moveEntry(ctx context.Context, oldParent util.FullPath, entry *filer.Entry, newParent util.FullPath, newName string) error { + + if err := fs.moveSelfEntry(ctx, oldParent, entry, newParent, newName, func() error { + if entry.IsDirectory() { + if err := fs.moveFolderSubEntries(ctx, oldParent, entry, newParent, newName); err != nil { + return err + } } + return nil + }); err != nil { + 
return fmt.Errorf("fail to move %s => %s: %v", oldParent.Child(entry.Name()), newParent.Child(newName), err) } - return fs.moveSelfEntry(ctx, oldParent, entry, newParent, newName, events) + + return nil } -func (fs *FilerServer) moveFolderSubEntries(ctx context.Context, oldParent filer2.FullPath, entry *filer2.Entry, newParent filer2.FullPath, newName string, events *MoveEvents) error { +func (fs *FilerServer) moveFolderSubEntries(ctx context.Context, oldParent util.FullPath, entry *filer.Entry, newParent util.FullPath, newName string) error { currentDirPath := oldParent.Child(entry.Name()) newDirPath := newParent.Child(newName) @@ -68,7 +74,7 @@ func (fs *FilerServer) moveFolderSubEntries(ctx context.Context, oldParent filer includeLastFile := false for { - entries, err := fs.filer.ListDirectoryEntries(ctx, currentDirPath, lastFileName, includeLastFile, 1024) + entries, hasMore, err := fs.filer.ListDirectoryEntries(ctx, currentDirPath, lastFileName, includeLastFile, 1024, "", "", "") if err != nil { return err } @@ -78,19 +84,19 @@ func (fs *FilerServer) moveFolderSubEntries(ctx context.Context, oldParent filer for _, item := range entries { lastFileName = item.Name() // println("processing", lastFileName) - err := fs.moveEntry(ctx, currentDirPath, item, newDirPath, item.Name(), events) + err := fs.moveEntry(ctx, currentDirPath, item, newDirPath, item.Name()) if err != nil { return err } } - if len(entries) < 1024 { + if !hasMore { break } } return nil } -func (fs *FilerServer) moveSelfEntry(ctx context.Context, oldParent filer2.FullPath, entry *filer2.Entry, newParent filer2.FullPath, newName string, events *MoveEvents) error { +func (fs *FilerServer) moveSelfEntry(ctx context.Context, oldParent util.FullPath, entry *filer.Entry, newParent util.FullPath, newName string, moveFolderSubEntries func() error) error { oldPath, newPath := oldParent.Child(entry.Name()), newParent.Child(newName) @@ -102,29 +108,30 @@ func (fs *FilerServer) moveSelfEntry(ctx context.Context, oldParent filer2.FullP } // add to new directory - newEntry := &filer2.Entry{ + newEntry := &filer.Entry{ FullPath: newPath, Attr: entry.Attr, Chunks: entry.Chunks, + Extended: entry.Extended, + Content: entry.Content, } - createErr := fs.filer.CreateEntry(ctx, newEntry) + createErr := fs.filer.CreateEntry(ctx, newEntry, false, false, nil) if createErr != nil { return createErr } + if moveFolderSubEntries != nil { + if moveChildrenErr := moveFolderSubEntries(); moveChildrenErr != nil { + return moveChildrenErr + } + } + // delete old entry - deleteErr := fs.filer.DeleteEntryMetaAndData(ctx, oldPath, false, false, false) + deleteErr := fs.filer.DeleteEntryMetaAndData(ctx, oldPath, false, false, false, false, nil) if deleteErr != nil { return deleteErr } - events.oldEntries = append(events.oldEntries, entry) - events.newEntries = append(events.newEntries, newEntry) return nil } - -type MoveEvents struct { - oldEntries []*filer2.Entry - newEntries []*filer2.Entry -} diff --git a/weed/server/filer_grpc_server_sub_meta.go b/weed/server/filer_grpc_server_sub_meta.go new file mode 100644 index 000000000..d9f91b125 --- /dev/null +++ b/weed/server/filer_grpc_server_sub_meta.go @@ -0,0 +1,202 @@ +package weed_server + +import ( + "fmt" + "github.com/chrislusf/seaweedfs/weed/util/log_buffer" + "strings" + "time" + + "github.com/golang/protobuf/proto" + + "github.com/chrislusf/seaweedfs/weed/filer" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/util" 
+) + +func (fs *FilerServer) SubscribeMetadata(req *filer_pb.SubscribeMetadataRequest, stream filer_pb.SeaweedFiler_SubscribeMetadataServer) error { + + peerAddress := findClientAddress(stream.Context(), 0) + + clientName := fs.addClient(req.ClientName, peerAddress) + + defer fs.deleteClient(clientName) + + lastReadTime := time.Unix(0, req.SinceNs) + glog.V(0).Infof(" %v starts to subscribe %s from %+v", clientName, req.PathPrefix, lastReadTime) + + eachEventNotificationFn := fs.eachEventNotificationFn(req, stream, clientName, req.Signature) + + eachLogEntryFn := eachLogEntryFn(eachEventNotificationFn) + + var processedTsNs int64 + var err error + + for { + + processedTsNs, err = fs.filer.ReadPersistedLogBuffer(lastReadTime, eachLogEntryFn) + if err != nil { + return fmt.Errorf("reading from persisted logs: %v", err) + } + + if processedTsNs != 0 { + lastReadTime = time.Unix(0, processedTsNs) + } + + lastReadTime, err = fs.filer.MetaAggregator.MetaLogBuffer.LoopProcessLogData(lastReadTime, func() bool { + fs.filer.MetaAggregator.ListenersLock.Lock() + fs.filer.MetaAggregator.ListenersCond.Wait() + fs.filer.MetaAggregator.ListenersLock.Unlock() + return true + }, eachLogEntryFn) + if err != nil { + if err == log_buffer.ResumeFromDiskError { + continue + } + glog.Errorf("processed to %v: %v", lastReadTime, err) + time.Sleep(3127 * time.Millisecond) + if err != log_buffer.ResumeError { + break + } + } + } + + return err + +} + +func (fs *FilerServer) SubscribeLocalMetadata(req *filer_pb.SubscribeMetadataRequest, stream filer_pb.SeaweedFiler_SubscribeLocalMetadataServer) error { + + peerAddress := findClientAddress(stream.Context(), 0) + + clientName := fs.addClient(req.ClientName, peerAddress) + + defer fs.deleteClient(clientName) + + lastReadTime := time.Unix(0, req.SinceNs) + glog.V(0).Infof(" %v local subscribe %s from %+v", clientName, req.PathPrefix, lastReadTime) + + eachEventNotificationFn := fs.eachEventNotificationFn(req, stream, clientName, req.Signature) + + eachLogEntryFn := eachLogEntryFn(eachEventNotificationFn) + + var processedTsNs int64 + var err error + + for { + // println("reading from persisted logs ...") + processedTsNs, err = fs.filer.ReadPersistedLogBuffer(lastReadTime, eachLogEntryFn) + if err != nil { + return fmt.Errorf("reading from persisted logs: %v", err) + } + + if processedTsNs != 0 { + lastReadTime = time.Unix(0, processedTsNs) + } + // glog.V(0).Infof("after local log reads, %v local subscribe %s from %+v", clientName, req.PathPrefix, lastReadTime) + + // println("reading from in memory logs ...") + + lastReadTime, err = fs.filer.LocalMetaLogBuffer.LoopProcessLogData(lastReadTime, func() bool { + fs.listenersLock.Lock() + fs.listenersCond.Wait() + fs.listenersLock.Unlock() + return true + }, eachLogEntryFn) + if err != nil { + if err == log_buffer.ResumeFromDiskError { + continue + } + glog.Errorf("processed to %v: %v", lastReadTime, err) + time.Sleep(3127 * time.Millisecond) + if err != log_buffer.ResumeError { + break + } + } + } + + return err + +} + +func eachLogEntryFn(eachEventNotificationFn func(dirPath string, eventNotification *filer_pb.EventNotification, tsNs int64) error) func(logEntry *filer_pb.LogEntry) error { + return func(logEntry *filer_pb.LogEntry) error { + event := &filer_pb.SubscribeMetadataResponse{} + if err := proto.Unmarshal(logEntry.Data, event); err != nil { + glog.Errorf("unexpected unmarshal filer_pb.SubscribeMetadataResponse: %v", err) + return fmt.Errorf("unexpected unmarshal filer_pb.SubscribeMetadataResponse: %v", err) + } + 
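The unmarshal above works because each meta log entry's Data field carries a serialized filer_pb.SubscribeMetadataResponse. A minimal round-trip sketch of that encoding, with made-up field values:

package main

import (
	"fmt"

	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
	"github.com/golang/protobuf/proto"
)

func main() {
	// what the filer appends to its meta log: a serialized event
	event := &filer_pb.SubscribeMetadataResponse{
		Directory: "/buckets/demo", // example values, not real data
		TsNs:      1234567890,
	}
	data, err := proto.Marshal(event)
	if err != nil {
		panic(err)
	}

	// what eachLogEntryFn does with logEntry.Data on the way back out
	decoded := &filer_pb.SubscribeMetadataResponse{}
	if err := proto.Unmarshal(data, decoded); err != nil {
		panic(err)
	}
	fmt.Println(decoded.Directory, decoded.TsNs)
}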
+ if err := eachEventNotificationFn(event.Directory, event.EventNotification, event.TsNs); err != nil { + return err + } + + return nil + } +} + +func (fs *FilerServer) eachEventNotificationFn(req *filer_pb.SubscribeMetadataRequest, stream filer_pb.SeaweedFiler_SubscribeMetadataServer, clientName string, clientSignature int32) func(dirPath string, eventNotification *filer_pb.EventNotification, tsNs int64) error { + return func(dirPath string, eventNotification *filer_pb.EventNotification, tsNs int64) error { + + foundSelf := false + for _, sig := range eventNotification.Signatures { + if sig == clientSignature && clientSignature != 0 { + return nil + } + if sig == fs.filer.Signature { + foundSelf = true + } + } + if !foundSelf { + eventNotification.Signatures = append(eventNotification.Signatures, fs.filer.Signature) + } + + // get complete path to the file or directory + var entryName string + if eventNotification.OldEntry != nil { + entryName = eventNotification.OldEntry.Name + } else if eventNotification.NewEntry != nil { + entryName = eventNotification.NewEntry.Name + } + + fullpath := util.Join(dirPath, entryName) + + // skip on filer internal meta logs + if strings.HasPrefix(fullpath, filer.SystemLogDir) { + return nil + } + + if !strings.HasPrefix(fullpath, req.PathPrefix) { + if eventNotification.NewParentPath != "" { + newFullPath := util.Join(eventNotification.NewParentPath, entryName) + if !strings.HasPrefix(newFullPath, req.PathPrefix) { + return nil + } + } else { + return nil + } + } + + message := &filer_pb.SubscribeMetadataResponse{ + Directory: dirPath, + EventNotification: eventNotification, + TsNs: tsNs, + } + // println("sending", dirPath, entryName) + if err := stream.Send(message); err != nil { + glog.V(0).Infof("=> client %v: %+v", clientName, err) + return err + } + return nil + } +} + +func (fs *FilerServer) addClient(clientType string, clientAddress string) (clientName string) { + clientName = clientType + "@" + clientAddress + glog.V(0).Infof("+ listener %v", clientName) + return +} + +func (fs *FilerServer) deleteClient(clientName string) { + glog.V(0).Infof("- listener %v", clientName) +} diff --git a/weed/server/filer_server.go b/weed/server/filer_server.go index 41ba81366..2734223ea 100644 --- a/weed/server/filer_server.go +++ b/weed/server/filer_server.go @@ -5,23 +5,35 @@ import ( "fmt" "net/http" "os" + "sync" "time" + "github.com/chrislusf/seaweedfs/weed/stats" + + "google.golang.org/grpc" + + "github.com/chrislusf/seaweedfs/weed/util/grace" + "github.com/chrislusf/seaweedfs/weed/operation" + "github.com/chrislusf/seaweedfs/weed/pb" "github.com/chrislusf/seaweedfs/weed/pb/master_pb" - "github.com/chrislusf/seaweedfs/weed/stats" "github.com/chrislusf/seaweedfs/weed/util" - "google.golang.org/grpc" - "github.com/chrislusf/seaweedfs/weed/filer2" - _ "github.com/chrislusf/seaweedfs/weed/filer2/cassandra" - _ "github.com/chrislusf/seaweedfs/weed/filer2/etcd" - _ "github.com/chrislusf/seaweedfs/weed/filer2/leveldb" - _ "github.com/chrislusf/seaweedfs/weed/filer2/leveldb2" - _ "github.com/chrislusf/seaweedfs/weed/filer2/mysql" - _ "github.com/chrislusf/seaweedfs/weed/filer2/postgres" - _ "github.com/chrislusf/seaweedfs/weed/filer2/redis" - _ "github.com/chrislusf/seaweedfs/weed/filer2/tikv" + "github.com/chrislusf/seaweedfs/weed/filer" + _ "github.com/chrislusf/seaweedfs/weed/filer/cassandra" + _ "github.com/chrislusf/seaweedfs/weed/filer/elastic/v7" + _ "github.com/chrislusf/seaweedfs/weed/filer/etcd" + _ "github.com/chrislusf/seaweedfs/weed/filer/hbase" + _ 
"github.com/chrislusf/seaweedfs/weed/filer/leveldb" + _ "github.com/chrislusf/seaweedfs/weed/filer/leveldb2" + _ "github.com/chrislusf/seaweedfs/weed/filer/leveldb3" + _ "github.com/chrislusf/seaweedfs/weed/filer/mongodb" + _ "github.com/chrislusf/seaweedfs/weed/filer/mysql" + _ "github.com/chrislusf/seaweedfs/weed/filer/mysql2" + _ "github.com/chrislusf/seaweedfs/weed/filer/postgres" + _ "github.com/chrislusf/seaweedfs/weed/filer/postgres2" + _ "github.com/chrislusf/seaweedfs/weed/filer/redis" + _ "github.com/chrislusf/seaweedfs/weed/filer/redis2" "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/notification" _ "github.com/chrislusf/seaweedfs/weed/notification/aws_sqs" @@ -30,46 +42,74 @@ import ( _ "github.com/chrislusf/seaweedfs/weed/notification/kafka" _ "github.com/chrislusf/seaweedfs/weed/notification/log" "github.com/chrislusf/seaweedfs/weed/security" - "github.com/spf13/viper" ) type FilerOption struct { - Masters []string - Collection string - DefaultReplication string - RedirectOnRead bool - DisableDirListing bool - MaxMB int - DirListingLimit int - DataCenter string - DefaultLevelDbDir string - DisableHttp bool - Port int + Masters []string + Collection string + DefaultReplication string + DisableDirListing bool + MaxMB int + DirListingLimit int + DataCenter string + Rack string + DefaultLevelDbDir string + DisableHttp bool + Host string + Port uint32 + recursiveDelete bool + Cipher bool + SaveToFilerLimit int64 + Filers []string + ConcurrentUploadLimit int64 } type FilerServer struct { option *FilerOption secret security.SigningKey - filer *filer2.Filer + filer *filer.Filer grpcDialOption grpc.DialOption + + // metrics read from the master + metricsAddress string + metricsIntervalSec int + + // notifying clients + listenersLock sync.Mutex + listenersCond *sync.Cond + + brokers map[string]map[string]bool + brokersLock sync.Mutex + + inFlightDataSize int64 + inFlightDataLimitCond *sync.Cond } func NewFilerServer(defaultMux, readonlyMux *http.ServeMux, option *FilerOption) (fs *FilerServer, err error) { fs = &FilerServer{ - option: option, - grpcDialOption: security.LoadClientTLS(viper.Sub("grpc"), "filer"), + option: option, + grpcDialOption: security.LoadClientTLS(util.GetViper(), "grpc.filer"), + brokers: make(map[string]map[string]bool), + inFlightDataLimitCond: sync.NewCond(new(sync.Mutex)), } + fs.listenersCond = sync.NewCond(&fs.listenersLock) if len(option.Masters) == 0 { glog.Fatal("master list is required!") } - fs.filer = filer2.NewFiler(option.Masters, fs.grpcDialOption) + fs.filer = filer.NewFiler(option.Masters, fs.grpcDialOption, option.Host, option.Port, option.Collection, option.DefaultReplication, option.DataCenter, func() { + fs.listenersCond.Broadcast() + }) + fs.filer.Cipher = option.Cipher + + fs.checkWithMaster() + go stats.LoopPushingMetric("filer", stats.SourceName(fs.option.Port), fs.metricsAddress, fs.metricsIntervalSec) go fs.filer.KeepConnectedToMaster() - v := viper.GetViper() + v := util.GetViper() if !util.LoadConfiguration("filer", false) { v.Set("leveldb2.enabled", true) v.Set("leveldb2.dir", option.DefaultLevelDbDir) @@ -77,56 +117,73 @@ func NewFilerServer(defaultMux, readonlyMux *http.ServeMux, option *FilerOption) if os.IsNotExist(err) { os.MkdirAll(option.DefaultLevelDbDir, 0755) } + glog.V(0).Infof("default to create filer store dir in %s", option.DefaultLevelDbDir) + } else { + glog.Warningf("skipping default store dir in %s", option.DefaultLevelDbDir) } util.LoadConfiguration("notification", false) + 
fs.option.recursiveDelete = v.GetBool("filer.options.recursive_delete") + v.SetDefault("filer.options.buckets_folder", "/buckets") + fs.filer.DirBucketsPath = v.GetString("filer.options.buckets_folder") + // TODO deprecated, will be removed after 2020-12-31 + // replaced by https://github.com/chrislusf/seaweedfs/wiki/Path-Specific-Configuration + fs.filer.FsyncBuckets = v.GetStringSlice("filer.options.buckets_fsync") fs.filer.LoadConfiguration(v) - notification.LoadConfiguration(v.Sub("notification")) + notification.LoadConfiguration(v, "notification.") handleStaticResources(defaultMux) if !option.DisableHttp { defaultMux.HandleFunc("/", fs.filerHandler) } if defaultMux != readonlyMux { + handleStaticResources(readonlyMux) readonlyMux.HandleFunc("/", fs.readonlyFilerHandler) } - maybeStartMetrics(fs, option) + fs.filer.AggregateFromPeers(fmt.Sprintf("%s:%d", option.Host, option.Port), option.Filers) + + fs.filer.LoadBuckets() + + fs.filer.LoadFilerConf() + + grace.OnInterrupt(func() { + fs.filer.Shutdown() + }) return fs, nil } -func maybeStartMetrics(fs *FilerServer, option *FilerOption) { +func (fs *FilerServer) checkWithMaster() { + + for _, master := range fs.option.Masters { + _, err := pb.ParseServerToGrpcAddress(master) + if err != nil { + glog.Fatalf("invalid master address %s: %v", master, err) + } + } + isConnected := false - var metricsAddress string - var metricsIntervalSec int - var readErr error for !isConnected { - metricsAddress, metricsIntervalSec, readErr = readFilerConfiguration(fs.grpcDialOption, option.Masters[0]) - if readErr == nil { - isConnected = true - } else { - time.Sleep(7 * time.Second) + for _, master := range fs.option.Masters { + readErr := operation.WithMasterServerClient(master, fs.grpcDialOption, func(masterClient master_pb.SeaweedClient) error { + resp, err := masterClient.GetMasterConfiguration(context.Background(), &master_pb.GetMasterConfigurationRequest{}) + if err != nil { + return fmt.Errorf("get master %s configuration: %v", master, err) + } + fs.metricsAddress, fs.metricsIntervalSec = resp.MetricsAddress, int(resp.MetricsIntervalSeconds) + if fs.option.DefaultReplication == "" { + fs.option.DefaultReplication = resp.DefaultReplication + } + return nil + }) + if readErr == nil { + isConnected = true + } else { + time.Sleep(7 * time.Second) + } } } - if metricsAddress == "" && metricsIntervalSec <= 0 { - return - } - go stats.LoopPushingMetric("filer", stats.SourceName(option.Port), stats.FilerGather, - func() (addr string, intervalSeconds int) { - return metricsAddress, metricsIntervalSec - }) -} -func readFilerConfiguration(grpcDialOption grpc.DialOption, masterGrpcAddress string) (metricsAddress string, metricsIntervalSec int, err error) { - err = operation.WithMasterServerClient(masterGrpcAddress, grpcDialOption, func(masterClient master_pb.SeaweedClient) error { - resp, err := masterClient.GetMasterConfiguration(context.Background(), &master_pb.GetMasterConfigurationRequest{}) - if err != nil { - return fmt.Errorf("get master %s configuration: %v", masterGrpcAddress, err) - } - metricsAddress, metricsIntervalSec = resp.MetricsAddress, int(resp.MetricsIntervalSeconds) - return nil - }) - return } diff --git a/weed/server/filer_server_handlers.go b/weed/server/filer_server_handlers.go index b6bfc3b04..ed6bbb6f6 100644 --- a/weed/server/filer_server_handlers.go +++ b/weed/server/filer_server_handlers.go @@ -1,14 +1,36 @@ package weed_server import ( + "github.com/chrislusf/seaweedfs/weed/util" "net/http" + "strings" + "sync/atomic" "time" 
"github.com/chrislusf/seaweedfs/weed/stats" ) func (fs *FilerServer) filerHandler(w http.ResponseWriter, r *http.Request) { + start := time.Now() + + // proxy to volume servers + var fileId string + if strings.HasPrefix(r.RequestURI, "/?proxyChunkId=") { + fileId = r.RequestURI[len("/?proxyChunkId="):] + } + if fileId != "" { + stats.FilerRequestCounter.WithLabelValues("proxy").Inc() + fs.proxyToVolumeServer(w, r, fileId) + stats.FilerRequestHistogram.WithLabelValues("proxy").Observe(time.Since(start).Seconds()) + return + } + + w.Header().Set("Server", "SeaweedFS Filer "+util.VERSION) + if r.Header.Get("Origin") != "" { + w.Header().Set("Access-Control-Allow-Origin", "*") + w.Header().Set("Access-Control-Allow-Credentials", "true") + } switch r.Method { case "GET": stats.FilerRequestCounter.WithLabelValues("get").Inc() @@ -20,20 +42,53 @@ func (fs *FilerServer) filerHandler(w http.ResponseWriter, r *http.Request) { stats.FilerRequestHistogram.WithLabelValues("head").Observe(time.Since(start).Seconds()) case "DELETE": stats.FilerRequestCounter.WithLabelValues("delete").Inc() - fs.DeleteHandler(w, r) + if _, ok := r.URL.Query()["tagging"]; ok { + fs.DeleteTaggingHandler(w, r) + } else { + fs.DeleteHandler(w, r) + } stats.FilerRequestHistogram.WithLabelValues("delete").Observe(time.Since(start).Seconds()) - case "PUT": - stats.FilerRequestCounter.WithLabelValues("put").Inc() - fs.PostHandler(w, r) - stats.FilerRequestHistogram.WithLabelValues("put").Observe(time.Since(start).Seconds()) - case "POST": - stats.FilerRequestCounter.WithLabelValues("post").Inc() - fs.PostHandler(w, r) - stats.FilerRequestHistogram.WithLabelValues("post").Observe(time.Since(start).Seconds()) + case "POST", "PUT": + + // wait until in flight data is less than the limit + contentLength := getContentLength(r) + fs.inFlightDataLimitCond.L.Lock() + for atomic.LoadInt64(&fs.inFlightDataSize) > fs.option.ConcurrentUploadLimit { + fs.inFlightDataLimitCond.Wait() + } + atomic.AddInt64(&fs.inFlightDataSize, contentLength) + fs.inFlightDataLimitCond.L.Unlock() + defer func() { + atomic.AddInt64(&fs.inFlightDataSize, -contentLength) + fs.inFlightDataLimitCond.Signal() + }() + + if r.Method == "PUT" { + stats.FilerRequestCounter.WithLabelValues("put").Inc() + if _, ok := r.URL.Query()["tagging"]; ok { + fs.PutTaggingHandler(w, r) + } else { + fs.PostHandler(w, r, contentLength) + } + stats.FilerRequestHistogram.WithLabelValues("put").Observe(time.Since(start).Seconds()) + } else { // method == "POST" + stats.FilerRequestCounter.WithLabelValues("post").Inc() + fs.PostHandler(w, r, contentLength) + stats.FilerRequestHistogram.WithLabelValues("post").Observe(time.Since(start).Seconds()) + } + case "OPTIONS": + stats.FilerRequestCounter.WithLabelValues("options").Inc() + OptionsHandler(w, r, false) + stats.FilerRequestHistogram.WithLabelValues("head").Observe(time.Since(start).Seconds()) } } func (fs *FilerServer) readonlyFilerHandler(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Server", "SeaweedFS Filer "+util.VERSION) + if r.Header.Get("Origin") != "" { + w.Header().Set("Access-Control-Allow-Origin", "*") + w.Header().Set("Access-Control-Allow-Credentials", "true") + } start := time.Now() switch r.Method { case "GET": @@ -44,5 +99,18 @@ func (fs *FilerServer) readonlyFilerHandler(w http.ResponseWriter, r *http.Reque stats.FilerRequestCounter.WithLabelValues("head").Inc() fs.GetOrHeadHandler(w, r, false) stats.FilerRequestHistogram.WithLabelValues("head").Observe(time.Since(start).Seconds()) + case "OPTIONS": + 
stats.FilerRequestCounter.WithLabelValues("options").Inc() + OptionsHandler(w, r, true) + stats.FilerRequestHistogram.WithLabelValues("options").Observe(time.Since(start).Seconds()) + } +} + +func OptionsHandler(w http.ResponseWriter, r *http.Request, isReadOnly bool) { + if isReadOnly { + w.Header().Add("Access-Control-Allow-Methods", "GET, OPTIONS") + } else { + w.Header().Add("Access-Control-Allow-Methods", "PUT, POST, GET, DELETE, OPTIONS") } + w.Header().Add("Access-Control-Allow-Headers", "*") } diff --git a/weed/server/filer_server_handlers_proxy.go b/weed/server/filer_server_handlers_proxy.go new file mode 100644 index 000000000..b8b28790b --- /dev/null +++ b/weed/server/filer_server_handlers_proxy.go @@ -0,0 +1,67 @@ +package weed_server + +import ( + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/util" + "io" + "math/rand" + "net/http" +) + +var ( + client *http.Client +) + +func init() { + client = &http.Client{Transport: &http.Transport{ + MaxIdleConns: 1024, + MaxIdleConnsPerHost: 1024, + }} +} + +func (fs *FilerServer) proxyToVolumeServer(w http.ResponseWriter, r *http.Request, fileId string) { + + urlStrings, err := fs.filer.MasterClient.GetLookupFileIdFunction()(fileId) + if err != nil { + glog.Errorf("locate %s: %v", fileId, err) + w.WriteHeader(http.StatusInternalServerError) + return + } + + if len(urlStrings) == 0 { + w.WriteHeader(http.StatusNotFound) + return + } + + proxyReq, err := http.NewRequest(r.Method, urlStrings[rand.Intn(len(urlStrings))], r.Body) + if err != nil { + glog.Errorf("NewRequest %s: %v", urlStrings[0], err) + w.WriteHeader(http.StatusInternalServerError) + return + } + + proxyReq.Header.Set("Host", r.Host) + proxyReq.Header.Set("X-Forwarded-For", r.RemoteAddr) + + for header, values := range r.Header { + for _, value := range values { + proxyReq.Header.Add(header, value) + } + } + + proxyResponse, postErr := client.Do(proxyReq) + + if postErr != nil { + glog.Errorf("proxy to volume server: %v", postErr) + w.WriteHeader(http.StatusInternalServerError) + return + } + defer util.CloseResponse(proxyResponse) + + for k, v := range proxyResponse.Header { + w.Header()[k] = v + } + w.WriteHeader(proxyResponse.StatusCode) + io.Copy(w, proxyResponse.Body) + +} diff --git a/weed/server/filer_server_handlers_read.go b/weed/server/filer_server_handlers_read.go index ba21298ba..6bc09e953 100644 --- a/weed/server/filer_server_handlers_read.go +++ b/weed/server/filer_server_handlers_read.go @@ -1,19 +1,22 @@ package weed_server import ( + "bytes" "context" "io" - "io/ioutil" "mime" - "mime/multipart" "net/http" "net/url" - "path" + "path/filepath" "strconv" "strings" + "time" - "github.com/chrislusf/seaweedfs/weed/filer2" + "github.com/chrislusf/seaweedfs/weed/filer" "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/images" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + xhttp "github.com/chrislusf/seaweedfs/weed/s3api/http" "github.com/chrislusf/seaweedfs/weed/stats" "github.com/chrislusf/seaweedfs/weed/util" ) @@ -26,13 +29,13 @@ func (fs *FilerServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request, path = path[:len(path)-1] } - entry, err := fs.filer.FindEntry(context.Background(), filer2.FullPath(path)) + entry, err := fs.filer.FindEntry(context.Background(), util.FullPath(path)) if err != nil { if path == "/" { fs.listDirectoryHandler(w, r) return } - if err == filer2.ErrNotFound { + if err == filer_pb.ErrNotFound { glog.V(1).Infof("Not found %s: %v", path, err) 
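The stats calls threaded through these handlers follow the standard Prometheus client pattern: a CounterVec for request counts and a HistogramVec for latency, both keyed by a request-type label. A self-contained sketch of that pattern, with invented metric names:

package main

import (
	"time"

	"github.com/prometheus/client_golang/prometheus"
)

var (
	// hypothetical metric names, for illustration only
	requestCounter = prometheus.NewCounterVec(
		prometheus.CounterOpts{Name: "filer_request_total", Help: "filer requests"},
		[]string{"type"},
	)
	requestHistogram = prometheus.NewHistogramVec(
		prometheus.HistogramOpts{Name: "filer_request_seconds", Help: "request latency"},
		[]string{"type"},
	)
)

// instrument counts the call and records its duration under one label.
func instrument(label string, fn func()) {
	start := time.Now()
	requestCounter.WithLabelValues(label).Inc()
	fn()
	requestHistogram.WithLabelValues(label).Observe(time.Since(start).Seconds())
}

func main() {
	prometheus.MustRegister(requestCounter, requestHistogram)
	instrument("get", func() { /* handle the request */ })
}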
stats.FilerRequestCounter.WithLabelValues("read.notfound").Inc() w.WriteHeader(http.StatusNotFound) @@ -58,196 +61,107 @@ func (fs *FilerServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request, return } - if len(entry.Chunks) == 0 { - glog.V(1).Infof("no file chunks for %s, attr=%+v", path, entry.Attr) - stats.FilerRequestCounter.WithLabelValues("read.nocontent").Inc() - w.WriteHeader(http.StatusNoContent) - return - } - w.Header().Set("Accept-Ranges", "bytes") - if r.Method == "HEAD" { - w.Header().Set("Content-Length", strconv.FormatInt(int64(filer2.TotalSize(entry.Chunks)), 10)) - w.Header().Set("Last-Modified", entry.Attr.Mtime.Format(http.TimeFormat)) - setEtag(w, filer2.ETag(entry.Chunks)) - return - } - - if len(entry.Chunks) == 1 { - fs.handleSingleChunk(w, r, entry) - return - } - - fs.handleMultipleChunks(w, r, entry) - -} - -func (fs *FilerServer) handleSingleChunk(w http.ResponseWriter, r *http.Request, entry *filer2.Entry) { - - fileId := entry.Chunks[0].GetFileIdString() - - urlString, err := fs.filer.MasterClient.LookupFileId(fileId) - if err != nil { - glog.V(1).Infof("operation LookupFileId %s failed, err: %v", fileId, err) - w.WriteHeader(http.StatusNotFound) - return - } - - if fs.option.RedirectOnRead { - stats.FilerRequestCounter.WithLabelValues("redirect").Inc() - http.Redirect(w, r, urlString, http.StatusFound) - return - } - - u, _ := url.Parse(urlString) - q := u.Query() - for key, values := range r.URL.Query() { - for _, value := range values { - q.Add(key, value) - } - } - u.RawQuery = q.Encode() - request := &http.Request{ - Method: r.Method, - URL: u, - Proto: r.Proto, - ProtoMajor: r.ProtoMajor, - ProtoMinor: r.ProtoMinor, - Header: r.Header, - Body: r.Body, - Host: r.Host, - ContentLength: r.ContentLength, - } - glog.V(3).Infoln("retrieving from", u) - resp, do_err := util.Do(request) - if do_err != nil { - glog.V(0).Infoln("failing to connect to volume server", do_err.Error()) - writeJsonError(w, r, http.StatusInternalServerError, do_err) - return - } - defer func() { - io.Copy(ioutil.Discard, resp.Body) - resp.Body.Close() - }() - for k, v := range resp.Header { - w.Header()[k] = v - } - if entry.Attr.Mime != "" { - w.Header().Set("Content-Type", entry.Attr.Mime) - } - w.WriteHeader(resp.StatusCode) - io.Copy(w, resp.Body) -} - -func (fs *FilerServer) handleMultipleChunks(w http.ResponseWriter, r *http.Request, entry *filer2.Entry) { + // mime type mimeType := entry.Attr.Mime if mimeType == "" { - if ext := path.Ext(entry.Name()); ext != "" { + if ext := filepath.Ext(entry.Name()); ext != "" { mimeType = mime.TypeByExtension(ext) } } if mimeType != "" { w.Header().Set("Content-Type", mimeType) } - setEtag(w, filer2.ETag(entry.Chunks)) - totalSize := int64(filer2.TotalSize(entry.Chunks)) + // if modified since + if !entry.Attr.Mtime.IsZero() { + w.Header().Set("Last-Modified", entry.Attr.Mtime.UTC().Format(http.TimeFormat)) + if r.Header.Get("If-Modified-Since") != "" { + if t, parseError := time.Parse(http.TimeFormat, r.Header.Get("If-Modified-Since")); parseError == nil { + if !t.Before(entry.Attr.Mtime) { + w.WriteHeader(http.StatusNotModified) + return + } + } + } + } - rangeReq := r.Header.Get("Range") + // print out the header from extended properties + for k, v := range entry.Extended { + w.Header().Set(k, string(v)) + } - if rangeReq == "" { - w.Header().Set("Content-Length", strconv.FormatInt(totalSize, 10)) - if err := fs.writeContent(w, entry, 0, int(totalSize)); err != nil { - http.Error(w, err.Error(), http.StatusInternalServerError) - 
return + // Seaweed custom headers are not visible to Vue or javascript + seaweedHeaders := []string{} + for header := range w.Header() { + if strings.HasPrefix(header, "Seaweed-") { + seaweedHeaders = append(seaweedHeaders, header) } - return } + seaweedHeaders = append(seaweedHeaders, "Content-Disposition") + w.Header().Set("Access-Control-Expose-Headers", strings.Join(seaweedHeaders, ",")) - //the rest is dealing with partial content request - //mostly copy from src/pkg/net/http/fs.go - ranges, err := parseRange(rangeReq, totalSize) - if err != nil { - http.Error(w, err.Error(), http.StatusRequestedRangeNotSatisfiable) - return - } - if sumRangesSize(ranges) > totalSize { - // The total number of bytes in all the ranges - // is larger than the size of the file by - // itself, so this is probably an attack, or a - // dumb client. Ignore the range request. - return + //set tag count + if r.Method == "GET" { + tagCount := 0 + for k := range entry.Extended { + if strings.HasPrefix(k, xhttp.AmzObjectTagging+"-") { + tagCount++ + } + } + if tagCount > 0 { + w.Header().Set(xhttp.AmzTagCount, strconv.Itoa(tagCount)) + } } - if len(ranges) == 0 { + + // set etag + etag := filer.ETagEntry(entry) + if inm := r.Header.Get("If-None-Match"); inm == "\""+etag+"\"" { + w.WriteHeader(http.StatusNotModified) return } - if len(ranges) == 1 { - // RFC 2616, Section 14.16: - // "When an HTTP message includes the content of a single - // range (for example, a response to a request for a - // single range, or to a request for a set of ranges - // that overlap without any holes), this content is - // transmitted with a Content-Range header, and a - // Content-Length header showing the number of bytes - // actually transferred. - // ... - // A response to a request for a single range MUST NOT - // be sent using the multipart/byteranges media type." - ra := ranges[0] - w.Header().Set("Content-Length", strconv.FormatInt(ra.length, 10)) - w.Header().Set("Content-Range", ra.contentRange(totalSize)) - w.WriteHeader(http.StatusPartialContent) - - err = fs.writeContent(w, entry, ra.start, int(ra.length)) - if err != nil { - http.Error(w, err.Error(), http.StatusInternalServerError) - return - } + setEtag(w, etag) + + filename := entry.Name() + filename = url.QueryEscape(filename) + adjustHeaderContentDisposition(w, r, filename) + + totalSize := int64(entry.Size()) + + if r.Method == "HEAD" { + w.Header().Set("Content-Length", strconv.FormatInt(totalSize, 10)) + processRangeRequest(r, w, totalSize, mimeType, func(writer io.Writer, offset int64, size int64) error { + return filer.StreamContent(fs.filer.MasterClient, writer, entry.Chunks, offset, size, true) + }) return } - // process multiple ranges - for _, ra := range ranges { - if ra.start > totalSize { - http.Error(w, "Out of Range", http.StatusRequestedRangeNotSatisfiable) - return - } - } - sendSize := rangesMIMESize(ranges, mimeType, totalSize) - pr, pw := io.Pipe() - mw := multipart.NewWriter(pw) - w.Header().Set("Content-Type", "multipart/byteranges; boundary="+mw.Boundary()) - sendContent := pr - defer pr.Close() // cause writing goroutine to fail and exit if CopyN doesn't finish. 
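The multi-range branch removed below streams a multipart/byteranges body through an io.Pipe: a goroutine writes one MIME part per range into the pipe while the read end is copied to the client, and CloseWithError propagates any failure to the reader. The same pattern appears in the new processRangeRequest helper earlier in this diff. A runnable distillation, with hard-coded ranges standing in for a parsed Range header:

package main

import (
	"fmt"
	"io"
	"mime/multipart"
	"net/textproto"
	"os"
	"strings"
)

// serveRanges writes a multipart/byteranges body for the given ranges:
// a goroutine emits one MIME part per range into the pipe while the
// read end is streamed to the destination.
func serveRanges(dst io.Writer, content string, ranges [][2]int) error {
	pr, pw := io.Pipe()
	mw := multipart.NewWriter(pw)
	fmt.Printf("Content-Type: multipart/byteranges; boundary=%s\n\n", mw.Boundary())

	go func() {
		for _, ra := range ranges { // ra is {start, length}
			h := textproto.MIMEHeader{}
			h.Set("Content-Range", fmt.Sprintf("bytes %d-%d/%d", ra[0], ra[0]+ra[1]-1, len(content)))
			part, err := mw.CreatePart(h)
			if err != nil {
				pw.CloseWithError(err) // fails the reader side as well
				return
			}
			if _, err := io.Copy(part, strings.NewReader(content[ra[0]:ra[0]+ra[1]])); err != nil {
				pw.CloseWithError(err)
				return
			}
		}
		mw.Close()
		pw.Close()
	}()

	_, err := io.Copy(dst, pr)
	return err
}

func main() {
	if err := serveRanges(os.Stdout, "hello, byteranges", [][2]int{{0, 5}, {7, 5}}); err != nil {
		fmt.Println(err)
	}
}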
- go func() { - for _, ra := range ranges { - part, e := mw.CreatePart(ra.mimeHeader(mimeType, totalSize)) - if e != nil { - pw.CloseWithError(e) - return - } - if e = fs.writeContent(part, entry, ra.start, int(ra.length)); e != nil { - pw.CloseWithError(e) + if rangeReq := r.Header.Get("Range"); rangeReq == "" { + ext := filepath.Ext(filename) + width, height, mode, shouldResize := shouldResizeImages(ext, r) + if shouldResize { + data, err := filer.ReadAll(fs.filer.MasterClient, entry.Chunks) + if err != nil { + glog.Errorf("failed to read %s: %v", path, err) + w.WriteHeader(http.StatusNotModified) return } + rs, _, _ := images.Resized(ext, bytes.NewReader(data), width, height, mode) + io.Copy(w, rs) + return } - mw.Close() - pw.Close() - }() - if w.Header().Get("Content-Encoding") == "" { - w.Header().Set("Content-Length", strconv.FormatInt(sendSize, 10)) - } - w.WriteHeader(http.StatusPartialContent) - if _, err := io.CopyN(w, sendContent, sendSize); err != nil { - http.Error(w, "Internal Error", http.StatusInternalServerError) - return } -} - -func (fs *FilerServer) writeContent(w io.Writer, entry *filer2.Entry, offset int64, size int) error { - - return filer2.StreamContent(fs.filer.MasterClient, w, entry.Chunks, offset, size) + processRangeRequest(r, w, totalSize, mimeType, func(writer io.Writer, offset int64, size int64) error { + if offset+size <= int64(len(entry.Content)) { + _, err := writer.Write(entry.Content[offset : offset+size]) + if err != nil { + glog.Errorf("failed to write entry content: %v", err) + } + return err + } + return filer.StreamContent(fs.filer.MasterClient, writer, entry.Chunks, offset, size, false) + }) } diff --git a/weed/server/filer_server_handlers_read_dir.go b/weed/server/filer_server_handlers_read_dir.go index 87e864559..307c411b6 100644 --- a/weed/server/filer_server_handlers_read_dir.go +++ b/weed/server/filer_server_handlers_read_dir.go @@ -2,14 +2,17 @@ package weed_server import ( "context" + "encoding/base64" + "fmt" + "github.com/skip2/go-qrcode" "net/http" "strconv" "strings" - "github.com/chrislusf/seaweedfs/weed/filer2" "github.com/chrislusf/seaweedfs/weed/glog" ui "github.com/chrislusf/seaweedfs/weed/server/filer_ui" "github.com/chrislusf/seaweedfs/weed/stats" + "github.com/chrislusf/seaweedfs/weed/util" ) // listDirectoryHandler lists directories and folders under a directory @@ -31,8 +34,10 @@ func (fs *FilerServer) listDirectoryHandler(w http.ResponseWriter, r *http.Reque } lastFileName := r.FormValue("lastFileName") + namePattern := r.FormValue("namePattern") + namePatternExclude := r.FormValue("namePatternExclude") - entries, err := fs.filer.ListDirectoryEntries(context.Background(), filer2.FullPath(path), lastFileName, false, limit) + entries, shouldDisplayLoadMore, err := fs.filer.ListDirectoryEntries(context.Background(), util.FullPath(path), lastFileName, false, int64(limit), "", namePattern, namePatternExclude) if err != nil { glog.V(0).Infof("listDirectory %s %s %d: %s", path, lastFileName, limit, err) @@ -40,7 +45,6 @@ func (fs *FilerServer) listDirectoryHandler(w http.ResponseWriter, r *http.Reque return } - shouldDisplayLoadMore := len(entries) == limit if path == "/" { path = "" } @@ -65,21 +69,30 @@ func (fs *FilerServer) listDirectoryHandler(w http.ResponseWriter, r *http.Reque lastFileName, shouldDisplayLoadMore, }) - } else { - ui.StatusTpl.Execute(w, struct { - Path string - Breadcrumbs []ui.Breadcrumb - Entries interface{} - Limit int - LastFileName string - ShouldDisplayLoadMore bool - }{ - path, - ui.ToBreadcrumb(path), 
- entries, - limit, - lastFileName, - shouldDisplayLoadMore, - }) + return + } + + var qrImageString string + img, err := qrcode.Encode(fmt.Sprintf("http://%s:%d%s", fs.option.Host, fs.option.Port, r.URL.Path), qrcode.Medium, 128) + if err == nil { + qrImageString = base64.StdEncoding.EncodeToString(img) } + + ui.StatusTpl.Execute(w, struct { + Path string + Breadcrumbs []ui.Breadcrumb + Entries interface{} + Limit int + LastFileName string + ShouldDisplayLoadMore bool + QrImage string + }{ + path, + ui.ToBreadcrumb(path), + entries, + limit, + lastFileName, + shouldDisplayLoadMore, + qrImageString, + }) } diff --git a/weed/server/filer_server_handlers_tagging.go b/weed/server/filer_server_handlers_tagging.go new file mode 100644 index 000000000..50b3a2c06 --- /dev/null +++ b/weed/server/filer_server_handlers_tagging.go @@ -0,0 +1,102 @@ +package weed_server + +import ( + "context" + "net/http" + "strings" + + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/storage/needle" + "github.com/chrislusf/seaweedfs/weed/util" +) + +// add or replace one file's Seaweed- prefixed attributes +// curl -X PUT -H "Seaweed-Name1: value1" http://localhost:8888/path/to/a/file?tagging +func (fs *FilerServer) PutTaggingHandler(w http.ResponseWriter, r *http.Request) { + + ctx := context.Background() + + path := r.URL.Path + if strings.HasSuffix(path, "/") { + path = path[:len(path)-1] + } + + existingEntry, err := fs.filer.FindEntry(ctx, util.FullPath(path)) + if err != nil { + writeJsonError(w, r, http.StatusNotFound, err) + return + } + if existingEntry == nil { + writeJsonError(w, r, http.StatusNotFound, err) + return + } + + if existingEntry.Extended == nil { + existingEntry.Extended = make(map[string][]byte) + } + + for header, values := range r.Header { + if strings.HasPrefix(header, needle.PairNamePrefix) { + for _, value := range values { + existingEntry.Extended[header] = []byte(value) + } + } + } + + if dbErr := fs.filer.CreateEntry(ctx, existingEntry, false, false, nil); dbErr != nil { + glog.V(0).Infof("failed to update %s tagging: %v", path, dbErr) + writeJsonError(w, r, http.StatusInternalServerError, dbErr) + return + } + + writeJsonQuiet(w, r, http.StatusAccepted, nil) + return +} + +// remove all Seaweed- prefixed attributes +// curl -X DELETE http://localhost:8888/path/to/a/file?tagging +func (fs *FilerServer) DeleteTaggingHandler(w http.ResponseWriter, r *http.Request) { + + ctx := context.Background() + + path := r.URL.Path + if strings.HasSuffix(path, "/") { + path = path[:len(path)-1] + } + + existingEntry, err := fs.filer.FindEntry(ctx, util.FullPath(path)) + if err != nil { + writeJsonError(w, r, http.StatusNotFound, err) + return + } + if existingEntry == nil { + writeJsonError(w, r, http.StatusNotFound, err) + return + } + + if existingEntry.Extended == nil { + existingEntry.Extended = make(map[string][]byte) + } + + hasDeletion := false + for header := range existingEntry.Extended { + if strings.HasPrefix(header, needle.PairNamePrefix) { + delete(existingEntry.Extended, header) + hasDeletion = true + } + } + + if !hasDeletion { + writeJsonQuiet(w, r, http.StatusNotModified, nil) + return + } + + if dbErr := fs.filer.CreateEntry(ctx, existingEntry, false, false, nil); dbErr != nil { + glog.V(0).Infof("failed to delete %s tagging: %v", path, dbErr) + writeJsonError(w, r, http.StatusInternalServerError, dbErr) + return + } + + writeJsonQuiet(w, r, http.StatusAccepted, nil) + return +} diff --git a/weed/server/filer_server_handlers_write.go 
b/weed/server/filer_server_handlers_write.go index 236e7027d..95eba9d3d 100644 --- a/weed/server/filer_server_handlers_write.go +++ b/weed/server/filer_server_handlers_write.go @@ -2,26 +2,17 @@ package weed_server import ( "context" - "encoding/json" - "errors" - "fmt" - "io" - "io/ioutil" - "mime" "net/http" - "net/url" "os" - filenamePath "path" - "strconv" "strings" "time" - "github.com/chrislusf/seaweedfs/weed/filer2" "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/operation" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" "github.com/chrislusf/seaweedfs/weed/security" "github.com/chrislusf/seaweedfs/weed/stats" + "github.com/chrislusf/seaweedfs/weed/storage/needle" "github.com/chrislusf/seaweedfs/weed/util" ) @@ -32,271 +23,130 @@ var ( type FilerPostResult struct { Name string `json:"name,omitempty"` - Size uint32 `json:"size,omitempty"` + Size int64 `json:"size,omitempty"` Error string `json:"error,omitempty"` Fid string `json:"fid,omitempty"` Url string `json:"url,omitempty"` } -func (fs *FilerServer) assignNewFileInfo(w http.ResponseWriter, r *http.Request, replication, collection string, dataCenter string) (fileId, urlLocation string, auth security.EncodedJwt, err error) { +func (fs *FilerServer) assignNewFileInfo(so *operation.StorageOption) (fileId, urlLocation string, auth security.EncodedJwt, err error) { stats.FilerRequestCounter.WithLabelValues("assign").Inc() start := time.Now() defer func() { stats.FilerRequestHistogram.WithLabelValues("assign").Observe(time.Since(start).Seconds()) }() - ar := &operation.VolumeAssignRequest{ - Count: 1, - Replication: replication, - Collection: collection, - Ttl: r.URL.Query().Get("ttl"), - DataCenter: dataCenter, - } - var altRequest *operation.VolumeAssignRequest - if dataCenter != "" { - altRequest = &operation.VolumeAssignRequest{ - Count: 1, - Replication: replication, - Collection: collection, - Ttl: r.URL.Query().Get("ttl"), - DataCenter: "", - } - } + ar, altRequest := so.ToAssignRequests(1) - assignResult, ae := operation.Assign(fs.filer.GetMaster(), fs.grpcDialOption, ar, altRequest) + assignResult, ae := operation.Assign(fs.filer.GetMaster, fs.grpcDialOption, ar, altRequest) if ae != nil { glog.Errorf("failing to assign a file id: %v", ae) - writeJsonError(w, r, http.StatusInternalServerError, ae) err = ae return } fileId = assignResult.Fid urlLocation = "http://" + assignResult.Url + "/" + assignResult.Fid + if so.Fsync { + urlLocation += "?fsync=true" + } auth = assignResult.Auth return } -func (fs *FilerServer) PostHandler(w http.ResponseWriter, r *http.Request) { +func (fs *FilerServer) PostHandler(w http.ResponseWriter, r *http.Request, contentLength int64) { ctx := context.Background() query := r.URL.Query() - replication := query.Get("replication") - if replication == "" { - replication = fs.option.DefaultReplication - } - collection := query.Get("collection") - if collection == "" { - collection = fs.option.Collection - } - dataCenter := query.Get("dataCenter") - if dataCenter == "" { - dataCenter = fs.option.DataCenter - } + so := fs.detectStorageOption0(r.RequestURI, + query.Get("collection"), + query.Get("replication"), + query.Get("ttl"), + query.Get("disk"), + query.Get("dataCenter"), + query.Get("rack"), + ) + + fs.autoChunk(ctx, w, r, contentLength, so) + util.CloseRequest(r) - if autoChunked := fs.autoChunk(ctx, w, r, replication, collection, dataCenter); autoChunked { - return - } +} - fileId, urlLocation, auth, err := fs.assignNewFileInfo(w, r, replication, collection, 
dataCenter) +// curl -X DELETE http://localhost:8888/path/to +// curl -X DELETE http://localhost:8888/path/to?recursive=true +// curl -X DELETE http://localhost:8888/path/to?recursive=true&ignoreRecursiveError=true +// curl -X DELETE http://localhost:8888/path/to?recursive=true&skipChunkDeletion=true +func (fs *FilerServer) DeleteHandler(w http.ResponseWriter, r *http.Request) { - if err != nil || fileId == "" || urlLocation == "" { - glog.V(0).Infof("fail to allocate volume for %s, collection:%s, datacenter:%s", r.URL.Path, collection, dataCenter) - return + isRecursive := r.FormValue("recursive") == "true" + if !isRecursive && fs.option.recursiveDelete { + if r.FormValue("recursive") != "false" { + isRecursive = true + } } + ignoreRecursiveError := r.FormValue("ignoreRecursiveError") == "true" + skipChunkDeletion := r.FormValue("skipChunkDeletion") == "true" - glog.V(4).Infof("write %s to %v", r.URL.Path, urlLocation) - - u, _ := url.Parse(urlLocation) - - // This allows a client to generate a chunk manifest and submit it to the filer -- it is a little off - // because they need to provide FIDs instead of file paths... - cm, _ := strconv.ParseBool(query.Get("cm")) - if cm { - q := u.Query() - q.Set("cm", "true") - u.RawQuery = q.Encode() + objectPath := r.URL.Path + if len(r.URL.Path) > 1 && strings.HasSuffix(objectPath, "/") { + objectPath = objectPath[0 : len(objectPath)-1] } - glog.V(4).Infoln("post to", u) - ret, err := fs.uploadToVolumeServer(r, u, auth, w, fileId) + err := fs.filer.DeleteEntryMetaAndData(context.Background(), util.FullPath(objectPath), isRecursive, ignoreRecursiveError, !skipChunkDeletion, false, nil) if err != nil { + glog.V(1).Infoln("deleting", objectPath, ":", err.Error()) + httpStatus := http.StatusInternalServerError + if err == filer_pb.ErrNotFound { + httpStatus = http.StatusNoContent + } + writeJsonError(w, r, httpStatus, err) return } - if err = fs.updateFilerStore(ctx, r, w, replication, collection, ret, fileId); err != nil { - return - } - - // send back post result - reply := FilerPostResult{ - Name: ret.Name, - Size: ret.Size, - Error: ret.Error, - Fid: fileId, - Url: urlLocation, - } - setEtag(w, ret.ETag) - writeJsonQuiet(w, r, http.StatusCreated, reply) + w.WriteHeader(http.StatusNoContent) } -// update metadata in filer store -func (fs *FilerServer) updateFilerStore(ctx context.Context, r *http.Request, w http.ResponseWriter, - replication string, collection string, ret operation.UploadResult, fileId string) (err error) { - - stats.FilerRequestCounter.WithLabelValues("postStoreWrite").Inc() - start := time.Now() - defer func() { - stats.FilerRequestHistogram.WithLabelValues("postStoreWrite").Observe(time.Since(start).Seconds()) - }() +func (fs *FilerServer) detectStorageOption(requestURI, qCollection, qReplication string, ttlSeconds int32, diskType string, dataCenter, rack string) *operation.StorageOption { + collection := util.Nvl(qCollection, fs.option.Collection) + replication := util.Nvl(qReplication, fs.option.DefaultReplication) - modeStr := r.URL.Query().Get("mode") - if modeStr == "" { - modeStr = "0660" + // required by buckets folder + bucketDefaultReplication, fsync := "", false + if strings.HasPrefix(requestURI, fs.filer.DirBucketsPath+"/") { + collection = fs.filer.DetectBucket(util.FullPath(requestURI)) + bucketDefaultReplication, fsync = fs.filer.ReadBucketOption(collection) } - mode, err := strconv.ParseUint(modeStr, 8, 32) - if err != nil { - glog.Errorf("Invalid mode format: %s, use 0660 by default", modeStr) - mode = 0660 - } - 
- path := r.URL.Path - if strings.HasSuffix(path, "/") { - if ret.Name != "" { - path += ret.Name - } - } - existingEntry, err := fs.filer.FindEntry(ctx, filer2.FullPath(path)) - crTime := time.Now() - if err == nil && existingEntry != nil { - crTime = existingEntry.Crtime - } - entry := &filer2.Entry{ - FullPath: filer2.FullPath(path), - Attr: filer2.Attr{ - Mtime: time.Now(), - Crtime: crTime, - Mode: os.FileMode(mode), - Uid: OS_UID, - Gid: OS_GID, - Replication: replication, - Collection: collection, - TtlSec: int32(util.ParseInt(r.URL.Query().Get("ttl"), 0)), - }, - Chunks: []*filer_pb.FileChunk{{ - FileId: fileId, - Size: uint64(ret.Size), - Mtime: time.Now().UnixNano(), - ETag: ret.ETag, - }}, - } - if ext := filenamePath.Ext(path); ext != "" { - entry.Attr.Mime = mime.TypeByExtension(ext) - } - // glog.V(4).Infof("saving %s => %+v", path, entry) - if dbErr := fs.filer.CreateEntry(ctx, entry); dbErr != nil { - fs.filer.DeleteChunks(entry.Chunks) - glog.V(0).Infof("failing to write %s to filer server : %v", path, dbErr) - writeJsonError(w, r, http.StatusInternalServerError, dbErr) - err = dbErr - return + if replication == "" { + replication = bucketDefaultReplication } - return nil -} - -// send request to volume server -func (fs *FilerServer) uploadToVolumeServer(r *http.Request, u *url.URL, auth security.EncodedJwt, w http.ResponseWriter, fileId string) (ret operation.UploadResult, err error) { - - stats.FilerRequestCounter.WithLabelValues("postUpload").Inc() - start := time.Now() - defer func() { stats.FilerRequestHistogram.WithLabelValues("postUpload").Observe(time.Since(start).Seconds()) }() + rule := fs.filer.FilerConf.MatchStorageRule(requestURI) - request := &http.Request{ - Method: r.Method, - URL: u, - Proto: r.Proto, - ProtoMajor: r.ProtoMajor, - ProtoMinor: r.ProtoMinor, - Header: r.Header, - Body: r.Body, - Host: r.Host, - ContentLength: r.ContentLength, - } - if auth != "" { - request.Header.Set("Authorization", "BEARER "+string(auth)) - } - resp, doErr := util.Do(request) - if doErr != nil { - glog.Errorf("failing to connect to volume server %s: %v, %+v", r.RequestURI, doErr, r.Method) - writeJsonError(w, r, http.StatusInternalServerError, doErr) - err = doErr - return - } - defer func() { - io.Copy(ioutil.Discard, resp.Body) - resp.Body.Close() - }() - etag := resp.Header.Get("ETag") - respBody, raErr := ioutil.ReadAll(resp.Body) - if raErr != nil { - glog.V(0).Infoln("failing to upload to volume server", r.RequestURI, raErr.Error()) - writeJsonError(w, r, http.StatusInternalServerError, raErr) - err = raErr - return - } - glog.V(4).Infoln("post result", string(respBody)) - unmarshalErr := json.Unmarshal(respBody, &ret) - if unmarshalErr != nil { - glog.V(0).Infoln("failing to read upload resonse", r.RequestURI, string(respBody)) - writeJsonError(w, r, http.StatusInternalServerError, unmarshalErr) - err = unmarshalErr - return - } - if ret.Error != "" { - err = errors.New(ret.Error) - glog.V(0).Infoln("failing to post to volume server", r.RequestURI, ret.Error) - writeJsonError(w, r, http.StatusInternalServerError, err) - return - } - // find correct final path - path := r.URL.Path - if strings.HasSuffix(path, "/") { - if ret.Name != "" { - path += ret.Name - } else { - err = fmt.Errorf("can not to write to folder %s without a file name", path) - fs.filer.DeleteFileByFileId(fileId) - glog.V(0).Infoln("Can not to write to folder", path, "without a file name!") - writeJsonError(w, r, http.StatusInternalServerError, err) - return + if ttlSeconds == 0 { + ttl, err := 
needle.ReadTTL(rule.GetTtl()) + if err != nil { + glog.Errorf("fail to parse %s ttl setting %s: %v", rule.LocationPrefix, rule.Ttl, err) } + ttlSeconds = int32(ttl.Minutes()) * 60 } - if etag != "" { - ret.ETag = etag + + return &operation.StorageOption{ + Replication: util.Nvl(replication, rule.Replication), + Collection: util.Nvl(collection, rule.Collection), + DataCenter: util.Nvl(dataCenter, fs.option.DataCenter), + Rack: util.Nvl(rack, fs.option.Rack), + TtlSeconds: ttlSeconds, + DiskType: util.Nvl(diskType, rule.DiskType), + Fsync: fsync || rule.Fsync, + VolumeGrowthCount: rule.VolumeGrowthCount, } - return } -// curl -X DELETE http://localhost:8888/path/to -// curl -X DELETE http://localhost:8888/path/to?recursive=true -// curl -X DELETE http://localhost:8888/path/to?recursive=true&ignoreRecursiveError=true -// curl -X DELETE http://localhost:8888/path/to?recursive=true&skipChunkDeletion=true -func (fs *FilerServer) DeleteHandler(w http.ResponseWriter, r *http.Request) { - - isRecursive := r.FormValue("recursive") == "true" - ignoreRecursiveError := r.FormValue("ignoreRecursiveError") == "true" - skipChunkDeletion := r.FormValue("skipChunkDeletion") == "true" +func (fs *FilerServer) detectStorageOption0(requestURI, qCollection, qReplication string, qTtl string, diskType string, dataCenter, rack string) *operation.StorageOption { - err := fs.filer.DeleteEntryMetaAndData(context.Background(), filer2.FullPath(r.URL.Path), isRecursive, ignoreRecursiveError, !skipChunkDeletion) + ttl, err := needle.ReadTTL(qTtl) if err != nil { - glog.V(1).Infoln("deleting", r.URL.Path, ":", err.Error()) - httpStatus := http.StatusInternalServerError - if err == filer2.ErrNotFound { - httpStatus = http.StatusNotFound - } - writeJsonError(w, r, httpStatus, err) - return + glog.Errorf("fail to parse ttl %s: %v", qTtl, err) } - w.WriteHeader(http.StatusNoContent) + return fs.detectStorageOption(requestURI, qCollection, qReplication, int32(ttl.Minutes())*60, diskType, dataCenter, rack) } diff --git a/weed/server/filer_server_handlers_write_autochunk.go b/weed/server/filer_server_handlers_write_autochunk.go index 8ff7ab2c0..c4f10d94e 100644 --- a/weed/server/filer_server_handlers_write_autochunk.go +++ b/weed/server/filer_server_handlers_write_autochunk.go @@ -1,31 +1,27 @@ package weed_server import ( - "bytes" "context" + "fmt" "io" - "io/ioutil" "net/http" + "os" "path" "strconv" "strings" "time" - "github.com/chrislusf/seaweedfs/weed/filer2" + "github.com/chrislusf/seaweedfs/weed/filer" "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/operation" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" - "github.com/chrislusf/seaweedfs/weed/security" + xhttp "github.com/chrislusf/seaweedfs/weed/s3api/http" "github.com/chrislusf/seaweedfs/weed/stats" + "github.com/chrislusf/seaweedfs/weed/storage/needle" "github.com/chrislusf/seaweedfs/weed/util" ) -func (fs *FilerServer) autoChunk(ctx context.Context, w http.ResponseWriter, r *http.Request, - replication string, collection string, dataCenter string) bool { - if r.Method != "POST" { - glog.V(4).Infoln("AutoChunking not supported for method", r.Method) - return false - } +func (fs *FilerServer) autoChunk(ctx context.Context, w http.ResponseWriter, r *http.Request, contentLength int64, so *operation.StorageOption) { // autoChunking can be set at the command-line level or as a query param. 
Query param overrides command-line query := r.URL.Query() @@ -35,174 +31,308 @@ func (fs *FilerServer) autoChunk(ctx context.Context, w http.ResponseWriter, r * if maxMB <= 0 && fs.option.MaxMB > 0 { maxMB = int32(fs.option.MaxMB) } - if maxMB <= 0 { - glog.V(4).Infoln("AutoChunking not enabled") - return false - } - glog.V(4).Infoln("AutoChunking level set to", maxMB, "(MB)") chunkSize := 1024 * 1024 * maxMB - contentLength := int64(0) - if contentLengthHeader := r.Header["Content-Length"]; len(contentLengthHeader) == 1 { - contentLength, _ = strconv.ParseInt(contentLengthHeader[0], 10, 64) - if contentLength <= int64(chunkSize) { - glog.V(4).Infoln("Content-Length of", contentLength, "is less than the chunk size of", chunkSize, "so autoChunking will be skipped.") - return false - } - } + stats.FilerRequestCounter.WithLabelValues("chunk").Inc() + start := time.Now() + defer func() { + stats.FilerRequestHistogram.WithLabelValues("chunk").Observe(time.Since(start).Seconds()) + }() - if contentLength <= 0 { - glog.V(4).Infoln("Content-Length value is missing or unexpected so autoChunking will be skipped.") - return false + var reply *FilerPostResult + var err error + var md5bytes []byte + if r.Method == "POST" { + if r.Header.Get("Content-Type") == "" && strings.HasSuffix(r.URL.Path, "/") { + reply, err = fs.mkdir(ctx, w, r) + } else { + reply, md5bytes, err = fs.doPostAutoChunk(ctx, w, r, chunkSize, contentLength, so) + } + } else { + reply, md5bytes, err = fs.doPutAutoChunk(ctx, w, r, chunkSize, contentLength, so) } - - reply, err := fs.doAutoChunk(ctx, w, r, contentLength, chunkSize, replication, collection, dataCenter) if err != nil { - writeJsonError(w, r, http.StatusInternalServerError, err) + if strings.HasPrefix(err.Error(), "read input:") { + writeJsonError(w, r, 499, err) + } else if strings.HasSuffix(err.Error(), "is a file") { + writeJsonError(w, r, http.StatusConflict, err) + } else { + writeJsonError(w, r, http.StatusInternalServerError, err) + } } else if reply != nil { + if len(md5bytes) > 0 { + w.Header().Set("Content-MD5", util.Base64Encode(md5bytes)) + } writeJsonQuiet(w, r, http.StatusCreated, reply) } - return true } -func (fs *FilerServer) doAutoChunk(ctx context.Context, w http.ResponseWriter, r *http.Request, - contentLength int64, chunkSize int32, replication string, collection string, dataCenter string) (filerResult *FilerPostResult, replyerr error) { - - stats.FilerRequestCounter.WithLabelValues("postAutoChunk").Inc() - start := time.Now() - defer func() { - stats.FilerRequestHistogram.WithLabelValues("postAutoChunk").Observe(time.Since(start).Seconds()) - }() +func (fs *FilerServer) doPostAutoChunk(ctx context.Context, w http.ResponseWriter, r *http.Request, chunkSize int32, contentLength int64, so *operation.StorageOption) (filerResult *FilerPostResult, md5bytes []byte, replyerr error) { multipartReader, multipartReaderErr := r.MultipartReader() if multipartReaderErr != nil { - return nil, multipartReaderErr + return nil, nil, multipartReaderErr } part1, part1Err := multipartReader.NextPart() if part1Err != nil { - return nil, part1Err + return nil, nil, part1Err } fileName := part1.FileName() if fileName != "" { fileName = path.Base(fileName) } + contentType := part1.Header.Get("Content-Type") + if contentType == "application/octet-stream" { + contentType = "" + } - var fileChunks []*filer_pb.FileChunk + fileChunks, md5Hash, chunkOffset, err, smallContent := fs.uploadReaderToChunks(w, r, part1, chunkSize, fileName, contentType, contentLength, so) + if err != nil 
{ + return nil, nil, err + } - totalBytesRead := int64(0) - tmpBufferSize := int32(1024 * 1024) - tmpBuffer := bytes.NewBuffer(make([]byte, 0, tmpBufferSize)) - chunkBuf := make([]byte, chunkSize+tmpBufferSize, chunkSize+tmpBufferSize) // chunk size plus a little overflow - chunkBufOffset := int32(0) - chunkOffset := int64(0) - writtenChunks := 0 + md5bytes = md5Hash.Sum(nil) + filerResult, replyerr = fs.saveMetaData(ctx, r, fileName, contentType, so, md5bytes, fileChunks, chunkOffset, smallContent) - filerResult = &FilerPostResult{ - Name: fileName, - } + return +} - for totalBytesRead < contentLength { - tmpBuffer.Reset() - bytesRead, readErr := io.CopyN(tmpBuffer, part1, int64(tmpBufferSize)) - readFully := readErr != nil && readErr == io.EOF - tmpBuf := tmpBuffer.Bytes() - bytesToCopy := tmpBuf[0:int(bytesRead)] +func (fs *FilerServer) doPutAutoChunk(ctx context.Context, w http.ResponseWriter, r *http.Request, chunkSize int32, contentLength int64, so *operation.StorageOption) (filerResult *FilerPostResult, md5bytes []byte, replyerr error) { - copy(chunkBuf[chunkBufOffset:chunkBufOffset+int32(bytesRead)], bytesToCopy) - chunkBufOffset = chunkBufOffset + int32(bytesRead) + fileName := path.Base(r.URL.Path) + contentType := r.Header.Get("Content-Type") + if contentType == "application/octet-stream" { + contentType = "" + } - if chunkBufOffset >= chunkSize || readFully || (chunkBufOffset > 0 && bytesRead == 0) { - writtenChunks = writtenChunks + 1 - fileId, urlLocation, auth, assignErr := fs.assignNewFileInfo(w, r, replication, collection, dataCenter) - if assignErr != nil { - return nil, assignErr - } + fileChunks, md5Hash, chunkOffset, err, smallContent := fs.uploadReaderToChunks(w, r, r.Body, chunkSize, fileName, contentType, contentLength, so) + if err != nil { + return nil, nil, err + } - // upload the chunk to the volume server - chunkName := fileName + "_chunk_" + strconv.FormatInt(int64(len(fileChunks)+1), 10) - uploadErr := fs.doUpload(urlLocation, w, r, chunkBuf[0:chunkBufOffset], chunkName, "", fileId, auth) - if uploadErr != nil { - return nil, uploadErr - } + md5bytes = md5Hash.Sum(nil) + filerResult, replyerr = fs.saveMetaData(ctx, r, fileName, contentType, so, md5bytes, fileChunks, chunkOffset, smallContent) - // Save to chunk manifest structure - fileChunks = append(fileChunks, - &filer_pb.FileChunk{ - FileId: fileId, - Offset: chunkOffset, - Size: uint64(chunkBufOffset), - Mtime: time.Now().UnixNano(), - }, - ) - - // reset variables for the next chunk - chunkBufOffset = 0 - chunkOffset = totalBytesRead + int64(bytesRead) - } + return +} - totalBytesRead = totalBytesRead + int64(bytesRead) +func isAppend(r *http.Request) bool { + return r.URL.Query().Get("op") == "append" +} - if bytesRead == 0 || readFully { - break - } +func (fs *FilerServer) saveMetaData(ctx context.Context, r *http.Request, fileName string, contentType string, so *operation.StorageOption, md5bytes []byte, fileChunks []*filer_pb.FileChunk, chunkOffset int64, content []byte) (filerResult *FilerPostResult, replyerr error) { - if readErr != nil { - return nil, readErr - } + // detect file mode + modeStr := r.URL.Query().Get("mode") + if modeStr == "" { + modeStr = "0660" + } + mode, err := strconv.ParseUint(modeStr, 8, 32) + if err != nil { + glog.Errorf("Invalid mode format: %s, use 0660 by default", modeStr) + mode = 0660 } + // fix the path path := r.URL.Path if strings.HasSuffix(path, "/") { if fileName != "" { path += fileName } + } else { + if fileName != "" { + if possibleDirEntry, findDirErr := 
fs.filer.FindEntry(ctx, util.FullPath(path)); findDirErr == nil { + if possibleDirEntry.IsDirectory() { + path += "/" + fileName + } + } + } } - glog.V(4).Infoln("saving", path) - entry := &filer2.Entry{ - FullPath: filer2.FullPath(path), - Attr: filer2.Attr{ - Mtime: time.Now(), - Crtime: time.Now(), - Mode: 0660, - Uid: OS_UID, - Gid: OS_GID, - Replication: replication, - Collection: collection, - TtlSec: int32(util.ParseInt(r.URL.Query().Get("ttl"), 0)), - }, - Chunks: fileChunks, + var entry *filer.Entry + var mergedChunks []*filer_pb.FileChunk + // when it is an append + if isAppend(r) { + existingEntry, findErr := fs.filer.FindEntry(ctx, util.FullPath(path)) + if findErr != nil && findErr != filer_pb.ErrNotFound { + glog.V(0).Infof("failing to find %s: %v", path, findErr) + } + entry = existingEntry + } + if entry != nil { + entry.Mtime = time.Now() + entry.Md5 = nil + // adjust chunk offsets + for _, chunk := range fileChunks { + chunk.Offset += int64(entry.FileSize) + } + mergedChunks = append(entry.Chunks, fileChunks...) + entry.FileSize += uint64(chunkOffset) + + // TODO + if len(entry.Content) > 0 { + replyerr = fmt.Errorf("append to small file is not supported yet") + return + } + + } else { + glog.V(4).Infoln("saving", path) + mergedChunks = fileChunks + entry = &filer.Entry{ + FullPath: util.FullPath(path), + Attr: filer.Attr{ + Mtime: time.Now(), + Crtime: time.Now(), + Mode: os.FileMode(mode), + Uid: OS_UID, + Gid: OS_GID, + Replication: so.Replication, + Collection: so.Collection, + TtlSec: so.TtlSeconds, + DiskType: so.DiskType, + Mime: contentType, + Md5: md5bytes, + FileSize: uint64(chunkOffset), + }, + Content: content, + } } - if dbErr := fs.filer.CreateEntry(ctx, entry); dbErr != nil { - fs.filer.DeleteChunks(entry.Chunks) + + // maybe compact entry chunks + mergedChunks, replyerr = filer.MaybeManifestize(fs.saveAsChunk(so), mergedChunks) + if replyerr != nil { + glog.V(0).Infof("manifestize %s: %v", r.RequestURI, replyerr) + return + } + entry.Chunks = mergedChunks + + filerResult = &FilerPostResult{ + Name: fileName, + Size: int64(entry.FileSize), + } + + if entry.Extended == nil { + entry.Extended = make(map[string][]byte) + } + + SaveAmzMetaData(r, entry.Extended, false) + + for k, v := range r.Header { + if len(v) > 0 && strings.HasPrefix(k, needle.PairNamePrefix) { + entry.Extended[k] = []byte(v[0]) + } + } + + if dbErr := fs.filer.CreateEntry(ctx, entry, false, false, nil); dbErr != nil { + fs.filer.DeleteChunks(fileChunks) replyerr = dbErr filerResult.Error = dbErr.Error() glog.V(0).Infof("failing to write %s to filer server : %v", path, dbErr) + } + return filerResult, replyerr +} + +func (fs *FilerServer) saveAsChunk(so *operation.StorageOption) filer.SaveDataAsChunkFunctionType { + + return func(reader io.Reader, name string, offset int64) (*filer_pb.FileChunk, string, string, error) { + // assign one file id for one chunk + fileId, urlLocation, auth, assignErr := fs.assignNewFileInfo(so) + if assignErr != nil { + return nil, "", "", assignErr + } + + // upload the chunk to the volume server + uploadResult, uploadErr, _ := operation.Upload(urlLocation, name, fs.option.Cipher, reader, false, "", nil, auth) + if uploadErr != nil { + return nil, "", "", uploadErr + } + + return uploadResult.ToPbFileChunk(fileId, offset), so.Collection, so.Replication, nil + } +} + +func (fs *FilerServer) mkdir(ctx context.Context, w http.ResponseWriter, r *http.Request) (filerResult *FilerPostResult, replyerr error) { + + // detect file mode + modeStr := 
r.URL.Query().Get("mode") + if modeStr == "" { + modeStr = "0660" + } + mode, err := strconv.ParseUint(modeStr, 8, 32) + if err != nil { + glog.Errorf("Invalid mode format: %s, use 0660 by default", modeStr) + mode = 0660 + } + + // fix the path + path := r.URL.Path + if strings.HasSuffix(path, "/") { + path = path[:len(path)-1] + } + + existingEntry, err := fs.filer.FindEntry(ctx, util.FullPath(path)) + if err == nil && existingEntry != nil { + replyerr = fmt.Errorf("dir %s already exists", path) return } - return + glog.V(4).Infoln("mkdir", path) + entry := &filer.Entry{ + FullPath: util.FullPath(path), + Attr: filer.Attr{ + Mtime: time.Now(), + Crtime: time.Now(), + Mode: os.FileMode(mode) | os.ModeDir, + Uid: OS_UID, + Gid: OS_GID, + }, + } + + filerResult = &FilerPostResult{ + Name: util.FullPath(path).Name(), + } + + if dbErr := fs.filer.CreateEntry(ctx, entry, false, false, nil); dbErr != nil { + replyerr = dbErr + filerResult.Error = dbErr.Error() + glog.V(0).Infof("failing to create dir %s on filer server : %v", path, dbErr) + } + return filerResult, replyerr } -func (fs *FilerServer) doUpload(urlLocation string, w http.ResponseWriter, r *http.Request, - chunkBuf []byte, fileName string, contentType string, fileId string, auth security.EncodedJwt) (err error) { +func SaveAmzMetaData(r *http.Request, existing map[string][]byte, isReplace bool) (metadata map[string][]byte) { - stats.FilerRequestCounter.WithLabelValues("postAutoChunkUpload").Inc() - start := time.Now() - defer func() { - stats.FilerRequestHistogram.WithLabelValues("postAutoChunkUpload").Observe(time.Since(start).Seconds()) - }() + metadata = make(map[string][]byte) + if !isReplace { + for k, v := range existing { + metadata[k] = v + } + } - ioReader := ioutil.NopCloser(bytes.NewBuffer(chunkBuf)) - uploadResult, uploadError := operation.Upload(urlLocation, fileName, ioReader, false, contentType, nil, auth) - if uploadResult != nil { - glog.V(0).Infoln("Chunk upload result. 
Name:", uploadResult.Name, "Fid:", fileId, "Size:", uploadResult.Size)
+	if sc := r.Header.Get(xhttp.AmzStorageClass); sc != "" {
+		metadata[xhttp.AmzStorageClass] = []byte(sc)
 	}
-	if uploadError != nil {
-		err = uploadError
+
+	if tags := r.Header.Get(xhttp.AmzObjectTagging); tags != "" {
+		for _, v := range strings.Split(tags, "&") {
+			tag := strings.Split(v, "=")
+			if len(tag) == 2 {
+				metadata[xhttp.AmzObjectTagging+"-"+tag[0]] = []byte(tag[1])
+			}
+		}
 	}
+
+	for header, values := range r.Header {
+		if strings.HasPrefix(header, xhttp.AmzUserMetaPrefix) {
+			for _, value := range values {
+				metadata[header] = []byte(value)
+			}
+		}
+	}
+	return
+}
diff --git a/weed/server/filer_server_handlers_write_cipher.go b/weed/server/filer_server_handlers_write_cipher.go
new file mode 100644
index 000000000..8334d1618
--- /dev/null
+++ b/weed/server/filer_server_handlers_write_cipher.go
@@ -0,0 +1,94 @@
+package weed_server
+
+import (
+	"context"
+	"fmt"
+	"net/http"
+	"strings"
+	"time"
+
+	"github.com/chrislusf/seaweedfs/weed/filer"
+	"github.com/chrislusf/seaweedfs/weed/glog"
+	"github.com/chrislusf/seaweedfs/weed/operation"
+	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+	"github.com/chrislusf/seaweedfs/weed/storage/needle"
+	"github.com/chrislusf/seaweedfs/weed/util"
+)
+
+// handling single chunk POST or PUT upload
+func (fs *FilerServer) encrypt(ctx context.Context, w http.ResponseWriter, r *http.Request, so *operation.StorageOption) (filerResult *FilerPostResult, err error) {
+
+	fileId, urlLocation, auth, err := fs.assignNewFileInfo(so)
+
+	if err != nil || fileId == "" || urlLocation == "" {
+		return nil, fmt.Errorf("fail to allocate volume for %s, collection:%s, datacenter:%s", r.URL.Path, so.Collection, so.DataCenter)
+	}
+
+	glog.V(4).Infof("write %s to %v", r.URL.Path, urlLocation)
+
+	// Note: encrypt(gzip(data)), encrypt data first, then gzip
+
+	sizeLimit := int64(fs.option.MaxMB) * 1024 * 1024
+
+	pu, err := needle.ParseUpload(r, sizeLimit)
+	if err != nil {
+		return nil, fmt.Errorf("bad upload request %s: %v", r.URL.Path, err)
+	}
+	uncompressedData := pu.Data
+	if pu.IsGzipped {
+		uncompressedData = pu.UncompressedData
+	}
+	if pu.MimeType == "" {
+		pu.MimeType = http.DetectContentType(uncompressedData)
+		// println("detect2 mimetype to", pu.MimeType)
+	}
+
+	uploadResult, uploadError := operation.UploadData(urlLocation, pu.FileName, true, uncompressedData, false, pu.MimeType, pu.PairMap, auth)
+	if uploadError != nil {
+		return nil, fmt.Errorf("upload to volume server: %v", uploadError)
+	}
+
+	// Save to chunk manifest structure
+	fileChunks := []*filer_pb.FileChunk{uploadResult.ToPbFileChunk(fileId, 0)}
+
+	// fmt.Printf("uploaded: %+v\n", uploadResult)
+
+	path := r.URL.Path
+	if strings.HasSuffix(path, "/") {
+		if pu.FileName != "" {
+			path += pu.FileName
+		}
+	}
+
+	entry := &filer.Entry{
+		FullPath: util.FullPath(path),
+		Attr: filer.Attr{
+			Mtime:       time.Now(),
+			Crtime:      time.Now(),
+			Mode:        0660,
+			Uid:         OS_UID,
+			Gid:         OS_GID,
+			Replication: so.Replication,
+			Collection:  so.Collection,
+			TtlSec:      so.TtlSeconds,
+			DiskType:    so.DiskType,
+			Mime:        pu.MimeType,
+			Md5:         util.Base64Md5ToBytes(pu.ContentMd5),
+		},
+		Chunks: fileChunks,
+	}
+
+	filerResult = &FilerPostResult{
+		Name: pu.FileName,
+		Size: int64(pu.OriginalDataSize),
+	}
+
+	if dbErr := fs.filer.CreateEntry(ctx, entry, false, false, nil); dbErr != nil {
+		fs.filer.DeleteChunks(entry.Chunks)
+		err = dbErr
+		filerResult.Error = dbErr.Error()
+		return
+	}
+
+	return
+}
diff --git a/weed/server/filer_server_handlers_write_upload.go b/weed/server/filer_server_handlers_write_upload.go
new file mode 100644
index
000000000..3ab45453e --- /dev/null +++ b/weed/server/filer_server_handlers_write_upload.go @@ -0,0 +1,105 @@ +package weed_server + +import ( + "crypto/md5" + "hash" + "io" + "io/ioutil" + "net/http" + "strings" + "time" + + "github.com/chrislusf/seaweedfs/weed/filer" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/operation" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/security" + "github.com/chrislusf/seaweedfs/weed/stats" + "github.com/chrislusf/seaweedfs/weed/util" +) + +func (fs *FilerServer) uploadReaderToChunks(w http.ResponseWriter, r *http.Request, reader io.Reader, chunkSize int32, fileName, contentType string, contentLength int64, so *operation.StorageOption) ([]*filer_pb.FileChunk, hash.Hash, int64, error, []byte) { + var fileChunks []*filer_pb.FileChunk + + md5Hash := md5.New() + var partReader = ioutil.NopCloser(io.TeeReader(reader, md5Hash)) + + chunkOffset := int64(0) + var smallContent []byte + + for { + limitedReader := io.LimitReader(partReader, int64(chunkSize)) + + data, err := ioutil.ReadAll(limitedReader) + if err != nil { + return nil, nil, 0, err, nil + } + if chunkOffset == 0 && !isAppend(r) { + if len(data) < int(fs.option.SaveToFilerLimit) || strings.HasPrefix(r.URL.Path, filer.DirectoryEtcRoot) && len(data) < 4*1024 { + smallContent = data + chunkOffset += int64(len(data)) + break + } + } + dataReader := util.NewBytesReader(data) + + // retry to assign a different file id + var fileId, urlLocation string + var auth security.EncodedJwt + var assignErr, uploadErr error + var uploadResult *operation.UploadResult + for i := 0; i < 3; i++ { + // assign one file id for one chunk + fileId, urlLocation, auth, assignErr = fs.assignNewFileInfo(so) + if assignErr != nil { + return nil, nil, 0, assignErr, nil + } + + // upload the chunk to the volume server + uploadResult, uploadErr, _ = fs.doUpload(urlLocation, w, r, dataReader, fileName, contentType, nil, auth) + if uploadErr != nil { + time.Sleep(251 * time.Millisecond) + continue + } + break + } + if uploadErr != nil { + return nil, nil, 0, uploadErr, nil + } + + // if last chunk exhausted the reader exactly at the border + if uploadResult.Size == 0 { + break + } + + // Save to chunk manifest structure + fileChunks = append(fileChunks, uploadResult.ToPbFileChunk(fileId, chunkOffset)) + + glog.V(4).Infof("uploaded %s chunk %d to %s [%d,%d)", fileName, len(fileChunks), fileId, chunkOffset, chunkOffset+int64(uploadResult.Size)) + + // reset variables for the next chunk + chunkOffset = chunkOffset + int64(uploadResult.Size) + + // if last chunk was not at full chunk size, but already exhausted the reader + if int64(uploadResult.Size) < int64(chunkSize) { + break + } + } + + return fileChunks, md5Hash, chunkOffset, nil, smallContent +} + +func (fs *FilerServer) doUpload(urlLocation string, w http.ResponseWriter, r *http.Request, limitedReader io.Reader, fileName string, contentType string, pairMap map[string]string, auth security.EncodedJwt) (*operation.UploadResult, error, []byte) { + + stats.FilerRequestCounter.WithLabelValues("chunkUpload").Inc() + start := time.Now() + defer func() { + stats.FilerRequestHistogram.WithLabelValues("chunkUpload").Observe(time.Since(start).Seconds()) + }() + + uploadResult, err, data := operation.Upload(urlLocation, fileName, fs.option.Cipher, limitedReader, false, contentType, pairMap, auth) + if uploadResult != nil && uploadResult.RetryCount > 0 { + 
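+		// RetryCount reports how many extra attempts operation.Upload needed for this chunk;
+		// count them so flaky volume-server uploads are visible in the filer metrics.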
stats.FilerRequestCounter.WithLabelValues("chunkUploadRetry").Add(float64(uploadResult.RetryCount)) + } + return uploadResult, err, data +} diff --git a/weed/server/filer_server_rocksdb.go b/weed/server/filer_server_rocksdb.go new file mode 100644 index 000000000..5fcc7e88f --- /dev/null +++ b/weed/server/filer_server_rocksdb.go @@ -0,0 +1,7 @@ +// +build rocksdb + +package weed_server + +import ( + _ "github.com/chrislusf/seaweedfs/weed/filer/rocksdb" +) diff --git a/weed/server/filer_ui/breadcrumb.go b/weed/server/filer_ui/breadcrumb.go index 2f0df7f91..5016117a8 100644 --- a/weed/server/filer_ui/breadcrumb.go +++ b/weed/server/filer_ui/breadcrumb.go @@ -1,8 +1,9 @@ -package master_ui +package filer_ui import ( - "path/filepath" "strings" + + "github.com/chrislusf/seaweedfs/weed/util" ) type Breadcrumb struct { @@ -16,7 +17,7 @@ func ToBreadcrumb(fullpath string) (crumbs []Breadcrumb) { for i := 0; i < len(parts); i++ { crumb := Breadcrumb{ Name: parts[i] + " /", - Link: "/" + filepath.ToSlash(filepath.Join(parts[0:i+1]...)), + Link: "/" + util.Join(parts[0:i+1]...), } if !strings.HasSuffix(crumb.Link, "/") { crumb.Link += "/" diff --git a/weed/server/filer_ui/templates.go b/weed/server/filer_ui/templates.go index e532b27e2..648b97f22 100644 --- a/weed/server/filer_ui/templates.go +++ b/weed/server/filer_ui/templates.go @@ -1,20 +1,31 @@ -package master_ui +package filer_ui import ( "github.com/dustin/go-humanize" "html/template" + "net/url" + "strings" ) +func printpath(parts ...string) string { + concat := strings.Join(parts, "") + escaped := url.PathEscape(concat) + return strings.ReplaceAll(escaped, "%2F", "/") +} + var funcMap = template.FuncMap{ "humanizeBytes": humanize.Bytes, + "printpath": printpath, } var StatusTpl = template.Must(template.New("status").Funcs(funcMap).Parse(` - SeaweedFS Filer - + SeaweedFS Filer + + @@ -50,7 +66,7 @@ var StatusTpl = template.Must(template.New("status").Funcs(funcMap).Parse(`
{{ range $entry := .Breadcrumbs }} - + {{ $entry.Name }} {{ end }} @@ -69,11 +85,11 @@ var StatusTpl = template.Must(template.New("status").Funcs(funcMap).Parse(` {{if $entry.IsDirectory}} - + {{ $entry.Name }} {{else}} - + {{ $entry.Name }} {{end}} @@ -107,6 +123,14 @@ var StatusTpl = template.Must(template.New("status").Funcs(funcMap).Parse(`
{{end}} + +
+
+ + +
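
The tagging handlers in filer_server_handlers_tagging.go are driven entirely by Seaweed- prefixed headers plus the ?tagging query string. A minimal Go client sketch, assuming a filer listening on localhost:8888 (as in the curl examples in that file) and that the filer routes ?tagging PUT/DELETE requests to these handlers:

package main

import (
	"fmt"
	"net/http"
)

func main() {
	// PUT with Seaweed- headers adds or replaces extended attributes on the entry.
	put, _ := http.NewRequest(http.MethodPut, "http://localhost:8888/path/to/a/file?tagging", nil)
	put.Header.Set("Seaweed-Name1", "value1") // any header with the Seaweed- pair prefix is stored
	resp, err := http.DefaultClient.Do(put)
	if err == nil {
		resp.Body.Close()
		fmt.Println("put tagging:", resp.Status) // 202 Accepted on success
	}

	// DELETE strips every Seaweed- prefixed attribute from the entry.
	del, _ := http.NewRequest(http.MethodDelete, "http://localhost:8888/path/to/a/file?tagging", nil)
	resp, err = http.DefaultClient.Do(del)
	if err == nil {
		resp.Body.Close()
		fmt.Println("delete tagging:", resp.Status) // 202 Accepted, or 304 when nothing was tagged
	}
}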
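saveMetaData in filer_server_handlers_write_autochunk.go also recognizes op=append (see isAppend): new chunks get their offsets shifted by the entry's current FileSize and are merged into the existing chunk list, while appending to files stored as inline small content is rejected for now. A sketch of the client side, under the same localhost assumption:

package main

import (
	"bytes"
	"fmt"
	"net/http"
)

func main() {
	body := bytes.NewBufferString("appended bytes\n")
	req, _ := http.NewRequest(http.MethodPut, "http://localhost:8888/path/to/a/file?op=append", body)
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println("append:", resp.Status) // 201 Created; the new data lands after the old FileSize
}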
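detectStorageOption resolves each setting from several sources; for replication the order is the query parameter, then the filer's default, then the bucket default (only under the buckets folder), and finally the matched filer.conf rule. Distilled into standalone logic (illustration only, not code from this patch):

// nvl-style fallback chain, mirroring util.Nvl plus the explicit
// bucket-default step in detectStorageOption.
func pickReplication(query, filerDefault, bucketDefault, rule string) string {
	replication := query
	if replication == "" {
		replication = filerDefault // util.Nvl(qReplication, fs.option.DefaultReplication)
	}
	if replication == "" {
		replication = bucketDefault // set when the path is under fs.filer.DirBucketsPath
	}
	if replication == "" {
		replication = rule // FilerConf rule matched by the request URI
	}
	return replication
}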